├── .github └── workflows │ └── ci.yaml ├── .gitignore ├── .rustfmt.toml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── acid_alloc_hater ├── Cargo.toml └── src │ └── lib.rs ├── alloc_hater ├── Cargo.toml └── src │ └── lib.rs ├── fuzz ├── .gitignore ├── Cargo.toml └── fuzz_targets │ ├── buddy_contiguous.rs │ ├── buddy_discontiguous.rs │ ├── bump.rs │ └── slab.rs ├── src ├── base.rs ├── bitmap.rs ├── buddy.rs ├── bump.rs ├── core.rs ├── lib.rs ├── slab.rs └── tests.rs └── test-all-configs.sh /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: Continuous integration 4 | 5 | jobs: 6 | fmt: 7 | name: fmt 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - uses: actions-rs/toolchain@v1 12 | with: 13 | profile: minimal 14 | toolchain: nightly 15 | override: true 16 | - run: rustup component add rustfmt 17 | - uses: actions-rs/cargo@v1 18 | with: 19 | command: fmt 20 | args: --all -- --check 21 | 22 | clippy: 23 | name: clippy 24 | runs-on: ubuntu-latest 25 | strategy: 26 | matrix: 27 | features: 28 | - sptr 29 | - unstable 30 | steps: 31 | - uses: actions/checkout@v2 32 | - uses: actions-rs/toolchain@v1 33 | with: 34 | profile: minimal 35 | toolchain: nightly 36 | override: true 37 | - run: rustup component add clippy 38 | # alloc disabled 39 | - uses: actions-rs/cargo@v1 40 | with: 41 | command: clippy 42 | args: --no-default-features --features=${{ matrix.features }} -- -D warnings 43 | # alloc enabled 44 | - uses: actions-rs/cargo@v1 45 | with: 46 | command: clippy 47 | args: --workspace --all-targets --no-default-features --features=sptr,alloc -- -D warnings 48 | 49 | test_stable: 50 | name: test (stable) 51 | runs-on: ubuntu-latest 52 | steps: 53 | - uses: actions/checkout@v2 54 | - uses: actions-rs/toolchain@v1 55 | with: 56 | profile: minimal 57 | toolchain: stable 58 | override: true 59 | # alloc disabled 60 | - uses: actions-rs/cargo@v1 61 | with: 62 | command: test 63 | args: --no-default-features --features=sptr 64 | # alloc enabled 65 | - uses: actions-rs/cargo@v1 66 | with: 67 | command: test 68 | args: --workspace --no-default-features --features=sptr,alloc 69 | 70 | test_nightly: 71 | name: test (nightly) 72 | runs-on: ubuntu-latest 73 | strategy: 74 | matrix: 75 | features: 76 | - sptr 77 | - unstable 78 | steps: 79 | - uses: actions/checkout@v2 80 | - uses: actions-rs/toolchain@v1 81 | with: 82 | profile: minimal 83 | toolchain: nightly 84 | override: true 85 | # alloc disabled 86 | - uses: actions-rs/cargo@v1 87 | with: 88 | command: test 89 | args: --no-default-features --features=${{ matrix.features }} 90 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target/ 3 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | unstable_features = true 2 | 3 | comment_width = 100 4 | wrap_comments = true 5 | 6 | imports_granularity = "Crate" 7 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "acid_alloc" 3 | description = "Bare-metal allocators" 4 | authors = ["dataphract "] 5 | version = "0.1.0" 6 | edition = "2021" 7 | 
license = "MIT OR Apache-2.0" 8 | 9 | repository = "https://github.com/dataphract/acid_alloc" 10 | documentation = "https://docs.rs/acid_alloc" 11 | readme = "README.md" 12 | 13 | # crates.io configuration 14 | keywords = ["allocator", "no_std"] 15 | categories = ["memory-management", "no-std"] 16 | 17 | [package.metadata.docs.rs] 18 | all-features = true 19 | rustdoc-args = ["--cfg", "docs_rs"] 20 | 21 | [workspace] 22 | members = [ 23 | "acid_alloc_hater", 24 | "alloc_hater", 25 | ] 26 | 27 | [features] 28 | default = ["sptr"] 29 | 30 | alloc = [] 31 | 32 | # Enables nightly-only unstable Rust features. 33 | unstable = [] 34 | 35 | [dependencies] 36 | sptr = { version = "0.2.3", optional = true } 37 | 38 | [dev-dependencies] 39 | quickcheck = "1.0.3" 40 | version-sync = "0.9.2" 41 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 dataphract 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `acid_alloc` 2 | 3 | [![CI](https://github.com/dataphract/acid_alloc/actions/workflows/ci.yaml/badge.svg)](https://github.com/dataphract/acid_alloc/actions) 4 | [![crates-io](https://img.shields.io/crates/v/acid_alloc.svg)](https://crates.io/crates/acid_alloc) 5 | [![api-docs](https://docs.rs/acid_alloc/badge.svg)](https://docs.rs/acid_alloc) 6 | 7 | ## Bare-metal allocators. 8 | 9 | This crate provides allocators that are suitable for use on bare metal or with 10 | OS allocation facilities like `mmap(2)`/`brk(2)`. 11 | 12 | The following allocator types are available: 13 | 14 | - **`Buddy`, a binary-buddy allocator**. O(log2_levels_) worst-case 15 | allocation and deallocation. Supports splitting and coalescing blocks by 16 | powers of 2. Good choice for periodic medium-to-large allocations. 17 | - **`Bump`, a bump allocator**. O(1) allocation. Extremely fast to allocate and 18 | flexible in terms of allocation layout, but unable to deallocate individual 19 | items. Good choice for allocations that will never be deallocated or that will 20 | be deallocated en masse. 21 | - **`Slab`, a slab allocator**. O(1) allocation and deallocation. All 22 | allocated blocks are the same size, making this allocator a good choice when 23 | allocating many similarly-sized objects. 24 | 25 | ## Features 26 | 27 | All allocators provided by this crate are available in a `#![no_std]`, 28 | `#![cfg(no_global_oom_handling)]` environment. Additional functionality is 29 | available when enabling feature flags: 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 45 | 46 | 47 | 48 | 49 | 50 | 55 | 56 | 57 | 58 | 59 | 60 | 63 | 64 |
| Flag | Default? | Requires nightly? | Description |
|------|----------|-------------------|-------------|
| `sptr` | Yes | No | Uses the [`sptr`] polyfill for Strict Provenance. |
| `unstable` | No | Yes | Exposes constructors for allocators backed by implementors of the unstable `Allocator` trait, and enables the internal use of nightly-only Rust features. Obviates `sptr`. |
| `alloc` | No | No | Exposes constructors for allocators backed by the global allocator. |
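For a quick taste of the API, the sketch below allocates and frees one block from a `Slab` backed by the global allocator (requires the `alloc` feature). It is adapted from the `acid_alloc_hater` test harness in this repository rather than from official examples, so the function name, layout, and error handling are illustrative only:

```rust
use core::alloc::Layout;

use acid_alloc::{Global, Slab};

fn slab_demo() {
    // 64 fixed-size blocks of 128 bytes each, backed by the global allocator.
    let mut slab: Slab<Global> = Slab::try_new(128, 64).expect("failed to construct Slab");

    // Carve out one block...
    let layout = Layout::new::<[u8; 128]>();
    let block = slab.allocate(layout).expect("allocation failed");

    // ...and hand it back. SAFETY: `block` was just allocated by this `slab`.
    unsafe { slab.deallocate(block.cast()) };
}
```

The `Buddy` and `Bump` allocators are constructed differently but expose the same `allocate`/`deallocate` surface; see `acid_alloc_hater/src/lib.rs` below.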
65 | 66 | [`sptr`]: https://crates.io/crates/sptr 67 | 68 | ## Acknowledgments 69 | 70 | This crate includes stable-compatible polyfills for a number of unstable 71 | standard-library APIs whose implementations are reproduced verbatim here. These 72 | features are listed below along with their authors and/or maintainers: 73 | 74 | - `alloc_layout_extra`, by [Amanieu d'Antras] 75 | - `int_log`, by [Yoshua Wuyts] 76 | - `strict_provenance`, by [Aria Beingessner (Gankra)] 77 | 78 | This crate also depends on [`sptr`] (also authored by Gankra) to reproduce 79 | strict provenance for normal pointers on stable Rust. 80 | 81 | _If I've misattributed any of this work, or a contributor to these features is 82 | missing, please open an issue!_ 83 | 84 | [amanieu d'antras]: https://github.com/Amanieu 85 | [yoshua wuyts]: https://github.com/yoshuawuyts 86 | [aria beingessner (gankra)]: https://github.com/Gankra 87 | 88 | ## License 89 | 90 | Licensed under either of 91 | 92 | - Apache License, Version 2.0 93 | ([LICENSE-APACHE](LICENSE-APACHE) or ) 94 | - MIT license 95 | ([LICENSE-MIT](LICENSE-MIT) or ) 96 | 97 | at your option. 98 | 99 | ## Contribution 100 | 101 | Unless you explicitly state otherwise, any contribution intentionally submitted 102 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 103 | dual licensed as above, without any additional terms or conditions. 104 | -------------------------------------------------------------------------------- /acid_alloc_hater/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "acid_alloc_hater" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | acid_alloc = { path = "../", features = ["alloc", "sptr"] } 10 | alloc_hater = { path = "../alloc_hater" } 11 | -------------------------------------------------------------------------------- /acid_alloc_hater/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(unsafe_op_in_unsafe_fn)] 2 | 3 | use std::{alloc::Layout, ops::Range, ptr::NonNull}; 4 | 5 | use acid_alloc::{AllocInitError, Buddy, Bump, Global, Slab}; 6 | use alloc_hater::Subject; 7 | 8 | pub struct BuddySubject( 9 | Buddy, 10 | ); 11 | 12 | impl BuddySubject { 13 | pub fn new(num_blocks: usize) -> Result { 14 | let b = Buddy::try_new(num_blocks)?; 15 | Ok(BuddySubject(b)) 16 | } 17 | 18 | pub fn new_with_offset_gaps( 19 | num_blocks: usize, 20 | gaps: impl IntoIterator>, 21 | ) -> Result { 22 | let b = Buddy::try_new_with_offset_gaps(num_blocks, gaps)?; 23 | Ok(BuddySubject(b)) 24 | } 25 | } 26 | 27 | impl Subject for BuddySubject { 28 | type Op = (); 29 | type AllocError = acid_alloc::AllocError; 30 | 31 | fn allocate(&mut self, layout: Layout) -> Result, Self::AllocError> { 32 | self.0.allocate(layout) 33 | } 34 | 35 | unsafe fn deallocate(&mut self, ptr: NonNull, _layout: std::alloc::Layout) { 36 | unsafe { self.0.deallocate(ptr) }; 37 | } 38 | 39 | fn handle_custom_op(&mut self, (): ()) {} 40 | } 41 | 42 | pub struct SlabSubject(Slab); 43 | 44 | impl SlabSubject { 45 | pub fn new(block_size: usize, num_blocks: usize) -> Result { 46 | let s = Slab::try_new(block_size, num_blocks)?; 47 | Ok(SlabSubject(s)) 48 | } 49 | } 50 | 51 | impl Subject for SlabSubject { 52 | type Op = (); 53 | type AllocError = acid_alloc::AllocError; 54 | 55 | fn allocate(&mut self, layout: Layout) -> 
Result, Self::AllocError> { 56 | self.0.allocate(layout) 57 | } 58 | 59 | unsafe fn deallocate(&mut self, ptr: NonNull, _layout: Layout) { 60 | unsafe { self.0.deallocate(ptr) }; 61 | } 62 | 63 | fn handle_custom_op(&mut self, (): ()) {} 64 | } 65 | 66 | pub struct BumpSubject(Bump); 67 | 68 | impl BumpSubject { 69 | pub fn new(layout: Layout) -> Result { 70 | let b = Bump::try_new(layout)?; 71 | Ok(BumpSubject(b)) 72 | } 73 | } 74 | 75 | impl Subject for BumpSubject { 76 | type Op = (); 77 | 78 | type AllocError = acid_alloc::AllocError; 79 | 80 | fn allocate(&mut self, layout: Layout) -> Result, Self::AllocError> { 81 | self.0.allocate(layout) 82 | } 83 | 84 | unsafe fn deallocate(&mut self, ptr: NonNull, _layout: Layout) { 85 | unsafe { self.0.deallocate(ptr) } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /alloc_hater/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "alloc_hater" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | arbitrary = { version = "1.1.3", features = ["derive"] } 10 | quickcheck = "1.0.3" 11 | -------------------------------------------------------------------------------- /alloc_hater/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A small library for ~~hating on~~ evaluating the correctness of allocators. 2 | #![deny(unsafe_op_in_unsafe_fn)] 3 | 4 | use core::{alloc::Layout, mem::MaybeUninit, ptr::NonNull, slice}; 5 | 6 | /// A wrapper around `Layout` which implements `Arbitrary`. 7 | #[derive(Clone, Debug)] 8 | pub struct ArbLayout(pub Layout); 9 | 10 | impl arbitrary::Arbitrary<'_> for ArbLayout { 11 | fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { 12 | // Select a random bit index and shift to obtain a power of two. 13 | let align_shift = u8::arbitrary(u)? % (usize::BITS - 1) as u8; 14 | 15 | let align: usize = 1 << align_shift; 16 | assert!(align.is_power_of_two()); 17 | 18 | let size = usize::arbitrary(u)? % (isize::MAX as usize - (align - 1)); 19 | 20 | let layout = match Layout::from_size_align(size, align) { 21 | Ok(l) => l, 22 | Err(_) => { 23 | panic!("invalid layout params: size=0x{size:X} align=0x{align:X}"); 24 | } 25 | }; 26 | 27 | Ok(ArbLayout(layout)) 28 | } 29 | } 30 | 31 | #[derive(arbitrary::Arbitrary)] 32 | enum AllocatorOpTag { 33 | Alloc, 34 | Dealloc, 35 | } 36 | 37 | #[derive(Clone, Debug)] 38 | pub enum AllocatorOp { 39 | Alloc(Layout), 40 | Dealloc(usize), 41 | } 42 | 43 | impl arbitrary::Arbitrary<'_> for AllocatorOp { 44 | fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { 45 | let tag = AllocatorOpTag::arbitrary(u)?; 46 | 47 | let op = match tag { 48 | AllocatorOpTag::Alloc => AllocatorOp::Alloc(ArbLayout::arbitrary(u)?.0), 49 | AllocatorOpTag::Dealloc => AllocatorOp::Dealloc(usize::arbitrary(u)?), 50 | }; 51 | 52 | Ok(op) 53 | } 54 | } 55 | 56 | pub trait Subject { 57 | type Op: for<'a> arbitrary::Arbitrary<'a>; 58 | type AllocError; 59 | 60 | /// Allocates a block of memory according to `layout`. 61 | fn allocate(&mut self, layout: Layout) -> Result, Self::AllocError>; 62 | 63 | /// Deallocates the block of memory with layout `layout` pointed to by `ptr`. 
64 | /// 65 | /// # Safety 66 | /// 67 | /// `ptr` must denote a block of memory currently allocated by this 68 | /// allocator, and it must have been allocated with `layout`. 69 | unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout); 70 | 71 | fn handle_custom_op(&mut self, op: Self::Op) { 72 | // To silence the unused variable warning. 73 | drop(op); 74 | } 75 | } 76 | 77 | /// A list of allocated blocks. 78 | #[derive(Default)] 79 | pub struct Blocks { 80 | blocks: Vec, 81 | } 82 | 83 | impl Blocks { 84 | pub fn new() -> Blocks { 85 | Blocks { blocks: Vec::new() } 86 | } 87 | 88 | pub fn push(&mut self, block: Block) { 89 | self.blocks.push(block); 90 | } 91 | 92 | pub fn remove_modulo(&mut self, idx: usize) -> Option { 93 | let len = self.blocks.len(); 94 | (len != 0).then(|| self.blocks.swap_remove(idx % len)) 95 | } 96 | } 97 | 98 | impl IntoIterator for Blocks { 99 | type Item = Block; 100 | 101 | type IntoIter = std::vec::IntoIter; 102 | 103 | fn into_iter(self) -> Self::IntoIter { 104 | self.blocks.into_iter() 105 | } 106 | } 107 | 108 | /// An allocated block of memory. 109 | pub struct Block { 110 | // A pointer to the allocated region. 111 | ptr: NonNull<[u8]>, 112 | // The original allocation layout. 113 | layout: Layout, 114 | // The unique ID of the last operation that wrote to this allocation. 115 | id: u64, 116 | } 117 | 118 | unsafe fn slice_ptr_to_uninit_slice_mut<'a>(ptr: NonNull<[u8]>) -> &'a mut [MaybeUninit] { 119 | unsafe { slice::from_raw_parts_mut(ptr.cast().as_ptr(), ptr.len()) } 120 | } 121 | 122 | unsafe fn paint(slice: &mut [MaybeUninit], id: u64) { 123 | let id_bytes = id.to_le_bytes().into_iter().cycle(); 124 | 125 | for (byte, value) in slice.iter_mut().zip(id_bytes) { 126 | byte.write(value); 127 | } 128 | } 129 | 130 | impl Block { 131 | /// Creates a block from `ptr` and paints it according to `id`. 132 | /// 133 | /// # Safety 134 | /// 135 | /// The caller must uphold the following invariants: 136 | /// - `ptr` must be valid for reads and writes for `ptr.len()` bytes. 137 | /// - `ptr` must have been allocated according to `layout`. 138 | /// - No references to the memory at `ptr` may exist when this function is called. 139 | /// - No accesses to the memory at `ptr` may be made except by way of the returned `Block` said 140 | /// `Block` is dropped. 141 | pub unsafe fn init(ptr: NonNull<[u8]>, layout: Layout, id: u64) -> Block { 142 | let mut b = Block { ptr, layout, id }; 143 | b.paint(id); 144 | b 145 | } 146 | 147 | /// Returns the `Block`'s memory as a slice of uninitialized bytes. 148 | pub fn as_uninit_slice(&self) -> &[MaybeUninit] { 149 | // SAFETY: self is immutably borrowed, so only immutable references to 150 | // the slice can exist 151 | unsafe { &*slice_ptr_to_uninit_slice_mut(self.ptr) } 152 | } 153 | 154 | /// Returns the `Block`'s memory as a mutable slice of uninitialized bytes. 155 | pub fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit] { 156 | // SAFETY: self is mutably borrowed, so no other references to the 157 | // slice can exist 158 | unsafe { slice_ptr_to_uninit_slice_mut(self.ptr) } 159 | } 160 | 161 | pub fn into_raw_parts(self) -> (NonNull<[u8]>, Layout) { 162 | (self.ptr, self.layout) 163 | } 164 | 165 | /// "Paints" the memory contained by `self` with the value of `id`. 166 | pub fn paint(&mut self, id: u64) { 167 | unsafe { paint(self.as_uninit_slice_mut(), id) }; 168 | } 169 | 170 | /// Verifies that the memory contained by `self` has not been overwritten. 
171 | pub fn verify(&self) -> bool { 172 | let slice: &[u8] = unsafe { self.ptr.as_ref() }; 173 | let id_bytes = self.id.to_le_bytes().into_iter().cycle(); 174 | 175 | for (byte, value) in slice.iter().zip(id_bytes) { 176 | if *byte != value { 177 | return false; 178 | } 179 | } 180 | 181 | true 182 | } 183 | } 184 | 185 | pub struct Evaluator { 186 | subject: S, 187 | } 188 | 189 | #[derive(Clone, Debug)] 190 | pub struct Failed { 191 | pub completed: Vec, 192 | pub failed_op: AllocatorOp, 193 | } 194 | 195 | impl Evaluator { 196 | pub fn new(subject: S) -> Evaluator { 197 | Evaluator { subject } 198 | } 199 | 200 | pub fn evaluate(&mut self, ops: I) -> Result<(), Failed> 201 | where 202 | I: for<'a> IntoIterator, 203 | { 204 | let mut completed = Vec::new(); 205 | let mut blocks = Blocks::new(); 206 | 207 | for (op_id, op) in ops.into_iter().enumerate() { 208 | let op_id: u64 = op_id.try_into().unwrap(); 209 | match op { 210 | AllocatorOp::Alloc(layout) => { 211 | let ptr = match self.subject.allocate(layout) { 212 | Ok(p) => p, 213 | Err(_) => continue, 214 | }; 215 | 216 | let block = unsafe { Block::init(ptr, layout, op_id) }; 217 | blocks.push(block); 218 | } 219 | 220 | AllocatorOp::Dealloc(raw_idx) => { 221 | let mut block = match blocks.remove_modulo(raw_idx) { 222 | Some(b) => b, 223 | None => continue, 224 | }; 225 | 226 | if !block.verify() { 227 | return Err(Failed { 228 | completed, 229 | failed_op: op, 230 | }); 231 | } 232 | 233 | unsafe { 234 | block.paint(op_id); 235 | self.subject.deallocate(block.ptr.cast(), block.layout); 236 | } 237 | } 238 | } 239 | 240 | completed.push(op); 241 | } 242 | 243 | for block in blocks { 244 | // TODO: verify these blocks 245 | unsafe { self.subject.deallocate(block.ptr.cast(), block.layout) }; 246 | } 247 | 248 | Ok(()) 249 | } 250 | } 251 | -------------------------------------------------------------------------------- /fuzz/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | corpus 3 | artifacts 4 | -------------------------------------------------------------------------------- /fuzz/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "acid_alloc_fuzz" 3 | version = "0.0.0" 4 | authors = ["Automatically generated"] 5 | publish = false 6 | edition = "2021" 7 | 8 | [package.metadata] 9 | cargo-fuzz = true 10 | 11 | [dependencies] 12 | acid_alloc_hater = { path = "../acid_alloc_hater" } 13 | alloc_hater = { path = "../alloc_hater" } 14 | arbitrary = { version = "1.1.3", features = ["derive"] } 15 | libfuzzer-sys = "0.4" 16 | 17 | [dependencies.acid_alloc] 18 | path = ".." 
19 | features = ["alloc", "sptr"] 20 | 21 | # Prevent this from interfering with workspaces 22 | [workspace] 23 | members = ["."] 24 | 25 | [[bin]] 26 | name = "buddy_contiguous" 27 | path = "fuzz_targets/buddy_contiguous.rs" 28 | test = false 29 | doc = false 30 | 31 | [[bin]] 32 | name = "buddy_discontiguous" 33 | path = "fuzz_targets/buddy_discontiguous.rs" 34 | test = false 35 | doc = false 36 | 37 | [[bin]] 38 | name = "slab" 39 | path = "fuzz_targets/slab.rs" 40 | test = false 41 | doc = false 42 | 43 | [[bin]] 44 | name = "bump" 45 | path = "fuzz_targets/bump.rs" 46 | test = false 47 | doc = false 48 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/buddy_contiguous.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | #![feature(allocator_api)] 3 | 4 | use acid_alloc_hater::BuddySubject; 5 | use alloc_hater::AllocatorOp; 6 | use arbitrary::{Arbitrary, Unstructured}; 7 | use libfuzzer_sys::fuzz_target; 8 | 9 | const BLK_SIZE: usize = 16384; 10 | const LEVELS: usize = 8; 11 | 12 | const MAX_BLOCKS: usize = 1024; 13 | 14 | #[derive(Clone, Debug)] 15 | struct Args { 16 | num_blocks: usize, 17 | ops: Vec, 18 | } 19 | 20 | impl Arbitrary<'_> for Args { 21 | fn arbitrary(un: &mut Unstructured) -> arbitrary::Result { 22 | let num_blocks = usize::arbitrary(un)? % MAX_BLOCKS; 23 | let ops = Vec::arbitrary(un)?; 24 | 25 | Ok(Args { num_blocks, ops }) 26 | } 27 | } 28 | 29 | fuzz_target!(|args: Args| { 30 | let buddy: BuddySubject = match BuddySubject::new(args.num_blocks) { 31 | Ok(a) => a, 32 | Err(_) => return, 33 | }; 34 | 35 | let mut eval = alloc_hater::Evaluator::new(buddy); 36 | eval.evaluate(args.ops).unwrap(); 37 | }); 38 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/buddy_discontiguous.rs: -------------------------------------------------------------------------------- 1 | #![deny(unsafe_op_in_unsafe_fn)] 2 | #![no_main] 3 | #![feature(allocator_api)] 4 | #![feature(strict_provenance)] 5 | 6 | use std::{ops::Range, ptr::NonNull}; 7 | 8 | use acid_alloc::{AllocInitError, Buddy, Raw}; 9 | use alloc_hater::{ArbLayout, Block, Blocks}; 10 | use arbitrary::{Arbitrary, Unstructured}; 11 | use libfuzzer_sys::fuzz_target; 12 | 13 | const BLK_SIZE: usize = 16384; 14 | const LEVELS: usize = 8; 15 | 16 | const MAX_BLOCKS: usize = 1024; 17 | 18 | #[derive(Clone, Debug, Arbitrary)] 19 | pub enum AllocatorOp { 20 | Allocate(ArbLayout), 21 | Deallocate(usize), 22 | AddRegion(usize), 23 | } 24 | 25 | #[derive(Clone, Debug)] 26 | struct Args { 27 | num_blocks: usize, 28 | regions: Vec>, 29 | ops: Vec, 30 | } 31 | 32 | // Generates arbitrary, non-overlapping regions that can be added to an allocator. 33 | fn regions( 34 | un: &mut Unstructured, 35 | num_blocks: usize, 36 | ) -> arbitrary::Result>> { 37 | let min_block_size = Buddy::::min_block_size().unwrap(); 38 | let num_atomic_blocks = (BLK_SIZE / min_block_size) * num_blocks; 39 | let num_regions = usize::arbitrary(un)? & num_atomic_blocks; 40 | 41 | let mut boundaries = Vec::with_capacity(num_regions); 42 | for _ in 0..num_regions { 43 | let bound = usize::arbitrary(un)? 
% num_regions; 44 | boundaries.push(bound); 45 | } 46 | boundaries.sort_unstable(); 47 | boundaries.dedup(); 48 | 49 | Ok(boundaries 50 | .windows(2) 51 | .map(|s| { 52 | let start = min_block_size.checked_mul(s[0]).unwrap(); 53 | let end = min_block_size.checked_mul(s[1]).unwrap(); 54 | start..end 55 | }) 56 | .collect()) 57 | } 58 | 59 | impl Arbitrary<'_> for Args { 60 | fn arbitrary(un: &mut Unstructured) -> arbitrary::Result> { 61 | let num_blocks = usize::arbitrary(un)? % MAX_BLOCKS; 62 | let regions = regions::(un, num_blocks)?; 63 | 64 | let ops = Vec::arbitrary(un)?; 65 | 66 | Ok(Args { 67 | num_blocks, 68 | regions, 69 | ops, 70 | }) 71 | } 72 | } 73 | 74 | fn create_buddy( 75 | num_blocks: usize, 76 | ) -> Result, AllocInitError> { 77 | let metadata_layout = Buddy::::metadata_layout(num_blocks)?; 78 | let region_layout = Buddy::::region_layout(num_blocks)?; 79 | 80 | let metadata = NonNull::new(unsafe { std::alloc::alloc(metadata_layout) }) 81 | .ok_or(AllocInitError::AllocFailed(metadata_layout))?; 82 | let region = NonNull::new(unsafe { std::alloc::alloc(region_layout) }).ok_or_else(|| { 83 | unsafe { std::alloc::dealloc(metadata.as_ptr(), metadata_layout) }; 84 | AllocInitError::AllocFailed(region_layout) 85 | })?; 86 | 87 | unsafe { Buddy::new_raw_unpopulated(metadata, region, num_blocks) } 88 | } 89 | 90 | unsafe fn destroy_buddy( 91 | buddy: Buddy, 92 | ) { 93 | unsafe { 94 | let parts = buddy.into_raw_parts(); 95 | std::alloc::dealloc(parts.metadata.as_ptr(), parts.metadata_layout); 96 | std::alloc::dealloc(parts.region.as_ptr(), parts.region_layout); 97 | } 98 | } 99 | 100 | fuzz_target!(|args: Args| { 101 | let Args { 102 | num_blocks, 103 | regions, 104 | ops, 105 | } = args; 106 | 107 | let mut buddy = match create_buddy::(num_blocks) { 108 | Ok(b) => b, 109 | Err(e) => return, 110 | }; 111 | 112 | let base_addr = buddy.region().cast::().addr(); 113 | let mut regions = regions 114 | .iter() 115 | .map(|range| { 116 | let start = base_addr.checked_add(range.start).unwrap(); 117 | let end = base_addr.checked_add(range.end).unwrap(); 118 | start..end 119 | }) 120 | .collect::>(); 121 | 122 | let mut blocks = Blocks::new(); 123 | 124 | let mut completed = Vec::new(); 125 | 126 | for (op_id, op) in ops.into_iter().enumerate() { 127 | let op_id: u64 = op_id.try_into().unwrap(); 128 | match op.clone() { 129 | AllocatorOp::Allocate(layout) => { 130 | let ptr = match buddy.allocate(layout.0) { 131 | Ok(p) => p, 132 | Err(_) => continue, 133 | }; 134 | 135 | blocks.push(unsafe { Block::init(ptr, layout.0, op_id) }); 136 | } 137 | 138 | AllocatorOp::Deallocate(idx) => { 139 | let mut block = match blocks.remove_modulo(idx) { 140 | Some(b) => b, 141 | None => continue, 142 | }; 143 | 144 | if !block.verify() { 145 | panic!("\nblock failed verification.\nnum blocks: {num_blocks}\ncompleted: {completed:?}\nfailed: {op:?}"); 146 | } 147 | 148 | unsafe { block.paint(op_id) }; 149 | 150 | let (ptr, layout) = block.into_raw_parts(); 151 | unsafe { buddy.deallocate(ptr.cast()) }; 152 | } 153 | 154 | AllocatorOp::AddRegion(idx) => { 155 | let len = regions.len(); 156 | 157 | if len == 0 { 158 | continue; 159 | } 160 | 161 | let idx = idx % len; 162 | let region = regions.swap_remove(idx); 163 | unsafe { buddy.add_region(region) }; 164 | } 165 | } 166 | 167 | completed.push(op); 168 | } 169 | 170 | unsafe { destroy_buddy(buddy) }; 171 | }); 172 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/bump.rs: 
-------------------------------------------------------------------------------- 1 | #![no_main] 2 | use std::alloc::Layout; 3 | 4 | use acid_alloc_hater::BumpSubject; 5 | use alloc_hater::{AllocatorOp, ArbLayout}; 6 | use arbitrary::{Arbitrary, Unstructured}; 7 | use libfuzzer_sys::fuzz_target; 8 | 9 | const MAX_SIZE: usize = 64 * 1024; 10 | const MAX_ALIGN_SHIFT: u8 = 12; // 4096 bytes 11 | 12 | #[derive(Clone, Debug)] 13 | struct Args { 14 | layout: Layout, 15 | ops: Vec, 16 | } 17 | 18 | impl Arbitrary<'_> for Args { 19 | fn arbitrary(un: &mut Unstructured) -> arbitrary::Result { 20 | let size = usize::arbitrary(un)? % MAX_SIZE; 21 | let align_shift = u8::arbitrary(un)? % MAX_ALIGN_SHIFT; 22 | let align = 1_usize << align_shift; 23 | let layout = Layout::from_size_align(size, align).unwrap(); 24 | let ops = Vec::arbitrary(un)?; 25 | 26 | Ok(Args { layout, ops }) 27 | } 28 | } 29 | 30 | fuzz_target!(|args: Args| { 31 | let Args { layout, ops } = args; 32 | 33 | let mut bump = match BumpSubject::new(layout) { 34 | Ok(s) => s, 35 | Err(_) => return, 36 | }; 37 | 38 | let mut eval = alloc_hater::Evaluator::new(bump); 39 | eval.evaluate(ops).unwrap(); 40 | }); 41 | -------------------------------------------------------------------------------- /fuzz/fuzz_targets/slab.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | use acid_alloc_hater::SlabSubject; 3 | use alloc_hater::AllocatorOp; 4 | use arbitrary::{Arbitrary, Unstructured}; 5 | use libfuzzer_sys::fuzz_target; 6 | 7 | const MAX_NUM_BLOCKS: usize = 1024; 8 | const MAX_BLOCK_SIZE: usize = 1024; 9 | 10 | #[derive(Clone, Debug)] 11 | struct Args { 12 | block_size: usize, 13 | num_blocks: usize, 14 | ops: Vec, 15 | } 16 | 17 | impl Arbitrary<'_> for Args { 18 | fn arbitrary(un: &mut Unstructured) -> arbitrary::Result { 19 | let block_size = usize::arbitrary(un)? % MAX_BLOCK_SIZE; 20 | let num_blocks = usize::arbitrary(un)? % MAX_NUM_BLOCKS; 21 | let ops = Vec::arbitrary(un)?; 22 | 23 | Ok(Args { 24 | block_size, 25 | num_blocks, 26 | ops, 27 | }) 28 | } 29 | } 30 | 31 | fuzz_target!(|args: Args| { 32 | let Args { 33 | block_size, 34 | num_blocks, 35 | ops, 36 | } = args; 37 | 38 | let mut slab = match SlabSubject::new(args.block_size, args.num_blocks) { 39 | Ok(s) => s, 40 | Err(_) => return, 41 | }; 42 | 43 | let mut eval = alloc_hater::Evaluator::new(slab); 44 | eval.evaluate(ops).unwrap(); 45 | }); 46 | -------------------------------------------------------------------------------- /src/base.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{ 2 | num::NonZeroUsize, 3 | ptr::{self, NonNull}, 4 | }; 5 | 6 | #[cfg(not(feature = "unstable"))] 7 | use crate::core::ptr::{NonNullStrict, Strict}; 8 | 9 | /// A pointer to the base of the region of memory managed by an allocator. 10 | #[derive(Copy, Clone, Debug)] 11 | pub struct BasePtr { 12 | ptr: NonNull, 13 | extent: usize, 14 | } 15 | 16 | impl BasePtr { 17 | /// Creates a `BasePtr` from `ptr`. 18 | /// 19 | /// The returned value assumes the provenance of `ptr`. 20 | #[inline] 21 | pub fn new(ptr: NonNull, extent: usize) -> BasePtr { 22 | ptr.addr() 23 | .get() 24 | .checked_add(extent) 25 | .expect("region limit overflows usize"); 26 | 27 | BasePtr { ptr, extent } 28 | } 29 | 30 | /// Returns the base pointer as a `NonNull`. 
31 | #[inline] 32 | pub fn ptr(self) -> NonNull { 33 | self.ptr 34 | } 35 | 36 | #[inline] 37 | pub fn limit(self) -> NonZeroUsize { 38 | NonZeroUsize::new(self.ptr.addr().get() + self.extent).unwrap() 39 | } 40 | 41 | #[inline] 42 | pub fn contains_addr(self, addr: NonZeroUsize) -> bool { 43 | self.ptr.addr() <= addr && addr < self.limit() 44 | } 45 | 46 | /// Returns the address of the base pointer. 47 | #[inline] 48 | pub fn addr(self) -> NonZeroUsize { 49 | self.ptr.addr() 50 | } 51 | 52 | /// Calculates the offset from `self` to `block`. 53 | pub fn offset_to(self, block: NonZeroUsize) -> usize { 54 | block.get().checked_sub(self.ptr.addr().get()).unwrap() 55 | } 56 | 57 | /// Initializes a `BlockLink` at the given address. 58 | /// 59 | /// # Safety 60 | /// 61 | /// The caller must uphold the following invariants: 62 | /// - `addr` must be a properly aligned address for `BlockLink` values. 63 | /// - The memory at `addr` must be within the provenance of `self` and valid 64 | /// for reads and writes for `size_of::()` bytes. 65 | /// - The memory at `addr` must be unallocated by the associated allocator. 66 | #[inline] 67 | pub unsafe fn init_link_at(self, addr: NonZeroUsize, link: BlockLink) { 68 | #[cfg(debug_assertions)] 69 | { 70 | debug_assert!(self.contains_addr(addr)); 71 | if let Some(next) = link.next { 72 | debug_assert!(self.contains_addr(next), "next link out of region"); 73 | } 74 | } 75 | 76 | unsafe { 77 | self.with_addr(addr) 78 | .cast::() 79 | .as_ptr() 80 | .write(link) 81 | }; 82 | } 83 | 84 | /// Initializes a `DoubleBlockLink` at the given address. 85 | /// 86 | /// # Safety 87 | /// 88 | /// The caller must uphold the following invariants: 89 | /// - `addr` must be a properly aligned address for `DoubleBlockLink` values. 90 | /// - The memory at `addr` must be within the provenance of `self` and valid 91 | /// for reads and writes for `size_of::()` bytes. 92 | /// - The memory at `addr` must be unallocated by the associated allocator. 93 | #[inline] 94 | pub unsafe fn init_double_link_at(self, addr: NonZeroUsize, link: DoubleBlockLink) { 95 | debug_assert!(self.contains_addr(addr)); 96 | 97 | debug_assert!( 98 | link.next.map_or(true, |next| self.contains_addr(next)), 99 | "next link out of region" 100 | ); 101 | debug_assert!( 102 | link.prev.map_or(true, |prev| self.contains_addr(prev),), 103 | "prev link out of region" 104 | ); 105 | 106 | unsafe { 107 | self.with_addr(addr) 108 | .cast::() 109 | .as_ptr() 110 | .write(link) 111 | }; 112 | } 113 | 114 | /// Returns a mutable reference to the `BlockLink` at `link`. 115 | /// 116 | /// # Safety 117 | /// 118 | /// The caller must uphold the following invariants: 119 | /// - `link` must be a properly aligned address for `BlockLink` values. 120 | /// - The memory at `link` must contain a properly initialized `BlockLink` value. 121 | /// - The memory at `link` must be within the provenance of `self` and 122 | /// unallocated by the associated allocator. 123 | #[inline] 124 | pub unsafe fn link_mut<'a>(self, link: NonZeroUsize) -> &'a mut BlockLink { 125 | debug_assert!(self.contains_addr(link)); 126 | 127 | unsafe { self.ptr.with_addr(link).cast::().as_mut() } 128 | } 129 | 130 | /// Returns a mutable reference to the `DoubleBlockLink` at `link`. 131 | /// 132 | /// # Safety 133 | /// 134 | /// The caller must uphold the following invariants: 135 | /// - `link` must be a properly aligned address for `DoubleBlockLink` values. 
136 | /// - The memory at `link` must contain a properly initialized `DoubleBlockLink` value. 137 | /// - The memory at `link` must be within the provenance of `self` and 138 | /// unallocated by the associated allocator. 139 | #[inline] 140 | pub unsafe fn double_link_mut<'a>(self, link: NonZeroUsize) -> &'a mut DoubleBlockLink { 141 | debug_assert!(self.contains_addr(link)); 142 | 143 | let link = unsafe { self.ptr.with_addr(link).cast::().as_mut() }; 144 | 145 | debug_assert!( 146 | link.next.map_or(true, |next| self.contains_addr(next)), 147 | "next link out of region" 148 | ); 149 | debug_assert!( 150 | link.prev.map_or(true, |prev| self.contains_addr(prev),), 151 | "prev link out of region" 152 | ); 153 | 154 | link 155 | } 156 | 157 | /// Creates a new pointer with the given address. 158 | /// 159 | /// The returned pointer has the provenance of this pointer. 160 | #[inline] 161 | pub fn with_addr(self, addr: NonZeroUsize) -> NonNull { 162 | debug_assert!(self.contains_addr(addr)); 163 | 164 | self.ptr.with_addr(addr) 165 | } 166 | 167 | #[inline] 168 | pub fn with_addr_and_size(self, addr: NonZeroUsize, len: usize) -> NonNull<[u8]> { 169 | debug_assert!(self.contains_addr(addr)); 170 | 171 | let ptr = self.ptr.as_ptr().with_addr(addr.get()); 172 | let raw_slice = ptr::slice_from_raw_parts_mut(ptr, len); 173 | 174 | unsafe { NonNull::new_unchecked(raw_slice) } 175 | } 176 | 177 | /// Creates a new pointer with the given offset. 178 | /// 179 | /// The returned pointer has the provenance of this pointer. 180 | #[inline] 181 | pub fn with_offset(self, offset: usize) -> Option> { 182 | let raw = self.ptr.addr().get().checked_add(offset)?; 183 | let addr = NonZeroUsize::new(raw)?; 184 | 185 | debug_assert!(self.contains_addr(addr)); 186 | 187 | Some(self.ptr.with_addr(addr)) 188 | } 189 | } 190 | 191 | // Rather than using pointers, store only the addresses of the previous and 192 | // next links. This avoids accidentally violating stacked borrows; the 193 | // links "point to" other blocks, but by forgoing actual pointers, no borrow 194 | // is implied. 195 | // 196 | // NOTE: Using this method, any actual pointer to a block must be acquired 197 | // via the allocator base pointer, and NOT by casting these addresses 198 | // directly! 199 | 200 | /// A link in a linked list of blocks of memory. 201 | /// 202 | /// This type is meant to be embedded in the block itself, forming an intrusive 203 | /// linked list. 204 | #[repr(C)] 205 | pub struct BlockLink { 206 | pub next: Option, 207 | } 208 | 209 | /// A double link in a linked list of blocks of memory. 210 | /// 211 | /// This type is meant to be embedded in the block itself, forming an intrusive 212 | /// doubly linked list. 213 | #[repr(C)] 214 | #[derive(Debug)] 215 | pub struct DoubleBlockLink { 216 | pub prev: Option, 217 | pub next: Option, 218 | } 219 | -------------------------------------------------------------------------------- /src/bitmap.rs: -------------------------------------------------------------------------------- 1 | use crate::core::{alloc::Layout, mem}; 2 | 3 | #[cfg(not(feature = "unstable"))] 4 | use crate::core::alloc::LayoutExt; 5 | 6 | pub struct Bitmap { 7 | num_bits: usize, 8 | map: *mut u64, 9 | } 10 | 11 | impl Bitmap { 12 | pub fn map_layout(num_bits: usize) -> Layout { 13 | let num_blocks = Self::num_blocks(num_bits); 14 | 15 | Layout::new::() 16 | .repeat(num_blocks) 17 | .expect("bitmap metadata layout error") 18 | .0 19 | } 20 | 21 | /// Constructs a new bitmap of `len` bits, backed by `map`. 
22 | /// 23 | /// A `Layout` describing a suitable region for `map` can be obtained with 24 | /// `PageFrameBitmap::map_layout(num_bits)`. 25 | /// 26 | /// # Safety 27 | /// 28 | /// Behavior is undefined if any of the following conditions are violated: 29 | /// - `map` must be valid for reads and writes for `len * 30 | /// mem::size_of::()` many bytes, and it must be properly aligned. 31 | /// - `map` must point to `(num_bits + 63) / 64` consecutive properly 32 | /// initialized `u64` values. 33 | pub unsafe fn new(num_bits: usize, map: *mut u64) -> Bitmap { 34 | assert!(num_bits > 0); 35 | assert!(!map.is_null()); 36 | assert!(map.align_offset(mem::align_of::()) == 0); 37 | 38 | let num_blocks = Self::num_blocks(num_bits); 39 | 40 | for i in 0..(num_blocks as isize) { 41 | unsafe { map.offset(i).write(0) }; 42 | } 43 | 44 | Bitmap { num_bits, map } 45 | } 46 | 47 | #[inline] 48 | // TODO: make const when Option::unwrap becomes const 49 | pub fn num_blocks(num_bits: usize) -> usize { 50 | (num_bits.checked_add(u64::BITS as usize - 1).unwrap()) 51 | .checked_div(64) 52 | .unwrap() 53 | } 54 | 55 | /// Returns a tuple of the index of the `u64` containing `bit` and a mask 56 | /// which extracts it. 57 | #[inline] 58 | const fn index_and_mask(bit: usize) -> (usize, u64) { 59 | ( 60 | bit / u64::BITS as usize, 61 | 1 << (bit as u64 % u64::BITS as u64), 62 | ) 63 | } 64 | 65 | /// Gets the value of the indexed bit. 66 | #[inline] 67 | pub fn get(&self, index: usize) -> bool { 68 | assert!(index < self.num_bits); 69 | 70 | let (block_idx, mask) = Self::index_and_mask(index); 71 | 72 | let block_idx: isize = block_idx 73 | .try_into() 74 | .expect("get: index overflowed an isize"); 75 | 76 | unsafe { self.map.offset(block_idx).read() & mask != 0 } 77 | } 78 | 79 | /// Sets the value of the indexed bit. 80 | #[inline] 81 | pub fn set(&mut self, index: usize, value: bool) { 82 | assert!(index < self.num_bits); 83 | 84 | let (block_idx, mask) = Self::index_and_mask(index); 85 | 86 | let block_idx: isize = block_idx 87 | .try_into() 88 | .expect("set: index overflowed an isize"); 89 | 90 | unsafe { 91 | let block_ptr = self.map.offset(block_idx); 92 | let block = block_ptr.read(); 93 | block_ptr.write(match value { 94 | true => block | mask, 95 | false => block & !mask, 96 | }); 97 | } 98 | } 99 | 100 | /// Toggles the value of the indexed bit. 
101 | #[inline] 102 | pub fn toggle(&mut self, index: usize) { 103 | assert!(index < self.num_bits); 104 | 105 | let (block_idx, mask) = Self::index_and_mask(index); 106 | 107 | let block_idx: isize = block_idx 108 | .try_into() 109 | .expect("toggle: index overflowed an isize"); 110 | 111 | unsafe { 112 | let block_ptr = self.map.offset(block_idx); 113 | let block = block_ptr.read(); 114 | block_ptr.write(block ^ mask); 115 | } 116 | } 117 | 118 | #[cfg(test)] 119 | pub fn iter(&self) -> BitmapIter { 120 | BitmapIter { 121 | idx: 0, 122 | bitmap: self, 123 | } 124 | } 125 | } 126 | 127 | #[cfg(test)] 128 | pub struct BitmapIter<'a> { 129 | idx: usize, 130 | bitmap: &'a Bitmap, 131 | } 132 | 133 | #[cfg(test)] 134 | impl<'a> Iterator for BitmapIter<'a> { 135 | type Item = bool; 136 | 137 | fn next(&mut self) -> Option { 138 | if self.idx >= self.bitmap.num_bits { 139 | return None; 140 | } 141 | 142 | let bit = self.bitmap.get(self.idx); 143 | self.idx += 1; 144 | 145 | Some(bit) 146 | } 147 | } 148 | 149 | #[cfg(test)] 150 | mod tests { 151 | extern crate std; 152 | 153 | use core::mem::ManuallyDrop; 154 | use std::prelude::rust_2021::*; 155 | 156 | use super::*; 157 | 158 | struct VecBitmap { 159 | bitmap: ManuallyDrop, 160 | len: usize, 161 | cap: usize, 162 | } 163 | 164 | impl VecBitmap { 165 | fn new(num_bits: usize) -> VecBitmap { 166 | let num_blocks = Bitmap::num_blocks(num_bits); 167 | 168 | let v = std::vec![0; num_blocks]; 169 | 170 | // TODO: use Vec::into_raw_parts when stable 171 | let mut v = ManuallyDrop::new(v); 172 | let map = v.as_mut_ptr(); 173 | let len = v.len(); 174 | let cap = v.capacity(); 175 | 176 | VecBitmap { 177 | bitmap: ManuallyDrop::new(unsafe { Bitmap::new(num_bits, map) }), 178 | len, 179 | cap, 180 | } 181 | } 182 | } 183 | 184 | impl Drop for VecBitmap { 185 | fn drop(&mut self) { 186 | unsafe { 187 | let Bitmap { map, .. } = ManuallyDrop::take(&mut self.bitmap); 188 | 189 | // Reconstitute the original Vec. 190 | let v = Vec::from_raw_parts(map, self.len, self.cap); 191 | 192 | // Explicit for clarity. 193 | drop(v); 194 | } 195 | } 196 | } 197 | 198 | #[test] 199 | fn init_many() { 200 | for num_bits in 1..=256 { 201 | let _ = VecBitmap::new(num_bits); 202 | } 203 | } 204 | } 205 | -------------------------------------------------------------------------------- /src/buddy.rs: -------------------------------------------------------------------------------- 1 | //! Binary-buddy allocation. 2 | //! 3 | //! The buddy algorithm divides the managed region into a fixed number of 4 | //! equal, power-of-two-sized blocks. Each block can be recursively split in 5 | //! half a fixed number of times in order to provide finer-grained allocations. 6 | //! Buddy allocators excel in cases where most allocations have a power-of-two 7 | //! size. 8 | //! 9 | //! ## Characteristics 10 | //! 11 | //! #### Time complexity 12 | //! 13 | //! | Operation | Best-case | Worst-case | 14 | //! |--------------------------|-----------|----------------------------| 15 | //! | Allocate (size <= align) | O(1) | O(log2_levels_) | 16 | //! | Deallocate | O(1) | O(log2_levels_) | 17 | //! 18 | //! #### Fragmentation 19 | //! 20 | //! Buddy allocators exhibit limited external fragmentation, but suffer up to 21 | //! 50% internal fragmentation because all allocatable blocks have a 22 | //! power-of-two size. 
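The split/coalesce bookkeeping in this module boils down to a small amount of offset arithmetic. As a standalone sketch (the free functions here are illustrative; they mirror `BuddyLevel::index_of`, `buddy_bit`, and `buddy_ofs` defined below), a block's level-relative index, its shared buddy bit, and its buddy's offset are computed as follows:

```rust
// Illustrative mirror of the per-level arithmetic in `BuddyLevel`;
// `block_size` is a power of two and `block_pow` is its base-2 logarithm.

/// Index of the block that starts `block_ofs` bytes into this level.
fn index_of(block_ofs: usize, block_pow: u32) -> usize {
    block_ofs >> block_pow
}

/// Buddy pairs share one bookkeeping bit, so the bit index is the block index halved.
fn buddy_bit(block_ofs: usize, block_pow: u32) -> usize {
    index_of(block_ofs, block_pow) >> 1
}

/// A block's buddy lives at the offset with the `block_size` bit flipped.
fn buddy_ofs(block_ofs: usize, block_size: usize) -> usize {
    block_ofs ^ block_size
}
```

Because each pair shares a single bit, toggling it on every allocation and deallocation leaves the bit set exactly when one block of the pair is free, which is how `deallocate` decides whether it can coalesce.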
23 | 24 | #![cfg(any(feature = "sptr", feature = "unstable"))] 25 | 26 | use core::{ 27 | iter::{self, Peekable}, 28 | mem::ManuallyDrop, 29 | ops::Range, 30 | }; 31 | 32 | use crate::core::{ 33 | alloc::{AllocError, Layout}, 34 | cmp, fmt, 35 | mem::{self, MaybeUninit}, 36 | num::NonZeroUsize, 37 | ptr::NonNull, 38 | }; 39 | 40 | #[cfg(feature = "unstable")] 41 | use crate::core::alloc::Allocator; 42 | 43 | #[cfg(not(feature = "unstable"))] 44 | use crate::core::{ 45 | alloc::LayoutExt, 46 | num::UsizeExt, 47 | ptr::{NonNullStrict, Strict}, 48 | }; 49 | 50 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 51 | use alloc::alloc::Global; 52 | 53 | use crate::{bitmap::Bitmap, AllocInitError, BackingAllocator, BasePtr, DoubleBlockLink, Raw}; 54 | 55 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 56 | use crate::Global; 57 | 58 | struct BuddyLevel { 59 | block_size: usize, 60 | block_pow: u32, 61 | free_list: Option, 62 | buddies: Bitmap, 63 | splits: Option, 64 | } 65 | 66 | impl BuddyLevel { 67 | /// Retrieves the index of the block which starts `block_ofs` bytes from the 68 | /// base. 69 | /// 70 | /// `block_ofs` must be a multiple of `self.block_size`. 71 | #[inline] 72 | fn index_of(&self, block_ofs: usize) -> usize { 73 | // Safe unchecked shr: public Buddy ctors guarantee that 74 | // self.block_pow < usize::BITS 75 | block_ofs >> self.block_pow as usize 76 | } 77 | 78 | /// Retrieves the index of the buddy bit for the block which starts 79 | /// `block_ofs` bytes from the base. 80 | #[inline] 81 | fn buddy_bit(&self, block_ofs: usize) -> usize { 82 | // Safe unchecked shr: RHS is < usize::BITS 83 | self.index_of(block_ofs) >> 1 84 | } 85 | 86 | /// Retrieves the offset of the buddy of the block which starts 87 | /// `block_ofs` bytes from the base. 88 | #[inline] 89 | fn buddy_ofs(&self, block_ofs: usize) -> usize { 90 | block_ofs ^ self.block_size 91 | } 92 | 93 | #[cfg(test)] 94 | fn enumerate_free_list(&self, base: BasePtr) -> usize { 95 | let mut item = self.free_list; 96 | let mut num = 0; 97 | let mut prev = None; 98 | 99 | while let Some(it) = item { 100 | num += 1; 101 | 102 | let link = unsafe { base.double_link_mut(it) }; 103 | 104 | assert_eq!(link.prev, prev); 105 | 106 | prev = item; 107 | item = link.next; 108 | } 109 | 110 | num 111 | } 112 | 113 | /// Pushes a block onto the free list. 114 | unsafe fn free_list_push(&mut self, base: BasePtr, block: NonZeroUsize) { 115 | assert_eq!(block.get() & (mem::align_of::() - 1), 0); 116 | 117 | let new_head = block; 118 | 119 | if let Some(old_head) = self.free_list { 120 | let old_head_mut = unsafe { base.double_link_mut(old_head) }; 121 | old_head_mut.prev = Some(new_head); 122 | } 123 | 124 | let old_head = self.free_list; 125 | 126 | // If `old_head` exists, it now points back to `new_head`. 127 | 128 | unsafe { 129 | base.init_double_link_at( 130 | new_head, 131 | DoubleBlockLink { 132 | next: old_head, 133 | prev: None, 134 | }, 135 | ) 136 | }; 137 | 138 | // `new_head` now points forward to `old_head`. 139 | // `old_head` now points back to `new_head`. 140 | 141 | self.free_list = Some(new_head); 142 | } 143 | 144 | /// Removes the specified block from the free list. 145 | /// 146 | /// # Safety 147 | /// 148 | /// The caller must uphold the following invariants: 149 | /// - The memory at `block` must be within the provenance of `base` and valid for reads and 150 | /// writes for `size_of::()` bytes. 151 | /// - `block` must be the address of an element of `self.free_list`. 
152 | unsafe fn free_list_remove(&mut self, base: BasePtr, block: NonZeroUsize) { 153 | unsafe { 154 | let removed = base.double_link_mut(block); 155 | debug_assert!(removed.next.map_or(true, |next| base.contains_addr(next))); 156 | 157 | match removed.prev { 158 | // Link `prev` forward to `next`. 159 | Some(p) => { 160 | base.double_link_mut(p).next = removed.next; 161 | } 162 | 163 | // If there's no previous block, then `removed` is the head of 164 | // the free list. 165 | None => self.free_list = removed.next, 166 | } 167 | 168 | if let Some(n) = removed.next { 169 | // Link `next` back to `prev`. 170 | base.double_link_mut(n).prev = removed.prev; 171 | } 172 | } 173 | } 174 | 175 | /// Allocates a block from the free list. 176 | unsafe fn allocate(&mut self, base: BasePtr) -> Option { 177 | let block = self.free_list?; 178 | 179 | unsafe { self.free_list_remove(base, block) }; 180 | let ofs = base.offset_to(block); 181 | self.buddies.toggle(self.buddy_bit(ofs)); 182 | 183 | Some(block) 184 | } 185 | 186 | /// Assigns a block to this level. 187 | unsafe fn assign(&mut self, base: BasePtr, block: NonZeroUsize) { 188 | let ofs = base.offset_to(block); 189 | 190 | let buddy_bit = self.buddy_bit(ofs); 191 | assert!(!self.buddies.get(buddy_bit)); 192 | self.buddies.set(buddy_bit, true); 193 | 194 | unsafe { self.free_list_push(base, block) }; 195 | } 196 | 197 | unsafe fn deallocate( 198 | &mut self, 199 | base: BasePtr, 200 | block: NonNull, 201 | coalesce: bool, 202 | ) -> Option> { 203 | let block = block.addr(); 204 | let block_ofs = base.offset_to(block); 205 | let buddy_bit = self.buddy_bit(block_ofs); 206 | self.buddies.toggle(buddy_bit); 207 | 208 | let split_bit = self.index_of(block_ofs); 209 | if let Some(splits) = self.splits.as_mut() { 210 | splits.set(split_bit, false); 211 | } 212 | 213 | if !coalesce || self.buddies.get(buddy_bit) { 214 | unsafe { self.free_list_push(base, block) }; 215 | None 216 | } else { 217 | let buddy_ofs = self.buddy_ofs(block_ofs); 218 | let buddy = 219 | NonZeroUsize::new(base.addr().get().checked_add(buddy_ofs).unwrap()).unwrap(); 220 | 221 | // Remove the buddy block from the free list. 222 | unsafe { self.free_list_remove(base, buddy) }; 223 | 224 | // Return the coalesced block. 225 | let coalesced_ofs = buddy_ofs & !self.block_size; 226 | Some(base.with_offset(coalesced_ofs).unwrap()) 227 | } 228 | } 229 | } 230 | 231 | /// A binary-buddy allocator. 232 | /// 233 | /// For a discussion of the buddy algorithm, see the [module-level documentation]. 234 | /// 235 | /// # Configuration 236 | /// 237 | /// This type has two const parameters: 238 | /// - `BLK_SIZE` is the size of the largest allocations the allocator can make. 239 | /// - `LEVELS` is the number of levels in the allocator. 240 | /// 241 | /// Each constructor also takes one runtime parameter: 242 | /// - `num_blocks` is the number of top-level blocks of `BLK_SIZE` bytes managed by the allocator. 243 | /// 244 | /// These parameters are subject to the following invariants: 245 | /// - `BLK_SIZE` must be a power of two. 246 | /// - `LEVELS` must be nonzero and less than `usize::BITS`. 247 | /// - The minumum block size must be at least `2 * mem::size_of()`; it can be calculated with 248 | /// the formula `BLK_SIZE >> (LEVELS - 1)`. 249 | /// - The total size in bytes of the managed region must be less than `usize::MAX`. 250 | /// 251 | /// Attempting to construct a `Buddy` whose const parameters violate 252 | /// these invariants will result in an error. 
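///
/// For illustration, the following sketch defines two hypothetical configurations (the alias
/// names are not part of this crate):
///
/// ```
/// use acid_alloc::{Buddy, Raw};
///
/// // OK: 1024 is a power of two, and the minimum block size is 1024 >> (5 - 1) = 64 bytes.
/// type PageBuddy = Buddy<1024, 5, Raw>;
///
/// // Invalid: the minimum block size would be 1024 >> (8 - 1) = 8 bytes, which is too
/// // small to hold a free-list link on 64-bit targets, so construction returns an error.
/// type TooFine = Buddy<1024, 8, Raw>;
/// ```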
253 | /// 254 | /// # Unpopulated initialization 255 | /// 256 | /// A `Buddy` can be initialized for a region of memory without immediately allowing it to access 257 | /// any of that memory using the [`Buddy::new_raw_unpopulated`] constructor. Memory in that region 258 | /// can then be added to the allocator over time. 259 | /// 260 | /// This is particularly useful when using a `Buddy` to allocate discontiguous regions of physical 261 | /// memory. For example, consider a 16 KiB region of physical memory in which the region between 4-8 262 | /// KiB is reserved by the platform for memory-mapped I/O. 263 | /// 264 | /// ```text 265 | /// | RAM | MMIO | RAM | RAM | 266 | /// 0x##0000 0x##1000 0x##2000 0x##3000 0x##4000 267 | /// ``` 268 | /// 269 | /// Initializing an allocator normally for the entire region would result in writes to the MMIO 270 | /// region, with unpredictable consequences. 271 | /// 272 | /// ```no_run 273 | /// // These values result in a minimum block size of 4096. 274 | /// type Buddy = acid_alloc::Buddy<16384, 3, acid_alloc::Raw>; 275 | /// 276 | /// # fn main() { 277 | /// # /* 278 | /// let region = /* ... */; 279 | /// let metadata = /* ... */; 280 | /// # */ 281 | /// # let region: core::ptr::NonNull = unimplemented!(); 282 | /// # let metadata: core::ptr::NonNull = unimplemented!(); 283 | /// 284 | /// // ⚠ Bad Things occur! 285 | /// let buddy = unsafe { Buddy::new_raw(region, metadata, 1).unwrap() }; 286 | /// # } 287 | /// ``` 288 | /// 289 | /// However, creating the allocator using `new_raw_unpopulated()` does not issue any writes. 290 | /// Instead, valid subregions can be added to the allocator explicitly. 291 | /// 292 | /// ```no_run 293 | /// # use core::{alloc::Layout, num::NonZeroUsize}; 294 | /// # const BLK_SIZE: usize = 16384; 295 | /// # const LEVELS: usize = 3; 296 | /// # type Buddy = acid_alloc::Buddy; 297 | /// # fn main() { 298 | /// # let region: core::ptr::NonNull = unimplemented!(); 299 | /// # let metadata: core::ptr::NonNull = unimplemented!(); 300 | /// // Define the usable regions of memory. 301 | /// let low_start = NonZeroUsize::new(region.as_ptr() as usize).unwrap(); 302 | /// let low_end = NonZeroUsize::new(low_start.get() + 4096).unwrap(); 303 | /// let high_start = NonZeroUsize::new(low_start.get() + 8192).unwrap(); 304 | /// let high_end = NonZeroUsize::new(low_start.get() + 16384).unwrap(); 305 | /// 306 | /// unsafe { 307 | /// // No accesses to `region` are made during this call. 308 | /// let mut buddy = Buddy::new_raw_unpopulated(region, metadata, 1).unwrap(); 309 | /// 310 | /// // The allocator has no memory yet, so it can't make allocations. 311 | /// assert!(buddy.allocate(Layout::new::<[u8; 4096]>()).is_err()); 312 | /// 313 | /// // Add the valid regions to the allocator. 314 | /// buddy.add_region(low_start..low_end); 315 | /// buddy.add_region(high_start..high_end); 316 | /// 317 | /// // Now allocations succeed. 
318 | /// let block = buddy.allocate(Layout::new::<[u8; 4096]>()).unwrap(); 319 | /// } 320 | /// 321 | /// # } 322 | /// ``` 323 | /// 324 | /// # Example 325 | /// 326 | /// ``` 327 | /// # #![cfg_attr(feature = "unstable", feature(allocator_api))] 328 | /// # #[cfg(feature = "alloc")] 329 | /// use core::{alloc::Layout, mem::MaybeUninit, ptr::NonNull}; 330 | /// 331 | /// # #[cfg(feature = "alloc")] 332 | /// use acid_alloc::{Buddy, Global}; 333 | /// 334 | /// // Minimum block size == BLK_SIZE >> (LEVELS - 1) 335 | /// // 16 == 4096 >> ( 9 - 1) 336 | /// # #[cfg(feature = "alloc")] 337 | /// type CustomBuddy = Buddy<4096, 9, Global>; 338 | /// 339 | /// # #[cfg(feature = "alloc")] 340 | /// # fn main() { 341 | /// // Create a new `Buddy` with four 4KiB blocks, backed by the global allocator. 342 | /// let mut buddy = CustomBuddy::try_new(4).expect("buddy initialization failed."); 343 | /// 344 | /// // Allocate space for an array of `u64`s. 345 | /// let len = 8; 346 | /// let layout = Layout::array::(len).expect("layout overflowed"); 347 | /// let mut buf: NonNull<[u8]> = buddy.allocate(layout).expect("allocation failed"); 348 | /// let mut uninit: NonNull<[MaybeUninit; 8]> = buf.cast(); 349 | /// 350 | /// // Initialize the array. 351 | /// unsafe { 352 | /// let mut arr: &mut [MaybeUninit; 8] = uninit.as_mut(); 353 | /// for (i, elem) in arr.iter_mut().enumerate() { 354 | /// elem.as_mut_ptr().write(i as u64); 355 | /// } 356 | /// } 357 | /// 358 | /// // Deallocate the array. 359 | /// unsafe { buddy.deallocate(buf.cast()) }; 360 | /// # } 361 | /// 362 | /// # #[cfg(not(feature = "alloc"))] 363 | /// # fn main() {} 364 | /// ``` 365 | /// 366 | /// [module-level documentation]: crate::buddy 367 | pub struct Buddy { 368 | raw: RawBuddy, 369 | backing_allocator: A, 370 | } 371 | 372 | unsafe impl Send 373 | for Buddy 374 | { 375 | } 376 | unsafe impl Sync 377 | for Buddy 378 | { 379 | } 380 | 381 | impl Buddy { 382 | /// Constructs a new `Buddy` from raw pointers. 383 | /// 384 | /// # Safety 385 | /// 386 | /// The caller must uphold the following invariants: 387 | /// - `region` must be a pointer to a region that satisfies the [`Layout`] returned by 388 | /// [`Self::region_layout(num_blocks)`], and it must be valid for reads and writes for the 389 | /// entire size indicated by that `Layout`. 390 | /// - `metadata` must be a pointer to a region that satisfies the [`Layout`] returned by 391 | /// [`Self::metadata_layout(num_blocks)`], and it must be valid for reads and writes for the 392 | /// entire size indicated by that `Layout`. 393 | /// - The regions pointed to by `region` and `metadata` must not overlap. 394 | /// - No references to the memory at `region` or `metadata` may exist when this function is 395 | /// called. 396 | /// - As long as the returned `Buddy` exists: 397 | /// - No accesses may be made to the memory at `region` except by way of methods on the 398 | /// returned `Buddy`. 399 | /// - No accesses may be made to the memory at `metadata`. 400 | /// 401 | /// # Errors 402 | /// 403 | /// This constructor returns an error if the allocator configuration is 404 | /// invalid. 
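///
/// # Example
///
/// A minimal sketch of satisfying these requirements by obtaining both regions from the
/// standard allocator (the parameter values are illustrative, and cleanup is elided):
///
/// ```no_run
/// use core::ptr::NonNull;
/// use acid_alloc::{Buddy, Raw};
///
/// const NUM_BLOCKS: usize = 4;
/// type Alloc = Buddy<4096, 4, Raw>;
///
/// let region_layout = Alloc::region_layout(NUM_BLOCKS).unwrap();
/// let metadata_layout = Alloc::metadata_layout(NUM_BLOCKS).unwrap();
///
/// unsafe {
///     let region = NonNull::new(std::alloc::alloc(region_layout)).unwrap();
///     let metadata = NonNull::new(std::alloc::alloc(metadata_layout)).unwrap();
///
///     // Note the parameter order: `metadata` first, then `region`.
///     let mut buddy = Alloc::new_raw(metadata, region, NUM_BLOCKS).unwrap();
///
///     // ... allocate and deallocate through `buddy`, then drop it and release
///     // both regions with `std::alloc::dealloc` ...
/// }
/// ```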
405 | /// 406 | /// [`Self::region_layout(num_blocks)`]: Self::region_layout 407 | /// [`Self::metadata_layout(num_blocks)`]: Self::metadata_layout 408 | /// [`Layout`]: core::alloc::Layout 409 | pub unsafe fn new_raw( 410 | metadata: NonNull, 411 | region: NonNull, 412 | num_blocks: usize, 413 | ) -> Result, AllocInitError> { 414 | unsafe { 415 | RawBuddy::try_new_with_address_gaps(metadata, region, num_blocks, iter::empty()) 416 | .map(|p| p.with_backing_allocator(Raw)) 417 | } 418 | } 419 | 420 | /// Constructs a new `Buddy` from raw pointers without populating it. 421 | /// 422 | /// The returned `Buddy` will be unable to allocate until address ranges are made available to 423 | /// it using [`Buddy::add_region`]. See [unpopulated initialization] for details. 424 | /// 425 | /// [unpopulated initialization]: crate::buddy::Buddy#unpopulated-initialization 426 | /// 427 | /// # Safety 428 | /// 429 | /// The caller must uphold the following invariants: 430 | /// - `region` must be a pointer to a region that satisfies the [`Layout`] returned by 431 | /// [`Self::region_layout(num_blocks)`]. 432 | /// - `metadata` must be a pointer to a region that satisfies the [`Layout`] returned by 433 | /// [`Self::metadata_layout(num_blocks)`], and it must be valid for reads and writes for the 434 | /// entire size indicated by that `Layout`. 435 | /// - The regions pointed to by `region` and `metadata` must not overlap. 436 | /// - No references to the memory at `metadata` may exist when this function is called. 437 | /// - As long as the returned `Buddy` exists, no accesses may be made to the memory at 438 | /// `metadata`. 439 | /// 440 | /// [`Self::region_layout(num_blocks)`]: Self::region_layout 441 | /// [`Self::metadata_layout(num_blocks)`]: Self::metadata_layout 442 | /// [`Layout`]: core::alloc::Layout 443 | pub unsafe fn new_raw_unpopulated( 444 | metadata: NonNull, 445 | region: NonNull, 446 | num_blocks: usize, 447 | ) -> Result, AllocInitError> { 448 | unsafe { 449 | RawBuddy::try_new_unpopulated(metadata, region, num_blocks) 450 | .map(|p| p.with_backing_allocator(Raw)) 451 | } 452 | } 453 | 454 | /// Populates a region not already managed by this allocator. 455 | /// 456 | /// # Panics 457 | /// 458 | /// This method panics if any of the following are true: 459 | /// - Either bound of `addr_range` falls outside the allocator region. 460 | /// - Either bound of `addr_range` is not aligned to the value returned by 461 | /// [`Self::min_block_size()`][0]. 462 | /// 463 | /// [0]: Buddy::min_block_size 464 | /// 465 | /// # Safety 466 | /// 467 | /// The caller must uphold the following invariants: 468 | /// - `range` must not overlap any range of addresses already managed by this allocator. 469 | /// - No references to the memory indicated by `addr_range` may exist when this function is 470 | /// called. 471 | /// - As long as `self` exists, no accesses may be made to the memory indicated by `addr_range` 472 | /// except by way of methods on `self`. 473 | pub unsafe fn add_region(&mut self, addr_range: Range) { 474 | unsafe { self.raw.add_region(addr_range) }; 475 | } 476 | 477 | /// Decomposes the allocator into its raw parts. 478 | /// 479 | /// # Safety 480 | /// 481 | /// This function must only be called if no references to outstanding 482 | /// allocations exist . 
483 | pub unsafe fn into_raw_parts(self) -> RawBuddyParts { 484 | let metadata = self.raw.metadata; 485 | let metadata_layout = Self::metadata_layout(self.raw.num_blocks.get()).unwrap(); 486 | let region = self.raw.base.ptr(); 487 | let region_layout = Self::region_layout(self.raw.num_blocks.get()).unwrap(); 488 | 489 | let _ = ManuallyDrop::new(self); 490 | 491 | RawBuddyParts { 492 | metadata, 493 | metadata_layout, 494 | region, 495 | region_layout, 496 | } 497 | } 498 | } 499 | 500 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 501 | impl Buddy { 502 | /// Attempts to construct a new `Buddy` backed by the global allocator. 503 | /// 504 | /// # Errors 505 | /// 506 | /// If allocation fails, returns `Err(AllocError)`. 507 | pub fn try_new(num_blocks: usize) -> Result, AllocInitError> { 508 | Self::try_new_with_offset_gaps(num_blocks, iter::empty()) 509 | } 510 | 511 | /// Attempts to construct a new `Buddy` backed by the global allocator, with 512 | /// gaps specified by offset ranges. 513 | /// 514 | /// # Errors 515 | /// 516 | /// If allocation fails, returns `Err(AllocError)`. 517 | #[doc(hidden)] 518 | pub fn try_new_with_offset_gaps( 519 | num_blocks: usize, 520 | gaps: I, 521 | ) -> Result, AllocInitError> 522 | where 523 | I: IntoIterator>, 524 | { 525 | let region_layout = Self::region_layout(num_blocks)?; 526 | let metadata_layout = Self::metadata_layout(num_blocks)?; 527 | 528 | let num_blocks = NonZeroUsize::new(num_blocks).ok_or(AllocInitError::InvalidConfig)?; 529 | 530 | unsafe { 531 | let region_ptr = { 532 | let raw = alloc::alloc::alloc(region_layout); 533 | NonNull::new(raw).ok_or(AllocInitError::AllocFailed(region_layout))? 534 | }; 535 | 536 | let metadata_ptr = { 537 | let raw = alloc::alloc::alloc(metadata_layout); 538 | NonNull::new(raw).ok_or_else(|| { 539 | alloc::alloc::dealloc(region_ptr.as_ptr(), region_layout); 540 | AllocInitError::AllocFailed(metadata_layout) 541 | })? 542 | }; 543 | 544 | match RawBuddy::::try_new_with_offset_gaps( 545 | metadata_ptr, 546 | region_ptr, 547 | num_blocks.get(), 548 | gaps, 549 | ) { 550 | Ok(b) => Ok(b.with_backing_allocator(Global)), 551 | Err(e) => { 552 | alloc::alloc::dealloc(region_ptr.as_ptr(), region_layout); 553 | alloc::alloc::dealloc(metadata_ptr.as_ptr(), metadata_layout); 554 | 555 | Err(e) 556 | } 557 | } 558 | } 559 | } 560 | } 561 | 562 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 563 | impl Buddy { 564 | /// Attempts to construct a new `Buddy` backed by the global allocator. 565 | /// 566 | /// # Errors 567 | /// 568 | /// If allocation fails, or if the allocator configuration is invalid, 569 | /// returns `Err`. 570 | #[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))] 571 | pub fn try_new(num_blocks: usize) -> Result, AllocInitError> { 572 | Buddy::::try_new_in(num_blocks, Global) 573 | } 574 | 575 | /// Attempts to construct a new `Buddy` backed by the global allocator, with 576 | /// gaps specified by offset ranges. 577 | /// 578 | /// # Errors 579 | /// 580 | /// If allocation fails, or if the allocator configuration is invalid, 581 | /// returns `Err`. 582 | #[doc(hidden)] 583 | pub fn try_new_with_offset_gaps( 584 | num_blocks: usize, 585 | gaps: I, 586 | ) -> Result, AllocInitError> 587 | where 588 | I: IntoIterator>, 589 | { 590 | Buddy::::try_new_with_offset_gaps_in(num_blocks, gaps, Global) 591 | } 592 | } 593 | 594 | #[cfg(feature = "unstable")] 595 | impl Buddy { 596 | /// Attempts to construct a new `Buddy` backed by `allocator`. 
597 | /// 598 | /// # Errors 599 | /// 600 | /// If allocation fails, returns `Err(AllocError)`. 601 | #[cfg_attr(docs_rs, doc(cfg(feature = "unstable")))] 602 | pub fn try_new_in( 603 | num_blocks: usize, 604 | allocator: A, 605 | ) -> Result, AllocInitError> { 606 | Self::try_new_with_offset_gaps_in(num_blocks, iter::empty(), allocator) 607 | } 608 | 609 | /// Attempts to construct a new `Buddy` backed by `allocator`, with gaps 610 | /// specified by offset ranges. 611 | /// 612 | /// # Errors 613 | /// 614 | /// If allocation fails, returns `Err(AllocError)`. 615 | #[doc(hidden)] 616 | #[cfg_attr(docs_rs, doc(cfg(feature = "unstable")))] 617 | pub fn try_new_with_offset_gaps_in( 618 | num_blocks: usize, 619 | gaps: I, 620 | allocator: A, 621 | ) -> Result, AllocInitError> 622 | where 623 | I: IntoIterator>, 624 | { 625 | let region_layout = Self::region_layout(num_blocks)?; 626 | let metadata_layout = Self::metadata_layout(num_blocks)?; 627 | 628 | let num_blocks = NonZeroUsize::new(num_blocks).ok_or(AllocInitError::InvalidConfig)?; 629 | 630 | let region = allocator 631 | .allocate(region_layout) 632 | .map_err(|_| AllocInitError::AllocFailed(region_layout))?; 633 | 634 | let metadata = match allocator.allocate(metadata_layout) { 635 | Ok(m) => m, 636 | Err(_) => unsafe { 637 | // SAFETY: region was received as NonNull via Allocator::allocate 638 | let region_ptr = NonNull::new_unchecked(region.as_ptr() as *mut u8); 639 | allocator.deallocate(region_ptr, region_layout); 640 | return Err(AllocInitError::AllocFailed(metadata_layout)); 641 | }, 642 | }; 643 | 644 | unsafe { 645 | // SAFETY: both pointers were received as NonNull via Allocator::allocate 646 | let region_ptr = NonNull::new_unchecked(region.as_ptr() as *mut u8); 647 | let metadata_ptr = NonNull::new_unchecked(metadata.as_ptr() as *mut u8); 648 | 649 | RawBuddy::::try_new_with_offset_gaps( 650 | metadata_ptr, 651 | region_ptr, 652 | num_blocks.get(), 653 | gaps, 654 | ) 655 | .map(|p| p.with_backing_allocator(allocator)) 656 | } 657 | } 658 | } 659 | 660 | impl Buddy { 661 | /// Returns the smallest block size that can be allocated by an allocator of this type. 662 | pub const fn min_block_size() -> Result { 663 | if LEVELS == 0 || !BLK_SIZE.is_power_of_two() || LEVELS >= usize::BITS as usize { 664 | return Err(AllocInitError::InvalidConfig); 665 | } 666 | 667 | let min_block_size = BLK_SIZE >> (LEVELS - 1); 668 | 669 | if min_block_size < mem::size_of::() { 670 | return Err(AllocInitError::InvalidConfig); 671 | } 672 | 673 | Ok(min_block_size) 674 | } 675 | 676 | /// Returns the layout requirements of the region managed by an allocator of 677 | /// this type. 678 | pub fn region_layout(num_blocks: usize) -> Result { 679 | let num_blocks = NonZeroUsize::new(num_blocks).ok_or(AllocInitError::InvalidConfig)?; 680 | Self::region_layout_impl(num_blocks) 681 | } 682 | 683 | fn region_layout_impl(num_blocks: NonZeroUsize) -> Result { 684 | let min_block_size = Self::min_block_size()?; 685 | let levels: u32 = LEVELS 686 | .try_into() 687 | .map_err(|_| AllocInitError::InvalidConfig)?; 688 | 689 | let size = 2usize 690 | .pow(levels - 1) 691 | .checked_mul(min_block_size) 692 | .ok_or(AllocInitError::InvalidConfig)? 693 | .checked_mul(num_blocks.get()) 694 | .ok_or(AllocInitError::InvalidConfig)?; 695 | let align = BLK_SIZE; 696 | 697 | Layout::from_size_align(size, align).map_err(|_| AllocInitError::InvalidConfig) 698 | } 699 | 700 | /// Returns the layout requirements of the metadata region for an allocator 701 | /// of this type. 
702 | pub fn metadata_layout(num_blocks: usize) -> Result { 703 | let num_blocks = NonZeroUsize::new(num_blocks).ok_or(AllocInitError::InvalidConfig)?; 704 | Self::metadata_layout_impl(num_blocks) 705 | } 706 | 707 | /// Returns the layout requirements of the metadata region for an allocator 708 | /// of this type. 709 | fn metadata_layout_impl(num_blocks: NonZeroUsize) -> Result { 710 | const fn sum_of_powers_of_2(max: u32) -> usize { 711 | 2_usize.pow(max + 1) - 1 712 | } 713 | 714 | let levels: u32 = LEVELS.try_into().unwrap(); 715 | 716 | // Each level needs one buddy bit per pair of blocks. 717 | let num_pairs = (num_blocks.get() + 1) / 2; 718 | 719 | // This is the layout required for the buddy bitmap of level 0. 720 | let buddy_l0_layout = Bitmap::map_layout(num_pairs); 721 | 722 | // Each subsequent level requires at most twice as much space as the 723 | // level 0 bitmap. It may require less if the number of level 0 blocks 724 | // is not a multiple of the bitmap block size, but for simplicity each 725 | // level is given exactly twice the space of the previous level. 726 | let (buddy_layout, _) = buddy_l0_layout 727 | .repeat(sum_of_powers_of_2(levels - 1)) 728 | .unwrap(); 729 | 730 | if LEVELS == 1 { 731 | // There's only one level, so no split bitmap is required. 732 | return Ok(buddy_layout); 733 | } 734 | 735 | // Each level except level (LEVELS - 1) needs one split bit per block. 736 | let split_l0_layout = Bitmap::map_layout(num_blocks.get()); 737 | 738 | // Let K equal the size of a split bitmap for level 0. If LEVELS is: 739 | // - 2, then 1 split bitmap is needed of size (2 - 1)K = K. 740 | // - 3, then 2 split bitmaps are needed of total size (3 - 1)K + (2 - 1)K = 3K. 741 | // - ... 742 | // - N, then 2 ^ (N - 2) split bitmaps are needed of total size: 743 | // 744 | // (N - 1)K + ((N - 1) - 1)K + ... + (2 - 1)K 745 | // = 2 * (2 ^ (N - 1) - 1) * K 746 | // = (sum from x = 1 to (LEVELS - 1) of 2^x) * K 747 | let (split_layout, _) = split_l0_layout 748 | .repeat(sum_of_powers_of_2(levels - 1)) 749 | .map_err(|_| AllocInitError::InvalidConfig)?; 750 | let (full_layout, _) = buddy_layout 751 | .extend(split_layout) 752 | .map_err(|_| AllocInitError::InvalidConfig)?; 753 | 754 | Ok(full_layout) 755 | } 756 | 757 | #[cfg(test)] 758 | #[inline] 759 | fn enumerate_free_list(&self, level: usize) -> usize { 760 | self.raw.enumerate_free_list(level) 761 | } 762 | 763 | /// Attempts to allocate a block of memory. 764 | /// 765 | /// On success, returns a [`NonNull<[u8]>`][0] which satisfies `layout`. 766 | /// 767 | /// The contents of the block are uninitialized. 768 | /// 769 | /// # Errors 770 | /// 771 | /// Returns `Err` if a suitable block could not be allocated. 772 | /// 773 | /// [0]: core::ptr::NonNull 774 | pub fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 775 | self.raw.allocate(layout) 776 | } 777 | 778 | /// Deallocates the memory referenced by `ptr`. 779 | /// 780 | /// # Safety 781 | /// 782 | /// `ptr` must denote a block of memory [*currently allocated*] via this allocator. 
783 | /// 784 | /// [*currently allocated*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#currently-allocated-memory 785 | pub unsafe fn deallocate(&mut self, ptr: NonNull) { 786 | unsafe { self.raw.deallocate(ptr) } 787 | } 788 | } 789 | 790 | impl fmt::Debug 791 | for Buddy 792 | { 793 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 794 | f.debug_struct("Buddy") 795 | .field("metadata", &self.raw.metadata) 796 | .field("base", &self.raw.base.ptr()) 797 | .field("num_blocks", &self.raw.num_blocks) 798 | .finish() 799 | } 800 | } 801 | 802 | impl Drop 803 | for Buddy 804 | { 805 | fn drop(&mut self) { 806 | let region = self.raw.base.ptr(); 807 | let metadata = self.raw.metadata; 808 | let num_blocks = self.raw.num_blocks; 809 | 810 | let region_layout = Self::region_layout_impl(num_blocks).unwrap(); 811 | let metadata_layout = Self::metadata_layout_impl(num_blocks).unwrap(); 812 | 813 | unsafe { 814 | self.backing_allocator.deallocate(region, region_layout); 815 | self.backing_allocator.deallocate(metadata, metadata_layout); 816 | } 817 | } 818 | } 819 | 820 | /// The raw parts of a `Buddy`. 821 | #[derive(Debug)] 822 | pub struct RawBuddyParts { 823 | /// A pointer to the metadata region. 824 | pub metadata: NonNull, 825 | /// The layout of the metadata region. 826 | pub metadata_layout: Layout, 827 | /// A pointer to the managed region. 828 | pub region: NonNull, 829 | /// The layout of the managed region. 830 | pub region_layout: Layout, 831 | } 832 | 833 | /// Like a `Buddy`, but without a `Drop` impl or an associated 834 | /// allocator. 835 | /// 836 | /// This assists in tacking on the allocator type parameter because this struct can be 837 | /// moved out of, while `Buddy` itself cannot. 838 | struct RawBuddy { 839 | base: BasePtr, 840 | metadata: NonNull, 841 | num_blocks: NonZeroUsize, 842 | levels: [BuddyLevel; LEVELS], 843 | } 844 | 845 | impl RawBuddy { 846 | fn with_backing_allocator( 847 | self, 848 | backing_allocator: A, 849 | ) -> Buddy { 850 | Buddy { 851 | raw: self, 852 | backing_allocator, 853 | } 854 | } 855 | 856 | #[cfg(any(feature = "unstable", feature = "alloc", test))] 857 | unsafe fn try_new_with_offset_gaps( 858 | metadata: NonNull, 859 | base: NonNull, 860 | num_blocks: usize, 861 | gaps: I, 862 | ) -> Result, AllocInitError> 863 | where 864 | I: IntoIterator>, 865 | { 866 | let region_addr = base.addr().get(); 867 | 868 | let gaps = gaps.into_iter().map(|ofs| { 869 | let start = NonZeroUsize::new(region_addr.checked_add(ofs.start).unwrap()).unwrap(); 870 | let end = NonZeroUsize::new(region_addr.checked_add(ofs.end).unwrap()).unwrap(); 871 | start..end 872 | }); 873 | 874 | unsafe { Self::try_new_with_address_gaps(metadata, base, num_blocks, gaps) } 875 | } 876 | 877 | /// Construct a new `RawBuddy` from raw pointers, with internal gaps. 878 | /// 879 | /// The address ranges in `gaps` are guaranteed not to be read from or 880 | /// written to. 
881 | unsafe fn try_new_with_address_gaps( 882 | metadata: NonNull, 883 | base: NonNull, 884 | num_blocks: usize, 885 | gaps: I, 886 | ) -> Result, AllocInitError> 887 | where 888 | I: IntoIterator>, 889 | { 890 | let mut buddy = unsafe { Self::try_new_unpopulated(metadata, base, num_blocks) }?; 891 | 892 | let min_block_size = Buddy::::min_block_size().unwrap(); 893 | let gaps = AlignedRanges::new(gaps.into_iter(), min_block_size); 894 | 895 | let mut start = buddy.base.addr(); 896 | for gap in gaps { 897 | let end = gap.start; 898 | unsafe { buddy.add_region(start..end) }; 899 | start = gap.end; 900 | } 901 | let end = buddy.base.limit(); 902 | 903 | unsafe { buddy.add_region(start..end) }; 904 | 905 | Ok(buddy) 906 | } 907 | 908 | unsafe fn add_region(&mut self, addr_range: Range) { 909 | // Make sure the region can be managed by this allocator. 910 | assert!(self.base.addr() <= addr_range.start); 911 | assert!(addr_range.end <= self.base.limit()); 912 | 913 | let min_block_size = Self::min_block_size().unwrap(); 914 | 915 | // Require the range bounds to be aligned on block boundaries. 916 | assert_eq!(addr_range.start.get() % min_block_size, 0); 917 | assert_eq!(addr_range.end.get() % min_block_size, 0); 918 | 919 | let min_pow = min_block_size.trailing_zeros(); 920 | let max_pow = BLK_SIZE.trailing_zeros(); 921 | 922 | let mut curs = addr_range.start; 923 | while curs < addr_range.end { 924 | let curs_pow = curs.trailing_zeros().min(max_pow); 925 | // Cursor should never go out of alignment with min block size. 926 | assert!(curs_pow >= min_pow); 927 | 928 | let curs_align = 1 << curs_pow; 929 | let curs_ofs = curs.get() - self.base.addr().get(); 930 | 931 | // Safe unchecked sub: `curs < range.end` 932 | let remaining = addr_range.end.get() - curs.get(); 933 | // Safe unchecked sub and shift: `remaining` is nonzero, so 934 | // `remaining.leading_zeros() + 1 <= usize::BITS` 935 | let remaining_po2 = 1 << (usize::BITS - (remaining.leading_zeros() + 1)); 936 | 937 | // Necessarily <= BLK_SIZE, as curs_pow is in the range 938 | // [min_pow, max_pow]. 939 | let block_size: usize = cmp::min(remaining_po2, curs_align); 940 | 941 | // Split all blocks that begin at this cursor position but are larger 942 | // than `block_size`. 943 | // 944 | // Note that the blocks may already be split, as sub-blocks may 945 | // already have been populated. 946 | let init_level = (max_pow - curs_pow) as usize; 947 | let target_level = (max_pow - block_size.trailing_zeros()) as usize; 948 | for lv in self.levels.iter_mut().take(target_level).skip(init_level) { 949 | // Mark the block as split. 950 | let split_bit = lv.index_of(curs_ofs); 951 | if let Some(s) = lv.splits.as_mut() { 952 | s.set(split_bit, true); 953 | } 954 | } 955 | 956 | unsafe { self.deallocate(self.base.with_addr(curs)) }; 957 | 958 | // TODO: call curs.checked_add() directly when nonzero_ops is stable 959 | curs = curs 960 | .get() 961 | .checked_add(block_size) 962 | .and_then(NonZeroUsize::new) 963 | .unwrap(); 964 | } 965 | } 966 | 967 | pub unsafe fn try_new_unpopulated( 968 | metadata: NonNull, 969 | base: NonNull, 970 | num_blocks: usize, 971 | ) -> Result, AllocInitError> { 972 | let num_blocks = NonZeroUsize::new(num_blocks).ok_or(AllocInitError::InvalidConfig)?; 973 | let min_block_size = Buddy::::min_block_size()?; 974 | let meta_layout = Buddy::::metadata_layout_impl(num_blocks)?; 975 | let region_layout = Buddy::::region_layout_impl(num_blocks)?; 976 | 977 | // Ensure pointer calculations will not overflow. 
978 | // TODO: use checked_add directly on NonNull when nonzero_ops is stable. 979 | let meta_end = metadata 980 | .addr() 981 | .get() 982 | .checked_add(meta_layout.size()) 983 | .ok_or(AllocInitError::InvalidLocation)?; 984 | base.addr() 985 | .get() 986 | .checked_add(region_layout.size()) 987 | .ok_or(AllocInitError::InvalidLocation)?; 988 | 989 | // TODO: use MaybeUninit::uninit_array when not feature gated 990 | let mut levels: [MaybeUninit; LEVELS] = unsafe { 991 | // SAFETY: An uninitialized `[MaybeUninit<_>; _]` is valid. 992 | MaybeUninit::<[MaybeUninit; LEVELS]>::uninit().assume_init() 993 | }; 994 | 995 | let mut meta_curs = metadata.as_ptr(); 996 | 997 | // Initialize the per-level metadata. 998 | for (li, level) in levels.iter_mut().enumerate() { 999 | let block_size = 2_usize.pow((LEVELS - li) as u32 - 1) * min_block_size; 1000 | let block_factor = 2_usize.pow(li as u32); 1001 | let num_blocks = block_factor * num_blocks.get(); 1002 | let num_pairs = (num_blocks + 1) / 2; 1003 | 1004 | let buddy_size = Bitmap::map_layout(num_pairs).size(); 1005 | let buddy_bitmap = unsafe { Bitmap::new(num_pairs, meta_curs as *mut u64) }; 1006 | 1007 | meta_curs = unsafe { 1008 | meta_curs.offset( 1009 | buddy_size 1010 | .try_into() 1011 | .expect("buddy bitmap layout size overflows isize"), 1012 | ) 1013 | }; 1014 | 1015 | let split_bitmap = if li < LEVELS - 1 { 1016 | let split_size = Bitmap::map_layout(num_blocks).size(); 1017 | let split_bitmap = unsafe { Bitmap::new(num_blocks, meta_curs as *mut u64) }; 1018 | 1019 | meta_curs = unsafe { 1020 | meta_curs.offset( 1021 | split_size 1022 | .try_into() 1023 | .expect("split bitmap layout size overflows isize"), 1024 | ) 1025 | }; 1026 | 1027 | Some(split_bitmap) 1028 | } else { 1029 | None 1030 | }; 1031 | 1032 | unsafe { 1033 | level.as_mut_ptr().write(BuddyLevel { 1034 | block_size, 1035 | block_pow: block_size.trailing_zeros(), 1036 | free_list: None, 1037 | buddies: buddy_bitmap, 1038 | splits: split_bitmap, 1039 | }); 1040 | } 1041 | } 1042 | 1043 | if meta_curs.addr() > meta_end { 1044 | panic!( 1045 | "metadata cursor overran layout size: curs = {meta_end}, layout = {meta_layout:?}" 1046 | ); 1047 | } 1048 | 1049 | // Convert to an initialized array. 1050 | let levels = unsafe { 1051 | // TODO: When `MaybeUninit::array_assume_init()` is stable, use that 1052 | // instead. 1053 | // 1054 | // SAFETY: 1055 | // - `levels` is fully initialized. 1056 | // - `MaybeUninit` and `T` have the same layout. 1057 | // - `MaybeUninit` won't drop `T`, so no double-frees. 
1058 | (&levels as *const _ as *const [BuddyLevel; LEVELS]).read() 1059 | }; 1060 | 1061 | let base = BasePtr::new(base, region_layout.size()); 1062 | 1063 | Ok(RawBuddy { 1064 | base, 1065 | metadata, 1066 | num_blocks, 1067 | levels, 1068 | }) 1069 | } 1070 | 1071 | const fn min_block_size() -> Result { 1072 | if LEVELS == 0 || !BLK_SIZE.is_power_of_two() || LEVELS >= usize::BITS as usize { 1073 | return Err(AllocInitError::InvalidConfig); 1074 | } 1075 | 1076 | let min_block_size = BLK_SIZE >> (LEVELS - 1); 1077 | 1078 | if min_block_size < mem::size_of::() { 1079 | return Err(AllocInitError::InvalidConfig); 1080 | } 1081 | 1082 | Ok(min_block_size) 1083 | } 1084 | 1085 | #[cfg(test)] 1086 | #[inline] 1087 | fn enumerate_free_list(&self, level: usize) -> usize { 1088 | self.levels[level].enumerate_free_list(self.base) 1089 | } 1090 | 1091 | fn level_for(&self, size: usize) -> Option { 1092 | fn round_up_pow2(x: usize) -> Option { 1093 | match x { 1094 | 0 => None, 1095 | 1 => Some(1), 1096 | x if x >= (1 << 63) => None, 1097 | _ => Some(2usize.pow((x - 1).ilog2() as u32 + 1)), 1098 | } 1099 | } 1100 | 1101 | let min_block_size = self.levels[LEVELS - 1].block_size; 1102 | let max_block_size = self.levels[0].block_size; 1103 | if size > max_block_size { 1104 | return None; 1105 | } 1106 | 1107 | let alloc_size = cmp::max(round_up_pow2(size).unwrap(), min_block_size); 1108 | let level: usize = (max_block_size.ilog2() - alloc_size.ilog2()) 1109 | .try_into() 1110 | .unwrap(); 1111 | 1112 | Some(level) 1113 | } 1114 | 1115 | fn min_free_level(&self, block_ofs: usize) -> usize { 1116 | let min_block_size = self.levels[LEVELS - 1].block_size; 1117 | let max_block_size = self.levels[0].block_size; 1118 | 1119 | if block_ofs == 0 { 1120 | return 0; 1121 | } 1122 | 1123 | let max_size = 1 << block_ofs.trailing_zeros(); 1124 | if max_size > max_block_size { 1125 | return 0; 1126 | } 1127 | 1128 | assert!(max_size >= min_block_size); 1129 | 1130 | (max_block_size.ilog2() - max_size.ilog2()) 1131 | .try_into() 1132 | .unwrap() 1133 | } 1134 | 1135 | /// Attempts to allocate a block of memory. 1136 | /// 1137 | /// On success, returns a [`NonNull<[u8]>`][0] which satisfies `layout`. 1138 | /// 1139 | /// The contents of the block are uninitialized. 1140 | /// 1141 | /// # Errors 1142 | /// 1143 | /// Returns `Err` if a suitable block could not be allocated. 1144 | /// 1145 | /// [0]: core::ptr::NonNull 1146 | pub fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 1147 | if layout.size() == 0 || layout.align() > layout.size() { 1148 | return Err(AllocError); 1149 | } 1150 | 1151 | let target_level = self.level_for(layout.size()).ok_or(AllocError)?; 1152 | 1153 | // If there is a free block of the correct size, return it immediately. 1154 | if let Some(block) = unsafe { self.levels[target_level].allocate(self.base) } { 1155 | return Ok(self.base.with_addr_and_size(block, layout.size())); 1156 | } 1157 | 1158 | // Otherwise, scan increasing block sizes until a free block is found. 1159 | let (block, init_level) = (0..target_level) 1160 | .rev() 1161 | .find_map(|level| unsafe { 1162 | self.levels[level] 1163 | .allocate(self.base) 1164 | .map(|block| (block, level)) 1165 | }) 1166 | .ok_or(AllocError)?; 1167 | 1168 | let block_ofs = self.base.offset_to(block); 1169 | 1170 | // Split the block repeatedly to obtain a suitably sized block. 1171 | for level in init_level..target_level { 1172 | // Split the block. The address of the front half does not change. 
1173 | let half_block_size = self.levels[level].block_size / 2; 1174 | let back_half = NonZeroUsize::new(block.get() + half_block_size).unwrap(); 1175 | 1176 | // Mark the block as split. 1177 | let split_bit = self.levels[level].index_of(block_ofs); 1178 | if let Some(s) = self.levels[level].splits.as_mut() { 1179 | s.set(split_bit, true); 1180 | } 1181 | 1182 | // Add one half of the split block to the next level's free list. 1183 | unsafe { self.levels[level + 1].assign(self.base, back_half) }; 1184 | } 1185 | 1186 | // The returned block inherits the provenance of the base pointer. 1187 | Ok(self.base.with_addr_and_size(block, layout.size())) 1188 | } 1189 | 1190 | /// Deallocates the memory referenced by `ptr`. 1191 | /// 1192 | /// # Safety 1193 | /// 1194 | /// `ptr` must denote a block of memory [*currently allocated*] via this allocator. 1195 | /// 1196 | /// [*currently allocated*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#currently-allocated-memory 1197 | pub unsafe fn deallocate(&mut self, ptr: NonNull) { 1198 | // Some addresses can't come from earlier levels because their addresses 1199 | // imply a smaller block size. 1200 | let block_ofs = self.base.offset_to(ptr.addr()); 1201 | let min_level = self.min_free_level(block_ofs); 1202 | 1203 | let mut at_level = None; 1204 | for level in min_level..self.levels.len() { 1205 | if self.levels[level] 1206 | .splits 1207 | .as_ref() 1208 | .map(|s| !s.get(self.levels[level].index_of(block_ofs))) 1209 | .unwrap_or(true) 1210 | { 1211 | at_level = Some(level); 1212 | break; 1213 | } 1214 | } 1215 | 1216 | let at_level = at_level.expect("no level found to free block"); 1217 | 1218 | let mut block = Some(ptr); 1219 | for level in (0..=at_level).rev() { 1220 | match block.take() { 1221 | Some(b) => unsafe { 1222 | block = self.levels[level].deallocate(self.base, b, level != 0); 1223 | }, 1224 | None => break, 1225 | } 1226 | } 1227 | 1228 | assert!(block.is_none(), "top level coalesced a block"); 1229 | } 1230 | } 1231 | 1232 | struct AlignedRanges 1233 | where 1234 | I: Iterator>, 1235 | { 1236 | align: usize, 1237 | inner: Peekable, 1238 | } 1239 | 1240 | impl AlignedRanges 1241 | where 1242 | I: Iterator>, 1243 | { 1244 | fn new(iter: I, align: usize) -> AlignedRanges { 1245 | assert!(align.is_power_of_two()); 1246 | 1247 | AlignedRanges { 1248 | align, 1249 | inner: iter.peekable(), 1250 | } 1251 | } 1252 | } 1253 | 1254 | impl Iterator for AlignedRanges 1255 | where 1256 | I: Iterator>, 1257 | { 1258 | type Item = Range; 1259 | 1260 | fn next(&mut self) -> Option { 1261 | let mut cur; 1262 | loop { 1263 | cur = self.inner.next()?; 1264 | if !cur.is_empty() { 1265 | break; 1266 | } 1267 | } 1268 | 1269 | let align = self.align; 1270 | 1271 | // Align start down. 1272 | let start = cur.start.get() & !(align - 1); 1273 | 1274 | let mut end; 1275 | loop { 1276 | // Align end up. 1277 | end = { 1278 | let less_one = cur.end.get() - 1; 1279 | let above = less_one 1280 | .checked_add(align) 1281 | .expect("end overflowed when aligned up"); 1282 | above & !(align - 1) 1283 | }; 1284 | 1285 | // Peek the next range. 1286 | let next = match self.inner.peek() { 1287 | Some(next) => next.clone(), 1288 | None => break, 1289 | }; 1290 | 1291 | assert!(next.start >= cur.end); 1292 | let next_start = next.start.get() & !(align - 1); 1293 | 1294 | if next_start <= end { 1295 | assert!(next.end >= cur.end); 1296 | 1297 | // Merge contiguous ranges. `cur.end` will be aligned up on the 1298 | // next loop iteration. 
1299 | cur.end = next.end; 1300 | 1301 | // Consume the peeked item. 1302 | let _ = self.inner.next(); 1303 | } else { 1304 | // The ranges are discontiguous. 1305 | break; 1306 | } 1307 | } 1308 | 1309 | cur.start = NonZeroUsize::new(start).expect("start aligned down to null"); 1310 | cur.end = NonZeroUsize::new(end).unwrap(); 1311 | 1312 | Some(cur) 1313 | } 1314 | } 1315 | 1316 | #[cfg(test)] 1317 | mod tests { 1318 | use super::*; 1319 | 1320 | extern crate std; 1321 | 1322 | use crate::core::{alloc::Layout, ptr::NonNull, slice}; 1323 | use std::prelude::rust_2021::*; 1324 | 1325 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 1326 | use crate::Global; 1327 | 1328 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 1329 | use alloc::alloc::Global; 1330 | 1331 | #[test] 1332 | fn zero_levels_errors() { 1333 | let _ = Buddy::<256, 0, Global>::try_new(8).unwrap_err(); 1334 | } 1335 | 1336 | #[test] 1337 | fn overflow_address_space_errors() { 1338 | let _ = Buddy::<256, 1, Global>::try_new(usize::MAX).unwrap_err(); 1339 | } 1340 | 1341 | #[test] 1342 | fn too_many_levels_errors() { 1343 | const LEVELS: usize = usize::BITS as usize; 1344 | let _ = Buddy::<256, LEVELS, Global>::try_new(8).unwrap_err(); 1345 | } 1346 | 1347 | #[test] 1348 | fn non_power_of_two_block_size_errors() { 1349 | let _ = Buddy::<0, 1, Global>::try_new(8).unwrap_err(); 1350 | let _ = Buddy::<255, 4, Global>::try_new(8).unwrap_err(); 1351 | } 1352 | 1353 | #[test] 1354 | fn too_small_min_block_size_errors() { 1355 | const LEVELS: usize = 8; 1356 | const MIN_SIZE: usize = core::mem::size_of::() / 2; 1357 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1358 | const NUM_BLOCKS: usize = 8; 1359 | 1360 | let _ = Buddy::::try_new(NUM_BLOCKS).unwrap_err(); 1361 | } 1362 | 1363 | #[test] 1364 | fn zero_blocks_errors() { 1365 | Buddy::<128, 4, Global>::try_new(0).unwrap_err(); 1366 | } 1367 | 1368 | #[test] 1369 | fn one_level() { 1370 | Buddy::<16, 1, Global>::try_new(1).unwrap(); 1371 | Buddy::<128, 1, Global>::try_new(1).unwrap(); 1372 | Buddy::<4096, 1, Global>::try_new(1).unwrap(); 1373 | } 1374 | 1375 | #[test] 1376 | fn create_and_destroy() { 1377 | // These parameters give a maximum block size of 1KiB and a total size of 8KiB. 
1378 | const LEVELS: usize = 8; 1379 | const MIN_SIZE: usize = 16; 1380 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1381 | const NUM_BLOCKS: usize = 8; 1382 | 1383 | let allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1384 | drop(allocator); 1385 | } 1386 | 1387 | #[test] 1388 | fn alloc_empty() { 1389 | const LEVELS: usize = 4; 1390 | const MIN_SIZE: usize = 16; 1391 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1392 | const NUM_BLOCKS: usize = 8; 1393 | 1394 | let mut allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1395 | 1396 | let layout = Layout::from_size_align(0, 1).unwrap(); 1397 | allocator.allocate(layout).unwrap_err(); 1398 | } 1399 | 1400 | #[test] 1401 | fn alloc_min_size() { 1402 | const LEVELS: usize = 4; 1403 | const MIN_SIZE: usize = 16; 1404 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1405 | const NUM_BLOCKS: usize = 8; 1406 | 1407 | let mut allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1408 | 1409 | let layout = Layout::from_size_align(1, 1).unwrap(); 1410 | let a = allocator.allocate(layout).unwrap(); 1411 | let _b = allocator.allocate(layout).unwrap(); 1412 | let c = allocator.allocate(layout).unwrap(); 1413 | unsafe { 1414 | allocator.deallocate(a.cast()); 1415 | allocator.deallocate(c.cast()); 1416 | } 1417 | } 1418 | 1419 | #[test] 1420 | fn alloc_write_and_free() { 1421 | const LEVELS: usize = 8; 1422 | const MIN_SIZE: usize = 16; 1423 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1424 | const NUM_BLOCKS: usize = 8; 1425 | 1426 | let mut allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1427 | 1428 | unsafe { 1429 | let layout = Layout::from_size_align(64, MIN_SIZE).unwrap(); 1430 | let ptr: NonNull = allocator.allocate(layout).unwrap().cast(); 1431 | 1432 | { 1433 | // Do this in a separate scope so that the slice no longer 1434 | // exists when ptr is freed 1435 | let buf: &mut [u8] = slice::from_raw_parts_mut(ptr.as_ptr(), layout.size()); 1436 | for (i, byte) in buf.iter_mut().enumerate() { 1437 | *byte = i as u8; 1438 | } 1439 | } 1440 | 1441 | allocator.deallocate(ptr); 1442 | } 1443 | } 1444 | 1445 | #[test] 1446 | fn coalesce_one() { 1447 | // This configuration gives a 2-level buddy allocator with one 1448 | // splittable top-level block. 1449 | const LEVELS: usize = 2; 1450 | const MIN_SIZE: usize = 16; 1451 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1452 | const NUM_BLOCKS: usize = 1; 1453 | 1454 | let mut allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1455 | 1456 | let full_layout = Layout::from_size_align(2 * MIN_SIZE, MIN_SIZE).unwrap(); 1457 | let half_layout = Layout::from_size_align(MIN_SIZE, MIN_SIZE).unwrap(); 1458 | 1459 | unsafe { 1460 | // Allocate two minimum-size blocks to split the top block. 1461 | let a = allocator.allocate(half_layout).unwrap(); 1462 | let b = allocator.allocate(half_layout).unwrap(); 1463 | 1464 | // Free both blocks, coalescing them. 1465 | allocator.deallocate(a.cast()); 1466 | allocator.deallocate(b.cast()); 1467 | 1468 | // Allocate the entire region to ensure coalescing worked. 1469 | let c = allocator.allocate(full_layout).unwrap(); 1470 | allocator.deallocate(c.cast()); 1471 | 1472 | // Same as above. 1473 | let a = allocator.allocate(half_layout).unwrap(); 1474 | let b = allocator.allocate(half_layout).unwrap(); 1475 | 1476 | // Free both blocks, this time in reverse order. 
1477 | allocator.deallocate(a.cast()); 1478 | allocator.deallocate(b.cast()); 1479 | 1480 | let c = allocator.allocate(full_layout).unwrap(); 1481 | allocator.deallocate(c.cast()); 1482 | } 1483 | } 1484 | 1485 | #[test] 1486 | fn coalesce_many() { 1487 | const LEVELS: usize = 4; 1488 | const MIN_SIZE: usize = 16; 1489 | const BLK_SIZE: usize = MIN_SIZE << (LEVELS - 1); 1490 | const NUM_BLOCKS: usize = 8; 1491 | 1492 | let mut allocator = Buddy::::try_new(NUM_BLOCKS).unwrap(); 1493 | 1494 | for lvl in (0..LEVELS).rev() { 1495 | let alloc_size = 2usize.pow((LEVELS - lvl - 1) as u32) * MIN_SIZE; 1496 | let layout = Layout::from_size_align(alloc_size, MIN_SIZE).unwrap(); 1497 | let num_allocs = 2usize.pow(lvl as u32) * NUM_BLOCKS; 1498 | 1499 | let mut allocs = Vec::with_capacity(num_allocs); 1500 | for _ in 0..num_allocs { 1501 | let ptr = allocator.allocate(layout).unwrap(); 1502 | 1503 | { 1504 | // Do this in a separate scope so that the slice no longer 1505 | // exists when ptr is freed 1506 | let buf: &mut [u8] = 1507 | unsafe { slice::from_raw_parts_mut(ptr.as_ptr().cast(), layout.size()) }; 1508 | for (i, byte) in buf.iter_mut().enumerate() { 1509 | *byte = (i % 256) as u8; 1510 | } 1511 | } 1512 | 1513 | allocs.push(ptr); 1514 | } 1515 | 1516 | for alloc in allocs { 1517 | unsafe { 1518 | allocator.deallocate(alloc.cast()); 1519 | } 1520 | } 1521 | } 1522 | } 1523 | 1524 | #[test] 1525 | fn one_level_gaps() { 1526 | type Alloc = Buddy<16, 1, Global>; 1527 | let layout = Layout::from_size_align(1, 1).unwrap(); 1528 | 1529 | for gaps in std::vec![[0..1], [15..16], [0..16], [15..32]] { 1530 | let mut allocator = Alloc::try_new_with_offset_gaps(1, gaps).unwrap(); 1531 | allocator.allocate(layout).unwrap_err(); 1532 | } 1533 | 1534 | for gaps in std::vec![[0..32], [0..17], [15..32], [15..17]] { 1535 | let mut allocator = Alloc::try_new_with_offset_gaps(2, gaps).unwrap(); 1536 | allocator.allocate(layout).unwrap_err(); 1537 | } 1538 | 1539 | for gaps in std::vec![[0..48], [0..33], [15..48], [15..33]] { 1540 | let mut allocator = Alloc::try_new_with_offset_gaps(3, gaps).unwrap(); 1541 | allocator.allocate(layout).unwrap_err(); 1542 | } 1543 | 1544 | for gaps in std::vec![[0..32], [0..17], [15..32], [15..17], [16..48], [16..33]] { 1545 | let mut allocator = Alloc::try_new_with_offset_gaps(3, gaps).unwrap(); 1546 | let a = allocator.allocate(layout).unwrap(); 1547 | unsafe { allocator.deallocate(a.cast()) }; 1548 | } 1549 | 1550 | let mut allocator = Alloc::try_new_with_offset_gaps(1, [16..32]).unwrap(); 1551 | let a = allocator.allocate(layout).unwrap(); 1552 | unsafe { allocator.deallocate(a.cast()) }; 1553 | } 1554 | 1555 | #[test] 1556 | fn two_level_gaps() { 1557 | type Alloc = Buddy<32, 2, Global>; 1558 | let one_byte = Layout::from_size_align(1, 1).unwrap(); 1559 | let half = Layout::from_size_align(16, 16).unwrap(); 1560 | let full = Layout::from_size_align(32, 32).unwrap(); 1561 | 1562 | for gaps in std::vec![[0..32], [0..17], [15..32], [8..24], [0..48], [15..48]] { 1563 | let mut allocator = Alloc::try_new_with_offset_gaps(1, gaps).unwrap(); 1564 | allocator.allocate(one_byte).unwrap_err(); 1565 | } 1566 | 1567 | for gaps in std::vec![[0..16], [16..32], [16..48]] { 1568 | let mut allocator = Alloc::try_new_with_offset_gaps(1, gaps).unwrap(); 1569 | 1570 | // Can't allocate the entire region. 1571 | allocator.allocate(full).unwrap_err(); 1572 | 1573 | // Can allocate the half-region not covered by the gap. 
1574 | let a = allocator.allocate(half).unwrap(); 1575 | // No memory left after that allocation. 1576 | allocator.allocate(one_byte).unwrap_err(); 1577 | unsafe { allocator.deallocate(a.cast()) }; 1578 | 1579 | // Ensure freeing doesn't cause coalescing into the gap. 1580 | allocator.allocate(full).unwrap_err(); 1581 | } 1582 | } 1583 | 1584 | #[test] 1585 | fn three_level_gaps() { 1586 | type Alloc = Buddy<128, 4, Global>; 1587 | let layout = Layout::from_size_align(16, 4).unwrap(); 1588 | 1589 | // X = gap, s = split, f = free 1590 | // [ s | F ] 1591 | // [ s | s | | ] 1592 | // [ X | X | s | F | | | | ] 1593 | // [ | | | |X|F| | | | | | | | | | ] 1594 | let mut allocator = Alloc::try_new_with_offset_gaps(2, [0..72]).unwrap(); 1595 | std::println!("base = {:08x}", allocator.raw.base.addr().get()); 1596 | 1597 | // [ s | F ] 128 1598 | // [ s | s | | ] 64 1599 | // [ X | X | s | F | | | | ] 32 1600 | // [ | | | |X|a| | | | | | | | | | ] 16 1601 | let a = allocator.allocate(layout).unwrap(); 1602 | std::println!("a = {:08x}", a.as_ptr() as *mut () as usize); 1603 | assert_eq!(allocator.enumerate_free_list(0), 1); 1604 | assert_eq!(allocator.enumerate_free_list(1), 0); 1605 | assert_eq!(allocator.enumerate_free_list(2), 1); 1606 | assert_eq!(allocator.enumerate_free_list(3), 0); 1607 | 1608 | // [ s | F ] 128 1609 | // [ s | s | | ] 64 1610 | // [ X | X | s | s | | | | ] 32 1611 | // [ | | | |X|a|b|F| | | | | | | | ] 16 1612 | let b = allocator.allocate(layout).unwrap(); 1613 | std::println!("b = {:08x}", b.as_ptr() as *mut () as usize); 1614 | assert_eq!(allocator.enumerate_free_list(0), 1); 1615 | assert_eq!(allocator.enumerate_free_list(1), 0); 1616 | assert_eq!(allocator.enumerate_free_list(2), 0); 1617 | assert_eq!(allocator.enumerate_free_list(3), 1); 1618 | 1619 | // [ s | F ] 128 1620 | // [ s | s | | ] 64 1621 | // [ X | X | s | s | | | | ] 32 1622 | // [ | | | |X|F|b|F| | | | | | | | ] 16 1623 | unsafe { allocator.deallocate(a.cast()) }; 1624 | assert_eq!(allocator.enumerate_free_list(0), 1); 1625 | assert_eq!(allocator.enumerate_free_list(1), 0); 1626 | assert_eq!(allocator.enumerate_free_list(2), 0); 1627 | assert_eq!(allocator.enumerate_free_list(3), 2); 1628 | for lev in allocator.raw.levels.iter() { 1629 | for bit in lev.buddies.iter() { 1630 | let s = if bit { "1" } else { "0" }; 1631 | 1632 | std::print!("{s}"); 1633 | } 1634 | 1635 | std::println!(); 1636 | } 1637 | 1638 | // [ s | F ] 128 1639 | // [ s | s | | ] 64 1640 | // [ X | X | s | F | | | | ] 32 1641 | // [ | | | |X|F| | | | | | | | | | ] 16 1642 | unsafe { allocator.deallocate(b.cast()) }; 1643 | } 1644 | 1645 | #[test] 1646 | fn add_coalesce() { 1647 | const BLK_SIZE: usize = 128; 1648 | const LEVELS: usize = 2; 1649 | type Alloc = Buddy; 1650 | const NUM_BLOCKS: usize = 1; 1651 | 1652 | let region_layout = Alloc::region_layout(NUM_BLOCKS).unwrap(); 1653 | let metadata_layout = Alloc::metadata_layout(NUM_BLOCKS).unwrap(); 1654 | let region = NonNull::new(unsafe { std::alloc::alloc(region_layout) }).unwrap(); 1655 | let metadata = NonNull::new(unsafe { std::alloc::alloc(metadata_layout) }).unwrap(); 1656 | 1657 | let mut buddy = 1658 | unsafe { Alloc::new_raw_unpopulated(metadata, region, NUM_BLOCKS).unwrap() }; 1659 | 1660 | let base_addr = buddy.raw.base.addr(); 1661 | let middle = base_addr 1662 | .get() 1663 | .checked_add(BLK_SIZE / 2) 1664 | .and_then(NonZeroUsize::new) 1665 | .unwrap(); 1666 | let limit = buddy.raw.base.limit(); 1667 | 1668 | let left = base_addr..middle; 1669 | let right = 
middle..limit; 1670 | 1671 | let half_layout = Layout::from_size_align(BLK_SIZE / 2, BLK_SIZE / 2).unwrap(); 1672 | let full_layout = Layout::from_size_align(BLK_SIZE, BLK_SIZE).unwrap(); 1673 | 1674 | // The allocator is unpopulated, so this should fail. 1675 | buddy.allocate(half_layout).unwrap_err(); 1676 | 1677 | // Populate the left block. 1678 | unsafe { buddy.add_region(left) }; 1679 | 1680 | // Now that the left half is populated, allocation should succeed. 1681 | let left_blk = buddy.allocate(half_layout).unwrap(); 1682 | unsafe { buddy.deallocate(left_blk.cast()) }; 1683 | 1684 | // Populate the right block. This should cause the blocks to coalesce. 1685 | unsafe { buddy.add_region(right) }; 1686 | 1687 | // Since both halves have been populated and coalesced, this should succeed. 1688 | let full_blk = buddy.allocate(full_layout).unwrap(); 1689 | unsafe { buddy.deallocate(full_blk.cast()) }; 1690 | 1691 | drop(buddy); 1692 | 1693 | unsafe { 1694 | std::alloc::dealloc(region.as_ptr(), region_layout); 1695 | std::alloc::dealloc(metadata.as_ptr(), metadata_layout); 1696 | } 1697 | } 1698 | } 1699 | -------------------------------------------------------------------------------- /src/bump.rs: -------------------------------------------------------------------------------- 1 | //! Bump allocation. 2 | //! 3 | //! A bump allocator is a simple and fast allocator well-suited to allocating 4 | //! large numbers of objects that will be deallocated en masse. However, a bump 5 | //! allocator cannot free individual objects; all outstanding allocations must 6 | //! be freed in order to reclaim memory. 7 | //! 8 | //! ## Characteristics 9 | //! 10 | //! #### Time complexity 11 | //! 12 | //! | Operation | Best-case | Worst-case | 13 | //! |--------------------------|-----------|------------| 14 | //! | Allocate | O(1) | O(1) | 15 | //! | Deallocate all | O(1) | O(1) | 16 | //! 17 | //! #### Fragmentation 18 | //! 19 | //! Because bump allocators can allocate blocks of any size, they suffer minimal internal 20 | //! fragmentation. External fragmentation becomes significant when many deallocations occur 21 | //! without deallocating all outstanding allocations. 22 | 23 | use core::fmt; 24 | 25 | use crate::{ 26 | core::{ 27 | alloc::{AllocError, Layout}, 28 | num::NonZeroUsize, 29 | ptr::NonNull, 30 | }, 31 | AllocInitError, BackingAllocator, BasePtr, Raw, 32 | }; 33 | 34 | #[cfg(feature = "unstable")] 35 | use crate::core::alloc::Allocator; 36 | 37 | #[cfg(not(feature = "unstable"))] 38 | use crate::core::ptr::NonNullStrict; 39 | 40 | #[cfg(any(feature = "alloc", test))] 41 | use crate::Global; 42 | 43 | /// A bump allocator. 44 | /// 45 | /// For a general discussion of bump allocation, see the [module-level documentation]. 46 | /// 47 | /// [module-level documentation]: crate::bump 48 | pub struct Bump { 49 | base: BasePtr, 50 | low_mark: NonZeroUsize, 51 | outstanding: usize, 52 | layout: Layout, 53 | backing_allocator: A, 54 | } 55 | 56 | impl Bump { 57 | /// Constructs a new `Bump` from a raw pointer. 58 | /// 59 | /// # Safety 60 | /// 61 | /// The caller must uphold the following invariants: 62 | /// - `region` must be a pointer to a region that fits `layout`, and it must be valid for reads 63 | /// and writes for the entire size indicated by `layout`. 64 | /// - No references to the memory at `region` may exist when this function is called. 
65 | /// - As long as the returned `Bump` exists, no accesses may be made to the memory at `region` 66 | /// except by way of methods on the returned `Bump`. 67 | pub unsafe fn new_raw( 68 | region: NonNull, 69 | layout: Layout, 70 | ) -> Result, AllocInitError> { 71 | unsafe { RawBump::try_new(region, layout).map(|b| b.with_backing_allocator(Raw)) } 72 | } 73 | 74 | /// Splits the allocator at the low mark. 75 | /// 76 | /// Returns a tuple of two `Bump`s. The first manages all of `self`'s 77 | /// unallocated memory, while the second manages all of `self`'s allocated 78 | /// memory; all prior allocations from `self` are backed by the second 79 | /// element. 80 | // TODO: this API is not finalized. It should not be considered part of the stable public API. 81 | #[doc(hidden)] 82 | pub fn split(self) -> (Bump, Bump) { 83 | let base_addr = self.base.addr(); 84 | let lower_size = self.low_mark.get().checked_sub(base_addr.get()).unwrap(); 85 | let lower_limit = self.low_mark; 86 | let lower = Bump { 87 | base: BasePtr::new(self.base.ptr(), lower_size), 88 | outstanding: 0, 89 | low_mark: lower_limit, 90 | layout: Layout::from_size_align(lower_size, 1).unwrap(), 91 | backing_allocator: Raw, 92 | }; 93 | 94 | let upper_size = self.layout.size().checked_sub(lower_size).unwrap(); 95 | let new_base = BasePtr::new(self.base.with_addr(self.low_mark), upper_size); 96 | let upper = Bump { 97 | base: new_base, 98 | low_mark: self.low_mark, 99 | outstanding: self.outstanding, 100 | // TODO: Alignment may be higher in some cases. Is that useful with Raw? 101 | layout: Layout::from_size_align(upper_size, 1).unwrap(), 102 | backing_allocator: Raw, 103 | }; 104 | 105 | (lower, upper) 106 | } 107 | } 108 | 109 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 110 | #[cfg_attr(docs_rs, doc(cfg(all(feature = "alloc"))))] 111 | impl Bump { 112 | /// Attempts to construct a new `Bump` backed by the global allocator. 113 | /// 114 | /// The memory managed by this `Bump` is allocated from the global allocator according to 115 | /// `layout`. 116 | /// 117 | /// # Errors 118 | /// 119 | /// Returns an error if any of the following are true: 120 | /// - `layout.size()` is zero. 121 | /// - Sufficient memory could not be allocated from the global allocator. 122 | pub fn try_new(layout: Layout) -> Result, AllocInitError> { 123 | if layout.size() == 0 { 124 | return Err(AllocInitError::InvalidConfig); 125 | } 126 | 127 | unsafe { 128 | let region_raw = alloc::alloc::alloc(layout); 129 | let region_ptr = NonNull::new(region_raw).ok_or(AllocInitError::AllocFailed(layout))?; 130 | 131 | match RawBump::try_new(region_ptr, layout) { 132 | Ok(b) => Ok(b.with_backing_allocator(Global)), 133 | Err(e) => { 134 | alloc::alloc::dealloc(region_ptr.as_ptr(), layout); 135 | Err(e) 136 | } 137 | } 138 | } 139 | } 140 | } 141 | 142 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 143 | #[cfg_attr(docs_rs, doc(cfg(all(feature = "alloc"))))] 144 | impl Bump { 145 | /// Attempts to construct a new `Bump` backed by the global allocator. 146 | /// 147 | /// The memory managed by this `Bump` is allocated from the global allocator according to 148 | /// `layout`. 149 | /// 150 | /// # Errors 151 | /// 152 | /// Returns an error if sufficient memory could not be allocated from the global allocator. 
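///
/// # Example
///
/// A minimal sketch (the layout values are arbitrary):
///
/// ```
/// # #![cfg_attr(feature = "unstable", feature(allocator_api))]
/// # #[cfg(feature = "alloc")]
/// use core::alloc::Layout;
/// # #[cfg(feature = "alloc")]
/// use acid_alloc::Bump;
///
/// # #[cfg(feature = "alloc")]
/// # fn main() {
/// // Reserve 4 KiB from the global allocator.
/// let layout = Layout::from_size_align(4096, 16).unwrap();
/// let mut bump = Bump::try_new(layout).expect("bump initialization failed");
///
/// // Carve off a 64-byte block.
/// let block = bump.allocate(Layout::new::<[u8; 64]>()).expect("allocation failed");
///
/// // Deallocating the last outstanding block resets the allocator.
/// unsafe { bump.deallocate(block.cast()) };
/// # }
/// # #[cfg(not(feature = "alloc"))]
/// # fn main() {}
/// ```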
153 | pub fn try_new(layout: Layout) -> Result, AllocInitError> { 154 | Self::try_new_in(layout, Global) 155 | } 156 | } 157 | 158 | #[cfg(feature = "unstable")] 159 | #[cfg_attr(docs_rs, doc(cfg(feature = "unstable")))] 160 | impl Bump 161 | where 162 | A: Allocator, 163 | { 164 | /// Attempts to construct a new `Bump` backed by `backing_allocator`. 165 | /// 166 | /// The memory managed by this `Bump` is allocated from `backing_allocator` according to 167 | /// `layout`. 168 | /// 169 | /// # Errors 170 | /// 171 | /// Returns an error if sufficient memory could not be allocated from `backing_allocator`. 172 | pub fn try_new_in(layout: Layout, backing_allocator: A) -> Result, AllocInitError> { 173 | unsafe { 174 | let region_ptr = backing_allocator 175 | .allocate(layout) 176 | .map_err(|_| AllocInitError::AllocFailed(layout))?; 177 | 178 | match RawBump::try_new(region_ptr.cast(), layout) { 179 | Ok(b) => Ok(b.with_backing_allocator(backing_allocator)), 180 | Err(e) => { 181 | backing_allocator.deallocate(region_ptr.cast(), layout); 182 | Err(e) 183 | } 184 | } 185 | } 186 | } 187 | } 188 | 189 | impl Bump 190 | where 191 | A: BackingAllocator, 192 | { 193 | /// Attempts to allocate a block of memory according to `layout`. 194 | /// 195 | /// # Errors 196 | /// 197 | /// Returns `Err` if there is insufficient memory remaining to accommodate 198 | /// `layout`. 199 | pub fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 200 | if layout.size() == 0 { 201 | return Err(AllocError); 202 | } 203 | 204 | let new_low_unaligned = self 205 | .low_mark 206 | .get() 207 | .checked_sub(layout.size()) 208 | .ok_or(AllocError)?; 209 | 210 | let new_low_mark = new_low_unaligned & !(layout.align() - 1); 211 | 212 | if new_low_mark < self.base.addr().get() { 213 | return Err(AllocError); 214 | } 215 | 216 | self.outstanding += 1; 217 | 218 | // SAFETY: new_low_mark >= base, which is non-null 219 | self.low_mark = unsafe { NonZeroUsize::new_unchecked(new_low_mark) }; 220 | 221 | Ok(self.base.with_addr_and_size(self.low_mark, layout.size())) 222 | } 223 | 224 | /// Deallocates a block of memory. 225 | /// 226 | /// This operation does not increase the amount of available memory unless 227 | /// `ptr` is the last outstanding allocation from this allocator. 228 | /// 229 | /// # Safety 230 | /// 231 | /// `ptr` must denote a block of memory [*currently allocated*] via this allocator. 232 | /// 233 | /// [*currently allocated*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#currently-allocated-memory 234 | pub unsafe fn deallocate(&mut self, ptr: NonNull) { 235 | let _ = ptr; 236 | 237 | self.outstanding = self.outstanding.checked_sub(1).unwrap(); 238 | 239 | if self.outstanding == 0 { 240 | // Reset the allocator. 241 | self.low_mark = self.base.limit(); 242 | } 243 | } 244 | 245 | /// Resets the bump allocator. 246 | /// 247 | /// This method invalidates all outstanding allocations from this allocator. 248 | /// The destructors of allocated objects will not be run. 249 | /// 250 | /// # Safety 251 | /// 252 | /// The caller must uphold the following invariants: 253 | /// - No references to memory allocated by this `Bump` may exist when the method is called. 254 | /// - Any pointers to memory previously allocated by this allocator may no longer be 255 | /// dereferenced or passed to [`Bump::deallocate()`]. 
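The `allocate` method above bumps the low mark downward and then clears the low bits to satisfy the requested alignment. A small worked example of that computation; `bump_down` is a hypothetical helper shown only to make the arithmetic concrete:

```rust
// Mirrors the computation in `allocate` above: move the low mark down by
// `size` bytes, then clear the low bits to round down to `align` (a power of
// two). Illustrative only.
fn bump_down(low_mark: usize, size: usize, align: usize) -> Option<usize> {
    assert!(align.is_power_of_two());
    let unaligned = low_mark.checked_sub(size)?;
    Some(unaligned & !(align - 1))
}

fn main() {
    // 10 bytes at 8-byte alignment from a low mark of 0x1000:
    // 0x1000 - 10 = 0xFF6, rounded down to 0xFF0.
    assert_eq!(bump_down(0x1000, 10, 8), Some(0xFF0));
    // A further 1-byte, 16-aligned request lands at 0xFE0.
    assert_eq!(bump_down(0xFF0, 1, 16), Some(0xFE0));
    // Requests larger than the remaining space fail, as in `allocate`.
    assert_eq!(bump_down(0x10, 0x20, 8), None);
}
```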
256 | /// 257 | /// [`Bump::deallocate()`]: Bump::deallocate 258 | pub unsafe fn reset(&mut self) { 259 | self.low_mark = self.base.limit(); 260 | } 261 | } 262 | 263 | impl Drop for Bump 264 | where 265 | A: BackingAllocator, 266 | { 267 | fn drop(&mut self) { 268 | unsafe { 269 | self.backing_allocator 270 | .deallocate(self.base.ptr(), self.layout) 271 | }; 272 | } 273 | } 274 | 275 | impl fmt::Debug for Bump 276 | where 277 | A: BackingAllocator, 278 | { 279 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 280 | f.debug_struct("Bump") 281 | .field("base", &self.base) 282 | .field("low_mark", &self.low_mark) 283 | .finish() 284 | } 285 | } 286 | 287 | struct RawBump { 288 | base: BasePtr, 289 | limit: NonZeroUsize, 290 | layout: Layout, 291 | } 292 | 293 | impl RawBump { 294 | fn with_backing_allocator(self, backing_allocator: A) -> Bump { 295 | Bump { 296 | base: self.base, 297 | low_mark: self.limit, 298 | outstanding: 0, 299 | layout: self.layout, 300 | backing_allocator, 301 | } 302 | } 303 | 304 | unsafe fn try_new(region: NonNull, layout: Layout) -> Result { 305 | // Verify that the base pointer matches the layout. 306 | let addr = region.addr().get(); 307 | if addr & !(layout.align() - 1) != addr { 308 | return Err(AllocInitError::InvalidConfig); 309 | } 310 | 311 | let base = BasePtr::new(region, layout.size()); 312 | let limit = NonZeroUsize::new( 313 | region 314 | .addr() 315 | .get() 316 | .checked_add(layout.size()) 317 | .ok_or(AllocInitError::InvalidLocation)?, 318 | ) 319 | .unwrap(); 320 | 321 | Ok(RawBump { 322 | base, 323 | limit, 324 | layout, 325 | }) 326 | } 327 | } 328 | -------------------------------------------------------------------------------- /src/core.rs: -------------------------------------------------------------------------------- 1 | //! Re-exports of `core`, plus stable polyfills. 2 | //! 3 | //! The stable-compatible implementations in this module are copied more-or-less 4 | //! verbatim from the standard library source. 5 | 6 | #![cfg(not(feature = "unstable"))] 7 | 8 | pub use core::{cmp, fmt, mem, slice, sync}; 9 | 10 | pub(crate) mod alloc { 11 | pub use core::alloc::*; 12 | 13 | // #![feature(alloc_layout_extra)] 14 | 15 | use core::ptr::NonNull; 16 | 17 | use sptr::invalid_mut; 18 | 19 | use crate::layout_error; 20 | 21 | /// Indicates that an allocation failed. 22 | /// 23 | /// This may occur due to resource exhaustion or an unsupported set of arguments. 24 | /// 25 | /// When the `unstable` feature is enabled, this type is a re-export of 26 | /// [`core::alloc::AllocError`]. 27 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 28 | pub struct AllocError; 29 | 30 | pub trait LayoutExt { 31 | fn padding_needed_for(&self, align: usize) -> usize; 32 | fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> 33 | where 34 | Self: Sized; 35 | fn dangling(&self) -> NonNull; 36 | } 37 | 38 | impl LayoutExt for Layout { 39 | #[inline] 40 | fn padding_needed_for(&self, align: usize) -> usize { 41 | let len = self.size(); 42 | 43 | // Rounded up value is: 44 | // len_rounded_up = (len + align - 1) & !(align - 1); 45 | // and then we return the padding difference: `len_rounded_up - len`. 46 | // 47 | // We use modular arithmetic throughout: 48 | // 49 | // 1. align is guaranteed to be > 0, so align - 1 is always 50 | // valid. 51 | // 52 | // 2. `len + align - 1` can overflow by at most `align - 1`, 53 | // so the &-mask with `!(align - 1)` will ensure that in the 54 | // case of overflow, `len_rounded_up` will itself be 0. 
55 | // Thus the returned padding, when added to `len`, yields 0, 56 | // which trivially satisfies the alignment `align`. 57 | // 58 | // (Of course, attempts to allocate blocks of memory whose 59 | // size and padding overflow in the above manner should cause 60 | // the allocator to yield an error anyway.) 61 | 62 | let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); 63 | len_rounded_up.wrapping_sub(len) 64 | } 65 | 66 | #[inline] 67 | fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> { 68 | // This cannot overflow. Quoting from the invariant of Layout: 69 | // > `size`, when rounded up to the nearest multiple of `align`, 70 | // > must not overflow (i.e., the rounded value must be less than 71 | // > `usize::MAX`) 72 | let padded_size = self.size() + self.padding_needed_for(self.align()); 73 | let alloc_size = padded_size.checked_mul(n).ok_or_else(layout_error)?; 74 | 75 | // SAFETY: self.align is already known to be valid and alloc_size has been 76 | // padded already. 77 | unsafe { 78 | Ok(( 79 | Layout::from_size_align_unchecked(alloc_size, self.align()), 80 | padded_size, 81 | )) 82 | } 83 | } 84 | 85 | #[inline] 86 | #[cfg(feature = "sptr")] 87 | fn dangling(&self) -> NonNull { 88 | // SAFETY: align is guaranteed to be non-zero 89 | unsafe { NonNull::new_unchecked(invalid_mut::(self.align())) } 90 | } 91 | } 92 | 93 | #[cfg(test)] 94 | mod tests { 95 | use super::*; 96 | 97 | #[test] 98 | fn layout_error_returns_error() { 99 | let _: LayoutError = layout_error(); 100 | } 101 | } 102 | } 103 | 104 | pub mod num { 105 | pub use core::num::*; 106 | 107 | // #![feature(int_log)] 108 | 109 | pub trait UsizeExt { 110 | fn ilog2(self) -> u32; 111 | } 112 | 113 | impl UsizeExt for usize { 114 | #[inline] 115 | fn ilog2(self) -> u32 { 116 | Self::BITS - 1 - self.leading_zeros() 117 | } 118 | } 119 | } 120 | 121 | #[cfg(not(feature = "sptr"))] 122 | pub(crate) use core::ptr; 123 | 124 | #[cfg(feature = "sptr")] 125 | pub(crate) mod ptr { 126 | pub use core::ptr::*; 127 | 128 | // #![feature(strict_provenance)] 129 | 130 | use core::num::NonZeroUsize; 131 | 132 | pub use sptr::Strict; 133 | 134 | pub trait NonNullStrict { 135 | fn addr(self) -> NonZeroUsize 136 | where 137 | T: Sized; 138 | 139 | fn with_addr(self, addr: NonZeroUsize) -> Self 140 | where 141 | T: Sized; 142 | 143 | fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self 144 | where 145 | T: Sized; 146 | } 147 | 148 | impl NonNullStrict for NonNull { 149 | fn addr(self) -> NonZeroUsize 150 | where 151 | T: Sized, 152 | { 153 | // SAFETY: The pointer is guaranteed by the type to be non-null, 154 | // meaning that the address will be non-zero. 155 | unsafe { NonZeroUsize::new_unchecked(self.as_ptr().addr()) } 156 | } 157 | 158 | fn with_addr(self, addr: NonZeroUsize) -> Self 159 | where 160 | T: Sized, 161 | { 162 | // SAFETY: The result of `ptr::from::with_addr` is non-null because `addr` is guaranteed 163 | // to be non-zero. 164 | unsafe { NonNull::new_unchecked(self.as_ptr().with_addr(addr.get()) as *mut _) } 165 | } 166 | 167 | fn map_addr(self, f: impl FnOnce(NonZeroUsize) -> NonZeroUsize) -> Self 168 | where 169 | T: Sized, 170 | { 171 | self.with_addr(f(self.addr())) 172 | } 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Bare-metal allocators. 2 | //! 3 | //! --- 4 | //! 
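The `padding_needed_for` and `ilog2` polyfills above are plain bit arithmetic. A worked example of both computations, standalone and not part of the crate:

```rust
use core::alloc::Layout;

fn main() {
    // Rounding a 10-byte, 8-aligned layout up to its alignment needs 6 bytes
    // of padding: (10 + 8 - 1) & !(8 - 1) == 16.
    let layout = Layout::from_size_align(10, 8).unwrap();
    let align = layout.align();
    let len_rounded_up =
        layout.size().wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
    assert_eq!(len_rounded_up - layout.size(), 6);

    // The `ilog2` polyfill: the position of the highest set bit.
    let x: usize = 4096;
    assert_eq!(usize::BITS - 1 - x.leading_zeros(), 12);
}
```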
This crate provides allocators that are suitable for use on bare metal or with low-level 5 | //! allocation facilities like `mmap(2)`/`brk(2)`. 6 | //! 7 | //! ## Allocators 8 | //! 9 | //! The following allocators are available: 10 | //! 11 | //! - **[`Buddy`], a binary-buddy allocator**. O(log2_levels_) worst-case allocation and 12 | //! deallocation. Supports splitting and coalescing blocks by powers of 2. Good choice for 13 | //! periodic medium-to-large allocations. 14 | //! - **[`Bump`], a bump allocator**. O(1) allocation. Extremely fast to allocate and flexible in 15 | //! terms of allocation layout, but unable to deallocate individual items. Good choice for 16 | //! allocations that will never be deallocated or that will be deallocated en masse. 17 | //! - **[`Slab`], a slab allocator**. O(1) allocation and deallocation. All allocated blocks are the 18 | //! same size, making this allocator a good choice when allocating many similarly-sized objects. 19 | //! 20 | //! ## Features 21 | //! 22 | //! All allocators provided by this crate are available in a `#![no_std]`, 23 | //! `#![cfg(no_global_oom_handling)]` environment. Additional functionality is available when 24 | //! enabling feature flags: 25 | //! 26 | //! 27 | //! 28 | //! 29 | //! 30 | //! 31 | //! 32 | //! 33 | //! 34 | //! 35 | //! 36 | //! 37 | //! 40 | //! 41 | //! 42 | //! 43 | //! 44 | //! 45 | //! 50 | //! 51 | //! 52 | //! 53 | //! 54 | //! 55 | //! 58 | //! 59 | //!
//! | Flag       | Default? | Requires nightly? | Description |
//! |------------|----------|-------------------|-------------|
//! | `sptr`     | Yes      | No                | Uses the [`sptr`] polyfill for Strict Provenance. |
//! | `unstable` | No       | Yes               | Exposes constructors for allocators backed by implementors of the unstable `Allocator` trait, and enables the internal use of nightly-only Rust features. Obviates `sptr`. |
//! | `alloc`    | No       | No                | Exposes constructors for allocators backed by the global allocator. |
60 | //! 61 | //! [`sptr`]: https://crates.io/crates/sptr 62 | 63 | #![doc(html_root_url = "https://docs.rs/acid_alloc/0.1.0")] 64 | #![no_std] 65 | #![warn(missing_debug_implementations)] 66 | #![warn(missing_docs)] 67 | #![deny(unsafe_op_in_unsafe_fn)] 68 | #![cfg_attr(feature = "unstable", feature(alloc_layout_extra))] 69 | #![cfg_attr(feature = "unstable", feature(allocator_api))] 70 | #![cfg_attr(feature = "unstable", feature(int_log))] 71 | #![cfg_attr(feature = "unstable", feature(strict_provenance))] 72 | #![cfg_attr(docs_rs, feature(doc_cfg))] 73 | // This is necessary to allow `sptr` and `crate::core` to shadow methods provided by unstable 74 | // features. 75 | #![allow(unstable_name_collisions)] 76 | 77 | #[cfg(test)] 78 | extern crate std; 79 | 80 | macro_rules! requires_sptr_or_unstable { 81 | ($($it:item)*) => { 82 | $( 83 | #[cfg(any(feature = "sptr", feature = "unstable"))] 84 | $it 85 | )* 86 | }; 87 | } 88 | 89 | #[cfg(not(any(feature = "sptr", feature = "unstable")))] 90 | compile_error!("At least one of these crate features must be enabled: [\"sptr\", \"unstable\"]."); 91 | 92 | #[cfg(any(feature = "alloc", test))] 93 | extern crate alloc; 94 | 95 | requires_sptr_or_unstable! { 96 | mod base; 97 | mod bitmap; 98 | pub mod buddy; 99 | pub mod bump; 100 | pub mod slab; 101 | 102 | #[cfg(test)] 103 | mod tests; 104 | 105 | #[cfg(not(feature = "unstable"))] 106 | pub(crate) mod core; 107 | 108 | #[cfg(feature = "unstable")] 109 | pub(crate) mod core { 110 | pub use core::{alloc, cmp, fmt, mem, num, ptr, slice, sync}; 111 | } 112 | 113 | use crate::{ 114 | base::{BasePtr, BlockLink, DoubleBlockLink}, 115 | core::{ 116 | alloc::{Layout}, 117 | ptr::NonNull, 118 | }, 119 | }; 120 | 121 | #[cfg(not(feature = "unstable"))] 122 | use crate::core::alloc::LayoutError; 123 | 124 | #[cfg(feature = "unstable")] 125 | use crate::core::alloc::Allocator; 126 | 127 | #[doc(inline)] 128 | pub use crate::{buddy::Buddy, bump::Bump, core::alloc::AllocError, slab::Slab}; 129 | 130 | #[cfg(not(feature = "unstable"))] 131 | pub(crate) fn layout_error() -> LayoutError { 132 | // HACK: LayoutError is #[non_exhaustive], so it can't be constructed outside the standard 133 | // library. As a workaround, deliberately pass bad values to the constructor to get one. 134 | Layout::from_size_align(0, 0).unwrap_err() 135 | } 136 | 137 | /// The error type for allocator constructors. 138 | #[derive(Clone, Debug)] 139 | pub enum AllocInitError { 140 | /// A necessary allocation failed. 141 | /// 142 | /// This variant is returned when a constructor attempts to allocate memory, either for 143 | /// metadata or the managed region, but the underlying allocator fails. 144 | /// 145 | /// The variant contains the [`Layout`] that could not be allocated. 146 | AllocFailed(Layout), 147 | 148 | /// The configuration of the allocator is invalid. 149 | /// 150 | /// This variant is returned when an allocator's configuration parameters are impossible to 151 | /// satisfy. 152 | InvalidConfig, 153 | 154 | /// The location of the allocator is invalid. 155 | /// 156 | /// This variant is returned when the full size of the managed region would not fit at the 157 | /// provided address, i.e., pointer calculations would overflow. 158 | InvalidLocation, 159 | } 160 | 161 | /// Types which provide memory which backs an allocator. 162 | /// 163 | /// This is a supertrait of [`Allocator`], and is implemented by the following types: 164 | /// - The `Raw` marker type indicates that an allocator is not backed by another allocator. 
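The `AllocInitError` enum above distinguishes "the backing allocation failed" from "these parameters can never work". A minimal sketch of matching on it when constructing an allocator, assuming the `alloc` feature; `make_slab` is a hypothetical helper:

```rust
use acid_alloc::{AllocInitError, Global, Slab};

fn make_slab(block_size: usize, num_blocks: usize) -> Option<Slab<Global>> {
    match Slab::try_new(block_size, num_blocks) {
        Ok(slab) => Some(slab),
        Err(AllocInitError::AllocFailed(layout)) => {
            // The global allocator could not provide `layout`; retrying with a
            // smaller configuration might succeed.
            eprintln!("allocation of {layout:?} failed");
            None
        }
        Err(AllocInitError::InvalidConfig | AllocInitError::InvalidLocation) => None,
    }
}

fn main() {
    assert!(make_slab(64, 8).is_some());
    assert!(make_slab(0, 8).is_none()); // block size too small to hold a free-list link
}
```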
This 165 | /// is the case when constructing the allocator from raw pointers. Memory used by this 166 | /// allocator can be reclaimed using `.into_raw_parts()`. 167 | /// - The `Global` marker type indicates that an allocator is backed by the global allocator. 168 | /// The allocator will free its memory on drop. 169 | /// - Any type `A` which implements [`Allocator`] indicates that an allocator is backed by an 170 | /// instance of `A`. The allocator will free its memory on drop. 171 | /// 172 | /// [`Allocator`]: https://doc.rust-lang.org/stable/core/alloc/trait.Allocator.html 173 | pub trait BackingAllocator: Sealed { 174 | /// Deallocates the memory referenced by `ptr`. 175 | /// 176 | /// # Safety 177 | /// 178 | /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and 179 | /// * `layout` must [*fit*] that block of memory. 180 | /// 181 | /// [*currently allocated*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#currently-allocated-memory 182 | /// [*fit*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#memory-fitting 183 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); 184 | } 185 | 186 | /// A marker type indicating that an allocator is backed by raw pointers. 187 | /// 188 | /// Allocators using this type will not deallocate owned memory on drop. The memory can be 189 | /// reclaimed using the appropriate `.into_raw_parts()` method. 190 | #[derive(Clone, Debug)] 191 | pub struct Raw; 192 | impl Sealed for Raw {} 193 | impl BackingAllocator for Raw { 194 | unsafe fn deallocate(&self, _: NonNull, _: Layout) {} 195 | } 196 | 197 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 198 | /// The global memory allocator. 199 | /// 200 | /// When both the `alloc` and `unstable` features are enabled, this type is a re-export of 201 | /// [`alloc::alloc::Global`]. 202 | #[derive(Clone, Debug)] 203 | pub struct Global; 204 | 205 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 206 | impl Sealed for Global {} 207 | 208 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 209 | impl BackingAllocator for Global { 210 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 211 | unsafe { alloc::alloc::dealloc(ptr.as_ptr(), layout) }; 212 | } 213 | } 214 | 215 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 216 | pub use alloc::alloc::Global; 217 | 218 | #[cfg(feature = "unstable")] 219 | impl Sealed for A {} 220 | #[cfg(feature = "unstable")] 221 | impl BackingAllocator for A { 222 | unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { 223 | unsafe { Allocator::deallocate(self, ptr, layout) }; 224 | } 225 | } 226 | 227 | #[doc(hidden)] 228 | mod private { 229 | pub trait Sealed {} 230 | } 231 | use private::Sealed; 232 | } 233 | -------------------------------------------------------------------------------- /src/slab.rs: -------------------------------------------------------------------------------- 1 | //! Slab allocation. 2 | //! 3 | //! A slab allocator divides the managed region into a fixed number of equally-sized blocks. This 4 | //! design is ideal for allocating many values of a single type, and a combination of slab sizes can 5 | //! be useful in constructing more general-purpose allocators. 6 | //! 7 | //! ## Characteristics 8 | //! 9 | //! #### Time complexity 10 | //! 11 | //! | Operation | Best-case | Worst-case | 12 | //! |--------------------------|-----------|------------| 13 | //! 
| Allocate | O(1) | O(1) | 14 | //! | Deallocate | O(1) | O(1) | 15 | //! 16 | //! #### Fragmentation 17 | //! 18 | //! Due to slab allocators' fixed-size allocations, they exhibit no external 19 | //! fragmentation. The degree of internal fragmentation is dependent on the 20 | //! difference between the average allocation size and the allocator's block 21 | //! size. 22 | 23 | use crate::core::{ 24 | alloc::{AllocError, Layout}, 25 | fmt, mem, 26 | num::NonZeroUsize, 27 | ptr::NonNull, 28 | }; 29 | 30 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 31 | use alloc::alloc::Global; 32 | 33 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 34 | use crate::{core::alloc::LayoutExt, Global}; 35 | 36 | #[cfg(feature = "unstable")] 37 | use crate::core::alloc::Allocator; 38 | 39 | #[cfg(not(feature = "unstable"))] 40 | use crate::core::ptr::NonNullStrict; 41 | 42 | use crate::{AllocInitError, BackingAllocator, BasePtr, BlockLink, Raw}; 43 | 44 | /// A slab allocator. 45 | /// 46 | /// For a general discussion of slab allocation, see the [module-level 47 | /// documentation]. 48 | /// 49 | /// # Configuration 50 | /// 51 | /// Each constructor takes the following parameters: 52 | /// - `block_size` is the size in bytes of each allocation. 53 | /// - `num_blocks` is the maximum number of allocations that a `Slab` may make at once. 54 | /// 55 | /// Blocks are packed as tightly as possible. As such, the minimum guaranteed alignment of a block 56 | /// is 57 | /// 58 | /// ```text 59 | /// 1 << block_size.trailing_zeros() 60 | /// ``` 61 | /// 62 | /// If a higher guaranteed alignment is required, the block size must be rounded up to the proper 63 | /// alignment when creating the allocator. Calls to [`Slab::allocate`] with a `Layout` alignment 64 | /// greater than the minimum guaranteed alignment will result in an error. 65 | /// 66 | /// [module-level documentation]: crate::slab 67 | pub struct Slab { 68 | base: BasePtr, 69 | free_list: Option, 70 | block_size: u32, 71 | block_align: u32, 72 | num_blocks: u32, 73 | outstanding: u32, 74 | backing_allocator: A, 75 | } 76 | 77 | unsafe impl
Send for Slab where A: BackingAllocator + Send {} 78 | unsafe impl Sync for Slab where A: BackingAllocator + Sync {} 79 | 80 | impl Slab { 81 | /// Constructs a new `Slab` from a raw pointer to a region of memory. 82 | /// 83 | /// # Errors 84 | /// 85 | /// Returns an error if [`Slab::region_layout(block_size, num_blocks)`][0] would return an error 86 | /// due to overflow. 87 | /// 88 | /// # Safety 89 | /// 90 | /// The caller must uphold the following invariants: 91 | /// - `region` must be a pointer to a region that fits the [`Layout`] returned by 92 | /// [`Slab::region_layout(block_size, num_blocks)`][0], and it must be valid for reads and 93 | /// writes for the entire size indicated by that `Layout`. 94 | /// - No references to the memory at `region` may exist when this function is called. 95 | /// - As long as the returned `Slab` exists, no accesses may be made to the memory at `region` 96 | /// except by way of methods on the returned `Slab`. 97 | /// 98 | /// [module-level documentation]: crate::slab 99 | /// [0]: Slab::region_layout 100 | pub unsafe fn new_raw( 101 | region: NonNull, 102 | block_size: usize, 103 | num_blocks: usize, 104 | ) -> Result, AllocInitError> { 105 | unsafe { 106 | RawSlab::try_new(region, block_size, num_blocks).map(|s| s.with_backing_allocator(Raw)) 107 | } 108 | } 109 | } 110 | 111 | #[cfg(all(any(feature = "alloc", test), not(feature = "unstable")))] 112 | impl Slab { 113 | /// Attempts to construct a new `Slab` backed by the global allocator. 114 | /// 115 | /// The memory managed by this `Slab` is allocated from the global allocator according to the 116 | /// layout indicated by [`Slab::region_layout(block_size, num_blocks)`][0]. 117 | /// 118 | /// # Errors 119 | /// 120 | /// Returns an error if sufficient memory could not be allocated from the global allocator. 121 | /// 122 | /// [0]: Slab::region_layout 123 | #[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))] 124 | pub fn try_new(block_size: usize, num_blocks: usize) -> Result, AllocInitError> { 125 | let region_layout = Self::region_layout(block_size, num_blocks) 126 | .map_err(|_| AllocInitError::InvalidConfig)?; 127 | 128 | unsafe { 129 | let region_ptr = if region_layout.size() == 0 { 130 | region_layout.dangling() 131 | } else { 132 | // SAFETY: region size is not zero 133 | let region_raw = alloc::alloc::alloc(region_layout); 134 | NonNull::new(region_raw).ok_or_else(|| { 135 | alloc::alloc::dealloc(region_raw, region_layout); 136 | AllocInitError::AllocFailed(region_layout) 137 | })? 138 | }; 139 | 140 | match RawSlab::try_new(region_ptr, block_size, num_blocks) { 141 | Ok(s) => Ok(s.with_backing_allocator(Global)), 142 | Err(e) => { 143 | if region_layout.size() != 0 { 144 | alloc::alloc::dealloc(region_ptr.as_ptr(), region_layout); 145 | } 146 | 147 | Err(e) 148 | } 149 | } 150 | } 151 | } 152 | } 153 | 154 | #[cfg(all(any(feature = "alloc", test), feature = "unstable"))] 155 | impl Slab { 156 | /// Attempts to construct a new `Slab` backed by the global allocator. 157 | /// 158 | /// The memory managed by this `Slab` is allocated from the global allocator according to the 159 | /// layout indicated by [`Slab::region_layout(block_size, num_blocks)`][0]. 160 | /// 161 | /// # Errors 162 | /// 163 | /// Returns an error if sufficient memory could not be allocated from the global allocator. 
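Taken together with `allocate`, `allocate_block`, and `deallocate` defined further down, the constructors above support the following pattern. A minimal sketch, assuming the `alloc` feature; the block size and count are arbitrary:

```rust
use acid_alloc::Slab;
use core::alloc::Layout;

fn main() {
    // Eight 64-byte blocks obtained from the global allocator.
    let mut slab = Slab::try_new(64, 8).unwrap();

    // `allocate` hands out one fixed-size block per call, as long as the
    // requested layout fits a block...
    let a = slab.allocate(Layout::new::<u64>()).unwrap();
    let b = slab.allocate_block().unwrap();

    // ...and `deallocate` pushes blocks back onto the free list in O(1).
    unsafe {
        slab.deallocate(a.cast());
        slab.deallocate(b.cast());
    }
}
```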
164 | /// 165 | /// [0]: Slab::region_layout 166 | #[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))] 167 | pub fn try_new(block_size: usize, num_blocks: usize) -> Result, AllocInitError> { 168 | Self::try_new_in(block_size, num_blocks, Global) 169 | } 170 | } 171 | 172 | #[cfg(feature = "unstable")] 173 | impl Slab 174 | where 175 | A: Allocator, 176 | { 177 | /// Attempts to construct a new `Slab` backed by `backing_allocator`. 178 | /// 179 | /// The memory managed by this `Slab` is allocated from `backing_allocator` according to the 180 | /// layout indicated by [`Slab::region_layout(block_size, num_blocks)`][0]. 181 | /// 182 | /// # Errors 183 | /// 184 | /// Returns an error if sufficient memory could not be allocated from `backing_allocator`. 185 | /// 186 | /// [0]: Slab::region_layout 187 | #[cfg_attr(docs_rs, doc(cfg(feature = "unstable")))] 188 | pub fn try_new_in( 189 | block_size: usize, 190 | num_blocks: usize, 191 | backing_allocator: A, 192 | ) -> Result, AllocInitError> { 193 | let region_layout = Self::region_layout(block_size, num_blocks) 194 | .map_err(|_| AllocInitError::InvalidConfig)?; 195 | 196 | unsafe { 197 | let region_ptr = if region_layout.size() == 0 { 198 | region_layout.dangling() 199 | } else { 200 | backing_allocator 201 | .allocate(region_layout) 202 | .map_err(|_| AllocInitError::AllocFailed(region_layout))? 203 | .cast() 204 | }; 205 | 206 | match RawSlab::try_new(region_ptr, block_size, num_blocks) { 207 | Ok(s) => Ok(s.with_backing_allocator(backing_allocator)), 208 | Err(e) => { 209 | if region_layout.size() != 0 { 210 | backing_allocator.deallocate(region_ptr, region_layout); 211 | } 212 | 213 | Err(e) 214 | } 215 | } 216 | } 217 | } 218 | } 219 | 220 | impl Slab 221 | where 222 | A: BackingAllocator, 223 | { 224 | /// Returns the layout requirements of the region managed by a `Slab` of this type. 225 | /// 226 | /// # Errors 227 | /// 228 | /// Returns `Err` if the total size and alignment of the region cannot be 229 | /// represented as a [`Layout`]. 230 | pub fn region_layout(block_size: usize, num_blocks: usize) -> Result { 231 | // Round block size up to the alignment of BlockLink. 232 | let block_size = { 233 | let align = mem::align_of::(); 234 | 235 | // Safe unchecked sub: align is always nonzero 236 | let up = block_size 237 | .checked_add(align) 238 | .ok_or(AllocInitError::InvalidConfig)? 239 | - 1; 240 | up & !(align - 1) 241 | }; 242 | 243 | if block_size < mem::size_of::() { 244 | return Err(AllocInitError::InvalidConfig); 245 | } 246 | 247 | let total_size = block_size 248 | .checked_mul(num_blocks) 249 | .ok_or(AllocInitError::InvalidConfig)?; 250 | u32::try_from(total_size).map_err(|_| AllocInitError::InvalidConfig)?; 251 | 252 | let align = 1_usize 253 | .checked_shl(block_size.trailing_zeros()) 254 | .ok_or(AllocInitError::InvalidConfig)?; 255 | u32::try_from(align).map_err(|_| AllocInitError::InvalidConfig)?; 256 | 257 | Layout::from_size_align(total_size, align).map_err(|_| AllocInitError::InvalidConfig) 258 | } 259 | 260 | /// Attempts to allocate a block of memory with the specified layout. 261 | /// 262 | /// The returned block is guaranteed to be aligned to [`self.block_align()`][1] bytes. 263 | /// 264 | /// # Errors 265 | /// 266 | /// Returns `Err` if any of the following are true: 267 | /// - No blocks are available. 268 | /// - `layout.size()` is greater than the value returned by [`self.block_size()`][0]. 269 | /// - `layout.align()` is greater than the value returned by [`self.block_align()`][1]. 
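As the struct-level documentation notes, the minimum guaranteed block alignment follows from the trailing zeros of the block size, after `region_layout` above has rounded it up to the alignment of the internal link type. A quick worked example of that rule; `min_block_align` is illustrative only:

```rust
// The minimum guaranteed block alignment described above, computed directly
// from the (already rounded) block size.
fn min_block_align(block_size: usize) -> usize {
    1 << block_size.trailing_zeros()
}

fn main() {
    assert_eq!(min_block_align(48), 16); // matches the `block_align` doctest below
    assert_eq!(min_block_align(64), 64); // power-of-two sizes are fully aligned
    assert_eq!(min_block_align(96), 32);
}
```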
270 | /// 271 | /// [0]: Slab::block_size 272 | /// [1]: Slab::block_align 273 | pub fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 274 | if layout.size() > self.block_size as usize || layout.align() > self.block_align as usize { 275 | return Err(AllocError); 276 | } 277 | 278 | let old_head = self.free_list.take().ok_or(AllocError)?; 279 | 280 | unsafe { 281 | let link_mut = self.base.link_mut(old_head); 282 | self.free_list = link_mut.next.take(); 283 | } 284 | 285 | self.outstanding += 1; 286 | 287 | Ok(self.base.with_addr_and_size(old_head, layout.size())) 288 | } 289 | 290 | /// Attempts to allocate a block of memory from the slab. 291 | /// 292 | /// The returned block has a size of [`self.block_size()`][0] and an alignment of 293 | /// [`self.block_align()`][1]. 294 | /// 295 | /// [0]: Slab::block_size 296 | /// [1]: Slab::block_align 297 | /// 298 | /// # Errors 299 | /// 300 | /// Returns `Err` if no blocks are available. 301 | pub fn allocate_block(&mut self) -> Result, AllocError> { 302 | let old_head = self.free_list.take().ok_or(AllocError)?; 303 | 304 | unsafe { 305 | let link_mut = self.base.link_mut(old_head); 306 | self.free_list = link_mut.next.take(); 307 | } 308 | 309 | self.outstanding += 1; 310 | 311 | Ok(self.base.with_addr_and_size(old_head, self.block_size())) 312 | } 313 | 314 | /// Deallocates the memory referenced by `ptr`. 315 | /// 316 | /// # Safety 317 | /// 318 | /// `ptr` must denote a block of memory [*currently allocated*] via this allocator. 319 | /// 320 | /// [*currently allocated*]: https://doc.rust-lang.org/nightly/alloc/alloc/trait.Allocator.html#currently-allocated-memory 321 | pub unsafe fn deallocate(&mut self, ptr: NonNull) { 322 | let addr = ptr.addr(); 323 | 324 | unsafe { 325 | self.base.link_mut(addr).next = self.free_list; 326 | self.free_list = Some(addr); 327 | } 328 | 329 | self.outstanding -= 1; 330 | } 331 | 332 | /// Returns the size in bytes of blocks allocated by this `Slab`. 333 | /// 334 | /// # Example 335 | /// 336 | /// ``` 337 | /// # #[cfg(feature = "alloc")] 338 | /// # fn main() { 339 | /// use acid_alloc::Slab; 340 | /// 341 | /// let slab = Slab::try_new(96, 8).unwrap(); 342 | /// assert_eq!(slab.block_size(), 96); 343 | /// # } 344 | /// 345 | /// # #[cfg(not(feature = "alloc"))] 346 | /// # fn main() {} 347 | /// ``` 348 | #[inline] 349 | pub fn block_size(&self) -> usize { 350 | // Safe cast: block size is provided to constructors as a usize. 351 | self.block_size as usize 352 | } 353 | 354 | /// Returns the minimum alignment of blocks allocated by this `Slab`. 355 | /// 356 | /// # Example 357 | /// 358 | /// ``` 359 | /// # #[cfg(feature = "alloc")] 360 | /// # fn main() { 361 | /// use acid_alloc::Slab; 362 | /// 363 | /// let slab = Slab::try_new(48, 12).unwrap(); 364 | /// assert_eq!(slab.block_align(), 16); 365 | /// # } 366 | /// 367 | /// # #[cfg(not(feature = "alloc"))] 368 | /// # fn main() {} 369 | /// ``` 370 | #[inline] 371 | pub fn block_align(&self) -> usize { 372 | // Safe cast: block align cannot exceed 1 << (usize::BITS - 1). 373 | self.block_align as usize 374 | } 375 | 376 | /// Returns the number of blocks managed by this allocator. 
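`allocate` and `deallocate` above amount to a pop and a push on an intrusive free list threaded through the unused blocks. The same bookkeeping, rewritten with plain indices instead of in-block links; `ToyFreeList` is purely illustrative and far simpler than the crate's `BasePtr`/`BlockLink` machinery:

```rust
// Each free block records the index of the next free block.
struct ToyFreeList {
    next: Vec<Option<usize>>, // one entry per block
    head: Option<usize>,
}

impl ToyFreeList {
    fn new(num_blocks: usize) -> Self {
        // Block i points at block i + 1; the last block terminates the list.
        let next = (0..num_blocks)
            .map(|i| (i + 1 < num_blocks).then(|| i + 1))
            .collect();
        ToyFreeList { next, head: (num_blocks > 0).then(|| 0) }
    }

    fn allocate(&mut self) -> Option<usize> {
        let block = self.head.take()?;
        self.head = self.next[block].take();
        Some(block)
    }

    fn deallocate(&mut self, block: usize) {
        self.next[block] = self.head;
        self.head = Some(block);
    }
}

fn main() {
    let mut list = ToyFreeList::new(2);
    let a = list.allocate().unwrap();
    let b = list.allocate().unwrap();
    assert!(list.allocate().is_none());
    list.deallocate(a);
    list.deallocate(b);
    assert_eq!(list.allocate(), Some(b)); // LIFO order, like the slab's free list
}
```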
377 | /// 378 | /// # Example 379 | /// 380 | /// ``` 381 | /// # #[cfg(feature = "alloc")] 382 | /// # fn main() { 383 | /// use acid_alloc::Slab; 384 | /// 385 | /// let slab = Slab::try_new(64, 7).unwrap(); 386 | /// assert_eq!(slab.num_blocks(), 7); 387 | /// # } 388 | /// 389 | /// # #[cfg(not(feature = "alloc"))] 390 | /// # fn main() {} 391 | /// ``` 392 | #[inline] 393 | pub fn num_blocks(&self) -> usize { 394 | // Safe cast: num_blocks is provided to constructors as a usize. 395 | self.num_blocks as usize 396 | } 397 | 398 | /// Returns the size in bytes of the managed region. 399 | /// 400 | /// # Example 401 | /// 402 | /// ``` 403 | /// # #[cfg(feature = "alloc")] 404 | /// # fn main() { 405 | /// use acid_alloc::Slab; 406 | /// 407 | /// let slab = Slab::try_new(128, 4).unwrap(); 408 | /// assert_eq!(slab.size(), 512); 409 | /// # } 410 | /// 411 | /// # #[cfg(not(feature = "alloc"))] 412 | /// # fn main() {} 413 | /// ``` 414 | #[inline] 415 | pub fn size(&self) -> usize { 416 | // Safe unchecked mul: checked by constructor. 417 | self.block_size() * self.num_blocks() 418 | } 419 | 420 | /// Returns the first address above the managed region. 421 | /// 422 | /// If the managed region ends at the end of the address space, returns `None`. 423 | #[inline] 424 | pub fn limit(&self) -> Option { 425 | self.base 426 | .addr() 427 | .get() 428 | .checked_add(self.size()) 429 | .and_then(NonZeroUsize::new) 430 | } 431 | 432 | /// Returns `true` _iff_ `ptr` is within this allocator's managed region. 433 | /// 434 | /// Note that a return value of `true` does not indicate whether or not `ptr` points into an 435 | /// outstanding allocation. 436 | #[inline] 437 | pub fn contains_ptr(&self, ptr: NonNull) -> bool { 438 | self.base.addr() <= ptr.addr() && self.limit().map(|lim| ptr.addr() < lim).unwrap_or(true) 439 | } 440 | 441 | /// Returns `true` _iff_ this allocator can make at least one additional allocation. 442 | #[inline] 443 | pub fn can_allocate(&self) -> bool { 444 | self.free_list.is_some() 445 | } 446 | 447 | /// Returns the number of outstanding allocations. 448 | #[inline] 449 | pub fn outstanding(&self) -> usize { 450 | self.outstanding.try_into().unwrap() 451 | } 452 | } 453 | 454 | impl fmt::Debug for Slab 455 | where 456 | A: BackingAllocator, 457 | { 458 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 459 | f.debug_struct("Slab") 460 | .field("base", &self.base.ptr()) 461 | .field("block_size", &self.block_size) 462 | .field("num_blocks", &self.num_blocks) 463 | .finish() 464 | } 465 | } 466 | 467 | impl Drop for Slab 468 | where 469 | A: BackingAllocator, 470 | { 471 | fn drop(&mut self) { 472 | // Safe unwrap: this layout was checked when the allocator was constructed. 473 | let region_layout = Self::region_layout(self.block_size(), self.num_blocks()).unwrap(); 474 | 475 | if region_layout.size() != 0 { 476 | unsafe { 477 | self.backing_allocator 478 | .deallocate(self.base.ptr(), region_layout) 479 | } 480 | } 481 | } 482 | } 483 | 484 | struct RawSlab { 485 | base: BasePtr, 486 | free_list: Option, 487 | block_size: u32, 488 | block_align: u32, 489 | num_blocks: u32, 490 | } 491 | 492 | impl RawSlab { 493 | /// Attempts to construct a new `Slab` from a raw pointer. 494 | /// 495 | /// # Safety 496 | /// 497 | /// `region` must be a pointer to a region that satisfies the [`Layout`] 498 | /// returned by [`Self::region_layout(num_blocks)`], and it must be valid 499 | /// for reads and writes for the entire size indicated by that `Layout`. 
500 | unsafe fn try_new( 501 | region: NonNull, 502 | block_size: usize, 503 | num_blocks: usize, 504 | ) -> Result { 505 | assert_eq!(region.addr().get() % mem::align_of::(), 0); 506 | 507 | if block_size < mem::size_of::() { 508 | return Err(AllocInitError::InvalidConfig); 509 | } 510 | 511 | // Round block size up to the alignment of BlockLink. 512 | let block_size = { 513 | let align = mem::align_of::(); 514 | 515 | // Safe unchecked sub: align is always nonzero 516 | let up = block_size.checked_add(align).unwrap() - 1; 517 | up & !(align - 1) 518 | }; 519 | 520 | assert_eq!(block_size % mem::align_of::(), 0); 521 | 522 | // Ensure the region size fits in a usize. 523 | let layout = Slab::::region_layout(block_size, num_blocks) 524 | .map_err(|_| AllocInitError::InvalidConfig)?; 525 | 526 | // Ensure pointer calculations will not overflow. 527 | // TODO: use checked_add directly on region.addr() when nonzero_ops is stable. 528 | let region_end = region 529 | .addr() 530 | .get() 531 | .checked_add(layout.size()) 532 | .and_then(NonZeroUsize::new) 533 | .ok_or(AllocInitError::InvalidLocation)?; 534 | 535 | let base = BasePtr::new(region, layout.size()); 536 | 537 | // Initialize the free list by emplacing links in each block. 538 | for block_addr in (region.addr().get()..region_end.get()).step_by(block_size) { 539 | let block_addr = NonZeroUsize::new(block_addr).unwrap(); 540 | 541 | // Safe unchecked sub: region_end is nonzero. 542 | let is_not_last = block_addr.get() < region_end.get() - block_size; 543 | 544 | let next = is_not_last.then(|| { 545 | // Safe unchecked add: block_addr is at least block_size less than 546 | // region_end, as region_end is the upper bound of the iterator 547 | // and block_size is the step. 548 | let next_addr = block_addr.get() + block_size; 549 | 550 | NonZeroUsize::new(next_addr).unwrap() 551 | }); 552 | 553 | assert_eq!(block_addr.get() % mem::align_of::(), 0); 554 | 555 | unsafe { base.init_link_at(block_addr, BlockLink { next }) }; 556 | } 557 | 558 | let block_align = 1_u32 559 | .checked_shl(block_size.trailing_zeros()) 560 | .ok_or(AllocInitError::InvalidConfig)?; 561 | 562 | Ok(RawSlab { 563 | base, 564 | free_list: (num_blocks > 0).then(|| base.addr()), 565 | block_size: block_size 566 | .try_into() 567 | .map_err(|_| AllocInitError::InvalidConfig)?, 568 | block_align, 569 | num_blocks: num_blocks 570 | .try_into() 571 | .map_err(|_| AllocInitError::InvalidConfig)?, 572 | }) 573 | } 574 | 575 | fn with_backing_allocator(self, backing_allocator: A) -> Slab { 576 | Slab { 577 | base: self.base, 578 | free_list: self.free_list, 579 | block_size: self.block_size, 580 | block_align: self.block_align, 581 | num_blocks: self.num_blocks, 582 | outstanding: 0, 583 | backing_allocator, 584 | } 585 | } 586 | } 587 | 588 | #[cfg(test)] 589 | mod tests { 590 | use super::*; 591 | 592 | #[test] 593 | fn too_small_block_size_errors() { 594 | Slab::::try_new(0, 0).unwrap_err(); 595 | Slab::::try_new(0, 1).unwrap_err(); 596 | Slab::::try_new(1, 0).unwrap_err(); 597 | Slab::::try_new(1, 1).unwrap_err(); 598 | Slab::::try_new(mem::size_of::() - 1, 0).unwrap_err(); 599 | Slab::::try_new(mem::size_of::() - 1, 1).unwrap_err(); 600 | } 601 | 602 | #[test] 603 | fn overflow_address_space_errors() { 604 | Slab::::try_new(usize::MAX, 2).unwrap_err(); 605 | } 606 | 607 | #[test] 608 | fn zero_blocks() { 609 | let mut slab = Slab::::try_new(128, 0).unwrap(); 610 | slab.allocate(Layout::from_size_align(0, 1).unwrap()) 611 | .unwrap_err(); 612 | } 613 | 614 | #[test] 615 | 
fn one_block() { 616 | const BLK_SIZE: usize = 128; 617 | let mut slab = Slab::::try_new(BLK_SIZE, 1).unwrap(); 618 | 619 | assert!(slab.can_allocate()); 620 | let layout = Layout::from_size_align(0, 1).unwrap(); 621 | let b = slab.allocate(layout).unwrap(); 622 | assert_eq!(slab.outstanding(), 1); 623 | 624 | assert!(!slab.can_allocate()); 625 | slab.allocate(layout).unwrap_err(); 626 | assert_eq!(slab.outstanding(), 1); 627 | unsafe { slab.deallocate(b.cast()) }; 628 | assert_eq!(slab.outstanding(), 0); 629 | 630 | slab.allocate(Layout::from_size_align(BLK_SIZE + 1, 1).unwrap()) 631 | .unwrap_err(); 632 | } 633 | 634 | #[test] 635 | fn two_blocks() { 636 | const BLK_SIZE: usize = 128; 637 | 638 | let mut slab = Slab::::try_new(BLK_SIZE, 2).unwrap(); 639 | let layout = Layout::from_size_align(1, 1).unwrap(); 640 | 641 | // Allocate a, b 642 | // Free a, b 643 | assert!(slab.can_allocate()); 644 | let a = slab.allocate(layout).unwrap(); 645 | assert_eq!(slab.outstanding(), 1); 646 | 647 | assert!(slab.can_allocate()); 648 | let b = slab.allocate(layout).unwrap(); 649 | assert_eq!(slab.outstanding(), 2); 650 | 651 | assert!(!slab.can_allocate()); 652 | slab.allocate(layout).unwrap_err(); 653 | assert_eq!(slab.outstanding(), 2); 654 | 655 | unsafe { 656 | slab.deallocate(a.cast()); 657 | assert_eq!(slab.outstanding(), 1); 658 | assert!(slab.can_allocate()); 659 | 660 | slab.deallocate(b.cast()); 661 | assert_eq!(slab.outstanding(), 0); 662 | assert!(slab.can_allocate()); 663 | } 664 | 665 | // Allocate a, b 666 | // Free b, a 667 | assert!(slab.can_allocate()); 668 | let a = slab.allocate(layout).unwrap(); 669 | assert_eq!(slab.outstanding(), 1); 670 | 671 | assert!(slab.can_allocate()); 672 | let b = slab.allocate(layout).unwrap(); 673 | assert_eq!(slab.outstanding(), 2); 674 | 675 | assert!(!slab.can_allocate()); 676 | slab.allocate(layout).unwrap_err(); 677 | assert_eq!(slab.outstanding(), 2); 678 | 679 | unsafe { 680 | slab.deallocate(b.cast()); 681 | assert_eq!(slab.outstanding(), 1); 682 | assert!(slab.can_allocate()); 683 | 684 | slab.deallocate(a.cast()); 685 | assert_eq!(slab.outstanding(), 0); 686 | assert!(slab.can_allocate()); 687 | } 688 | } 689 | } 690 | -------------------------------------------------------------------------------- /src/tests.rs: -------------------------------------------------------------------------------- 1 | #![cfg(test)] 2 | extern crate std; 3 | 4 | use core::{marker::PhantomData, mem, ops::Range, ptr}; 5 | 6 | use crate::{ 7 | bump::Bump, 8 | core::{alloc::Layout, cmp, fmt::Debug, ptr::NonNull, slice}, 9 | slab::Slab, 10 | AllocError, AllocInitError, Buddy, Global, 11 | }; 12 | 13 | use alloc::{boxed::Box, vec::Vec}; 14 | use quickcheck::{Arbitrary, Gen, QuickCheck}; 15 | 16 | trait QcAllocator: Sized { 17 | type Params: Arbitrary + Debug; 18 | 19 | fn with_params(params: Self::Params) -> Result; 20 | 21 | fn allocate(&mut self, layout: Layout) -> Result, AllocError>; 22 | 23 | unsafe fn deallocate(&mut self, ptr: NonNull, _: Layout); 24 | } 25 | 26 | // Slab ======================================================================= 27 | 28 | #[derive(Clone, Debug)] 29 | struct SlabParams { 30 | block_size: usize, 31 | num_blocks: usize, 32 | } 33 | 34 | impl Arbitrary for SlabParams { 35 | fn arbitrary(g: &mut Gen) -> Self { 36 | SlabParams { 37 | block_size: cmp::max(mem::size_of::(), usize::arbitrary(g) % g.size()), 38 | num_blocks: usize::arbitrary(g) % g.size(), 39 | } 40 | } 41 | } 42 | 43 | impl QcAllocator for Slab { 44 | type Params = 
SlabParams; 45 | 46 | fn with_params(params: Self::Params) -> Result { 47 | Slab::try_new(params.block_size, params.num_blocks) 48 | } 49 | 50 | fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 51 | self.allocate(layout) 52 | } 53 | 54 | unsafe fn deallocate(&mut self, ptr: NonNull, _: Layout) { 55 | unsafe { self.deallocate(ptr) } 56 | } 57 | } 58 | 59 | // Buddy ====================================================================== 60 | 61 | #[derive(Clone, Debug)] 62 | struct BuddyParams { 63 | num_blocks: usize, 64 | gaps: Vec>, 65 | } 66 | 67 | impl Arbitrary for BuddyParams { 68 | fn arbitrary(g: &mut Gen) -> Self { 69 | //let num_blocks = cmp::max(usize::arbitrary(g) % 8, 1); 70 | let num_blocks = 2; 71 | 72 | let gaps = { 73 | let mut v = Vec::::arbitrary(g); 74 | 75 | v = v 76 | .into_iter() 77 | .map(|ofs| ofs % (BLK_SIZE * num_blocks)) 78 | .take(usize::arbitrary(g) % 2 * num_blocks) 79 | .collect(); 80 | 81 | v.sort(); 82 | 83 | v.chunks_exact(2).map(|pair| pair[0]..pair[1]).collect() 84 | }; 85 | 86 | BuddyParams { num_blocks, gaps } 87 | } 88 | 89 | fn shrink(&self) -> Box> { 90 | let mut items = Vec::with_capacity(self.gaps.capacity() * 2); 91 | for i in 0..self.gaps.len() { 92 | items.push(BuddyParams { 93 | num_blocks: self.num_blocks, 94 | gaps: { 95 | let mut v = self.gaps.clone(); 96 | v.remove(i); 97 | v 98 | }, 99 | }); 100 | items.push(BuddyParams { 101 | num_blocks: self.num_blocks - 1, 102 | gaps: { 103 | let mut v = self.gaps.clone(); 104 | v.remove(i); 105 | v 106 | }, 107 | }); 108 | } 109 | 110 | Box::new(items.into_iter()) 111 | } 112 | } 113 | 114 | impl QcAllocator for Buddy { 115 | type Params = BuddyParams; 116 | 117 | fn with_params(params: Self::Params) -> Result { 118 | Buddy::try_new_with_offset_gaps(params.num_blocks, params.gaps) 119 | } 120 | 121 | fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 122 | self.allocate(layout) 123 | } 124 | 125 | unsafe fn deallocate(&mut self, ptr: NonNull, _: Layout) { 126 | unsafe { self.deallocate(ptr) } 127 | } 128 | } 129 | 130 | // Bump ====================================================================== 131 | 132 | #[derive(Clone, Debug)] 133 | struct BumpParams { 134 | layout: Layout, 135 | } 136 | 137 | impl Arbitrary for BumpParams { 138 | fn arbitrary(g: &mut Gen) -> Self { 139 | BumpParams { 140 | layout: Layout::from_size_align( 141 | cmp::max(usize::arbitrary(g) % 8192, 1), 142 | 1 << (usize::arbitrary(g) % 5), 143 | ) 144 | .unwrap(), 145 | } 146 | } 147 | } 148 | 149 | impl QcAllocator for Bump { 150 | type Params = BumpParams; 151 | 152 | fn with_params(params: Self::Params) -> Result { 153 | Bump::try_new(params.layout) 154 | } 155 | 156 | fn allocate(&mut self, layout: Layout) -> Result, AllocError> { 157 | self.allocate(layout) 158 | } 159 | 160 | unsafe fn deallocate(&mut self, ptr: NonNull, _: Layout) { 161 | unsafe { self.deallocate(ptr) } 162 | } 163 | } 164 | 165 | enum AllocatorOpTag { 166 | Allocate, 167 | Free, 168 | } 169 | 170 | #[derive(Clone, Debug)] 171 | enum AllocatorOp { 172 | /// Allocate a buffer that can hold `len` `u32` values. 173 | Allocate { params: P }, 174 | /// Free an existing allocation. 175 | /// 176 | /// Given `n` outstanding allocations, the allocation to free is at index 177 | /// `index % n`. 178 | Free { index: usize }, 179 | } 180 | 181 | /// Limit on allocation size, expressed in bits. 
182 | const ALLOC_LIMIT_BITS: u8 = 16; 183 | 184 | fn limited_size(g: &mut Gen) -> usize { 185 | let exp = u8::arbitrary(g) % (ALLOC_LIMIT_BITS + 1); 186 | usize::arbitrary(g) % 2_usize.pow(exp.into()) 187 | } 188 | 189 | impl Arbitrary for AllocatorOp

{ 190 | fn arbitrary(g: &mut Gen) -> Self { 191 | match g 192 | .choose(&[AllocatorOpTag::Allocate, AllocatorOpTag::Free]) 193 | .unwrap() 194 | { 195 | AllocatorOpTag::Allocate => AllocatorOp::Allocate { 196 | params: P::arbitrary(g), 197 | }, 198 | AllocatorOpTag::Free => AllocatorOp::Free { 199 | index: usize::arbitrary(g), 200 | }, 201 | } 202 | } 203 | } 204 | 205 | type OpId = u32; 206 | 207 | struct RawAllocation { 208 | id: OpId, 209 | ptr: NonNull<[u8]>, 210 | layout: Layout, 211 | } 212 | 213 | type AllocResult = Result, AllocError>; 214 | 215 | trait PropAllocation { 216 | type Params: Arbitrary; 217 | 218 | fn layout(params: &Self::Params) -> Layout; 219 | fn from_raw(params: &Self::Params, raw: RawAllocation) -> Self; 220 | fn into_raw(self) -> RawAllocation; 221 | } 222 | 223 | trait Prop { 224 | /// The allocator to test for this property. 225 | type Allocator: QcAllocator; 226 | 227 | type Allocation: PropAllocation; 228 | 229 | /// Examines the result of an allocation. 230 | fn post_allocate( 231 | op_id: OpId, 232 | params: &::Params, 233 | res: &mut AllocResult, 234 | ) -> bool { 235 | let _ = (op_id, params, res); 236 | true 237 | } 238 | 239 | fn pre_deallocate(allocation: &Self::Allocation) -> bool { 240 | let _ = allocation; 241 | true 242 | } 243 | 244 | fn check( 245 | params: ::Params, 246 | ops: Vec::Params>>, 247 | ) -> bool; 248 | } 249 | 250 | struct AllocatorChecker { 251 | allocator: P::Allocator, 252 | allocations: Vec, 253 | num_ops: u32, 254 | } 255 | 256 | impl AllocatorChecker

{ 257 | fn new( 258 | params: ::Params, 259 | capacity: usize, 260 | ) -> Result { 261 | Ok(AllocatorChecker { 262 | allocator: P::Allocator::with_params(params)?, 263 | allocations: Vec::with_capacity(capacity), 264 | num_ops: 0, 265 | }) 266 | } 267 | 268 | fn do_op(&mut self, op: AllocatorOp<::Params>) -> bool { 269 | let op_id = self.num_ops; 270 | self.num_ops += 1; 271 | 272 | match op { 273 | AllocatorOp::Allocate { params } => { 274 | let layout = P::Allocation::layout(¶ms); 275 | let mut res = self.allocator.allocate(layout); 276 | 277 | if !P::post_allocate(op_id, ¶ms, &mut res) { 278 | return false; 279 | } 280 | 281 | match res { 282 | Ok(ptr) => { 283 | self.allocations.push(P::Allocation::from_raw( 284 | ¶ms, 285 | RawAllocation { 286 | id: op_id, 287 | ptr, 288 | layout, 289 | }, 290 | )); 291 | } 292 | 293 | // If the allocation should have succeeded, this is handled 294 | // by post_allocate 295 | Err(AllocError) => (), 296 | } 297 | } 298 | 299 | AllocatorOp::Free { index } => { 300 | if self.allocations.is_empty() { 301 | return true; 302 | } 303 | 304 | let index = index % self.allocations.len(); 305 | let a = self.allocations.swap_remove(index); 306 | 307 | if !P::pre_deallocate(&a) { 308 | return false; 309 | } 310 | 311 | let a = a.into_raw(); 312 | 313 | unsafe { self.allocator.deallocate(a.ptr.cast::(), a.layout) }; 314 | } 315 | } 316 | 317 | true 318 | } 319 | 320 | fn run(&mut self, ops: Vec::Params>>) -> bool { 321 | if !ops.into_iter().all(|op| self.do_op(op)) { 322 | return false; 323 | } 324 | 325 | // Free any outstanding allocations. 326 | for alloc in self.allocations.drain(..) { 327 | let alloc = alloc.into_raw(); 328 | unsafe { 329 | self.allocator 330 | .deallocate(alloc.ptr.cast::(), alloc.layout) 331 | }; 332 | } 333 | 334 | true 335 | } 336 | } 337 | 338 | // Miri is substantially slower to run property tests, so the number of test 339 | // cases is reduced to keep the runtime in check. 
340 | 341 | #[cfg(not(miri))] 342 | const MAX_TESTS: u64 = 100; 343 | 344 | #[cfg(miri)] 345 | const MAX_TESTS: u64 = 20; 346 | 347 | struct MutuallyExclusive { 348 | phantom: PhantomData, 349 | } 350 | 351 | struct MutuallyExclusiveAllocation { 352 | op_id: OpId, 353 | ptr: NonNull<[u32]>, 354 | layout: Layout, 355 | } 356 | 357 | #[derive(Clone, Debug)] 358 | struct MutuallyExclusiveAllocationParams { 359 | len: usize, 360 | } 361 | 362 | impl Arbitrary for MutuallyExclusiveAllocationParams { 363 | fn arbitrary(g: &mut Gen) -> Self { 364 | MutuallyExclusiveAllocationParams { 365 | len: limited_size(g), 366 | } 367 | } 368 | } 369 | 370 | impl PropAllocation for MutuallyExclusiveAllocation { 371 | type Params = MutuallyExclusiveAllocationParams; 372 | 373 | fn layout(params: &Self::Params) -> Layout { 374 | Layout::array::(params.len).unwrap() 375 | } 376 | 377 | fn from_raw(params: &Self::Params, raw: RawAllocation) -> Self { 378 | MutuallyExclusiveAllocation { 379 | op_id: raw.id, 380 | ptr: NonNull::new(ptr::slice_from_raw_parts_mut( 381 | raw.ptr.as_ptr().cast(), 382 | params.len, 383 | )) 384 | .unwrap(), 385 | layout: raw.layout, 386 | } 387 | } 388 | 389 | fn into_raw(self) -> RawAllocation { 390 | // TODO: use size_of_val_raw when stable 391 | let num_bytes = mem::size_of::() * unsafe { self.ptr.as_ref().len() }; 392 | 393 | let bytes = NonNull::new(ptr::slice_from_raw_parts_mut( 394 | self.ptr.cast().as_ptr(), 395 | num_bytes, 396 | )) 397 | .unwrap(); 398 | 399 | RawAllocation { 400 | id: self.op_id, 401 | ptr: bytes, 402 | layout: self.layout, 403 | } 404 | } 405 | } 406 | 407 | impl Prop for MutuallyExclusive { 408 | type Allocator = A; 409 | 410 | type Allocation = MutuallyExclusiveAllocation; 411 | 412 | fn check( 413 | params: A::Params, 414 | ops: Vec::Params>>, 415 | ) -> bool { 416 | let mut checker: AllocatorChecker> = 417 | AllocatorChecker::new(params, ops.capacity()).unwrap(); 418 | checker.run(ops) 419 | } 420 | 421 | fn post_allocate( 422 | op_id: OpId, 423 | params: &MutuallyExclusiveAllocationParams, 424 | res: &mut AllocResult, 425 | ) -> bool { 426 | if let Ok(alloc) = res { 427 | let u32_ptr: NonNull = alloc.cast(); 428 | let slice = unsafe { slice::from_raw_parts_mut(u32_ptr.as_ptr(), params.len) }; 429 | slice.fill(op_id); 430 | } 431 | 432 | true 433 | } 434 | 435 | fn pre_deallocate(allocation: &Self::Allocation) -> bool { 436 | let slice = unsafe { allocation.ptr.as_ref() }; 437 | slice.iter().copied().all(|elem| elem == allocation.op_id) 438 | } 439 | } 440 | 441 | fn check( 442 | params: ::Params, 443 | ops: Vec::Params>>, 444 | ) -> bool { 445 | let mut checker: AllocatorChecker

= AllocatorChecker::new(params, ops.capacity()).unwrap(); 446 | checker.run(ops) 447 | } 448 | 449 | #[test] 450 | fn slab_allocations_are_mutually_exclusive() { 451 | let mut qc = QuickCheck::new().max_tests(MAX_TESTS); 452 | qc.quickcheck(check::>> as fn(_, _) -> bool); 453 | } 454 | 455 | #[test] 456 | fn buddy_allocations_are_mutually_exclusive() { 457 | let mut qc = QuickCheck::new().max_tests(MAX_TESTS); 458 | qc.quickcheck(check::>> as fn(_, _) -> bool); 459 | qc.quickcheck(check::>> as fn(_, _) -> bool); 460 | qc.quickcheck(check::>> as fn(_, _) -> bool); 461 | qc.quickcheck(check::>> as fn(_, _) -> bool); 462 | } 463 | 464 | #[test] 465 | fn bump_allocations_are_mutually_exclusive() { 466 | let mut qc = QuickCheck::new().max_tests(MAX_TESTS); 467 | qc.quickcheck(check::>> as fn(_, _) -> bool); 468 | } 469 | 470 | // Version sync ================================================================ 471 | #[test] 472 | fn html_root_url() { 473 | version_sync::assert_html_root_url_updated!("src/lib.rs"); 474 | } 475 | -------------------------------------------------------------------------------- /test-all-configs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -x 4 | 5 | cargo t -q && 6 | cargo t -q --features alloc && 7 | cargo t -q --no-default-features --features alloc,unstable && 8 | RUSTFLAGS="--cfg no_global_oom_handling" cargo t -q && 9 | RUSTFLAGS="--cfg no_global_oom_handling" cargo t -q --features alloc && 10 | RUSTFLAGS="--cfg no_global_oom_handling" cargo t -q --no-default-features --features alloc,unstable 11 | --------------------------------------------------------------------------------
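As a closing illustration of the `MutuallyExclusive` property exercised by the quickcheck tests in `src/tests.rs`: each allocation is filled with the id of the operation that produced it, and the fill is re-verified just before the allocation is freed, so any overlap between live allocations is detected. A condensed, fixed-sequence version of the same check run against a `Slab`, assuming the `alloc` feature; the sizes are arbitrary:

```rust
use acid_alloc::Slab;
use core::{alloc::Layout, ptr::NonNull};

fn main() {
    let mut slab = Slab::try_new(64, 4).unwrap();
    let layout = Layout::array::<u32>(16).unwrap();
    let mut live: Vec<(u32, NonNull<[u8]>)> = Vec::new();

    // Allocate four blocks, tagging every word with the allocating op's id.
    for op_id in 0..4u32 {
        let block = slab.allocate(layout).unwrap();
        unsafe {
            core::slice::from_raw_parts_mut(block.as_ptr().cast::<u32>(), 16).fill(op_id)
        };
        live.push((op_id, block));
    }

    // Before freeing, check that every word still carries the right tag; if any
    // two live allocations overlapped, one fill would have clobbered the other.
    for (op_id, block) in live {
        let words = unsafe { core::slice::from_raw_parts(block.as_ptr().cast::<u32>(), 16) };
        assert!(words.iter().all(|&w| w == op_id));
        unsafe { slab.deallocate(block.cast()) };
    }
}
```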