├── .gitignore
├── bumpalo.png
├── rust-toolchain.toml
├── tests
│   ├── all
│   │   ├── capacity.rs
│   │   ├── boxed.rs
│   │   ├── main.rs
│   │   ├── collect_in.rs
│   │   ├── string.rs
│   │   ├── alloc_fill.rs
│   │   ├── quickcheck.rs
│   │   ├── alloc_with.rs
│   │   ├── try_alloc_with.rs
│   │   ├── allocation_limit.rs
│   │   ├── alloc_try_with.rs
│   │   ├── try_alloc_try_with.rs
│   │   ├── serde.rs
│   │   ├── vec.rs
│   │   ├── tests.rs
│   │   ├── allocator_api.rs
│   │   └── quickchecks.rs
│   └── try_alloc.rs
├── justfile
├── valgrind.supp
├── LICENSE-MIT
├── src
│   ├── collections
│   │   ├── str
│   │   │   ├── mod.rs
│   │   │   └── lossy.rs
│   │   ├── mod.rs
│   │   ├── collect_in.rs
│   │   └── raw_vec.rs
│   └── boxed.rs
├── Cargo.toml
├── .github
│   └── workflows
│       └── rust.yaml
├── README.md
├── LICENSE-APACHE
├── benches
│   └── benches.rs
└── CHANGELOG.md
/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /bumpalo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxc-project/oxc-bumpalo/main/bumpalo.png -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.81.0" 3 | profile = "default" 4 | -------------------------------------------------------------------------------- /tests/all/capacity.rs: -------------------------------------------------------------------------------- 1 | use bumpalo::Bump; 2 | 3 | #[test] 4 | fn try_with_capacity_too_large() { 5 | // Shouldn't panic even though the capacity is too large for a `Layout`. 6 | let _ = Bump::try_with_capacity(isize::MAX as usize + 1); 7 | } 8 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S just --justfile 2 | 3 | set windows-shell := ["powershell"] 4 | set shell := ["bash", "-cu"] 5 | 6 | _default: 7 | @just --list -u 8 | 9 | alias r := ready 10 | 11 | ready: 12 | just check 13 | 14 | check: 15 | cargo check --all-features 16 | -------------------------------------------------------------------------------- /tests/all/boxed.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "boxed")] 2 | 3 | use bumpalo::boxed::Box; 4 | use bumpalo::Bump; 5 | 6 | #[test] 7 | fn into_raw_aliasing() { 8 | let bump = Bump::new(); 9 | let boxed = Box::new_in(1, &bump); 10 | let raw = Box::into_raw(boxed); 11 | 12 | let mut_ref = unsafe { &mut *raw }; 13 | dbg!(mut_ref); 14 | } 15 | -------------------------------------------------------------------------------- /tests/all/main.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(feature = "allocator_api", feature(allocator_api))] 2 | 3 | mod alloc_fill; 4 | mod alloc_try_with; 5 | mod alloc_with; 6 | mod allocation_limit; 7 | mod allocator_api; 8 | mod boxed; 9 | mod capacity; 10 | mod collect_in; 11 | mod quickcheck; 12 | mod quickchecks; 13 | mod string; 14 | mod tests; 15 | mod try_alloc_try_with; 16 | mod try_alloc_with; 17 | mod vec; 18 | 19 | #[cfg(feature = "serde")] 20 | mod serde; 21 | 22 | fn main() {} 23 | -------------------------------------------------------------------------------- /tests/all/collect_in.rs: -------------------------------------------------------------------------------- 1 | 
#![cfg(feature = "collections")] 2 | 3 | use crate::quickcheck; 4 | use bumpalo::collections::{CollectIn, String, Vec}; 5 | use bumpalo::Bump; 6 | use std::string::String as StdString; 7 | use std::vec::Vec as StdVec; 8 | 9 | quickcheck! { 10 | fn test_string_collect(input: StdString) -> bool { 11 | let bump = Bump::new(); 12 | let bump_str = input.chars().collect_in::<String>(&bump); 13 | 14 | bump_str == input 15 | } 16 | 17 | fn test_vec_collect(input: StdVec<i32>) -> bool { 18 | let bump = Bump::new(); 19 | let bump_vec = input.clone().into_iter().collect_in::<Vec<i32>>(&bump); 20 | 21 | bump_vec.as_slice() == input.as_slice() 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /valgrind.supp: -------------------------------------------------------------------------------- 1 | { 2 | <insert_a_suppression_name_here> 3 | Memcheck:FishyValue 4 | malloc(size) 5 | fun:malloc 6 | obj:/**/target/*/deps/tests-* 7 | } 8 | { 9 | <insert_a_suppression_name_here> 10 | Memcheck:Param 11 | statx(buf) 12 | fun:syscall 13 | fun:statx 14 | } 15 | { 16 | <insert_a_suppression_name_here> 17 | Memcheck:Param 18 | statx(file_name) 19 | fun:syscall 20 | fun:statx 21 | } 22 | { 23 | <insert_a_suppression_name_here> 24 | Memcheck:Param 25 | statx(buf) 26 | fun:statx 27 | fun:statx 28 | } 29 | { 30 | <insert_a_suppression_name_here> 31 | Memcheck:Param 32 | statx(file_name) 33 | fun:statx 34 | fun:statx 35 | } 36 | -------------------------------------------------------------------------------- /tests/all/string.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "collections")] 2 | use bumpalo::{collections::String, format, Bump}; 3 | use std::fmt::Write; 4 | 5 | #[test] 6 | fn format_a_bunch_of_strings() { 7 | let b = Bump::new(); 8 | let mut s = String::from_str_in("hello", &b); 9 | for i in 0..1000 { 10 | write!(&mut s, " {}", i).unwrap(); 11 | } 12 | } 13 | 14 | #[test] 15 | fn trailing_comma_in_format_macro() { 16 | let b = Bump::new(); 17 | let v = format![in &b, "{}{}", 1, 2, ]; 18 | assert_eq!(v, "12"); 19 | } 20 | 21 | #[test] 22 | fn push_str() { 23 | let b = Bump::new(); 24 | let mut s = String::new_in(&b); 25 | s.push_str("abc"); 26 | assert_eq!(s, "abc"); 27 | s.push_str("def"); 28 | assert_eq!(s, "abcdef"); 29 | s.push_str(""); 30 | assert_eq!(s, "abcdef"); 31 | s.push_str(&"x".repeat(4000)); 32 | assert_eq!(s.len(), 4006); 33 | s.push_str("ghi"); 34 | assert_eq!(s.len(), 4009); 35 | assert_eq!(&s[s.len() - 5..], "xxghi"); 36 | } 37 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Nick Fitzgerald 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /tests/all/alloc_fill.rs: -------------------------------------------------------------------------------- 1 | use bumpalo::Bump; 2 | use std::alloc::Layout; 3 | use std::cmp; 4 | use std::mem; 5 | 6 | #[test] 7 | fn alloc_slice_fill_zero() { 8 | let b = Bump::new(); 9 | let layout = Layout::new::<u8>(); 10 | 11 | let ptr1 = b.alloc_layout(layout); 12 | 13 | struct MyZeroSizedType; 14 | 15 | b.alloc_slice_copy::<u64>(&[]); 16 | b.alloc_slice_clone::<u64>(&[]); 17 | b.alloc_slice_fill_with::<u64, _>(0, |_| panic!("should not happen")); 18 | b.alloc_slice_fill_copy(0, 42u64); 19 | b.alloc_slice_fill_clone(0, &"hello".to_string()); 20 | b.alloc_slice_fill_default::<u64>(0); 21 | let ptr2 = b.alloc(MyZeroSizedType); 22 | let alignment = cmp::max(mem::align_of::<u8>(), mem::align_of::<MyZeroSizedType>()); 23 | assert_eq!( 24 | ptr1.as_ptr() as usize & !(alignment - 1), 25 | ptr2 as *mut _ as usize 26 | ); 27 | 28 | let ptr3 = b.alloc_layout(layout); 29 | assert_eq!(ptr2 as *mut _ as usize, ptr3.as_ptr() as usize + 1); 30 | } 31 | 32 | #[test] 33 | #[should_panic(expected = "out of memory")] 34 | fn alloc_slice_overflow() { 35 | let b = Bump::new(); 36 | 37 | b.alloc_slice_fill_default::<u64>(usize::max_value()); 38 | } 39 | -------------------------------------------------------------------------------- /tests/all/quickcheck.rs: -------------------------------------------------------------------------------- 1 | /// A redefinition/wrapper macro of `quickcheck::quickcheck!` that supports 2 | /// limiting the number of test iterations to one when we are running under 3 | /// MIRI. 4 | #[macro_export] 5 | macro_rules! quickcheck { 6 | ( 7 | $( 8 | $(#[$m:meta])* 9 | fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty { 10 | $($code:tt)* 11 | } 12 | )* 13 | ) => { 14 | $( 15 | #[test] 16 | $(#[$m])* 17 | fn $fn_name() { 18 | fn prop($($arg_name: $arg_ty),*) -> $ret { 19 | $($code)* 20 | } 21 | 22 | let mut qc = ::quickcheck::QuickCheck::new(); 23 | 24 | // Use the `QUICKCHECK_TESTS` environment variable from 25 | // compile time to avoid violating MIRI's isolation by looking at 26 | // the runtime environment variable. 27 | let tests = option_env!("QUICKCHECK_TESTS").and_then(|s| s.parse().ok()); 28 | 29 | // Limit quickcheck tests to a single iteration under MIRI, 30 | // since they are otherwise super slow. 31 | #[cfg(miri)] 32 | let tests = tests.or(Some(1)); 33 | 34 | if let Some(tests) = tests { 35 | eprintln!("Executing at most {} quickchecks", tests); 36 | qc = qc.tests(tests); 37 | } 38 | 39 | qc.quickcheck(prop as fn($($arg_ty),*) -> $ret); 40 | } 41 | )* 42 | }; 43 | } 44 | -------------------------------------------------------------------------------- /src/collections/str/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT 2 | // file at the top-level directory of this distribution and at 3 | // http://rust-lang.org/COPYRIGHT. 4 | // 5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your 8 | // option. This file may not be copied, modified, or distributed 9 | // except according to those terms. 10 | 11 | //! 
String manipulation 12 | //! 13 | //! For more details, see std::str 14 | 15 | #[allow(missing_docs)] 16 | pub mod lossy; 17 | 18 | // https://tools.ietf.org/html/rfc3629 19 | #[rustfmt::skip] 20 | static UTF8_CHAR_WIDTH: [u8; 256] = [ 21 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 22 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F 23 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 24 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F 25 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 26 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F 27 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 28 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F 29 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 30 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F 31 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 32 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF 33 | 0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 34 | 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF 35 | 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF 36 | 4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF 37 | ]; 38 | 39 | /// Given a first byte, determines how many bytes are in this UTF-8 character. 40 | #[inline] 41 | pub fn utf8_char_width(b: u8) -> usize { 42 | UTF8_CHAR_WIDTH[b as usize] as usize 43 | } 44 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Nick Fitzgerald <fitzgen@gmail.com>"] 3 | categories = ["memory-management", "rust-patterns", "no-std"] 4 | description = "A fast bump allocation arena for Rust." 5 | documentation = "https://docs.rs/bumpalo" 6 | edition = "2021" 7 | license = "MIT OR Apache-2.0" 8 | name = "bumpalo" 9 | readme = "README.md" 10 | repository = "https://github.com/fitzgen/bumpalo" 11 | version = "3.16.0" 12 | exclude = ["/.github/*", "/benches", "/tests", "valgrind.supp", "bumpalo.png"] 13 | rust-version = "1.71.1" 14 | 15 | [package.metadata.docs.rs] 16 | all-features = true 17 | 18 | [lib] 19 | path = "src/lib.rs" 20 | bench = false 21 | 22 | [[bench]] 23 | name = "benches" 24 | path = "benches/benches.rs" 25 | harness = false 26 | required-features = ["collections"] 27 | 28 | [[test]] 29 | name = "try_alloc" 30 | path = "tests/try_alloc.rs" 31 | harness = false 32 | 33 | [dependencies] 34 | # This dependency provides a version of the unstable nightly Rust `Allocator` 35 | # trait on stable Rust. Enabling this feature means that `bumpalo` will 36 | # implement its `Allocator` trait. 37 | allocator-api2 = { version = "0.2.8", default-features = false, optional = true } 38 | 39 | # This dependency is here to allow integration with Serde, if the `serde` feature is enabled 40 | serde = { version = "1.0.171", optional = true } 41 | 42 | [dev-dependencies] 43 | quickcheck = "1.0.3" 44 | criterion = "0.3.6" 45 | rand = "0.8.5" 46 | serde = { version = "1.0.197", features = ["derive"] } 47 | serde_json = "1.0.115" 48 | 49 | [features] 50 | default = [] 51 | collections = [] 52 | boxed = [] 53 | allocator_api = [] 54 | std = [] 55 | serde = ["dep:serde"] 56 | 57 | # [profile.bench] 58 | # debug = true 59 | -------------------------------------------------------------------------------- /tests/all/alloc_with.rs: -------------------------------------------------------------------------------- 1 | // All of these alloc_with tests will fail with "fatal runtime error: stack overflow" unless LLVM 2 | // manages to optimize the stack writes away. 3 | // 4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release 5 | // mode. 
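// (Each test below is therefore marked `#[cfg_attr(debug_assertions, ignore)]`, so `cargo test` only runs it in release builds, where LLVM is expected to optimize the large stack writes away.)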
6 | 7 | use bumpalo::Bump; 8 | 9 | #[test] 10 | #[cfg_attr(debug_assertions, ignore)] 11 | fn alloc_with_large_array() { 12 | let b = Bump::new(); 13 | 14 | b.alloc_with(|| [4u8; 10_000_000]); 15 | } 16 | 17 | #[allow(dead_code)] 18 | struct LargeStruct { 19 | small: usize, 20 | big1: [u8; 20_000_000], 21 | big2: [u8; 20_000_000], 22 | big3: [u8; 20_000_000], 23 | } 24 | 25 | #[test] 26 | #[cfg_attr(debug_assertions, ignore)] 27 | fn alloc_with_large_struct() { 28 | let b = Bump::new(); 29 | 30 | b.alloc_with(|| LargeStruct { 31 | small: 1, 32 | big1: [2; 20_000_000], 33 | big2: [3; 20_000_000], 34 | big3: [4; 20_000_000], 35 | }); 36 | } 37 | 38 | #[test] 39 | #[cfg_attr(debug_assertions, ignore)] 40 | fn alloc_with_large_tuple() { 41 | let b = Bump::new(); 42 | 43 | b.alloc_with(|| { 44 | ( 45 | 1u32, 46 | LargeStruct { 47 | small: 2, 48 | big1: [3; 20_000_000], 49 | big2: [4; 20_000_000], 50 | big3: [5; 20_000_000], 51 | }, 52 | ) 53 | }); 54 | } 55 | 56 | enum LargeEnum { 57 | Small, 58 | #[allow(dead_code)] 59 | Large([u8; 10_000_000]), 60 | } 61 | 62 | #[test] 63 | #[cfg_attr(debug_assertions, ignore)] 64 | fn alloc_with_large_enum() { 65 | let b = Bump::new(); 66 | 67 | b.alloc_with(|| LargeEnum::Small); 68 | } 69 | -------------------------------------------------------------------------------- /tests/all/try_alloc_with.rs: -------------------------------------------------------------------------------- 1 | // All of these try_alloc_with tests will fail with "fatal runtime error: stack overflow" unless LLVM 2 | // manages to optimize the stack writes away. 3 | // 4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release 5 | // mode. 6 | 7 | use bumpalo::Bump; 8 | 9 | #[test] 10 | #[cfg_attr(debug_assertions, ignore)] 11 | fn try_alloc_with_large_array() { 12 | let b = Bump::new(); 13 | 14 | b.try_alloc_with(|| [4u8; 10_000_000]).unwrap(); 15 | } 16 | 17 | #[allow(dead_code)] 18 | struct LargeStruct { 19 | small: usize, 20 | big1: [u8; 20_000_000], 21 | big2: [u8; 20_000_000], 22 | big3: [u8; 20_000_000], 23 | } 24 | 25 | #[test] 26 | #[cfg_attr(debug_assertions, ignore)] 27 | fn try_alloc_with_large_struct() { 28 | let b = Bump::new(); 29 | 30 | b.try_alloc_with(|| LargeStruct { 31 | small: 1, 32 | big1: [2; 20_000_000], 33 | big2: [3; 20_000_000], 34 | big3: [4; 20_000_000], 35 | }) 36 | .unwrap(); 37 | } 38 | 39 | #[test] 40 | #[cfg_attr(debug_assertions, ignore)] 41 | fn try_alloc_with_large_tuple() { 42 | let b = Bump::new(); 43 | 44 | b.try_alloc_with(|| { 45 | ( 46 | 1u32, 47 | LargeStruct { 48 | small: 2, 49 | big1: [3; 20_000_000], 50 | big2: [4; 20_000_000], 51 | big3: [5; 20_000_000], 52 | }, 53 | ) 54 | }) 55 | .unwrap(); 56 | } 57 | 58 | enum LargeEnum { 59 | Small, 60 | #[allow(dead_code)] 61 | Large([u8; 10_000_000]), 62 | } 63 | 64 | #[test] 65 | #[cfg_attr(debug_assertions, ignore)] 66 | fn try_alloc_with_large_enum() { 67 | let b = Bump::new(); 68 | 69 | b.try_alloc_with(|| LargeEnum::Small).unwrap(); 70 | } 71 | -------------------------------------------------------------------------------- /tests/all/allocation_limit.rs: -------------------------------------------------------------------------------- 1 | use bumpalo::Bump; 2 | 3 | #[test] 4 | fn allocation_limit_trivial() { 5 | let bump = Bump::with_capacity(0); 6 | bump.set_allocation_limit(Some(0)); 7 | 8 | assert!(bump.try_alloc(5).is_err()); 9 | assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes()); 10 | 11 | bump.set_allocation_limit(None); 12 | 13 | 
assert!(bump.try_alloc(5).is_ok()); 14 | } 15 | 16 | #[test] 17 | fn change_allocation_limit_with_live_allocations() { 18 | let bump = Bump::new(); 19 | 20 | bump.set_allocation_limit(Some(512)); 21 | 22 | bump.alloc(10); 23 | 24 | assert!(bump.try_alloc([0; 2048]).is_err()); 25 | 26 | bump.set_allocation_limit(Some(16384)); 27 | 28 | assert!(bump.try_alloc([0; 2048]).is_ok()); 29 | assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes()); 30 | } 31 | 32 | #[test] 33 | fn remove_allocation_limit_with_live_allocations() { 34 | let bump = Bump::new(); 35 | 36 | bump.set_allocation_limit(Some(512)); 37 | 38 | bump.alloc(10); 39 | 40 | assert!(bump.try_alloc([0; 2048]).is_err()); 41 | assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes()); 42 | 43 | bump.set_allocation_limit(None); 44 | 45 | assert!(bump.try_alloc([0; 2048]).is_ok()); 46 | } 47 | 48 | #[test] 49 | fn reset_preserves_allocation_limits() { 50 | let mut bump = Bump::new(); 51 | 52 | bump.set_allocation_limit(Some(512)); 53 | bump.reset(); 54 | 55 | assert!(bump.try_alloc([0; 2048]).is_err()); 56 | assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes()); 57 | } 58 | 59 | #[test] 60 | fn reset_updates_allocated_bytes() { 61 | let mut bump = Bump::new(); 62 | 63 | bump.alloc([0; 1 << 9]); 64 | 65 | // This second allocation should be a big enough one 66 | // after the first to force a new chunk allocation 67 | bump.alloc([0; 1 << 9]); 68 | 69 | let allocated_bytes_before_reset = bump.allocated_bytes(); 70 | 71 | bump.reset(); 72 | 73 | let allocated_bytes_after_reset = bump.allocated_bytes(); 74 | 75 | assert!(allocated_bytes_after_reset < allocated_bytes_before_reset); 76 | } 77 | 78 | #[test] 79 | fn new_bump_allocated_bytes_is_zero() { 80 | let bump = Bump::new(); 81 | 82 | assert_eq!(bump.allocated_bytes(), 0); 83 | } 84 | 85 | #[test] 86 | fn small_allocation_limit() { 87 | let bump = Bump::new(); 88 | 89 | bump.set_allocation_limit(Some(64)); 90 | assert!(bump.try_alloc([0; 1]).is_ok()); 91 | } 92 | -------------------------------------------------------------------------------- /src/collections/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT 2 | // file at the top-level directory of this distribution and at 3 | // http://rust-lang.org/COPYRIGHT. 4 | // 5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your 8 | // option. This file may not be copied, modified, or distributed 9 | // except according to those terms. 10 | 11 | //! Collection types that allocate inside a [`Bump`] arena. 12 | //! 13 | //! [`Bump`]: ../struct.Bump.html 14 | 15 | #![allow(deprecated)] 16 | 17 | mod raw_vec; 18 | 19 | pub mod vec; 20 | pub use self::vec::Vec; 21 | 22 | mod str; 23 | pub mod string; 24 | pub use self::string::String; 25 | 26 | mod collect_in; 27 | pub use collect_in::{CollectIn, FromIteratorIn}; 28 | 29 | // pub mod binary_heap; 30 | // mod btree; 31 | // pub mod linked_list; 32 | // pub mod vec_deque; 33 | 34 | // pub mod btree_map { 35 | // //! A map based on a B-Tree. 36 | // pub use super::btree::map::*; 37 | // } 38 | 39 | // pub mod btree_set { 40 | // //! A set based on a B-Tree. 
41 | // pub use super::btree::set::*; 42 | // } 43 | 44 | // #[doc(no_inline)] 45 | // pub use self::binary_heap::BinaryHeap; 46 | 47 | // #[doc(no_inline)] 48 | // pub use self::btree_map::BTreeMap; 49 | 50 | // #[doc(no_inline)] 51 | // pub use self::btree_set::BTreeSet; 52 | 53 | // #[doc(no_inline)] 54 | // pub use self::linked_list::LinkedList; 55 | 56 | // #[doc(no_inline)] 57 | // pub use self::vec_deque::VecDeque; 58 | 59 | use crate::alloc::{AllocErr, LayoutErr}; 60 | 61 | /// Augments `AllocErr` with a `CapacityOverflow` variant. 62 | #[derive(Clone, PartialEq, Eq, Debug)] 63 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] 64 | pub enum CollectionAllocErr { 65 | /// Error due to the computed capacity exceeding the collection's maximum 66 | /// (usually `isize::MAX` bytes). 67 | CapacityOverflow, 68 | /// Error due to the allocator (see the documentation for the [`AllocErr`] type). 69 | AllocErr, 70 | } 71 | 72 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] 73 | impl From<AllocErr> for CollectionAllocErr { 74 | #[inline] 75 | fn from(AllocErr: AllocErr) -> Self { 76 | CollectionAllocErr::AllocErr 77 | } 78 | } 79 | 80 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] 81 | impl From<LayoutErr> for CollectionAllocErr { 82 | #[inline] 83 | fn from(_: LayoutErr) -> Self { 84 | CollectionAllocErr::CapacityOverflow 85 | } 86 | } 87 | 88 | // /// An intermediate trait for specialization of `Extend`. 89 | // #[doc(hidden)] 90 | // trait SpecExtend<I: IntoIterator> { 91 | // /// Extends `self` with the contents of the given iterator. 92 | // fn spec_extend(&mut self, iter: I); 93 | // } 94 | -------------------------------------------------------------------------------- /tests/all/alloc_try_with.rs: -------------------------------------------------------------------------------- 1 | // All of these alloc_try_with tests will fail with "fatal runtime error: stack overflow" unless 2 | // LLVM manages to optimize the stack writes away. 3 | // 4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release 5 | // mode. 
6 | 7 | use bumpalo::Bump; 8 | 9 | #[test] 10 | #[cfg_attr(debug_assertions, ignore)] 11 | fn alloc_try_with_large_array() -> Result<(), ()> { 12 | let b = Bump::new(); 13 | 14 | b.alloc_try_with(|| Ok([4u8; 10_000_000]))?; 15 | 16 | Ok(()) 17 | } 18 | 19 | #[test] 20 | #[cfg_attr(debug_assertions, ignore)] 21 | fn alloc_try_with_large_array_err() { 22 | let b = Bump::new(); 23 | 24 | assert!(b 25 | .alloc_try_with(|| Result::<[u8; 10_000_000], _>::Err(())) 26 | .is_err()); 27 | } 28 | 29 | #[allow(dead_code)] 30 | struct LargeStruct { 31 | small: usize, 32 | big1: [u8; 20_000_000], 33 | big2: [u8; 20_000_000], 34 | big3: [u8; 20_000_000], 35 | } 36 | 37 | #[test] 38 | #[cfg_attr(debug_assertions, ignore)] 39 | fn alloc_try_with_large_struct() -> Result<(), ()> { 40 | let b = Bump::new(); 41 | 42 | b.alloc_try_with(|| { 43 | Ok(LargeStruct { 44 | small: 1, 45 | big1: [2; 20_000_000], 46 | big2: [3; 20_000_000], 47 | big3: [4; 20_000_000], 48 | }) 49 | })?; 50 | 51 | Ok(()) 52 | } 53 | 54 | #[test] 55 | #[cfg_attr(debug_assertions, ignore)] 56 | fn alloc_try_with_large_struct_err() { 57 | let b = Bump::new(); 58 | 59 | assert!(b 60 | .alloc_try_with(|| Result::<LargeStruct, _>::Err(())) 61 | .is_err()); 62 | } 63 | 64 | #[test] 65 | #[cfg_attr(debug_assertions, ignore)] 66 | fn alloc_try_with_large_tuple() -> Result<(), ()> { 67 | let b = Bump::new(); 68 | 69 | b.alloc_try_with(|| { 70 | Ok(( 71 | 1u32, 72 | LargeStruct { 73 | small: 2, 74 | big1: [3; 20_000_000], 75 | big2: [4; 20_000_000], 76 | big3: [5; 20_000_000], 77 | }, 78 | )) 79 | })?; 80 | 81 | Ok(()) 82 | } 83 | 84 | #[test] 85 | #[cfg_attr(debug_assertions, ignore)] 86 | fn alloc_try_with_large_tuple_err() { 87 | let b = Bump::new(); 88 | 89 | assert!(b 90 | .alloc_try_with(|| { Result::<(u32, LargeStruct), _>::Err(()) }) 91 | .is_err()); 92 | } 93 | 94 | enum LargeEnum { 95 | Small, 96 | #[allow(dead_code)] 97 | Large([u8; 10_000_000]), 98 | } 99 | 100 | #[test] 101 | #[cfg_attr(debug_assertions, ignore)] 102 | fn alloc_try_with_large_enum() -> Result<(), ()> { 103 | let b = Bump::new(); 104 | 105 | b.alloc_try_with(|| Ok(LargeEnum::Small))?; 106 | 107 | Ok(()) 108 | } 109 | 110 | #[test] 111 | #[cfg_attr(debug_assertions, ignore)] 112 | fn alloc_try_with_large_enum_err() { 113 | let b = Bump::new(); 114 | 115 | assert!(b 116 | .alloc_try_with(|| Result::<LargeEnum, _>::Err(())) 117 | .is_err()); 118 | } 119 | -------------------------------------------------------------------------------- /.github/workflows/rust.yaml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUST_BACKTRACE: 1 12 | 13 | jobs: 14 | build: 15 | strategy: 16 | matrix: 17 | rust_channel: ["stable", "beta", "nightly", "1.73.0"] 18 | feature_set: ["--features collections,boxed"] 19 | include: 20 | - rust_channel: "nightly" 21 | feature_set: "--all-features" 22 | - rust_channel: "stable" 23 | feature_set: "--no-default-features" 24 | exclude: 25 | - rust_channel: "nightly" 26 | feature_set: "--features collections,boxed" 27 | 28 | runs-on: ubuntu-latest 29 | steps: 30 | - name: Install rustup 31 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 32 | - name: Install rust channel 33 | run: rustup install ${{matrix.rust_channel}} && rustup default ${{matrix.rust_channel}} 34 | 35 | - uses: actions/checkout@v4 36 | 37 | - name: Run tests (no features) 38 | run: cargo 
test --verbose 39 | - name: Run tests (features) 40 | run: cargo test --verbose ${{matrix.feature_set}} 41 | 42 | miri: 43 | runs-on: ubuntu-latest 44 | 45 | env: 46 | MIRIFLAGS: "-Zmiri-strict-provenance -Zmiri-ignore-leaks" 47 | 48 | steps: 49 | - name: Install rustup 50 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain nightly -y 51 | - name: Install miri 52 | run: rustup toolchain install nightly --allow-downgrade --profile minimal --component miri 53 | 54 | - uses: actions/checkout@v4 55 | 56 | - name: Run miri 57 | run: cargo miri test --all-features 58 | 59 | valgrind: 60 | runs-on: ubuntu-latest 61 | 62 | env: 63 | # Don't leak-check, as Rust globals tend to cause false positives. 64 | CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: "valgrind --suppressions=valgrind.supp --leak-check=no --error-exitcode=1 --gen-suppressions=all" 65 | 66 | steps: 67 | - name: Install rustup 68 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 69 | - name: Install rust stable 70 | run: rustup install stable && rustup default stable 71 | 72 | - name: Install valgrind 73 | run: sudo apt update && sudo apt install valgrind 74 | 75 | - uses: actions/checkout@v4 76 | 77 | - name: Test under valgrind (no features) 78 | run: cargo test --verbose 79 | - name: Test under valgrind (features) 80 | run: cargo test --verbose --features collections,boxed 81 | 82 | benches: 83 | runs-on: ubuntu-latest 84 | 85 | steps: 86 | - name: Install rustup 87 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 88 | - name: Install rust nightly 89 | run: rustup install nightly && rustup default nightly 90 | 91 | - uses: actions/checkout@v4 92 | 93 | - name: Check that benches build 94 | run: cargo check --benches --all-features 95 | -------------------------------------------------------------------------------- /tests/all/try_alloc_try_with.rs: -------------------------------------------------------------------------------- 1 | // All of these try_alloc_try_with tests will fail with "fatal runtime error: stack overflow" unless 2 | // LLVM manages to optimize the stack writes away. 3 | // 4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release 5 | // mode. 
6 | 7 | use bumpalo::{AllocOrInitError, Bump}; 8 | 9 | #[test] 10 | #[cfg_attr(debug_assertions, ignore)] 11 | fn try_alloc_try_with_large_array() -> Result<(), AllocOrInitError<()>> { 12 | let b = Bump::new(); 13 | 14 | b.try_alloc_try_with(|| Ok([4u8; 10_000_000]))?; 15 | 16 | Ok(()) 17 | } 18 | 19 | #[test] 20 | #[cfg_attr(debug_assertions, ignore)] 21 | fn try_alloc_try_with_large_array_err() { 22 | let b = Bump::new(); 23 | 24 | assert!(b 25 | .try_alloc_try_with(|| Result::<[u8; 10_000_000], _>::Err(())) 26 | .is_err()); 27 | } 28 | 29 | #[allow(dead_code)] 30 | struct LargeStruct { 31 | small: usize, 32 | big1: [u8; 20_000_000], 33 | big2: [u8; 20_000_000], 34 | big3: [u8; 20_000_000], 35 | } 36 | 37 | #[test] 38 | #[cfg_attr(debug_assertions, ignore)] 39 | fn try_alloc_try_with_large_struct() -> Result<(), AllocOrInitError<()>> { 40 | let b = Bump::new(); 41 | 42 | b.try_alloc_try_with(|| { 43 | Ok(LargeStruct { 44 | small: 1, 45 | big1: [2; 20_000_000], 46 | big2: [3; 20_000_000], 47 | big3: [4; 20_000_000], 48 | }) 49 | })?; 50 | 51 | Ok(()) 52 | } 53 | 54 | #[test] 55 | #[cfg_attr(debug_assertions, ignore)] 56 | fn try_alloc_try_with_large_struct_err() { 57 | let b = Bump::new(); 58 | 59 | assert!(b 60 | .try_alloc_try_with(|| Result::<LargeStruct, _>::Err(())) 61 | .is_err()); 62 | } 63 | 64 | #[test] 65 | #[cfg_attr(debug_assertions, ignore)] 66 | fn try_alloc_try_with_large_tuple() -> Result<(), AllocOrInitError<()>> { 67 | let b = Bump::new(); 68 | 69 | b.try_alloc_try_with(|| { 70 | Ok(( 71 | 1u32, 72 | LargeStruct { 73 | small: 2, 74 | big1: [3; 20_000_000], 75 | big2: [4; 20_000_000], 76 | big3: [5; 20_000_000], 77 | }, 78 | )) 79 | })?; 80 | 81 | Ok(()) 82 | } 83 | 84 | #[test] 85 | #[cfg_attr(debug_assertions, ignore)] 86 | fn try_alloc_try_with_large_tuple_err() { 87 | let b = Bump::new(); 88 | 89 | assert!(b 90 | .try_alloc_try_with(|| { Result::<(u32, LargeStruct), _>::Err(()) }) 91 | .is_err()); 92 | } 93 | 94 | enum LargeEnum { 95 | Small, 96 | #[allow(dead_code)] 97 | Large([u8; 10_000_000]), 98 | } 99 | 100 | #[test] 101 | #[cfg_attr(debug_assertions, ignore)] 102 | fn try_alloc_try_with_large_enum() -> Result<(), AllocOrInitError<()>> { 103 | let b = Bump::new(); 104 | 105 | b.try_alloc_try_with(|| Ok(LargeEnum::Small))?; 106 | 107 | Ok(()) 108 | } 109 | 110 | #[test] 111 | #[cfg_attr(debug_assertions, ignore)] 112 | fn try_alloc_try_with_large_enum_err() { 113 | let b = Bump::new(); 114 | 115 | assert!(b 116 | .try_alloc_try_with(|| Result::<LargeEnum, _>::Err(())) 117 | .is_err()); 118 | } 119 | -------------------------------------------------------------------------------- /tests/all/serde.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "collections")] 2 | #![cfg(feature = "serde")] 3 | 4 | use bumpalo::{boxed::Box, vec, Bump}; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | macro_rules! compare_std_vec { 8 | (in $bump:ident; $($x:expr),+) => {{ 9 | let vec = vec![in &$bump; $($x),+]; 10 | let std_vec = std::vec![$($x),+]; 11 | (vec, std_vec) 12 | }} 13 | } 14 | 15 | macro_rules! compare_std_box { 16 | (in $bump:ident; $x:expr) => { 17 | (Box::new_in($x, &$bump), std::boxed::Box::new($x)) 18 | }; 19 | } 20 | 21 | macro_rules! 
assert_eq_json { 22 | ($a:ident, $b:ident) => { 23 | assert_eq!( 24 | serde_json::to_string(&$a).unwrap(), 25 | serde_json::to_string(&$b).unwrap(), 26 | ) 27 | }; 28 | } 29 | 30 | #[derive(Serialize, Deserialize, Debug, PartialEq)] 31 | #[serde(tag = "t", content = "c")] 32 | enum Test { 33 | First, 34 | Second, 35 | } 36 | 37 | #[derive(Serialize, Deserialize, Debug, PartialEq)] 38 | #[serde()] 39 | struct Mixed { 40 | i: i32, 41 | s: String, 42 | o: Option, 43 | e: Test, 44 | } 45 | 46 | #[test] 47 | fn test_vec_serializes_str() { 48 | let bump = Bump::new(); 49 | let (vec, std_vec) = compare_std_vec![in bump; "hello", "world"]; 50 | assert_eq_json!(vec, std_vec); 51 | let de: std::vec::Vec = 52 | serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap(); 53 | assert_eq!(de, std_vec); 54 | } 55 | 56 | #[test] 57 | fn test_vec_serializes_f32() { 58 | let bump = Bump::new(); 59 | let (vec, std_vec) = compare_std_vec![in bump; 1.5707964, 3.1415927]; 60 | assert_eq_json!(vec, std_vec); 61 | let de: std::vec::Vec = 62 | serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap(); 63 | assert_eq!(de, std_vec); 64 | } 65 | 66 | #[cfg(feature = "serde")] 67 | #[test] 68 | fn test_vec_serializes_complex() { 69 | let bump = Bump::new(); 70 | let (vec, std_vec) = compare_std_vec![ 71 | in bump; 72 | Mixed { 73 | i: 8, 74 | s: "a".into(), 75 | o: None, 76 | e: Test::Second, 77 | }, 78 | Mixed { 79 | i: 8, 80 | s: "b".into(), 81 | o: Some("some".into()), 82 | e: Test::First, 83 | } 84 | ]; 85 | assert_eq_json!(vec, std_vec); 86 | let de: std::vec::Vec = 87 | serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap(); 88 | assert_eq!(de, std_vec); 89 | } 90 | 91 | #[test] 92 | fn test_box_serializes() { 93 | let bump = Bump::new(); 94 | let (box_int, std_box_int) = compare_std_box!(in bump; 1); 95 | assert_eq_json!(box_int, std_box_int); 96 | let (box_str, std_box_str) = compare_std_box!(in bump; 1); 97 | assert_eq_json!(box_str, std_box_str); 98 | let (box_vec, std_box_vec) = compare_std_box!(in bump; std::vec!["hello", "world"]); 99 | assert_eq_json!(box_vec, std_box_vec); 100 | } 101 | 102 | #[cfg(feature = "serde")] 103 | #[test] 104 | fn test_box_serializes_complex() { 105 | let bump = Bump::new(); 106 | let (vec, std_vec) = compare_std_box![ 107 | in bump; 108 | Mixed { 109 | i: 8, 110 | s: "a".into(), 111 | o: None, 112 | e: Test::Second, 113 | } 114 | ]; 115 | assert_eq_json!(vec, std_vec); 116 | let de: std::boxed::Box = 117 | serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap(); 118 | assert_eq!(de, std_vec); 119 | } 120 | -------------------------------------------------------------------------------- /tests/all/vec.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "collections")] 2 | 3 | use crate::quickcheck; 4 | use bumpalo::{collections::Vec, vec, Bump}; 5 | use std::cell::{Cell, RefCell}; 6 | use std::ops::Deref; 7 | 8 | #[test] 9 | fn push_a_bunch_of_items() { 10 | let b = Bump::new(); 11 | let mut v = Vec::new_in(&b); 12 | for x in 0..10_000 { 13 | v.push(x); 14 | } 15 | } 16 | 17 | #[test] 18 | fn trailing_comma_in_vec_macro() { 19 | let b = Bump::new(); 20 | let v = vec![in &b; 1, 2, 3,]; 21 | assert_eq!(v, [1, 2, 3]); 22 | } 23 | 24 | #[test] 25 | fn recursive_vecs() { 26 | // The purpose of this test is to see if the data structures with 27 | // self references are allowed without causing a compile error 28 | // because of the dropck 29 | let b = Bump::new(); 30 | 31 | struct 
Node<'a> { 32 | myself: Cell<Option<&'a Node<'a>>>, 33 | edges: Cell<Vec<'a, &'a Node<'a>>>, 34 | } 35 | 36 | let node1: &Node = b.alloc(Node { 37 | myself: Cell::new(None), 38 | edges: Cell::new(Vec::new_in(&b)), 39 | }); 40 | let node2: &Node = b.alloc(Node { 41 | myself: Cell::new(None), 42 | edges: Cell::new(Vec::new_in(&b)), 43 | }); 44 | 45 | node1.myself.set(Some(node1)); 46 | node1.edges.set(bumpalo::vec![in &b; node1, node1, node2]); 47 | 48 | node2.myself.set(Some(node2)); 49 | node2.edges.set(bumpalo::vec![in &b; node1, node2]); 50 | } 51 | 52 | #[test] 53 | fn test_into_bump_slice_mut() { 54 | let b = Bump::new(); 55 | let v = bumpalo::vec![in &b; 1, 2, 3]; 56 | let slice = v.into_bump_slice_mut(); 57 | 58 | slice[0] = 3; 59 | slice[2] = 1; 60 | 61 | assert_eq!(slice, [3, 2, 1]); 62 | } 63 | 64 | quickcheck! { 65 | fn vec_resizes_causing_reallocs(sizes: std::vec::Vec<usize>) -> () { 66 | // Exercise `realloc` by doing a bunch of `resize`s followed by 67 | // `shrink_to_fit`s. 68 | 69 | let b = Bump::new(); 70 | let mut v = bumpalo::vec![in &b]; 71 | 72 | for len in sizes { 73 | // We don't want to get too big and OOM. 74 | const MAX_SIZE: usize = 1 << 15; 75 | 76 | // But we want allocations to get fairly close to the minimum chunk 77 | // size, so that we are exercising both realloc'ing within a chunk 78 | // and when we need new chunks. 79 | const MIN_SIZE: usize = 1 << 7; 80 | 81 | let len = std::cmp::min(len, MAX_SIZE); 82 | let len = std::cmp::max(len, MIN_SIZE); 83 | 84 | v.resize(len, 0); 85 | v.shrink_to_fit(); 86 | } 87 | } 88 | } 89 | 90 | #[test] 91 | fn test_vec_items_get_dropped() { 92 | struct Foo<'a>(&'a RefCell<String>); 93 | impl<'a> Drop for Foo<'a> { 94 | fn drop(&mut self) { 95 | self.0.borrow_mut().push_str("Dropped!"); 96 | } 97 | } 98 | 99 | let buffer = RefCell::new(String::new()); 100 | let bump = Bump::new(); 101 | { 102 | let mut vec_foo = Vec::new_in(&bump); 103 | vec_foo.push(Foo(&buffer)); 104 | vec_foo.push(Foo(&buffer)); 105 | } 106 | assert_eq!("Dropped!Dropped!", buffer.borrow().deref()); 107 | } 108 | 109 | #[test] 110 | fn test_extend_from_slice_copy() { 111 | let bump = Bump::new(); 112 | let mut vec = vec![in &bump; 1, 2, 3]; 113 | assert_eq!(&[1, 2, 3][..], vec.as_slice()); 114 | 115 | vec.extend_from_slice_copy(&[4, 5, 6]); 116 | assert_eq!(&[1, 2, 3, 4, 5, 6][..], vec.as_slice()); 117 | 118 | // Confirm that passing an empty slice is a no-op 119 | vec.extend_from_slice_copy(&[]); 120 | assert_eq!(&[1, 2, 3, 4, 5, 6][..], vec.as_slice()); 121 | 122 | vec.extend_from_slice_copy(&[7]); 123 | assert_eq!(&[1, 2, 3, 4, 5, 6, 7][..], vec.as_slice()); 124 | } 125 | 126 | #[test] 127 | fn test_extend_from_slices_copy() { 128 | let bump = Bump::new(); 129 | let mut vec = vec![in &bump; 1, 2, 3]; 130 | assert_eq!(&[1, 2, 3][..], vec.as_slice()); 131 | 132 | // Confirm that passing an empty slice of slices is a no-op 133 | vec.extend_from_slices_copy(&[]); 134 | assert_eq!(&[1, 2, 3][..], vec.as_slice()); 135 | 136 | // Confirm that an empty slice in the slice-of-slices is a no-op 137 | vec.extend_from_slices_copy(&[&[4, 5, 6], &[], &[7]]); 138 | assert_eq!(&[1, 2, 3, 4, 5, 6, 7][..], vec.as_slice()); 139 | 140 | vec.extend_from_slices_copy(&[&[8], &[9, 10, 11], &[12]]); 141 | assert_eq!(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], vec.as_slice()); 142 | } 143 | 144 | #[cfg(feature = "std")] 145 | #[test] 146 | fn test_vec_write() { 147 | use std::io::Write; 148 | 149 | let b = Bump::new(); 150 | let mut v = bumpalo::vec![in &b]; 151 | 152 | assert_eq!(v.write(&[]).unwrap(), 0); 153 | 154 | v.flush().unwrap(); 155 | 
156 | assert_eq!(v.write(&[1]).unwrap(), 1); 157 | 158 | v.flush().unwrap(); 159 | 160 | v.write_all(&[]).unwrap(); 161 | 162 | v.flush().unwrap(); 163 | 164 | v.write_all(&[2, 3]).unwrap(); 165 | 166 | v.flush().unwrap(); 167 | 168 | assert_eq!(v, &[1, 2, 3]); 169 | } 170 | -------------------------------------------------------------------------------- /src/collections/collect_in.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "boxed")] 2 | use crate::boxed::Box; 3 | use crate::collections::{String, Vec}; 4 | use crate::Bump; 5 | 6 | /// A trait for types that support being constructed from an iterator, parameterized by an allocator. 7 | pub trait FromIteratorIn<A> { 8 | /// The allocator type 9 | type Alloc; 10 | 11 | /// Similar to [`FromIterator::from_iter`][from_iter], but with a given allocator. 12 | /// 13 | /// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter 14 | /// 15 | /// ``` 16 | /// # use bumpalo::collections::{FromIteratorIn, Vec}; 17 | /// # use bumpalo::Bump; 18 | /// # 19 | /// let five_fives = std::iter::repeat(5).take(5); 20 | /// let bump = Bump::new(); 21 | /// 22 | /// let v = Vec::from_iter_in(five_fives, &bump); 23 | /// 24 | /// assert_eq!(v, [5, 5, 5, 5, 5]); 25 | /// ``` 26 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 27 | where 28 | I: IntoIterator<Item = A>; 29 | } 30 | 31 | #[cfg(feature = "boxed")] 32 | impl<'bump, T> FromIteratorIn<T> for Box<'bump, [T]> { 33 | type Alloc = &'bump Bump; 34 | 35 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 36 | where 37 | I: IntoIterator<Item = T>, 38 | { 39 | Box::from_iter_in(iter, alloc) 40 | } 41 | } 42 | 43 | impl<'bump, T> FromIteratorIn<T> for Vec<'bump, T> { 44 | type Alloc = &'bump Bump; 45 | 46 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 47 | where 48 | I: IntoIterator<Item = T>, 49 | { 50 | Vec::from_iter_in(iter, alloc) 51 | } 52 | } 53 | 54 | impl<T, V: FromIteratorIn<T>> FromIteratorIn<Option<T>> for Option<V> { 55 | type Alloc = V::Alloc; 56 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 57 | where 58 | I: IntoIterator<Item = Option<T>>, 59 | { 60 | iter.into_iter() 61 | .map(|x| x.ok_or(())) 62 | .collect_in::<Result<V, ()>>(alloc) 63 | .ok() 64 | } 65 | } 66 | 67 | impl<T, E, V: FromIteratorIn<T>> FromIteratorIn<Result<T, E>> for Result<V, E> { 68 | type Alloc = V::Alloc; 69 | /// Takes each element in the `Iterator`: if it is an `Err`, no further 70 | /// elements are taken, and the `Err` is returned. Should no `Err` occur, a 71 | /// container with the values of each `Result` is returned. 
72 | /// 73 | /// Here is an example which increments every integer in a vector, 74 | /// checking for overflow: 75 | /// 76 | /// ``` 77 | /// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String}; 78 | /// # use bumpalo::Bump; 79 | /// # 80 | /// let bump = Bump::new(); 81 | /// 82 | /// let v = vec![1, 2, u32::MAX]; 83 | /// let res: Result<Vec<u32>, &'static str> = v.iter().take(2).map(|x: &u32| 84 | /// x.checked_add(1).ok_or("Overflow!") 85 | /// ).collect_in(&bump); 86 | /// assert_eq!(res, Ok(bumpalo::vec![in &bump; 2, 3])); 87 | /// 88 | /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32| 89 | /// x.checked_add(1).ok_or("Overflow!") 90 | /// ).collect_in(&bump); 91 | /// assert_eq!(res, Err("Overflow!")); 92 | /// ``` 93 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 94 | where 95 | I: IntoIterator<Item = Result<T, E>>, 96 | { 97 | let mut iter = iter.into_iter(); 98 | let mut error = None; 99 | let container = core::iter::from_fn(|| match iter.next() { 100 | Some(Ok(x)) => Some(x), 101 | Some(Err(e)) => { 102 | error = Some(e); 103 | None 104 | } 105 | None => None, 106 | }) 107 | .collect_in(alloc); 108 | 109 | match error { 110 | Some(e) => Err(e), 111 | None => Ok(container), 112 | } 113 | } 114 | } 115 | 116 | impl<'bump> FromIteratorIn<char> for String<'bump> { 117 | type Alloc = &'bump Bump; 118 | 119 | fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self 120 | where 121 | I: IntoIterator<Item = char>, 122 | { 123 | String::from_iter_in(iter, alloc) 124 | } 125 | } 126 | 127 | /// Extension trait for iterators, in order to allow allocator-parameterized collections to be constructed more easily. 128 | pub trait CollectIn: Iterator + Sized { 129 | /// Collect all items from an iterator, into a collection parameterized by an allocator. 130 | /// Similar to [`Iterator::collect`][collect]. 
131 | /// 132 | /// [collect]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect 133 | /// 134 | /// ``` 135 | /// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String}; 136 | /// # use bumpalo::Bump; 137 | /// # 138 | /// let bump = Bump::new(); 139 | /// 140 | /// let str = "hello, world!".to_owned(); 141 | /// let bump_str: String = str.chars().collect_in(&bump); 142 | /// assert_eq!(&bump_str, &str); 143 | /// 144 | /// let nums: Vec<i32> = (0..=3).collect_in::<Vec<_>>(&bump); 145 | /// assert_eq!(&nums, &[0,1,2,3]); 146 | /// ``` 147 | fn collect_in<C: FromIteratorIn<Self::Item>>(self, alloc: C::Alloc) -> C { 148 | C::from_iter_in(self, alloc) 149 | } 150 | } 151 | 152 | impl<I: Iterator> CollectIn for I {} 153 | -------------------------------------------------------------------------------- /tests/all/tests.rs: -------------------------------------------------------------------------------- 1 | use bumpalo::Bump; 2 | use std::alloc::Layout; 3 | use std::mem; 4 | use std::usize; 5 | 6 | #[test] 7 | fn can_iterate_over_allocated_things() { 8 | let mut bump = Bump::new(); 9 | 10 | #[cfg(not(miri))] 11 | const MAX: u64 = 131_072; 12 | 13 | #[cfg(miri)] // Miri is very slow, pick a smaller max that runs in a reasonable amount of time 14 | const MAX: u64 = 1024; 15 | 16 | let mut chunk_ends = vec![]; 17 | let mut last = None; 18 | 19 | for i in 0..MAX { 20 | let this = bump.alloc(i); 21 | assert_eq!(*this, i); 22 | let this = this as *const _ as usize; 23 | 24 | if match last { 25 | Some(last) if last - mem::size_of::<u64>() == this => false, 26 | _ => true, 27 | } { 28 | let chunk_end = this + mem::size_of::<u64>(); 29 | println!("new chunk ending @ 0x{:x}", chunk_end); 30 | assert!( 31 | !chunk_ends.contains(&chunk_end), 32 | "should not have already allocated this chunk" 33 | ); 34 | chunk_ends.push(chunk_end); 35 | } 36 | 37 | last = Some(this); 38 | } 39 | 40 | let mut seen = vec![false; MAX as usize]; 41 | 42 | // Safe because we always allocated objects of the same type in this arena, 43 | // and their size >= their align. 44 | for ch in bump.iter_allocated_chunks() { 45 | let chunk_end = ch.as_ptr() as usize + ch.len(); 46 | println!("iter chunk ending @ {:#x}", chunk_end); 47 | assert_eq!( 48 | chunk_ends.pop().unwrap(), 49 | chunk_end, 50 | "should iterate over each chunk once, in order they were allocated in" 51 | ); 52 | 53 | let (before, mid, after) = unsafe { ch.align_to::<u64>() }; 54 | assert!(before.is_empty()); 55 | assert!(after.is_empty()); 56 | for i in mid { 57 | assert!(*i < MAX, "{} < {} (aka {:x} < {:x})", i, MAX, i, MAX); 58 | seen[*i as usize] = true; 59 | } 60 | } 61 | 62 | assert!(seen.iter().all(|s| *s)); 63 | } 64 | 65 | #[cfg(not(miri))] // Miri does not panic on OOM, the interpreter halts 66 | #[test] 67 | #[should_panic(expected = "out of memory")] 68 | fn oom_instead_of_bump_pointer_overflow() { 69 | let bump = Bump::new(); 70 | let x = bump.alloc(0_u8); 71 | let p = x as *mut u8 as usize; 72 | 73 | // A size guaranteed to overflow the bump pointer. 74 | let size = (isize::MAX as usize) - p + 1; 75 | let align = 1; 76 | let layout = match Layout::from_size_align(size, align) { 77 | Err(e) => { 78 | // Return on error so that we don't panic and fail the test. 79 | eprintln!("Layout::from_size_align errored: {}", e); 80 | return; 81 | } 82 | Ok(l) => l, 83 | }; 84 | 85 | // This should panic. 
86 | bump.alloc_layout(layout); 87 | } 88 | 89 | #[test] 90 | fn force_new_chunk_fits_well() { 91 | let b = Bump::new(); 92 | 93 | // Use the first chunk for something 94 | b.alloc_layout(Layout::from_size_align(1, 1).unwrap()); 95 | 96 | // Next force allocation of some new chunks. 97 | b.alloc_layout(Layout::from_size_align(100_001, 1).unwrap()); 98 | b.alloc_layout(Layout::from_size_align(100_003, 1).unwrap()); 99 | } 100 | 101 | #[test] 102 | fn alloc_with_strong_alignment() { 103 | let b = Bump::new(); 104 | 105 | // 64 is probably the strongest alignment we'll see in practice 106 | // e.g. AVX-512 types, or cache line padding optimizations 107 | b.alloc_layout(Layout::from_size_align(4096, 64).unwrap()); 108 | } 109 | 110 | #[test] 111 | fn alloc_slice_copy() { 112 | let b = Bump::new(); 113 | 114 | let src: &[u16] = &[0xFEED, 0xFACE, 0xA7, 0xCAFE]; 115 | let dst = b.alloc_slice_copy(src); 116 | 117 | assert_eq!(src, dst); 118 | } 119 | 120 | #[test] 121 | fn alloc_slice_clone() { 122 | let b = Bump::new(); 123 | 124 | let src = vec![vec![0], vec![1, 2], vec![3, 4, 5], vec![6, 7, 8, 9]]; 125 | let dst = b.alloc_slice_clone(&src); 126 | 127 | assert_eq!(src, dst); 128 | } 129 | 130 | #[test] 131 | fn small_size_and_large_align() { 132 | let b = Bump::new(); 133 | let layout = std::alloc::Layout::from_size_align(1, 0x1000).unwrap(); 134 | b.alloc_layout(layout); 135 | } 136 | 137 | fn with_capacity_helper<I, T>(iter: I) 138 | where 139 | T: Copy + Eq, 140 | I: Clone + Iterator<Item = T> + DoubleEndedIterator, 141 | { 142 | for &initial_size in &[0, 1, 8, 11, 0x1000, 0x12345] { 143 | let mut b = Bump::with_capacity(initial_size); 144 | 145 | for v in iter.clone() { 146 | b.alloc(v); 147 | } 148 | 149 | let pushed_values = b.iter_allocated_chunks().flat_map(|c| { 150 | let (before, mid, after) = unsafe { c.align_to::<T>() }; 151 | assert!(before.is_empty()); 152 | assert!(after.is_empty()); 153 | mid.iter().copied() 154 | }); 155 | assert!(pushed_values.eq(iter.clone().rev())); 156 | } 157 | } 158 | 159 | #[test] 160 | fn with_capacity_test() { 161 | with_capacity_helper(0u8..255); 162 | #[cfg(not(miri))] // Miri is very slow, disable most of the test cases when using it 163 | { 164 | with_capacity_helper(0u16..10000); 165 | with_capacity_helper(0u32..10000); 166 | with_capacity_helper(0u64..10000); 167 | with_capacity_helper(0u128..10000); 168 | } 169 | } 170 | 171 | #[test] 172 | fn test_reset() { 173 | let mut b = Bump::new(); 174 | 175 | for i in 0u64..10_000 { 176 | b.alloc(i); 177 | } 178 | 179 | assert!(b.iter_allocated_chunks().count() > 1); 180 | 181 | let last_chunk = b.iter_allocated_chunks().next().unwrap(); 182 | let start = last_chunk.as_ptr() as usize; 183 | let end = start + last_chunk.len(); 184 | b.reset(); 185 | assert_eq!( 186 | end - mem::size_of::<u64>(), 187 | b.alloc(0u64) as *const u64 as usize 188 | ); 189 | assert_eq!(b.iter_allocated_chunks().count(), 1); 190 | } 191 | 192 | #[test] 193 | fn test_alignment() { 194 | for &alignment in &[2, 4, 8, 16, 32, 64] { 195 | let b = Bump::with_capacity(513); 196 | let layout = std::alloc::Layout::from_size_align(alignment, alignment).unwrap(); 197 | 198 | for _ in 0..1024 { 199 | let ptr = b.alloc_layout(layout).as_ptr(); 200 | assert_eq!(ptr as *const u8 as usize % alignment, 0); 201 | } 202 | } 203 | } 204 | 205 | #[test] 206 | fn test_chunk_capacity() { 207 | let b = Bump::with_capacity(512); 208 | let orig_capacity = b.chunk_capacity(); 209 | b.alloc(true); 210 | assert!(b.chunk_capacity() < orig_capacity); 211 | } 212 | 213 | #[test] 214 | 
#[cfg(feature = "allocator_api")] 215 | fn miri_stacked_borrows_issue_247() { 216 | let bump = bumpalo::Bump::new(); 217 | 218 | let a = Box::into_raw(Box::new_in(1u8, &bump)); 219 | drop(unsafe { Box::from_raw_in(a, &bump) }); 220 | 221 | let _b = Box::new_in(2u16, &bump); 222 | }-------------------------------------------------------------------------------- /src/collections/str/lossy.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT 2 | // file at the top-level directory of this distribution and at 3 | // http://rust-lang.org/COPYRIGHT. 4 | // 5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your 8 | // option. This file may not be copied, modified, or distributed 9 | // except according to those terms. 10 | 11 | use crate::collections::str as core_str; 12 | use core::char; 13 | use core::fmt; 14 | use core::fmt::Write; 15 | use core::str; 16 | 17 | /// Lossy UTF-8 string. 18 | pub struct Utf8Lossy<'a> { 19 | bytes: &'a [u8], 20 | } 21 | 22 | impl<'a> Utf8Lossy<'a> { 23 | pub fn from_bytes(bytes: &'a [u8]) -> Utf8Lossy<'a> { 24 | Utf8Lossy { bytes } 25 | } 26 | 27 | pub fn chunks(&self) -> Utf8LossyChunksIter<'a> { 28 | Utf8LossyChunksIter { 29 | source: &self.bytes, 30 | } 31 | } 32 | } 33 | 34 | /// Iterator over lossy UTF-8 string 35 | #[allow(missing_debug_implementations)] 36 | pub struct Utf8LossyChunksIter<'a> { 37 | source: &'a [u8], 38 | } 39 | 40 | #[derive(PartialEq, Eq, Debug)] 41 | pub struct Utf8LossyChunk<'a> { 42 | /// Sequence of valid chars. 43 | /// Can be empty between broken UTF-8 chars. 44 | pub valid: &'a str, 45 | /// Single broken char, empty if none. 46 | /// Empty iff iterator item is last. 47 | pub broken: &'a [u8], 48 | } 49 | 50 | impl<'a> Iterator for Utf8LossyChunksIter<'a> { 51 | type Item = Utf8LossyChunk<'a>; 52 | 53 | fn next(&mut self) -> Option<Utf8LossyChunk<'a>> { 54 | if self.source.is_empty() { 55 | return None; 56 | } 57 | 58 | const TAG_CONT_U8: u8 = 128; 59 | fn unsafe_get(xs: &[u8], i: usize) -> u8 { 60 | unsafe { *xs.get_unchecked(i) } 61 | } 62 | fn safe_get(xs: &[u8], i: usize) -> u8 { 63 | if i >= xs.len() { 64 | 0 65 | } else { 66 | unsafe_get(xs, i) 67 | } 68 | } 69 | 70 | let mut i = 0; 71 | while i < self.source.len() { 72 | let i_ = i; 73 | 74 | let byte = unsafe_get(self.source, i); 75 | i += 1; 76 | 77 | if byte < 128 { 78 | } else { 79 | let w = core_str::utf8_char_width(byte); 80 | 81 | macro_rules! 
error { 82 | () => {{ 83 | unsafe { 84 | let r = Utf8LossyChunk { 85 | valid: str::from_utf8_unchecked(&self.source[0..i_]), 86 | broken: &self.source[i_..i], 87 | }; 88 | self.source = &self.source[i..]; 89 | return Some(r); 90 | } 91 | }}; 92 | } 93 | 94 | match w { 95 | 2 => { 96 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 97 | error!(); 98 | } 99 | i += 1; 100 | } 101 | 3 => { 102 | match (byte, safe_get(self.source, i)) { 103 | (0xE0, 0xA0..=0xBF) => (), 104 | (0xE1..=0xEC, 0x80..=0xBF) => (), 105 | (0xED, 0x80..=0x9F) => (), 106 | (0xEE..=0xEF, 0x80..=0xBF) => (), 107 | _ => { 108 | error!(); 109 | } 110 | } 111 | i += 1; 112 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 113 | error!(); 114 | } 115 | i += 1; 116 | } 117 | 4 => { 118 | match (byte, safe_get(self.source, i)) { 119 | (0xF0, 0x90..=0xBF) => (), 120 | (0xF1..=0xF3, 0x80..=0xBF) => (), 121 | (0xF4, 0x80..=0x8F) => (), 122 | _ => { 123 | error!(); 124 | } 125 | } 126 | i += 1; 127 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 128 | error!(); 129 | } 130 | i += 1; 131 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 132 | error!(); 133 | } 134 | i += 1; 135 | } 136 | _ => { 137 | error!(); 138 | } 139 | } 140 | } 141 | } 142 | 143 | let r = Utf8LossyChunk { 144 | valid: unsafe { str::from_utf8_unchecked(self.source) }, 145 | broken: &[], 146 | }; 147 | self.source = &[]; 148 | Some(r) 149 | } 150 | } 151 | 152 | impl<'a> fmt::Display for Utf8Lossy<'a> { 153 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 154 | // If we're the empty string then our iterator won't actually yield 155 | // anything, so perform the formatting manually 156 | if self.bytes.is_empty() { 157 | return "".fmt(f); 158 | } 159 | 160 | for Utf8LossyChunk { valid, broken } in self.chunks() { 161 | // If we successfully decoded the whole chunk as a valid string then 162 | // we can return a direct formatting of the string which will also 163 | // respect various formatting flags if possible. 164 | if valid.len() == self.bytes.len() { 165 | assert!(broken.is_empty()); 166 | return valid.fmt(f); 167 | } 168 | 169 | f.write_str(valid)?; 170 | if !broken.is_empty() { 171 | f.write_char(char::REPLACEMENT_CHARACTER)?; 172 | } 173 | } 174 | Ok(()) 175 | } 176 | } 177 | 178 | impl<'a> fmt::Debug for Utf8Lossy<'a> { 179 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 180 | f.write_char('"')?; 181 | 182 | for Utf8LossyChunk { valid, broken } in self.chunks() { 183 | // Valid part. 184 | // Here we partially parse UTF-8 again which is suboptimal. 185 | { 186 | let mut from = 0; 187 | for (i, c) in valid.char_indices() { 188 | let esc = c.escape_debug(); 189 | // If char needs escaping, flush backlog so far and write, else skip 190 | if esc.len() != 1 { 191 | f.write_str(&valid[from..i])?; 192 | for c in esc { 193 | f.write_char(c)?; 194 | } 195 | from = i + c.len_utf8(); 196 | } 197 | } 198 | f.write_str(&valid[from..])?; 199 | } 200 | 201 | // Broken parts of string as hex escape. 
202 | for &b in broken { 203 | write!(f, "\\x{:02x}", b)?; 204 | } 205 | } 206 | 207 | f.write_char('"') 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `bumpalo` 2 | 3 | **A fast bump allocation arena for Rust.** 4 | 5 | [![](https://docs.rs/bumpalo/badge.svg)](https://docs.rs/bumpalo/) 6 | [![](https://img.shields.io/crates/v/bumpalo.svg)](https://crates.io/crates/bumpalo) 7 | [![](https://img.shields.io/crates/d/bumpalo.svg)](https://crates.io/crates/bumpalo) 8 | [![Build Status](https://github.com/fitzgen/bumpalo/workflows/Rust/badge.svg)](https://github.com/fitzgen/bumpalo/actions?query=workflow%3ARust) 9 | 10 | ![](https://github.com/fitzgen/bumpalo/raw/main/bumpalo.png) 11 | 12 | ### Bump Allocation 13 | 14 | Bump allocation is a fast, but limited approach to allocation. We have a chunk 15 | of memory, and we maintain a pointer within that memory. Whenever we allocate an 16 | object, we do a quick check that we have enough capacity left in our chunk to 17 | allocate the object and then update the pointer by the object's size. *That's 18 | it!* 19 | 20 | The disadvantage of bump allocation is that there is no general way to 21 | deallocate individual objects or reclaim the memory region for a 22 | no-longer-in-use object. 23 | 24 | These trade-offs make bump allocation well-suited for *phase-oriented* 25 | allocations. That is, a group of objects that will all be allocated during the 26 | same program phase, used, and then can all be deallocated together as a group. 27 | 28 | ### Deallocation en Masse, but no `Drop` 29 | 30 | To deallocate all the objects in the arena at once, we can simply reset the bump 31 | pointer back to the start of the arena's memory chunk. This makes mass 32 | deallocation *extremely* fast, but allocated objects' [`Drop`] implementations are 33 | not invoked. 34 | 35 | > **However:** [`bumpalo::boxed::Box<T>`][box] can be used to wrap 36 | > `T` values allocated in the `Bump` arena, and calls `T`'s `Drop` 37 | > implementation when the `Box<T>` wrapper goes out of scope. This is similar to 38 | > how [`std::boxed::Box`] works, except without deallocating its backing memory. 39 | 40 | [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html 41 | [box]: https://docs.rs/bumpalo/latest/bumpalo/boxed/struct.Box.html 42 | [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html 43 | 44 | ### What happens when the memory chunk is full? 45 | 46 | This implementation will allocate a new memory chunk from the global allocator 47 | and then start bump allocating into this new memory chunk. 48 | 49 | ### Example 50 | 51 | ```rust 52 | use bumpalo::Bump; 53 | use std::u64; 54 | 55 | struct Doggo { 56 | cuteness: u64, 57 | age: u8, 58 | scritches_required: bool, 59 | } 60 | 61 | // Create a new arena to bump allocate into. 62 | let bump = Bump::new(); 63 | 64 | // Allocate values into the arena. 65 | let scooter = bump.alloc(Doggo { 66 | cuteness: u64::max_value(), 67 | age: 8, 68 | scritches_required: true, 69 | }); 70 | 71 | // Exclusive, mutable references to the just-allocated value are returned. 72 | assert!(scooter.scritches_required); 73 | scooter.age += 1; 74 | ``` 75 | 76 | ### Collections 77 | 78 | When the `"collections"` cargo feature is enabled, a fork of some of the `std` 79 | library's collections is available in the [`collections`] module. 
 19 | 
 20 | The disadvantage of bump allocation is that there is no general way to
 21 | deallocate individual objects or reclaim the memory region for a
 22 | no-longer-in-use object.
 23 | 
 24 | These trade-offs make bump allocation well-suited for *phase-oriented*
 25 | allocations: a group of objects that are all allocated during the same
 26 | program phase, used, and then deallocated together as a group.
 27 | 
 28 | ### Deallocation en Masse, but no `Drop`
 29 | 
 30 | To deallocate all the objects in the arena at once, we can simply reset the bump
 31 | pointer back to the start of the arena's memory chunk. This makes mass
 32 | deallocation *extremely* fast, but allocated objects' [`Drop`] implementations are
 33 | not invoked.
 34 | 
 35 | > **However:** [`bumpalo::boxed::Box<T>`][box] can be used to wrap
 36 | > `T` values allocated in the `Bump` arena, and calls `T`'s `Drop`
 37 | > implementation when the `Box<T>` wrapper goes out of scope. This is similar to
 38 | > how [`std::boxed::Box`] works, except without deallocating its backing memory.
 39 | 
 40 | [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
 41 | [box]: https://docs.rs/bumpalo/latest/bumpalo/boxed/struct.Box.html
 42 | [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
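For example, resetting an arena frees everything it allocated in one shot. This sketch uses only the public `Bump::new`, `alloc`, and `reset` APIs:

```rust
use bumpalo::Bump;

let mut bump = Bump::new();

// Allocate a bunch of phase-local values into the arena.
for i in 0..100u32 {
    bump.alloc(i);
}

// Deallocate them all at once by resetting the bump pointer. Note that
// `reset` does not run `Drop` for plain `alloc`'d values; that is what
// `bumpalo::boxed::Box` (below) is for.
bump.reset();
```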
 43 | 
 44 | ### What happens when the memory chunk is full?
 45 | 
 46 | This implementation will allocate a new memory chunk from the global allocator
 47 | and then start bump allocating into this new memory chunk.
 48 | 
 49 | ### Example
 50 | 
 51 | ```rust
 52 | use bumpalo::Bump;
 53 | 
 54 | 
 55 | struct Doggo {
 56 |     cuteness: u64,
 57 |     age: u8,
 58 |     scritches_required: bool,
 59 | }
 60 | 
 61 | // Create a new arena to bump allocate into.
 62 | let bump = Bump::new();
 63 | 
 64 | // Allocate values into the arena.
 65 | let scooter = bump.alloc(Doggo {
 66 |     cuteness: u64::MAX,
 67 |     age: 8,
 68 |     scritches_required: true,
 69 | });
 70 | 
 71 | // Exclusive, mutable references to the just-allocated value are returned.
 72 | assert!(scooter.scritches_required);
 73 | scooter.age += 1;
 74 | ```
 75 | 
 76 | ### Collections
 77 | 
 78 | When the `"collections"` cargo feature is enabled, forks of some of the `std`
 79 | library's collections are available in the [`collections`] module. These
 80 | collection types are modified to allocate their space inside `bumpalo::Bump`
 81 | arenas.
 82 | 
 83 | [`collections`]: https://docs.rs/bumpalo/latest/bumpalo/collections/index.html
 84 | 
 85 | ```rust
 86 | #[cfg(feature = "collections")]
 87 | {
 88 |     use bumpalo::{Bump, collections::Vec};
 89 | 
 90 |     // Create a new bump arena.
 91 |     let bump = Bump::new();
 92 | 
 93 |     // Create a vector of integers whose storage is backed by the bump arena. The
 94 |     // vector cannot outlive its backing arena, and this property is enforced with
 95 |     // Rust's lifetime rules.
 96 |     let mut v = Vec::new_in(&bump);
 97 | 
 98 |     // Push a bunch of integers onto `v`!
 99 |     for i in 0..100 {
100 |         v.push(i);
101 |     }
102 | }
103 | ```
104 | 
105 | Eventually [all `std` collection types will be parameterized by an
106 | allocator](https://github.com/rust-lang/rust/issues/42774) and we can remove
107 | this `collections` module and use the `std` versions.
108 | 
109 | For unstable, nightly-only support for custom allocators in `std`, see the
110 | `allocator_api` section below.
111 | 
112 | ### `bumpalo::boxed::Box`
113 | 
114 | When the `"boxed"` cargo feature is enabled, a fork of `std::boxed::Box`
115 | is available in the `boxed` module. This `Box` type is modified to allocate its
116 | space inside `bumpalo::Bump` arenas.
117 | 
118 | **A `Box<T>` runs `T`'s drop implementation when the `Box<T>` is dropped.** You
119 | can use this to work around the fact that `Bump` does not itself drop the
120 | values allocated in it.
121 | 
122 | ```rust
123 | #[cfg(feature = "boxed")]
124 | {
125 |     use bumpalo::{Bump, boxed::Box};
126 |     use std::sync::atomic::{AtomicUsize, Ordering};
127 | 
128 |     static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0);
129 | 
130 |     struct CountDrops;
131 | 
132 |     impl Drop for CountDrops {
133 |         fn drop(&mut self) {
134 |             NUM_DROPPED.fetch_add(1, Ordering::SeqCst);
135 |         }
136 |     }
137 | 
138 |     // Create a new bump arena.
139 |     let bump = Bump::new();
140 | 
141 |     // Create a `CountDrops` inside the bump arena.
142 |     let mut c = Box::new_in(CountDrops, &bump);
143 | 
144 |     // No `CountDrops` have been dropped yet.
145 |     assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 0);
146 | 
147 |     // Drop our `Box`.
148 |     drop(c);
149 | 
150 |     // Its `Drop` implementation was run, and so `NUM_DROPPED` has been
151 |     // incremented.
152 |     assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 1);
153 | }
154 | ```
155 | 
156 | ### Serde
157 | 
158 | Adding the `serde` feature flag enables transparent serialization of `Vec`s and
159 | boxed values.
160 | 
161 | ```toml
162 | [dependencies]
163 | bumpalo = { version = "3.9", features = ["collections", "boxed", "serde"] }
164 | ```
165 | 
166 | ```rust,ignore
167 | use bumpalo::{Bump, boxed::Box, collections::Vec};
168 | 
169 | // Create a `Box`
170 | let bump = Bump::new();
171 | 
172 | // Create a `Box`
173 | let boxed = Box::new_in("hello", &bump);
174 | 
175 | // Serialize with serde_json
176 | assert_eq!(serde_json::to_string(&boxed).unwrap(), "\"hello\"");
177 | 
178 | // Create a `Vec`
179 | let mut vec = Vec::new_in(&bump);
180 | vec.push(1);
181 | vec.push(2);
182 | 
183 | // Serialize with serde_json
184 | assert_eq!(serde_json::to_string(&vec).unwrap(), "[1,2]");
185 | ```
186 | 
187 | ### `#![no_std]` Support
188 | 
189 | Bumpalo is a `no_std` crate by default. It depends only on the `alloc` and `core` crates.
190 | 
191 | ### `std` Support
192 | 
193 | You can optionally enable the `std` feature in order to get some `std`-only
194 | trait implementations for the collections:
195 | 
196 | * `std::io::Write` for `Vec<'bump, u8>`
197 | 
198 | ### Thread support
199 | 
200 | `Bump` is `!Sync`, which makes it hard to use in certain multi-threaded
201 | situations, for example with `rayon`.
202 | 
203 | The [`bumpalo-herd`](https://crates.io/crates/bumpalo-herd) crate provides a
204 | pool of `Bump` allocators for use in such situations.
205 | 
206 | ### Nightly Rust `allocator_api` Support
207 | 
208 | The unstable, nightly-only Rust `allocator_api` feature defines an [`Allocator`]
209 | trait and exposes custom allocators for `std` types. Bumpalo has a matching
210 | `allocator_api` cargo feature to enable implementing `Allocator` and using
211 | `Bump` with `std` collections. Note that, as `feature(allocator_api)` is
212 | unstable and only in nightly Rust, Bumpalo's matching `allocator_api` cargo
213 | feature should be considered unstable, and will not follow the semver
214 | conventions that the rest of the crate does.
215 | 
216 | First, enable the `allocator_api` feature in your `Cargo.toml`:
217 | 
218 | ```toml
219 | [dependencies]
220 | bumpalo = { version = "3", features = ["allocator_api"] }
221 | ```
222 | 
223 | Next, enable the `allocator_api` nightly Rust feature in your `src/lib.rs` or
224 | `src/main.rs`:
225 | 
226 | ```rust,ignore
227 | #![feature(allocator_api)]
228 | ```
229 | 
230 | Finally, use `std` collections with `Bump`, so that their internal heap
231 | allocations are made within the given bump arena:
232 | 
233 | ```rust,ignore
234 | use bumpalo::Bump;
235 | 
236 | // Create a new bump arena.
237 | let bump = Bump::new();
238 | 
239 | // Create a `Vec` whose elements are allocated within the bump arena.
240 | let mut v = Vec::new_in(&bump);
241 | v.push(0);
242 | v.push(1);
243 | v.push(2);
244 | ```
245 | 
246 | [`Allocator`]: https://doc.rust-lang.org/std/alloc/trait.Allocator.html
247 | 
248 | ### Using the `Allocator` API on Stable Rust
249 | 
250 | You can enable the `allocator-api2` Cargo feature and `bumpalo` will use [the
251 | `allocator-api2` crate](https://crates.io/crates/allocator-api2) to implement
252 | the unstable, nightly-only `Allocator` API on stable Rust. This means that
253 | `bumpalo::Bump` will be usable with any collection that is generic over
254 | `allocator_api2::Allocator`.
255 | 
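For example, something like the following sketch should work on stable Rust with that feature enabled. The `allocator_api2::vec::Vec` path is an assumption about the `allocator-api2` crate's layout, so check it against the crate version you actually use:

```rust,ignore
use bumpalo::Bump;

// Create a bump arena as usual.
let bump = Bump::new();

// `allocator-api2` ships its own `Vec` that is generic over its `Allocator`
// trait, which `Bump` implements when the `allocator-api2` feature is on.
// (Assumed path: allocator_api2::vec::Vec.)
let mut v = allocator_api2::vec::Vec::new_in(&bump);
v.push(1);
v.push(2);
assert_eq!(v.as_slice(), &[1, 2][..]);
```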
256 | ### Minimum Supported Rust Version (MSRV)
257 | 
258 | This crate is guaranteed to compile on stable Rust **1.71.1** and up. It might
259 | compile with older versions but that may change in any new patch release.
260 | 
261 | We reserve the right to increment the MSRV on minor releases; however, we
262 | will strive to do so only deliberately and for good reasons.
263 | 
--------------------------------------------------------------------------------
/tests/all/allocator_api.rs:
--------------------------------------------------------------------------------
  1 | #![cfg(feature = "allocator_api")]
  2 | 
  3 | use crate::quickcheck;
  4 | use bumpalo::Bump;
  5 | use std::alloc::{AllocError, Allocator, Layout};
  6 | use std::ptr::NonNull;
  7 | use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
  8 | 
  9 | /// Map an arbitrary `x` to a power of 2 that is less than or equal to `max`,
 10 | /// but with as little bias as possible (e.g., rounding `min(x, max)` to the
 11 | /// nearest power of 2 is unacceptable because it would majorly bias `max` for
 12 | /// small values of `max`).
 13 | fn clamp_to_pow2_in_range(x: usize, max: usize) -> usize {
 14 |     let log_max = max.ilog2() as usize;
 15 |     if log_max == 0 {
 16 |         return 1;
 17 |     }
 18 |     let divisor = usize::MAX / log_max;
 19 |     let y = 1_usize << (x / divisor);
 20 |     assert!(y.is_power_of_two(), "{y} is not a power of two");
 21 |     assert!(y <= max, "{y} is larger than {max}");
 22 |     y
 23 | }
 24 | 
 25 | /// Helper to turn a pair of arbitrary `usize`s into a valid `Layout` of
 26 | /// reasonable size for use with quickchecks.
 27 | pub fn arbitrary_layout(size: usize, align: usize) -> Layout {
 28 |     const MAX_ALIGN: usize = 64;
 29 |     const MAX_SIZE: usize = 1024;
 30 | 
 31 |     let align = clamp_to_pow2_in_range(align, MAX_ALIGN);
 32 | 
 33 |     let size = size % (MAX_SIZE + 1);
 34 |     let size = size.next_multiple_of(align);
 35 | 
 36 |     Layout::from_size_align(size, align).unwrap()
 37 | }
 38 | 
 39 | #[derive(Debug)]
 40 | struct AllocatorDebug {
 41 |     bump: Bump,
 42 |     grows: AtomicUsize,
 43 |     shrinks: AtomicUsize,
 44 |     allocs: AtomicUsize,
 45 |     deallocs: AtomicUsize,
 46 | }
 47 | 
 48 | impl AllocatorDebug {
 49 |     fn new(bump: Bump) -> AllocatorDebug {
 50 |         AllocatorDebug {
 51 |             bump,
 52 |             grows: AtomicUsize::new(0),
 53 |             shrinks: AtomicUsize::new(0),
 54 |             allocs: AtomicUsize::new(0),
 55 |             deallocs: AtomicUsize::new(0),
 56 |         }
 57 |     }
 58 | }
 59 | 
 60 | unsafe impl Allocator for AllocatorDebug {
 61 |     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
 62 |         self.allocs.fetch_add(1, Relaxed);
 63 |         let ref bump = self.bump;
 64 |         bump.allocate(layout)
 65 |     }
 66 | 
 67 |     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
 68 |         self.deallocs.fetch_add(1, Relaxed);
 69 |         let ref bump = self.bump;
 70 |         bump.deallocate(ptr, layout)
 71 |     }
 72 | 
 73 |     unsafe fn shrink(
 74 |         &self,
 75 |         ptr: NonNull<u8>,
 76 |         old_layout: Layout,
 77 |         new_layout: Layout,
 78 |     ) -> Result<NonNull<[u8]>, AllocError> {
 79 |         self.shrinks.fetch_add(1, Relaxed);
 80 |         let ref bump = self.bump;
 81 |         bump.shrink(ptr, old_layout, new_layout)
 82 |     }
 83 | 
 84 |     unsafe fn grow(
 85 |         &self,
 86 |         ptr: NonNull<u8>,
 87 |         old_layout: Layout,
 88 |         new_layout: Layout,
 89 |     ) -> Result<NonNull<[u8]>, AllocError> {
 90 |         self.grows.fetch_add(1, Relaxed);
 91 |         let ref bump = self.bump;
 92 |         bump.grow(ptr, old_layout, new_layout)
 93 |     }
 94 | }
 95 | 
 96 | #[test]
 97 | fn allocator_api_push_a_bunch_of_items() {
 98 |     let b = AllocatorDebug::new(Bump::new());
 99 |     let mut v = Vec::with_capacity_in(1024, &b);
100 |     assert_eq!(b.allocs.load(Relaxed), 1);
101 | 
102 |     for x in 0..1024 {
103 |         v.push(x);
104 |     }
105 | 
106 |     // Ensure we trigger a grow
107 |     assert_eq!(b.grows.load(Relaxed), 0);
108 |     for x in 1024..2048 {
109 |         v.push(x);
110 |     }
111 |     assert_ne!(b.grows.load(Relaxed), 0);
112 | 
113 |     // Ensure we trigger a shrink
114 |     v.truncate(1024);
115 |     v.shrink_to_fit();
116 |     assert_eq!(b.shrinks.load(Relaxed), 1);
117 | 
118 |     // Ensure we trigger a deallocation
119 |     assert_eq!(b.deallocs.load(Relaxed), 0);
120 |     drop(v);
121 |     assert_eq!(b.deallocs.load(Relaxed), 1);
122 | }
123 | 
124 | #[test]
125 | fn allocator_grow_zeroed() {
126 |     // Create a new bump arena.
127 |     let ref bump = Bump::new();
128 | 
129 |     // Make an initial allocation.
130 |     let first_layout = Layout::from_size_align(4, 4).expect("create a layout");
131 |     let mut p = bump
132 |         .allocate_zeroed(first_layout)
133 |         .expect("allocate a first chunk");
134 |     let allocated = bump.allocated_bytes();
135 |     unsafe { p.as_mut().fill(42) };
136 |     let p = p.cast();
137 | 
138 |     // Grow the last allocation. 
This should just reserve a few more bytes 139 | // within the current chunk, not allocate a whole new memory block within a 140 | // new chunk. 141 | let second_layout = Layout::from_size_align(8, 4).expect("create a expanded layout"); 142 | let p = unsafe { bump.grow_zeroed(p, first_layout, second_layout) } 143 | .expect("should grow_zeroed okay"); 144 | assert!(bump.allocated_bytes() <= allocated * 2); 145 | assert_eq!(unsafe { p.as_ref() }, [42, 42, 42, 42, 0, 0, 0, 0]); 146 | } 147 | 148 | quickcheck! { 149 | fn allocator_grow_align_increase(layouts: Vec<(usize, usize)>) -> bool { 150 | let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| { 151 | arbitrary_layout(size, align) 152 | }).collect(); 153 | 154 | layouts.sort_by_key(|l| (l.size(), l.align())); 155 | 156 | let b = AllocatorDebug::new(Bump::new()); 157 | let mut layout_iter = layouts.into_iter(); 158 | 159 | if let Some(initial_layout) = layout_iter.next() { 160 | let mut pointer = b.allocate(initial_layout).unwrap(); 161 | if !is_pointer_aligned_to(pointer, initial_layout.align()) { 162 | return false; 163 | } 164 | 165 | let mut old_layout = initial_layout; 166 | 167 | for new_layout in layout_iter { 168 | pointer = unsafe { b.grow(pointer.cast(), old_layout, new_layout).unwrap() }; 169 | if !is_pointer_aligned_to(pointer, new_layout.align()) { 170 | return false; 171 | } 172 | 173 | old_layout = new_layout; 174 | } 175 | } 176 | 177 | true 178 | } 179 | 180 | fn allocator_shrink_align_change(layouts: Vec<(usize, usize)>) -> () { 181 | let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| { 182 | arbitrary_layout(size, align) 183 | }).collect(); 184 | 185 | layouts.sort_by_key(|l| l.size()); 186 | layouts.reverse(); 187 | 188 | let b = AllocatorDebug::new(Bump::new()); 189 | let mut layout_iter = layouts.into_iter(); 190 | 191 | if let Some(initial_layout) = layout_iter.next() { 192 | let mut pointer = b.allocate(initial_layout).unwrap(); 193 | assert!(is_pointer_aligned_to(pointer, initial_layout.align())); 194 | 195 | let mut old_layout = initial_layout; 196 | 197 | for new_layout in layout_iter { 198 | let res = unsafe { b.shrink(pointer.cast(), old_layout, new_layout) }; 199 | if old_layout.align() < new_layout.align() { 200 | match res { 201 | Ok(p) => assert!(is_pointer_aligned_to(p, new_layout.align())), 202 | Err(_) => {} 203 | } 204 | } else { 205 | pointer = res.unwrap(); 206 | assert!(is_pointer_aligned_to(pointer, new_layout.align())); 207 | 208 | old_layout = new_layout; 209 | } 210 | } 211 | } 212 | } 213 | 214 | fn allocator_grow_or_shrink(layouts: Vec<((usize, usize), (usize, usize))>) -> () { 215 | let layouts = layouts 216 | .into_iter() 217 | .map(|((from_size, from_align), (to_size, to_align))| { 218 | let from_layout = arbitrary_layout(from_size, from_align); 219 | let to_layout = arbitrary_layout(to_size, to_align); 220 | (from_layout, to_layout) 221 | }); 222 | 223 | let b = AllocatorDebug::new(Bump::new()); 224 | for (from_layout, to_layout) in layouts { 225 | let pointer = b.allocate(from_layout).unwrap(); 226 | assert!(is_pointer_aligned_to(pointer, from_layout.align())); 227 | let pointer = pointer.cast::(); 228 | 229 | let result = if to_layout.size() <= from_layout.size() { 230 | unsafe { b.shrink(pointer, from_layout, to_layout) } 231 | } else { 232 | unsafe { b.grow(pointer, from_layout, to_layout) } 233 | }; 234 | 235 | match result { 236 | Ok(new_pointer) => { 237 | assert!(is_pointer_aligned_to(new_pointer, to_layout.align())); 238 | } 239 | // Bumpalo can return 
allocation errors in various situations, 240 | // for example if we try to shrink an allocation but also grow 241 | // its alignment in such a way that we cannot satisfy the 242 | // requested alignment, and that is okay. 243 | Err(_) => continue, 244 | } 245 | } 246 | } 247 | } 248 | 249 | #[test] 250 | fn allocator_shrink_layout_change() { 251 | let b = AllocatorDebug::new(Bump::with_capacity(1024)); 252 | 253 | let layout_align4 = Layout::from_size_align(1024, 4).unwrap(); 254 | let layout_align16 = Layout::from_size_align(256, 16).unwrap(); 255 | 256 | // Allocate a chunk of memory and attempt to shrink it while increasing 257 | // alignment requirements. 258 | let p4: NonNull = b.allocate(layout_align4).unwrap().cast(); 259 | let p16_res = unsafe { b.shrink(p4, layout_align4, layout_align16) }; 260 | 261 | // This could either happen to succeed because `p4` already happened to be 262 | // 16-aligned and could be reused, or `bumpalo` could return an error. 263 | match p16_res { 264 | Ok(p16) => assert!(is_pointer_aligned_to(p16, 16)), 265 | Err(_) => {} 266 | } 267 | } 268 | 269 | fn is_pointer_aligned_to(p: NonNull<[u8]>, align: usize) -> bool { 270 | debug_assert!(align.is_power_of_two()); 271 | 272 | let pointer = p.as_ptr() as *mut u8 as usize; 273 | let pointer_aligned = pointer & !(align - 1); 274 | 275 | pointer == pointer_aligned 276 | } 277 | -------------------------------------------------------------------------------- /tests/try_alloc.rs: -------------------------------------------------------------------------------- 1 | use bumpalo::{AllocOrInitError, Bump}; 2 | use rand::Rng; 3 | use std::alloc::{GlobalAlloc, Layout, System}; 4 | use std::sync::atomic::{AtomicBool, Ordering}; 5 | 6 | /// A custom allocator that wraps the system allocator, but lets us force 7 | /// allocation failures for testing. 8 | struct Allocator(AtomicBool); 9 | 10 | impl Allocator { 11 | fn is_returning_null(&self) -> bool { 12 | self.0.load(Ordering::SeqCst) 13 | } 14 | 15 | fn set_returning_null(&self, returning_null: bool) { 16 | self.0.store(returning_null, Ordering::SeqCst); 17 | } 18 | 19 | fn toggle_returning_null(&self) { 20 | self.set_returning_null(!self.is_returning_null()); 21 | } 22 | 23 | #[allow(dead_code)] // Silence warnings for non-"collections" builds. 
24 | fn with_successful_allocs(&self, callback: F) -> T 25 | where 26 | F: FnOnce() -> T, 27 | { 28 | let old_returning_null = self.is_returning_null(); 29 | self.set_returning_null(false); 30 | let result = callback(); 31 | self.set_returning_null(old_returning_null); 32 | result 33 | } 34 | 35 | fn with_alloc_failures(&self, callback: F) -> T 36 | where 37 | F: FnOnce() -> T, 38 | { 39 | let old_returning_null = self.is_returning_null(); 40 | self.set_returning_null(true); 41 | let result = callback(); 42 | self.set_returning_null(old_returning_null); 43 | result 44 | } 45 | } 46 | 47 | unsafe impl GlobalAlloc for Allocator { 48 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 49 | if self.is_returning_null() { 50 | core::ptr::null_mut() 51 | } else { 52 | System.alloc(layout) 53 | } 54 | } 55 | 56 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 57 | System.dealloc(ptr, layout); 58 | } 59 | 60 | unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { 61 | if self.is_returning_null() { 62 | core::ptr::null_mut() 63 | } else { 64 | System.realloc(ptr, layout, new_size) 65 | } 66 | } 67 | } 68 | 69 | #[global_allocator] 70 | static GLOBAL_ALLOCATOR: Allocator = Allocator(AtomicBool::new(false)); 71 | 72 | /// `assert!` may allocate on failure (e.g. for string formatting and boxing 73 | /// panic info), so we must re-enable allocations during assertions. 74 | macro_rules! assert { 75 | ($cond:expr $(, $args:tt)*) => { 76 | if !$cond { 77 | GLOBAL_ALLOCATOR.set_returning_null(false); 78 | panic!(concat!("Assertion failed: ", stringify!($cond))); 79 | } 80 | }; 81 | } 82 | 83 | /// NB: We provide our own `main` rather than using the default test harness's 84 | /// so that we can ensure that tests are executed serially, and no background 85 | /// threads get tripped up by us disabling the global allocator, or anything 86 | /// like that. 87 | fn main() { 88 | macro_rules! test { 89 | ($name:expr, $test:expr $(,)*) => { 90 | ($name, $test as fn()) 91 | }; 92 | } 93 | 94 | fn test_static_size_alloc(assert_alloc_ok: fn(bump: &Bump), assert_alloc_err: fn(bump: &Bump)) { 95 | // Unlike with `try_alloc_layout`, it's not that easy to test a variety 96 | // of size/capacity combinations here. 97 | // Since nothing in Bump is really random, and we have to start fresh 98 | // each time, just checking each case once is enough. 99 | for &fail_alloc in &[false, true] { 100 | let bump = GLOBAL_ALLOCATOR.with_successful_allocs(|| { 101 | // We can't query the remaining free space in the current chunk, 102 | // so we have to create a new Bump for each test and fill it to 103 | // the brink of a new allocation. 
104 | let bump = Bump::try_new().unwrap(); 105 | 106 | // Bump preallocates space in the initial chunk, so we need to 107 | // use up this block prior to the actual test 108 | let layout = Layout::from_size_align(bump.chunk_capacity(), 1).unwrap(); 109 | assert!(bump.try_alloc_layout(layout).is_ok()); 110 | 111 | bump 112 | }); 113 | 114 | GLOBAL_ALLOCATOR.set_returning_null(fail_alloc); 115 | 116 | if fail_alloc { 117 | assert_alloc_err(&bump); 118 | } else { 119 | assert_alloc_ok(&bump); 120 | } 121 | } 122 | } 123 | 124 | let tests = [ 125 | test!("Bump::try_new fails when global allocator fails", || { 126 | GLOBAL_ALLOCATOR.with_alloc_failures(|| { 127 | assert!(Bump::try_with_capacity(1).is_err()); 128 | }); 129 | }), 130 | test!( 131 | "test try_alloc_layout with and without global allocation failures", 132 | || { 133 | const NUM_TESTS: usize = 5000; 134 | const MAX_BYTES_ALLOCATED: usize = 65536; 135 | 136 | let mut bump = Bump::try_new().unwrap(); 137 | let mut bytes_allocated = bump.chunk_capacity(); 138 | 139 | // Bump preallocates space in the initial chunk, so we need to 140 | // use up this block prior to the actual test 141 | let layout = Layout::from_size_align(bump.chunk_capacity(), 1).unwrap(); 142 | assert!(bump.try_alloc_layout(layout).is_ok()); 143 | 144 | let mut rng = rand::thread_rng(); 145 | 146 | for _ in 0..NUM_TESTS { 147 | if rng.gen() { 148 | GLOBAL_ALLOCATOR.toggle_returning_null(); 149 | } 150 | 151 | let layout = Layout::from_size_align(bump.chunk_capacity() + 1, 1).unwrap(); 152 | if GLOBAL_ALLOCATOR.is_returning_null() { 153 | assert!(bump.try_alloc_layout(layout).is_err()); 154 | } else { 155 | assert!(bump.try_alloc_layout(layout).is_ok()); 156 | bytes_allocated += bump.chunk_capacity(); 157 | } 158 | 159 | if bytes_allocated >= MAX_BYTES_ALLOCATED { 160 | bump = GLOBAL_ALLOCATOR.with_successful_allocs(|| Bump::try_new().unwrap()); 161 | bytes_allocated = bump.chunk_capacity(); 162 | } 163 | } 164 | }, 165 | ), 166 | test!( 167 | "test try_alloc with and without global allocation failures", 168 | || { 169 | test_static_size_alloc( 170 | |bump| assert!(bump.try_alloc(1u8).is_ok()), 171 | |bump| assert!(bump.try_alloc(1u8).is_err()), 172 | ); 173 | }, 174 | ), 175 | test!( 176 | "test try_alloc_with with and without global allocation failures", 177 | || { 178 | test_static_size_alloc( 179 | |bump| assert!(bump.try_alloc_with(|| 1u8).is_ok()), 180 | |bump| assert!(bump.try_alloc_with(|| 1u8).is_err()), 181 | ); 182 | }, 183 | ), 184 | test!( 185 | "test try_alloc_try_with (Ok) with and without global allocation failures", 186 | || { 187 | test_static_size_alloc( 188 | |bump| assert!(bump.try_alloc_try_with::<_, _, ()>(|| Ok(1u8)).is_ok()), 189 | |bump| assert!(bump.try_alloc_try_with::<_, _, ()>(|| Ok(1u8)).is_err()), 190 | ); 191 | }, 192 | ), 193 | test!( 194 | "test try_alloc_try_with (Err) with and without global allocation failures", 195 | || { 196 | test_static_size_alloc( 197 | |bump| { 198 | assert!(matches!( 199 | bump.try_alloc_try_with::<_, u8, _>(|| Err(())), 200 | Err(AllocOrInitError::Init(_)) 201 | )); 202 | }, 203 | |bump| { 204 | assert!(matches!( 205 | bump.try_alloc_try_with::<_, u8, _>(|| Err(())), 206 | Err(AllocOrInitError::Alloc(_)) 207 | )); 208 | }, 209 | ); 210 | }, 211 | ), 212 | #[cfg(feature = "collections")] 213 | test!("test Vec::try_reserve and Vec::try_reserve_exact", || { 214 | use bumpalo::collections::Vec; 215 | 216 | let bump = Bump::try_new().unwrap(); 217 | 218 | GLOBAL_ALLOCATOR.with_alloc_failures(|| { 219 | let mut 
vec = Vec::::new_in(&bump); 220 | let chunk_cap = bump.chunk_capacity(); 221 | 222 | // Will always succeed since this size gets pre-allocated in Bump::try_new() 223 | assert!(vec.try_reserve(chunk_cap).is_ok()); 224 | assert!(vec.try_reserve_exact(chunk_cap).is_ok()); 225 | 226 | // Fails to allocate further since allocator returns null 227 | assert!(vec.try_reserve(chunk_cap + 1).is_err()); 228 | assert!(vec.try_reserve_exact(chunk_cap + 1).is_err()); 229 | }); 230 | 231 | GLOBAL_ALLOCATOR.with_successful_allocs(|| { 232 | let mut vec = Vec::::new_in(&bump); 233 | let chunk_cap = bump.chunk_capacity(); 234 | 235 | // Will always succeed since this size gets pre-allocated in Bump::try_new() 236 | assert!(vec.try_reserve(chunk_cap).is_ok()); 237 | assert!(vec.try_reserve_exact(chunk_cap).is_ok()); 238 | 239 | // Succeeds to allocate further 240 | assert!(vec.try_reserve(chunk_cap + 1).is_ok()); 241 | assert!(vec.try_reserve_exact(chunk_cap + 1).is_ok()); 242 | }); 243 | }), 244 | ]; 245 | 246 | for (name, test) in tests.iter() { 247 | assert!(!GLOBAL_ALLOCATOR.is_returning_null()); 248 | 249 | eprintln!("=== {} ===", name); 250 | test(); 251 | 252 | GLOBAL_ALLOCATOR.set_returning_null(false); 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/tests/all/quickchecks.rs:
--------------------------------------------------------------------------------
  1 | use crate::quickcheck;
  2 | use ::quickcheck::{Arbitrary, Gen};
  3 | use bumpalo::Bump;
  4 | use std::mem;
  5 | 
  6 | #[derive(Clone, Debug, PartialEq)]
  7 | struct BigValue {
  8 |     data: [u64; 32],
  9 | }
 10 | 
 11 | impl BigValue {
 12 |     fn new(x: u64) -> BigValue {
 13 |         BigValue {
 14 |             data: [
 15 |                 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
 16 |                 x, x, x, x,
 17 |             ],
 18 |         }
 19 |     }
 20 | }
 21 | 
 22 | impl Arbitrary for BigValue {
 23 |     fn arbitrary(g: &mut Gen) -> BigValue {
 24 |         BigValue::new(u64::arbitrary(g))
 25 |     }
 26 | }
 27 | 
 28 | #[derive(Clone, Debug)]
 29 | enum Elems<T, U> {
 30 |     OneT(T),
 31 |     TwoT(T, T),
 32 |     FourT(T, T, T, T),
 33 |     OneU(U),
 34 |     TwoU(U, U),
 35 |     FourU(U, U, U, U),
 36 | }
 37 | 
 38 | impl<T, U> Arbitrary for Elems<T, U>
 39 | where
 40 |     T: Arbitrary + Clone,
 41 |     U: Arbitrary + Clone,
 42 | {
 43 |     fn arbitrary(g: &mut Gen) -> Elems<T, U> {
 44 |         let x: u8 = u8::arbitrary(g);
 45 |         match x % 6 {
 46 |             0 => Elems::OneT(T::arbitrary(g)),
 47 |             1 => Elems::TwoT(T::arbitrary(g), T::arbitrary(g)),
 48 |             2 => Elems::FourT(
 49 |                 T::arbitrary(g),
 50 |                 T::arbitrary(g),
 51 |                 T::arbitrary(g),
 52 |                 T::arbitrary(g),
 53 |             ),
 54 |             3 => Elems::OneU(U::arbitrary(g)),
 55 |             4 => Elems::TwoU(U::arbitrary(g), U::arbitrary(g)),
 56 |             5 => Elems::FourU(
 57 |                 U::arbitrary(g),
 58 |                 U::arbitrary(g),
 59 |                 U::arbitrary(g),
 60 |                 U::arbitrary(g),
 61 |             ),
 62 |             _ => unreachable!(),
 63 |         }
 64 |     }
 65 | 
 66 |     fn shrink(&self) -> Box<dyn Iterator<Item = Elems<T, U>>> {
 67 |         match self {
 68 |             Elems::OneT(_) => Box::new(vec![].into_iter()),
 69 |             Elems::TwoT(a, b) => {
 70 |                 Box::new(vec![Elems::OneT(a.clone()), Elems::OneT(b.clone())].into_iter())
 71 |             }
 72 |             Elems::FourT(a, b, c, d) => Box::new(
 73 |                 vec![
 74 |                     Elems::TwoT(a.clone(), b.clone()),
 75 |                     Elems::TwoT(a.clone(), c.clone()),
 76 |                     Elems::TwoT(a.clone(), d.clone()),
 77 |                     Elems::TwoT(b.clone(), c.clone()),
 78 |                     Elems::TwoT(b.clone(), d.clone()),
 79 |                     Elems::TwoT(c.clone(), d.clone()),
 80 |                 ]
 81 |                 .into_iter(),
 82 |             ),
 83 |             Elems::OneU(_) => Box::new(vec![].into_iter()),
 84 |             Elems::TwoU(a, b) => {
 85 |                 Box::new(vec![Elems::OneU(a.clone()), Elems::OneU(b.clone())].into_iter())
 86 |             }
 87 |             Elems::FourU(a, b, c, d) => Box::new(
 88 |                 vec![
 89 |                     Elems::TwoU(a.clone(), b.clone()),
 90 |                     Elems::TwoU(a.clone(), c.clone()),
 91 |                     Elems::TwoU(a.clone(), d.clone()),
 92 |                     Elems::TwoU(b.clone(), c.clone()),
 93 |                     Elems::TwoU(b.clone(), d.clone()),
 94 |                     Elems::TwoU(c.clone(), d.clone()),
 95 |                 ]
 96 |                 .into_iter(),
 97 |             ),
 98 |         }
 99 |     }
100 | }
101 | 
102 | fn overlap((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
103 |     assert!(a1 < a2);
104 |     assert!(b1 < b2);
105 |     a1 < b2 && b1 < a2
106 | }
107 | 
108 | // Returns whether `(b1, b2)` is contained in `(a1, a2)`.
109 | fn contains((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
110 |     assert!(a1 < a2);
111 |     assert!(b1 < b2);
112 |     a1 <= b1 && b2 <= a2
113 | }
114 | 
115 | fn range<T>(t: &T) -> (usize, usize) {
116 |     let start = t as *const _ as usize;
117 |     let end = start + mem::size_of::<T>();
118 |     (start, end)
119 | }
120 | 
121 | quickcheck!
{ 122 | fn can_allocate_big_values(values: Vec) -> () { 123 | let bump = Bump::new(); 124 | let mut alloced = vec![]; 125 | 126 | for vals in values.iter().cloned() { 127 | alloced.push(bump.alloc(vals)); 128 | } 129 | 130 | for (vals, alloc) in values.iter().zip(alloced.into_iter()) { 131 | assert_eq!(vals, alloc); 132 | } 133 | } 134 | 135 | fn big_allocations_never_overlap(values: Vec) -> () { 136 | let bump = Bump::new(); 137 | let mut alloced = vec![]; 138 | 139 | for v in values { 140 | let a = bump.alloc(v); 141 | let start = a as *const _ as usize; 142 | let end = unsafe { (a as *const BigValue).offset(1) as usize }; 143 | let range = (start, end); 144 | 145 | for r in &alloced { 146 | assert!(!overlap(*r, range)); 147 | } 148 | 149 | alloced.push(range); 150 | } 151 | } 152 | 153 | fn can_allocate_heterogeneous_things_and_they_dont_overlap(things: Vec>) -> () { 154 | let bump = Bump::new(); 155 | let mut ranges = vec![]; 156 | 157 | for t in things { 158 | let r = match t { 159 | Elems::OneT(a) => { 160 | range(bump.alloc(a)) 161 | }, 162 | Elems::TwoT(a, b) => { 163 | range(bump.alloc([a, b])) 164 | }, 165 | Elems::FourT(a, b, c, d) => { 166 | range(bump.alloc([a, b, c, d])) 167 | }, 168 | Elems::OneU(a) => { 169 | range(bump.alloc(a)) 170 | }, 171 | Elems::TwoU(a, b) => { 172 | range(bump.alloc([a, b])) 173 | }, 174 | Elems::FourU(a, b, c, d) => { 175 | range(bump.alloc([a, b, c, d])) 176 | }, 177 | }; 178 | 179 | for s in &ranges { 180 | assert!(!overlap(r, *s)); 181 | } 182 | 183 | ranges.push(r); 184 | } 185 | } 186 | 187 | 188 | fn test_alignment_chunks(sizes: Vec) -> () { 189 | const SUPPORTED_ALIGNMENTS: &[usize] = &[1, 2, 4, 8, 16]; 190 | for &alignment in SUPPORTED_ALIGNMENTS { 191 | let mut b = Bump::with_capacity(513); 192 | let mut sizes = sizes.iter().map(|&size| (size % 10) * alignment).collect::>(); 193 | 194 | for &size in &sizes { 195 | let layout = std::alloc::Layout::from_size_align(size, alignment).unwrap(); 196 | let ptr = b.alloc_layout(layout).as_ptr() as *const u8 as usize; 197 | assert_eq!(ptr % alignment, 0); 198 | } 199 | 200 | for chunk in b.iter_allocated_chunks() { 201 | let mut remaining = chunk.len(); 202 | while remaining > 0 { 203 | let size = sizes.pop().expect("too many bytes in the chunk output"); 204 | assert!(remaining >= size, "returned chunk contained padding"); 205 | remaining -= size; 206 | } 207 | } 208 | assert_eq!(sizes.into_iter().sum::(), 0); 209 | } 210 | } 211 | 212 | fn alloc_slices(allocs: Vec<(u8, usize)>) -> () { 213 | let b = Bump::new(); 214 | let mut allocated: Vec<(usize, usize)> = vec![]; 215 | for (val, len) in allocs { 216 | let len = len % 100; 217 | let s = b.alloc_slice_fill_copy(len, val); 218 | 219 | assert_eq!(s.len(), len); 220 | assert!(s.iter().all(|v| v == &val)); 221 | 222 | let range = (s.as_ptr() as usize, unsafe { s.as_ptr().add(s.len()) } as usize); 223 | for r in &allocated { 224 | let no_overlap = range.1 <= r.0 || r.1 <= range.0; 225 | assert!(no_overlap); 226 | } 227 | allocated.push(range); 228 | } 229 | } 230 | 231 | fn alloc_strs(allocs: Vec) -> () { 232 | let b = Bump::new(); 233 | let allocated: Vec<&str> = allocs.iter().map(|s| b.alloc_str(s) as &_).collect(); 234 | for (val, alloc) in allocs.into_iter().zip(allocated) { 235 | assert_eq!(val, alloc); 236 | } 237 | } 238 | 239 | fn all_allocations_in_a_chunk(values: Vec) -> () { 240 | let b = Bump::new(); 241 | let allocated: Vec<&BigValue> = values.into_iter().map(|val| b.alloc(val) as &_).collect(); 242 | let chunks: Vec<(*mut u8, usize)> = 
unsafe { b.iter_allocated_chunks_raw() }.collect(); 243 | for alloc in allocated.into_iter() { 244 | assert!(chunks.iter().any(|&(ptr, size)| { 245 | let ptr = ptr as usize; 246 | let chunk = (ptr, ptr + size); 247 | contains(chunk, range(alloc)) 248 | })); 249 | } 250 | } 251 | 252 | fn chunks_and_raw_chunks_are_same(values: Vec) -> () { 253 | let mut b = Bump::new(); 254 | for val in values { 255 | b.alloc(val); 256 | } 257 | let raw_chunks: Vec<(_, _)> = unsafe { b.iter_allocated_chunks_raw() }.collect(); 258 | let chunks: Vec<&[_]> = b.iter_allocated_chunks().collect(); 259 | assert_eq!(raw_chunks.len(), chunks.len()); 260 | for ((ptr, size), chunk) in raw_chunks.into_iter().zip(chunks) { 261 | assert_eq!(ptr as *const _, chunk.as_ptr() as *const _); 262 | assert_eq!(size, chunk.len()); 263 | } 264 | } 265 | 266 | // MIRI exits with failure when we try to allocate more memory than its 267 | // sandbox has, rather than returning null from the allocation 268 | // function. This test runs afoul of that bug. 269 | #[cfg(not(miri))] 270 | fn limit_is_never_exceeded(limit: usize) -> bool { 271 | let bump = Bump::new(); 272 | 273 | bump.set_allocation_limit(Some(limit)); 274 | 275 | // The exact numbers here on how much to allocate are a bit murky but we 276 | // have two main goals. 277 | // 278 | // - Attempt to allocate over the allocation limit imposed 279 | // - Allocate in increments small enough that at least a few allocations succeed 280 | let layout = std::alloc::Layout::array::(limit / 16).unwrap(); 281 | for _ in 0..32 { 282 | let _ = bump.try_alloc_layout(layout); 283 | } 284 | 285 | bump.allocated_bytes() <= limit 286 | } 287 | 288 | fn allocated_bytes_including_metadata(allocs: Vec) -> () { 289 | let b = Bump::new(); 290 | let mut slice_bytes = 0; 291 | let allocs_len = allocs.len(); 292 | for len in allocs { 293 | const MAX_LEN: usize = 512; 294 | let len = len % MAX_LEN; 295 | b.alloc_slice_fill_copy(len, 0); 296 | slice_bytes += len; 297 | let allocated_bytes = b.allocated_bytes(); 298 | let allocated_bytes_including_metadata = b.allocated_bytes_including_metadata(); 299 | if slice_bytes == 0 { 300 | assert_eq!(allocated_bytes, 0); 301 | assert_eq!(allocated_bytes_including_metadata, 0); 302 | } else { 303 | assert!(allocated_bytes >= slice_bytes); 304 | assert!(allocated_bytes_including_metadata > allocated_bytes); 305 | assert!(allocated_bytes_including_metadata < allocated_bytes + allocs_len * 100); 306 | } 307 | } 308 | } 309 | 310 | #[cfg(feature = "collections")] 311 | fn extending_from_slice(data1: Vec, data2: Vec) -> () { 312 | let bump = Bump::new(); 313 | 314 | // Create a bumpalo Vec with the contents of `data1` 315 | let mut vec = bumpalo::collections::Vec::new_in(&bump); 316 | vec.extend_from_slice_copy(&data1); 317 | assert_eq!(vec.as_slice(), data1); 318 | 319 | // Extend the Vec using the contents of `data2` 320 | vec.extend_from_slice_copy(&data2); 321 | // Confirm that the Vec now has the expected number of items 322 | assert_eq!(vec.len(), data1.len() + data2.len()); 323 | // Confirm that the beginning of the Vec matches `data1`'s elements 324 | assert_eq!(&vec[0..data1.len()], data1); 325 | // Confirm that the end of the Vec matches `data2`'s elements 326 | assert_eq!(&vec[data1.len()..], data2); 327 | } 328 | 329 | #[cfg(feature = "collections")] 330 | fn extending_from_slices(data: Vec>) -> () { 331 | let bump = Bump::new(); 332 | 333 | // Convert the Vec> into a &[&[usize]] 334 | let slices_vec: Vec<&[usize]> = data.iter().map(Vec::as_slice).collect(); 
335 | let slices = slices_vec.as_slice(); 336 | 337 | // Isolate the first slice from the remaining slices. If `slices` is empty, 338 | // fall back to empty slices for both. 339 | let (first_slice, remaining_slices) = match slices { 340 | [head, tail @ ..] => (*head, tail), 341 | [] => (&[][..], &[][..]) 342 | }; 343 | 344 | // Create a bumpalo `Vec` and populate it with the contents of the first slice. 345 | let mut vec = bumpalo::collections::Vec::new_in(&bump); 346 | vec.extend_from_slice_copy(first_slice); 347 | assert_eq!(vec.as_slice(), first_slice); 348 | 349 | // Append all of the other slices onto the end of the Vec 350 | vec.extend_from_slices_copy(remaining_slices); 351 | 352 | let total_length: usize = slices.iter().map(|s| s.len()).sum(); 353 | assert_eq!(vec.len(), total_length); 354 | 355 | let total_data: Vec = slices.iter().flat_map(|s| s.iter().copied()).collect(); 356 | assert_eq!(vec.as_slice(), total_data.as_slice()); 357 | } 358 | 359 | #[cfg(feature = "collections")] 360 | fn compare_extending_from_slice_and_from_slices(data: Vec>) -> () { 361 | let bump = Bump::new(); 362 | 363 | // Convert the Vec> into a &[&[usize]] 364 | let slices_vec: Vec<&[usize]> = data.iter().map(Vec::as_slice).collect(); 365 | let slices = slices_vec.as_slice(); 366 | 367 | // Isolate the first slice from the remaining slices. If `slices` is empty, 368 | // fall back to empty slices for both. 369 | let (first_slice, remaining_slices) = match slices { 370 | [head, tail @ ..] => (*head, tail), 371 | [] => (&[][..], &[][..]) 372 | }; 373 | 374 | // Create a bumpalo `Vec` and populate it with the contents of the first slice. 375 | let mut vec1 = bumpalo::collections::Vec::new_in(&bump); 376 | vec1.extend_from_slice_copy(first_slice); 377 | assert_eq!(vec1.as_slice(), first_slice); 378 | 379 | // Append each remaining slice individually 380 | for slice in remaining_slices { 381 | vec1.extend_from_slice_copy(slice); 382 | } 383 | 384 | // Create a second Vec populated with the contents of the first slice. 
385 |         let mut vec2 = bumpalo::collections::Vec::new_in(&bump);
386 |         vec2.extend_from_slice_copy(first_slice);
387 |         assert_eq!(vec2.as_slice(), first_slice);
388 | 
389 |         // Append the remaining slices en masse
390 |         vec2.extend_from_slices_copy(remaining_slices);
391 | 
392 |         // Confirm that the two approaches to extending a Vec resulted in the same data
393 |         assert_eq!(vec1, vec2);
394 |     }
395 | }
--------------------------------------------------------------------------------
/benches/benches.rs:
--------------------------------------------------------------------------------
  1 | use criterion::*;
  2 | 
  3 | #[derive(Default)]
  4 | struct Small(u8);
  5 | 
  6 | #[derive(Default)]
  7 | struct Big([usize; 32]);
  8 | 
  9 | fn alloc<T: Default>(n: usize) {
 10 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<T>());
 11 |     for _ in 0..n {
 12 |         let arena = black_box(&arena);
 13 |         let val: &mut T = arena.alloc(black_box(Default::default()));
 14 |         black_box(val);
 15 |     }
 16 | }
 17 | 
 18 | fn alloc_with<T: Default>(n: usize) {
 19 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<T>());
 20 |     for _ in 0..n {
 21 |         let arena = black_box(&arena);
 22 |         let val: &mut T = arena.alloc_with(|| black_box(Default::default()));
 23 |         black_box(val);
 24 |     }
 25 | }
 26 | 
 27 | fn alloc_try_with<T: Default, E>(n: usize) {
 28 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<Result<T, E>>());
 29 |     for _ in 0..n {
 30 |         let arena = black_box(&arena);
 31 |         let val: Result<&mut T, E> = arena.alloc_try_with(|| black_box(Ok(Default::default())));
 32 |         let _ = black_box(val);
 33 |     }
 34 | }
 35 | 
 36 | fn alloc_try_with_err<T, E: Default>(n: usize) {
 37 |     // Only enough capacity for one, since the allocation is undone.
 38 |     let arena = bumpalo::Bump::with_capacity(std::mem::size_of::<Result<T, E>>());
 39 |     for _ in 0..n {
 40 |         let arena = black_box(&arena);
 41 |         let val: Result<&mut T, E> = arena.alloc_try_with(|| black_box(Err(Default::default())));
 42 |         let _ = black_box(val);
 43 |     }
 44 | }
 45 | 
 46 | fn try_alloc<T: Default>(n: usize) {
 47 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<T>());
 48 |     for _ in 0..n {
 49 |         let arena = black_box(&arena);
 50 |         let val: Result<&mut T, _> = arena.try_alloc(black_box(Default::default()));
 51 |         let _ = black_box(val);
 52 |     }
 53 | }
 54 | 
 55 | fn try_alloc_with<T: Default>(n: usize) {
 56 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<T>());
 57 |     for _ in 0..n {
 58 |         let arena = black_box(&arena);
 59 |         let val: Result<&mut T, _> = arena.try_alloc_with(|| black_box(Default::default()));
 60 |         let _ = black_box(val);
 61 |     }
 62 | }
 63 | 
 64 | fn try_alloc_try_with<T: Default, E>(n: usize) {
 65 |     let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::<Result<T, E>>());
 66 |     for _ in 0..n {
 67 |         let arena = black_box(&arena);
 68 |         let val: Result<&mut T, bumpalo::AllocOrInitError<E>> =
 69 |             arena.try_alloc_try_with(|| black_box(Ok(Default::default())));
 70 |         let _ = black_box(val);
 71 |     }
 72 | }
 73 | 
 74 | fn try_alloc_try_with_err<T, E: Default>(n: usize) {
 75 |     // Only enough capacity for one, since the allocation is undone.
 76 |     let arena = bumpalo::Bump::with_capacity(std::mem::size_of::<Result<T, E>>());
 77 |     for _ in 0..n {
 78 |         let arena = black_box(&arena);
 79 |         let val: Result<&mut T, bumpalo::AllocOrInitError<E>> =
 80 |             arena.try_alloc_try_with(|| black_box(Err(Default::default())));
 81 |         let _ = black_box(val);
 82 |     }
 83 | }
 84 | 
 85 | #[cfg(feature = "collections")]
 86 | fn format_realloc(bump: &bumpalo::Bump, n: usize) {
 87 |     let n = criterion::black_box(n);
 88 |     let s = bumpalo::format!(in bump, "Hello {:.*}", n, "World! ");
"); 89 | criterion::black_box(s); 90 | } 91 | 92 | #[cfg(feature = "collections")] 93 | fn string_from_str_in(bump: &bumpalo::Bump, str: &str) { 94 | let str = criterion::black_box(str); 95 | let s = bumpalo::collections::string::String::from_str_in(str, bump); 96 | criterion::black_box(s); 97 | } 98 | 99 | #[cfg(feature = "collections")] 100 | fn string_push_str(bump: &bumpalo::Bump, str: &str) { 101 | let str = criterion::black_box(str); 102 | let mut s = bumpalo::collections::string::String::with_capacity_in(str.len(), bump); 103 | s.push_str(str); 104 | criterion::black_box(s); 105 | } 106 | 107 | #[cfg(feature = "collections")] 108 | fn extend_u8(bump: &bumpalo::Bump, slice: &[u8]) { 109 | let slice = criterion::black_box(slice); 110 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 111 | vec.extend(slice.iter().copied()); 112 | criterion::black_box(vec); 113 | } 114 | 115 | #[cfg(feature = "collections")] 116 | fn extend_from_slice_u8(bump: &bumpalo::Bump, slice: &[u8]) { 117 | let slice = criterion::black_box(slice); 118 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 119 | vec.extend_from_slice(slice); 120 | criterion::black_box(vec); 121 | } 122 | 123 | #[cfg(feature = "collections")] 124 | fn extend_from_slice_copy_u8(bump: &bumpalo::Bump, slice: &[u8]) { 125 | let slice = criterion::black_box(slice); 126 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 127 | vec.extend_from_slice_copy(slice); 128 | criterion::black_box(vec); 129 | } 130 | 131 | const ALLOCATIONS: usize = 10_000; 132 | 133 | fn bench_extend_from_slice_copy(c: &mut Criterion) { 134 | let lengths = &[ 135 | 4usize, 136 | 5, 137 | 8, 138 | 11, 139 | 16, 140 | 64, 141 | 128, 142 | 331, 143 | 1024, 144 | 4 * 1024, 145 | 16 * 1024, 146 | ]; 147 | 148 | for len in lengths.iter().copied() { 149 | let str = "x".repeat(len); 150 | let mut group = c.benchmark_group(format!("extend {len} bytes")); 151 | group.throughput(Throughput::Elements(len as u64)); 152 | group.bench_function("extend", |b| { 153 | let mut bump = bumpalo::Bump::with_capacity(len); 154 | b.iter(|| { 155 | bump.reset(); 156 | extend_u8(&bump, str.as_bytes()); 157 | }); 158 | }); 159 | group.bench_function("extend_from_slice", |b| { 160 | let mut bump = bumpalo::Bump::with_capacity(len); 161 | let str = "x".repeat(len); 162 | b.iter(|| { 163 | bump.reset(); 164 | extend_from_slice_u8(&bump, str.as_bytes()); 165 | }); 166 | }); 167 | group.bench_function("extend_from_slice_copy", |b| { 168 | let mut bump = bumpalo::Bump::with_capacity(len); 169 | let str = "x".repeat(len); 170 | b.iter(|| { 171 | bump.reset(); 172 | extend_from_slice_copy_u8(&bump, str.as_bytes()); 173 | }); 174 | }); 175 | group.finish(); 176 | } 177 | } 178 | 179 | fn bench_extend_from_slices_copy(c: &mut Criterion) { 180 | // The number of slices that will be copied into the Vec 181 | let slice_counts = &[1, 2, 4, 8, 16, 32]; 182 | 183 | // Whether the Bump and its Vec have will already enough space to store the data without 184 | // requiring reallocation 185 | let is_preallocated_settings = &[false, true]; 186 | 187 | // Slices that can be used to extend the Vec; each may be used more than once. 188 | let data: [&[u8]; 4] = [ 189 | black_box(b"wwwwwwwwwwwwwwww"), 190 | black_box(b"xxxxxxxxxxxxxxxx"), 191 | black_box(b"yyyyyyyyyyyyyyyy"), 192 | black_box(b"zzzzzzzzzzzzzzzz"), 193 | ]; 194 | 195 | // For each (`is_preallocated`, `num_slices`) pair... 
196 |     for is_preallocated in is_preallocated_settings {
197 |         for num_slices in slice_counts.iter().copied() {
198 |             // Create an appropriately named benchmark group
199 |             let mut group = c.benchmark_group(
200 |                 format!("extend_from_slices num_slices={num_slices}, is_preallocated={is_preallocated}")
201 |             );
202 | 
203 |             // Cycle over `data` to construct a slice of slices to append
204 |             let slices = data
205 |                 .iter()
206 |                 .copied()
207 |                 .cycle()
208 |                 .take(num_slices)
209 |                 .collect::<Vec<_>>();
210 |             let total_size = slices.iter().map(|s| s.len()).sum();
211 | 
212 |             // If `is_preallocated` is true, both the Bump and the benchmark Vecs will have enough
213 |             // capacity to store the concatenated data. If it's false, the Bump and the Vec start
214 |             // out with no capacity allocated and grow on demand.
215 |             let size_to_allocate = match is_preallocated {
216 |                 true => total_size,
217 |                 false => 0,
218 |             };
219 |             let mut bump = bumpalo::Bump::with_capacity(size_to_allocate);
220 | 
221 |             // This benchmark demonstrates the performance of looping over the slice-of-slices,
222 |             // calling `extend_from_slice_copy` (and transitively, `reserve`) for each slice.
223 |             group.bench_function("loop over extend_from_slice_copy", |b| {
224 |                 b.iter(|| {
225 |                     bump.reset();
226 |                     let mut vec = bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
227 |                     for slice in black_box(&slices) {
228 |                         vec.extend_from_slice_copy(slice);
229 |                     }
230 |                     black_box(vec.as_slice());
231 |                 });
232 |             });
233 | 
234 |             // This benchmark demonstrates the performance of using a single call to
235 |             // `extend_from_slices_copy`, which performs a single `reserve` before appending
236 |             // all of the slices.
237 |             group.bench_function("extend_from_slices_copy", |b| {
238 |                 b.iter(|| {
239 |                     bump.reset();
240 |                     let mut vec = bumpalo::collections::Vec::<u8>::with_capacity_in(size_to_allocate, &bump);
241 |                     vec.extend_from_slices_copy(black_box(slices.as_slice()));
242 |                     black_box(vec.as_slice());
243 |                 });
244 |             });
245 | 
246 |             group.finish();
247 |         }
248 |     }
249 | }
250 | 
251 | fn bench_alloc(c: &mut Criterion) {
252 |     let mut group = c.benchmark_group("alloc");
253 |     group.throughput(Throughput::Elements(ALLOCATIONS as u64));
254 |     group.bench_function("small", |b| b.iter(|| alloc::<Small>(ALLOCATIONS)));
255 |     group.bench_function("big", |b| b.iter(|| alloc::<Big>(ALLOCATIONS)));
256 | }
257 | 
258 | fn bench_alloc_with(c: &mut Criterion) {
259 |     let mut group = c.benchmark_group("alloc-with");
260 |     group.throughput(Throughput::Elements(ALLOCATIONS as u64));
261 |     group.bench_function("small", |b| b.iter(|| alloc_with::<Small>(ALLOCATIONS)));
262 |     group.bench_function("big", |b| b.iter(|| alloc_with::<Big>(ALLOCATIONS)));
263 | }
264 | 
265 | fn bench_alloc_try_with(c: &mut Criterion) {
266 |     let mut group = c.benchmark_group("alloc-try-with");
267 |     group.throughput(Throughput::Elements(ALLOCATIONS as u64));
268 |     group.bench_function("small, small", |b| {
269 |         b.iter(|| alloc_try_with::<Small, Small>(ALLOCATIONS))
270 |     });
271 |     group.bench_function("small, big", |b| {
272 |         b.iter(|| alloc_try_with::<Small, Big>(ALLOCATIONS))
273 |     });
274 |     group.bench_function("big, small", |b| {
275 |         b.iter(|| alloc_try_with::<Big, Small>(ALLOCATIONS))
276 |     });
277 |     group.bench_function("big, big", |b| {
278 |         b.iter(|| alloc_try_with::<Big, Big>(ALLOCATIONS))
279 |     });
280 | }
281 | 
282 | fn bench_alloc_try_with_err(c: &mut Criterion) {
283 |     let mut group = c.benchmark_group("alloc-try-with-err");
284 |     group.throughput(Throughput::Elements(ALLOCATIONS as u64));
group.bench_function("small, small", |b| { 286 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 287 | }); 288 | group.bench_function("small, big", |b| { 289 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 290 | }); 291 | group.bench_function("big, small", |b| { 292 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 293 | }); 294 | group.bench_function("big, big", |b| { 295 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 296 | }); 297 | } 298 | 299 | fn bench_try_alloc(c: &mut Criterion) { 300 | let mut group = c.benchmark_group("try-alloc"); 301 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 302 | group.bench_function("small", |b| b.iter(|| try_alloc::(ALLOCATIONS))); 303 | group.bench_function("big", |b| b.iter(|| try_alloc::(ALLOCATIONS))); 304 | } 305 | 306 | fn bench_try_alloc_with(c: &mut Criterion) { 307 | let mut group = c.benchmark_group("try-alloc-with"); 308 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 309 | group.bench_function("small", |b| b.iter(|| try_alloc_with::(ALLOCATIONS))); 310 | group.bench_function("big", |b| b.iter(|| try_alloc_with::(ALLOCATIONS))); 311 | } 312 | 313 | fn bench_try_alloc_try_with(c: &mut Criterion) { 314 | let mut group = c.benchmark_group("try-alloc-try-with"); 315 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 316 | group.bench_function("small, small", |b| { 317 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 318 | }); 319 | group.bench_function("small, big", |b| { 320 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 321 | }); 322 | group.bench_function("big, small", |b| { 323 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 324 | }); 325 | group.bench_function("big, big", |b| { 326 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 327 | }); 328 | } 329 | 330 | fn bench_try_alloc_try_with_err(c: &mut Criterion) { 331 | let mut group = c.benchmark_group("try-alloc-try-with-err"); 332 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 333 | group.bench_function("small, small", |b| { 334 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 335 | }); 336 | group.bench_function("small, big", |b| { 337 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 338 | }); 339 | group.bench_function("big, small", |b| { 340 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 341 | }); 342 | group.bench_function("big, big", |b| { 343 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 344 | }); 345 | } 346 | 347 | fn bench_format_realloc(c: &mut Criterion) { 348 | let mut group = c.benchmark_group("format-realloc"); 349 | 350 | for n in (1..5).map(|n| n * n * n * 10) { 351 | group.throughput(Throughput::Elements(n as u64)); 352 | group.bench_with_input(BenchmarkId::new("format-realloc", n), &n, |b, n| { 353 | let mut bump = bumpalo::Bump::new(); 354 | b.iter(|| { 355 | bump.reset(); 356 | format_realloc(&bump, *n); 357 | }); 358 | }); 359 | } 360 | } 361 | 362 | fn bench_string_from_str_in(c: &mut Criterion) { 363 | let len: usize = 16; 364 | 365 | let mut group = c.benchmark_group("alloc"); 366 | group.throughput(Throughput::Elements(len as u64)); 367 | group.bench_function("from_str_in", |b| { 368 | let mut bump = bumpalo::Bump::with_capacity(len); 369 | let str = "x".repeat(len); 370 | b.iter(|| { 371 | bump.reset(); 372 | string_from_str_in(&bump, &*str); 373 | }); 374 | }); 375 | } 376 | 377 | fn bench_string_push_str(c: &mut Criterion) { 378 | let len: usize = 16 * 1024; // 16 KiB 379 | 380 | let mut group = c.benchmark_group("alloc"); 381 | group.throughput(Throughput::Elements(len as u64)); 382 | 
group.bench_function("push_str", |b| { 383 | let mut bump = bumpalo::Bump::with_capacity(len); 384 | let str = "x".repeat(len); 385 | b.iter(|| { 386 | bump.reset(); 387 | string_push_str(&bump, &*str); 388 | }); 389 | }); 390 | } 391 | 392 | criterion_group!( 393 | benches, 394 | bench_extend_from_slice_copy, 395 | bench_extend_from_slices_copy, 396 | bench_alloc, 397 | bench_alloc_with, 398 | bench_alloc_try_with, 399 | bench_alloc_try_with_err, 400 | bench_try_alloc, 401 | bench_try_alloc_with, 402 | bench_try_alloc_try_with, 403 | bench_try_alloc_try_with_err, 404 | bench_format_realloc, 405 | bench_string_from_str_in, 406 | bench_string_push_str 407 | ); 408 | criterion_main!(benches); 409 | -------------------------------------------------------------------------------- /src/boxed.rs: -------------------------------------------------------------------------------- 1 | //! A pointer type for bump allocation. 2 | //! 3 | //! [`Box<'a, T>`] provides the simplest form of 4 | //! bump allocation in `bumpalo`. Boxes provide ownership for this allocation, and 5 | //! drop their contents when they go out of scope. 6 | //! 7 | //! # Examples 8 | //! 9 | //! Move a value from the stack to the heap by creating a [`Box`]: 10 | //! 11 | //! ``` 12 | //! use bumpalo::{Bump, boxed::Box}; 13 | //! 14 | //! let b = Bump::new(); 15 | //! 16 | //! let val: u8 = 5; 17 | //! let boxed: Box = Box::new_in(val, &b); 18 | //! ``` 19 | //! 20 | //! Move a value from a [`Box`] back to the stack by [dereferencing]: 21 | //! 22 | //! ``` 23 | //! use bumpalo::{Bump, boxed::Box}; 24 | //! 25 | //! let b = Bump::new(); 26 | //! 27 | //! let boxed: Box = Box::new_in(5, &b); 28 | //! let val: u8 = *boxed; 29 | //! ``` 30 | //! 31 | //! Running [`Drop`] implementations on bump-allocated values: 32 | //! 33 | //! ``` 34 | //! use bumpalo::{Bump, boxed::Box}; 35 | //! use std::sync::atomic::{AtomicUsize, Ordering}; 36 | //! 37 | //! static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0); 38 | //! 39 | //! struct CountDrops; 40 | //! 41 | //! impl Drop for CountDrops { 42 | //! fn drop(&mut self) { 43 | //! NUM_DROPPED.fetch_add(1, Ordering::SeqCst); 44 | //! } 45 | //! } 46 | //! 47 | //! // Create a new bump arena. 48 | //! let bump = Bump::new(); 49 | //! 50 | //! // Create a `CountDrops` inside the bump arena. 51 | //! let mut c = Box::new_in(CountDrops, &bump); 52 | //! 53 | //! // No `CountDrops` have been dropped yet. 54 | //! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 0); 55 | //! 56 | //! // Drop our `Box`. 57 | //! drop(c); 58 | //! 59 | //! // Its `Drop` implementation was run, and so `NUM_DROPS` has been incremented. 60 | //! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 1); 61 | //! ``` 62 | //! 63 | //! Creating a recursive data structure: 64 | //! 65 | //! ``` 66 | //! use bumpalo::{Bump, boxed::Box}; 67 | //! 68 | //! let b = Bump::new(); 69 | //! 70 | //! #[derive(Debug)] 71 | //! enum List<'a, T> { 72 | //! Cons(T, Box<'a, List<'a, T>>), 73 | //! Nil, 74 | //! } 75 | //! 76 | //! let list: List = List::Cons(1, Box::new_in(List::Cons(2, Box::new_in(List::Nil, &b)), &b)); 77 | //! println!("{:?}", list); 78 | //! ``` 79 | //! 80 | //! This will print `Cons(1, Cons(2, Nil))`. 81 | //! 82 | //! Recursive structures must be boxed, because if the definition of `Cons` 83 | //! looked like this: 84 | //! 85 | //! ```compile_fail,E0072 86 | //! # enum List { 87 | //! Cons(T, List), 88 | //! # } 89 | //! ``` 90 | //! 91 | //! It wouldn't work. This is because the size of a `List` depends on how many 92 | //! 
92 | //! elements are in the list, and so we don't know how much memory to allocate
93 | //! for a `Cons`. By introducing a [`Box<'a, T>`], which has a defined size, we know how
94 | //! big `Cons` needs to be.
95 | //!
96 | //! # Memory layout
97 | //!
98 | //! For non-zero-sized values, a [`Box`] will use the provided [`Bump`] allocator for
99 | //! its allocation. It is valid to convert both ways between a [`Box`] and a
100 | //! pointer allocated with the [`Bump`] allocator, given that the
101 | //! [`Layout`] used with the allocator is correct for the type. More precisely,
102 | //! a `value: *mut T` that has been allocated with the [`Bump`] allocator
103 | //! with `Layout::for_value(&*value)` may be converted into a box using
104 | //! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
105 | //! T` obtained from [`Box::<T>::into_raw`] will be deallocated by the
106 | //! [`Bump`] allocator with [`Layout::for_value(&*value)`].
107 | //!
108 | //! Note that the roundtrip `Box::from_raw(Box::into_raw(b))` loses the lifetime bound to the
109 | //! [`Bump`] immutable borrow which guarantees that the allocator will not be reset
110 | //! and memory will not be freed.
111 | //!
112 | //! [dereferencing]: https://doc.rust-lang.org/std/ops/trait.Deref.html
113 | //! [`Box`]: struct.Box.html
114 | //! [`Box<'a, T>`]: struct.Box.html
115 | //! [`Box::<T>::from_raw(value)`]: struct.Box.html#method.from_raw
116 | //! [`Box::<T>::into_raw`]: struct.Box.html#method.into_raw
117 | //! [`Bump`]: ../struct.Bump.html
118 | //! [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
119 | //! [`Layout`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html
120 | //! [`Layout::for_value(&*value)`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html#method.for_value
121 | 
122 | use {
123 |     crate::Bump,
124 |     {
125 |         core::{
126 |             any::Any,
127 |             borrow,
128 |             cmp::Ordering,
129 |             convert::TryFrom,
130 |             future::Future,
131 |             hash::{Hash, Hasher},
132 |             iter::FusedIterator,
133 |             mem::ManuallyDrop,
134 |             ops::{Deref, DerefMut},
135 |             pin::Pin,
136 |             task::{Context, Poll},
137 |         },
138 |         core_alloc::fmt,
139 |     },
140 | };
141 | 
142 | /// An owned pointer to a bump-allocated `T` value, that runs `Drop`
143 | /// implementations.
144 | ///
145 | /// See the [module-level documentation][crate::boxed] for more details.
146 | #[repr(transparent)]
147 | pub struct Box<'a, T: ?Sized>(&'a mut T);
148 | 
149 | impl<'a, T> Box<'a, T> {
150 |     /// Allocates memory on the heap and then places `x` into it.
151 |     ///
152 |     /// This doesn't actually allocate if `T` is zero-sized.
153 |     ///
154 |     /// # Examples
155 |     ///
156 |     /// ```
157 |     /// use bumpalo::{Bump, boxed::Box};
158 |     ///
159 |     /// let b = Bump::new();
160 |     ///
161 |     /// let five = Box::new_in(5, &b);
162 |     /// ```
163 |     #[inline(always)]
164 |     pub fn new_in(x: T, a: &'a Bump) -> Box<'a, T> {
165 |         Box(a.alloc(x))
166 |     }
167 | 
168 |     /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
169 |     /// `x` will be pinned in memory and unable to be moved.
170 |     #[inline(always)]
171 |     pub fn pin_in(x: T, a: &'a Bump) -> Pin<Box<'a, T>> {
172 |         Box(a.alloc(x)).into()
173 |     }
174 | 
175 |     /// Consumes the `Box`, returning the wrapped value.
176 |     ///
177 |     /// # Examples
178 |     ///
179 |     /// ```
180 |     /// use bumpalo::{Bump, boxed::Box};
181 |     ///
182 |     /// let b = Bump::new();
183 |     ///
184 |     /// let hello = Box::new_in("hello".to_owned(), &b);
185 |     /// assert_eq!(Box::into_inner(hello), "hello");
186 |     /// ```
187 |     pub fn into_inner(b: Box<'a, T>) -> T {
188 |         // `Box::into_raw` returns a pointer that is properly aligned and non-null.
189 |         // The underlying `Bump` only frees the memory, but won't call the destructor.
190 |         unsafe { core::ptr::read(Box::into_raw(b)) }
191 |     }
192 | }
193 | 
194 | impl<'a, T: ?Sized> Box<'a, T> {
195 |     /// Constructs a box from a raw pointer.
196 |     ///
197 |     /// After calling this function, the raw pointer is owned by the
198 |     /// resulting `Box`. Specifically, the `Box` destructor will call
199 |     /// the destructor of `T` and free the allocated memory. For this
200 |     /// to be safe, the memory must have been allocated in accordance
201 |     /// with the memory layout used by `Box`.
202 |     ///
203 |     /// # Safety
204 |     ///
205 |     /// This function is unsafe because improper use may lead to
206 |     /// memory problems. For example, a double-free may occur if the
207 |     /// function is called twice on the same raw pointer.
208 |     ///
209 |     /// # Examples
210 |     ///
211 |     /// Recreate a `Box` which was previously converted to a raw pointer
212 |     /// using [`Box::into_raw`]:
213 |     /// ```
214 |     /// use bumpalo::{Bump, boxed::Box};
215 |     ///
216 |     /// let b = Bump::new();
217 |     ///
218 |     /// let x = Box::new_in(5, &b);
219 |     /// let ptr = Box::into_raw(x);
220 |     /// let x = unsafe { Box::from_raw(ptr) }; // Note that the new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset.
221 |     /// ```
222 |     /// Manually create a `Box` from scratch by using the bump allocator:
223 |     /// ```
224 |     /// use std::alloc::{alloc, Layout};
225 |     /// use bumpalo::{Bump, boxed::Box};
226 |     ///
227 |     /// let b = Bump::new();
228 |     ///
229 |     /// unsafe {
230 |     ///     let ptr = b.alloc_layout(Layout::new::<i32>()).as_ptr() as *mut i32;
231 |     ///     *ptr = 5;
232 |     ///     let x = Box::from_raw(ptr); // Note that `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset.
233 |     /// }
234 |     /// ```
235 |     #[inline]
236 |     pub unsafe fn from_raw(raw: *mut T) -> Self {
237 |         Box(&mut *raw)
238 |     }
239 | 
240 |     /// Consumes the `Box`, returning a wrapped raw pointer.
241 |     ///
242 |     /// The pointer will be properly aligned and non-null.
243 |     ///
244 |     /// After calling this function, the caller is responsible for the
245 |     /// value previously managed by the `Box`. In particular, the
246 |     /// caller should properly destroy `T`. The easiest way to
247 |     /// do this is to convert the raw pointer back into a `Box` with the
248 |     /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
249 |     /// the cleanup.
250 |     ///
251 |     /// Note: this is an associated function, which means that you have
252 |     /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
253 |     /// is so that there is no conflict with a method on the inner type.
254 | /// 255 | /// # Examples 256 | /// 257 | /// Converting the raw pointer back into a `Box` with [`Box::from_raw`] 258 | /// for automatic cleanup: 259 | /// ``` 260 | /// use bumpalo::{Bump, boxed::Box}; 261 | /// 262 | /// let b = Bump::new(); 263 | /// 264 | /// let x = Box::new_in(String::from("Hello"), &b); 265 | /// let ptr = Box::into_raw(x); 266 | /// let x = unsafe { Box::from_raw(ptr) }; // Note that new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset. 267 | /// ``` 268 | /// Manual cleanup by explicitly running the destructor: 269 | /// ``` 270 | /// use std::ptr; 271 | /// use bumpalo::{Bump, boxed::Box}; 272 | /// 273 | /// let b = Bump::new(); 274 | /// 275 | /// let mut x = Box::new_in(String::from("Hello"), &b); 276 | /// let p = Box::into_raw(x); 277 | /// unsafe { 278 | /// ptr::drop_in_place(p); 279 | /// } 280 | /// ``` 281 | #[inline] 282 | pub fn into_raw(b: Box<'a, T>) -> *mut T { 283 | let mut b = ManuallyDrop::new(b); 284 | b.deref_mut().0 as *mut T 285 | } 286 | 287 | /// Consumes and leaks the `Box`, returning a mutable reference, 288 | /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime 289 | /// `'a`. If the type has only static references, or none at all, then this 290 | /// may be chosen to be `'static`. 291 | /// 292 | /// This function is mainly useful for data that lives for the remainder of 293 | /// the program's life. Dropping the returned reference will cause a memory 294 | /// leak. If this is not acceptable, the reference should first be wrapped 295 | /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can 296 | /// then be dropped which will properly destroy `T` and release the 297 | /// allocated memory. 298 | /// 299 | /// Note: this is an associated function, which means that you have 300 | /// to call it as `Box::leak(b)` instead of `b.leak()`. This 301 | /// is so that there is no conflict with a method on the inner type. 302 | /// 303 | /// # Examples 304 | /// 305 | /// Simple usage: 306 | /// 307 | /// ``` 308 | /// use bumpalo::{Bump, boxed::Box}; 309 | /// 310 | /// let b = Bump::new(); 311 | /// 312 | /// let x = Box::new_in(41, &b); 313 | /// let reference: &mut usize = Box::leak(x); 314 | /// *reference += 1; 315 | /// assert_eq!(*reference, 42); 316 | /// ``` 317 | /// 318 | ///``` 319 | /// # #[cfg(feature = "collections")] 320 | /// # { 321 | /// use bumpalo::{Bump, boxed::Box, vec}; 322 | /// 323 | /// let b = Bump::new(); 324 | /// 325 | /// let x = vec![in &b; 1, 2, 3].into_boxed_slice(); 326 | /// let reference = Box::leak(x); 327 | /// reference[0] = 4; 328 | /// assert_eq!(*reference, [4, 2, 3]); 329 | /// # } 330 | ///``` 331 | #[inline] 332 | pub fn leak(b: Box<'a, T>) -> &'a mut T { 333 | unsafe { &mut *Box::into_raw(b) } 334 | } 335 | } 336 | 337 | impl<'a, T: ?Sized> Drop for Box<'a, T> { 338 | fn drop(&mut self) { 339 | unsafe { 340 | // `Box` owns value of `T`, but not memory behind it. 341 | core::ptr::drop_in_place(self.0); 342 | } 343 | } 344 | } 345 | 346 | impl<'a, T> Default for Box<'a, [T]> { 347 | fn default() -> Box<'a, [T]> { 348 | // It should be OK to `drop_in_place` empty slice of anything. 349 | Box(&mut []) 350 | } 351 | } 352 | 353 | impl<'a> Default for Box<'a, str> { 354 | fn default() -> Box<'a, str> { 355 | // Empty slice is valid string. 356 | // It should be OK to `drop_in_place` empty str. 
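        // The cast below from `*mut [u8]` to `*mut str` keeps the fat
        // pointer's length metadata; an empty byte slice is trivially
        // valid UTF-8.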
357 |         unsafe { Box::from_raw(Box::into_raw(Box::<[u8]>::default()) as *mut str) }
358 |     }
359 | }
360 | 
361 | impl<'a, 'b, T: ?Sized + PartialEq> PartialEq<Box<'b, T>> for Box<'a, T> {
362 |     #[inline]
363 |     fn eq(&self, other: &Box<'b, T>) -> bool {
364 |         PartialEq::eq(&**self, &**other)
365 |     }
366 |     #[inline]
367 |     fn ne(&self, other: &Box<'b, T>) -> bool {
368 |         PartialEq::ne(&**self, &**other)
369 |     }
370 | }
371 | 
372 | impl<'a, 'b, T: ?Sized + PartialOrd> PartialOrd<Box<'b, T>> for Box<'a, T> {
373 |     #[inline]
374 |     fn partial_cmp(&self, other: &Box<'b, T>) -> Option<Ordering> {
375 |         PartialOrd::partial_cmp(&**self, &**other)
376 |     }
377 |     #[inline]
378 |     fn lt(&self, other: &Box<'b, T>) -> bool {
379 |         PartialOrd::lt(&**self, &**other)
380 |     }
381 |     #[inline]
382 |     fn le(&self, other: &Box<'b, T>) -> bool {
383 |         PartialOrd::le(&**self, &**other)
384 |     }
385 |     #[inline]
386 |     fn ge(&self, other: &Box<'b, T>) -> bool {
387 |         PartialOrd::ge(&**self, &**other)
388 |     }
389 |     #[inline]
390 |     fn gt(&self, other: &Box<'b, T>) -> bool {
391 |         PartialOrd::gt(&**self, &**other)
392 |     }
393 | }
394 | 
395 | impl<'a, T: ?Sized + Ord> Ord for Box<'a, T> {
396 |     #[inline]
397 |     fn cmp(&self, other: &Box<'a, T>) -> Ordering {
398 |         Ord::cmp(&**self, &**other)
399 |     }
400 | }
401 | 
402 | impl<'a, T: ?Sized + Eq> Eq for Box<'a, T> {}
403 | 
404 | impl<'a, T: ?Sized + Hash> Hash for Box<'a, T> {
405 |     fn hash<H: Hasher>(&self, state: &mut H) {
406 |         (**self).hash(state);
407 |     }
408 | }
409 | 
410 | impl<'a, T: ?Sized + Hasher> Hasher for Box<'a, T> {
411 |     fn finish(&self) -> u64 {
412 |         (**self).finish()
413 |     }
414 |     fn write(&mut self, bytes: &[u8]) {
415 |         (**self).write(bytes)
416 |     }
417 |     fn write_u8(&mut self, i: u8) {
418 |         (**self).write_u8(i)
419 |     }
420 |     fn write_u16(&mut self, i: u16) {
421 |         (**self).write_u16(i)
422 |     }
423 |     fn write_u32(&mut self, i: u32) {
424 |         (**self).write_u32(i)
425 |     }
426 |     fn write_u64(&mut self, i: u64) {
427 |         (**self).write_u64(i)
428 |     }
429 |     fn write_u128(&mut self, i: u128) {
430 |         (**self).write_u128(i)
431 |     }
432 |     fn write_usize(&mut self, i: usize) {
433 |         (**self).write_usize(i)
434 |     }
435 |     fn write_i8(&mut self, i: i8) {
436 |         (**self).write_i8(i)
437 |     }
438 |     fn write_i16(&mut self, i: i16) {
439 |         (**self).write_i16(i)
440 |     }
441 |     fn write_i32(&mut self, i: i32) {
442 |         (**self).write_i32(i)
443 |     }
444 |     fn write_i64(&mut self, i: i64) {
445 |         (**self).write_i64(i)
446 |     }
447 |     fn write_i128(&mut self, i: i128) {
448 |         (**self).write_i128(i)
449 |     }
450 |     fn write_isize(&mut self, i: isize) {
451 |         (**self).write_isize(i)
452 |     }
453 | }
454 | 
455 | impl<'a, T: ?Sized> From<Box<'a, T>> for Pin<Box<'a, T>> {
456 |     /// Converts a `Box<T>` into a `Pin<Box<T>>`.
457 |     ///
458 |     /// This conversion does not allocate on the heap and happens in place.
459 |     fn from(boxed: Box<'a, T>) -> Self {
460 |         // It's not possible to move or replace the insides of a `Pin<Box<T>>`
461 |         // when `T: !Unpin`, so it's safe to pin it directly without any
462 |         // additional requirements.
463 |         unsafe { Pin::new_unchecked(boxed) }
464 |     }
465 | }
466 | 
467 | impl<'a> Box<'a, dyn Any> {
468 |     #[inline]
469 |     /// Attempt to downcast the box to a concrete type.
470 |     ///
471 |     /// # Examples
472 |     ///
473 |     /// ```
474 |     /// use std::any::Any;
475 |     ///
476 |     /// fn print_if_string(value: Box<dyn Any>) {
477 |     ///     if let Ok(string) = value.downcast::<String>() {
478 |     ///         println!("String ({}): {}", string.len(), string);
479 |     ///     }
480 |     /// }
481 |     ///
482 |     /// let my_string = "Hello World".to_string();
483 |     /// print_if_string(Box::new(my_string));
484 |     /// print_if_string(Box::new(0i8));
485 |     /// ```
486 |     pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any>> {
487 |         if self.is::<T>() {
488 |             unsafe {
489 |                 let raw: *mut dyn Any = Box::into_raw(self);
490 |                 Ok(Box::from_raw(raw as *mut T))
491 |             }
492 |         } else {
493 |             Err(self)
494 |         }
495 |     }
496 | }
497 | 
498 | impl<'a> Box<'a, dyn Any + Send> {
499 |     #[inline]
500 |     /// Attempt to downcast the box to a concrete type.
501 |     ///
502 |     /// # Examples
503 |     ///
504 |     /// ```
505 |     /// use std::any::Any;
506 |     ///
507 |     /// fn print_if_string(value: Box<dyn Any + Send>) {
508 |     ///     if let Ok(string) = value.downcast::<String>() {
509 |     ///         println!("String ({}): {}", string.len(), string);
510 |     ///     }
511 |     /// }
512 |     ///
513 |     /// let my_string = "Hello World".to_string();
514 |     /// print_if_string(Box::new(my_string));
515 |     /// print_if_string(Box::new(0i8));
516 |     /// ```
517 |     pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any + Send>> {
518 |         if self.is::<T>() {
519 |             unsafe {
520 |                 let raw: *mut (dyn Any + Send) = Box::into_raw(self);
521 |                 Ok(Box::from_raw(raw as *mut T))
522 |             }
523 |         } else {
524 |             Err(self)
525 |         }
526 |     }
527 | }
528 | 
529 | impl<'a, T: fmt::Display + ?Sized> fmt::Display for Box<'a, T> {
530 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
531 |         fmt::Display::fmt(&**self, f)
532 |     }
533 | }
534 | 
535 | impl<'a, T: fmt::Debug + ?Sized> fmt::Debug for Box<'a, T> {
536 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
537 |         fmt::Debug::fmt(&**self, f)
538 |     }
539 | }
540 | 
541 | impl<'a, T: ?Sized> fmt::Pointer for Box<'a, T> {
542 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
543 |         // It's not possible to extract the inner Uniq directly from the Box,
544 |         // instead we cast it to a *const which aliases the Unique
545 |         let ptr: *const T = &**self;
546 |         fmt::Pointer::fmt(&ptr, f)
547 |     }
548 | }
549 | 
550 | impl<'a, T: ?Sized> Deref for Box<'a, T> {
551 |     type Target = T;
552 | 
553 |     fn deref(&self) -> &T {
554 |         &*self.0
555 |     }
556 | }
557 | 
558 | impl<'a, T: ?Sized> DerefMut for Box<'a, T> {
559 |     fn deref_mut(&mut self) -> &mut T {
560 |         self.0
561 |     }
562 | }
563 | 
564 | impl<'a, I: Iterator + ?Sized> Iterator for Box<'a, I> {
565 |     type Item = I::Item;
566 |     fn next(&mut self) -> Option<I::Item> {
567 |         (**self).next()
568 |     }
569 |     fn size_hint(&self) -> (usize, Option<usize>) {
570 |         (**self).size_hint()
571 |     }
572 |     fn nth(&mut self, n: usize) -> Option<I::Item> {
573 |         (**self).nth(n)
574 |     }
575 |     fn last(self) -> Option<I::Item> {
576 |         #[inline]
577 |         fn some<T>(_: Option<T>, x: T) -> Option<T> {
578 |             Some(x)
579 |         }
580 |         self.fold(None, some)
581 |     }
582 | }
583 | 
584 | impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<'a, I> {
585 |     fn next_back(&mut self) -> Option<I::Item> {
586 |         (**self).next_back()
587 |     }
588 |     fn nth_back(&mut self, n: usize) -> Option<I::Item> {
589 |         (**self).nth_back(n)
590 |     }
591 | }
592 | impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<'a, I> {
593 |     fn len(&self) -> usize {
594 |         (**self).len()
595 |     }
596 | }
597 | 
598 | impl<'a, I: FusedIterator + ?Sized> FusedIterator for Box<'a, I> {}
599 | 
600 | #[cfg(feature = "collections")]
601 | impl<'a, A> Box<'a, [A]> {
602 |     /// Creates a value from an iterator.
603 |     /// This method is an adapted version of [`FromIterator::from_iter`][from_iter].
604 |     /// It cannot be provided as that trait's implementation because the signatures differ.
605 |     ///
606 |     /// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter
607 |     ///
608 |     /// # Examples
609 |     ///
610 |     /// Basic usage:
611 |     /// ```
612 |     /// use bumpalo::{Bump, boxed::Box, vec};
613 |     ///
614 |     /// let b = Bump::new();
615 |     ///
616 |     /// let five_fives = std::iter::repeat(5).take(5);
617 |     /// let slice = Box::from_iter_in(five_fives, &b);
618 |     /// assert_eq!(vec![in &b; 5, 5, 5, 5, 5], &*slice);
619 |     /// ```
620 |     pub fn from_iter_in<T: IntoIterator<Item = A>>(iter: T, a: &'a Bump) -> Self {
621 |         use crate::collections::Vec;
622 |         let mut vec = Vec::new_in(a);
623 |         vec.extend(iter);
624 |         vec.into_boxed_slice()
625 |     }
626 | }
627 | 
628 | impl<'a, T: ?Sized> borrow::Borrow<T> for Box<'a, T> {
629 |     fn borrow(&self) -> &T {
630 |         &**self
631 |     }
632 | }
633 | 
634 | impl<'a, T: ?Sized> borrow::BorrowMut<T> for Box<'a, T> {
635 |     fn borrow_mut(&mut self) -> &mut T {
636 |         &mut **self
637 |     }
638 | }
639 | 
640 | impl<'a, T: ?Sized> AsRef<T> for Box<'a, T> {
641 |     fn as_ref(&self) -> &T {
642 |         &**self
643 |     }
644 | }
645 | 
646 | impl<'a, T: ?Sized> AsMut<T> for Box<'a, T> {
647 |     fn as_mut(&mut self) -> &mut T {
648 |         &mut **self
649 |     }
650 | }
651 | 
652 | impl<'a, T: ?Sized> Unpin for Box<'a, T> {}
653 | 
654 | impl<'a, F: ?Sized + Future + Unpin> Future for Box<'a, F> {
655 |     type Output = F::Output;
656 | 
657 |     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
658 |         F::poll(Pin::new(&mut *self), cx)
659 |     }
660 | }
661 | 
662 | /// This impl replaces unsize coercion.
663 | impl<'a, T, const N: usize> From<Box<'a, [T; N]>> for Box<'a, [T]> {
664 |     fn from(arr: Box<'a, [T; N]>) -> Box<'a, [T]> {
665 |         let mut arr = ManuallyDrop::new(arr);
666 |         let ptr = core::ptr::slice_from_raw_parts_mut(arr.as_mut_ptr(), N);
667 |         unsafe { Box::from_raw(ptr) }
668 |     }
669 | }
670 | 
671 | /// This impl replaces unsize coercion.
672 | impl<'a, T, const N: usize> TryFrom<Box<'a, [T]>> for Box<'a, [T; N]> {
673 |     type Error = Box<'a, [T]>;
674 |     fn try_from(slice: Box<'a, [T]>) -> Result<Box<'a, [T; N]>, Box<'a, [T]>> {
675 |         if slice.len() == N {
676 |             let mut slice = ManuallyDrop::new(slice);
677 |             let ptr = slice.as_mut_ptr() as *mut [T; N];
678 |             Ok(unsafe { Box::from_raw(ptr) })
679 |         } else {
680 |             Err(slice)
681 |         }
682 |     }
683 | }
684 | 
685 | #[cfg(feature = "serde")]
686 | mod serialize {
687 |     use super::*;
688 | 
689 |     use serde::{Serialize, Serializer};
690 | 
691 |     impl<'a, T> Serialize for Box<'a, T>
692 |     where
693 |         T: Serialize,
694 |     {
695 |         fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
696 |             T::serialize(self, serializer)
697 |         }
698 |     }
699 | }
700 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## Unreleased
2 | 
3 | Released YYYY-MM-DD.
4 | 5 | ### Added 6 | 7 | * TODO (or remove section if none) 8 | 9 | ### Changed 10 | 11 | * TODO (or remove section if none) 12 | 13 | ### Deprecated 14 | 15 | * TODO (or remove section if none) 16 | 17 | ### Removed 18 | 19 | * TODO (or remove section if none) 20 | 21 | ### Fixed 22 | 23 | * TODO (or remove section if none) 24 | 25 | ### Security 26 | 27 | * TODO (or remove section if none) 28 | 29 | -------------------------------------------------------------------------------- 30 | 31 | ## 3.16.0 32 | 33 | Released 2024-04-08. 34 | 35 | ### Added 36 | 37 | * Added an optional, off-by-default dependency on the `serde` crate. Enabling 38 | this dependency allows you to serialize Bumpalo's collection and box 39 | types. Deserialization is not implemented, due to constraints of the 40 | deserialization trait. 41 | 42 | -------------------------------------------------------------------------------- 43 | 44 | ## 3.15.4 45 | 46 | Released 2024-03-07. 47 | 48 | ### Added 49 | 50 | * Added the `bumpalo::collections::Vec::extend_from_slices_copy` method, which 51 | is a faster way to extend a vec from multiple slices when the element is 52 | `Copy` than calling `extend_from_slice_copy` N times. 53 | 54 | -------------------------------------------------------------------------------- 55 | 56 | ## 3.15.3 57 | 58 | Released 2024-02-22. 59 | 60 | ### Added 61 | 62 | * Added additional performance improvements to `bumpalo::collections::Vec` 63 | related to reserving capacity. 64 | 65 | -------------------------------------------------------------------------------- 66 | 67 | ## 3.15.2 68 | 69 | Released 2024-02-21. 70 | 71 | ### Added 72 | 73 | * Add a `bumpalo::collections::Vec::extend_from_slice_copy` method. This doesn't 74 | exist on the standard library's `Vec` but they have access to specialization, 75 | so their regular `extend_from_slice` has a specialization for `Copy` 76 | types. Using this new method for `Copy` types is a ~80x performance 77 | improvement over the plain `extend_from_slice` method. 78 | 79 | -------------------------------------------------------------------------------- 80 | 81 | ## 3.15.1 82 | 83 | Released 2024-02-20. 84 | 85 | ### Fixed 86 | 87 | * Fixed the MSRV listed in `Cargo.toml`, whose update was forgotten when the 88 | MSRV bumped in release 3.15.0. 89 | 90 | -------------------------------------------------------------------------------- 91 | 92 | ## 3.15.0 93 | 94 | Released 2024-02-15. 95 | 96 | ### Changed 97 | 98 | * The minimum supported Rust version (MSRV) is now 1.73.0. 99 | * `bumpalo::collections::String::push_str` and 100 | `bumpalo::collections::String::from_str_in` received significant performance 101 | improvements. 102 | * Allocator trait methods are now marked `#[inline]`, increasing performance for 103 | some callers. 104 | 105 | ### Fixed 106 | 107 | * Fixed an edge-case bug in the `Allocator::shrink` method. 108 | 109 | -------------------------------------------------------------------------------- 110 | 111 | ## 3.14.0 112 | 113 | Released 2023-09-14. 114 | 115 | ### Added 116 | 117 | * Added the `std` cargo feature, which enables implementations of `std` traits 118 | for various things. Right now that is just `std::io::Write` for 119 | `bumpalo::collections::Vec`, but could be more in the future. 120 | 121 | -------------------------------------------------------------------------------- 122 | 123 | ## 3.13.0 124 | 125 | Released 2023-05-22. 
126 | 127 | ### Added 128 | 129 | * New `"allocator-api2"` feature enables the use of the allocator API on 130 | stable. This feature uses a crate that mirrors the API of the unstable Rust 131 | `allocator_api` feature. If the feature is enabled, references to `Bump` will 132 | implement `allocator_api2::Allocator`. This allows `Bump` to be used as an 133 | allocator for collection types from `allocator-api2` and any other crates that 134 | support `allocator-api2`. 135 | 136 | ### Changed 137 | 138 | * The minimum supported Rust version (MSRV) is now 1.63.0. 139 | 140 | -------------------------------------------------------------------------------- 141 | 142 | ## 3.12.2 143 | 144 | Released 2023-05-09. 145 | 146 | ### Changed 147 | 148 | * Added `rust-version` metadata to `Cargo.toml` which helps `cargo` with version 149 | resolution. 150 | 151 | -------------------------------------------------------------------------------- 152 | 153 | ## 3.12.1 154 | 155 | Released 2023-04-21. 156 | 157 | ### Fixed 158 | 159 | * Fixed a bug where `Bump::try_with_capacity(n)` where `n > isize::MAX` could 160 | lead to attempts to create invalid `Layout`s. 161 | 162 | -------------------------------------------------------------------------------- 163 | 164 | ## 3.12.0 165 | 166 | Released 2023-01-17. 167 | 168 | ### Added 169 | 170 | * Added the `bumpalo::boxed::Box::bump` and `bumpalo::collections::String::bump` 171 | getters to get the underlying `Bump` that a string or box was allocated into. 172 | 173 | ### Changed 174 | 175 | * Some uses of `Box` that MIRI did not previously consider as UB are now 176 | reported as UB, and `bumpalo`'s internals have been adjusted to avoid the new 177 | UB. 178 | 179 | -------------------------------------------------------------------------------- 180 | 181 | ## 3.11.1 182 | 183 | Released 2022-10-18. 184 | 185 | ### Security 186 | 187 | * Fixed a bug where when `std::vec::IntoIter` was ported to 188 | `bumpalo::collections::vec::IntoIter`, it didn't get its underlying `Bump`'s 189 | lifetime threaded through. This meant that `rustc` was not checking the 190 | borrows for `bumpalo::collections::IntoIter` and this could result in 191 | use-after-free bugs. 192 | 193 | -------------------------------------------------------------------------------- 194 | 195 | ## 3.11.0 196 | 197 | Released 2022-08-17. 198 | 199 | ### Added 200 | 201 | * Added support for per-`Bump` allocation limits. These are enforced only in the 202 | slow path when allocating new chunks in the `Bump`, not in the bump allocation 203 | hot path, and therefore impose near zero overhead. 204 | * Added the `bumpalo::boxed::Box::into_inner` method. 205 | 206 | ### Changed 207 | 208 | * Updated to Rust 2021 edition. 209 | * The minimum supported Rust version (MSRV) is now 1.56.0. 210 | 211 | -------------------------------------------------------------------------------- 212 | 213 | ## 3.10.0 214 | 215 | Released 2022-06-01. 216 | 217 | ### Added 218 | 219 | * Implement `bumpalo::collections::FromIteratorIn` for `Option` and `Result`, 220 | just like `core` does for `FromIterator`. 221 | * Implement `bumpalo::collections::FromIteratorIn` for `bumpalo::boxed::Box<'a, 222 | [T]>`. 223 | * Added running tests under MIRI in CI for additional confidence in unsafe code. 224 | * Publicly exposed `bumpalo::collections::Vec::drain_filter` since the 225 | corresponding `std::vec::Vec` method has stabilized. 
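
  As an illustration of the new `FromIteratorIn` impls, here is a minimal
  sketch (not taken from the release itself) of collecting `Result` items
  into a bump-allocated `Vec`; it assumes the `collections` cargo feature is
  enabled, and the element and error types are arbitrary:

  ```rust
  use bumpalo::collections::{CollectIn, Vec};
  use bumpalo::Bump;

  let bump = Bump::new();

  // Collecting `Result` items short-circuits on the first `Err`,
  // mirroring `core`'s `FromIterator` impl for `Result`.
  let ok = [Ok(1), Ok(2), Ok(3)]
      .into_iter()
      .collect_in::<Result<Vec<_>, &str>>(&bump);
  assert_eq!(ok.unwrap().as_slice(), &[1, 2, 3]);

  let err = [Ok(1), Err("nope"), Ok(3)]
      .into_iter()
      .collect_in::<Result<Vec<_>, _>>(&bump);
  assert_eq!(err, Err("nope"));
  ```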
226 | 
227 | ### Changed
228 | 
229 | * `Bump::new` now defers allocating a backing chunk until the first
230 |   allocation inside the bump arena.
231 | 
232 | ### Fixed
233 | 
234 | * Properly account for alignment changes when growing or shrinking an existing
235 |   allocation.
236 | * Removed all internal integer-to-pointer casts, to play better with UB checkers
237 |   like MIRI.
238 | 
239 | --------------------------------------------------------------------------------
240 | 
241 | ## 3.9.1
242 | 
243 | Released 2022-01-06.
244 | 
245 | ### Fixed
246 | 
247 | * Fixed the link to the logo in the docs and README.md.
248 | 
249 | --------------------------------------------------------------------------------
250 | 
251 | ## 3.9.0
252 | 
253 | Released 2022-01-05.
254 | 
255 | ### Changed
256 | 
257 | * The minimum supported Rust version (MSRV) has been raised to Rust 1.54.0.
258 | 
259 | * `bumpalo::collections::Vec` implements relevant traits for all arrays of
260 |   any size `N` via const generics. Previously, it was just arrays up to length
261 |   32. Similar for `bumpalo::boxed::Box<[T; N]>`.
262 | 
263 | --------------------------------------------------------------------------------
264 | 
265 | ## 3.8.0
266 | 
267 | Released 2021-10-19.
268 | 
269 | ### Added
270 | 
271 | * Added the `CollectIn` and `FromIteratorIn` traits to make building a
272 |   collection from an iterator easier. These new traits live in the
273 |   `bumpalo::collections` module and are implemented by
274 |   `bumpalo::collections::{String,Vec}`.
275 | 
276 | * Added the `Bump::iter_allocated_chunks_raw` method, which is an `unsafe`, raw
277 |   version of `Bump::iter_allocated_chunks`. The new method does not take an
278 |   exclusive borrow of the `Bump` and yields raw pointer-and-length pairs for
279 |   each chunk in the bump. It is the caller's responsibility to ensure that no
280 |   allocation happens in the `Bump` while iterating over chunks and that there
281 |   are no active borrows of allocated data if they want to turn any
282 |   pointer-and-length pairs into slices.
283 | 
284 | --------------------------------------------------------------------------------
285 | 
286 | ## 3.7.1
287 | 
288 | Released 2021-09-17.
289 | 
290 | ### Changed
291 | 
292 | * The packaged crate uploaded to crates.io when `bumpalo` is published is now
293 |   smaller, thanks to excluding unnecessary files.
294 | 
295 | --------------------------------------------------------------------------------
296 | 
297 | ## 3.7.0
298 | 
299 | Released 2021-05-28.
300 | 
301 | ### Added
302 | 
303 | * Added `Borrow` and `BorrowMut` trait implementations for
304 |   `bumpalo::collections::Vec` and
305 |   `bumpalo::collections::String`. [#108](https://github.com/fitzgen/bumpalo/pull/108)
306 | 
307 | ### Changed
308 | 
309 | * When allocating a new chunk fails, don't immediately give up. Instead, try
310 |   allocating a chunk that is half that size, and if that fails, then try half of
311 |   *that* size, etc. until either we successfully allocate a chunk or we fail to
312 |   allocate the minimum chunk size and then finally give
313 |   up. [#111](https://github.com/fitzgen/bumpalo/pull/111)
314 | 
315 | --------------------------------------------------------------------------------
316 | 
317 | ## 3.6.1
318 | 
319 | Released 2021-02-18.
320 | 
321 | ### Added
322 | 
323 | * Improved performance of `Bump`'s `Allocator::grow_zeroed` trait method
324 |   implementation.
[#99](https://github.com/fitzgen/bumpalo/pull/99)
325 | 
326 | --------------------------------------------------------------------------------
327 | 
328 | ## 3.6.0
329 | 
330 | Released 2021-01-29.
331 | 
332 | ### Added
333 | 
334 | * Added a few new flavors of allocation:
335 | 
336 |   * `try_alloc` for fallible, by-value allocation
337 | 
338 |   * `try_alloc_with` for fallible allocation with an infallible initializer
339 |     function
340 | 
341 |   * `alloc_try_with` for infallible allocation with a fallible initializer
342 |     function
343 | 
344 |   * `try_alloc_try_with` for fallible allocation with a fallible
345 |     initializer function
346 | 
347 |   We already have infallible, by-value allocation (`alloc`) and infallible
348 |   allocation with an infallible initializer (`alloc_with`). With these new
349 |   methods, we now have every combination covered.
350 | 
351 |   Thanks to [Tamme Schichler](https://github.com/Tamschi) for contributing these
352 |   methods!
353 | 
354 | --------------------------------------------------------------------------------
355 | 
356 | ## 3.5.0
357 | 
358 | Released 2021-01-22.
359 | 
360 | ### Added
361 | 
362 | * Added experimental, unstable support for the unstable, nightly Rust
363 |   `allocator_api` feature.
364 | 
365 |   The `allocator_api` feature defines an `Allocator` trait and exposes custom
366 |   allocators for `std` types. Bumpalo has a matching `allocator_api` cargo
367 |   feature to enable implementing `Allocator` and using `Bump` with `std`
368 |   collections.
369 | 
370 |   First, enable the `allocator_api` feature in your `Cargo.toml`:
371 | 
372 |   ```toml
373 |   [dependencies]
374 |   bumpalo = { version = "3.5", features = ["allocator_api"] }
375 |   ```
376 | 
377 |   Next, enable the `allocator_api` nightly Rust feature in your `src/lib.rs` or `src/main.rs`:
378 | 
379 |   ```rust
380 |   # #[cfg(feature = "allocator_api")]
381 |   # {
382 |   #![feature(allocator_api)]
383 |   # }
384 |   ```
385 | 
386 |   Finally, use `std` collections with `Bump`, so that their internal heap
387 |   allocations are made within the given bump arena:
388 | 
389 |   ```
390 |   # #![cfg_attr(feature = "allocator_api", feature(allocator_api))]
391 |   # #[cfg(feature = "allocator_api")]
392 |   # {
393 |   #![feature(allocator_api)]
394 |   use bumpalo::Bump;
395 | 
396 |   // Create a new bump arena.
397 |   let bump = Bump::new();
398 | 
399 |   // Create a `Vec` whose elements are allocated within the bump arena.
400 |   let mut v = Vec::new_in(&bump);
401 |   v.push(0);
402 |   v.push(1);
403 |   v.push(2);
404 |   # }
405 |   ```
406 | 
407 |   I'm very excited to see custom allocators in `std` coming along! Thanks to
408 |   Arthur Gautier for implementing support for the `allocator_api` feature for
409 |   Bumpalo.
410 | 
411 | --------------------------------------------------------------------------------
412 | 
413 | ## 3.4.0
414 | 
415 | Released 2020-06-01.
416 | 
417 | ### Added
418 | 
419 | * Added the `bumpalo::boxed::Box` type. It is an owned pointer referencing a
420 |   bump-allocated value, and it runs `T`'s `Drop` implementation on the
421 |   referenced value when dropped. This type can be used by enabling the `"boxed"`
422 |   cargo feature flag.
423 | 
424 | --------------------------------------------------------------------------------
425 | 
426 | ## 3.3.0
427 | 
428 | Released 2020-05-13.
429 | 
430 | ### Added
431 | 
432 | * Added fallible allocation methods to `Bump`: `try_new`, `try_with_capacity`,
433 |   and `try_alloc_layout`.
434 | 
435 | * Added `Bump::chunk_capacity`
436 | 
437 | * Added `bumpalo::collections::Vec::try_reserve[_exact]`
438 | 
439 | --------------------------------------------------------------------------------
440 | 
441 | ## 3.2.1
442 | 
443 | Released 2020-03-24.
444 | 
445 | ### Security
446 | 
447 | * When `realloc`ing, if we allocate new space, we need to copy the old
448 |   allocation's bytes into the new space. There are `old_size` number of bytes in
449 |   the old allocation, but we were accidentally copying `new_size` number of
450 |   bytes, which could lead to copying bytes into the realloc'd space from past
451 |   the chunk that we're bump allocating out of, from unknown memory.
452 | 
453 |   If an attacker can cause `realloc`s, and can read the `realloc`ed data back,
454 |   this could allow them to read things from other regions of memory that they
455 |   shouldn't be able to. For example, if some crypto keys happened to live in
456 |   memory right after a chunk we were bump allocating out of, this could allow
457 |   the attacker to read the crypto keys.
458 | 
459 |   Beyond just fixing the bug and adding a regression test, I've also taken two
460 |   additional steps:
461 | 
462 |   1. While we were already running the testsuite under `valgrind` in CI, because
463 |      `valgrind` exits with the same code that the program did, if there are
464 |      invalid reads/writes that happen not to trigger a segfault, the program can
465 |      still exit OK and we will be none the wiser. I've enabled the
466 |      `--error-exitcode=1` flag for `valgrind` in CI so that tests eagerly fail
467 |      in these scenarios.
468 | 
469 |   2. I've written a quickcheck test to exercise `realloc`. Without the bug fix
470 |      in this patch, this quickcheck immediately triggers invalid reads when run
471 |      under `valgrind`. We didn't previously have quickchecks that exercised
472 |      `realloc` because `realloc` isn't publicly exposed directly, and instead
473 |      can only be indirectly called. This new quickcheck test exercises `realloc`
474 |      via `bumpalo::collections::Vec::resize` and
475 |      `bumpalo::collections::Vec::shrink_to_fit` calls.
476 | 
477 |   This bug was introduced in version 3.0.0.
478 | 
479 |   See [#69](https://github.com/fitzgen/bumpalo/issues/69) for details.
480 | 
481 | --------------------------------------------------------------------------------
482 | 
483 | ## 3.2.0
484 | 
485 | Released 2020-02-07.
486 | 
487 | ### Added
488 | 
489 | * Added the `bumpalo::collections::Vec::into_bump_slice_mut` method to turn a
490 |   `bumpalo::collections::Vec<'bump, T>` into a `&'bump mut [T]`.
491 | 
492 | --------------------------------------------------------------------------------
493 | 
494 | ## 3.1.2
495 | 
496 | Released 2020-01-07.
497 | 
498 | ### Fixed
499 | 
500 | * The `bumpalo::collections::format!` macro previously did not accept a trailing
501 |   comma like `format!(in bump; "{}", 1,)`, but it does now.
502 | 
503 | --------------------------------------------------------------------------------
504 | 
505 | ## 3.1.1
506 | 
507 | Released 2020-01-03.
508 | 
509 | ### Fixed
510 | 
511 | * The `bumpalo::collections::vec!` macro previously did not accept a trailing
512 |   comma like `vec![in bump; 1, 2,]`, but it does now.
513 | 
514 | --------------------------------------------------------------------------------
515 | 
516 | ## 3.1.0
517 | 
518 | Released 2019-12-27.
519 | 
520 | ### Added
521 | 
522 | * Added the `Bump::allocated_bytes` diagnostic method for counting the total
523 |   number of bytes a `Bump` has allocated.
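
  A small usage sketch (the exact count depends on chunk sizes and any
  alignment padding, so only a lower bound is asserted here):

  ```rust
  use bumpalo::Bump;

  let bump = Bump::new();
  bump.alloc([0u8; 100]);

  // The arena has handed out at least the 100 bytes requested above;
  // padding may make the total larger.
  assert!(bump.allocated_bytes() >= 100);
  ```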
524 | 525 | -------------------------------------------------------------------------------- 526 | 527 | # 3.0.0 528 | 529 | Released 2019-12-20. 530 | 531 | ## Added 532 | 533 | * Added `Bump::alloc_str` for copying string slices into a `Bump`. 534 | 535 | * Added `Bump::alloc_slice_copy` and `Bump::alloc_slice_clone` for copying or 536 | cloning slices into a `Bump`. 537 | 538 | * Added `Bump::alloc_slice_fill_iter` for allocating a slice in the `Bump` from 539 | an iterator. 540 | 541 | * Added `Bump::alloc_slice_fill_copy` and `Bump::alloc_slice_fill_clone` for 542 | creating slices of length `n` that are filled with copies or clones of an 543 | initial element. 544 | 545 | * Added `Bump::alloc_slice_fill_default` for creating slices of length `n` with 546 | the element type's default instance. 547 | 548 | * Added `Bump::alloc_slice_fill_with` for creating slices of length `n` whose 549 | elements are initialized with a function or closure. 550 | 551 | * Added `Bump::iter_allocated_chunks` as a replacement for the old 552 | `Bump::each_allocated_chunk`. The `iter_allocated_chunks` version returns an 553 | iterator, which is more idiomatic than its old, callback-taking counterpart. 554 | Additionally, `iter_allocated_chunks` exposes the chunks as `MaybeUninit`s 555 | instead of slices, which makes it usable in more situations without triggering 556 | undefined behavior. See also the note about bump direction in the "changed" 557 | section; if you're iterating chunks, you're likely affected by that change! 558 | 559 | * Added `Bump::with_capacity` so that you can pre-allocate a chunk with the 560 | requested space. 561 | 562 | ### Changed 563 | 564 | * **BREAKING:** The direction we allocate within a chunk has changed. It used to 565 | be "upwards", from low addresses within a chunk towards high addresses. It is 566 | now "downwards", from high addresses towards lower addresses. 567 | 568 | Additionally, the order in which we iterate over allocated chunks has changed! 569 | We used to iterate over chunks from oldest chunk to youngest chunk, and now we 570 | do the opposite: the youngest chunks are iterated over first, and the oldest 571 | chunks are iterated over last. 572 | 573 | If you were using `Bump::each_allocated_chunk` to iterate over data that you 574 | had previously allocated, and *you want to iterate in order of 575 | oldest-to-youngest allocation*, you need to reverse the chunks iterator and 576 | also reverse the order in which you loop through the data within a chunk! 577 | 578 | For example, if you had this code: 579 | 580 | ```rust 581 | unsafe { 582 | bump.each_allocated_chunk(|chunk| { 583 | for byte in chunk { 584 | // Touch each byte in oldest-to-youngest allocation order... 585 | } 586 | }); 587 | } 588 | ``` 589 | 590 | It should become this code: 591 | 592 | ```rust 593 | let mut chunks: Vec<_> = bump.iter_allocated_chunks().collect(); 594 | chunks.reverse(); 595 | for chunk in chunks { 596 | for byte in chunk.iter().rev() { 597 | let byte = unsafe { byte.assume_init() }; 598 | // Touch each byte in oldest-to-youngest allocation order... 599 | } 600 | } 601 | ``` 602 | 603 | The good news is that this change yielded a *speed up in allocation throughput 604 | of 3-19%!* 605 | 606 | See https://github.com/fitzgen/bumpalo/pull/37 and 607 | https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html for details. 608 | 609 | * **BREAKING:** The `collections` cargo feature is no longer on by default. 
You 610 | must explicitly turn it on if you intend to use the `bumpalo::collections` 611 | module. 612 | 613 | * `Bump::reset` will now retain only the last allocated chunk (the biggest), 614 | rather than only the first allocated chunk (the smallest). This should enable 615 | `Bump` to better adapt to workload sizes and quickly reach a steady state 616 | where new chunks are not requested from the global allocator. 617 | 618 | ### Removed 619 | 620 | * The `Bump::each_allocated_chunk` method is removed in favor of 621 | `Bump::iter_allocated_chunks`. Note that its safety requirements for reading 622 | from the allocated chunks are slightly different from the old 623 | `each_allocated_chunk`: only up to 16-byte alignment is supported now. If you 624 | allocate anything with greater alignment than that into the bump arena, there 625 | might be uninitialized padding inserted in the chunks, and therefore it is no 626 | longer safe to read them via `MaybeUninit::assume_init`. See also the note 627 | about bump direction in the "changed" section; if you're iterating chunks, 628 | you're likely affected by that change! 629 | 630 | * The `std` cargo feature has been removed, since this crate is now always 631 | no-std. 632 | 633 | ## Fixed 634 | 635 | * Fixed a bug involving potential integer overflows with large requested 636 | allocation sizes. 637 | 638 | -------------------------------------------------------------------------------- 639 | 640 | # 2.6.0 641 | 642 | Released 2019-08-19. 643 | 644 | * Implement `Send` for `Bump`. 645 | 646 | -------------------------------------------------------------------------------- 647 | 648 | # 2.5.0 649 | 650 | Released 2019-07-01. 651 | 652 | * Add `alloc_slice_copy` and `alloc_slice_clone` methods that allocate space for 653 | slices and either copy (with bound `T: Copy`) or clone (with bound `T: Clone`) 654 | the provided slice's data into the newly allocated space. 655 | 656 | -------------------------------------------------------------------------------- 657 | 658 | # 2.4.3 659 | 660 | Released 2019-05-20. 661 | 662 | * Fixed a bug where chunks were always deallocated with the default chunk 663 | layout, not the layout that the chunk was actually allocated with (i.e. if we 664 | started growing larger chunks with larger layouts, we would deallocate those 665 | chunks with an incorrect layout). 666 | 667 | -------------------------------------------------------------------------------- 668 | 669 | # 2.4.2 670 | 671 | Released 2019-05-17. 672 | 673 | * Added an implementation `Default` for `Bump`. 674 | * Made it so that if bump allocation within a chunk overflows, we still try to 675 | allocate a new chunk to bump out of for the requested allocation. This can 676 | avoid some OOMs in scenarios where the chunk we are currently allocating out 677 | of is very near the high end of the address space, and there is still 678 | available address space lower down for new chunks. 679 | 680 | -------------------------------------------------------------------------------- 681 | 682 | # 2.4.1 683 | 684 | Released 2019-04-19. 685 | 686 | * Added readme metadata to Cargo.toml so it shows up on crates.io 687 | 688 | -------------------------------------------------------------------------------- 689 | 690 | # 2.4.0 691 | 692 | Released 2019-04-19. 693 | 694 | * Added support for `realloc`ing in-place when the pointer being `realloc`ed is 695 | the last allocation made from the bump arena. 
This should speed up various 696 | `String`, `Vec`, and `format!` operations in many cases. 697 | 698 | -------------------------------------------------------------------------------- 699 | 700 | # 2.3.0 701 | 702 | Released 2019-03-26. 703 | 704 | * Add the `alloc_with` method, that (usually) avoids stack-allocating the 705 | allocated value and then moving it into the bump arena. This avoids potential 706 | stack overflows in release mode when allocating very large objects, and also 707 | some `memcpy` calls. This is similar to the `copyless` crate. Read [the 708 | `alloc_with` doc comments][alloc-with-doc-comments] and [the original issue 709 | proposing this API][issue-proposing-alloc-with] for more. 710 | 711 | [alloc-with-doc-comments]: https://github.com/fitzgen/bumpalo/blob/9f47aee8a6839ba65c073b9ad5372aacbbd02352/src/lib.rs#L436-L475 712 | [issue-proposing-alloc-with]: https://github.com/fitzgen/bumpalo/issues/10 713 | 714 | -------------------------------------------------------------------------------- 715 | 716 | # 2.2.2 717 | 718 | Released 2019-03-18. 719 | 720 | * Fix a regression from 2.2.1 where chunks were not always aligned to the chunk 721 | footer's alignment. 722 | 723 | -------------------------------------------------------------------------------- 724 | 725 | # 2.2.1 726 | 727 | Released 2019-03-18. 728 | 729 | * Fix a regression in 2.2.0 where newly allocated bump chunks could fail to have 730 | capacity for a large requested bump allocation in some corner cases. 731 | 732 | -------------------------------------------------------------------------------- 733 | 734 | # 2.2.0 735 | 736 | Released 2019-03-15. 737 | 738 | * Chunks in an arena now start out small, and double in size as more chunks are 739 | requested. 740 | 741 | -------------------------------------------------------------------------------- 742 | 743 | # 2.1.0 744 | 745 | Released 2019-02-12. 746 | 747 | * Added the `into_bump_slice` method on `bumpalo::collections::Vec`. 748 | 749 | -------------------------------------------------------------------------------- 750 | 751 | # 2.0.0 752 | 753 | Released 2019-02-11. 754 | 755 | * Removed the `BumpAllocSafe` trait. 756 | * Correctly detect overflows from large allocations and panic. 757 | 758 | -------------------------------------------------------------------------------- 759 | 760 | # 1.2.0 761 | 762 | Released 2019-01-15. 763 | 764 | * Fixed an overly-aggressive `debug_assert!` that had false positives. 765 | * Ported to Rust 2018 edition. 766 | 767 | -------------------------------------------------------------------------------- 768 | 769 | # 1.1.0 770 | 771 | Released 2018-11-28. 772 | 773 | * Added the `collections` module, which contains ports of `std`'s collection 774 | types that are compatible with backing their storage in `Bump` arenas. 775 | * Lifted the limits on size and alignment of allocations. 776 | 777 | -------------------------------------------------------------------------------- 778 | 779 | # 1.0.2 780 | 781 | -------------------------------------------------------------------------------- 782 | 783 | # 1.0.1 784 | 785 | -------------------------------------------------------------------------------- 786 | 787 | # 1.0.0 788 | -------------------------------------------------------------------------------- /src/collections/raw_vec.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT
2 | // file at the top-level directory of this distribution and at
3 | // http://rust-lang.org/COPYRIGHT.
4 | //
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 | // option. This file may not be copied, modified, or distributed
9 | // except according to those terms.
10 | 
11 | #![allow(unstable_name_collisions)]
12 | #![allow(dead_code)]
13 | 
14 | use crate::Bump;
15 | 
16 | use core::cmp;
17 | use core::mem;
18 | use core::ptr::{self, NonNull};
19 | 
20 | use crate::alloc::{handle_alloc_error, Alloc, Layout, UnstableLayoutMethods};
21 | use crate::collections::CollectionAllocErr;
22 | use crate::collections::CollectionAllocErr::*;
23 | // use boxed::Box;
24 | 
25 | /// A low-level utility for more ergonomically allocating, reallocating, and deallocating
26 | /// a buffer of memory on the heap without having to worry about all the corner cases
27 | /// involved. This type is excellent for building your own data structures like Vec and VecDeque.
28 | /// In particular:
29 | ///
30 | /// * Produces Unique::empty() on zero-sized types
31 | /// * Produces Unique::empty() on zero-length allocations
32 | /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
33 | /// * Guards against 32-bit systems allocating more than isize::MAX bytes
34 | /// * Guards against overflowing your length
35 | /// * Aborts on OOM
36 | /// * Avoids freeing Unique::empty()
37 | /// * Contains a ptr::Unique and thus endows the user with all related benefits
38 | ///
39 | /// This type does not in any way inspect the memory that it manages. When dropped it *will*
40 | /// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
41 | /// to handle the actual things *stored* inside of a RawVec.
42 | ///
43 | /// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
44 | /// This enables you to use capacity growing logic to catch the overflows in your length
45 | /// that might occur with zero-sized types.
46 | ///
47 | /// However this means that you need to be careful when round-tripping this type
48 | /// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
49 | /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
50 | /// field. This allows zero-sized types to not be special-cased by consumers of
51 | /// this type.
52 | #[allow(missing_debug_implementations)]
53 | pub struct RawVec<'a, T> {
54 |     ptr: NonNull<T>,
55 |     cap: usize,
56 |     a: &'a Bump,
57 | }
58 | 
59 | impl<'a, T> RawVec<'a, T> {
60 |     /// Like `new` but parameterized over the choice of allocator for
61 |     /// the returned RawVec.
62 |     pub fn new_in(a: &'a Bump) -> Self {
63 |         // `cap: 0` means "unallocated". zero-sized types are ignored.
64 |         RawVec {
65 |             ptr: NonNull::dangling(),
66 |             cap: 0,
67 |             a,
68 |         }
69 |     }
70 | 
71 |     /// Like `with_capacity` but parameterized over the choice of
72 |     /// allocator for the returned RawVec.
73 |     #[inline]
74 |     pub fn with_capacity_in(cap: usize, a: &'a Bump) -> Self {
75 |         RawVec::allocate_in(cap, false, a)
76 |     }
77 | 
78 |     /// Like `with_capacity_zeroed` but parameterized over the choice
79 |     /// of allocator for the returned RawVec.
    #[inline]
    pub fn with_capacity_zeroed_in(cap: usize, a: &'a Bump) -> Self {
        RawVec::allocate_in(cap, true, a)
    }

    fn allocate_in(cap: usize, zeroed: bool, mut a: &'a Bump) -> Self {
        unsafe {
            let elem_size = mem::size_of::<T>();

            let alloc_size = cap
                .checked_mul(elem_size)
                .unwrap_or_else(|| capacity_overflow());
            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());

            // handles ZSTs and `cap = 0` alike
            let ptr = if alloc_size == 0 {
                NonNull::<T>::dangling()
            } else {
                let align = mem::align_of::<T>();
                let layout = Layout::from_size_align(alloc_size, align).unwrap();
                let result = if zeroed {
                    a.alloc_zeroed(layout)
                } else {
                    Alloc::alloc(&mut a, layout)
                };
                match result {
                    Ok(ptr) => ptr.cast(),
                    Err(_) => handle_alloc_error(layout),
                }
            };

            RawVec { ptr, cap, a }
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Undefined Behavior
    ///
    /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
    /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
    /// If the ptr and capacity come from a `RawVec` created via `a`, then this is guaranteed.
    pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: &'a Bump) -> Self {
        RawVec {
            ptr: NonNull::new_unchecked(ptr),
            cap,
            a,
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::empty()` if `cap = 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            !0
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn bump(&self) -> &'a Bump {
        self.a
    }

    fn current_layout(&self) -> Option<Layout> {
        if self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                Some(Layout::from_size_align_unchecked(size, align))
            }
        }
    }

    /// Doubles the size of the type's backing allocation. This is common enough
    /// to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// This function is ideal when pushing elements one at a time because
    /// you don't need to incur the costs of the more general computations
    /// reserve needs to do to guard against overflow. You do however need to
    /// manually check if your `len == cap`.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # #![feature(alloc, raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T> MyVec<T> {
    ///     pub fn push(&mut self, elem: T) {
    ///         if self.len == self.buf.cap() { self.buf.double(); }
    ///         // double would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         unsafe {
    ///             ptr::write(self.buf.ptr().add(self.len), elem);
    ///         }
    ///         self.len += 1;
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vec = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vec.push(1);
    /// # }
    /// ```
    #[inline(never)]
    #[cold]
    pub fn double(&mut self) {
        unsafe {
            let elem_size = mem::size_of::<T>();

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            let (new_cap, uniq) = match self.current_layout() {
                Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be too large as to "not be
                    // satisfiable", so `Layout::from_size_align` will always
                    // return `Some`.
                    //
                    // tl;dr: we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
                    let new_cap = 2 * self.cap;
                    let new_size = new_cap * elem_size;
                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
                    let ptr_res = self.a.realloc(self.ptr.cast(), cur, new_size);
                    match ptr_res {
                        Ok(ptr) => (new_cap, ptr.cast()),
                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
                            new_size,
                            cur.align(),
                        )),
                    }
                }
                None => {
                    // skip to 4 because tiny Vecs are dumb; but not if that
                    // would cause overflow
                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
                    match self.a.alloc_array::<T>(new_cap) {
                        Ok(ptr) => (new_cap, ptr),
                        Err(_) => handle_alloc_error(Layout::array::<T>(new_cap).unwrap()),
                    }
                }
            };
            self.ptr = uniq;
            self.cap = new_cap;
        }
    }

    /// Attempts to double the size of the type's backing allocation in place. This is common
    /// enough to want to do that it's easiest to just have a dedicated method. Slightly
    /// more efficient logic can be provided for this than the general case.
    ///
    /// Returns `true` if the reallocation attempt has succeeded, or `false` otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if T is zero-sized on the assumption that you managed to exhaust
    ///   all `usize::MAX` slots in your imaginary buffer.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
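    ///
    /// A sketch of the intended call pattern (hypothetical caller code,
    /// mirroring the `push` example above):
    ///
    /// ```ignore
    /// if self.len == self.buf.cap() && !self.buf.double_in_place() {
    ///     // In-place growth failed, so fall back to a moving reallocation.
    ///     self.buf.double();
    /// }
    /// ```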
    #[inline(never)]
    #[cold]
    pub fn double_in_place(&mut self) -> bool {
        unsafe {
            let elem_size = mem::size_of::<T>();
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false, // nothing to double
            };

            // since we set the capacity to usize::MAX when elem_size is
            // 0, getting to here necessarily means the RawVec is overfull.
            assert!(elem_size != 0, "capacity overflow");

            // Since we guarantee that we never allocate more than isize::MAX
            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
            // this can't overflow.
            //
            // Similarly to `double` above, we can go straight to
            // `Layout::from_size_align_unchecked` as we know this won't
            // overflow and the alignment is sufficiently small.
            let new_cap = 2 * self.cap;
            let new_size = new_cap * elem_size;
            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
            match self.a.grow_in_place(self.ptr.cast(), old_layout, new_size) {
                Ok(_) => {
                    // We can't directly divide `size`.
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.fallible_reserve_internal(used_cap, needed_extra_cap, Exact)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already,
    /// will reallocate the minimum possible amount of memory necessary.
    /// Generally this will be exactly the amount of memory necessary,
    /// but in principle the allocator is free to give back more than
    /// we asked for.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
        self.infallible_reserve_internal(used_cap, needed_extra_cap, Exact)
    }

    /// Calculates the buffer's new capacity given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity.
    fn amortized_new_size(
        &self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<usize, CollectionAllocErr> {
        // Nothing we can really do about these checks :(
        let required_cap = used_cap
            .checked_add(needed_extra_cap)
            .ok_or(CapacityOverflow)?;
        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
        let double_cap = self.cap * 2;
        // `double_cap` guarantees exponential growth.
        Ok(cmp::max(double_cap, required_cap))
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
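    ///
    /// For example (a sketch; the surrounding collection type and its fields
    /// are hypothetical, in the style of the `reserve` example below):
    ///
    /// ```ignore
    /// fn try_push(&mut self, elem: T) -> Result<(), CollectionAllocErr> {
    ///     self.buf.try_reserve(self.len, 1)?;
    ///     unsafe { ptr::write(self.buf.ptr().add(self.len), elem); }
    ///     self.len += 1;
    ///     Ok(())
    /// }
    /// ```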
    pub fn try_reserve(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
    ) -> Result<(), CollectionAllocErr> {
        self.fallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
    }

    /// Ensures that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # #![feature(alloc, raw_vec_internals)]
    /// # extern crate alloc;
    /// # use std::ptr;
    /// # use alloc::raw_vec::RawVec;
    /// struct MyVec<T> {
    ///     buf: RawVec<T>,
    ///     len: usize,
    /// }
    ///
    /// impl<T: Clone> MyVec<T> {
    ///     pub fn push_all(&mut self, elems: &[T]) {
    ///         self.buf.reserve(self.len, elems.len());
    ///         // reserve would have aborted or panicked if the len exceeded
    ///         // `isize::MAX` so this is safe to do unchecked now.
    ///         for x in elems {
    ///             unsafe {
    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
    ///             }
    ///             self.len += 1;
    ///         }
    ///     }
    /// }
    /// # fn main() {
    /// #     let mut vector = MyVec { buf: RawVec::new(), len: 0 };
    /// #     vector.push_all(&[1, 3, 5, 7, 9]);
    /// # }
    /// ```
    #[inline(always)]
    pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
        self.infallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
    }

    /// Attempts to ensure that the buffer contains at least enough space to hold
    /// `used_cap + needed_extra_cap` elements. If it doesn't already have
    /// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
    /// if it would needlessly cause itself to panic.
    ///
    /// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// Returns `true` if the reallocation attempt has succeeded, or `false` otherwise.
    ///
    /// # Panics
    ///
    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
    /// * Panics on 32-bit platforms if the requested capacity exceeds
    ///   `isize::MAX` bytes.
    pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
        unsafe {
            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Don't actually need any more capacity. If the current `cap` is 0, we can't
            // reallocate in place.
            // Wrapping in case they give a bad `used_cap`.
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }

            let new_cap = self
                .amortized_new_size(used_cap, needed_extra_cap)
                .unwrap_or_else(|_| capacity_overflow());

            // Here, `cap < used_cap + needed_extra_cap <= new_cap`
            // (regardless of whether `self.cap - used_cap` wrapped).
            // Therefore we can safely call grow_in_place.

            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
            // FIXME: may crash and burn on over-reserve
            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
            match self
                .a
                .grow_in_place(self.ptr.cast(), old_layout, new_layout.size())
            {
                Ok(_) => {
                    self.cap = new_cap;
                    true
                }
                Err(_) => false,
            }
        }
    }

    /// Shrinks the allocation down to the specified amount. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    pub fn shrink_to_fit(&mut self, amount: usize) {
        let elem_size = mem::size_of::<T>();

        // Set the `cap` because they might be about to promote to a `Box<[T]>`
        if elem_size == 0 {
            self.cap = amount;
            return;
        }

        // This check is my waterloo; it's the only thing Vec wouldn't have to do.
        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");

        if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use ptr::write to avoid an
            // erroneous attempt to drop the contents, and we use
            // ptr::read to sidestep condition against destructuring
            // types that implement Drop.

            unsafe {
                let a = self.a;
                self.dealloc_buffer();
                ptr::write(self, RawVec::new_in(a));
            }
        } else if self.cap != amount {
            unsafe {
                // We know here that our `amount` is greater than zero. This
                // implies, via the assert above, that capacity is also greater
                // than zero, which means that we've got a current layout that
                // "fits".
                //
                // We also know that `self.cap` is greater than `amount`, and
                // consequently we don't need runtime checks for creating either
                // layout.
                let old_size = elem_size * self.cap;
                let new_size = elem_size * amount;
                let align = mem::align_of::<T>();
                let old_layout = Layout::from_size_align_unchecked(old_size, align);
                match self.a.realloc(self.ptr.cast(), old_layout, new_size) {
                    Ok(p) => self.ptr = p.cast(),
                    Err(_) => {
                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
                    }
                }
            }
            self.cap = amount;
        }
    }
}

#[cfg(feature = "boxed")]
impl<'a, T> RawVec<'a, T> {
    /// Converts the entire buffer into `Box<[T]>`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed.
    /// (See the description of the type for details.)
    ///
    /// # Undefined Behavior
    ///
    /// All elements of `RawVec` must be initialized. Notice that
    /// the rules around uninitialized boxed values are not finalized yet,
    /// but until they are, it is advisable to avoid them.
    pub unsafe fn into_box(self) -> crate::boxed::Box<'a, [T]> {
        use crate::boxed::Box;

        // NOTE: not calling `cap()` here; actually using the real `cap` field!
        let slice = core::slice::from_raw_parts_mut(self.ptr(), self.cap);
        let output: Box<'a, [T]> = Box::from_raw(slice);
        mem::forget(self);
        output
    }
}

enum Fallibility {
    Fallible,
    Infallible,
}

use self::Fallibility::*;

enum ReserveStrategy {
    Exact,
    Amortized,
}

use self::ReserveStrategy::*;

impl<'a, T> RawVec<'a, T> {
    #[inline(always)]
    fn fallible_reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        // This portion of the method should always be inlined.
        if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
            return Ok(());
        }
        // This portion of the method should never be inlined, and will only be called when
        // the check above has confirmed that it is necessary.
        self.reserve_internal_or_error(used_cap, needed_extra_cap, Fallible, strategy)
    }

    #[inline(always)]
    fn infallible_reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) {
        // This portion of the method should always be inlined.
        if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
            return;
        }
        // This portion of the method should never be inlined, and will only be called when
        // the check above has confirmed that it is necessary.
        self.reserve_internal_or_panic(used_cap, needed_extra_cap, strategy)
    }

    #[inline(never)]
    fn reserve_internal_or_panic(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        strategy: ReserveStrategy,
    ) {
        // Delegates the call to `reserve_internal` and panics in the event of an error.
        // This allows the method to have a return type of `()`, simplifying the assembly at the
        // call site.
        match self.reserve_internal(used_cap, needed_extra_cap, Infallible, strategy) {
            Err(CapacityOverflow) => capacity_overflow(),
            Err(AllocErr) => unreachable!(),
            Ok(()) => { /* yay */ }
        }
    }

    #[inline(never)]
    fn reserve_internal_or_error(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        // Delegates the call to `reserve_internal`, which can be inlined.
        self.reserve_internal(used_cap, needed_extra_cap, fallibility, strategy)
    }

    /// Helper method to reserve additional space, reallocating the backing memory.
    /// The caller is responsible for confirming that there is not already enough space available.
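    ///
    /// (The two mode enums combine as follows: `Fallible` surfaces failures as
    /// `Err(CollectionAllocErr)`, while `Infallible` promotes `AllocErr` to
    /// `handle_alloc_error`; `Exact` reserves exactly `used_cap +
    /// needed_extra_cap`, while `Amortized` grows via `amortized_new_size`.)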
    fn reserve_internal(
        &mut self,
        used_cap: usize,
        needed_extra_cap: usize,
        fallibility: Fallibility,
        strategy: ReserveStrategy,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            use crate::AllocErr;

            // NOTE: we don't early branch on ZSTs here because we want this
            // to actually catch "asking for more than usize::MAX" in that case.
            // If we make it past the first branch then we are guaranteed to
            // panic.

            // Nothing we can really do about these checks :(
            let new_cap = match strategy {
                Exact => used_cap
                    .checked_add(needed_extra_cap)
                    .ok_or(CapacityOverflow)?,
                Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
            };
            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;

            alloc_guard(new_layout.size())?;

            let res = match self.current_layout() {
                Some(layout) => {
                    debug_assert!(new_layout.align() == layout.align());
                    self.a.realloc(self.ptr.cast(), layout, new_layout.size())
                }
                None => Alloc::alloc(&mut self.a, new_layout),
            };

            if let (Err(AllocErr), Infallible) = (&res, fallibility) {
                handle_alloc_error(new_layout);
            }

            self.ptr = res?.cast();
            self.cap = new_cap;

            Ok(())
        }
    }
}

impl<'a, T> RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    pub unsafe fn dealloc_buffer(&mut self) {
        let elem_size = mem::size_of::<T>();
        if elem_size != 0 {
            if let Some(layout) = self.current_layout() {
                self.a.dealloc(self.ptr.cast(), layout);
            }
        }
    }
}

impl<'a, T> Drop for RawVec<'a, T> {
    /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
    fn drop(&mut self) {
        unsafe {
            self.dealloc_buffer();
        }
    }
}

// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g. PAE or x32.

#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
    if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize {
        Err(CapacityOverflow)
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
fn capacity_overflow() -> ! {
    panic!("capacity overflow")
}
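// An added sanity check for `alloc_guard` (illustrative; not part of the
// original test suite): small sizes always pass, and on 32-bit targets any
// size above `isize::MAX` is rejected as a capacity overflow.
#[cfg(test)]
mod alloc_guard_tests {
    use super::*;

    #[test]
    fn small_sizes_pass() {
        assert!(alloc_guard(0).is_ok());
        assert!(alloc_guard(4096).is_ok());
    }

    #[cfg(target_pointer_width = "32")]
    #[test]
    fn huge_sizes_overflow_on_32_bit() {
        assert!(alloc_guard((isize::MAX as usize) + 1).is_err());
    }
}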

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reserve_does_not_overallocate() {
        let bump = Bump::new();
        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            // First `reserve` allocates like `reserve_exact`.
            v.reserve(0, 9);
            assert_eq!(9, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 7);
            assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
            // like `reserve_exact`.
            v.reserve(7, 90);
            assert_eq!(97, v.cap());
        }

        {
            let mut v: RawVec<u32> = RawVec::new_in(&bump);
            v.reserve(0, 12);
            assert_eq!(12, v.cap());
            v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test the growth
            // factor is 2, so the new capacity is 24; however, a growth
            // factor of 1.5 would be OK too. Hence `>= 18` in the assert.
            assert!(v.cap() >= 12 + 12 / 2);
        }
    }
}
--------------------------------------------------------------------------------