├── .github └── workflows │ └── rust.yaml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── README.md ├── allocator_api.rs ├── benches.rs └── tables.toml ├── bumpalo.png ├── src ├── alloc.rs ├── boxed.rs ├── collections │ ├── collect_in.rs │ ├── mod.rs │ ├── raw_vec.rs │ ├── str │ │ ├── lossy.rs │ │ └── mod.rs │ ├── string.rs │ └── vec.rs └── lib.rs ├── tests ├── all │ ├── alloc_fill.rs │ ├── alloc_try_with.rs │ ├── alloc_with.rs │ ├── allocation_limit.rs │ ├── allocator_api.rs │ ├── boxed.rs │ ├── capacity.rs │ ├── collect_in.rs │ ├── main.rs │ ├── quickcheck.rs │ ├── quickchecks.rs │ ├── serde │ │ ├── boxed.rs │ │ ├── mod.rs │ │ ├── string.rs │ │ └── vec.rs │ ├── string.rs │ ├── tests.rs │ ├── try_alloc_try_with.rs │ ├── try_alloc_with.rs │ └── vec.rs └── try_alloc.rs └── valgrind.supp /.github/workflows/rust.yaml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUST_BACKTRACE: 1 12 | 13 | jobs: 14 | fmt: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Install rustup 18 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 19 | - name: Install rustfmt 20 | run: rustup component add rustfmt 21 | - uses: actions/checkout@v4 22 | - name: Check formatting 23 | run: cargo fmt --check 24 | 25 | build: 26 | strategy: 27 | matrix: 28 | rust_channel: ["stable", "beta", "nightly", "1.73.0"] 29 | feature_set: ["--features collections,boxed"] 30 | include: 31 | - rust_channel: "nightly" 32 | feature_set: "--all-features" 33 | - rust_channel: "stable" 34 | feature_set: "--no-default-features" 35 | exclude: 36 | - rust_channel: "nightly" 37 | feature_set: "--features collections,boxed" 38 | 39 | runs-on: ubuntu-latest 40 | steps: 41 | - name: Install rustup 42 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 43 | - name: Install rust channel 44 | run: rustup install ${{matrix.rust_channel}} && rustup default ${{matrix.rust_channel}} 45 | 46 | - uses: actions/checkout@v4 47 | 48 | - name: Run tests (no features) 49 | run: cargo test --verbose 50 | - name: Run tests (features) 51 | run: cargo test --verbose ${{matrix.feature_set}} 52 | 53 | miri: 54 | runs-on: ubuntu-latest 55 | 56 | env: 57 | MIRIFLAGS: "-Zmiri-strict-provenance -Zmiri-ignore-leaks" 58 | 59 | steps: 60 | - name: Install rustup 61 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain nightly -y 62 | - name: Install miri 63 | run: rustup toolchain install nightly --allow-downgrade --profile minimal --component miri 64 | 65 | - uses: actions/checkout@v4 66 | 67 | - name: Run miri 68 | run: cargo miri test --all-features 69 | 70 | valgrind: 71 | runs-on: ubuntu-latest 72 | 73 | env: 74 | # Don't leak-check, as Rust globals tend to cause false positives. 
75 | CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER: "valgrind --suppressions=valgrind.supp --leak-check=no --error-exitcode=1 --gen-suppressions=all" 76 | 77 | steps: 78 | - name: Install rustup 79 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 80 | - name: Install rust stable 81 | run: rustup install stable && rustup default stable 82 | 83 | - name: Install valgrind 84 | run: sudo apt update && sudo apt install valgrind 85 | 86 | - uses: actions/checkout@v4 87 | 88 | - name: Test under valgrind (no features) 89 | run: cargo test --verbose 90 | - name: Test under valgrind (features) 91 | run: cargo test --verbose --features collections,boxed 92 | 93 | benches: 94 | runs-on: ubuntu-latest 95 | 96 | steps: 97 | - name: Install rustup 98 | run: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal 99 | - name: Install rust nightly 100 | run: rustup install nightly && rustup default nightly 101 | 102 | - uses: actions/checkout@v4 103 | 104 | - name: Check that benches build 105 | run: cargo check --benches --all-features 106 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | benches/results.json 4 | *perf.data* 5 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## Unreleased 2 | 3 | Released YYYY-MM-DD. 4 | 5 | ### Added 6 | 7 | * TODO (or remove section if none) 8 | 9 | ### Changed 10 | 11 | * TODO (or remove section if none) 12 | 13 | ### Deprecated 14 | 15 | * TODO (or remove section if none) 16 | 17 | ### Removed 18 | 19 | * TODO (or remove section if none) 20 | 21 | ### Fixed 22 | 23 | * TODO (or remove section if none) 24 | 25 | ### Security 26 | 27 | * TODO (or remove section if none) 28 | 29 | -------------------------------------------------------------------------------- 30 | 31 | ## 3.17.0 32 | 33 | Released 2025-01-28. 34 | 35 | ### Added 36 | 37 | * Added a bunch of `try_` allocation methods for slices and `str`: 38 | * `try_alloc_slice_fill_default` 39 | * `try_alloc_slice_fill_iter` 40 | * `try_alloc_slice_fill_clone` 41 | * `try_alloc_slice_fill_copy` 42 | * `try_alloc_slice_fill_with` 43 | * `try_alloc_str` 44 | * `try_alloc_slice_clone` 45 | * `try_alloc_slice_copy` 46 | 47 | ### Changed 48 | 49 | * Minimum supported Rust version reduced to 1.71.1 50 | 51 | ### Fixed 52 | 53 | * Fixed a stacked-borrows MIRI bug in `dealloc` 54 | 55 | -------------------------------------------------------------------------------- 56 | 57 | ## 3.16.0 58 | 59 | Released 2024-04-08. 60 | 61 | ### Added 62 | 63 | * Added an optional, off-by-default dependency on the `serde` crate. Enabling 64 | this dependency allows you to serialize Bumpalo's collection and box 65 | types. Deserialization is not implemented, due to constraints of the 66 | deserialization trait. 67 | 68 | -------------------------------------------------------------------------------- 69 | 70 | ## 3.15.4 71 | 72 | Released 2024-03-07. 73 | 74 | ### Added 75 | 76 | * Added the `bumpalo::collections::Vec::extend_from_slices_copy` method, which 77 | is a faster way to extend a vec from multiple slices when the element is 78 | `Copy` than calling `extend_from_slice_copy` N times. 
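  For illustration, a minimal sketch of the new method (assuming the
  `collections` cargo feature is enabled):

  ```rust
  use bumpalo::{collections::Vec, Bump};

  let bump = Bump::new();
  let mut v = Vec::new_in(&bump);

  // One capacity reservation and one copy pass for all three slices,
  // rather than one per `extend_from_slice_copy` call.
  v.extend_from_slices_copy(&[&[1, 2, 3], &[4, 5], &[6]]);
  assert_eq!(v.as_slice(), &[1, 2, 3, 4, 5, 6]);
  ```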
79 | 
80 | --------------------------------------------------------------------------------
81 | 
82 | ## 3.15.3
83 | 
84 | Released 2024-02-22.
85 | 
86 | ### Added
87 | 
88 | * Added additional performance improvements to `bumpalo::collections::Vec`
89 |   related to reserving capacity.
90 | 
91 | --------------------------------------------------------------------------------
92 | 
93 | ## 3.15.2
94 | 
95 | Released 2024-02-21.
96 | 
97 | ### Added
98 | 
99 | * Add a `bumpalo::collections::Vec::extend_from_slice_copy` method. This doesn't
100 |   exist on the standard library's `Vec`, which can instead rely on
101 |   specialization: its regular `extend_from_slice` is already specialized for
102 |   `Copy` types. Using this new method for `Copy` types is an ~80x performance
103 |   improvement over the plain `extend_from_slice` method.
104 | 
105 | --------------------------------------------------------------------------------
106 | 
107 | ## 3.15.1
108 | 
109 | Released 2024-02-20.
110 | 
111 | ### Fixed
112 | 
113 | * Fixed the MSRV listed in `Cargo.toml`, whose update was forgotten when the
114 |   MSRV was bumped in release 3.15.0.
115 | 
116 | --------------------------------------------------------------------------------
117 | 
118 | ## 3.15.0
119 | 
120 | Released 2024-02-15.
121 | 
122 | ### Changed
123 | 
124 | * The minimum supported Rust version (MSRV) is now 1.73.0.
125 | * `bumpalo::collections::String::push_str` and
126 |   `bumpalo::collections::String::from_str_in` received significant performance
127 |   improvements.
128 | * Allocator trait methods are now marked `#[inline]`, increasing performance for
129 |   some callers.
130 | 
131 | ### Fixed
132 | 
133 | * Fixed an edge-case bug in the `Allocator::shrink` method.
134 | 
135 | --------------------------------------------------------------------------------
136 | 
137 | ## 3.14.0
138 | 
139 | Released 2023-09-14.
140 | 
141 | ### Added
142 | 
143 | * Added the `std` cargo feature, which enables implementations of `std` traits
144 |   for various things. Right now that is just `std::io::Write` for
145 |   `bumpalo::collections::Vec`, but could be more in the future.
146 | 
147 | --------------------------------------------------------------------------------
148 | 
149 | ## 3.13.0
150 | 
151 | Released 2023-05-22.
152 | 
153 | ### Added
154 | 
155 | * New `"allocator-api2"` feature enables the use of the allocator API on
156 |   stable. This feature uses a crate that mirrors the API of the unstable Rust
157 |   `allocator_api` feature. If the feature is enabled, references to `Bump` will
158 |   implement `allocator_api2::Allocator`. This allows `Bump` to be used as an
159 |   allocator for collection types from `allocator-api2` and any other crates that
160 |   support `allocator-api2`.
161 | 
162 | ### Changed
163 | 
164 | * The minimum supported Rust version (MSRV) is now 1.63.0.
165 | 
166 | --------------------------------------------------------------------------------
167 | 
168 | ## 3.12.2
169 | 
170 | Released 2023-05-09.
171 | 
172 | ### Changed
173 | 
174 | * Added `rust-version` metadata to `Cargo.toml` which helps `cargo` with version
175 |   resolution.
176 | 
177 | --------------------------------------------------------------------------------
178 | 
179 | ## 3.12.1
180 | 
181 | Released 2023-04-21.
182 | 
183 | ### Fixed
184 | 
185 | * Fixed a bug where `Bump::try_with_capacity(n)` with `n > isize::MAX` could
186 |   lead to attempts to create invalid `Layout`s.
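  For illustration, a sketch of the behavior this fix guarantees; the exact
  capacity value here is only an example of a request too large for a valid
  `Layout`:

  ```rust
  use bumpalo::Bump;

  // Allocation sizes must not exceed `isize::MAX`, so no valid `Layout`
  // can back this request; `try_with_capacity` now reports an error
  // instead of constructing an invalid `Layout`.
  let result = Bump::try_with_capacity(isize::MAX as usize + 1);
  assert!(result.is_err());
  ```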
187 | 188 | -------------------------------------------------------------------------------- 189 | 190 | ## 3.12.0 191 | 192 | Released 2023-01-17. 193 | 194 | ### Added 195 | 196 | * Added the `bumpalo::boxed::Box::bump` and `bumpalo::collections::String::bump` 197 | getters to get the underlying `Bump` that a string or box was allocated into. 198 | 199 | ### Changed 200 | 201 | * Some uses of `Box` that MIRI did not previously consider as UB are now 202 | reported as UB, and `bumpalo`'s internals have been adjusted to avoid the new 203 | UB. 204 | 205 | -------------------------------------------------------------------------------- 206 | 207 | ## 3.11.1 208 | 209 | Released 2022-10-18. 210 | 211 | ### Security 212 | 213 | * Fixed a bug where when `std::vec::IntoIter` was ported to 214 | `bumpalo::collections::vec::IntoIter`, it didn't get its underlying `Bump`'s 215 | lifetime threaded through. This meant that `rustc` was not checking the 216 | borrows for `bumpalo::collections::IntoIter` and this could result in 217 | use-after-free bugs. 218 | 219 | -------------------------------------------------------------------------------- 220 | 221 | ## 3.11.0 222 | 223 | Released 2022-08-17. 224 | 225 | ### Added 226 | 227 | * Added support for per-`Bump` allocation limits. These are enforced only in the 228 | slow path when allocating new chunks in the `Bump`, not in the bump allocation 229 | hot path, and therefore impose near zero overhead. 230 | * Added the `bumpalo::boxed::Box::into_inner` method. 231 | 232 | ### Changed 233 | 234 | * Updated to Rust 2021 edition. 235 | * The minimum supported Rust version (MSRV) is now 1.56.0. 236 | 237 | -------------------------------------------------------------------------------- 238 | 239 | ## 3.10.0 240 | 241 | Released 2022-06-01. 242 | 243 | ### Added 244 | 245 | * Implement `bumpalo::collections::FromIteratorIn` for `Option` and `Result`, 246 | just like `core` does for `FromIterator`. 247 | * Implement `bumpalo::collections::FromIteratorIn` for `bumpalo::boxed::Box<'a, 248 | [T]>`. 249 | * Added running tests under MIRI in CI for additional confidence in unsafe code. 250 | * Publicly exposed `bumpalo::collections::Vec::drain_filter` since the 251 | corresponding `std::vec::Vec` method has stabilized. 252 | 253 | ### Changed 254 | 255 | * `Bump::new` will not allocate a backing chunk until the first allocation 256 | inside the bump arena now. 257 | 258 | ### Fixed 259 | 260 | * Properly account for alignment changes when growing or shrinking an existing 261 | allocation. 262 | * Removed all internal integer-to-pointer casts, to play better with UB checkers 263 | like MIRI. 264 | 265 | -------------------------------------------------------------------------------- 266 | 267 | ## 3.9.1 268 | 269 | Released 2022-01-06. 270 | 271 | ### Fixed 272 | 273 | * Fixed link to logo in docs and README.md 274 | 275 | -------------------------------------------------------------------------------- 276 | 277 | ## 3.9.0 278 | 279 | Released 2022-01-05. 280 | 281 | ### Changed 282 | 283 | * The minimum supported Rust version (MSRV) has been raised to Rust 1.54.0. 284 | 285 | * `bumpalo::collections::Vec` implements relevant traits for all arrays of 286 | any size `N` via const generics. Previously, it was just arrays up to length 287 | 32. Similar for `bumpalo::boxed::Box<[T; N]>`. 288 | 289 | -------------------------------------------------------------------------------- 290 | 291 | ## 3.8.0 292 | 293 | Released 2021-10-19. 
294 | 
295 | ### Added
296 | 
297 | * Added the `CollectIn` and `FromIteratorIn` traits to make building a
298 |   collection from an iterator easier. These new traits live in the
299 |   `bumpalo::collections` module and are implemented by
300 |   `bumpalo::collections::{String,Vec}`.
301 | 
302 | * Added the `Bump::iter_allocated_chunks_raw` method, which is an `unsafe`, raw
303 |   version of `Bump::iter_allocated_chunks`. The new method does not take an
304 |   exclusive borrow of the `Bump` and yields raw pointer-and-length pairs for
305 |   each chunk in the bump. It is the caller's responsibility to ensure that no
306 |   allocation happens in the `Bump` while iterating over chunks and that there
307 |   are no active borrows of allocated data if they want to turn any
308 |   pointer-and-length pairs into slices.
309 | 
310 | --------------------------------------------------------------------------------
311 | 
312 | ## 3.7.1
313 | 
314 | Released 2021-09-17.
315 | 
316 | ### Changed
317 | 
318 | * The packaged crate uploaded to crates.io when `bumpalo` is published is now
319 |   smaller, thanks to excluding unnecessary files.
320 | 
321 | --------------------------------------------------------------------------------
322 | 
323 | ## 3.7.0
324 | 
325 | Released 2021-05-28.
326 | 
327 | ### Added
328 | 
329 | * Added `Borrow` and `BorrowMut` trait implementations for
330 |   `bumpalo::collections::Vec` and
331 |   `bumpalo::collections::String`. [#108](https://github.com/fitzgen/bumpalo/pull/108)
332 | 
333 | ### Changed
334 | 
335 | * When allocating a new chunk fails, don't immediately give up. Instead, try
336 |   allocating a chunk that is half that size, and if that fails, then try half of
337 |   *that* size, etc., until we either successfully allocate a chunk or fail to
338 |   allocate even the minimum chunk size and finally give
339 |   up. [#111](https://github.com/fitzgen/bumpalo/pull/111)
340 | 
341 | --------------------------------------------------------------------------------
342 | 
343 | ## 3.6.1
344 | 
345 | Released 2021-02-18.
346 | 
347 | ### Added
348 | 
349 | * Improved performance of `Bump`'s `Allocator::grow_zeroed` trait method
350 |   implementation. [#99](https://github.com/fitzgen/bumpalo/pull/99)
351 | 
352 | --------------------------------------------------------------------------------
353 | 
354 | ## 3.6.0
355 | 
356 | Released 2021-01-29.
357 | 
358 | ### Added
359 | 
360 | * Added a few new flavors of allocation:
361 | 
362 |   * `try_alloc` for fallible, by-value allocation
363 | 
364 |   * `try_alloc_with` for fallible allocation with an infallible initializer
365 |     function
366 | 
367 |   * `alloc_try_with` for infallible allocation with a fallible initializer
368 |     function
369 | 
370 |   * `try_alloc_try_with` for fallible allocation with a fallible
371 |     initializer function
372 | 
373 |   We already have infallible, by-value allocation (`alloc`) and infallible
374 |   allocation with an infallible initializer (`alloc_with`). With these new
375 |   methods, we now have every combination covered.
376 | 
377 |   Thanks to [Tamme Schichler](https://github.com/Tamschi) for contributing these
378 |   methods!
379 | 
380 | --------------------------------------------------------------------------------
381 | 
382 | ## 3.5.0
383 | 
384 | Released 2021-01-22.
385 | 
386 | ### Added
387 | 
388 | * Added experimental, unstable support for the unstable, nightly Rust
389 |   `allocator_api` feature.
390 | 
391 |   The `allocator_api` feature defines an `Allocator` trait and exposes custom
392 |   allocators for `std` types. Bumpalo has a matching `allocator_api` cargo
393 |   feature to enable implementing `Allocator` and using `Bump` with `std`
394 |   collections.
395 | 
396 |   First, enable the `allocator_api` feature in your `Cargo.toml`:
397 | 
398 |   ```toml
399 |   [dependencies]
400 |   bumpalo = { version = "3.5", features = ["allocator_api"] }
401 |   ```
402 | 
403 |   Next, enable the `allocator_api` nightly Rust feature in your `src/lib.rs` or `src/main.rs`:
404 | 
405 |   ```rust
406 |   # #[cfg(feature = "allocator_api")]
407 |   # {
408 |   #![feature(allocator_api)]
409 |   # }
410 |   ```
411 | 
412 |   Finally, use `std` collections with `Bump`, so that their internal heap
413 |   allocations are made within the given bump arena:
414 | 
415 |   ```
416 |   # #![cfg_attr(feature = "allocator_api", feature(allocator_api))]
417 |   # #[cfg(feature = "allocator_api")]
418 |   # {
419 |   #![feature(allocator_api)]
420 |   use bumpalo::Bump;
421 | 
422 |   // Create a new bump arena.
423 |   let bump = Bump::new();
424 | 
425 |   // Create a `Vec` whose elements are allocated within the bump arena.
426 |   let mut v = Vec::new_in(&bump);
427 |   v.push(0);
428 |   v.push(1);
429 |   v.push(2);
430 |   # }
431 |   ```
432 | 
433 |   I'm very excited to see custom allocators in `std` coming along! Thanks to
434 |   Arthur Gautier for implementing support for the `allocator_api` feature for
435 |   Bumpalo.
436 | 
437 | --------------------------------------------------------------------------------
438 | 
439 | ## 3.4.0
440 | 
441 | Released 2020-06-01.
442 | 
443 | ### Added
444 | 
445 | * Added the `bumpalo::boxed::Box` type. It is an owned pointer referencing a
446 |   bump-allocated value, and it runs `T`'s `Drop` implementation on the
447 |   referenced value when dropped. This type can be used by enabling the `"boxed"`
448 |   cargo feature flag.
449 | 
450 | --------------------------------------------------------------------------------
451 | 
452 | ## 3.3.0
453 | 
454 | Released 2020-05-13.
455 | 
456 | ### Added
457 | 
458 | * Added fallible allocation methods to `Bump`: `try_new`, `try_with_capacity`,
459 |   and `try_alloc_layout`.
460 | 
461 | * Added `Bump::chunk_capacity`
462 | 
463 | * Added `bumpalo::collections::Vec::try_reserve[_exact]`
464 | 
465 | --------------------------------------------------------------------------------
466 | 
467 | ## 3.2.1
468 | 
469 | Released 2020-03-24.
470 | 
471 | ### Security
472 | 
473 | * When `realloc`ing, if we allocate new space, we need to copy the old
474 |   allocation's bytes into the new space. There are `old_size` number of bytes in
475 |   the old allocation, but we were accidentally copying `new_size` number of
476 |   bytes, which could lead to copying bytes into the realloc'd space from past
477 |   the chunk that we're bump allocating out of, from unknown memory.
478 | 
479 |   If an attacker can cause `realloc`s, and can read the `realloc`ed data back,
480 |   this could allow them to read things from other regions of memory that they
481 |   shouldn't be able to. For example, if some crypto keys happened to live in
482 |   memory right after a chunk we were bump allocating out of, this could allow
483 |   the attacker to read the crypto keys.
484 | 
485 |   Beyond just fixing the bug and adding a regression test, I've also taken two
486 |   additional steps:
487 | 
488 |   1. While we were already running the test suite under `valgrind` in CI, because
489 |      `valgrind` exits with the same code as the program, invalid reads/writes
490 |      that happen not to trigger a segfault can still let the program exit OK,
491 |      and we would be none the wiser. I've enabled the
492 |      `--error-exitcode=1` flag for `valgrind` in CI so that tests eagerly fail
493 |      in these scenarios.
494 | 
495 |   2. I've written a quickcheck test to exercise `realloc`. Without the bug fix
496 |      in this patch, this quickcheck immediately triggers invalid reads when run
497 |      under `valgrind`. We didn't previously have quickchecks that exercised
498 |      `realloc` because `realloc` isn't publicly exposed directly, and instead
499 |      can only be called indirectly. This new quickcheck test exercises `realloc`
500 |      via `bumpalo::collections::Vec::resize` and
501 |      `bumpalo::collections::Vec::shrink_to_fit` calls.
502 | 
503 |   This bug was introduced in version 3.0.0.
504 | 
505 |   See [#69](https://github.com/fitzgen/bumpalo/issues/69) for details.
506 | 
507 | --------------------------------------------------------------------------------
508 | 
509 | ## 3.2.0
510 | 
511 | Released 2020-02-07.
512 | 
513 | ### Added
514 | 
515 | * Added the `bumpalo::collections::Vec::into_bump_slice_mut` method to turn a
516 |   `bumpalo::collections::Vec<'bump, T>` into a `&'bump mut [T]`.
517 | 
518 | --------------------------------------------------------------------------------
519 | 
520 | ## 3.1.2
521 | 
522 | Released 2020-01-07.
523 | 
524 | ### Fixed
525 | 
526 | * The `bumpalo::collections::format!` macro previously did not accept a trailing
527 |   comma like `format!(in bump; "{}", 1,)`, but it does now.
528 | 
529 | --------------------------------------------------------------------------------
530 | 
531 | ## 3.1.1
532 | 
533 | Released 2020-01-03.
534 | 
535 | ### Fixed
536 | 
537 | * The `bumpalo::collections::vec!` macro previously did not accept a trailing
538 |   comma like `vec![in bump; 1, 2,]`, but it does now.
539 | 
540 | --------------------------------------------------------------------------------
541 | 
542 | ## 3.1.0
543 | 
544 | Released 2019-12-27.
545 | 
546 | ### Added
547 | 
548 | * Added the `Bump::allocated_bytes` diagnostic method for counting the total
549 |   number of bytes a `Bump` has allocated.
550 | 
551 | --------------------------------------------------------------------------------
552 | 
553 | ## 3.0.0
554 | 
555 | Released 2019-12-20.
556 | 
557 | ### Added
558 | 
559 | * Added `Bump::alloc_str` for copying string slices into a `Bump`.
560 | 
561 | * Added `Bump::alloc_slice_copy` and `Bump::alloc_slice_clone` for copying or
562 |   cloning slices into a `Bump`.
563 | 
564 | * Added `Bump::alloc_slice_fill_iter` for allocating a slice in the `Bump` from
565 |   an iterator.
566 | 
567 | * Added `Bump::alloc_slice_fill_copy` and `Bump::alloc_slice_fill_clone` for
568 |   creating slices of length `n` that are filled with copies or clones of an
569 |   initial element.
570 | 
571 | * Added `Bump::alloc_slice_fill_default` for creating slices of length `n` with
572 |   the element type's default instance.
573 | 
574 | * Added `Bump::alloc_slice_fill_with` for creating slices of length `n` whose
575 |   elements are initialized with a function or closure.
576 | 
577 | * Added `Bump::iter_allocated_chunks` as a replacement for the old
578 |   `Bump::each_allocated_chunk`. The `iter_allocated_chunks` version returns an
579 |   iterator, which is more idiomatic than its old, callback-taking counterpart.
580 | Additionally, `iter_allocated_chunks` exposes the chunks as `MaybeUninit`s 581 | instead of slices, which makes it usable in more situations without triggering 582 | undefined behavior. See also the note about bump direction in the "changed" 583 | section; if you're iterating chunks, you're likely affected by that change! 584 | 585 | * Added `Bump::with_capacity` so that you can pre-allocate a chunk with the 586 | requested space. 587 | 588 | ### Changed 589 | 590 | * **BREAKING:** The direction we allocate within a chunk has changed. It used to 591 | be "upwards", from low addresses within a chunk towards high addresses. It is 592 | now "downwards", from high addresses towards lower addresses. 593 | 594 | Additionally, the order in which we iterate over allocated chunks has changed! 595 | We used to iterate over chunks from oldest chunk to youngest chunk, and now we 596 | do the opposite: the youngest chunks are iterated over first, and the oldest 597 | chunks are iterated over last. 598 | 599 | If you were using `Bump::each_allocated_chunk` to iterate over data that you 600 | had previously allocated, and *you want to iterate in order of 601 | oldest-to-youngest allocation*, you need to reverse the chunks iterator and 602 | also reverse the order in which you loop through the data within a chunk! 603 | 604 | For example, if you had this code: 605 | 606 | ```rust 607 | unsafe { 608 | bump.each_allocated_chunk(|chunk| { 609 | for byte in chunk { 610 | // Touch each byte in oldest-to-youngest allocation order... 611 | } 612 | }); 613 | } 614 | ``` 615 | 616 | It should become this code: 617 | 618 | ```rust 619 | let mut chunks: Vec<_> = bump.iter_allocated_chunks().collect(); 620 | chunks.reverse(); 621 | for chunk in chunks { 622 | for byte in chunk.iter().rev() { 623 | let byte = unsafe { byte.assume_init() }; 624 | // Touch each byte in oldest-to-youngest allocation order... 625 | } 626 | } 627 | ``` 628 | 629 | The good news is that this change yielded a *speed up in allocation throughput 630 | of 3-19%!* 631 | 632 | See https://github.com/fitzgen/bumpalo/pull/37 and 633 | https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html for details. 634 | 635 | * **BREAKING:** The `collections` cargo feature is no longer on by default. You 636 | must explicitly turn it on if you intend to use the `bumpalo::collections` 637 | module. 638 | 639 | * `Bump::reset` will now retain only the last allocated chunk (the biggest), 640 | rather than only the first allocated chunk (the smallest). This should enable 641 | `Bump` to better adapt to workload sizes and quickly reach a steady state 642 | where new chunks are not requested from the global allocator. 643 | 644 | ### Removed 645 | 646 | * The `Bump::each_allocated_chunk` method is removed in favor of 647 | `Bump::iter_allocated_chunks`. Note that its safety requirements for reading 648 | from the allocated chunks are slightly different from the old 649 | `each_allocated_chunk`: only up to 16-byte alignment is supported now. If you 650 | allocate anything with greater alignment than that into the bump arena, there 651 | might be uninitialized padding inserted in the chunks, and therefore it is no 652 | longer safe to read them via `MaybeUninit::assume_init`. See also the note 653 | about bump direction in the "changed" section; if you're iterating chunks, 654 | you're likely affected by that change! 655 | 656 | * The `std` cargo feature has been removed, since this crate is now always 657 | no-std. 
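A minimal sketch of the slice-filling constructors from this release's "Added"
list above (the element types and values here are just illustrative):

```rust
use bumpalo::Bump;

let bump = Bump::new();

// `n` copies of an initial element (requires `T: Copy`).
let zeros = bump.alloc_slice_fill_copy(4, 0u8);
assert_eq!(zeros, &[0, 0, 0, 0]);

// Elements computed from their index by a closure.
let squares = bump.alloc_slice_fill_with(4, |i| (i * i) as u32);
assert_eq!(squares, &[0, 1, 4, 9]);
```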
658 | 
659 | ### Fixed
660 | 
661 | * Fixed a bug involving potential integer overflows with large requested
662 |   allocation sizes.
663 | 
664 | --------------------------------------------------------------------------------
665 | 
666 | # 2.6.0
667 | 
668 | Released 2019-08-19.
669 | 
670 | * Implement `Send` for `Bump`.
671 | 
672 | --------------------------------------------------------------------------------
673 | 
674 | # 2.5.0
675 | 
676 | Released 2019-07-01.
677 | 
678 | * Add `alloc_slice_copy` and `alloc_slice_clone` methods that allocate space for
679 |   slices and either copy (with bound `T: Copy`) or clone (with bound `T: Clone`)
680 |   the provided slice's data into the newly allocated space.
681 | 
682 | --------------------------------------------------------------------------------
683 | 
684 | # 2.4.3
685 | 
686 | Released 2019-05-20.
687 | 
688 | * Fixed a bug where chunks were always deallocated with the default chunk
689 |   layout, not the layout that the chunk was actually allocated with (i.e. if we
690 |   started growing larger chunks with larger layouts, we would deallocate those
691 |   chunks with an incorrect layout).
692 | 
693 | --------------------------------------------------------------------------------
694 | 
695 | # 2.4.2
696 | 
697 | Released 2019-05-17.
698 | 
699 | * Added an implementation of `Default` for `Bump`.
700 | * Made it so that if bump allocation within a chunk overflows, we still try to
701 |   allocate a new chunk to bump out of for the requested allocation. This can
702 |   avoid some OOMs in scenarios where the chunk we are currently allocating out
703 |   of is very near the high end of the address space, and there is still
704 |   available address space lower down for new chunks.
705 | 
706 | --------------------------------------------------------------------------------
707 | 
708 | # 2.4.1
709 | 
710 | Released 2019-04-19.
711 | 
712 | * Added readme metadata to Cargo.toml so it shows up on crates.io.
713 | 
714 | --------------------------------------------------------------------------------
715 | 
716 | # 2.4.0
717 | 
718 | Released 2019-04-19.
719 | 
720 | * Added support for `realloc`ing in-place when the pointer being `realloc`ed is
721 |   the last allocation made from the bump arena. This should speed up various
722 |   `String`, `Vec`, and `format!` operations in many cases.
723 | 
724 | --------------------------------------------------------------------------------
725 | 
726 | # 2.3.0
727 | 
728 | Released 2019-03-26.
729 | 
730 | * Add the `alloc_with` method, which (usually) avoids stack-allocating the
731 |   allocated value and then moving it into the bump arena. This avoids potential
732 |   stack overflows in release mode when allocating very large objects, and also
733 |   some `memcpy` calls. This is similar to the `copyless` crate. Read [the
734 |   `alloc_with` doc comments][alloc-with-doc-comments] and [the original issue
735 |   proposing this API][issue-proposing-alloc-with] for more.
736 | 
737 | [alloc-with-doc-comments]: https://github.com/fitzgen/bumpalo/blob/9f47aee8a6839ba65c073b9ad5372aacbbd02352/src/lib.rs#L436-L475
738 | [issue-proposing-alloc-with]: https://github.com/fitzgen/bumpalo/issues/10
739 | 
740 | --------------------------------------------------------------------------------
741 | 
742 | # 2.2.2
743 | 
744 | Released 2019-03-18.
745 | 
746 | * Fix a regression from 2.2.1 where chunks were not always aligned to the chunk
747 |   footer's alignment.
748 | 749 | -------------------------------------------------------------------------------- 750 | 751 | # 2.2.1 752 | 753 | Released 2019-03-18. 754 | 755 | * Fix a regression in 2.2.0 where newly allocated bump chunks could fail to have 756 | capacity for a large requested bump allocation in some corner cases. 757 | 758 | -------------------------------------------------------------------------------- 759 | 760 | # 2.2.0 761 | 762 | Released 2019-03-15. 763 | 764 | * Chunks in an arena now start out small, and double in size as more chunks are 765 | requested. 766 | 767 | -------------------------------------------------------------------------------- 768 | 769 | # 2.1.0 770 | 771 | Released 2019-02-12. 772 | 773 | * Added the `into_bump_slice` method on `bumpalo::collections::Vec`. 774 | 775 | -------------------------------------------------------------------------------- 776 | 777 | # 2.0.0 778 | 779 | Released 2019-02-11. 780 | 781 | * Removed the `BumpAllocSafe` trait. 782 | * Correctly detect overflows from large allocations and panic. 783 | 784 | -------------------------------------------------------------------------------- 785 | 786 | # 1.2.0 787 | 788 | Released 2019-01-15. 789 | 790 | * Fixed an overly-aggressive `debug_assert!` that had false positives. 791 | * Ported to Rust 2018 edition. 792 | 793 | -------------------------------------------------------------------------------- 794 | 795 | # 1.1.0 796 | 797 | Released 2018-11-28. 798 | 799 | * Added the `collections` module, which contains ports of `std`'s collection 800 | types that are compatible with backing their storage in `Bump` arenas. 801 | * Lifted the limits on size and alignment of allocations. 802 | 803 | -------------------------------------------------------------------------------- 804 | 805 | # 1.0.2 806 | 807 | -------------------------------------------------------------------------------- 808 | 809 | # 1.0.1 810 | 811 | -------------------------------------------------------------------------------- 812 | 813 | # 1.0.0 814 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Nick Fitzgerald "] 3 | categories = ["memory-management", "rust-patterns", "no-std"] 4 | description = "A fast bump allocation arena for Rust." 5 | documentation = "https://docs.rs/bumpalo" 6 | edition = "2021" 7 | exclude = ["/.github/*", "/benches", "/tests", "valgrind.supp", "bumpalo.png"] 8 | license = "MIT OR Apache-2.0" 9 | name = "bumpalo" 10 | readme = "README.md" 11 | repository = "https://github.com/fitzgen/bumpalo" 12 | rust-version = "1.71.1" 13 | version = "3.17.0" 14 | 15 | [package.metadata.docs.rs] 16 | all-features = true 17 | 18 | [lib] 19 | path = "src/lib.rs" 20 | bench = false 21 | 22 | [[bench]] 23 | name = "benches" 24 | path = "benches/benches.rs" 25 | harness = false 26 | required-features = ["collections"] 27 | 28 | [[bench]] 29 | name = "allocator_api" 30 | path = "benches/allocator_api.rs" 31 | harness = false 32 | required-features = ["bench_allocator_api"] 33 | 34 | [[test]] 35 | name = "try_alloc" 36 | path = "tests/try_alloc.rs" 37 | harness = false 38 | 39 | [dependencies] 40 | # This dependency provides a version of the unstable nightly Rust `Allocator` 41 | # trait on stable Rust. Enabling this feature means that `bumpalo` will 42 | # implement its `Allocator` trait. 
43 | allocator-api2 = { version = "0.3.0", default-features = false, optional = true } 44 | 45 | # This dependency is here to allow integration with Serde, if the `serde` feature is enabled 46 | serde = { version = "1.0.171", optional = true } 47 | 48 | [dev-dependencies] 49 | quickcheck = "1.0.3" 50 | criterion = "0.3.6" 51 | rand = "0.8.5" 52 | serde = { version = "1.0.197", features = ["derive"] } 53 | serde_json = "1.0.115" 54 | blink-alloc = { version = "=0.3.1" } 55 | 56 | [features] 57 | default = [] 58 | collections = [] 59 | boxed = [] 60 | allocator_api = [] 61 | std = [] 62 | serde = ["dep:serde"] 63 | 64 | # Feature for bumpalo's internal development only. Do not use! 65 | bench_allocator_api = ["allocator_api", "blink-alloc/nightly"] 66 | 67 | # [profile.bench] 68 | # debug = true 69 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Nick Fitzgerald 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `bumpalo` 2 | 3 | **A fast bump allocation arena for Rust.** 4 | 5 | [![](https://docs.rs/bumpalo/badge.svg)](https://docs.rs/bumpalo/) 6 | [![](https://img.shields.io/crates/v/bumpalo.svg)](https://crates.io/crates/bumpalo) 7 | [![](https://img.shields.io/crates/d/bumpalo.svg)](https://crates.io/crates/bumpalo) 8 | [![Build Status](https://github.com/fitzgen/bumpalo/workflows/Rust/badge.svg)](https://github.com/fitzgen/bumpalo/actions?query=workflow%3ARust) 9 | 10 | ![](https://github.com/fitzgen/bumpalo/raw/main/bumpalo.png) 11 | 12 | ### Bump Allocation 13 | 14 | Bump allocation is a fast, but limited approach to allocation. We have a chunk 15 | of memory, and we maintain a pointer within that memory. Whenever we allocate an 16 | object, we do a quick check that we have enough capacity left in our chunk to 17 | allocate the object and then update the pointer by the object's size. *That's 18 | it!* 19 | 20 | The disadvantage of bump allocation is that there is no general way to 21 | deallocate individual objects or reclaim the memory region for a 22 | no-longer-in-use object. 23 | 24 | These trade offs make bump allocation well-suited for *phase-oriented* 25 | allocations. That is, a group of objects that will all be allocated during the 26 | same program phase, used, and then can all be deallocated together as a group. 27 | 28 | ### Deallocation en Masse, but no `Drop` 29 | 30 | To deallocate all the objects in the arena at once, we can simply reset the bump 31 | pointer back to the start of the arena's memory chunk. This makes mass 32 | deallocation *extremely* fast, but allocated objects' [`Drop`] implementations are 33 | not invoked. 34 | 35 | > **However:** [`bumpalo::boxed::Box`][box] can be used to wrap 36 | > `T` values allocated in the `Bump` arena, and calls `T`'s `Drop` 37 | > implementation when the `Box` wrapper goes out of scope. This is similar to 38 | > how [`std::boxed::Box`] works, except without deallocating its backing memory. 
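As a small sketch of mass deallocation: `Bump::reset` takes `&mut self`, so the
borrow checker guarantees no borrows of arena-allocated data survive the reset.

```rust
use bumpalo::Bump;

let mut bump = Bump::new();
bump.alloc("hello");
bump.alloc(42u64);

// Everything allocated above is deallocated at once; no `Drop`
// implementations run, and the arena keeps a chunk around for reuse.
bump.reset();
```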
39 | 
40 | [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
41 | [box]: https://docs.rs/bumpalo/latest/bumpalo/boxed/struct.Box.html
42 | [`std::boxed::Box`]: https://doc.rust-lang.org/std/boxed/struct.Box.html
43 | 
44 | ### What happens when the memory chunk is full?
45 | 
46 | This implementation will allocate a new memory chunk from the global allocator
47 | and then start bump allocating into this new memory chunk.
48 | 
49 | ### Example
50 | 
51 | ```rust
52 | use bumpalo::Bump;
53 | 
54 | struct Doggo {
55 |     cuteness: u64,
56 |     age: u8,
57 |     scritches_required: bool,
58 | }
59 | 
60 | // Create a new arena to bump allocate into.
61 | let bump = Bump::new();
62 | 
63 | // Allocate values into the arena.
64 | let scooter = bump.alloc(Doggo {
65 |     cuteness: u64::MAX,
66 |     age: 8,
67 |     scritches_required: true,
68 | });
69 | 
70 | // Exclusive, mutable references to the just-allocated value are returned.
71 | assert!(scooter.scritches_required);
72 | scooter.age += 1;
73 | ```
74 | 
75 | ### Collections
76 | 
77 | When the `"collections"` cargo feature is enabled, forks of some of the `std`
78 | library's collections are available in the [`collections`] module. These
79 | collection types are modified to allocate their space inside `bumpalo::Bump`
80 | arenas.
81 | 
82 | [`collections`]: https://docs.rs/bumpalo/latest/bumpalo/collections/index.html
83 | 
84 | ```rust
85 | #[cfg(feature = "collections")]
86 | {
87 |     use bumpalo::{Bump, collections::Vec};
88 | 
89 |     // Create a new bump arena.
90 |     let bump = Bump::new();
91 | 
92 |     // Create a vector of integers whose storage is backed by the bump arena. The
93 |     // vector cannot outlive its backing arena, and this property is enforced with
94 |     // Rust's lifetime rules.
95 |     let mut v = Vec::new_in(&bump);
96 | 
97 |     // Push a bunch of integers onto `v`!
98 |     for i in 0..100 {
99 |         v.push(i);
100 |     }
101 | }
102 | ```
103 | 
104 | Eventually [all `std` collection types will be parameterized by an
105 | allocator](https://github.com/rust-lang/rust/issues/42774) and we can remove
106 | this `collections` module and use the `std` versions.
107 | 
108 | For unstable, nightly-only support for custom allocators in `std`, see the
109 | `allocator_api` section below.
110 | 
111 | ### `bumpalo::boxed::Box`
112 | 
113 | When the `"boxed"` cargo feature is enabled, a fork of `std::boxed::Box`
114 | is available in the `boxed` module. This `Box` type is modified to allocate its
115 | space inside `bumpalo::Bump` arenas.
116 | 
117 | **A `Box<T>` runs `T`'s drop implementation when the `Box<T>` is dropped.** You
118 | can use this to work around the fact that `Bump` does not drop values allocated
119 | in its space itself.
120 | 
121 | ```rust
122 | #[cfg(feature = "boxed")]
123 | {
124 |     use bumpalo::{Bump, boxed::Box};
125 |     use std::sync::atomic::{AtomicUsize, Ordering};
126 | 
127 |     static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0);
128 | 
129 |     struct CountDrops;
130 | 
131 |     impl Drop for CountDrops {
132 |         fn drop(&mut self) {
133 |             NUM_DROPPED.fetch_add(1, Ordering::SeqCst);
134 |         }
135 |     }
136 | 
137 |     // Create a new bump arena.
138 |     let bump = Bump::new();
139 | 
140 |     // Create a `CountDrops` inside the bump arena.
141 |     let c = Box::new_in(CountDrops, &bump);
142 | 
143 |     // No `CountDrops` have been dropped yet.
144 |     assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 0);
145 | 
146 |     // Drop our `Box`.
147 |     drop(c);
148 | 
149 |     // Its `Drop` implementation was run, and so `NUM_DROPPED` has been
150 |     // incremented.
151 |     assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 1);
152 | }
153 | ```
154 | 
155 | #### Serde
156 | 
157 | Adding the `serde` feature flag will enable transparent serialization of `Vec`s, `String`s
158 | and boxed values.
159 | 
160 | ```toml
161 | [dependencies]
162 | bumpalo = { version = "3.9", features = ["collections", "boxed", "serde"] }
163 | ```
164 | 
165 | ```rust,ignore
166 | use bumpalo::{Bump, boxed::Box, collections::Vec};
167 | 
168 | // Create a new bump arena.
169 | let bump = Bump::new();
170 | 
171 | // Create a `Box`
172 | let boxed = Box::new_in("hello", &bump);
173 | 
174 | // Serialize with serde_json
175 | assert_eq!(serde_json::to_string(&boxed).unwrap(), "\"hello\"");
176 | 
177 | // Create a `Vec`
178 | let mut vec = Vec::new_in(&bump);
179 | vec.push(1);
180 | vec.push(2);
181 | 
182 | // Serialize with serde_json
183 | assert_eq!(serde_json::to_string(&vec).unwrap(), "[1,2]");
184 | ```
185 | 
186 | ### `#![no_std]` Support
187 | 
188 | Bumpalo is a `no_std` crate by default. It depends only on the `alloc` and `core` crates.
189 | 
190 | ### `std` Support
191 | 
192 | You can optionally enable the `std` feature in order to get some `std`-only
193 | trait implementations for some collections:
194 | 
195 | * `std::io::Write` for `Vec<'bump, u8>`
196 | 
197 | ### Thread support
198 | 
199 | The `Bump` is `!Sync`, which makes it hard to use in certain situations around
200 | threads ‒ for example in `rayon`.
201 | 
202 | The [`bumpalo-herd`](https://crates.io/crates/bumpalo-herd) crate provides a
203 | pool of `Bump` allocators for use in such situations.
204 | 
205 | ### Nightly Rust `allocator_api` Support
206 | 
207 | The unstable, nightly-only Rust `allocator_api` feature defines an [`Allocator`]
208 | trait and exposes custom allocators for `std` types. Bumpalo has a matching
209 | `allocator_api` cargo feature to enable implementing `Allocator` and using
210 | `Bump` with `std` collections. Note that, as `feature(allocator_api)` is
211 | unstable and only in nightly Rust, Bumpalo's matching `allocator_api` cargo
212 | feature should be considered unstable, and will not follow the semver
213 | conventions that the rest of the crate does.
214 | 
215 | First, enable the `allocator_api` feature in your `Cargo.toml`:
216 | 
217 | ```toml
218 | [dependencies]
219 | bumpalo = { version = "3", features = ["allocator_api"] }
220 | ```
221 | 
222 | Next, enable the `allocator_api` nightly Rust feature in your `src/lib.rs` or
223 | `src/main.rs`:
224 | 
225 | ```rust,ignore
226 | #![feature(allocator_api)]
227 | ```
228 | 
229 | Finally, use `std` collections with `Bump`, so that their internal heap
230 | allocations are made within the given bump arena:
231 | 
232 | ```rust,ignore
233 | use bumpalo::Bump;
234 | 
235 | // Create a new bump arena.
236 | let bump = Bump::new();
237 | 
238 | // Create a `Vec` whose elements are allocated within the bump arena.
239 | let mut v = Vec::new_in(&bump);
240 | v.push(0);
241 | v.push(1);
242 | v.push(2);
243 | ```
244 | 
245 | [`Allocator`]: https://doc.rust-lang.org/std/alloc/trait.Allocator.html
246 | 
247 | ### Using the `Allocator` API on Stable Rust
248 | 
249 | You can enable the `allocator-api2` Cargo feature and `bumpalo` will use [the
250 | `allocator-api2` crate](https://crates.io/crates/allocator-api2) to implement
251 | the unstable nightly `Allocator` API on stable Rust. This means that
252 | `bumpalo::Bump` will be usable with any collection that is generic over
253 | `allocator_api2::Allocator`.
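As a sketch, assuming bumpalo's `allocator-api2` feature is enabled and the
`allocator-api2` crate is a direct dependency with its default features:

```rust
use allocator_api2::vec::Vec;
use bumpalo::Bump;

let bump = Bump::new();

// `&Bump` implements `allocator_api2::alloc::Allocator`, so it can back
// `allocator-api2`'s collection types on stable Rust.
let mut v: Vec<u32, &Bump> = Vec::new_in(&bump);
v.push(1);
v.push(2);
assert_eq!(v.as_slice(), &[1, 2]);
```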
254 |
255 | ### Minimum Supported Rust Version (MSRV)
256 |
257 | This crate is guaranteed to compile on stable Rust **1.71.1** and up. It might
258 | compile with older versions, but that may change in any new patch release.
259 |
260 | We reserve the right to increment the MSRV on minor releases; however, we will
261 | strive to only do it deliberately and for good reasons.
262 |
--------------------------------------------------------------------------------
/benches/README.md:
--------------------------------------------------------------------------------
1 | # Benchmarks
2 |
3 | ## Table of Contents
4 |
5 | - [Overview](#overview)
6 | - [Reproducing](#reproducing)
7 | - [Benchmark Results](#benchmark-results)
8 |     - [allocator-api](#allocator-api)
9 |     - [warm-up](#warm-up)
10 |     - [reset](#reset)
11 |     - [vec](#vec)
12 |
13 | ## Overview
14 |
15 | This directory contains two suites of benchmarks:
16 |
17 | 1. `allocator_api.rs`: `std::alloc::Allocator`-based benchmarks that aim to
18 |    measure the performance of bump allocators within the generic `Allocator`
19 |    API.
20 |
21 | 2. `benches.rs`: Miscellaneous Bumpalo-specific benchmarks.
22 |
23 | The tables of benchmark results listed below are the results for the suite of
24 | `std::alloc::Allocator`-based benchmarks. They are originally adapted from
25 | [`blink-alloc`] (another fine bump allocator crate) which was already measuring
26 | the relative performance between `blink-alloc` and `bumpalo`. I wasn't able to
27 | reproduce many of their results showing that `blink-alloc` was faster than
28 | `bumpalo`, however, which was part of the motivation to bring a subset of the
29 | benchmarks into this repo and document reproduction steps.
30 |
31 | Furthermore, the tables below include a `std::alloc::System` column, but its
32 | results come with a few caveats. First, in order to implement a `reset` method
33 | for the system allocator and deallocate everything that was allocated within a
34 | certain region of code, I had to add additional bookkeeping to dynamically track
35 | every live allocation. That bookkeeping generally won't be present in real
36 | programs, which will instead use things like `Drop` implementations, so it makes
37 | the system allocator's results look worse than they otherwise would
38 | be. Additionally, these benchmarks are really designed to show off the strengths
39 | of bump allocators and measure the operations that are important for bump
40 | allocators. The system allocator is expected to perform worse, but that's
41 | because it is designed for general-purpose scenarios, whereas bump allocators
42 | are designed for very specific scenarios. These columns should mostly serve as
43 | a general reference point to get an idea of the magnitude of the allocation
44 | speed-ups you can expect in the very specific scenarios where using a bump
45 | allocator makes sense.
46 |
47 | Finally, all these benchmarks are synthetic. They are micro-benchmarks. You
48 | shouldn't expect that anything here will directly translate into speed-ups for
49 | your application. Application performance is what really matters, and things
50 | observed in the micro often disappear in the macro. If your application isn't
51 | bottlenecked on allocation, or can't abide by the constraints that a bump
52 | allocator imposes, there's nothing that a bump allocator can do to improve its
53 | performance.
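
To make the bookkeeping caveat concrete, the tracking looks roughly like the
sketch below. It is simplified; the real harness in `allocator_api.rs` does the
same thing behind the unstable `Allocator` trait, and the type and method names
here are illustrative only.

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::cell::RefCell;
use std::collections::HashMap;
use std::ptr::NonNull;

/// The system allocator, plus enough extra bookkeeping to support a
/// bump-allocator-style `reset`. Real programs don't pay this cost.
#[derive(Default)]
struct TrackedSystem {
    live: RefCell<HashMap<NonNull<u8>, Layout>>,
}

impl TrackedSystem {
    fn allocate(&self, layout: Layout) -> *mut u8 {
        let raw = unsafe { System.alloc(layout) };
        if let Some(ptr) = NonNull::new(raw) {
            // Remember every live allocation so `reset` can free it later.
            self.live.borrow_mut().insert(ptr, layout);
        }
        raw
    }

    /// Free everything allocated since the last reset, en masse, the way a
    /// bump allocator's `reset` would.
    fn reset(&self) {
        for (ptr, layout) in self.live.borrow_mut().drain() {
            unsafe { System.dealloc(ptr.as_ptr(), layout) };
        }
    }
}
```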
54 | 55 | [`blink-alloc`]: https://github.com/zakarumych/blink-alloc/blob/845b2db273371260eef2e9858386f6c6aa180e98/BENCHMARKS.md 56 | 57 | ## Reproducing 58 | 59 | The `std::alloc::Allocator`-based benchmarks require using nightly Rust, since 60 | the `Allocator` trait is still unstable. You must additionally enable Bumpalo's 61 | `allocator_api` cargo feature: 62 | 63 | ``` 64 | $ cargo +nightly bench --bench allocator_api --features allocator_api 65 | ``` 66 | 67 | The miscellaneous benchmarks require Bumpalo's `collections` cargo feature: 68 | 69 | ``` 70 | $ cargo bench --bench benches --features collections 71 | ``` 72 | 73 | To update the tables below, use `cargo-criterion` and [`criterion-table`]: 74 | 75 | ``` 76 | $ cd bumpalo/benches/ 77 | $ cargo +nightly bench --features bench_allocator_api \ 78 | --bench allocator_api \ 79 | --message-format=json \ 80 | > results.json 81 | $ criterion-table < results.json > README.md 82 | ``` 83 | 84 | [`cargo-criterion`]: https://github.com/bheisler/cargo-criterion 85 | [`criterion-table`]: https://github.com/nu11ptr/criterion-table 86 | 87 | ## Benchmark Results 88 | 89 | ### allocator-api 90 | 91 | Benchmarks that measure calls into `std::alloc::Allocator` methods directly. 92 | 93 | These operations are generally the ones that happen most often, and therefore 94 | their performance is generally most important. Following the same logic, raw 95 | allocation is generally the very most important. 96 | 97 | | | `bumpalo::Bump` | `blink_alloc::BlinkAlloc` | `std::alloc::System` | 98 | |:----------------------------------------------------|:-------------------------|:-----------------------------------|:---------------------------------- | 99 | | **`allocate(u8) x 10007`** | `16.65 us` (✅ **1.00x**) | `20.13 us` (❌ *1.21x slower*) | `475.36 us` (❌ *28.55x slower*) | 100 | | **`allocate(u32) x 10007`** | `16.41 us` (✅ **1.00x**) | `19.58 us` (❌ *1.19x slower*) | `525.99 us` (❌ *32.06x slower*) | 101 | | **`allocate(u64) x 10007`** | `16.69 us` (✅ **1.00x**) | `16.51 us` (✅ **1.01x faster**) | `564.42 us` (❌ *33.82x slower*) | 102 | | **`allocate(u128) x 10007`** | `15.97 us` (✅ **1.00x**) | `16.41 us` (✅ **1.03x slower**) | `618.64 us` (❌ *38.73x slower*) | 103 | | **`allocate([u8; 0]) x 10007`** | `22.04 us` (✅ **1.00x**) | `17.40 us` (✅ **1.27x faster**) | `197.37 us` (❌ *8.96x slower*) | 104 | | **`allocate([u8; 1]) x 10007`** | `22.03 us` (✅ **1.00x**) | `17.24 us` (✅ **1.28x faster**) | `484.81 us` (❌ *22.01x slower*) | 105 | | **`allocate([u8; 7]) x 10007`** | `22.09 us` (✅ **1.00x**) | `17.41 us` (✅ **1.27x faster**) | `567.44 us` (❌ *25.68x slower*) | 106 | | **`allocate([u8; 8]) x 10007`** | `22.09 us` (✅ **1.00x**) | `17.41 us` (✅ **1.27x faster**) | `561.20 us` (❌ *25.41x slower*) | 107 | | **`allocate([u8; 31]) x 10007`** | `22.09 us` (✅ **1.00x**) | `17.34 us` (✅ **1.27x faster**) | `675.39 us` (❌ *30.57x slower*) | 108 | | **`allocate([u8; 32]) x 10007`** | `21.99 us` (✅ **1.00x**) | `17.57 us` (✅ **1.25x faster**) | `690.94 us` (❌ *31.42x slower*) | 109 | | **`grow same align (u32 -> [u32; 2]) x 10007`** | `29.65 us` (✅ **1.00x**) | `31.03 us` (✅ **1.05x slower**) | `1.15 ms` (❌ *38.75x slower*) | 110 | | **`grow smaller align (u32 -> [u16; 4]) x 10007`** | `30.12 us` (✅ **1.00x**) | `31.06 us` (✅ **1.03x slower**) | `1.15 ms` (❌ *38.07x slower*) | 111 | | **`grow larger align (u32 -> u64) x 10007`** | `37.50 us` (✅ **1.00x**) | `39.16 us` (✅ **1.04x slower**) | `1.15 ms` (❌ *30.79x slower*) | 112 | | **`shrink same align ([u32; 2] 
-> u32) x 10007`** | `19.66 us` (✅ **1.00x**) | `20.39 us` (✅ **1.04x slower**)    | `1.09 ms` (❌ *55.61x slower*)      |
113 | | **`shrink smaller align (u32 -> u16) x 10007`**     | `19.97 us` (✅ **1.00x**) | `19.93 us` (✅ **1.00x faster**)    | `1.08 ms` (❌ *54.32x slower*)      |
114 | | **`shrink larger align ([u16; 4] -> u32) x 10007`** | `19.60 us` (✅ **1.00x**) | `39.14 us` (❌ *2.00x slower*)      | `1.09 ms` (❌ *55.76x slower*)      |
115 |
116 | ### warm-up
117 |
118 | Benchmarks that measure the first allocation in a fresh allocator.
119 |
120 | These aren't generally very important, since the first allocation in a fresh
121 | bump allocator only ever happens once by definition. This is mostly measuring
122 | how long it takes the underlying system allocator to allocate the initial chunk
123 | to bump allocate out of.
124 |
125 | |                            | `bumpalo::Bump`           | `blink_alloc::BlinkAlloc`           | `std::alloc::System`             |
126 | |:---------------------------|:-------------------------|:-----------------------------------|:-------------------------------- |
127 | | **`first u32 allocation`** | `24.16 ns` (✅ **1.00x**) | `21.65 ns` (✅ **1.12x faster**)    | `74.88 ns` (❌ *3.10x slower*)    |
128 |
129 | ### reset
130 |
131 | Benchmarks that measure the overhead of resetting a bump allocator to an empty
132 | state, ready to be reused in a new program phase.
133 |
134 | This generally doesn't happen as often as allocation, and is therefore generally
135 | less important, but it is still worth keeping an eye on, since
136 | deallocation-en-masse and reusing already-allocated chunks can be selling points
137 | for bump allocation over using a generic allocator in certain scenarios.
138 |
139 | |                                          | `bumpalo::Bump`            | `blink_alloc::BlinkAlloc`           | `std::alloc::System`                |
140 | |:----------------------------------------|:--------------------------|:-----------------------------------|:----------------------------------- |
141 | | **`reset after allocate(u32) x 10007`** | `163.62 ns` (✅ **1.00x**) | `192.34 ns` (❌ *1.18x slower*)     | `127.35 us` (❌ *778.30x slower*)   |
142 |
143 | ### vec
144 |
145 | Benchmarks that measure various `std::vec::Vec` operations when used in
146 | conjunction with a bump allocator.
147 |
148 | Bump allocators aren't often used directly, but instead through some sort of
149 | collection. These benchmarks are important in the sense that the standard
150 | `Vec` type is probably the most-commonly used collection (although not
151 | necessarily the most commonly used with bump allocators in Rust, at least until
152 | the `Allocator` trait is stabilized).
153 |
154 | |                                | `bumpalo::Bump`           | `blink_alloc::BlinkAlloc`           | `std::alloc::System`               |
155 | |:-------------------------------|:-------------------------|:-----------------------------------|:--------------------------------- |
156 | | **`push(usize) x 10007`**      | `16.66 us` (✅ **1.00x**) | `15.21 us` (✅ **1.10x faster**)    | `42.36 us` (❌ *2.54x slower*)     |
157 | | **`reserve_exact(1) x 10007`** | `2.26 ms` (✅ **1.00x**)  | `60.24 us` (🚀 **37.44x faster**)   | `683.34 us` (🚀 **3.30x faster**)  |
158 |
159 | ---
160 | Made with [criterion-table](https://github.com/nu11ptr/criterion-table)
161 |
--------------------------------------------------------------------------------
/benches/allocator_api.rs:
--------------------------------------------------------------------------------
1 | //! This benchmark is adapted from [`blink-alloc`] (MIT/Apache-2) but with a
2 | //! bunch of extraneous stuff trimmed away.
3 | //!
4 | //! [`blink-alloc`]: https://github.com/zakarumych/blink-alloc/blob/845b2db273371260eef2e9858386f6c6aa180e98/benches/bench.rs
5 |
6 | #![feature(allocator_api)]
7 |
8 | use criterion::*;
9 | use std::{
10 |     alloc::{AllocError, Allocator, Layout},
11 |     cell::RefCell,
12 |     collections::HashMap,
13 |     mem,
14 |     ptr::NonNull,
15 |     time::{Duration, Instant},
16 | };
17 |
18 | /// Trait for resetting a bump allocator to its initial state.
19 | trait BumpAllocator: Default
20 | where
21 |     for<'a> &'a Self: Allocator,
22 | {
23 |     fn with_capacity(cap: usize) -> Self;
24 |     fn reset(&mut self);
25 | }
26 |
// Use a minimum alignment that covers the largest type these benches allocate
// (`u128`), so the bump pointer needs no per-allocation rounding.
27 | type Bumpalo = bumpalo::Bump<{ std::mem::align_of::<u128>() }>;
28 | impl BumpAllocator for Bumpalo {
29 |     fn with_capacity(cap: usize) -> Self {
30 |         let b = Bumpalo::with_min_align_and_capacity(cap);
31 |         b.set_allocation_limit(Some(cap));
32 |         b
33 |     }
34 |
35 |     #[inline(always)]
36 |     fn reset(&mut self) {
        // Calls the inherent `Bump::reset`; inherent methods take precedence
        // over this trait method in method resolution, so this does not recurse.
37 |         self.reset();
38 |     }
39 | }
40 |
41 | impl BumpAllocator for blink_alloc::BlinkAlloc {
42 |     fn with_capacity(cap: usize) -> Self {
43 |         blink_alloc::BlinkAlloc::with_chunk_size(cap)
44 |     }
45 |
46 |     #[inline(always)]
47 |     fn reset(&mut self) {
48 |         self.reset();
49 |     }
50 | }
51 |
52 | /// System allocator, as if it were a bump allocator. See caveats in
53 | /// `benches/README.md`; it isn't expected that this super accurately reflects
54 | /// the system allocator's performance.
55 | #[derive(Default)]
56 | struct SystemAlloc {
57 |     alloc: std::alloc::System,
58 |     live: RefCell<HashMap<NonNull<u8>, Layout>>,
59 | }
60 |
61 | impl BumpAllocator for SystemAlloc {
62 |     fn with_capacity(cap: usize) -> Self {
63 |         SystemAlloc {
64 |             alloc: std::alloc::System,
65 |             live: RefCell::new(HashMap::with_capacity(cap)),
66 |         }
67 |     }
68 |
69 |     fn reset(&mut self) {
        // Free every allocation made since the last reset, en masse.
70 |         let mut live = self.live.borrow_mut();
71 |         for (ptr, layout) in live.drain() {
72 |             unsafe {
73 |                 self.alloc.deallocate(ptr, layout);
74 |             }
75 |         }
76 |     }
77 | }
78 |
79 | unsafe impl<'a> Allocator for &'a SystemAlloc {
80 |     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
81 |         let ptr = self.alloc.allocate(layout)?;
82 |
        // Record the allocation so `reset` can deallocate it later.
83 |         let mut live = self.live.borrow_mut();
84 |         live.insert(ptr.cast(), layout);
85 |
86 |         Ok(ptr)
87 |     }
88 |
89 |     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
90 |         self.alloc.deallocate(ptr, layout);
91 |         let mut live = self.live.borrow_mut();
92 |         live.remove(&ptr);
93 |     }
94 |
95 |     fn allocate_zeroed(&self, _layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
96 |         unimplemented!()
97 |     }
98 |
99 |     unsafe fn grow(
100 |         &self,
101 |         ptr: NonNull<u8>,
102 |         old_layout: Layout,
103 |         new_layout: Layout,
104 |     ) -> Result<NonNull<[u8]>, AllocError> {
105 |         {
106 |             let mut live = self.live.borrow_mut();
107 |             live.remove(&ptr);
108 |         }
109 |
110 |         let ptr = self.alloc.grow(ptr, old_layout, new_layout)?;
111 |
112 |         let mut live = self.live.borrow_mut();
113 |         live.insert(ptr.cast(), new_layout);
114 |
115 |         Ok(ptr)
116 |     }
117 |
118 |     unsafe fn grow_zeroed(
119 |         &self,
120 |         _ptr: NonNull<u8>,
121 |         _old_layout: Layout,
122 |         _new_layout: Layout,
123 |     ) -> Result<NonNull<[u8]>, AllocError> {
124 |         unimplemented!()
125 |     }
126 |
127 |     unsafe fn shrink(
128 |         &self,
129 |         ptr: NonNull<u8>,
130 |         old_layout: Layout,
131 |         new_layout: Layout,
132 |     ) -> Result<NonNull<[u8]>, AllocError> {
133 |         {
134 |             let mut live = self.live.borrow_mut();
135 |             live.remove(&ptr);
136 |         }
137 |
138 |         let ptr = self.alloc.shrink(ptr, old_layout, new_layout)?;
139 |
140 |         let mut live = self.live.borrow_mut();
141 |         live.insert(ptr.cast(), new_layout);
142 |
143 |         Ok(ptr)
144 |     }
145 | }
146 |
147 
| // The number of allocations to perform in each iteration of the 148 | // benchmarks. This used to be 17453, but it wasn't clear to me why that number 149 | // was chosen, or how it related to the warm up allocation's size. Instead, I've 150 | // chosen 10_007 because it is a prime number and therefore should hopefully 151 | // help us avoid any kind of unwanted harmonics in our measurements. It is also 152 | // large enough that we can start to filter out the noise from our alloc 153 | // operations, but small enough that running the benchmarks takes a reasonable 154 | // amount of time. Finally, I factored out the warm-up logic to be directly tied 155 | // to this number, and ensure that we avoid measuring any resizes during our 156 | // allocations, as (a) they are already covered by the "warm-up" benchmark and 157 | // (b) resizing is rare and amortized across allocations (which happen 158 | // frequently, and whose performance is actually important). 159 | const NUM_ALLOCS: usize = 10_007; 160 | 161 | fn bench_allocator_api(name: &str, c: &mut Criterion) 162 | where 163 | for<'a> &'a A: Allocator, 164 | A: BumpAllocator + Default + 'static, 165 | { 166 | let mut group = c.benchmark_group(format!("allocator-api/{name}")); 167 | 168 | group.bench_function(format!("allocate(u8) x {NUM_ALLOCS}"), |b| { 169 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 170 | b.iter(|| { 171 | for _ in 0..NUM_ALLOCS { 172 | let ptr = (&alloc).allocate(Layout::new::()).unwrap(); 173 | black_box(ptr); 174 | } 175 | alloc.reset(); 176 | }) 177 | }); 178 | 179 | group.bench_function(format!("allocate(u32) x {NUM_ALLOCS}"), |b| { 180 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 181 | b.iter(|| { 182 | for _ in 0..NUM_ALLOCS { 183 | let ptr = (&alloc).allocate(Layout::new::()).unwrap(); 184 | black_box(ptr); 185 | } 186 | alloc.reset(); 187 | }) 188 | }); 189 | 190 | group.bench_function(format!("allocate(u64) x {NUM_ALLOCS}"), |b| { 191 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 192 | b.iter(|| { 193 | for _ in 0..NUM_ALLOCS { 194 | let ptr = (&alloc).allocate(Layout::new::()).unwrap(); 195 | black_box(ptr); 196 | } 197 | alloc.reset(); 198 | }) 199 | }); 200 | 201 | group.bench_function(format!("allocate(u128) x {NUM_ALLOCS}"), |b| { 202 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 203 | b.iter(|| { 204 | for _ in 0..NUM_ALLOCS { 205 | let ptr = (&alloc).allocate(Layout::new::()).unwrap(); 206 | black_box(ptr); 207 | } 208 | alloc.reset(); 209 | }) 210 | }); 211 | 212 | // Choose some small, medium, and "large" lengths, as well as some prime 213 | // numbers to see how the allocators deal with "unaligned" sizes. 214 | for len in [0, 1, 7, 8, 31, 32] { 215 | group.bench_function(format!("allocate([u8; {len}]) x {NUM_ALLOCS}"), |b| { 216 | let mut alloc = A::with_capacity(mem::size_of::() * len * NUM_ALLOCS); 217 | b.iter(|| { 218 | for _ in 0..NUM_ALLOCS { 219 | // NB: black box the length but not the whole layout, since 220 | // that more accurately reflects things like `Vec` where the 221 | // element size (and therefore its alignment) is statically 222 | // known but the collection length is dynamic. 
223 | let len = black_box(len); 224 | let layout = Layout::array::(len).unwrap(); 225 | 226 | let ptr = (&alloc).allocate(layout).unwrap(); 227 | black_box(ptr); 228 | } 229 | alloc.reset(); 230 | }) 231 | }); 232 | } 233 | 234 | group.bench_function( 235 | format!("grow same align (u32 -> [u32; 2]) x {NUM_ALLOCS}"), 236 | |b| { 237 | let mut alloc = A::with_capacity(mem::size_of::<[u32; 2]>() * NUM_ALLOCS); 238 | b.iter(|| { 239 | for _ in 0..NUM_ALLOCS { 240 | unsafe { 241 | let ptr = black_box(&alloc).allocate(Layout::new::()).unwrap(); 242 | let ptr = black_box(&alloc) 243 | .grow(ptr.cast(), Layout::new::(), Layout::new::<[u32; 2]>()) 244 | .unwrap(); 245 | black_box(ptr); 246 | } 247 | } 248 | alloc.reset(); 249 | }) 250 | }, 251 | ); 252 | 253 | group.bench_function( 254 | format!("grow smaller align (u32 -> [u16; 4]) x {NUM_ALLOCS}"), 255 | |b| { 256 | let mut alloc = A::with_capacity(mem::size_of::<[u16; 4]>() * NUM_ALLOCS); 257 | b.iter(|| { 258 | for _ in 0..NUM_ALLOCS { 259 | unsafe { 260 | let ptr = black_box(&alloc).allocate(Layout::new::()).unwrap(); 261 | let ptr = black_box(&alloc) 262 | .grow(ptr.cast(), Layout::new::(), Layout::new::<[u16; 4]>()) 263 | .unwrap(); 264 | black_box(ptr); 265 | } 266 | } 267 | alloc.reset(); 268 | }) 269 | }, 270 | ); 271 | 272 | group.bench_function( 273 | format!("grow larger align (u32 -> u64) x {NUM_ALLOCS}"), 274 | |b| { 275 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 276 | b.iter(|| { 277 | for _ in 0..NUM_ALLOCS { 278 | unsafe { 279 | let ptr = black_box(&alloc).allocate(Layout::new::()).unwrap(); 280 | let ptr = black_box(&alloc) 281 | .grow(ptr.cast(), Layout::new::(), Layout::new::()) 282 | .unwrap(); 283 | black_box(ptr); 284 | } 285 | } 286 | alloc.reset(); 287 | }) 288 | }, 289 | ); 290 | 291 | group.bench_function( 292 | format!("shrink same align ([u32; 2] -> u32) x {NUM_ALLOCS}"), 293 | |b| { 294 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 295 | b.iter(|| { 296 | for _ in 0..NUM_ALLOCS { 297 | unsafe { 298 | let ptr = black_box(&alloc) 299 | .allocate(Layout::new::<[u32; 2]>()) 300 | .unwrap(); 301 | let ptr = black_box(&alloc) 302 | .shrink(ptr.cast(), Layout::new::<[u32; 2]>(), Layout::new::()) 303 | .unwrap(); 304 | black_box(ptr); 305 | } 306 | } 307 | alloc.reset(); 308 | }) 309 | }, 310 | ); 311 | 312 | group.bench_function( 313 | format!("shrink smaller align (u32 -> u16) x {NUM_ALLOCS}"), 314 | |b| { 315 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS); 316 | b.iter(|| { 317 | for _ in 0..NUM_ALLOCS { 318 | unsafe { 319 | let ptr = black_box(&alloc).allocate(Layout::new::()).unwrap(); 320 | let ptr = black_box(&alloc) 321 | .shrink(ptr.cast(), Layout::new::(), Layout::new::()) 322 | .unwrap(); 323 | black_box(ptr); 324 | } 325 | } 326 | alloc.reset(); 327 | }) 328 | }, 329 | ); 330 | 331 | group.bench_function( 332 | format!("shrink larger align ([u16; 4] -> u32) x {NUM_ALLOCS}"), 333 | |b| { 334 | let mut alloc = A::with_capacity(mem::size_of::<[u16; 4]>() * NUM_ALLOCS); 335 | b.iter(|| { 336 | for _ in 0..NUM_ALLOCS { 337 | unsafe { 338 | let ptr = black_box(&alloc) 339 | .allocate(Layout::new::<[u16; 4]>()) 340 | .unwrap(); 341 | let ptr = black_box(&alloc) 342 | .shrink(ptr.cast(), Layout::new::<[u16; 4]>(), Layout::new::()) 343 | .unwrap(); 344 | black_box(ptr); 345 | } 346 | } 347 | alloc.reset(); 348 | }) 349 | }, 350 | ); 351 | 352 | group.finish(); 353 | } 354 | 355 | fn bench_warm_up(name: &str, c: &mut Criterion) 356 | where 357 | for<'a> &'a A: 
Allocator, 358 | A: BumpAllocator + Default, 359 | { 360 | let mut group = c.benchmark_group(format!("warm-up/{name}")); 361 | 362 | group.bench_function(format!("first u32 allocation"), |b| { 363 | b.iter(|| { 364 | let alloc = A::default(); 365 | let ptr = black_box(&alloc).allocate(Layout::new::()).unwrap(); 366 | black_box(ptr); 367 | }) 368 | }); 369 | 370 | group.finish(); 371 | } 372 | 373 | fn bench_reset(name: &str, c: &mut Criterion) 374 | where 375 | for<'a> &'a A: Allocator, 376 | A: BumpAllocator + Default, 377 | { 378 | let mut group = c.benchmark_group(format!("reset/{name}")); 379 | 380 | group.bench_function(format!("reset after allocate(u32) x {NUM_ALLOCS}"), |b| { 381 | b.iter_custom(move |iters| { 382 | let mut duration = Duration::from_millis(0); 383 | 384 | for _ in 0..iters { 385 | // NB: do not use `with_capacity` here, we want to measure 386 | // resetting with multiple internal bump chunks. 387 | let mut alloc = A::default(); 388 | 389 | for _ in 0..NUM_ALLOCS { 390 | black_box((&alloc).allocate(Layout::new::()).unwrap()); 391 | } 392 | 393 | let start = Instant::now(); 394 | 395 | alloc.reset(); 396 | black_box(&alloc); 397 | 398 | duration += start.elapsed(); 399 | } 400 | 401 | duration 402 | }); 403 | }); 404 | 405 | group.finish(); 406 | } 407 | 408 | fn bench_vec(name: &str, c: &mut Criterion) 409 | where 410 | for<'a> &'a A: Allocator, 411 | A: BumpAllocator + Default, 412 | { 413 | let mut group = c.benchmark_group(format!("vec/{name}")); 414 | 415 | // Additional room because the vectors are going to potentially resize 416 | // multiple times. 417 | const RESIZE_FACTOR: usize = 10; 418 | 419 | group.bench_function(format!("push(usize) x {NUM_ALLOCS}"), |b| { 420 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS * RESIZE_FACTOR); 421 | b.iter(|| { 422 | let mut vec = Vec::new_in(&alloc); 423 | for i in 0..NUM_ALLOCS { 424 | vec.push(i); 425 | } 426 | drop(vec); 427 | alloc.reset(); 428 | }) 429 | }); 430 | 431 | group.bench_function(format!("reserve_exact(1) x {NUM_ALLOCS}"), |b| { 432 | let mut alloc = A::with_capacity(mem::size_of::() * NUM_ALLOCS * RESIZE_FACTOR); 433 | b.iter(|| { 434 | let mut vec = Vec::::new_in(&alloc); 435 | for i in 0..NUM_ALLOCS { 436 | vec.reserve_exact(i); 437 | } 438 | drop(vec); 439 | alloc.reset(); 440 | }) 441 | }); 442 | 443 | group.finish(); 444 | } 445 | 446 | pub fn criterion_benchmark(c: &mut Criterion) { 447 | bench_allocator_api::("bumpalo::Bump", c); 448 | bench_allocator_api::("blink_alloc::BlinkAlloc", c); 449 | bench_allocator_api::("std::alloc::System", c); 450 | 451 | bench_warm_up::("bumpalo::Bump", c); 452 | bench_warm_up::("blink_alloc::BlinkAlloc", c); 453 | bench_warm_up::("std::alloc::System", c); 454 | 455 | bench_reset::("bumpalo::Bump", c); 456 | bench_reset::("blink_alloc::BlinkAlloc", c); 457 | bench_reset::("std::alloc::System", c); 458 | 459 | bench_vec::("bumpalo::Bump", c); 460 | bench_vec::("blink_alloc::BlinkAlloc", c); 461 | bench_vec::("std::alloc::System", c); 462 | } 463 | 464 | criterion_group!(benches, criterion_benchmark); 465 | criterion_main!(benches); 466 | -------------------------------------------------------------------------------- /benches/benches.rs: -------------------------------------------------------------------------------- 1 | use criterion::*; 2 | 3 | #[derive(Default)] 4 | struct Small(u8); 5 | 6 | #[derive(Default)] 7 | struct Big([usize; 32]); 8 | 9 | fn alloc(n: usize) { 10 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::()); 11 | 
for _ in 0..n { 12 | let arena = black_box(&arena); 13 | let val: &mut T = arena.alloc(black_box(Default::default())); 14 | black_box(val); 15 | } 16 | } 17 | 18 | fn alloc_with(n: usize) { 19 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::()); 20 | for _ in 0..n { 21 | let arena = black_box(&arena); 22 | let val: &mut T = arena.alloc_with(|| black_box(Default::default())); 23 | black_box(val); 24 | } 25 | } 26 | 27 | fn alloc_try_with(n: usize) { 28 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::>()); 29 | for _ in 0..n { 30 | let arena = black_box(&arena); 31 | let val: Result<&mut T, E> = arena.alloc_try_with(|| black_box(Ok(Default::default()))); 32 | let _ = black_box(val); 33 | } 34 | } 35 | 36 | fn alloc_try_with_err(n: usize) { 37 | // Only enough capacity for one, since the allocation is undone. 38 | let arena = bumpalo::Bump::with_capacity(std::mem::size_of::>()); 39 | for _ in 0..n { 40 | let arena = black_box(&arena); 41 | let val: Result<&mut T, E> = arena.alloc_try_with(|| black_box(Err(Default::default()))); 42 | let _ = black_box(val); 43 | } 44 | } 45 | 46 | fn try_alloc(n: usize) { 47 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::()); 48 | for _ in 0..n { 49 | let arena = black_box(&arena); 50 | let val: Result<&mut T, _> = arena.try_alloc(black_box(Default::default())); 51 | let _ = black_box(val); 52 | } 53 | } 54 | 55 | fn try_alloc_with(n: usize) { 56 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::()); 57 | for _ in 0..n { 58 | let arena = black_box(&arena); 59 | let val: Result<&mut T, _> = arena.try_alloc_with(|| black_box(Default::default())); 60 | let _ = black_box(val); 61 | } 62 | } 63 | 64 | fn try_alloc_try_with(n: usize) { 65 | let arena = bumpalo::Bump::with_capacity(n * std::mem::size_of::>()); 66 | for _ in 0..n { 67 | let arena = black_box(&arena); 68 | let val: Result<&mut T, bumpalo::AllocOrInitError> = 69 | arena.try_alloc_try_with(|| black_box(Ok(Default::default()))); 70 | let _ = black_box(val); 71 | } 72 | } 73 | 74 | fn try_alloc_try_with_err(n: usize) { 75 | // Only enough capacity for one, since the allocation is undone. 76 | let arena = bumpalo::Bump::with_capacity(std::mem::size_of::>()); 77 | for _ in 0..n { 78 | let arena = black_box(&arena); 79 | let val: Result<&mut T, bumpalo::AllocOrInitError> = 80 | arena.try_alloc_try_with(|| black_box(Err(Default::default()))); 81 | let _ = black_box(val); 82 | } 83 | } 84 | 85 | #[cfg(feature = "collections")] 86 | fn format_realloc(bump: &bumpalo::Bump, n: usize) { 87 | let n = criterion::black_box(n); 88 | let s = bumpalo::format!(in bump, "Hello {:.*}", n, "World! 
"); 89 | criterion::black_box(s); 90 | } 91 | 92 | #[cfg(feature = "collections")] 93 | fn string_from_str_in(bump: &bumpalo::Bump, str: &str) { 94 | let str = criterion::black_box(str); 95 | let s = bumpalo::collections::string::String::from_str_in(str, bump); 96 | criterion::black_box(s); 97 | } 98 | 99 | #[cfg(feature = "collections")] 100 | fn string_push_str(bump: &bumpalo::Bump, str: &str) { 101 | let str = criterion::black_box(str); 102 | let mut s = bumpalo::collections::string::String::with_capacity_in(str.len(), bump); 103 | s.push_str(str); 104 | criterion::black_box(s); 105 | } 106 | 107 | #[cfg(feature = "collections")] 108 | fn extend_u8(bump: &bumpalo::Bump, slice: &[u8]) { 109 | let slice = criterion::black_box(slice); 110 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 111 | vec.extend(slice.iter().copied()); 112 | criterion::black_box(vec); 113 | } 114 | 115 | #[cfg(feature = "collections")] 116 | fn extend_from_slice_u8(bump: &bumpalo::Bump, slice: &[u8]) { 117 | let slice = criterion::black_box(slice); 118 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 119 | vec.extend_from_slice(slice); 120 | criterion::black_box(vec); 121 | } 122 | 123 | #[cfg(feature = "collections")] 124 | fn extend_from_slice_copy_u8(bump: &bumpalo::Bump, slice: &[u8]) { 125 | let slice = criterion::black_box(slice); 126 | let mut vec = bumpalo::collections::Vec::::with_capacity_in(slice.len(), bump); 127 | vec.extend_from_slice_copy(slice); 128 | criterion::black_box(vec); 129 | } 130 | 131 | const ALLOCATIONS: usize = 10_000; 132 | 133 | fn bench_extend_from_slice_copy(c: &mut Criterion) { 134 | let lengths = &[ 135 | 4usize, 136 | 5, 137 | 8, 138 | 11, 139 | 16, 140 | 64, 141 | 128, 142 | 331, 143 | 1024, 144 | 4 * 1024, 145 | 16 * 1024, 146 | ]; 147 | 148 | for len in lengths.iter().copied() { 149 | let str = "x".repeat(len); 150 | let mut group = c.benchmark_group(format!("extend {len} bytes")); 151 | group.throughput(Throughput::Elements(len as u64)); 152 | group.bench_function("extend", |b| { 153 | let mut bump = bumpalo::Bump::with_capacity(len); 154 | b.iter(|| { 155 | bump.reset(); 156 | extend_u8(&bump, str.as_bytes()); 157 | }); 158 | }); 159 | group.bench_function("extend_from_slice", |b| { 160 | let mut bump = bumpalo::Bump::with_capacity(len); 161 | let str = "x".repeat(len); 162 | b.iter(|| { 163 | bump.reset(); 164 | extend_from_slice_u8(&bump, str.as_bytes()); 165 | }); 166 | }); 167 | group.bench_function("extend_from_slice_copy", |b| { 168 | let mut bump = bumpalo::Bump::with_capacity(len); 169 | let str = "x".repeat(len); 170 | b.iter(|| { 171 | bump.reset(); 172 | extend_from_slice_copy_u8(&bump, str.as_bytes()); 173 | }); 174 | }); 175 | group.finish(); 176 | } 177 | } 178 | 179 | fn bench_extend_from_slices_copy(c: &mut Criterion) { 180 | // The number of slices that will be copied into the Vec 181 | let slice_counts = &[1, 2, 4, 8, 16, 32]; 182 | 183 | // Whether the Bump and its Vec have will already enough space to store the data without 184 | // requiring reallocation 185 | let is_preallocated_settings = &[false, true]; 186 | 187 | // Slices that can be used to extend the Vec; each may be used more than once. 188 | let data: [&[u8]; 4] = [ 189 | black_box(b"wwwwwwwwwwwwwwww"), 190 | black_box(b"xxxxxxxxxxxxxxxx"), 191 | black_box(b"yyyyyyyyyyyyyyyy"), 192 | black_box(b"zzzzzzzzzzzzzzzz"), 193 | ]; 194 | 195 | // For each (`is_preallocated`, `num_slices`) pair... 
196 | for is_preallocated in is_preallocated_settings { 197 | for num_slices in slice_counts.iter().copied() { 198 | // Create an appropriately named benchmark group 199 | let mut group = c.benchmark_group(format!( 200 | "extend_from_slices num_slices={num_slices}, is_preallocated={is_preallocated}" 201 | )); 202 | 203 | // Cycle over `data` to construct a slice of slices to append 204 | let slices = data 205 | .iter() 206 | .copied() 207 | .cycle() 208 | .take(num_slices) 209 | .collect::>(); 210 | let total_size = slices.iter().map(|s| s.len()).sum(); 211 | 212 | // If `is_preallocated` is true, both the Bump and the benchmark Vecs will have enough 213 | // capacity to store the concatenated data. If it's false, the Bump and the Vec start 214 | // out with no capacity allocated and grow on demand. 215 | let size_to_allocate = match is_preallocated { 216 | true => total_size, 217 | false => 0, 218 | }; 219 | let mut bump = bumpalo::Bump::with_capacity(size_to_allocate); 220 | 221 | // This benchmark demonstrates the performance of looping over the slice-of-slices, 222 | // calling `extend_from_slice_copy` (and transitively, `reserve`) for each slice. 223 | group.bench_function("loop over extend_from_slice_copy", |b| { 224 | b.iter(|| { 225 | bump.reset(); 226 | let mut vec = 227 | bumpalo::collections::Vec::::with_capacity_in(size_to_allocate, &bump); 228 | for slice in black_box(&slices) { 229 | vec.extend_from_slice_copy(slice); 230 | } 231 | black_box(vec.as_slice()); 232 | }); 233 | }); 234 | 235 | // This benchmark demonstrates the performance of using a single call to 236 | // `extend_from_slices_copy`, which performs a single `reserve` before appending 237 | // all of the slices. 238 | group.bench_function("extend_from_slices_copy", |b| { 239 | b.iter(|| { 240 | bump.reset(); 241 | let mut vec = 242 | bumpalo::collections::Vec::::with_capacity_in(size_to_allocate, &bump); 243 | vec.extend_from_slices_copy(black_box(slices.as_slice())); 244 | black_box(vec.as_slice()); 245 | }); 246 | }); 247 | 248 | group.finish(); 249 | } 250 | } 251 | } 252 | 253 | fn bench_alloc(c: &mut Criterion) { 254 | let mut group = c.benchmark_group("alloc"); 255 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 256 | group.bench_function("small", |b| b.iter(|| alloc::(ALLOCATIONS))); 257 | group.bench_function("big", |b| b.iter(|| alloc::(ALLOCATIONS))); 258 | } 259 | 260 | fn bench_alloc_with(c: &mut Criterion) { 261 | let mut group = c.benchmark_group("alloc-with"); 262 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 263 | group.bench_function("small", |b| b.iter(|| alloc_with::(ALLOCATIONS))); 264 | group.bench_function("big", |b| b.iter(|| alloc_with::(ALLOCATIONS))); 265 | } 266 | 267 | fn bench_alloc_try_with(c: &mut Criterion) { 268 | let mut group = c.benchmark_group("alloc-try-with"); 269 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 270 | group.bench_function("small, small", |b| { 271 | b.iter(|| alloc_try_with::(ALLOCATIONS)) 272 | }); 273 | group.bench_function("small, big", |b| { 274 | b.iter(|| alloc_try_with::(ALLOCATIONS)) 275 | }); 276 | group.bench_function("big, small", |b| { 277 | b.iter(|| alloc_try_with::(ALLOCATIONS)) 278 | }); 279 | group.bench_function("big, big", |b| { 280 | b.iter(|| alloc_try_with::(ALLOCATIONS)) 281 | }); 282 | } 283 | 284 | fn bench_alloc_try_with_err(c: &mut Criterion) { 285 | let mut group = c.benchmark_group("alloc-try-with-err"); 286 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 287 | 
group.bench_function("small, small", |b| { 288 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 289 | }); 290 | group.bench_function("small, big", |b| { 291 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 292 | }); 293 | group.bench_function("big, small", |b| { 294 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 295 | }); 296 | group.bench_function("big, big", |b| { 297 | b.iter(|| alloc_try_with_err::(ALLOCATIONS)) 298 | }); 299 | } 300 | 301 | fn bench_try_alloc(c: &mut Criterion) { 302 | let mut group = c.benchmark_group("try-alloc"); 303 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 304 | group.bench_function("small", |b| b.iter(|| try_alloc::(ALLOCATIONS))); 305 | group.bench_function("big", |b| b.iter(|| try_alloc::(ALLOCATIONS))); 306 | } 307 | 308 | fn bench_try_alloc_with(c: &mut Criterion) { 309 | let mut group = c.benchmark_group("try-alloc-with"); 310 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 311 | group.bench_function("small", |b| b.iter(|| try_alloc_with::(ALLOCATIONS))); 312 | group.bench_function("big", |b| b.iter(|| try_alloc_with::(ALLOCATIONS))); 313 | } 314 | 315 | fn bench_try_alloc_try_with(c: &mut Criterion) { 316 | let mut group = c.benchmark_group("try-alloc-try-with"); 317 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 318 | group.bench_function("small, small", |b| { 319 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 320 | }); 321 | group.bench_function("small, big", |b| { 322 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 323 | }); 324 | group.bench_function("big, small", |b| { 325 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 326 | }); 327 | group.bench_function("big, big", |b| { 328 | b.iter(|| try_alloc_try_with::(ALLOCATIONS)) 329 | }); 330 | } 331 | 332 | fn bench_try_alloc_try_with_err(c: &mut Criterion) { 333 | let mut group = c.benchmark_group("try-alloc-try-with-err"); 334 | group.throughput(Throughput::Elements(ALLOCATIONS as u64)); 335 | group.bench_function("small, small", |b| { 336 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 337 | }); 338 | group.bench_function("small, big", |b| { 339 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 340 | }); 341 | group.bench_function("big, small", |b| { 342 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 343 | }); 344 | group.bench_function("big, big", |b| { 345 | b.iter(|| try_alloc_try_with_err::(ALLOCATIONS)) 346 | }); 347 | } 348 | 349 | fn bench_format_realloc(c: &mut Criterion) { 350 | let mut group = c.benchmark_group("format-realloc"); 351 | 352 | for n in (1..5).map(|n| n * n * n * 10) { 353 | group.throughput(Throughput::Elements(n as u64)); 354 | group.bench_with_input(BenchmarkId::new("format-realloc", n), &n, |b, n| { 355 | let mut bump = bumpalo::Bump::new(); 356 | b.iter(|| { 357 | bump.reset(); 358 | format_realloc(&bump, *n); 359 | }); 360 | }); 361 | } 362 | } 363 | 364 | fn bench_string_from_str_in(c: &mut Criterion) { 365 | let len: usize = 16; 366 | 367 | let mut group = c.benchmark_group("alloc"); 368 | group.throughput(Throughput::Elements(len as u64)); 369 | group.bench_function("from_str_in", |b| { 370 | let mut bump = bumpalo::Bump::with_capacity(len); 371 | let str = "x".repeat(len); 372 | b.iter(|| { 373 | bump.reset(); 374 | string_from_str_in(&bump, &*str); 375 | }); 376 | }); 377 | } 378 | 379 | fn bench_string_push_str(c: &mut Criterion) { 380 | let len: usize = 16 * 1024; // 16 KiB 381 | 382 | let mut group = c.benchmark_group("alloc"); 383 | group.throughput(Throughput::Elements(len as u64)); 384 | 
group.bench_function("push_str", |b| {
385 |         let mut bump = bumpalo::Bump::with_capacity(len);
386 |         let str = "x".repeat(len);
387 |         b.iter(|| {
388 |             bump.reset();
389 |             string_push_str(&bump, &*str);
390 |         });
391 |     });
392 | }
393 |
394 | criterion_group!(
395 |     benches,
396 |     bench_extend_from_slice_copy,
397 |     bench_extend_from_slices_copy,
398 |     bench_alloc,
399 |     bench_alloc_with,
400 |     bench_alloc_try_with,
401 |     bench_alloc_try_with_err,
402 |     bench_try_alloc,
403 |     bench_try_alloc_with,
404 |     bench_try_alloc_try_with,
405 |     bench_try_alloc_try_with_err,
406 |     bench_format_realloc,
407 |     bench_string_from_str_in,
408 |     bench_string_push_str
409 | );
410 | criterion_main!(benches);
411 |
--------------------------------------------------------------------------------
/benches/tables.toml:
--------------------------------------------------------------------------------
1 | [top_comments]
2 |
3 | Overview = """
4 | This directory contains two suites of benchmarks:
5 |
6 | 1. `allocator_api.rs`: `std::alloc::Allocator`-based benchmarks that aim to
7 |    measure the performance of bump allocators within the generic `Allocator`
8 |    API.
9 |
10 | 2. `benches.rs`: Miscellaneous Bumpalo-specific benchmarks.
11 |
12 | The tables of benchmark results listed below are the results for the suite of
13 | `std::alloc::Allocator`-based benchmarks. They are originally adapted from
14 | [`blink-alloc`] (another fine bump allocator crate) which was already measuring
15 | the relative performance between `blink-alloc` and `bumpalo`. I wasn't able to
16 | reproduce many of their results showing that `blink-alloc` was faster than `bumpalo`,
17 | however, which was part of the motivation to bring a subset of them into this repo and document reproduction.
18 |
19 | Furthermore, the tables below include a `std::alloc::System` column, but its
20 | results come with a few caveats. First, in order to implement a `reset` method
21 | for the system allocator and deallocate everything that was allocated within a
22 | certain region of code, I had to add additional bookkeeping to dynamically track
23 | every live allocation. That bookkeeping generally won't be present in real
24 | programs, which will instead use things like `Drop` implementations, so it makes
25 | the system allocator's results look worse than they otherwise would
26 | be. Additionally, these benchmarks are really designed to show off the strengths
27 | of bump allocators and measure the operations that are important for bump
28 | allocators. The system allocator is expected to perform worse, but that's
29 | because it is designed for general-purpose scenarios, whereas bump allocators
30 | are designed for very specific scenarios. These columns should mostly serve as
31 | a general reference point to get an idea of the magnitude of the allocation
32 | speed-ups you can expect in the very specific scenarios where using a bump
33 | allocator makes sense.
34 |
35 | Finally, all these benchmarks are synthetic. They are micro-benchmarks. You
36 | shouldn't expect that anything here will directly translate into speed-ups for
37 | your application. Application performance is what really matters, and things
38 | observed in the micro often disappear in the macro. If your application isn't
39 | bottlenecked on allocation, or can't live with the constraints a bump allocator
40 | imposes, there's nothing that a bump allocator can do to help you.
41 |
42 | [`blink-alloc`]: https://github.com/zakarumych/blink-alloc/blob/845b2db273371260eef2e9858386f6c6aa180e98/BENCHMARKS.md
43 | """
44 |
45 | Reproducing = """
46 | The `std::alloc::Allocator`-based benchmarks require using nightly Rust, since
47 | the `Allocator` trait is still unstable. You must additionally enable Bumpalo's
48 | `allocator_api` cargo feature:
49 |
50 | ```
51 | $ cargo +nightly bench --bench allocator_api --features allocator_api
52 | ```
53 |
54 | The miscellaneous benchmarks require Bumpalo's `collections` cargo feature:
55 |
56 | ```
57 | $ cargo bench --bench benches --features collections
58 | ```
59 |
60 | To update the tables below, use `cargo-criterion` and [`criterion-table`]:
61 |
62 | ```
63 | $ cd bumpalo/benches/
64 | $ cargo +nightly bench --features allocator_api \\
65 |     --bench allocator_api \\
66 |     --message-format=json \\
67 |     > results.json
68 | $ criterion-table < results.json > README.md
69 | ```
70 |
71 | [`cargo-criterion`]: https://github.com/bheisler/cargo-criterion
72 | [`criterion-table`]: https://github.com/nu11ptr/criterion-table
73 | """
74 |
75 | [table_comments]
76 |
77 | allocator-api = """
78 | Benchmarks that measure calls into `std::alloc::Allocator` methods directly.
79 |
80 | These operations are generally the ones that happen most often, and therefore
81 | their performance is generally most important. Following the same logic, raw
82 | allocation is generally the very most important.
83 | """
84 |
85 | warm-up = """
86 | Benchmarks that measure the first allocation in a fresh allocator.
87 |
88 | These aren't generally very important, since the first allocation in a fresh
89 | bump allocator only ever happens once by definition. This is mostly measuring
90 | how long it takes the underlying system allocator to allocate the initial chunk
91 | to bump allocate out of.
92 | """
93 |
94 | reset = """
95 | Benchmarks that measure the overhead of resetting a bump allocator to an empty
96 | state, ready to be reused in a new program phase.
97 |
98 | This generally doesn't happen as often as allocation, and is therefore generally
99 | less important, but it is still worth keeping an eye on, since
100 | deallocation-en-masse and reusing already-allocated chunks can be selling points
101 | for bump allocation over using a generic allocator in certain scenarios.
102 | """
103 |
104 | vec = """
105 | Benchmarks that measure various `std::vec::Vec` operations when used in
106 | conjunction with a bump allocator.
107 |
108 | Bump allocators aren't often used directly, but instead through some sort of
109 | collection. These benchmarks are important in the sense that the standard
110 | `Vec` type is probably the most-commonly used collection (although not
111 | necessarily the most commonly used with bump allocators in Rust, at least until
112 | the `Allocator` trait is stabilized).
113 | """
--------------------------------------------------------------------------------
/bumpalo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fitzgen/bumpalo/dd8a0e1aacc184273c36605ea6273504886cbec0/bumpalo.png
--------------------------------------------------------------------------------
/src/boxed.rs:
--------------------------------------------------------------------------------
1 | //! A pointer type for bump allocation.
2 | //!
3 | //! [`Box<'a, T>`] provides the simplest form of
4 | //! bump allocation in `bumpalo`.
Boxes provide ownership for this allocation, and 5 | //! drop their contents when they go out of scope. 6 | //! 7 | //! # Examples 8 | //! 9 | //! Move a value from the stack to the heap by creating a [`Box`]: 10 | //! 11 | //! ``` 12 | //! use bumpalo::{Bump, boxed::Box}; 13 | //! 14 | //! let b = Bump::new(); 15 | //! 16 | //! let val: u8 = 5; 17 | //! let boxed: Box = Box::new_in(val, &b); 18 | //! ``` 19 | //! 20 | //! Move a value from a [`Box`] back to the stack by [dereferencing]: 21 | //! 22 | //! ``` 23 | //! use bumpalo::{Bump, boxed::Box}; 24 | //! 25 | //! let b = Bump::new(); 26 | //! 27 | //! let boxed: Box = Box::new_in(5, &b); 28 | //! let val: u8 = *boxed; 29 | //! ``` 30 | //! 31 | //! Running [`Drop`] implementations on bump-allocated values: 32 | //! 33 | //! ``` 34 | //! use bumpalo::{Bump, boxed::Box}; 35 | //! use std::sync::atomic::{AtomicUsize, Ordering}; 36 | //! 37 | //! static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0); 38 | //! 39 | //! struct CountDrops; 40 | //! 41 | //! impl Drop for CountDrops { 42 | //! fn drop(&mut self) { 43 | //! NUM_DROPPED.fetch_add(1, Ordering::SeqCst); 44 | //! } 45 | //! } 46 | //! 47 | //! // Create a new bump arena. 48 | //! let bump = Bump::new(); 49 | //! 50 | //! // Create a `CountDrops` inside the bump arena. 51 | //! let mut c = Box::new_in(CountDrops, &bump); 52 | //! 53 | //! // No `CountDrops` have been dropped yet. 54 | //! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 0); 55 | //! 56 | //! // Drop our `Box`. 57 | //! drop(c); 58 | //! 59 | //! // Its `Drop` implementation was run, and so `NUM_DROPS` has been incremented. 60 | //! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 1); 61 | //! ``` 62 | //! 63 | //! Creating a recursive data structure: 64 | //! 65 | //! ``` 66 | //! use bumpalo::{Bump, boxed::Box}; 67 | //! 68 | //! let b = Bump::new(); 69 | //! 70 | //! #[derive(Debug)] 71 | //! enum List<'a, T> { 72 | //! Cons(T, Box<'a, List<'a, T>>), 73 | //! Nil, 74 | //! } 75 | //! 76 | //! let list: List = List::Cons(1, Box::new_in(List::Cons(2, Box::new_in(List::Nil, &b)), &b)); 77 | //! println!("{:?}", list); 78 | //! ``` 79 | //! 80 | //! This will print `Cons(1, Cons(2, Nil))`. 81 | //! 82 | //! Recursive structures must be boxed, because if the definition of `Cons` 83 | //! looked like this: 84 | //! 85 | //! ```compile_fail,E0072 86 | //! # enum List { 87 | //! Cons(T, List), 88 | //! # } 89 | //! ``` 90 | //! 91 | //! It wouldn't work. This is because the size of a `List` depends on how many 92 | //! elements are in the list, and so we don't know how much memory to allocate 93 | //! for a `Cons`. By introducing a [`Box<'a, T>`], which has a defined size, we know how 94 | //! big `Cons` needs to be. 95 | //! 96 | //! # Memory layout 97 | //! 98 | //! For non-zero-sized values, a [`Box`] will use the provided [`Bump`] allocator for 99 | //! its allocation. It is valid to convert both ways between a [`Box`] and a 100 | //! pointer allocated with the [`Bump`] allocator, given that the 101 | //! [`Layout`] used with the allocator is correct for the type. More precisely, 102 | //! a `value: *mut T` that has been allocated with the [`Bump`] allocator 103 | //! with `Layout::for_value(&*value)` may be converted into a box using 104 | //! [`Box::::from_raw(value)`]. Conversely, the memory backing a `value: *mut 105 | //! T` obtained from [`Box::::into_raw`] will be deallocated by the 106 | //! [`Bump`] allocator with [`Layout::for_value(&*value)`]. 107 | //! 108 | //! 
Note that roundtrip `Box::from_raw(Box::into_raw(b))` looses the lifetime bound to the 109 | //! [`Bump`] immutable borrow which guarantees that the allocator will not be reset 110 | //! and memory will not be freed. 111 | //! 112 | //! [dereferencing]: https://doc.rust-lang.org/std/ops/trait.Deref.html 113 | //! [`Box`]: struct.Box.html 114 | //! [`Box<'a, T>`]: struct.Box.html 115 | //! [`Box::::from_raw(value)`]: struct.Box.html#method.from_raw 116 | //! [`Box::::into_raw`]: struct.Box.html#method.into_raw 117 | //! [`Bump`]: ../struct.Bump.html 118 | //! [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html 119 | //! [`Layout`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html 120 | //! [`Layout::for_value(&*value)`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html#method.for_value 121 | 122 | use { 123 | crate::Bump, 124 | { 125 | core::{ 126 | any::Any, 127 | borrow, 128 | cmp::Ordering, 129 | convert::TryFrom, 130 | future::Future, 131 | hash::{Hash, Hasher}, 132 | iter::FusedIterator, 133 | mem::ManuallyDrop, 134 | ops::{Deref, DerefMut}, 135 | pin::Pin, 136 | task::{Context, Poll}, 137 | }, 138 | core_alloc::fmt, 139 | }, 140 | }; 141 | 142 | /// An owned pointer to a bump-allocated `T` value, that runs `Drop` 143 | /// implementations. 144 | /// 145 | /// See the [module-level documentation][crate::boxed] for more details. 146 | #[repr(transparent)] 147 | pub struct Box<'a, T: ?Sized>(&'a mut T); 148 | 149 | impl<'a, T> Box<'a, T> { 150 | /// Allocates memory on the heap and then places `x` into it. 151 | /// 152 | /// This doesn't actually allocate if `T` is zero-sized. 153 | /// 154 | /// # Examples 155 | /// 156 | /// ``` 157 | /// use bumpalo::{Bump, boxed::Box}; 158 | /// 159 | /// let b = Bump::new(); 160 | /// 161 | /// let five = Box::new_in(5, &b); 162 | /// ``` 163 | #[inline(always)] 164 | pub fn new_in(x: T, a: &'a Bump) -> Box<'a, T> { 165 | Box(a.alloc(x)) 166 | } 167 | 168 | /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then 169 | /// `x` will be pinned in memory and unable to be moved. 170 | #[inline(always)] 171 | pub fn pin_in(x: T, a: &'a Bump) -> Pin> { 172 | Box(a.alloc(x)).into() 173 | } 174 | 175 | /// Consumes the `Box`, returning the wrapped value. 176 | /// 177 | /// # Examples 178 | /// 179 | /// ``` 180 | /// use bumpalo::{Bump, boxed::Box}; 181 | /// 182 | /// let b = Bump::new(); 183 | /// 184 | /// let hello = Box::new_in("hello".to_owned(), &b); 185 | /// assert_eq!(Box::into_inner(hello), "hello"); 186 | /// ``` 187 | pub fn into_inner(b: Box<'a, T>) -> T { 188 | // `Box::into_raw` returns a pointer that is properly aligned and non-null. 189 | // The underlying `Bump` only frees the memory, but won't call the destructor. 190 | unsafe { core::ptr::read(Box::into_raw(b)) } 191 | } 192 | } 193 | 194 | impl<'a, T: ?Sized> Box<'a, T> { 195 | /// Constructs a box from a raw pointer. 196 | /// 197 | /// After calling this function, the raw pointer is owned by the 198 | /// resulting `Box`. Specifically, the `Box` destructor will call 199 | /// the destructor of `T` and free the allocated memory. For this 200 | /// to be safe, the memory must have been allocated in accordance 201 | /// with the memory layout used by `Box` . 202 | /// 203 | /// # Safety 204 | /// 205 | /// This function is unsafe because improper use may lead to 206 | /// memory problems. For example, a double-free may occur if the 207 | /// function is called twice on the same raw pointer. 
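    /// The memory must also have been allocated with a `Layout` that is correct
    /// for `T` (see the module-level notes on memory layout), and the resulting
    /// box must not outlive the `Bump` arena it was allocated in.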
208 | /// 209 | /// # Examples 210 | /// 211 | /// Recreate a `Box` which was previously converted to a raw pointer 212 | /// using [`Box::into_raw`]: 213 | /// ``` 214 | /// use bumpalo::{Bump, boxed::Box}; 215 | /// 216 | /// let b = Bump::new(); 217 | /// 218 | /// let x = Box::new_in(5, &b); 219 | /// let ptr = Box::into_raw(x); 220 | /// let x = unsafe { Box::from_raw(ptr) }; // Note that new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset. 221 | /// ``` 222 | /// Manually create a `Box` from scratch by using the bump allocator: 223 | /// ``` 224 | /// use std::alloc::{alloc, Layout}; 225 | /// use bumpalo::{Bump, boxed::Box}; 226 | /// 227 | /// let b = Bump::new(); 228 | /// 229 | /// unsafe { 230 | /// let ptr = b.alloc_layout(Layout::new::()).as_ptr() as *mut i32; 231 | /// *ptr = 5; 232 | /// let x = Box::from_raw(ptr); // Note that `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset. 233 | /// } 234 | /// ``` 235 | #[inline] 236 | pub unsafe fn from_raw(raw: *mut T) -> Self { 237 | Box(&mut *raw) 238 | } 239 | 240 | /// Consumes the `Box`, returning a wrapped raw pointer. 241 | /// 242 | /// The pointer will be properly aligned and non-null. 243 | /// 244 | /// After calling this function, the caller is responsible for the 245 | /// value previously managed by the `Box`. In particular, the 246 | /// caller should properly destroy `T`. The easiest way to 247 | /// do this is to convert the raw pointer back into a `Box` with the 248 | /// [`Box::from_raw`] function, allowing the `Box` destructor to perform 249 | /// the cleanup. 250 | /// 251 | /// Note: this is an associated function, which means that you have 252 | /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This 253 | /// is so that there is no conflict with a method on the inner type. 254 | /// 255 | /// # Examples 256 | /// 257 | /// Converting the raw pointer back into a `Box` with [`Box::from_raw`] 258 | /// for automatic cleanup: 259 | /// ``` 260 | /// use bumpalo::{Bump, boxed::Box}; 261 | /// 262 | /// let b = Bump::new(); 263 | /// 264 | /// let x = Box::new_in(String::from("Hello"), &b); 265 | /// let ptr = Box::into_raw(x); 266 | /// let x = unsafe { Box::from_raw(ptr) }; // Note that new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset. 267 | /// ``` 268 | /// Manual cleanup by explicitly running the destructor: 269 | /// ``` 270 | /// use std::ptr; 271 | /// use bumpalo::{Bump, boxed::Box}; 272 | /// 273 | /// let b = Bump::new(); 274 | /// 275 | /// let mut x = Box::new_in(String::from("Hello"), &b); 276 | /// let p = Box::into_raw(x); 277 | /// unsafe { 278 | /// ptr::drop_in_place(p); 279 | /// } 280 | /// ``` 281 | #[inline] 282 | pub fn into_raw(b: Box<'a, T>) -> *mut T { 283 | let mut b = ManuallyDrop::new(b); 284 | b.deref_mut().0 as *mut T 285 | } 286 | 287 | /// Consumes and leaks the `Box`, returning a mutable reference, 288 | /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime 289 | /// `'a`. If the type has only static references, or none at all, then this 290 | /// may be chosen to be `'static`. 291 | /// 292 | /// This function is mainly useful for data that lives for the remainder of 293 | /// the program's life. Dropping the returned reference will cause a memory 294 | /// leak. If this is not acceptable, the reference should first be wrapped 295 | /// with the [`Box::from_raw`] function producing a `Box`. 
This `Box` can 296 | /// then be dropped which will properly destroy `T` and release the 297 | /// allocated memory. 298 | /// 299 | /// Note: this is an associated function, which means that you have 300 | /// to call it as `Box::leak(b)` instead of `b.leak()`. This 301 | /// is so that there is no conflict with a method on the inner type. 302 | /// 303 | /// # Examples 304 | /// 305 | /// Simple usage: 306 | /// 307 | /// ``` 308 | /// use bumpalo::{Bump, boxed::Box}; 309 | /// 310 | /// let b = Bump::new(); 311 | /// 312 | /// let x = Box::new_in(41, &b); 313 | /// let reference: &mut usize = Box::leak(x); 314 | /// *reference += 1; 315 | /// assert_eq!(*reference, 42); 316 | /// ``` 317 | /// 318 | ///``` 319 | /// # #[cfg(feature = "collections")] 320 | /// # { 321 | /// use bumpalo::{Bump, boxed::Box, vec}; 322 | /// 323 | /// let b = Bump::new(); 324 | /// 325 | /// let x = vec![in &b; 1, 2, 3].into_boxed_slice(); 326 | /// let reference = Box::leak(x); 327 | /// reference[0] = 4; 328 | /// assert_eq!(*reference, [4, 2, 3]); 329 | /// # } 330 | ///``` 331 | #[inline] 332 | pub fn leak(b: Box<'a, T>) -> &'a mut T { 333 | unsafe { &mut *Box::into_raw(b) } 334 | } 335 | } 336 | 337 | impl<'a, T: ?Sized> Drop for Box<'a, T> { 338 | fn drop(&mut self) { 339 | unsafe { 340 | // `Box` owns value of `T`, but not memory behind it. 341 | core::ptr::drop_in_place(self.0); 342 | } 343 | } 344 | } 345 | 346 | impl<'a, T> Default for Box<'a, [T]> { 347 | fn default() -> Box<'a, [T]> { 348 | // It should be OK to `drop_in_place` empty slice of anything. 349 | Box(&mut []) 350 | } 351 | } 352 | 353 | impl<'a> Default for Box<'a, str> { 354 | fn default() -> Box<'a, str> { 355 | // Empty slice is valid string. 356 | // It should be OK to `drop_in_place` empty str. 
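        // An empty `[u8]` is valid UTF-8, which is what makes the raw cast to
        // `*mut str` below sound.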
357 | unsafe { Box::from_raw(Box::into_raw(Box::<[u8]>::default()) as *mut str) } 358 | } 359 | } 360 | 361 | impl<'a, 'b, T: ?Sized + PartialEq> PartialEq> for Box<'a, T> { 362 | #[inline] 363 | fn eq(&self, other: &Box<'b, T>) -> bool { 364 | PartialEq::eq(&**self, &**other) 365 | } 366 | #[inline] 367 | fn ne(&self, other: &Box<'b, T>) -> bool { 368 | PartialEq::ne(&**self, &**other) 369 | } 370 | } 371 | 372 | impl<'a, 'b, T: ?Sized + PartialOrd> PartialOrd> for Box<'a, T> { 373 | #[inline] 374 | fn partial_cmp(&self, other: &Box<'b, T>) -> Option { 375 | PartialOrd::partial_cmp(&**self, &**other) 376 | } 377 | #[inline] 378 | fn lt(&self, other: &Box<'b, T>) -> bool { 379 | PartialOrd::lt(&**self, &**other) 380 | } 381 | #[inline] 382 | fn le(&self, other: &Box<'b, T>) -> bool { 383 | PartialOrd::le(&**self, &**other) 384 | } 385 | #[inline] 386 | fn ge(&self, other: &Box<'b, T>) -> bool { 387 | PartialOrd::ge(&**self, &**other) 388 | } 389 | #[inline] 390 | fn gt(&self, other: &Box<'b, T>) -> bool { 391 | PartialOrd::gt(&**self, &**other) 392 | } 393 | } 394 | 395 | impl<'a, T: ?Sized + Ord> Ord for Box<'a, T> { 396 | #[inline] 397 | fn cmp(&self, other: &Box<'a, T>) -> Ordering { 398 | Ord::cmp(&**self, &**other) 399 | } 400 | } 401 | 402 | impl<'a, T: ?Sized + Eq> Eq for Box<'a, T> {} 403 | 404 | impl<'a, T: ?Sized + Hash> Hash for Box<'a, T> { 405 | fn hash(&self, state: &mut H) { 406 | (**self).hash(state); 407 | } 408 | } 409 | 410 | impl<'a, T: ?Sized + Hasher> Hasher for Box<'a, T> { 411 | fn finish(&self) -> u64 { 412 | (**self).finish() 413 | } 414 | fn write(&mut self, bytes: &[u8]) { 415 | (**self).write(bytes) 416 | } 417 | fn write_u8(&mut self, i: u8) { 418 | (**self).write_u8(i) 419 | } 420 | fn write_u16(&mut self, i: u16) { 421 | (**self).write_u16(i) 422 | } 423 | fn write_u32(&mut self, i: u32) { 424 | (**self).write_u32(i) 425 | } 426 | fn write_u64(&mut self, i: u64) { 427 | (**self).write_u64(i) 428 | } 429 | fn write_u128(&mut self, i: u128) { 430 | (**self).write_u128(i) 431 | } 432 | fn write_usize(&mut self, i: usize) { 433 | (**self).write_usize(i) 434 | } 435 | fn write_i8(&mut self, i: i8) { 436 | (**self).write_i8(i) 437 | } 438 | fn write_i16(&mut self, i: i16) { 439 | (**self).write_i16(i) 440 | } 441 | fn write_i32(&mut self, i: i32) { 442 | (**self).write_i32(i) 443 | } 444 | fn write_i64(&mut self, i: i64) { 445 | (**self).write_i64(i) 446 | } 447 | fn write_i128(&mut self, i: i128) { 448 | (**self).write_i128(i) 449 | } 450 | fn write_isize(&mut self, i: isize) { 451 | (**self).write_isize(i) 452 | } 453 | } 454 | 455 | impl<'a, T: ?Sized> From> for Pin> { 456 | /// Converts a `Box` into a `Pin>`. 457 | /// 458 | /// This conversion does not allocate on the heap and happens in place. 459 | fn from(boxed: Box<'a, T>) -> Self { 460 | // It's not possible to move or replace the insides of a `Pin>` 461 | // when `T: !Unpin`, so it's safe to pin it directly without any 462 | // additional requirements. 463 | unsafe { Pin::new_unchecked(boxed) } 464 | } 465 | } 466 | 467 | impl<'a> Box<'a, dyn Any> { 468 | #[inline] 469 | /// Attempt to downcast the box to a concrete type. 
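    /// If the boxed value is not a `T`, the original box is handed back
    /// unchanged as the `Err` value.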
470 |     ///
471 |     /// # Examples
472 |     ///
473 |     /// ```
474 |     /// use std::any::Any;
475 |     ///
476 |     /// fn print_if_string(value: Box<dyn Any>) {
477 |     ///     if let Ok(string) = value.downcast::<String>() {
478 |     ///         println!("String ({}): {}", string.len(), string);
479 |     ///     }
480 |     /// }
481 |     ///
482 |     /// let my_string = "Hello World".to_string();
483 |     /// print_if_string(Box::new(my_string));
484 |     /// print_if_string(Box::new(0i8));
485 |     /// ```
486 |     pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any>> {
487 |         if self.is::<T>() {
488 |             unsafe {
489 |                 let raw: *mut dyn Any = Box::into_raw(self);
490 |                 Ok(Box::from_raw(raw as *mut T))
491 |             }
492 |         } else {
493 |             Err(self)
494 |         }
495 |     }
496 | }
497 | 
498 | impl<'a> Box<'a, dyn Any + Send> {
499 |     #[inline]
500 |     /// Attempt to downcast the box to a concrete type.
501 |     ///
502 |     /// # Examples
503 |     ///
504 |     /// ```
505 |     /// use std::any::Any;
506 |     ///
507 |     /// fn print_if_string(value: Box<dyn Any + Send>) {
508 |     ///     if let Ok(string) = value.downcast::<String>() {
509 |     ///         println!("String ({}): {}", string.len(), string);
510 |     ///     }
511 |     /// }
512 |     ///
513 |     /// let my_string = "Hello World".to_string();
514 |     /// print_if_string(Box::new(my_string));
515 |     /// print_if_string(Box::new(0i8));
516 |     /// ```
517 |     pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any + Send>> {
518 |         if self.is::<T>() {
519 |             unsafe {
520 |                 let raw: *mut (dyn Any + Send) = Box::into_raw(self);
521 |                 Ok(Box::from_raw(raw as *mut T))
522 |             }
523 |         } else {
524 |             Err(self)
525 |         }
526 |     }
527 | }
528 | 
529 | impl<'a, T: fmt::Display + ?Sized> fmt::Display for Box<'a, T> {
530 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
531 |         fmt::Display::fmt(&**self, f)
532 |     }
533 | }
534 | 
535 | impl<'a, T: fmt::Debug + ?Sized> fmt::Debug for Box<'a, T> {
536 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
537 |         fmt::Debug::fmt(&**self, f)
538 |     }
539 | }
540 | 
541 | impl<'a, T: ?Sized> fmt::Pointer for Box<'a, T> {
542 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
543 |         // It's not possible to extract the inner Uniq directly from the Box,
544 |         // instead we cast it to a *const which aliases the Unique
545 |         let ptr: *const T = &**self;
546 |         fmt::Pointer::fmt(&ptr, f)
547 |     }
548 | }
549 | 
550 | impl<'a, T: ?Sized> Deref for Box<'a, T> {
551 |     type Target = T;
552 | 
553 |     fn deref(&self) -> &T {
554 |         &*self.0
555 |     }
556 | }
557 | 
558 | impl<'a, T: ?Sized> DerefMut for Box<'a, T> {
559 |     fn deref_mut(&mut self) -> &mut T {
560 |         self.0
561 |     }
562 | }
563 | 
564 | impl<'a, I: Iterator + ?Sized> Iterator for Box<'a, I> {
565 |     type Item = I::Item;
566 |     fn next(&mut self) -> Option<I::Item> {
567 |         (**self).next()
568 |     }
569 |     fn size_hint(&self) -> (usize, Option<usize>) {
570 |         (**self).size_hint()
571 |     }
572 |     fn nth(&mut self, n: usize) -> Option<I::Item> {
573 |         (**self).nth(n)
574 |     }
575 |     fn last(self) -> Option<I::Item> {
576 |         #[inline]
577 |         fn some<T>(_: Option<T>, x: T) -> Option<T> {
578 |             Some(x)
579 |         }
580 |         self.fold(None, some)
581 |     }
582 | }
583 | 
584 | impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<'a, I> {
585 |     fn next_back(&mut self) -> Option<I::Item> {
586 |         (**self).next_back()
587 |     }
588 |     fn nth_back(&mut self, n: usize) -> Option<I::Item> {
589 |         (**self).nth_back(n)
590 |     }
591 | }
592 | impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<'a, I> {
593 |     fn len(&self) -> usize {
594 |         (**self).len()
595 |     }
596 | }
597 | 
598 | impl<'a, I: FusedIterator + ?Sized> FusedIterator for Box<'a, I> {}
599 | 
600 | #[cfg(feature = "collections")]
601 | impl<'a, A> Box<'a, [A]> {
602 |     /// Creates a value from an iterator.
603 |     /// This method is an adapted version of [`FromIterator::from_iter`][from_iter].
604 |     /// It cannot be an implementation of that trait, because its signature differs: it takes an allocator.
605 |     ///
606 |     /// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter
607 |     ///
608 |     /// # Examples
609 |     ///
610 |     /// Basic usage:
611 |     /// ```
612 |     /// use bumpalo::{Bump, boxed::Box, vec};
613 |     ///
614 |     /// let b = Bump::new();
615 |     ///
616 |     /// let five_fives = std::iter::repeat(5).take(5);
617 |     /// let slice = Box::from_iter_in(five_fives, &b);
618 |     /// assert_eq!(vec![in &b; 5, 5, 5, 5, 5], &*slice);
619 |     /// ```
620 |     pub fn from_iter_in<T: IntoIterator<Item = A>>(iter: T, a: &'a Bump) -> Self {
621 |         use crate::collections::Vec;
622 |         let mut vec = Vec::new_in(a);
623 |         vec.extend(iter);
624 |         vec.into_boxed_slice()
625 |     }
626 | }
627 | 
628 | impl<'a, T: ?Sized> borrow::Borrow<T> for Box<'a, T> {
629 |     fn borrow(&self) -> &T {
630 |         &**self
631 |     }
632 | }
633 | 
634 | impl<'a, T: ?Sized> borrow::BorrowMut<T> for Box<'a, T> {
635 |     fn borrow_mut(&mut self) -> &mut T {
636 |         &mut **self
637 |     }
638 | }
639 | 
640 | impl<'a, T: ?Sized> AsRef<T> for Box<'a, T> {
641 |     fn as_ref(&self) -> &T {
642 |         &**self
643 |     }
644 | }
645 | 
646 | impl<'a, T: ?Sized> AsMut<T> for Box<'a, T> {
647 |     fn as_mut(&mut self) -> &mut T {
648 |         &mut **self
649 |     }
650 | }
651 | 
652 | impl<'a, T: ?Sized> Unpin for Box<'a, T> {}
653 | 
654 | impl<'a, F: ?Sized + Future + Unpin> Future for Box<'a, F> {
655 |     type Output = F::Output;
656 | 
657 |     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
658 |         F::poll(Pin::new(&mut *self), cx)
659 |     }
660 | }
661 | 
662 | /// This impl replaces unsize coercion.
663 | impl<'a, T, const N: usize> From<Box<'a, [T; N]>> for Box<'a, [T]> {
664 |     fn from(arr: Box<'a, [T; N]>) -> Box<'a, [T]> {
665 |         let mut arr = ManuallyDrop::new(arr);
666 |         let ptr = core::ptr::slice_from_raw_parts_mut(arr.as_mut_ptr(), N);
667 |         unsafe { Box::from_raw(ptr) }
668 |     }
669 | }
670 | 
671 | /// This impl replaces unsize coercion.
672 | impl<'a, T, const N: usize> TryFrom<Box<'a, [T]>> for Box<'a, [T; N]> {
673 |     type Error = Box<'a, [T]>;
674 |     fn try_from(slice: Box<'a, [T]>) -> Result<Box<'a, [T; N]>, Box<'a, [T]>> {
675 |         if slice.len() == N {
676 |             let mut slice = ManuallyDrop::new(slice);
677 |             let ptr = slice.as_mut_ptr() as *mut [T; N];
678 |             Ok(unsafe { Box::from_raw(ptr) })
679 |         } else {
680 |             Err(slice)
681 |         }
682 |     }
683 | }
684 | 
685 | #[cfg(feature = "serde")]
686 | mod serialize {
687 |     use super::*;
688 | 
689 |     use serde::{Serialize, Serializer};
690 | 
691 |     impl<'a, T> Serialize for Box<'a, T>
692 |     where
693 |         T: Serialize,
694 |     {
695 |         fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
696 |             T::serialize(self, serializer)
697 |         }
698 |     }
699 | }
700 | --------------------------------------------------------------------------------
/src/collections/collect_in.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "boxed")]
2 | use crate::boxed::Box;
3 | use crate::collections::{String, Vec};
4 | use crate::Bump;
5 | 
6 | /// A trait for types that support being constructed from an iterator, parameterized by an allocator.
7 | pub trait FromIteratorIn<T> {
8 |     /// The allocator type
9 |     type Alloc;
10 | 
11 |     /// Similar to [`FromIterator::from_iter`][from_iter], but with a given allocator.
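    /// The allocator is threaded through as an explicit argument because
    /// `FromIterator::from_iter` has no way to accept one.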
12 |     ///
13 |     /// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter
14 |     ///
15 |     /// ```
16 |     /// # use bumpalo::collections::{FromIteratorIn, Vec};
17 |     /// # use bumpalo::Bump;
18 |     /// #
19 |     /// let five_fives = std::iter::repeat(5).take(5);
20 |     /// let bump = Bump::new();
21 |     ///
22 |     /// let v = Vec::from_iter_in(five_fives, &bump);
23 |     ///
24 |     /// assert_eq!(v, [5, 5, 5, 5, 5]);
25 |     /// ```
26 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
27 |     where
28 |         I: IntoIterator<Item = T>;
29 | }
30 | 
31 | #[cfg(feature = "boxed")]
32 | impl<'bump, T> FromIteratorIn<T> for Box<'bump, [T]> {
33 |     type Alloc = &'bump Bump;
34 | 
35 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
36 |     where
37 |         I: IntoIterator<Item = T>,
38 |     {
39 |         Box::from_iter_in(iter, alloc)
40 |     }
41 | }
42 | 
43 | impl<'bump, T> FromIteratorIn<T> for Vec<'bump, T> {
44 |     type Alloc = &'bump Bump;
45 | 
46 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
47 |     where
48 |         I: IntoIterator<Item = T>,
49 |     {
50 |         Vec::from_iter_in(iter, alloc)
51 |     }
52 | }
53 | 
54 | impl<T, V: FromIteratorIn<T>> FromIteratorIn<Option<T>> for Option<V> {
55 |     type Alloc = V::Alloc;
56 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
57 |     where
58 |         I: IntoIterator<Item = Option<T>>,
59 |     {
60 |         iter.into_iter()
61 |             .map(|x| x.ok_or(()))
62 |             .collect_in::<Result<V, ()>>(alloc)
63 |             .ok()
64 |     }
65 | }
66 | 
67 | impl<T, E, V: FromIteratorIn<T>> FromIteratorIn<Result<T, E>> for Result<V, E> {
68 |     type Alloc = V::Alloc;
69 |     /// Takes each element in the `Iterator`: if it is an `Err`, no further
70 |     /// elements are taken, and the `Err` is returned. Should no `Err` occur, a
71 |     /// container with the values of each `Result` is returned.
72 |     ///
73 |     /// Here is an example which increments every integer in a vector,
74 |     /// checking for overflow:
75 |     ///
76 |     /// ```
77 |     /// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String};
78 |     /// # use bumpalo::Bump;
79 |     /// #
80 |     /// let bump = Bump::new();
81 |     ///
82 |     /// let v = vec![1, 2, u32::MAX];
83 |     /// let res: Result<Vec<u32>, &'static str> = v.iter().take(2).map(|x: &u32|
84 |     ///     x.checked_add(1).ok_or("Overflow!")
85 |     /// ).collect_in(&bump);
86 |     /// assert_eq!(res, Ok(bumpalo::vec![in &bump; 2, 3]));
87 |     ///
88 |     /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
89 |     ///     x.checked_add(1).ok_or("Overflow!")
90 |     /// ).collect_in(&bump);
91 |     /// assert_eq!(res, Err("Overflow!"));
92 |     /// ```
93 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
94 |     where
95 |         I: IntoIterator<Item = Result<T, E>>,
96 |     {
97 |         let mut iter = iter.into_iter();
98 |         let mut error = None;
99 |         let container = core::iter::from_fn(|| match iter.next() {
100 |             Some(Ok(x)) => Some(x),
101 |             Some(Err(e)) => {
102 |                 error = Some(e);
103 |                 None
104 |             }
105 |             None => None,
106 |         })
107 |         .collect_in(alloc);
108 | 
109 |         match error {
110 |             Some(e) => Err(e),
111 |             None => Ok(container),
112 |         }
113 |     }
114 | }
115 | 
116 | impl<'bump> FromIteratorIn<char> for String<'bump> {
117 |     type Alloc = &'bump Bump;
118 | 
119 |     fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
120 |     where
121 |         I: IntoIterator<Item = char>,
122 |     {
123 |         String::from_iter_in(iter, alloc)
124 |     }
125 | }
126 | 
127 | /// Extension trait for iterators, in order to allow allocator-parameterized collections to be constructed more easily.
128 | pub trait CollectIn: Iterator + Sized {
129 |     /// Collect all items from an iterator, into a collection parameterized by an allocator.
130 |     /// Similar to [`Iterator::collect`][collect].
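    /// The target collection (and with it the allocator type) is chosen
    /// through the `C` type parameter, usually with a turbofish or a type
    /// annotation as in the example below.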
131 |     ///
132 |     /// [collect]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect
133 |     ///
134 |     /// ```
135 |     /// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String};
136 |     /// # use bumpalo::Bump;
137 |     /// #
138 |     /// let bump = Bump::new();
139 |     ///
140 |     /// let str = "hello, world!".to_owned();
141 |     /// let bump_str: String = str.chars().collect_in(&bump);
142 |     /// assert_eq!(&bump_str, &str);
143 |     ///
144 |     /// let nums: Vec<i32> = (0..=3).collect_in::<Vec<_>>(&bump);
145 |     /// assert_eq!(&nums, &[0,1,2,3]);
146 |     /// ```
147 |     fn collect_in<C: FromIteratorIn<Self::Item>>(self, alloc: C::Alloc) -> C {
148 |         C::from_iter_in(self, alloc)
149 |     }
150 | }
151 | 
152 | impl<I: Iterator> CollectIn for I {}
153 | --------------------------------------------------------------------------------
/src/collections/mod.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
2 | // file at the top-level directory of this distribution and at
3 | // http://rust-lang.org/COPYRIGHT.
4 | //
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 | // option. This file may not be copied, modified, or distributed
9 | // except according to those terms.
10 | 
11 | //! Collection types that allocate inside a [`Bump`] arena.
12 | //!
13 | //! [`Bump`]: ../struct.Bump.html
14 | 
15 | #![allow(deprecated)]
16 | 
17 | mod raw_vec;
18 | 
19 | pub mod vec;
20 | pub use self::vec::Vec;
21 | 
22 | mod str;
23 | pub mod string;
24 | pub use self::string::String;
25 | 
26 | mod collect_in;
27 | pub use collect_in::{CollectIn, FromIteratorIn};
28 | 
29 | // pub mod binary_heap;
30 | // mod btree;
31 | // pub mod linked_list;
32 | // pub mod vec_deque;
33 | 
34 | // pub mod btree_map {
35 | //     //! A map based on a B-Tree.
36 | //     pub use super::btree::map::*;
37 | // }
38 | 
39 | // pub mod btree_set {
40 | //     //! A set based on a B-Tree.
41 | //     pub use super::btree::set::*;
42 | // }
43 | 
44 | // #[doc(no_inline)]
45 | // pub use self::binary_heap::BinaryHeap;
46 | 
47 | // #[doc(no_inline)]
48 | // pub use self::btree_map::BTreeMap;
49 | 
50 | // #[doc(no_inline)]
51 | // pub use self::btree_set::BTreeSet;
52 | 
53 | // #[doc(no_inline)]
54 | // pub use self::linked_list::LinkedList;
55 | 
56 | // #[doc(no_inline)]
57 | // pub use self::vec_deque::VecDeque;
58 | 
59 | use crate::alloc::{AllocErr, LayoutErr};
60 | 
61 | /// Augments `AllocErr` with a `CapacityOverflow` variant.
62 | #[derive(Clone, PartialEq, Eq, Debug)]
63 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
64 | pub enum CollectionAllocErr {
65 |     /// Error due to the computed capacity exceeding the collection's maximum
66 |     /// (usually `isize::MAX` bytes).
67 |     CapacityOverflow,
68 |     /// Error due to the allocator (see the documentation for the [`AllocErr`] type).
69 |     AllocErr,
70 | }
71 | 
72 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
73 | impl From<AllocErr> for CollectionAllocErr {
74 |     #[inline]
75 |     fn from(AllocErr: AllocErr) -> Self {
76 |         CollectionAllocErr::AllocErr
77 |     }
78 | }
79 | 
80 | // #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
81 | impl From<LayoutErr> for CollectionAllocErr {
82 |     #[inline]
83 |     fn from(_: LayoutErr) -> Self {
84 |         CollectionAllocErr::CapacityOverflow
85 |     }
86 | }
87 | 
88 | // /// An intermediate trait for specialization of `Extend`.
89 | // #[doc(hidden)]
90 | // trait SpecExtend<I: IntoIterator> {
91 | //     /// Extends `self` with the contents of the given iterator.
92 | //     fn spec_extend(&mut self, iter: I);
93 | // }
94 | --------------------------------------------------------------------------------
/src/collections/str/lossy.rs:
--------------------------------------------------------------------------------
1 | // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 | // file at the top-level directory of this distribution and at
3 | // http://rust-lang.org/COPYRIGHT.
4 | //
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 | // option. This file may not be copied, modified, or distributed
9 | // except according to those terms.
10 | 
11 | use crate::collections::str as core_str;
12 | use core::char;
13 | use core::fmt;
14 | use core::fmt::Write;
15 | use core::str;
16 | 
17 | /// Lossy UTF-8 string.
18 | pub struct Utf8Lossy<'a> {
19 |     bytes: &'a [u8],
20 | }
21 | 
22 | impl<'a> Utf8Lossy<'a> {
23 |     pub fn from_bytes(bytes: &'a [u8]) -> Utf8Lossy<'a> {
24 |         Utf8Lossy { bytes }
25 |     }
26 | 
27 |     pub fn chunks(&self) -> Utf8LossyChunksIter<'a> {
28 |         Utf8LossyChunksIter {
29 |             source: &self.bytes,
30 |         }
31 |     }
32 | }
33 | 
34 | /// Iterator over lossy UTF-8 string
35 | #[allow(missing_debug_implementations)]
36 | pub struct Utf8LossyChunksIter<'a> {
37 |     source: &'a [u8],
38 | }
39 | 
40 | #[derive(PartialEq, Eq, Debug)]
41 | pub struct Utf8LossyChunk<'a> {
42 |     /// Sequence of valid chars.
43 |     /// Can be empty between broken UTF-8 chars.
44 |     pub valid: &'a str,
45 |     /// Single broken char, empty if none.
46 |     /// Empty iff iterator item is last.
47 |     pub broken: &'a [u8],
48 | }
49 | 
50 | impl<'a> Iterator for Utf8LossyChunksIter<'a> {
51 |     type Item = Utf8LossyChunk<'a>;
52 | 
53 |     fn next(&mut self) -> Option<Utf8LossyChunk<'a>> {
54 |         if self.source.is_empty() {
55 |             return None;
56 |         }
57 | 
58 |         const TAG_CONT_U8: u8 = 128;
59 |         fn unsafe_get(xs: &[u8], i: usize) -> u8 {
60 |             unsafe { *xs.get_unchecked(i) }
61 |         }
62 |         fn safe_get(xs: &[u8], i: usize) -> u8 {
63 |             if i >= xs.len() {
64 |                 0
65 |             } else {
66 |                 unsafe_get(xs, i)
67 |             }
68 |         }
69 | 
70 |         let mut i = 0;
71 |         while i < self.source.len() {
72 |             let i_ = i;
73 | 
74 |             let byte = unsafe_get(self.source, i);
75 |             i += 1;
76 | 
77 |             if byte < 128 {
78 |             } else {
79 |                 let w = core_str::utf8_char_width(byte);
80 | 
81 |                 macro_rules!
error { 82 | () => {{ 83 | unsafe { 84 | let r = Utf8LossyChunk { 85 | valid: str::from_utf8_unchecked(&self.source[0..i_]), 86 | broken: &self.source[i_..i], 87 | }; 88 | self.source = &self.source[i..]; 89 | return Some(r); 90 | } 91 | }}; 92 | } 93 | 94 | match w { 95 | 2 => { 96 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 97 | error!(); 98 | } 99 | i += 1; 100 | } 101 | 3 => { 102 | match (byte, safe_get(self.source, i)) { 103 | (0xE0, 0xA0..=0xBF) => (), 104 | (0xE1..=0xEC, 0x80..=0xBF) => (), 105 | (0xED, 0x80..=0x9F) => (), 106 | (0xEE..=0xEF, 0x80..=0xBF) => (), 107 | _ => { 108 | error!(); 109 | } 110 | } 111 | i += 1; 112 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 113 | error!(); 114 | } 115 | i += 1; 116 | } 117 | 4 => { 118 | match (byte, safe_get(self.source, i)) { 119 | (0xF0, 0x90..=0xBF) => (), 120 | (0xF1..=0xF3, 0x80..=0xBF) => (), 121 | (0xF4, 0x80..=0x8F) => (), 122 | _ => { 123 | error!(); 124 | } 125 | } 126 | i += 1; 127 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 128 | error!(); 129 | } 130 | i += 1; 131 | if safe_get(self.source, i) & 192 != TAG_CONT_U8 { 132 | error!(); 133 | } 134 | i += 1; 135 | } 136 | _ => { 137 | error!(); 138 | } 139 | } 140 | } 141 | } 142 | 143 | let r = Utf8LossyChunk { 144 | valid: unsafe { str::from_utf8_unchecked(self.source) }, 145 | broken: &[], 146 | }; 147 | self.source = &[]; 148 | Some(r) 149 | } 150 | } 151 | 152 | impl<'a> fmt::Display for Utf8Lossy<'a> { 153 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 154 | // If we're the empty string then our iterator won't actually yield 155 | // anything, so perform the formatting manually 156 | if self.bytes.is_empty() { 157 | return "".fmt(f); 158 | } 159 | 160 | for Utf8LossyChunk { valid, broken } in self.chunks() { 161 | // If we successfully decoded the whole chunk as a valid string then 162 | // we can return a direct formatting of the string which will also 163 | // respect various formatting flags if possible. 164 | if valid.len() == self.bytes.len() { 165 | assert!(broken.is_empty()); 166 | return valid.fmt(f); 167 | } 168 | 169 | f.write_str(valid)?; 170 | if !broken.is_empty() { 171 | f.write_char(char::REPLACEMENT_CHARACTER)?; 172 | } 173 | } 174 | Ok(()) 175 | } 176 | } 177 | 178 | impl<'a> fmt::Debug for Utf8Lossy<'a> { 179 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 180 | f.write_char('"')?; 181 | 182 | for Utf8LossyChunk { valid, broken } in self.chunks() { 183 | // Valid part. 184 | // Here we partially parse UTF-8 again which is suboptimal. 185 | { 186 | let mut from = 0; 187 | for (i, c) in valid.char_indices() { 188 | let esc = c.escape_debug(); 189 | // If char needs escaping, flush backlog so far and write, else skip 190 | if esc.len() != 1 { 191 | f.write_str(&valid[from..i])?; 192 | for c in esc { 193 | f.write_char(c)?; 194 | } 195 | from = i + c.len_utf8(); 196 | } 197 | } 198 | f.write_str(&valid[from..])?; 199 | } 200 | 201 | // Broken parts of string as hex escape. 202 | for &b in broken { 203 | write!(f, "\\x{:02x}", b)?; 204 | } 205 | } 206 | 207 | f.write_char('"') 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/collections/str/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT 2 | // file at the top-level directory of this distribution and at 3 | // http://rust-lang.org/COPYRIGHT. 
4 | //
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 | // option. This file may not be copied, modified, or distributed
9 | // except according to those terms.
10 | 
11 | //! String manipulation
12 | //!
13 | //! For more details, see std::str
14 | 
15 | #[allow(missing_docs)]
16 | pub mod lossy;
17 | 
18 | // https://tools.ietf.org/html/rfc3629
19 | #[rustfmt::skip]
20 | static UTF8_CHAR_WIDTH: [u8; 256] = [
21 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
22 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
23 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
24 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
25 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
26 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
27 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
28 | 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
29 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
30 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
31 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
32 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
33 | 0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
34 | 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
35 | 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
36 | 4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
37 | ];
38 | 
39 | /// Given a first byte, determines how many bytes are in this UTF-8 character.
40 | #[inline]
41 | pub fn utf8_char_width(b: u8) -> usize {
42 |     UTF8_CHAR_WIDTH[b as usize] as usize
43 | }
44 | --------------------------------------------------------------------------------
/tests/all/alloc_fill.rs:
--------------------------------------------------------------------------------
1 | use bumpalo::Bump;
2 | use std::alloc::Layout;
3 | use std::cmp;
4 | use std::iter::repeat;
5 | use std::mem;
6 | 
7 | #[test]
8 | fn alloc_slice_fill_zero() {
9 |     let b = Bump::new();
10 |     let u8_layout = Layout::new::<u8>();
11 | 
12 |     let ptr1 = b.alloc_layout(u8_layout);
13 | 
14 |     struct MyZeroSizedType;
15 | 
16 |     b.alloc_slice_copy::<u64>(&[]);
17 |     b.alloc_slice_clone::<String>(&[]);
18 |     b.alloc_slice_fill_with::<String, _>(0, |_| panic!("should not happen"));
19 |     b.alloc_slice_fill_copy(0, 42u64);
20 |     b.alloc_slice_fill_clone(0, &"hello".to_string());
21 |     b.alloc_slice_fill_default::<String>(0);
22 |     let ptr2 = b.alloc(MyZeroSizedType);
23 |     let alignment = cmp::max(mem::align_of::<u64>(), mem::align_of::<String>());
24 |     assert_eq!(
25 |         ptr1.as_ptr() as usize & !(alignment - 1),
26 |         ptr2 as *mut _ as usize
27 |     );
28 | 
29 |     let ptr3 = b.alloc_layout(u8_layout);
30 |     dbg!(ptr2 as *mut _);
31 |     dbg!(ptr3);
32 |     assert_eq!(
33 |         ptr2 as *mut _ as usize,
34 |         (ptr3.as_ptr() as usize) + b.min_align().max(u8_layout.align()),
35 |     );
36 | }
37 | 
38 | #[test]
39 | fn alloc_slice_try_fill_with_succeeds() {
40 |     let b = Bump::new();
41 |     let res: Result<&mut [usize], ()> = b.alloc_slice_try_fill_with(100, |n| Ok(n));
42 |     assert_eq!(res.map(|arr| arr[50]), Ok(50));
43 | }
44 | 
45 | #[test]
46 | fn alloc_slice_try_fill_with_fails() {
47 |     let b = Bump::new();
48 |     let res: Result<&mut [u16], ()> =
49 |         b.alloc_slice_try_fill_with(1000, |n| if n == 100 { Err(()) } else { Ok(42) });
50 |     assert_eq!(res, Err(()));
51 | }
52 | 
53 | #[test]
54 | fn alloc_slice_try_fill_iter_succeeds() {
55 |     let b = Bump::new();
56 |     let elems = repeat(42).take(10).collect::<Vec<_>>();
57 |     let res: Result<&mut [u16], ()> = b.alloc_slice_try_fill_iter(elems.into_iter().map(Ok));
58 |     assert_eq!(res.map(|arr| arr[5]), Ok(42));
59 | }
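// Editor's sketch (not part of the original suite): a minimal happy path for
// `alloc_slice_try_fill_iter` with an exact-size iterator that never fails,
// using only the API already exercised by the surrounding tests.
#[test]
fn alloc_slice_try_fill_iter_sketch() {
    let b = Bump::new();
    // `(0..4).map(Ok)` yields `Result<u32, ()>` items and knows its length.
    let res: Result<&mut [u32], ()> = b.alloc_slice_try_fill_iter((0..4u32).map(Ok));
    assert_eq!(res.map(|s| s.iter().sum::<u32>()), Ok(6));
}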
60 | 
61 | #[test]
62 | fn alloc_slice_try_fill_iter_fails() {
63 |     let b = Bump::new();
64 |     let elems = repeat(()).take(10).collect::<Vec<_>>();
65 |     let res: Result<&mut [u16], ()> = b.alloc_slice_try_fill_iter(elems.into_iter().map(Err));
66 |     assert_eq!(res, Err(()));
67 | }
68 | 
69 | #[test]
70 | #[should_panic(expected = "out of memory")]
71 | fn alloc_slice_overflow() {
72 |     let b = Bump::new();
73 | 
74 |     b.alloc_slice_fill_default::<u64>(usize::max_value());
75 | }
76 | --------------------------------------------------------------------------------
/tests/all/alloc_try_with.rs:
--------------------------------------------------------------------------------
1 | // All of these alloc_try_with tests will fail with "fatal runtime error: stack overflow" unless
2 | // LLVM manages to optimize the stack writes away.
3 | //
4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release
5 | // mode.
6 | 
7 | use std::iter::repeat;
8 | 
9 | use bumpalo::Bump;
10 | 
11 | #[test]
12 | #[cfg_attr(debug_assertions, ignore)]
13 | fn alloc_try_with_large_array() -> Result<(), ()> {
14 |     let b = Bump::new();
15 | 
16 |     b.alloc_try_with(|| Ok([4u8; 10_000_000]))?;
17 | 
18 |     Ok(())
19 | }
20 | 
21 | #[test]
22 | #[cfg_attr(debug_assertions, ignore)]
23 | fn alloc_try_with_large_array_err() {
24 |     let b = Bump::new();
25 | 
26 |     assert!(b
27 |         .alloc_try_with(|| Result::<[u8; 10_000_000], _>::Err(()))
28 |         .is_err());
29 | }
30 | 
31 | #[allow(dead_code)]
32 | struct LargeStruct {
33 |     small: usize,
34 |     big1: [u8; 20_000_000],
35 |     big2: [u8; 20_000_000],
36 |     big3: [u8; 20_000_000],
37 | }
38 | 
39 | #[test]
40 | #[cfg_attr(debug_assertions, ignore)]
41 | fn alloc_try_with_large_struct() -> Result<(), ()> {
42 |     let b = Bump::new();
43 | 
44 |     b.alloc_try_with(|| {
45 |         Ok(LargeStruct {
46 |             small: 1,
47 |             big1: [2; 20_000_000],
48 |             big2: [3; 20_000_000],
49 |             big3: [4; 20_000_000],
50 |         })
51 |     })?;
52 | 
53 |     Ok(())
54 | }
55 | 
56 | #[test]
57 | #[cfg_attr(debug_assertions, ignore)]
58 | fn alloc_try_with_large_struct_err() {
59 |     let b = Bump::new();
60 | 
61 |     assert!(b
62 |         .alloc_try_with(|| Result::<LargeStruct, _>::Err(()))
63 |         .is_err());
64 | }
65 | 
66 | #[test]
67 | #[cfg_attr(debug_assertions, ignore)]
68 | fn alloc_try_with_large_tuple() -> Result<(), ()> {
69 |     let b = Bump::new();
70 | 
71 |     b.alloc_try_with(|| {
72 |         Ok((
73 |             1u32,
74 |             LargeStruct {
75 |                 small: 2,
76 |                 big1: [3; 20_000_000],
77 |                 big2: [4; 20_000_000],
78 |                 big3: [5; 20_000_000],
79 |             },
80 |         ))
81 |     })?;
82 | 
83 |     Ok(())
84 | }
85 | 
86 | #[test]
87 | #[cfg_attr(debug_assertions, ignore)]
88 | fn alloc_try_with_large_tuple_err() {
89 |     let b = Bump::new();
90 | 
91 |     assert!(b
92 |         .alloc_try_with(|| { Result::<(u32, LargeStruct), _>::Err(()) })
93 |         .is_err());
94 | }
95 | 
96 | enum LargeEnum {
97 |     Small,
98 |     #[allow(dead_code)]
99 |     Large([u8; 10_000_000]),
100 | }
101 | 
102 | #[test]
103 | #[cfg_attr(debug_assertions, ignore)]
104 | fn alloc_try_with_large_enum() -> Result<(), ()> {
105 |     let b = Bump::new();
106 | 
107 |     b.alloc_try_with(|| Ok(LargeEnum::Small))?;
108 | 
109 |     Ok(())
110 | }
111 | 
112 | #[test]
113 | #[cfg_attr(debug_assertions, ignore)]
114 | fn alloc_try_with_large_enum_err() {
115 |     let b = Bump::new();
116 | 
117 |     assert!(b
118 |         .alloc_try_with(|| Result::<LargeEnum, _>::Err(()))
119 |         .is_err());
120 | }
121 | 
122 | #[test]
123 | #[cfg_attr(debug_assertions, ignore)]
124 | fn alloc_slice_try_fill_with_large_length() {
125 |     let b = Bump::new();
126 | 
127 |     assert!(b
128 |         .alloc_slice_try_fill_with(10_000_000, |_| Err::<u8, _>(()))
129 |         .is_err());
130 | }
131 | 
132 | #[test]
133 | #[cfg_attr(debug_assertions, ignore)]
134 | fn alloc_slice_try_fill_iter_large_length() {
135 |     let b = Bump::new();
136 | 
137 |     let elems = repeat(Err::<u8, ()>(()))
138 |         .take(10_000_000)
139 |         .collect::<Vec<_>>();
140 |     assert!(b.alloc_slice_try_fill_iter(elems).is_err());
141 | }
142 | --------------------------------------------------------------------------------
/tests/all/alloc_with.rs:
--------------------------------------------------------------------------------
1 | // All of these alloc_with tests will fail with "fatal runtime error: stack overflow" unless LLVM
2 | // manages to optimize the stack writes away.
3 | //
4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release
5 | // mode.
6 | 
7 | use bumpalo::Bump;
8 | 
9 | #[test]
10 | #[cfg_attr(debug_assertions, ignore)]
11 | fn alloc_with_large_array() {
12 |     let b = Bump::new();
13 | 
14 |     b.alloc_with(|| [4u8; 10_000_000]);
15 | }
16 | 
17 | #[allow(dead_code)]
18 | struct LargeStruct {
19 |     small: usize,
20 |     big1: [u8; 20_000_000],
21 |     big2: [u8; 20_000_000],
22 |     big3: [u8; 20_000_000],
23 | }
24 | 
25 | #[test]
26 | #[cfg_attr(debug_assertions, ignore)]
27 | fn alloc_with_large_struct() {
28 |     let b = Bump::new();
29 | 
30 |     b.alloc_with(|| LargeStruct {
31 |         small: 1,
32 |         big1: [2; 20_000_000],
33 |         big2: [3; 20_000_000],
34 |         big3: [4; 20_000_000],
35 |     });
36 | }
37 | 
38 | #[test]
39 | #[cfg_attr(debug_assertions, ignore)]
40 | fn alloc_with_large_tuple() {
41 |     let b = Bump::new();
42 | 
43 |     b.alloc_with(|| {
44 |         (
45 |             1u32,
46 |             LargeStruct {
47 |                 small: 2,
48 |                 big1: [3; 20_000_000],
49 |                 big2: [4; 20_000_000],
50 |                 big3: [5; 20_000_000],
51 |             },
52 |         )
53 |     });
54 | }
55 | 
56 | enum LargeEnum {
57 |     Small,
58 |     #[allow(dead_code)]
59 |     Large([u8; 10_000_000]),
60 | }
61 | 
62 | #[test]
63 | #[cfg_attr(debug_assertions, ignore)]
64 | fn alloc_with_large_enum() {
65 |     let b = Bump::new();
66 | 
67 |     b.alloc_with(|| LargeEnum::Small);
68 | }
69 | --------------------------------------------------------------------------------
/tests/all/allocation_limit.rs:
--------------------------------------------------------------------------------
1 | use bumpalo::Bump;
2 | 
3 | #[test]
4 | fn allocation_limit_trivial() {
5 |     let bump = Bump::with_capacity(0);
6 |     bump.set_allocation_limit(Some(0));
7 | 
8 |     assert!(bump.try_alloc(5).is_err());
9 |     assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes());
10 | 
11 |     bump.set_allocation_limit(None);
12 | 
13 |     assert!(bump.try_alloc(5).is_ok());
14 | }
15 | 
16 | #[test]
17 | fn change_allocation_limit_with_live_allocations() {
18 |     let bump = Bump::new();
19 | 
20 |     bump.set_allocation_limit(Some(512));
21 | 
22 |     bump.alloc(10);
23 | 
24 |     assert!(bump.try_alloc([0; 2048]).is_err());
25 | 
26 |     bump.set_allocation_limit(Some(16384));
27 | 
28 |     assert!(bump.try_alloc([0; 2048]).is_ok());
29 |     assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes());
30 | }
31 | 
32 | #[test]
33 | fn remove_allocation_limit_with_live_allocations() {
34 |     let bump = Bump::new();
35 | 
36 |     bump.set_allocation_limit(Some(512));
37 | 
38 |     bump.alloc(10);
39 | 
40 |     assert!(bump.try_alloc([0; 2048]).is_err());
41 |     assert!(bump.allocation_limit().unwrap() >= bump.allocated_bytes());
42 | 
43 |     bump.set_allocation_limit(None);
44 | 
45 |     assert!(bump.try_alloc([0; 2048]).is_ok());
46 | }
47 | 
48 | #[test]
49 | fn reset_preserves_allocation_limits() {
50 |     let mut bump = Bump::new();
51 | 
52 |     bump.set_allocation_limit(Some(512));
53 |     bump.reset();
54 | 
55 |     assert!(bump.try_alloc([0; 2048]).is_err());
56 |     assert!(bump.allocation_limit().unwrap() >=
bump.allocated_bytes()); 57 | } 58 | 59 | #[test] 60 | fn reset_updates_allocated_bytes() { 61 | let mut bump = Bump::new(); 62 | 63 | bump.alloc([0; 1 << 9]); 64 | 65 | // This second allocation should be a big enough one 66 | // after the first to force a new chunk allocation 67 | bump.alloc([0; 1 << 9]); 68 | 69 | let allocated_bytes_before_reset = bump.allocated_bytes(); 70 | 71 | bump.reset(); 72 | 73 | let allocated_bytes_after_reset = bump.allocated_bytes(); 74 | 75 | assert!(allocated_bytes_after_reset < allocated_bytes_before_reset); 76 | } 77 | 78 | #[test] 79 | fn new_bump_allocated_bytes_is_zero() { 80 | let bump = Bump::new(); 81 | 82 | assert_eq!(bump.allocated_bytes(), 0); 83 | } 84 | 85 | #[test] 86 | fn small_allocation_limit() { 87 | let bump = Bump::new(); 88 | 89 | bump.set_allocation_limit(Some(64)); 90 | assert!(bump.try_alloc([0; 1]).is_ok()); 91 | } 92 | -------------------------------------------------------------------------------- /tests/all/allocator_api.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "allocator_api")] 2 | 3 | use crate::quickcheck; 4 | use bumpalo::Bump; 5 | use std::alloc::{AllocError, Allocator, Layout}; 6 | use std::ptr::NonNull; 7 | use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; 8 | 9 | /// Map an arbitrary `x` to a power of 2 that is less than or equal to `max`, 10 | /// but with as little bias as possible (eg rounding `min(x, max)` to the 11 | /// nearest power of 2 is unacceptable because it would majorly bias `max` for 12 | /// small values of `max`). 13 | fn clamp_to_pow2_in_range(x: usize, max: usize) -> usize { 14 | let log_x = max.ilog2() as usize; 15 | if log_x == 0 { 16 | return 1; 17 | } 18 | let divisor = usize::MAX / log_x; 19 | let y = 1_usize << (x / divisor); 20 | assert!(y.is_power_of_two(), "{y} is not a power of two"); 21 | assert!(y <= max, "{y} is larger than {max}"); 22 | y 23 | } 24 | 25 | /// Helper to turn a pair of arbitrary `usize`s into a valid `Layout` of 26 | /// reasonable size for use with quickchecks. 
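/// With the constants below, the result is always at most 1024 bytes in size
/// and at most 64-aligned, with the size rounded up to a multiple of the
/// alignment, so `Layout::from_size_align` cannot fail.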
27 | pub fn arbitrary_layout(size: usize, align: usize) -> Layout {
28 |     const MAX_ALIGN: usize = 64;
29 |     const MAX_SIZE: usize = 1024;
30 | 
31 |     let align = clamp_to_pow2_in_range(align, MAX_ALIGN);
32 | 
33 |     let size = size % (MAX_SIZE + 1);
34 |     let size = size.next_multiple_of(align);
35 | 
36 |     Layout::from_size_align(size, align).unwrap()
37 | }
38 | 
39 | #[derive(Debug)]
40 | struct AllocatorDebug {
41 |     bump: Bump,
42 |     grows: AtomicUsize,
43 |     shrinks: AtomicUsize,
44 |     allocs: AtomicUsize,
45 |     deallocs: AtomicUsize,
46 | }
47 | 
48 | impl AllocatorDebug {
49 |     fn new(bump: Bump) -> AllocatorDebug {
50 |         AllocatorDebug {
51 |             bump,
52 |             grows: AtomicUsize::new(0),
53 |             shrinks: AtomicUsize::new(0),
54 |             allocs: AtomicUsize::new(0),
55 |             deallocs: AtomicUsize::new(0),
56 |         }
57 |     }
58 | }
59 | 
60 | unsafe impl Allocator for AllocatorDebug {
61 |     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
62 |         self.allocs.fetch_add(1, Relaxed);
63 |         let ref bump = self.bump;
64 |         bump.allocate(layout)
65 |     }
66 | 
67 |     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
68 |         self.deallocs.fetch_add(1, Relaxed);
69 |         let ref bump = self.bump;
70 |         bump.deallocate(ptr, layout)
71 |     }
72 | 
73 |     unsafe fn shrink(
74 |         &self,
75 |         ptr: NonNull<u8>,
76 |         old_layout: Layout,
77 |         new_layout: Layout,
78 |     ) -> Result<NonNull<[u8]>, AllocError> {
79 |         self.shrinks.fetch_add(1, Relaxed);
80 |         let ref bump = self.bump;
81 |         bump.shrink(ptr, old_layout, new_layout)
82 |     }
83 | 
84 |     unsafe fn grow(
85 |         &self,
86 |         ptr: NonNull<u8>,
87 |         old_layout: Layout,
88 |         new_layout: Layout,
89 |     ) -> Result<NonNull<[u8]>, AllocError> {
90 |         self.grows.fetch_add(1, Relaxed);
91 |         let ref bump = self.bump;
92 |         bump.grow(ptr, old_layout, new_layout)
93 |     }
94 | }
95 | 
96 | #[test]
97 | fn allocator_api_push_a_bunch_of_items() {
98 |     let b = AllocatorDebug::new(Bump::new());
99 |     let mut v = Vec::with_capacity_in(1024, &b);
100 |     assert_eq!(b.allocs.load(Relaxed), 1);
101 | 
102 |     for x in 0..1024 {
103 |         v.push(x);
104 |     }
105 | 
106 |     // Ensure we trigger a grow
107 |     assert_eq!(b.grows.load(Relaxed), 0);
108 |     for x in 1024..2048 {
109 |         v.push(x);
110 |     }
111 |     assert_ne!(b.grows.load(Relaxed), 0);
112 | 
113 |     // Ensure we trigger a shrink
114 |     v.truncate(1024);
115 |     v.shrink_to_fit();
116 |     assert_eq!(b.shrinks.load(Relaxed), 1);
117 | 
118 |     // Ensure we trigger a deallocation
119 |     assert_eq!(b.deallocs.load(Relaxed), 0);
120 |     drop(v);
121 |     assert_eq!(b.deallocs.load(Relaxed), 1);
122 | }
123 | 
124 | #[test]
125 | fn allocator_grow_zeroed() {
126 |     // Create a new bump arena.
127 |     let ref bump = Bump::new();
128 | 
129 |     // Make an initial allocation.
130 |     let first_layout = Layout::from_size_align(4, 4).expect("create a layout");
131 |     let mut p = bump
132 |         .allocate_zeroed(first_layout)
133 |         .expect("allocate a first chunk");
134 |     let allocated = bump.allocated_bytes();
135 |     unsafe { p.as_mut().fill(42) };
136 |     let p = p.cast();
137 | 
138 |     // Grow the last allocation. This should just reserve a few more bytes
139 |     // within the current chunk, not allocate a whole new memory block within a
140 |     // new chunk.
141 |     let second_layout = Layout::from_size_align(8, 4).expect("create an expanded layout");
142 |     let p = unsafe { bump.grow_zeroed(p, first_layout, second_layout) }
143 |         .expect("should grow_zeroed okay");
144 |     assert!(bump.allocated_bytes() <= allocated * 2);
145 |     assert_eq!(unsafe { p.as_ref() }, [42, 42, 42, 42, 0, 0, 0, 0]);
146 | }
147 | 
148 | quickcheck! {
149 |     fn allocator_grow_align_increase(layouts: Vec<(usize, usize)>) -> bool {
150 |         let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| {
151 |             arbitrary_layout(size, align)
152 |         }).collect();
153 | 
154 |         layouts.sort_by_key(|l| (l.size(), l.align()));
155 | 
156 |         let b = AllocatorDebug::new(Bump::new());
157 |         let mut layout_iter = layouts.into_iter();
158 | 
159 |         if let Some(initial_layout) = layout_iter.next() {
160 |             let mut pointer = b.allocate(initial_layout).unwrap();
161 |             if !is_pointer_aligned_to(pointer, initial_layout.align()) {
162 |                 return false;
163 |             }
164 | 
165 |             let mut old_layout = initial_layout;
166 | 
167 |             for new_layout in layout_iter {
168 |                 pointer = unsafe { b.grow(pointer.cast(), old_layout, new_layout).unwrap() };
169 |                 if !is_pointer_aligned_to(pointer, new_layout.align()) {
170 |                     return false;
171 |                 }
172 | 
173 |                 old_layout = new_layout;
174 |             }
175 |         }
176 | 
177 |         true
178 |     }
179 | 
180 |     fn allocator_shrink_align_change(layouts: Vec<(usize, usize)>) -> () {
181 |         let mut layouts: Vec<_> = layouts.into_iter().map(|(size, align)| {
182 |             arbitrary_layout(size, align)
183 |         }).collect();
184 | 
185 |         layouts.sort_by_key(|l| l.size());
186 |         layouts.reverse();
187 | 
188 |         let b = AllocatorDebug::new(Bump::new());
189 |         let mut layout_iter = layouts.into_iter();
190 | 
191 |         if let Some(initial_layout) = layout_iter.next() {
192 |             let mut pointer = b.allocate(initial_layout).unwrap();
193 |             assert!(is_pointer_aligned_to(pointer, initial_layout.align()));
194 | 
195 |             let mut old_layout = initial_layout;
196 | 
197 |             for new_layout in layout_iter {
198 |                 let res = unsafe { b.shrink(pointer.cast(), old_layout, new_layout) };
199 |                 if old_layout.align() < new_layout.align() {
200 |                     match res {
201 |                         Ok(p) => assert!(is_pointer_aligned_to(p, new_layout.align())),
202 |                         Err(_) => {}
203 |                     }
204 |                 } else {
205 |                     pointer = res.unwrap();
206 |                     assert!(is_pointer_aligned_to(pointer, new_layout.align()));
207 | 
208 |                     old_layout = new_layout;
209 |                 }
210 |             }
211 |         }
212 |     }
213 | 
214 |     fn allocator_grow_or_shrink(layouts: Vec<((usize, usize), (usize, usize))>) -> () {
215 |         let layouts = layouts
216 |             .into_iter()
217 |             .map(|((from_size, from_align), (to_size, to_align))| {
218 |                 let from_layout = arbitrary_layout(from_size, from_align);
219 |                 let to_layout = arbitrary_layout(to_size, to_align);
220 |                 (from_layout, to_layout)
221 |             });
222 | 
223 |         let b = AllocatorDebug::new(Bump::new());
224 |         for (from_layout, to_layout) in layouts {
225 |             let pointer = b.allocate(from_layout).unwrap();
226 |             assert!(is_pointer_aligned_to(pointer, from_layout.align()));
227 |             let pointer = pointer.cast::<u8>();
228 | 
229 |             let result = if to_layout.size() <= from_layout.size() {
230 |                 unsafe { b.shrink(pointer, from_layout, to_layout) }
231 |             } else {
232 |                 unsafe { b.grow(pointer, from_layout, to_layout) }
233 |             };
234 | 
235 |             match result {
236 |                 Ok(new_pointer) => {
237 |                     assert!(is_pointer_aligned_to(new_pointer, to_layout.align()));
238 |                 }
239 |                 // Bumpalo can return allocation errors in various situations,
240 |                 // for example if we try to shrink an allocation but also grow
241 |                 // its alignment in such a way that we cannot satisfy the
242 |                 // requested alignment, and that is okay.
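                // (Editor's note: bumpalo satisfies a shrink in place when it
                // can; it does not move the allocation to honor a larger
                // alignment, which is why an `Err` is acceptable here.)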
243 |                 Err(_) => continue,
244 |             }
245 |         }
246 |     }
247 | }
248 | 
249 | #[test]
250 | fn allocator_shrink_layout_change() {
251 |     let b = AllocatorDebug::new(Bump::with_capacity(1024));
252 | 
253 |     let layout_align4 = Layout::from_size_align(1024, 4).unwrap();
254 |     let layout_align16 = Layout::from_size_align(256, 16).unwrap();
255 | 
256 |     // Allocate a chunk of memory and attempt to shrink it while increasing
257 |     // alignment requirements.
258 |     let p4: NonNull<u8> = b.allocate(layout_align4).unwrap().cast();
259 |     let p16_res = unsafe { b.shrink(p4, layout_align4, layout_align16) };
260 | 
261 |     // This could either happen to succeed because `p4` already happened to be
262 |     // 16-aligned and could be reused, or `bumpalo` could return an error.
263 |     match p16_res {
264 |         Ok(p16) => assert!(is_pointer_aligned_to(p16, 16)),
265 |         Err(_) => {}
266 |     }
267 | }
268 | 
269 | fn is_pointer_aligned_to(p: NonNull<[u8]>, align: usize) -> bool {
270 |     debug_assert!(align.is_power_of_two());
271 | 
272 |     let pointer = p.as_ptr() as *mut u8 as usize;
273 |     let pointer_aligned = pointer & !(align - 1);
274 | 
275 |     pointer == pointer_aligned
276 | }
277 | --------------------------------------------------------------------------------
/tests/all/boxed.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "boxed")]
2 | 
3 | use bumpalo::boxed::Box;
4 | use bumpalo::Bump;
5 | 
6 | #[test]
7 | fn into_raw_aliasing() {
8 |     let bump = Bump::new();
9 |     let boxed = Box::new_in(1, &bump);
10 |     let raw = Box::into_raw(boxed);
11 | 
12 |     let mut_ref = unsafe { &mut *raw };
13 |     dbg!(mut_ref);
14 | }
15 | --------------------------------------------------------------------------------
/tests/all/capacity.rs:
--------------------------------------------------------------------------------
1 | use bumpalo::Bump;
2 | 
3 | #[test]
4 | fn try_with_capacity_too_large() {
5 |     // Shouldn't panic even though the capacity is too large for a `Layout`.
6 |     let _ = Bump::try_with_capacity(isize::MAX as usize + 1);
7 | }
8 | --------------------------------------------------------------------------------
/tests/all/collect_in.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "collections")]
2 | 
3 | use crate::quickcheck;
4 | use bumpalo::collections::{CollectIn, String, Vec};
5 | use bumpalo::Bump;
6 | use std::string::String as StdString;
7 | use std::vec::Vec as StdVec;
8 | 
9 | quickcheck! {
10 |     fn test_string_collect(input: StdString) -> bool {
11 |         let bump = Bump::new();
12 |         let bump_str = input.chars().collect_in::<String>(&bump);
13 | 
14 |         bump_str == input
15 |     }
16 | 
17 |     fn test_vec_collect(input: StdVec<usize>) -> bool {
18 |         let bump = Bump::new();
19 |         let bump_vec = input.clone().into_iter().collect_in::<Vec<_>>(&bump);
20 | 
21 |         bump_vec.as_slice() == input.as_slice()
22 |     }
23 | }
24 | --------------------------------------------------------------------------------
/tests/all/main.rs:
--------------------------------------------------------------------------------
1 | #![cfg_attr(feature = "allocator_api", feature(allocator_api))]
2 | 
3 | mod alloc_fill;
4 | mod alloc_try_with;
5 | mod alloc_with;
6 | mod allocation_limit;
7 | mod allocator_api;
8 | mod boxed;
9 | mod capacity;
10 | mod collect_in;
11 | mod quickcheck;
12 | mod quickchecks;
13 | mod string;
14 | mod tests;
15 | mod try_alloc_try_with;
16 | mod try_alloc_with;
17 | mod vec;
18 | 
19 | #[cfg(feature = "serde")]
20 | mod serde;
21 | 
22 | fn main() {}
23 | --------------------------------------------------------------------------------
/tests/all/quickcheck.rs:
--------------------------------------------------------------------------------
1 | /// A redefinition/wrapper macro of `quickcheck::quickcheck!` that supports
2 | /// limiting the number of test iterations to one when we are running under
3 | /// MIRI.
4 | #[macro_export]
5 | macro_rules! quickcheck {
6 |     (
7 |         $(
8 |             $(#[$m:meta])*
9 |             fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty {
10 |                 $($code:tt)*
11 |             }
12 |         )*
13 |     ) => {
14 |         $(
15 |             #[test]
16 |             $(#[$m])*
17 |             fn $fn_name() {
18 |                 fn prop($($arg_name: $arg_ty),*) -> $ret {
19 |                     $($code)*
20 |                 }
21 | 
22 |                 let mut qc = ::quickcheck::QuickCheck::new();
23 | 
24 |                 // Use the `QUICKCHECK_TESTS` environment variable from
25 |                 // compile time to avoid violating MIRI's isolation by looking
26 |                 // at the runtime environment variable.
27 |                 let tests = option_env!("QUICKCHECK_TESTS").and_then(|s| s.parse().ok());
28 | 
29 |                 // Limit quickcheck tests to a single iteration under MIRI,
30 |                 // since they are otherwise super slow.
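                // (If the variable is unset and we are not under MIRI, `tests`
                // stays `None` and quickcheck keeps its default iteration
                // count.)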
31 |                 #[cfg(miri)]
32 |                 let tests = tests.or(Some(1));
33 | 
34 |                 if let Some(tests) = tests {
35 |                     eprintln!("Executing at most {} quickchecks", tests);
36 |                     qc = qc.tests(tests);
37 |                 }
38 | 
39 |                 qc.quickcheck(prop as fn($($arg_ty),*) -> $ret);
40 |             }
41 |         )*
42 |     };
43 | }
44 | --------------------------------------------------------------------------------
/tests/all/quickchecks.rs:
--------------------------------------------------------------------------------
1 | use crate::quickcheck;
2 | use ::quickcheck::{Arbitrary, Gen};
3 | use bumpalo::Bump;
4 | use std::mem;
5 | 
6 | #[derive(Clone, Debug, PartialEq)]
7 | struct BigValue {
8 |     data: [u64; 32],
9 | }
10 | 
11 | impl BigValue {
12 |     fn new(x: u64) -> BigValue {
13 |         BigValue {
14 |             data: [
15 |                 x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
16 |                 x, x, x, x,
17 |             ],
18 |         }
19 |     }
20 | }
21 | 
22 | impl Arbitrary for BigValue {
23 |     fn arbitrary(g: &mut Gen) -> BigValue {
24 |         BigValue::new(u64::arbitrary(g))
25 |     }
26 | }
27 | 
28 | #[derive(Clone, Debug)]
29 | enum Elems<T, U> {
30 |     OneT(T),
31 |     TwoT(T, T),
32 |     FourT(T, T, T, T),
33 |     OneU(U),
34 |     TwoU(U, U),
35 |     FourU(U, U, U, U),
36 | }
37 | 
38 | impl<T, U> Arbitrary for Elems<T, U>
39 | where
40 |     T: Arbitrary + Clone,
41 |     U: Arbitrary + Clone,
42 | {
43 |     fn arbitrary(g: &mut Gen) -> Elems<T, U> {
44 |         let x: u8 = u8::arbitrary(g);
45 |         match x % 6 {
46 |             0 => Elems::OneT(T::arbitrary(g)),
47 |             1 => Elems::TwoT(T::arbitrary(g), T::arbitrary(g)),
48 |             2 => Elems::FourT(
49 |                 T::arbitrary(g),
50 |                 T::arbitrary(g),
51 |                 T::arbitrary(g),
52 |                 T::arbitrary(g),
53 |             ),
54 |             3 => Elems::OneU(U::arbitrary(g)),
55 |             4 => Elems::TwoU(U::arbitrary(g), U::arbitrary(g)),
56 |             5 => Elems::FourU(
57 |                 U::arbitrary(g),
58 |                 U::arbitrary(g),
59 |                 U::arbitrary(g),
60 |                 U::arbitrary(g),
61 |             ),
62 |             _ => unreachable!(),
63 |         }
64 |     }
65 | 
66 |     fn shrink(&self) -> Box<dyn Iterator<Item = Elems<T, U>>> {
67 |         match self {
68 |             Elems::OneT(_) => Box::new(vec![].into_iter()),
69 |             Elems::TwoT(a, b) => {
70 |                 Box::new(vec![Elems::OneT(a.clone()), Elems::OneT(b.clone())].into_iter())
71 |             }
72 |             Elems::FourT(a, b, c, d) => Box::new(
73 |                 vec![
74 |                     Elems::TwoT(a.clone(), b.clone()),
75 |                     Elems::TwoT(a.clone(), c.clone()),
76 |                     Elems::TwoT(a.clone(), d.clone()),
77 |                     Elems::TwoT(b.clone(), c.clone()),
78 |                     Elems::TwoT(b.clone(), d.clone()),
79 |                     Elems::TwoT(c.clone(), d.clone()),
80 |                 ]
81 |                 .into_iter(),
82 |             ),
83 |             Elems::OneU(_) => Box::new(vec![].into_iter()),
84 |             Elems::TwoU(a, b) => {
85 |                 Box::new(vec![Elems::OneU(a.clone()), Elems::OneU(b.clone())].into_iter())
86 |             }
87 |             Elems::FourU(a, b, c, d) => Box::new(
88 |                 vec![
89 |                     Elems::TwoU(a.clone(), b.clone()),
90 |                     Elems::TwoU(a.clone(), c.clone()),
91 |                     Elems::TwoU(a.clone(), d.clone()),
92 |                     Elems::TwoU(b.clone(), c.clone()),
93 |                     Elems::TwoU(b.clone(), d.clone()),
94 |                     Elems::TwoU(c.clone(), d.clone()),
95 |                 ]
96 |                 .into_iter(),
97 |             ),
98 |         }
99 |     }
100 | }
101 | 
102 | fn overlap((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
103 |     assert!(a1 < a2);
104 |     assert!(b1 < b2);
105 |     a1 < b2 && b1 < a2
106 | }
107 | 
108 | // Returns whether `(b1, b2)` is contained in `(a1, a2)`.
109 | fn contains((a1, a2): (usize, usize), (b1, b2): (usize, usize)) -> bool {
110 |     assert!(a1 < a2);
111 |     assert!(b1 < b2);
112 |     a1 <= b1 && b2 <= a2
113 | }
114 | 
115 | fn range<T>(t: &T) -> (usize, usize) {
116 |     let start = t as *const _ as usize;
117 |     let end = start + mem::size_of::<T>();
118 |     (start, end)
119 | }
120 | 
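// Editor's note (added): `overlap` and `contains` treat `(start, end)` pairs
// as half-open ranges, so allocations that merely touch, e.g. (0, 8) and
// (8, 16), are not considered overlapping.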
121 | quickcheck! {
122 |     fn can_allocate_big_values(values: Vec<BigValue>) -> () {
123 |         let bump = Bump::new();
124 |         let mut alloced = vec![];
125 | 
126 |         for vals in values.iter().cloned() {
127 |             alloced.push(bump.alloc(vals));
128 |         }
129 | 
130 |         for (vals, alloc) in values.iter().zip(alloced.into_iter()) {
131 |             assert_eq!(vals, alloc);
132 |         }
133 |     }
134 | 
135 |     fn big_allocations_never_overlap(values: Vec<BigValue>) -> () {
136 |         let bump = Bump::new();
137 |         let mut alloced = vec![];
138 | 
139 |         for v in values {
140 |             let a = bump.alloc(v);
141 |             let start = a as *const _ as usize;
142 |             let end = unsafe { (a as *const BigValue).offset(1) as usize };
143 |             let range = (start, end);
144 | 
145 |             for r in &alloced {
146 |                 assert!(!overlap(*r, range));
147 |             }
148 | 
149 |             alloced.push(range);
150 |         }
151 |     }
152 | 
153 |     fn can_allocate_heterogeneous_things_and_they_dont_overlap(things: Vec<Elems<u8, u64>>) -> () {
154 |         let bump = Bump::new();
155 |         let mut ranges = vec![];
156 | 
157 |         for t in things {
158 |             let r = match t {
159 |                 Elems::OneT(a) => {
160 |                     range(bump.alloc(a))
161 |                 },
162 |                 Elems::TwoT(a, b) => {
163 |                     range(bump.alloc([a, b]))
164 |                 },
165 |                 Elems::FourT(a, b, c, d) => {
166 |                     range(bump.alloc([a, b, c, d]))
167 |                 },
168 |                 Elems::OneU(a) => {
169 |                     range(bump.alloc(a))
170 |                 },
171 |                 Elems::TwoU(a, b) => {
172 |                     range(bump.alloc([a, b]))
173 |                 },
174 |                 Elems::FourU(a, b, c, d) => {
175 |                     range(bump.alloc([a, b, c, d]))
176 |                 },
177 |             };
178 | 
179 |             for s in &ranges {
180 |                 assert!(!overlap(r, *s));
181 |             }
182 | 
183 |             ranges.push(r);
184 |         }
185 |     }
186 | 
187 | 
188 |     fn test_alignment_chunks(sizes: Vec<usize>) -> () {
189 |         const SUPPORTED_ALIGNMENTS: &[usize] = &[1, 2, 4, 8, 16];
190 |         for &alignment in SUPPORTED_ALIGNMENTS {
191 |             let mut b = Bump::<1>::with_min_align_and_capacity(513);
192 |             let mut sizes = sizes.iter().map(|&size| (size % 10) * alignment).collect::<Vec<_>>();
193 | 
194 |             for &size in &sizes {
195 |                 let layout = std::alloc::Layout::from_size_align(size, alignment).unwrap();
196 |                 let ptr = b.alloc_layout(layout).as_ptr() as *const u8 as usize;
197 |                 assert_eq!(ptr % alignment, 0);
198 |             }
199 | 
200 |             for chunk in b.iter_allocated_chunks() {
201 |                 let mut remaining = chunk.len();
202 |                 while remaining > 0 {
203 |                     let size = sizes.pop().expect("too many bytes in the chunk output");
204 |                     assert!(remaining >= size, "returned chunk contained padding");
205 |                     remaining -= size;
206 |                 }
207 |             }
208 |             assert_eq!(sizes.into_iter().sum::<usize>(), 0);
209 |         }
210 |     }
211 | 
212 |     fn alloc_slices(allocs: Vec<(u8, usize)>) -> () {
213 |         let b = Bump::new();
214 |         let mut allocated: Vec<(usize, usize)> = vec![];
215 |         for (val, len) in allocs {
216 |             let len = len % 100;
217 |             let s = b.alloc_slice_fill_copy(len, val);
218 | 
219 |             assert_eq!(s.len(), len);
220 |             assert!(s.iter().all(|v| v == &val));
221 | 
222 |             let range = (s.as_ptr() as usize, unsafe { s.as_ptr().add(s.len()) } as usize);
223 |             for r in &allocated {
224 |                 let no_overlap = range.1 <= r.0 || r.1 <= range.0;
225 |                 assert!(no_overlap);
226 |             }
227 |             allocated.push(range);
228 |         }
229 |     }
230 | 
231 |     fn alloc_strs(allocs: Vec<String>) -> () {
232 |         let b = Bump::new();
233 |         let allocated: Vec<&str> = allocs.iter().map(|s| b.alloc_str(s) as &_).collect();
234 |         for (val, alloc) in allocs.into_iter().zip(allocated) {
235 |             assert_eq!(val, alloc);
236 |         }
237 |     }
238 | 
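    // Editor's sketch (not one of the original properties): `alloc_str` copies
    // the string into the arena byte-for-byte, so the length round-trips too.
    fn alloc_str_lens_sketch(allocs: Vec<String>) -> bool {
        let b = Bump::new();
        allocs.iter().all(|s| b.alloc_str(s).len() == s.len())
    }
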
239 |     fn all_allocations_in_a_chunk(values: Vec<BigValue>) -> () {
240 |         let b = Bump::new();
241 |         let allocated: Vec<&BigValue> = values.into_iter().map(|val| b.alloc(val) as &_).collect();
242 |         let chunks: Vec<(*mut u8, usize)> = unsafe { b.iter_allocated_chunks_raw() }.collect();
243 |         for alloc in allocated.into_iter() {
244 |             assert!(chunks.iter().any(|&(ptr, size)| {
245 |                 let ptr = ptr as usize;
246 |                 let chunk = (ptr, ptr + size);
247 |                 contains(chunk, range(alloc))
248 |             }));
249 |         }
250 |     }
251 | 
252 |     fn chunks_and_raw_chunks_are_same(values: Vec<u8>) -> () {
253 |         let mut b = Bump::new();
254 |         for val in values {
255 |             b.alloc(val);
256 |         }
257 |         let raw_chunks: Vec<(_, _)> = unsafe { b.iter_allocated_chunks_raw() }.collect();
258 |         let chunks: Vec<&[_]> = b.iter_allocated_chunks().collect();
259 |         assert_eq!(raw_chunks.len(), chunks.len());
260 |         for ((ptr, size), chunk) in raw_chunks.into_iter().zip(chunks) {
261 |             assert_eq!(ptr as *const _, chunk.as_ptr() as *const _);
262 |             assert_eq!(size, chunk.len());
263 |         }
264 |     }
265 | 
266 |     // MIRI exits with failure when we try to allocate more memory than its
267 |     // sandbox has, rather than returning null from the allocation
268 |     // function. This test runs afoul of that bug.
269 |     #[cfg(not(miri))]
270 |     fn limit_is_never_exceeded(limit: usize) -> bool {
271 |         let bump = Bump::new();
272 | 
273 |         bump.set_allocation_limit(Some(limit));
274 | 
275 |         // The exact numbers here on how much to allocate are a bit murky but we
276 |         // have two main goals.
277 |         //
278 |         // - Attempt to allocate over the allocation limit imposed
279 |         // - Allocate in increments small enough that at least a few allocations succeed
280 |         let layout = std::alloc::Layout::array::<u8>(limit / 16).unwrap();
281 |         for _ in 0..32 {
282 |             let _ = bump.try_alloc_layout(layout);
283 |         }
284 | 
285 |         bump.allocated_bytes() <= limit
286 |     }
287 | 
288 |     fn allocated_bytes_including_metadata(allocs: Vec<usize>) -> () {
289 |         let b = Bump::new();
290 |         let mut slice_bytes = 0;
291 |         let allocs_len = allocs.len();
292 |         for len in allocs {
293 |             const MAX_LEN: usize = 512;
294 |             let len = len % MAX_LEN;
295 |             b.alloc_slice_fill_copy(len, 0);
296 |             slice_bytes += len;
297 |             let allocated_bytes = b.allocated_bytes();
298 |             let allocated_bytes_including_metadata = b.allocated_bytes_including_metadata();
299 |             if slice_bytes == 0 {
300 |                 assert_eq!(allocated_bytes, 0);
301 |                 assert_eq!(allocated_bytes_including_metadata, 0);
302 |             } else {
303 |                 assert!(allocated_bytes >= slice_bytes);
304 |                 assert!(allocated_bytes_including_metadata > allocated_bytes);
305 |                 assert!(allocated_bytes_including_metadata < allocated_bytes + allocs_len * 100);
306 |             }
307 |         }
308 |     }
309 | 
310 |     #[cfg(feature = "collections")]
311 |     fn extending_from_slice(data1: Vec<usize>, data2: Vec<usize>) -> () {
312 |         let bump = Bump::new();
313 | 
314 |         // Create a bumpalo Vec with the contents of `data1`
315 |         let mut vec = bumpalo::collections::Vec::new_in(&bump);
316 |         vec.extend_from_slice_copy(&data1);
317 |         assert_eq!(vec.as_slice(), data1);
318 | 
319 |         // Extend the Vec using the contents of `data2`
320 |         vec.extend_from_slice_copy(&data2);
321 |         // Confirm that the Vec now has the expected number of items
322 |         assert_eq!(vec.len(), data1.len() + data2.len());
323 |         // Confirm that the beginning of the Vec matches `data1`'s elements
324 |         assert_eq!(&vec[0..data1.len()], data1);
325 |         // Confirm that the end of the Vec matches `data2`'s elements
326 |         assert_eq!(&vec[data1.len()..], data2);
327 |     }
328 | 
329 |     #[cfg(feature = "collections")]
330 |     fn extending_from_slices(data: Vec<Vec<usize>>) -> () {
331 |         let bump = Bump::new();
332 | 
333 |         // Convert the Vec<Vec<usize>> into a &[&[usize]]
334 |         let slices_vec: Vec<&[usize]> = data.iter().map(Vec::as_slice).collect();
335 |         let slices = slices_vec.as_slice();
336 | 
337 |         // Isolate the first slice from the remaining slices. If `slices` is empty,
338 |         // fall back to empty slices for both.
339 |         let (first_slice, remaining_slices) = match slices {
340 |             [head, tail @ ..] => (*head, tail),
341 |             [] => (&[][..], &[][..])
342 |         };
343 | 
344 |         // Create a bumpalo `Vec` and populate it with the contents of the first slice.
345 |         let mut vec = bumpalo::collections::Vec::new_in(&bump);
346 |         vec.extend_from_slice_copy(first_slice);
347 |         assert_eq!(vec.as_slice(), first_slice);
348 | 
349 |         // Append all of the other slices onto the end of the Vec
350 |         vec.extend_from_slices_copy(remaining_slices);
351 | 
352 |         let total_length: usize = slices.iter().map(|s| s.len()).sum();
353 |         assert_eq!(vec.len(), total_length);
354 | 
355 |         let total_data: Vec<usize> = slices.iter().flat_map(|s| s.iter().copied()).collect();
356 |         assert_eq!(vec.as_slice(), total_data.as_slice());
357 |     }
358 | 
359 |     #[cfg(feature = "collections")]
360 |     fn compare_extending_from_slice_and_from_slices(data: Vec<Vec<usize>>) -> () {
361 |         let bump = Bump::new();
362 | 
363 |         // Convert the Vec<Vec<usize>> into a &[&[usize]]
364 |         let slices_vec: Vec<&[usize]> = data.iter().map(Vec::as_slice).collect();
365 |         let slices = slices_vec.as_slice();
366 | 
367 |         // Isolate the first slice from the remaining slices. If `slices` is empty,
368 |         // fall back to empty slices for both.
369 |         let (first_slice, remaining_slices) = match slices {
370 |             [head, tail @ ..] => (*head, tail),
371 |             [] => (&[][..], &[][..])
372 |         };
373 | 
374 |         // Create a bumpalo `Vec` and populate it with the contents of the first slice.
375 |         let mut vec1 = bumpalo::collections::Vec::new_in(&bump);
376 |         vec1.extend_from_slice_copy(first_slice);
377 |         assert_eq!(vec1.as_slice(), first_slice);
378 | 
379 |         // Append each remaining slice individually
380 |         for slice in remaining_slices {
381 |             vec1.extend_from_slice_copy(slice);
382 |         }
383 | 
384 |         // Create a second Vec populated with the contents of the first slice.
385 |         let mut vec2 = bumpalo::collections::Vec::new_in(&bump);
386 |         vec2.extend_from_slice_copy(first_slice);
387 |         assert_eq!(vec2.as_slice(), first_slice);
388 | 
389 |         // Append the remaining slices en masse
390 |         vec2.extend_from_slices_copy(remaining_slices);
391 | 
392 |         // Confirm that the two approaches to extending a Vec resulted in the same data
393 |         assert_eq!(vec1, vec2);
394 |     }
395 | }
396 | --------------------------------------------------------------------------------
/tests/all/serde/boxed.rs:
--------------------------------------------------------------------------------
1 | #![cfg(all(feature = "boxed", feature = "serde"))]
2 | 
3 | use super::{assert_eq_json, Mixed, Test};
4 | 
5 | use bumpalo::{boxed::Box, Bump};
6 | 
--------------------------------------------------------------------------------
/tests/all/serde/boxed.rs:
--------------------------------------------------------------------------------
1 | #![cfg(all(feature = "boxed", feature = "serde"))]
2 | 
3 | use super::{assert_eq_json, Mixed, Test};
4 | 
5 | use bumpalo::{boxed::Box, Bump};
6 | 
7 | macro_rules! compare_std_box {
8 |     (in $bump:ident; $x:expr) => {
9 |         (Box::new_in($x, &$bump), std::boxed::Box::new($x))
10 |     };
11 | }
12 | 
13 | #[test]
14 | fn test_box_serializes() {
15 |     let bump = Bump::new();
16 |     let (box_int, std_box_int) = compare_std_box!(in bump; 1);
17 |     assert_eq_json!(box_int, std_box_int);
18 |     let (box_str, std_box_str) = compare_std_box!(in bump; "hello");
19 |     assert_eq_json!(box_str, std_box_str);
20 |     let (box_vec, std_box_vec) = compare_std_box!(in bump; std::vec!["hello", "world"]);
21 |     assert_eq_json!(box_vec, std_box_vec);
22 | }
23 | 
24 | #[test]
25 | fn test_box_serializes_complex() {
26 |     let bump = Bump::new();
27 |     let (vec, std_vec) = compare_std_box![
28 |         in bump;
29 |         Mixed {
30 |             i: 8,
31 |             s: "a".into(),
32 |             o: None,
33 |             e: Test::Second,
34 |         }
35 |     ];
36 |     assert_eq_json!(vec, std_vec);
37 |     let de: std::boxed::Box<Mixed> =
38 |         serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap();
39 |     assert_eq!(de, std_vec);
40 | }
41 | 
--------------------------------------------------------------------------------
/tests/all/serde/mod.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | 
3 | mod boxed;
4 | mod string;
5 | mod vec;
6 | 
7 | #[macro_export]
8 | macro_rules! assert_eq_json {
9 |     ($a:ident, $b:ident) => {
10 |         assert_eq!(
11 |             serde_json::to_string(&$a).unwrap(),
12 |             serde_json::to_string(&$b).unwrap(),
13 |         )
14 |     };
15 | }
16 | use assert_eq_json;
17 | 
18 | #[derive(Serialize, Deserialize, Debug, PartialEq)]
19 | #[serde(tag = "t", content = "c")]
20 | enum Test {
21 |     First,
22 |     Second,
23 | }
24 | 
25 | #[derive(Serialize, Deserialize, Debug, PartialEq)]
26 | #[serde()]
27 | struct Mixed {
28 |     i: i32,
29 |     s: std::string::String,
30 |     o: Option<std::string::String>,
31 |     e: Test,
32 | }
33 | 
--------------------------------------------------------------------------------
/tests/all/serde/string.rs:
--------------------------------------------------------------------------------
1 | #![cfg(all(feature = "collections", feature = "serde"))]
2 | 
3 | use super::assert_eq_json;
4 | 
5 | use bumpalo::{collections::string::String, Bump};
6 | 
7 | macro_rules! compare_std_str {
8 |     (in $bump:ident; $x:expr) => {
9 |         (
10 |             String::from_str_in($x, &$bump),
11 |             std::string::String::from($x),
12 |         )
13 |     };
14 | }
15 | 
16 | #[test]
17 | fn test_string_serializes() {
18 |     let bump = Bump::new();
19 |     let (str, std_str) = compare_std_str![in bump; "hello world !"];
20 |     assert_eq_json!(str, std_str);
21 |     let de: std::string::String =
22 |         serde_json::from_str(&serde_json::to_string(&str).unwrap()).unwrap();
23 |     assert_eq!(de, std_str);
24 | }
25 | 
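These serde tests only ever serialize the bumpalo types; deserialization targets std types, which can then be copied into an arena if needed. A minimal sketch of that round trip (the JSON input here is illustrative):

```rust
use bumpalo::Bump;

let bump = Bump::new();

// Deserialize into an owned std String first...
let std_s: std::string::String = serde_json::from_str("\"hi\"").unwrap();

// ...then copy it into the arena as a bumpalo String.
let arena_s = bumpalo::collections::String::from_str_in(&std_s, &bump);
assert_eq!(arena_s, "hi");
```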
--------------------------------------------------------------------------------
/tests/all/serde/vec.rs:
--------------------------------------------------------------------------------
1 | #![cfg(all(feature = "collections", feature = "serde"))]
2 | 
3 | use super::{assert_eq_json, Mixed, Test};
4 | 
5 | use bumpalo::{vec, Bump};
6 | 
7 | macro_rules! compare_std_vec {
8 |     (in $bump:ident; $($x:expr),+) => {{
9 |         let vec = vec![in &$bump; $($x),+];
10 |         let std_vec = std::vec![$($x),+];
11 |         (vec, std_vec)
12 |     }}
13 | }
14 | 
15 | #[test]
16 | fn test_vec_serializes_str() {
17 |     let bump = Bump::new();
18 |     let (vec, std_vec) = compare_std_vec![in bump; "hello", "world"];
19 |     assert_eq_json!(vec, std_vec);
20 |     let de: std::vec::Vec<std::string::String> =
21 |         serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap();
22 |     assert_eq!(de, std_vec);
23 | }
24 | 
25 | #[test]
26 | fn test_vec_serializes_f32() {
27 |     let bump = Bump::new();
28 |     let (vec, std_vec) = compare_std_vec![in bump; 1.5707964, 3.1415927];
29 |     assert_eq_json!(vec, std_vec);
30 |     let de: std::vec::Vec<f32> =
31 |         serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap();
32 |     assert_eq!(de, std_vec);
33 | }
34 | 
35 | #[test]
36 | fn test_vec_serializes_complex() {
37 |     let bump = Bump::new();
38 |     let (vec, std_vec) = compare_std_vec![
39 |         in bump;
40 |         Mixed {
41 |             i: 8,
42 |             s: "a".into(),
43 |             o: None,
44 |             e: Test::Second,
45 |         },
46 |         Mixed {
47 |             i: 8,
48 |             s: "b".into(),
49 |             o: Some("some".into()),
50 |             e: Test::First,
51 |         }
52 |     ];
53 |     assert_eq_json!(vec, std_vec);
54 |     let de: std::vec::Vec<Mixed> =
55 |         serde_json::from_str(&serde_json::to_string(&vec).unwrap()).unwrap();
56 |     assert_eq!(de, std_vec);
57 | }
58 | 
--------------------------------------------------------------------------------
/tests/all/string.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "collections")]
2 | use bumpalo::{collections::String, format, Bump};
3 | use std::fmt::Write;
4 | 
5 | #[test]
6 | fn format_a_bunch_of_strings() {
7 |     let b = Bump::new();
8 |     let mut s = String::from_str_in("hello", &b);
9 |     for i in 0..1000 {
10 |         write!(&mut s, " {}", i).unwrap();
11 |     }
12 | }
13 | 
14 | #[test]
15 | fn trailing_comma_in_format_macro() {
16 |     let b = Bump::new();
17 |     let v = format![in &b, "{}{}", 1, 2, ];
18 |     assert_eq!(v, "12");
19 | }
20 | 
21 | #[test]
22 | fn push_str() {
23 |     let b = Bump::new();
24 |     let mut s = String::new_in(&b);
25 |     s.push_str("abc");
26 |     assert_eq!(s, "abc");
27 |     s.push_str("def");
28 |     assert_eq!(s, "abcdef");
29 |     s.push_str("");
30 |     assert_eq!(s, "abcdef");
31 |     s.push_str(&"x".repeat(4000));
32 |     assert_eq!(s.len(), 4006);
33 |     s.push_str("ghi");
34 |     assert_eq!(s.len(), 4009);
35 |     assert_eq!(&s[s.len() - 5..], "xxghi");
36 | }
37 | 
--------------------------------------------------------------------------------
/tests/all/tests.rs:
--------------------------------------------------------------------------------
1 | use bumpalo::Bump;
2 | use std::alloc::Layout;
3 | use std::fmt::Debug;
4 | use std::mem;
5 | use std::usize;
6 | 
7 | #[test]
8 | fn can_iterate_over_allocated_things() {
9 |     let mut bump = Bump::new();
10 | 
11 |     #[cfg(not(miri))]
12 |     const MAX: u64 = 131_072;
13 | 
14 |     #[cfg(miri)] // Miri is very slow, pick a smaller max that runs in a reasonable amount of time
15 |     const MAX: u64 = 1024;
16 | 
17 |     let mut chunk_ends = vec![];
18 |     let mut last = None;
19 | 
20 |     for i in 0..MAX {
21 |         let this = bump.alloc(i);
22 |         assert_eq!(*this, i);
23 |         let this = this as *const _ as usize;
24 | 
25 |         if match last {
26 |             Some(last) if last - mem::size_of::<u64>() == this => false,
27 |             _ => true,
28 |         } {
29 |             let chunk_end = this + mem::size_of::<u64>();
30 |             println!("new chunk ending @ 0x{:x}", chunk_end);
31 |             assert!(
32 |                 !chunk_ends.contains(&chunk_end),
33 |                 "should not have already allocated this chunk"
34 |             );
35 |             chunk_ends.push(chunk_end);
36 |         }
37 | 
38 |         last = Some(this);
39 |     }
40 | 
41 |     let mut seen = vec![false; MAX as usize];
42 | 
43 |     // Safe because we always allocated objects of the same type in this arena,
44 |     // and their size >= their align.
45 |     for ch in bump.iter_allocated_chunks() {
46 |         let chunk_end = ch.as_ptr() as usize + ch.len();
47 |         println!("iter chunk ending @ {:#x}", chunk_end);
48 |         assert_eq!(
49 |             chunk_ends.pop().unwrap(),
50 |             chunk_end,
51 |             "should iterate over each chunk once, in order they were allocated in"
52 |         );
53 | 
54 |         let (before, mid, after) = unsafe { ch.align_to::<u64>() };
55 |         assert!(before.is_empty());
56 |         assert!(after.is_empty());
57 |         for i in mid {
58 |             assert!(*i < MAX, "{} < {} (aka {:x} < {:x})", i, MAX, i, MAX);
59 |             seen[*i as usize] = true;
60 |         }
61 |     }
62 | 
63 |     assert!(seen.iter().all(|s| *s));
64 | }
65 | 
66 | #[cfg(not(miri))] // Miri does not panic on OOM, the interpreter halts
67 | #[test]
68 | #[should_panic(expected = "out of memory")]
69 | fn oom_instead_of_bump_pointer_overflow() {
70 |     let bump = Bump::new();
71 |     let x = bump.alloc(0_u8);
72 |     let p = x as *mut u8 as usize;
73 | 
74 |     // A size guaranteed to overflow the bump pointer.
75 |     let size = (isize::MAX as usize) - p + 1;
76 |     let align = 1;
77 |     let layout = match Layout::from_size_align(size, align) {
78 |         Err(e) => {
79 |             // Return on error so that we don't panic here, which would fail the `should_panic` test.
80 |             eprintln!("Layout::from_size_align errored: {}", e);
81 |             return;
82 |         }
83 |         Ok(l) => l,
84 |     };
85 | 
86 |     // This should panic.
87 |     bump.alloc_layout(layout);
88 | }
89 | 
90 | #[test]
91 | fn force_new_chunk_fits_well() {
92 |     let b = Bump::new();
93 | 
94 |     // Use the first chunk for something
95 |     b.alloc_layout(Layout::from_size_align(1, 1).unwrap());
96 | 
97 |     // Next force allocation of some new chunks.
98 |     b.alloc_layout(Layout::from_size_align(100_001, 1).unwrap());
99 |     b.alloc_layout(Layout::from_size_align(100_003, 1).unwrap());
100 | }
101 | 
102 | #[test]
103 | fn alloc_with_strong_alignment() {
104 |     let b = Bump::new();
105 | 
106 |     // 64 is probably the strongest alignment we'll see in practice,
107 |     // e.g. AVX-512 types, or cache line padding optimizations
108 |     b.alloc_layout(Layout::from_size_align(4096, 64).unwrap());
109 | }
110 | 
111 | #[test]
112 | fn alloc_slice_copy() {
113 |     let b = Bump::new();
114 | 
115 |     let src: &[u16] = &[0xFEED, 0xFACE, 0xA7, 0xCAFE];
116 |     let dst = b.alloc_slice_copy(src);
117 | 
118 |     assert_eq!(src, dst);
119 | }
120 | 
121 | #[test]
122 | fn alloc_slice_clone() {
123 |     let b = Bump::new();
124 | 
125 |     let src = vec![vec![0], vec![1, 2], vec![3, 4, 5], vec![6, 7, 8, 9]];
126 |     let dst = b.alloc_slice_clone(&src);
127 | 
128 |     assert_eq!(src, dst);
129 | }
130 | 
131 | #[test]
132 | fn small_size_and_large_align() {
133 |     let b = Bump::new();
134 |     let layout = std::alloc::Layout::from_size_align(1, 0x1000).unwrap();
135 |     b.alloc_layout(layout);
136 | }
137 | 
138 | fn with_capacity_helper<T, I>(iter: I)
139 | where
140 |     T: Copy + Debug + Eq,
141 |     I: Clone + Iterator<Item = T> + DoubleEndedIterator,
142 | {
143 |     for &initial_size in &[0, 1, 8, 11, 0x1000, 0x12345] {
144 |         let mut b = Bump::<1>::with_min_align_and_capacity(initial_size);
145 | 
146 |         for v in iter.clone() {
147 |             b.alloc(v);
148 |         }
149 | 
150 |         let mut pushed_values = b.iter_allocated_chunks().flat_map(|c| {
151 |             let (before, mid, after) = unsafe { c.align_to::<T>() };
152 |             assert!(before.is_empty());
153 |             assert!(after.is_empty());
154 |             mid.iter().copied()
155 |         });
156 | 
157 |         let mut iter = iter.clone().rev();
158 |         for (expected, actual) in iter.by_ref().zip(pushed_values.by_ref()) {
159 |             assert_eq!(expected, actual);
160 |         }
161 | 
162 |         assert!(iter.next().is_none());
163 |         assert!(pushed_values.next().is_none());
164 |     }
165 | }
166 | 
167 | #[test]
168 | fn with_capacity_test() {
169 |     with_capacity_helper(0u8..255);
170 |     #[cfg(not(miri))] // Miri is very slow, disable most of the test cases when using it
171 |     {
172 |         with_capacity_helper(0u16..10000);
173 |         with_capacity_helper(0u32..10000);
174 |         with_capacity_helper(0u64..10000);
175 |         with_capacity_helper(0u128..10000);
176 |     }
177 | }
178 | 
179 | #[test]
180 | fn test_reset() {
181 |     let mut b = Bump::new();
182 | 
183 |     for i in 0u64..10_000 {
184 |         b.alloc(i);
185 |     }
186 | 
187 |     assert!(b.iter_allocated_chunks().count() > 1);
188 | 
189 |     let last_chunk = b.iter_allocated_chunks().next().unwrap();
190 |     let start = last_chunk.as_ptr() as usize;
191 |     let end = start + last_chunk.len();
192 |     b.reset();
193 |     assert_eq!(
194 |         end - mem::size_of::<u64>(),
195 |         b.alloc(0u64) as *const u64 as usize
196 |     );
197 |     assert_eq!(b.iter_allocated_chunks().count(), 1);
198 | }
199 | 
200 | #[test]
201 | fn test_alignment() {
202 |     for &alignment in &[2, 4, 8, 16, 32, 64] {
203 |         let b = Bump::with_capacity(513);
204 |         let layout = std::alloc::Layout::from_size_align(alignment, alignment).unwrap();
205 | 
206 |         for _ in 0..1024 {
207 |             let ptr = b.alloc_layout(layout).as_ptr();
208 |             assert_eq!(ptr as *const u8 as usize % alignment, 0);
209 |         }
210 |     }
211 | }
212 | 
213 | #[test]
214 | fn test_chunk_capacity() {
215 |     let b = Bump::with_capacity(512);
216 |     let orig_capacity = b.chunk_capacity();
217 |     b.alloc(true);
218 |     assert!(b.chunk_capacity() < orig_capacity);
219 | }
220 | 
221 | #[test]
222 | #[cfg(feature = "allocator_api")]
223 | fn miri_stacked_borrows_issue_247() {
224 |     let bump = bumpalo::Bump::new();
225 | 
226 |     let a = Box::into_raw(Box::new_in(1u8, &bump));
227 |     drop(unsafe { Box::from_raw_in(a, &bump) });
228 | 
229 |     let _b = Box::new_in(2u16, &bump);
230 | }
231 | 
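The chunk-iteration pattern used in `can_iterate_over_allocated_things` and `with_capacity_helper` above deserves a standalone sketch, since its safety argument is easy to miss: chunks come back as raw, possibly uninitialized bytes, and reinterpreting them is only sound when every allocation in the arena had the same type (values here are illustrative):

```rust
use bumpalo::Bump;

let mut b = Bump::new();
b.alloc(1u64);
b.alloc(2u64);

for chunk in b.iter_allocated_chunks() {
    // Sound only because everything allocated above is a `u64`.
    let (_before, values, _after) = unsafe { chunk.align_to::<u64>() };
    for v in values {
        println!("allocated: {}", v);
    }
}
```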
--------------------------------------------------------------------------------
/tests/all/try_alloc_try_with.rs:
--------------------------------------------------------------------------------
1 | // All of these try_alloc_try_with tests will fail with "fatal runtime error: stack overflow" unless
2 | // LLVM manages to optimize the stack writes away.
3 | //
4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release
5 | // mode.
6 | 
7 | use bumpalo::{AllocOrInitError, Bump};
8 | 
9 | #[test]
10 | #[cfg_attr(debug_assertions, ignore)]
11 | fn try_alloc_try_with_large_array() -> Result<(), AllocOrInitError<()>> {
12 |     let b = Bump::new();
13 | 
14 |     b.try_alloc_try_with(|| Ok([4u8; 10_000_000]))?;
15 | 
16 |     Ok(())
17 | }
18 | 
19 | #[test]
20 | #[cfg_attr(debug_assertions, ignore)]
21 | fn try_alloc_try_with_large_array_err() {
22 |     let b = Bump::new();
23 | 
24 |     assert!(b
25 |         .try_alloc_try_with(|| Result::<[u8; 10_000_000], _>::Err(()))
26 |         .is_err());
27 | }
28 | 
29 | #[allow(dead_code)]
30 | struct LargeStruct {
31 |     small: usize,
32 |     big1: [u8; 20_000_000],
33 |     big2: [u8; 20_000_000],
34 |     big3: [u8; 20_000_000],
35 | }
36 | 
37 | #[test]
38 | #[cfg_attr(debug_assertions, ignore)]
39 | fn try_alloc_try_with_large_struct() -> Result<(), AllocOrInitError<()>> {
40 |     let b = Bump::new();
41 | 
42 |     b.try_alloc_try_with(|| {
43 |         Ok(LargeStruct {
44 |             small: 1,
45 |             big1: [2; 20_000_000],
46 |             big2: [3; 20_000_000],
47 |             big3: [4; 20_000_000],
48 |         })
49 |     })?;
50 | 
51 |     Ok(())
52 | }
53 | 
54 | #[test]
55 | #[cfg_attr(debug_assertions, ignore)]
56 | fn try_alloc_try_with_large_struct_err() {
57 |     let b = Bump::new();
58 | 
59 |     assert!(b
60 |         .try_alloc_try_with(|| Result::<LargeStruct, _>::Err(()))
61 |         .is_err());
62 | }
63 | 
64 | #[test]
65 | #[cfg_attr(debug_assertions, ignore)]
66 | fn try_alloc_try_with_large_tuple() -> Result<(), AllocOrInitError<()>> {
67 |     let b = Bump::new();
68 | 
69 |     b.try_alloc_try_with(|| {
70 |         Ok((
71 |             1u32,
72 |             LargeStruct {
73 |                 small: 2,
74 |                 big1: [3; 20_000_000],
75 |                 big2: [4; 20_000_000],
76 |                 big3: [5; 20_000_000],
77 |             },
78 |         ))
79 |     })?;
80 | 
81 |     Ok(())
82 | }
83 | 
84 | #[test]
85 | #[cfg_attr(debug_assertions, ignore)]
86 | fn try_alloc_try_with_large_tuple_err() {
87 |     let b = Bump::new();
88 | 
89 |     assert!(b
90 |         .try_alloc_try_with(|| { Result::<(u32, LargeStruct), _>::Err(()) })
91 |         .is_err());
92 | }
93 | 
94 | enum LargeEnum {
95 |     Small,
96 |     #[allow(dead_code)]
97 |     Large([u8; 10_000_000]),
98 | }
99 | 
100 | #[test]
101 | #[cfg_attr(debug_assertions, ignore)]
102 | fn try_alloc_try_with_large_enum() -> Result<(), AllocOrInitError<()>> {
103 |     let b = Bump::new();
104 | 
105 |     b.try_alloc_try_with(|| Ok(LargeEnum::Small))?;
106 | 
107 |     Ok(())
108 | }
109 | 
110 | #[test]
111 | #[cfg_attr(debug_assertions, ignore)]
112 | fn try_alloc_try_with_large_enum_err() {
113 |     let b = Bump::new();
114 | 
115 |     assert!(b
116 |         .try_alloc_try_with(|| Result::<LargeEnum, _>::Err(()))
117 |         .is_err());
118 | }
119 | 
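Both this file and the next stress the same mechanism: the `alloc_with` family of methods takes a closure so the compiler has the chance to construct the value directly in the arena rather than on the stack, but bumpalo documents this as an optimization that is not guaranteed, which is exactly why these tests are confined to release builds. A minimal sketch of the pattern (the size is illustrative):

```rust
use bumpalo::Bump;

let b = Bump::new();

// With luck, LLVM builds the array in place in the arena instead of on
// the stack; the tests above use arrays large enough that a stack copy
// would overflow.
let big = b.try_alloc_with(|| [0u8; 4096]).unwrap();
assert_eq!(big.len(), 4096);
```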
--------------------------------------------------------------------------------
/tests/all/try_alloc_with.rs:
--------------------------------------------------------------------------------
1 | // All of these try_alloc_with tests will fail with "fatal runtime error: stack overflow" unless LLVM
2 | // manages to optimize the stack writes away.
3 | //
4 | // We only run them when debug_assertions are not set, as we expect them to fail outside release
5 | // mode.
6 | 
7 | use bumpalo::Bump;
8 | 
9 | #[test]
10 | #[cfg_attr(debug_assertions, ignore)]
11 | fn try_alloc_with_large_array() {
12 |     let b = Bump::new();
13 | 
14 |     b.try_alloc_with(|| [4u8; 10_000_000]).unwrap();
15 | }
16 | 
17 | #[allow(dead_code)]
18 | struct LargeStruct {
19 |     small: usize,
20 |     big1: [u8; 20_000_000],
21 |     big2: [u8; 20_000_000],
22 |     big3: [u8; 20_000_000],
23 | }
24 | 
25 | #[test]
26 | #[cfg_attr(debug_assertions, ignore)]
27 | fn try_alloc_with_large_struct() {
28 |     let b = Bump::new();
29 | 
30 |     b.try_alloc_with(|| LargeStruct {
31 |         small: 1,
32 |         big1: [2; 20_000_000],
33 |         big2: [3; 20_000_000],
34 |         big3: [4; 20_000_000],
35 |     })
36 |     .unwrap();
37 | }
38 | 
39 | #[test]
40 | #[cfg_attr(debug_assertions, ignore)]
41 | fn try_alloc_with_large_tuple() {
42 |     let b = Bump::new();
43 | 
44 |     b.try_alloc_with(|| {
45 |         (
46 |             1u32,
47 |             LargeStruct {
48 |                 small: 2,
49 |                 big1: [3; 20_000_000],
50 |                 big2: [4; 20_000_000],
51 |                 big3: [5; 20_000_000],
52 |             },
53 |         )
54 |     })
55 |     .unwrap();
56 | }
57 | 
58 | enum LargeEnum {
59 |     Small,
60 |     #[allow(dead_code)]
61 |     Large([u8; 10_000_000]),
62 | }
63 | 
64 | #[test]
65 | #[cfg_attr(debug_assertions, ignore)]
66 | fn try_alloc_with_large_enum() {
67 |     let b = Bump::new();
68 | 
69 |     b.try_alloc_with(|| LargeEnum::Small).unwrap();
70 | }
71 | 
--------------------------------------------------------------------------------
/tests/all/vec.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "collections")]
2 | 
3 | use crate::quickcheck;
4 | use bumpalo::{collections::Vec, vec, Bump};
5 | use std::cell::{Cell, RefCell};
6 | use std::ops::Deref;
7 | 
8 | #[test]
9 | fn push_a_bunch_of_items() {
10 |     let b = Bump::new();
11 |     let mut v = Vec::new_in(&b);
12 |     for x in 0..10_000 {
13 |         v.push(x);
14 |     }
15 | }
16 | 
17 | #[test]
18 | fn trailing_comma_in_vec_macro() {
19 |     let b = Bump::new();
20 |     let v = vec![in &b; 1, 2, 3,];
21 |     assert_eq!(v, [1, 2, 3]);
22 | }
23 | 
24 | #[test]
25 | fn recursive_vecs() {
26 |     // The purpose of this test is to check that data structures with
27 |     // self-references are allowed without causing a compile error
28 |     // because of the drop checker (dropck)
29 |     let b = Bump::new();
30 | 
31 |     struct Node<'a> {
32 |         myself: Cell<Option<&'a Node<'a>>>,
33 |         edges: Cell<Vec<'a, &'a Node<'a>>>,
34 |     }
35 | 
36 |     let node1: &Node = b.alloc(Node {
37 |         myself: Cell::new(None),
38 |         edges: Cell::new(Vec::new_in(&b)),
39 |     });
40 |     let node2: &Node = b.alloc(Node {
41 |         myself: Cell::new(None),
42 |         edges: Cell::new(Vec::new_in(&b)),
43 |     });
44 | 
45 |     node1.myself.set(Some(node1));
46 |     node1.edges.set(bumpalo::vec![in &b; node1, node1, node2]);
47 | 
48 |     node2.myself.set(Some(node2));
49 |     node2.edges.set(bumpalo::vec![in &b; node1, node2]);
50 | }
51 | 
52 | #[test]
53 | fn test_into_bump_slice_mut() {
54 |     let b = Bump::new();
55 |     let v = bumpalo::vec![in &b; 1, 2, 3];
56 |     let slice = v.into_bump_slice_mut();
57 | 
58 |     slice[0] = 3;
59 |     slice[2] = 1;
60 | 
61 |     assert_eq!(slice, [3, 2, 1]);
62 | }
63 | 
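The point of `into_bump_slice_mut` above is lifetimes: the returned slice borrows the arena rather than the consumed `Vec`, so it can outlive the vector that built it. A minimal sketch (assuming the `collections` feature):

```rust
use bumpalo::Bump;

fn build<'b>(b: &'b Bump) -> &'b mut [u32] {
    let mut v = bumpalo::collections::Vec::new_in(b);
    v.push(1);
    v.push(2);
    // The Vec is consumed here, but its storage stays in the arena.
    v.into_bump_slice_mut()
}

let b = Bump::new();
let s = build(&b);
s[0] = 10;
assert_eq!(s, [10, 2]);
```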
64 | quickcheck! {
65 |     fn vec_resizes_causing_reallocs(sizes: std::vec::Vec<usize>) -> () {
66 |         // Exercise `realloc` by doing a bunch of `resize`s followed by
67 |         // `shrink_to_fit`s.
68 | 
69 |         let b = Bump::new();
70 |         let mut v = bumpalo::vec![in &b];
71 | 
72 |         for len in sizes {
73 |             // We don't want to get too big and OOM.
74 |             const MAX_SIZE: usize = 1 << 15;
75 | 
76 |             // But we want allocations to get fairly close to the minimum chunk
77 |             // size, so that we are exercising both realloc'ing within a chunk
78 |             // and when we need new chunks.
79 |             const MIN_SIZE: usize = 1 << 7;
80 | 
81 |             let len = std::cmp::min(len, MAX_SIZE);
82 |             let len = std::cmp::max(len, MIN_SIZE);
83 | 
84 |             v.resize(len, 0);
85 |             v.shrink_to_fit();
86 |         }
87 |     }
88 | }
89 | 
90 | #[test]
91 | fn test_vec_items_get_dropped() {
92 |     struct Foo<'a>(&'a RefCell<String>);
93 |     impl<'a> Drop for Foo<'a> {
94 |         fn drop(&mut self) {
95 |             self.0.borrow_mut().push_str("Dropped!");
96 |         }
97 |     }
98 | 
99 |     let buffer = RefCell::new(String::new());
100 |     let bump = Bump::new();
101 |     {
102 |         let mut vec_foo = Vec::new_in(&bump);
103 |         vec_foo.push(Foo(&buffer));
104 |         vec_foo.push(Foo(&buffer));
105 |     }
106 |     assert_eq!("Dropped!Dropped!", buffer.borrow().deref());
107 | }
108 | 
109 | #[test]
110 | fn test_extend_from_slice_copy() {
111 |     let bump = Bump::new();
112 |     let mut vec = vec![in &bump; 1, 2, 3];
113 |     assert_eq!(&[1, 2, 3][..], vec.as_slice());
114 | 
115 |     vec.extend_from_slice_copy(&[4, 5, 6]);
116 |     assert_eq!(&[1, 2, 3, 4, 5, 6][..], vec.as_slice());
117 | 
118 |     // Confirm that passing an empty slice is a no-op
119 |     vec.extend_from_slice_copy(&[]);
120 |     assert_eq!(&[1, 2, 3, 4, 5, 6][..], vec.as_slice());
121 | 
122 |     vec.extend_from_slice_copy(&[7]);
123 |     assert_eq!(&[1, 2, 3, 4, 5, 6, 7][..], vec.as_slice());
124 | }
125 | 
126 | #[test]
127 | fn test_extend_from_slices_copy() {
128 |     let bump = Bump::new();
129 |     let mut vec = vec![in &bump; 1, 2, 3];
130 |     assert_eq!(&[1, 2, 3][..], vec.as_slice());
131 | 
132 |     // Confirm that passing an empty slice of slices is a no-op
133 |     vec.extend_from_slices_copy(&[]);
134 |     assert_eq!(&[1, 2, 3][..], vec.as_slice());
135 | 
136 |     // Confirm that an empty slice in the slice-of-slices is a no-op
137 |     vec.extend_from_slices_copy(&[&[4, 5, 6], &[], &[7]]);
138 |     assert_eq!(&[1, 2, 3, 4, 5, 6, 7][..], vec.as_slice());
139 | 
140 |     vec.extend_from_slices_copy(&[&[8], &[9, 10, 11], &[12]]);
141 |     assert_eq!(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], vec.as_slice());
142 | }
143 | 
144 | #[cfg(feature = "std")]
145 | #[test]
146 | fn test_vec_write() {
147 |     use std::io::Write;
148 | 
149 |     let b = Bump::new();
150 |     let mut v = bumpalo::vec![in &b];
151 | 
152 |     assert_eq!(v.write(&[]).unwrap(), 0);
153 | 
154 |     v.flush().unwrap();
155 | 
156 |     assert_eq!(v.write(&[1]).unwrap(), 1);
157 | 
158 |     v.flush().unwrap();
159 | 
160 |     v.write_all(&[]).unwrap();
161 | 
162 |     v.flush().unwrap();
163 | 
164 |     v.write_all(&[2, 3]).unwrap();
165 | 
166 |     v.flush().unwrap();
167 | 
168 |     assert_eq!(v, &[1, 2, 3]);
169 | }
170 | 
--------------------------------------------------------------------------------
/tests/try_alloc.rs:
--------------------------------------------------------------------------------
1 | use bumpalo::{AllocOrInitError, Bump};
2 | use rand::Rng;
3 | use std::alloc::{GlobalAlloc, Layout, System};
4 | use std::sync::atomic::{AtomicBool, Ordering};
5 | 
6 | /// A custom allocator that wraps the system allocator, but lets us force
7 | /// allocation failures for testing.
8 | struct Allocator(AtomicBool);
9 | 
10 | impl Allocator {
11 |     fn is_returning_null(&self) -> bool {
12 |         self.0.load(Ordering::SeqCst)
13 |     }
14 | 
15 |     fn set_returning_null(&self, returning_null: bool) {
16 |         self.0.store(returning_null, Ordering::SeqCst);
17 |     }
18 | 
19 |     fn toggle_returning_null(&self) {
20 |         self.set_returning_null(!self.is_returning_null());
21 |     }
22 | 
23 |     #[allow(dead_code)] // Silence warnings for non-"collections" builds.
24 |     fn with_successful_allocs<F, T>(&self, callback: F) -> T
25 |     where
26 |         F: FnOnce() -> T,
27 |     {
28 |         let old_returning_null = self.is_returning_null();
29 |         self.set_returning_null(false);
30 |         let result = callback();
31 |         self.set_returning_null(old_returning_null);
32 |         result
33 |     }
34 | 
35 |     fn with_alloc_failures<F, T>(&self, callback: F) -> T
36 |     where
37 |         F: FnOnce() -> T,
38 |     {
39 |         let old_returning_null = self.is_returning_null();
40 |         self.set_returning_null(true);
41 |         let result = callback();
42 |         self.set_returning_null(old_returning_null);
43 |         result
44 |     }
45 | }
46 | 
47 | unsafe impl GlobalAlloc for Allocator {
48 |     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
49 |         if self.is_returning_null() {
50 |             core::ptr::null_mut()
51 |         } else {
52 |             System.alloc(layout)
53 |         }
54 |     }
55 | 
56 |     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
57 |         System.dealloc(ptr, layout);
58 |     }
59 | 
60 |     unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
61 |         if self.is_returning_null() {
62 |             core::ptr::null_mut()
63 |         } else {
64 |             System.realloc(ptr, layout, new_size)
65 |         }
66 |     }
67 | }
68 | 
69 | #[global_allocator]
70 | static GLOBAL_ALLOCATOR: Allocator = Allocator(AtomicBool::new(false));
71 | 
72 | /// `assert!` may allocate on failure (e.g. for string formatting and boxing
73 | /// panic info), so we must re-enable allocations during assertions.
74 | macro_rules! assert {
75 |     ($cond:expr $(, $args:tt)*) => {
76 |         if !$cond {
77 |             GLOBAL_ALLOCATOR.set_returning_null(false);
78 |             panic!(concat!("Assertion failed: ", stringify!($cond)));
79 |         }
80 |     };
81 | }
82 | 
83 | /// NB: We provide our own `main` rather than using the default test harness's
84 | /// so that we can ensure that tests are executed serially, and no background
85 | /// threads get tripped up by us disabling the global allocator, or anything
86 | /// like that.
87 | fn main() {
88 |     macro_rules! test {
89 |         ($name:expr, $test:expr $(,)*) => {
90 |             ($name, $test as fn())
91 |         };
92 |     }
93 | 
94 |     fn test_static_size_alloc(assert_alloc_ok: fn(bump: &Bump), assert_alloc_err: fn(bump: &Bump)) {
95 |         // Unlike with `try_alloc_layout`, it's not that easy to test a variety
96 |         // of size/capacity combinations here.
97 |         // Since nothing in Bump is really random, and we have to start fresh
98 |         // each time, just checking each case once is enough.
99 |         for &fail_alloc in &[false, true] {
100 |             let bump = GLOBAL_ALLOCATOR.with_successful_allocs(|| {
101 |                 // We can't query the remaining free space in the current chunk,
102 |                 // so we have to create a new Bump for each test and fill it to
103 |                 // the brink of a new allocation.
104 |                 let bump = Bump::try_new().unwrap();
105 | 
106 |                 // Bump preallocates space in the initial chunk, so we need to
107 |                 // use up this block prior to the actual test
108 |                 let layout = Layout::from_size_align(bump.chunk_capacity(), 1).unwrap();
109 |                 assert!(bump.try_alloc_layout(layout).is_ok());
110 | 
111 |                 bump
112 |             });
113 | 
114 |             GLOBAL_ALLOCATOR.set_returning_null(fail_alloc);
115 | 
116 |             if fail_alloc {
117 |                 assert_alloc_err(&bump);
118 |             } else {
119 |                 assert_alloc_ok(&bump);
120 |             }
121 |         }
122 |     }
123 | 
124 |     let tests = [
125 |         test!("Bump::try_new fails when global allocator fails", || {
126 |             GLOBAL_ALLOCATOR.with_alloc_failures(|| {
127 |                 assert!(Bump::try_with_capacity(1).is_err());
128 |             });
129 |         }),
130 |         test!(
131 |             "test try_alloc_layout with and without global allocation failures",
132 |             || {
133 |                 const NUM_TESTS: usize = 5000;
134 |                 const MAX_BYTES_ALLOCATED: usize = 65536;
135 | 
136 |                 let mut bump = Bump::try_new().unwrap();
137 |                 let mut bytes_allocated = bump.chunk_capacity();
138 | 
139 |                 // Bump preallocates space in the initial chunk, so we need to
140 |                 // use up this block prior to the actual test
141 |                 let layout = Layout::from_size_align(bump.chunk_capacity(), 1).unwrap();
142 |                 assert!(bump.try_alloc_layout(layout).is_ok());
143 | 
144 |                 let mut rng = rand::thread_rng();
145 | 
146 |                 for _ in 0..NUM_TESTS {
147 |                     if rng.gen() {
148 |                         GLOBAL_ALLOCATOR.toggle_returning_null();
149 |                     }
150 | 
151 |                     let layout = Layout::from_size_align(bump.chunk_capacity() + 1, 1).unwrap();
152 |                     if GLOBAL_ALLOCATOR.is_returning_null() {
153 |                         assert!(bump.try_alloc_layout(layout).is_err());
154 |                     } else {
155 |                         assert!(bump.try_alloc_layout(layout).is_ok());
156 |                         bytes_allocated += bump.chunk_capacity();
157 |                     }
158 | 
159 |                     if bytes_allocated >= MAX_BYTES_ALLOCATED {
160 |                         bump = GLOBAL_ALLOCATOR.with_successful_allocs(|| Bump::try_new().unwrap());
161 |                         bytes_allocated = bump.chunk_capacity();
162 |                     }
163 |                 }
164 |             },
165 |         ),
166 |         test!(
167 |             "test try_alloc with and without global allocation failures",
168 |             || {
169 |                 test_static_size_alloc(
170 |                     |bump| assert!(bump.try_alloc(1u8).is_ok()),
171 |                     |bump| assert!(bump.try_alloc(1u8).is_err()),
172 |                 );
173 |             },
174 |         ),
175 |         test!(
176 |             "test try_alloc_with with and without global allocation failures",
177 |             || {
178 |                 test_static_size_alloc(
179 |                     |bump| assert!(bump.try_alloc_with(|| 1u8).is_ok()),
180 |                     |bump| assert!(bump.try_alloc_with(|| 1u8).is_err()),
181 |                 );
182 |             },
183 |         ),
184 |         test!(
185 |             "test try_alloc_try_with (Ok) with and without global allocation failures",
186 |             || {
187 |                 test_static_size_alloc(
188 |                     |bump| assert!(bump.try_alloc_try_with::<_, _, ()>(|| Ok(1u8)).is_ok()),
189 |                     |bump| assert!(bump.try_alloc_try_with::<_, _, ()>(|| Ok(1u8)).is_err()),
190 |                 );
191 |             },
192 |         ),
193 |         test!(
194 |             "test try_alloc_try_with (Err) with and without global allocation failures",
195 |             || {
196 |                 test_static_size_alloc(
197 |                     |bump| {
198 |                         assert!(matches!(
199 |                             bump.try_alloc_try_with::<_, u8, _>(|| Err(())),
200 |                             Err(AllocOrInitError::Init(_))
201 |                         ));
202 |                     },
203 |                     |bump| {
204 |                         assert!(matches!(
205 |                             bump.try_alloc_try_with::<_, u8, _>(|| Err(())),
206 |                             Err(AllocOrInitError::Alloc(_))
207 |                         ));
208 |                     },
209 |                 );
210 |             },
211 |         ),
212 |         #[cfg(feature = "collections")]
213 |         test!("test Vec::try_reserve and Vec::try_reserve_exact", || {
214 |             use bumpalo::collections::Vec;
215 | 
216 |             let bump = Bump::try_new().unwrap();
217 | 
218 |             GLOBAL_ALLOCATOR.with_alloc_failures(|| {
219 |                 let mut vec = Vec::<u8>::new_in(&bump);
220 |                 let chunk_cap = bump.chunk_capacity();
221 | 
222 |                 // Will always succeed since this size gets pre-allocated in Bump::try_new()
223 |                 assert!(vec.try_reserve(chunk_cap).is_ok());
224 |                 assert!(vec.try_reserve_exact(chunk_cap).is_ok());
225 | 
226 |                 // Fails to allocate further since the allocator returns null
227 |                 assert!(vec.try_reserve(chunk_cap + 1).is_err());
228 |                 assert!(vec.try_reserve_exact(chunk_cap + 1).is_err());
229 |             });
230 | 
231 |             GLOBAL_ALLOCATOR.with_successful_allocs(|| {
232 |                 let mut vec = Vec::<u8>::new_in(&bump);
233 |                 let chunk_cap = bump.chunk_capacity();
234 | 
235 |                 // Will always succeed since this size gets pre-allocated in Bump::try_new()
236 |                 assert!(vec.try_reserve(chunk_cap).is_ok());
237 |                 assert!(vec.try_reserve_exact(chunk_cap).is_ok());
238 | 
239 |                 // Succeeds in allocating further
240 |                 assert!(vec.try_reserve(chunk_cap + 1).is_ok());
241 |                 assert!(vec.try_reserve_exact(chunk_cap + 1).is_ok());
242 |             });
243 |         }),
244 |     ];
245 | 
246 |     for (name, test) in tests.iter() {
247 |         assert!(!GLOBAL_ALLOCATOR.is_returning_null());
248 | 
249 |         eprintln!("=== {} ===", name);
250 |         test();
251 | 
252 |         GLOBAL_ALLOCATOR.set_returning_null(false);
253 |     }
254 | }
255 | 
--------------------------------------------------------------------------------
/valgrind.supp:
--------------------------------------------------------------------------------
1 | {
2 |    <insert_a_suppression_name_here>
3 |    Memcheck:FishyValue
4 |    malloc(size)
5 |    fun:malloc
6 |    obj:/**/target/*/deps/tests-*
7 | }
8 | {
9 |    <insert_a_suppression_name_here>
10 |    Memcheck:Param
11 |    statx(buf)
12 |    fun:syscall
13 |    fun:statx
14 | }
15 | {
16 |    <insert_a_suppression_name_here>
17 |    Memcheck:Param
18 |    statx(file_name)
19 |    fun:syscall
20 |    fun:statx
21 | }
22 | {
23 |    <insert_a_suppression_name_here>
24 |    Memcheck:Param
25 |    statx(buf)
26 |    fun:statx
27 |    fun:statx
28 | }
29 | {
30 |    <insert_a_suppression_name_here>
31 |    Memcheck:Param
32 |    statx(file_name)
33 |    fun:statx
34 |    fun:statx
35 | }
36 | 
--------------------------------------------------------------------------------