├── .gitignore ├── LICENSE ├── README.md └── src └── root.zig /.gitignore: -------------------------------------------------------------------------------- 1 | .zig-cache/ 2 | zig-out/ 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Dominic Weiller 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Composable allocators 2 | 3 | This is an implementation in Zig of the ideas [presented](https://www.youtube.com/watch?v=LIb3L4vKZ7U) by Andrei Alexandrescu about composable allocators. 4 | 5 | The allocators are (mostly) implemented as comptime generics conforming to an interface that is (optionally) a superset of `std.mem.Allocator`, except that concrete types are used instead of `*anyopaque`. 
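
For a concrete sense of the interface, the required declarations mirror the `std.mem.Allocator` vtable but take a typed `self` pointer instead of `*anyopaque`. The sketch below is a condensed version of the in-tree `Null` allocator from `src/root.zig`, which satisfies the required interface by refusing every request:

```zig
const std = @import("std");

/// Minimal allocator satisfying the required interface: it never hands out
/// memory, so only zero-sized buffers are ever valid to resize, remap, or free.
pub const Null = struct {
    pub fn alloc(_: *Null, _: usize, _: std.mem.Alignment, _: usize) ?[*]u8 {
        return null;
    }

    pub fn resize(_: *Null, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
        return buf.len == 0 and new_len == 0;
    }

    pub fn remap(_: *Null, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
        return if (buf.len == 0 and new_len == 0) buf.ptr else null;
    }

    pub fn free(_: *Null, buf: []u8, _: std.mem.Alignment, _: usize) void {
        std.debug.assert(buf.len == 0);
    }
};
```

Any type with these four declarations passes `validateAllocator` and can be wrapped into a `std.mem.Allocator` with `allocator(&instance)`; the optional `owns`, `allocAll`, and `freeAll` declarations unlock the composite allocators described below.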

The following allocators are either currently implemented (those with a tick) or planned:

- [x] `Std` (for wrapping a `std.mem.Allocator`)
- [x] `Null`
- [x] `FixedBuffer` (bump allocator with fixed memory buffer)
- [x] `Fallback`
- [x] `Stack` (a thin wrapper around `FixedBuffer` putting the buffer on the stack)
- [ ] `Affix` (add optional extra data before/after each allocation)
- [x] `FreeList` (allocates blocks of a specific size; non-thread-safe)
- [ ] `ThreadSafeFreeList` (a thread-safe version of `FreeList`, allowing other threads to free, but not allocate)
- [ ] `BitMapped` (allocates blocks of a specific size, tracking occupancy with a bitmap)
- [ ] `Cascading` (holds a collection of allocators of the same type, adding a new one when they are all full)
- [x] `Segregated` (chooses between two allocators based on a size threshold)
- [ ] `Bucket` (like `Segregated`, but with multiple size classes)

All of these (except for the first three) are generic over the allocator types that they wrap, allowing them to be composed into complex allocation strategies in a relatively simple way. For example, an allocator that allocates on the stack (as a bump allocator) but falls back to a `std.heap.GeneralPurposeAllocator` can be implemented as:
```zig
const std = @import("std");
const ca = @import("/path/to/composable-allocator/root.zig");

pub fn main() void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    var test_allocator: ca.Fallback(ca.Stack(1024), ca.Std) = .{
        .primary = .init,
        .fallback = .{ .a = gpa.allocator() },
    };
    const a = ca.allocator(&test_allocator);
    _ = a; // use `a` like any other `std.mem.Allocator`
}
```

In the above, `a` is a `std.mem.Allocator` that will first try to use a bump allocator with 1024 bytes on the stack before falling back to `std.heap.GeneralPurposeAllocator(.{})`.

## WIP
If you have suggestions, would like to contribute allocators, or run into problems using anything here, issues and PRs are welcome.
--------------------------------------------------------------------------------
/src/root.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const assert = std.debug.assert;

pub fn Interface(comptime T: type) type {
    return struct {
        const required = struct {
            /// Return a pointer to `len` bytes with specified `alignment`, or return
            /// `null` indicating the allocation failed.
            ///
            /// `ret_addr` is optionally provided as the first return address of the
            /// allocation call stack. If the value is `0` it means no return address
            /// has been provided.
            const alloc = fn (
                self: *T,
                len: usize,
                alignment: std.mem.Alignment,
                ret_addr: usize,
            ) ?[*]u8;

            /// Attempt to expand or shrink memory in place.
            ///
            /// `memory.len` must equal the length requested from the most recent
            /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
            /// equal the same value that was passed as the `alignment` parameter to
            /// the original `alloc` call.
            ///
            /// A result of `true` indicates the resize was successful and the
            /// allocation now has the same address but a size of `new_len`. `false`
            /// indicates the resize could not be completed without moving the
            /// allocation to a different address.
            ///
            /// `new_len` must be greater than zero.
            ///
            /// `ret_addr` is optionally provided as the first return address of the
            /// allocation call stack. If the value is `0` it means no return address
            /// has been provided.
            const resize = fn (
                self: *T,
                memory: []u8,
                alignment: std.mem.Alignment,
                new_len: usize,
                ret_addr: usize,
            ) bool;

            /// Attempt to expand or shrink memory, allowing relocation.
            ///
            /// `memory.len` must equal the length requested from the most recent
            /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
            /// equal the same value that was passed as the `alignment` parameter to
            /// the original `alloc` call.
            ///
            /// A non-`null` return value indicates the resize was successful. The
            /// allocation may have the same address, or may have been relocated. In either
            /// case, the allocation now has size of `new_len`. A `null` return value
            /// indicates that the resize would be equivalent to allocating new memory,
            /// copying the bytes from the old memory, and then freeing the old memory.
            /// In such case, it is more efficient for the caller to perform the copy.
            ///
            /// `new_len` must be greater than zero.
            ///
            /// `ret_addr` is optionally provided as the first return address of the
            /// allocation call stack. If the value is `0` it means no return address
            /// has been provided.
            const remap = fn (
                self: *T,
                memory: []u8,
                alignment: std.mem.Alignment,
                new_len: usize,
                ret_addr: usize,
            ) ?[*]u8;

            /// Free and invalidate a region of memory.
            ///
            /// `memory.len` must equal the length requested from the most recent
            /// successful call to `alloc`, `resize`, or `remap`. `alignment` must
            /// equal the same value that was passed as the `alignment` parameter to
            /// the original `alloc` call.
            ///
            /// `ret_addr` is optionally provided as the first return address of the
            /// allocation call stack. If the value is `0` it means no return address
            /// has been provided.
            const free = fn (
                self: *T,
                memory: []u8,
                alignment: std.mem.Alignment,
                ret_addr: usize,
            ) void;
        };

        const optional = struct {
            /// Checks whether the allocator owns the memory for `buf`.
            /// Composite allocators will generally pass `buf` to the underlying
            /// allocators, so it is not advisable to have composite allocators
            /// share backing allocators that define `owns`.
            const owns = fn (self: *T, buf: []u8) bool;

            /// Attempt to return all remaining memory available to the allocator, or
            /// return null if there isn't any.
            const allocAll = fn (self: *T) ?[]u8;

            /// Free all allocated memory.
102 | const freeAll = fn (self: *T) void; 103 | }; 104 | }; 105 | } 106 | 107 | pub fn allocator(a: anytype) std.mem.Allocator { 108 | const Self = @typeInfo(@TypeOf(a)).pointer.child; 109 | 110 | comptime { 111 | validateAllocator(Self); 112 | } 113 | 114 | return .{ 115 | .ptr = a, 116 | .vtable = &.{ 117 | .alloc = &struct { 118 | fn alloc( 119 | ptr: *anyopaque, 120 | len: usize, 121 | alignment: std.mem.Alignment, 122 | ret_addr: usize, 123 | ) ?[*]u8 { 124 | const self: *Self = @ptrCast(@alignCast(ptr)); 125 | return self.alloc(len, alignment, ret_addr); 126 | } 127 | }.alloc, 128 | .resize = &struct { 129 | fn resize( 130 | ptr: *anyopaque, 131 | buf: []u8, 132 | alignment: std.mem.Alignment, 133 | new_len: usize, 134 | ret_addr: usize, 135 | ) bool { 136 | const self: *Self = @ptrCast(@alignCast(ptr)); 137 | return self.resize(buf, alignment, new_len, ret_addr); 138 | } 139 | }.resize, 140 | .remap = &struct { 141 | fn remap( 142 | ptr: *anyopaque, 143 | buf: []u8, 144 | alignment: std.mem.Alignment, 145 | new_len: usize, 146 | ret_addr: usize, 147 | ) ?[*]u8 { 148 | const self: *Self = @ptrCast(@alignCast(ptr)); 149 | return self.remap(buf, alignment, new_len, ret_addr); 150 | } 151 | }.remap, 152 | .free = &struct { 153 | fn free( 154 | ptr: *anyopaque, 155 | buf: []u8, 156 | alignment: std.mem.Alignment, 157 | ret_addr: usize, 158 | ) void { 159 | const self: *Self = @ptrCast(@alignCast(ptr)); 160 | self.free(buf, alignment, ret_addr); 161 | } 162 | }.free, 163 | }, 164 | }; 165 | } 166 | 167 | pub fn validateAllocator(comptime T: type) void { 168 | comptime { 169 | for (std.meta.declarations(Interface(T).required)) |decl| { 170 | const E = @field(Interface(T).required, decl.name); 171 | if (!@hasDecl(T, decl.name)) { 172 | @compileError("type " ++ @typeName(T) ++ " must have declaration " ++ decl.name ++ 173 | " of type " ++ @typeName(T)); 174 | } 175 | const D = @TypeOf(@field(T, decl.name)); 176 | if (D != E) { 177 | @compileError("declaration " ++ decl.name ++ " in type " ++ @typeName(T) ++ 178 | " is expected to have type " ++ @typeName(E) ++ ", found " ++ @typeName(D)); 179 | } 180 | } 181 | 182 | for (std.meta.declarations(Interface(T).optional)) |decl| { 183 | if (@hasDecl(T, decl.name)) { 184 | const E = @field(Interface(T).optional, decl.name); 185 | const D = @TypeOf(@field(T, decl.name)); 186 | if (D != E) { 187 | @compileError("declaration " ++ decl.name ++ " in type " ++ @typeName(T) ++ 188 | " is expected to have type " ++ @typeName(E) ++ ", found " ++ @typeName(D)); 189 | } 190 | } 191 | } 192 | } 193 | } 194 | 195 | pub const Std = struct { 196 | a: std.mem.Allocator, 197 | 198 | pub fn alloc(self: *Std, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 { 199 | return self.a.rawAlloc(len, alignment, ret_addr); 200 | } 201 | 202 | pub fn resize( 203 | self: *Std, 204 | buf: []u8, 205 | alignment: std.mem.Alignment, 206 | new_len: usize, 207 | ret_addr: usize, 208 | ) bool { 209 | return self.a.rawResize(buf, alignment, new_len, ret_addr); 210 | } 211 | 212 | pub fn remap( 213 | self: *Std, 214 | buf: []u8, 215 | alignment: std.mem.Alignment, 216 | new_len: usize, 217 | ret_addr: usize, 218 | ) ?[*]u8 { 219 | return self.a.rawRemap(buf, alignment, new_len, ret_addr); 220 | } 221 | 222 | pub fn free(self: *Std, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { 223 | return self.a.rawFree(buf, alignment, ret_addr); 224 | } 225 | }; 226 | 227 | pub const Null = struct { 228 | pub fn alloc(_: *Null, _: usize, _: std.mem.Alignment, _: 
usize) ?[*]u8 {
        return null;
    }

    pub fn resize(_: *Null, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) bool {
        assert(buf.len == 0);
        return new_len == 0;
    }

    pub fn remap(_: *Null, buf: []u8, _: std.mem.Alignment, new_len: usize, _: usize) ?[*]u8 {
        assert(buf.len == 0);
        return if (new_len == 0) buf.ptr else null;
    }

    pub fn free(_: *Null, buf: []u8, _: std.mem.Alignment, _: usize) void {
        assert(buf.len == 0);
    }

    pub fn allocAll(_: *Null) ?[]u8 {
        return null;
    }

    pub fn freeAll(_: *Null) void {}
};

pub const FixedBuffer = struct {
    buffer: []u8,
    len: usize,

    const Self = @This();

    fn isLastAllocation(self: Self, buf: []u8) bool {
        return self.buffer.ptr + self.len == buf.ptr + buf.len;
    }

    pub fn alloc(self: *Self, len: usize, alignment: std.mem.Alignment, ret_addr: usize) ?[*]u8 {
        _ = ret_addr;
        const align_offset = std.mem.alignPointerOffset(
            self.buffer.ptr + self.len,
            alignment.toByteUnits(),
        ) orelse return null;
        const start_index = self.len + align_offset;
        const new_len = start_index + len;
        if (new_len > self.buffer.len) return null;
        self.len = new_len;
        return self.buffer.ptr + start_index;
    }

    pub fn resize(
        self: *Self,
        buf: []u8,
        alignment: std.mem.Alignment,
        new_len: usize,
        ret_addr: usize,
    ) bool {
        _ = alignment;
        _ = ret_addr;
        assert(self.owns(buf));

        if (!self.isLastAllocation(buf)) {
            if (new_len > buf.len) return false;
            return true;
        }

        if (new_len <= buf.len) {
            self.len -= buf.len - new_len;
            return true;
        }

        const new_total_len = self.len + (new_len - buf.len);
        if (new_total_len > self.buffer.len) return false;
        self.len = new_total_len;
        return true;
    }

    pub fn remap(
        self: *Self,
        buf: []u8,
        alignment: std.mem.Alignment,
        new_len: usize,
        ret_addr: usize,
    ) ?[*]u8 {
        if (self.resize(buf, alignment, new_len, ret_addr)) return buf.ptr;
        return null;
    }

    pub fn free(self: *Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
        _ = alignment;
        _ = ret_addr;
        assert(self.owns(buf));

        if (self.isLastAllocation(buf)) {
            self.len -= buf.len;
        }
    }

    pub fn owns(self: *Self, buf: []u8) bool {
        return sliceContainsSlice(self.buffer, buf);
    }

    pub fn allocAll(self: *Self) ?[]u8 {
        if (self.len == self.buffer.len) return null;
        // Hand out everything that is still unused; remember where the unused
        // region starts before marking the whole buffer as allocated.
        const start = self.len;
        self.len = self.buffer.len;
        return self.buffer[start..];
    }

    pub fn freeAll(self: *Self) void {
        self.len = 0;
    }
};

pub fn Fallback(comptime PrimaryAllocator: type, comptime FallbackAllocator: type) type {
    comptime {
        validateAllocator(PrimaryAllocator);
        validateAllocator(FallbackAllocator);
    }

    return struct {
        primary: PrimaryAllocator,
        fallback: FallbackAllocator,

        const Self = @This();

        pub fn alloc(
            self: *Self,
            len: usize,
            alignment: std.mem.Alignment,
            ret_addr: usize,
        ) ?[*]u8 {
            return self.primary.alloc(len, alignment, ret_addr) orelse {
                return self.fallback.alloc(len, alignment, ret_addr);
            };
360 | } 361 | 362 | pub fn resize( 363 | self: *Self, 364 | buf: []u8, 365 | alignment: std.mem.Alignment, 366 | new_len: usize, 367 | ret_addr: usize, 368 | ) bool { 369 | if (self.primary.owns(buf)) { 370 | return self.primary.resize(buf, alignment, new_len, ret_addr); 371 | } else { 372 | return self.fallback.resize(buf, alignment, new_len, ret_addr); 373 | } 374 | } 375 | 376 | pub fn remap( 377 | self: *Self, 378 | buf: []u8, 379 | alignment: std.mem.Alignment, 380 | new_len: usize, 381 | ret_addr: usize, 382 | ) ?[*]u8 { 383 | if (self.primary.owns(buf)) { 384 | return self.primary.remap(buf, alignment, new_len, ret_addr); 385 | } else { 386 | return self.fallback.remap(buf, alignment, new_len, ret_addr); 387 | } 388 | } 389 | 390 | pub fn free(self: *Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { 391 | if (self.primary.owns(buf)) { 392 | self.primary.free(buf, alignment, ret_addr); 393 | } else { 394 | self.fallback.free(buf, alignment, ret_addr); 395 | } 396 | } 397 | }; 398 | } 399 | 400 | fn sliceContainsSlice(slice: []u8, other: []u8) bool { 401 | return @intFromPtr(slice.ptr) <= @intFromPtr(other.ptr) and 402 | @intFromPtr(slice.ptr) + slice.len >= @intFromPtr(other.ptr) + other.len; 403 | } 404 | 405 | pub fn Stack(comptime capacity: usize) type { 406 | return struct { 407 | buffer: [capacity]u8 = undefined, 408 | len: usize, 409 | 410 | const Self = @This(); 411 | 412 | pub const init: Self = .{ .buffer = undefined, .len = 0 }; 413 | 414 | fn fixedBufferAllocator(self: *Self) FixedBuffer { 415 | return .{ .buffer = &self.buffer, .len = self.len }; 416 | } 417 | 418 | pub fn alloc( 419 | self: *Self, 420 | len: usize, 421 | alignment: std.mem.Alignment, 422 | ret_addr: usize, 423 | ) ?[*]u8 { 424 | var fba = self.fixedBufferAllocator(); 425 | defer self.len = fba.len; 426 | return fba.alloc(len, alignment, ret_addr); 427 | } 428 | 429 | pub fn resize( 430 | self: *Self, 431 | buf: []u8, 432 | alignment: std.mem.Alignment, 433 | new_len: usize, 434 | ret_addr: usize, 435 | ) bool { 436 | var fba = self.fixedBufferAllocator(); 437 | defer self.len = fba.len; 438 | return fba.resize(buf, alignment, new_len, ret_addr); 439 | } 440 | 441 | pub fn remap( 442 | self: *Self, 443 | buf: []u8, 444 | alignment: std.mem.Alignment, 445 | new_len: usize, 446 | ret_addr: usize, 447 | ) ?[*]u8 { 448 | var fba = self.fixedBufferAllocator(); 449 | defer self.len = fba.len; 450 | return fba.remap(buf, alignment, new_len, ret_addr); 451 | } 452 | 453 | pub fn free(self: *Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void { 454 | var fba = self.fixedBufferAllocator(); 455 | defer self.len = fba.len; 456 | fba.free(buf, alignment, ret_addr); 457 | } 458 | 459 | pub fn owns(self: *Self, buf: []u8) bool { 460 | var fba = self.fixedBufferAllocator(); 461 | return fba.owns(buf); 462 | } 463 | 464 | pub fn allocAll(self: *Self) ?[]u8 { 465 | var fba = self.fixedBufferAllocator(); 466 | defer self.len = fba.len; 467 | return fba.allocAll(); 468 | } 469 | 470 | pub fn freeAll(self: *Self) void { 471 | var fba = self.fixedBufferAllocator(); 472 | defer self.len = fba.len; 473 | fba.freeAll(); 474 | } 475 | }; 476 | } 477 | 478 | pub fn FreeList( 479 | comptime BackingAllocator: type, 480 | comptime block_size: usize, 481 | comptime alloc_count: usize, // number of blocks to allocate at a time, 482 | comptime max_list_size: ?usize, 483 | ) type { 484 | comptime { 485 | validateAllocator(BackingAllocator); 486 | } 487 | 488 | return struct { 489 | free_list: 
std.SinglyLinkedList,
        free_size: usize,
        backing_allocator: BackingAllocator,

        const Self = @This();

        const Node = std.SinglyLinkedList.Node;
        const block_alignment: std.mem.Alignment = .fromByteUnits(block_size);
        comptime {
            assert(alloc_count != 0);
            assert(@sizeOf(Node) <= block_size);
            assert(block_alignment.compare(.gte, .of(Node)));
        }

        fn addBlocksToFreeList(
            self: *Self,
            blocks: []align(block_alignment.toByteUnits()) [block_size]u8,
        ) void {
            var i: usize = blocks.len;
            while (i > 0) : (i -= 1) {
                const node: *Node = @ptrCast(@alignCast(&blocks[i - 1]));
                self.free_list.prepend(node);
            }
            self.free_size += blocks.len;
        }

        pub fn alloc(
            self: *Self,
            len: usize,
            alignment: std.mem.Alignment,
            ret_addr: usize,
        ) ?[*]u8 {
            // TODO: should we check len < block_size and/or check if ptr_align == block_size
            // to try and avoid gcd calculation?

            // blocks are always aligned to block_size, so the requested alignment
            // must divide block_size
            assert(block_alignment.compare(.gte, alignment));
            assert(len <= block_size);

            if (self.free_list.popFirst()) |node| {
                self.free_size -= 1;
                return @ptrCast(node);
            }

            if (alloc_count > 1 and
                if (max_list_size) |max| self.free_size + alloc_count - 1 < max else true)
            {
                // propagate failure from the backing allocator instead of casting null
                const ptr = self.backing_allocator.alloc(
                    block_size * alloc_count,
                    block_alignment,
                    ret_addr,
                ) orelse return null;
                const block_ptr: [*]align(block_alignment.toByteUnits()) [block_size]u8 =
                    @ptrCast(@alignCast(ptr));
                const blocks = block_ptr[1..alloc_count];
                self.addBlocksToFreeList(blocks);
                return ptr;
            } else {
                // either alloc_count == 1 or the free list cannot hold the extra
                // blocks, so request a single block only
                return self.backing_allocator.alloc(
                    block_size,
                    block_alignment,
                    ret_addr,
                );
            }
        }

        pub fn resize(
            self: *Self,
            buf: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            ret_addr: usize,
        ) bool {
            _ = self;
            _ = buf;
            _ = alignment;
            _ = ret_addr;
            return new_len <= block_size;
        }

        pub fn remap(
            self: *Self,
            buf: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            ret_addr: usize,
        ) ?[*]u8 {
            return if (self.resize(buf, alignment, new_len, ret_addr)) buf.ptr else null;
        }

        pub fn free(self: *Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
            assert(block_alignment.compare(.gte, alignment));

            if (max_list_size == null or self.free_size < max_list_size.?) {
                const node: *align(block_alignment.toByteUnits()) Node = @ptrCast(@alignCast(buf.ptr));
                self.free_list.prepend(node);
                self.free_size += 1;
                return;
            } else {
                self.backing_allocator.free(buf, alignment, ret_addr);
            }
        }

        pub fn freeAll(self: *Self) void {
            while (self.free_list.popFirst()) |node| {
                const casted_ptr: *align(block_alignment.toByteUnits()) [block_size]u8 =
                    @alignCast(@ptrCast(node));
                self.backing_allocator.free(casted_ptr, block_alignment, @returnAddress());
            }
        }
    };
}

pub fn Segregated(
    comptime SmallAllocator: type,
    comptime LargeAllocator: type,
    comptime threshold: usize, // largest size to use the small allocator for
) type {
    comptime {
        validateAllocator(SmallAllocator);
        validateAllocator(LargeAllocator);
    }

    return struct {
        small_allocator: SmallAllocator,
        large_allocator: LargeAllocator,

        const Self = @This();

        pub fn alloc(
            self: *Self,
            len: usize,
            alignment: std.mem.Alignment,
            ret_addr: usize,
        ) ?[*]u8 {
            return if (len <= threshold)
                self.small_allocator.alloc(len, alignment, ret_addr)
            else
                self.large_allocator.alloc(len, alignment, ret_addr);
        }

        pub fn resize(
            self: *Self,
            buf: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            ret_addr: usize,
        ) bool {
            return if (buf.len <= threshold and new_len <= threshold)
                self.small_allocator.resize(buf, alignment, new_len, ret_addr)
            else if (buf.len > threshold and new_len > threshold)
                self.large_allocator.resize(buf, alignment, new_len, ret_addr)
            else
                false;
        }

        pub fn remap(
            self: *Self,
            buf: []u8,
            alignment: std.mem.Alignment,
            new_len: usize,
            ret_addr: usize,
        ) ?[*]u8 {
            return if (self.resize(buf, alignment, new_len, ret_addr)) buf.ptr else null;
        }

        pub fn free(self: *Self, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
            if (buf.len <= threshold) {
                self.small_allocator.free(buf, alignment, ret_addr);
            } else {
                self.large_allocator.free(buf, alignment, ret_addr);
            }
        }

        pub const owns = if (hasOptionalFunc(SmallAllocator, "owns") and
            hasOptionalFunc(LargeAllocator, "owns")) struct {
            fn owns(self: *Self, buf: []u8) bool {
                return if (buf.len <= threshold)
                    self.small_allocator.owns(buf)
                else
                    self.large_allocator.owns(buf);
            }
        }.owns else null;

        pub const freeAll = if (hasOptionalFunc(SmallAllocator, "freeAll") and
            hasOptionalFunc(LargeAllocator, "freeAll")) struct {
            pub fn freeAll(self: *Self) void {
                self.small_allocator.freeAll();
                self.large_allocator.freeAll();
            }
        }.freeAll else null;
    };
}

fn hasOptionalFunc(comptime T: type, comptime name: []const u8) bool {
    // A declaration only counts if it exists and is not the `null` marker that
    // composite allocators use for unsupported optional functions.
    return @hasDecl(T, name) and @TypeOf(@field(T, name)) != @TypeOf(null);
}

test {
    const StackFallback = Fallback(Stack(1024), Std);
    const StackSegregatedFallback = Fallback(
        Stack(1024),
        Segregated(
            FreeList(FixedBuffer, 64, 2, null),
            Std,
            64,
        ),
    );
    comptime {
        validateAllocator(Null);
        validateAllocator(FixedBuffer);
        validateAllocator(StackFallback);
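        // Extra sanity check on a hypothetical composition: when both children
        // provide the optional `owns`/`freeAll` declarations, as `FixedBuffer`
        // does, `Segregated` re-exports them, so the composed type should also
        // validate.
        validateAllocator(Segregated(FixedBuffer, FixedBuffer, 64));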
validateAllocator(StackSegregatedFallback); 703 | } 704 | std.testing.refAllDecls(@This()); 705 | std.testing.refAllDecls(Fallback(FixedBuffer, Null)); 706 | std.testing.refAllDecls(Stack(1024)); 707 | std.testing.refAllDecls(FreeList(Stack(1024), 64, 1, null)); 708 | std.testing.refAllDecls(FreeList(Stack(1024), 8, 1, 32)); 709 | std.testing.refAllDecls(StackFallback); 710 | std.testing.refAllDecls(StackSegregatedFallback); 711 | 712 | var test_allocator: Fallback(Stack(1024), Std) = .{ 713 | .primary = .init, 714 | .fallback = .{ .a = std.testing.allocator }, 715 | }; 716 | const a = allocator(&test_allocator); 717 | 718 | try std.heap.testAllocator(a); // the first realloc makes the test fail 719 | try std.heap.testAllocatorAligned(a); 720 | try std.heap.testAllocatorAlignedShrink(a); 721 | try std.heap.testAllocatorLargeAlignment(a); 722 | } 723 | --------------------------------------------------------------------------------