├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── build.zig.zon └── src ├── SubAllocator.zig └── zuballoc.zig /.gitignore: -------------------------------------------------------------------------------- 1 | .zig-cache/ 2 | zig-out/ 3 | 4 | **/.DS_Store 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Justus Klausecker 4 | Copyright (c) 2023 Sebastian Aaltonen 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # zuballoc 2 | A hard realtime O(1) allocator for sub-allocating memory regions with minimal fragmentation. 3 | 4 | This implementation is based on the [OffsetAllocator by sebbbi](https://github.com/sebbbi/OffsetAllocator). 5 | 6 | Uses 256 bins following the 8-bit floating point distribution (3 bits mantissa, 5 bits exponent). 7 | 8 | The allocation metadata can optionally be stored outside of the sub-allocated memory region, making this allocator suitable for sub-allocating any resource, such as GPU heaps, buffers and arrays. 9 | 10 | The allocator is fully compatible with the Zig `Allocator` interface and provides a `.allocator()` function. 11 | 12 | ## Implementation 13 | 14 | The goal of this allocator is to provide consistent and predictable allocation speeds without any outliers, making it suitable for realtime applications with tight time constraints. 15 | 16 | This is achieved by using a two-stage bit set and bitwise operations (mainly count-trailing-zeroes) instead of loops to find free nodes that contain enough memory to fulfill an allocation request. 17 | 18 | The general architecture and implementation are based on the aforementioned [OffsetAllocator](https://github.com/sebbbi/OffsetAllocator), which is itself based on the [Two-Level Segregated Fit (TLSF) algorithm](https://www.researchgate.net/publication/4080369_TLSF_A_new_dynamic_memory_allocator_for_real-time_systems). 19 | 20 | TLSF was first developed in the early 2000s as a successor to the (much older) buddy allocator and its derivatives, which often tend to produce a lot of memory fragmentation. [It has been shown](https://www.researchgate.net/publication/234785757_A_comparison_of_memory_allocators_for_real-time_applications) to provide the best balance between response time and fragmentation compared to other realtime allocation schemes. 21 | 22 | When the allocator receives an allocation request, it first calculates the appropriate bin for the given size. This is done by converting the requested size to an 8-bit float (3-bit mantissa, 5-bit exponent) and then reinterpreting the result as an integer. This method results in 256 total bins whose sizes follow the (logarithmic) floating point distribution. The first 17 bins are exact bins, which is very nice for efficiently handling small allocations, especially compared to other binning methods like power-of-two bins. 23 | 
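To make the binning concrete, here is a minimal, self-contained sketch of that size-to-bin mapping. It mirrors the round-up path used when servicing a request (the private `floatFromSize`/`sizeFromFloat` helpers in `src/SubAllocator.zig` are the real implementation; the name `binIndexCeil` below is made up for illustration):

```zig
const std = @import("std");

/// Illustrative sketch only (not part of the library's API): maps a size to its
/// bin index by building a tiny float with a 3-bit mantissa and 5-bit exponent,
/// then reinterpreting it as a `u8`. Rounds up so the bin always fits the request.
fn binIndexCeil(size: u32) u8 {
    if (size <= 7) return @intCast(size); // exact bins: exponent 0, mantissa = size
    const highest_bit: u5 = @intCast(31 - @clz(size));
    const mantissa_start: u5 = highest_bit - 3;
    var mantissa: u32 = size >> mantissa_start; // top 4 bits, incl. the implicit leading 1
    const low_mask = (@as(u32, 1) << mantissa_start) - 1;
    if ((size & low_mask) != 0) mantissa += 1; // round up to the next representable size
    const exponent: u32 = @as(u32, mantissa_start) + 1 + (mantissa >> 4); // mantissa == 16 carries
    return @intCast((exponent << 3) | (mantissa & 0b111));
}

test binIndexCeil {
    try std.testing.expectEqual(@as(u8, 3), binIndexCeil(3)); // small sizes hit exact bins
    try std.testing.expectEqual(@as(u8, 172), binIndexCeil(11688920)); // rounded up to bin 172
}
```

The resulting 8-bit bin index splits into a 5-bit top-bin index (the exponent) and a 3-bit leaf-bin index (the mantissa), which is exactly how the two-stage bit set is indexed.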
24 | Then it tries to pop a node from the free list of the calculated bin. If there is none, it will instead query the bit sets for the next set bit, which indicates the closest larger bin that still has free nodes available. Those nodes will be oversized, but they will fit the allocation. 25 | 26 | If there is any excess memory (because of an oversized bin or alignment), it will be put back into the free list as a new node at the appropriate bin to combat internal fragmentation. 27 | 28 | The metadata of each node keeps track of its direct in-memory neighbors, and a node will try to coalesce with them when it gets freed. 29 | 30 | All of this works without any loops, which results in guaranteed O(1) time complexity. 31 | 32 | ## Usage 33 | 34 | ### Add this project to yours as a dependency: 35 | 36 | 1. Run this command: 37 | 38 | ```sh 39 | zig fetch --save git+https://github.com/Justus2308/zuballoc.git 40 | ``` 41 | 42 | 2. Import the module into your `build.zig` file: 43 | 44 | ```zig 45 | const zuballoc_dependency = b.dependency("zuballoc", .{ 46 | .target = target, 47 | .optimize = optimize, 48 | }); 49 | exe.root_module.addImport("zuballoc", zuballoc_dependency.module("zuballoc")); 50 | ``` 51 | 52 | ### Use as a module in your code: 53 | 54 | ```zig 55 | const std = @import("std"); 56 | const zuballoc = @import("zuballoc"); 57 | 58 | pub fn main() !void { 59 | var buffer: [1024]u8 = undefined; 60 | var sub_allocator = try zuballoc.SubAllocator.init(std.heap.smp_allocator, &buffer, 256); 61 | defer sub_allocator.deinit(std.heap.smp_allocator); 62 | 63 | // Allocations with external metadata 64 | const slice_allocation = try sub_allocator.allocWithMetadata(u8, 116); 65 | defer sub_allocator.freeWithMetadata(slice_allocation); 66 | const slice: []u8 = slice_allocation.get(); 67 | 68 | const single_allocation = try sub_allocator.createWithMetadata(u64); 69 | defer sub_allocator.destroyWithMetadata(single_allocation); 70 | const item: *u64 = single_allocation.get(); 71 | 72 | // Allocations with embedded metadata 73 | const allocator = sub_allocator.allocator(); 74 | 75 | const memory = try allocator.alloc(u16, 12); 76 | defer allocator.free(memory); 77 | 78 | var list = std.ArrayListUnmanaged(u32).empty; 79 | defer list.deinit(allocator); 80 | 81 | try list.append(allocator, 123); 82 | 83 | _ = .{ slice, item }; 84 | } 85 | ``` 86 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub fn build(b: *std.Build) void { 4 | const target = b.standardTargetOptions(.{}); 5 | const optimize = b.standardOptimizeOption(.{}); 6 | 7 | const zuballoc_mod = 
b.addModule("zuballoc", .{ 8 | .root_source_file = b.path("src/zuballoc.zig"), 9 | .target = target, 10 | .optimize = optimize, 11 | }); 12 | 13 | const unit_tests = b.addTest(.{ 14 | .root_module = zuballoc_mod, 15 | }); 16 | 17 | const run_unit_tests = b.addRunArtifact(unit_tests); 18 | 19 | const test_step = b.step("test", "Run unit tests"); 20 | test_step.dependOn(&run_unit_tests.step); 21 | } 22 | -------------------------------------------------------------------------------- /build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = .zuballoc, 3 | .version = "0.1.0", 4 | 5 | .fingerprint = 0x16f60641a697bb9, // Changing this has security and trust implications. 6 | 7 | .minimum_zig_version = "0.14.0", 8 | 9 | .paths = .{ 10 | "build.zig", 11 | "build.zig.zon", 12 | "src", 13 | "LICENSE", 14 | }, 15 | } 16 | -------------------------------------------------------------------------------- /src/SubAllocator.zig: -------------------------------------------------------------------------------- 1 | ptr: [*]u8, 2 | size: Size, 3 | free_storage: Size, 4 | 5 | used_bins_top: std.bit_set.IntegerBitSet(top_bin_count), 6 | used_bins: [top_bin_count]std.bit_set.IntegerBitSet(bins_per_leaf), 7 | bin_indices: [leaf_bin_count]Node.Index, 8 | 9 | nodes: Nodes, 10 | is_used: std.DynamicBitSetUnmanaged, 11 | free_nodes: Indices, 12 | free_offset: Node.Index, 13 | 14 | const top_bin_count = (1 << f8.exponent_bit_count); 15 | const bins_per_leaf = (1 << f8.mantissa_bit_count); 16 | const leaf_bin_count = (top_bin_count * bins_per_leaf); 17 | 18 | const Self = @This(); 19 | 20 | const Size = u32; 21 | const Log2Size = math.Log2Int(Size); 22 | 23 | const Node = struct { 24 | data_offset: Size, 25 | data_size: Size, 26 | bin_list_prev: Index, 27 | bin_list_next: Index, 28 | neighbor_prev: Index, 29 | neighbor_next: Index, 30 | 31 | pub const IndexType = u32; 32 | 33 | pub const Index = enum(IndexType) { 34 | unused = math.maxInt(IndexType), 35 | _, 36 | 37 | pub inline fn from(val: IndexType) Index { 38 | const index: Index = @enumFromInt(val); 39 | assert(index != .unused); 40 | return index; 41 | } 42 | pub inline fn asInt(index: Index) IndexType { 43 | assert(index != .unused); 44 | return @intFromEnum(index); 45 | } 46 | 47 | /// Allows wrapping from `.unused` to 0, but not incrementing to `.unused`. 48 | pub inline fn incr(index: *Index) void { 49 | const n = (@intFromEnum(index.*) +% 1); 50 | index.* = @enumFromInt(n); 51 | assert(index.* != .unused); 52 | } 53 | /// Allows wrapping to `.unused`, but not decrementing from `.unused`. 
54 | pub inline fn decr(index: *Index) void { 55 | assert(index.* != .unused); 56 | const n = (@intFromEnum(index.*) -% 1); 57 | index.* = @enumFromInt(n); 58 | } 59 | }; 60 | pub const Log2Index = math.Log2Int(IndexType); 61 | }; 62 | 63 | const is_debug = (builtin.mode == .Debug); 64 | const is_safe = (is_debug or builtin.mode == .ReleaseSafe); 65 | 66 | // Make automatic OOB checks possible in safe builds 67 | const Nodes = if (is_safe) []Node else [*]Node; 68 | const Indices = if (is_safe) []Node.Index else [*]Node.Index; 69 | 70 | const f8 = packed struct(u8) { 71 | mantissa: u3, 72 | exponent: u5, 73 | 74 | pub const Mantissa = u3; 75 | pub const Exponent = u5; 76 | 77 | /// 0b111 78 | pub const max_mantissa = math.maxInt(Mantissa); 79 | /// 0b11111 80 | pub const max_exponent = math.maxInt(Exponent); 81 | 82 | pub const mantissa_bit_count = @as(comptime_int, @typeInfo(Mantissa).int.bits); 83 | pub const exponent_bit_count = @as(comptime_int, @typeInfo(Exponent).int.bits); 84 | 85 | pub inline fn asInt(float: f8) u8 { 86 | return @bitCast(float); 87 | } 88 | }; 89 | 90 | const FloatFromSizeMode = enum { floor, ceil }; 91 | fn floatFromSize(comptime mode: FloatFromSizeMode, size: Size) f8 { 92 | var float = @as(f8, @bitCast(@as(u8, 0))); 93 | 94 | if (size <= f8.max_mantissa) { 95 | float.mantissa = @truncate(size); 96 | } else { 97 | const leading_zeroes: Log2Size = @truncate(@clz(size)); 98 | const highest_set_bit = (math.maxInt(Log2Size) - leading_zeroes); 99 | 100 | const mantissa_start_bit = highest_set_bit - f8.mantissa_bit_count; 101 | float.exponent = @intCast(mantissa_start_bit + 1); 102 | float.mantissa = @truncate(size >> mantissa_start_bit); 103 | 104 | const low_bit_mask = (@as(Size, 1) << mantissa_start_bit) - 1; 105 | 106 | // Round up 107 | if (mode == .ceil and (size & low_bit_mask) != 0) { 108 | float.mantissa, const carry = @addWithOverflow(float.mantissa, 1); 109 | float.exponent += carry; 110 | } 111 | } 112 | 113 | return float; 114 | } 115 | 116 | fn sizeFromFloat(float: f8) Size { 117 | if (float.exponent == 0) { 118 | return float.mantissa; 119 | } else { 120 | return (@as(Size, float.mantissa) | (@as(Size, 1) << f8.mantissa_bit_count)) << (float.exponent - 1); 121 | } 122 | } 123 | 124 | fn findFirstSetAfter(mask: anytype, start_index: math.Log2Int(@TypeOf(mask))) ?math.Log2Int(@TypeOf(mask)) { 125 | const mask_before_start_index = (@as(@TypeOf(mask), 1) << start_index) - 1; 126 | const mask_after_start_index = ~mask_before_start_index; 127 | const bits_after = (mask & mask_after_start_index); 128 | if (bits_after == 0) { 129 | return null; 130 | } 131 | return @truncate(@ctz(bits_after)); 132 | } 133 | 134 | pub fn init(gpa: Allocator, buffer: []u8, max_alloc_count: Node.IndexType) Allocator.Error!Self { 135 | assert(max_alloc_count > 0); 136 | var self: Self = undefined; 137 | self.ptr = buffer.ptr; 138 | self.size = @intCast(buffer.len); 139 | const node_count = (max_alloc_count + 1); 140 | self.nodes = @ptrCast(try gpa.alloc(Node, node_count)); 141 | errdefer gpa.free(self.nodes[0..node_count]); 142 | self.free_nodes = @ptrCast(try gpa.alloc(Node.Index, node_count)); 143 | errdefer gpa.free(self.free_nodes[0..node_count]); 144 | self.is_used = try .initEmpty(gpa, node_count); 145 | errdefer self.is_used.deinit(gpa); 146 | self.reset(); 147 | return self; 148 | } 149 | 150 | pub fn deinit(self: *Self, gpa: Allocator) void { 151 | const node_count = (self.maxAllocCount() + 1); 152 | gpa.free(self.nodes[0..node_count]); 153 | 
gpa.free(self.free_nodes[0..node_count]); 154 | self.is_used.deinit(gpa); 155 | } 156 | 157 | pub fn reset(self: *Self) void { 158 | self.free_storage = 0; 159 | self.used_bins_top = .initEmpty(); 160 | self.free_offset = .from(@intCast(self.maxAllocCount())); 161 | 162 | @memset(&self.used_bins, .initEmpty()); 163 | @memset(&self.bin_indices, .unused); 164 | 165 | const node_count = (self.maxAllocCount() + 1); 166 | 167 | // Freelist is a stack. Nodes in inverse order so that [0] pops first. 168 | for (0..node_count) |i| { 169 | self.free_nodes[i] = .from(@intCast(self.maxAllocCount() - @as(u32, @intCast(i)))); 170 | } 171 | @memset(self.nodes[0..node_count], Node{ 172 | .data_offset = 0, 173 | .data_size = 0, 174 | .bin_list_prev = .unused, 175 | .bin_list_next = .unused, 176 | .neighbor_prev = .unused, 177 | .neighbor_next = .unused, 178 | }); 179 | self.is_used.unsetAll(); 180 | 181 | // Start state: Whole storage as one big node 182 | // Algorithm will split remainders and push them back as smaller nodes 183 | _ = self.insertNodeIntoBin(self.size, 0); 184 | } 185 | 186 | pub fn ownsPtr(self: Self, ptr: [*]const u8) bool { 187 | return sliceContainsPtr(self.ptr[0..self.size], ptr); 188 | } 189 | 190 | pub fn ownsSlice(self: Self, slice: []const u8) bool { 191 | return sliceContainsSlice(self.ptr[0..self.size], slice); 192 | } 193 | 194 | pub fn maxAllocCount(self: Self) u32 { 195 | return @intCast(self.is_used.bit_length - 1); 196 | } 197 | 198 | pub fn totalFreeSpace(self: Self) Size { 199 | if (self.free_offset == .unused) { 200 | return 0; 201 | } 202 | return self.free_storage; 203 | } 204 | 205 | pub fn largestFreeRegion(self: Self) Size { 206 | if (self.free_offset == .unused or self.used_bins_top.count() == 0) { 207 | return 0; 208 | } 209 | const top_bin_index = self.used_bins_top.findLastSet() orelse 0; 210 | const leaf_bin_index = self.used_bins[top_bin_index].findLastSet() orelse 0; 211 | const float = f8{ .mantissa = @intCast(leaf_bin_index), .exponent = @intCast(top_bin_index) }; 212 | const largest_free_region = sizeFromFloat(float); 213 | assert(self.free_storage >= largest_free_region); 214 | return largest_free_region; 215 | } 216 | 217 | pub const StorageReport = struct { 218 | free_regions: [leaf_bin_count]Region, 219 | 220 | pub const Region = struct { 221 | size: Size, 222 | count: u32, 223 | }; 224 | 225 | pub fn print(report: StorageReport) void { 226 | std.debug.print("===== STORAGE REPORT =====\n", .{}); 227 | for (report.free_regions, 0..) 
|free_region, i| { 228 | if (free_region.count > 0) { 229 | std.debug.print("[{d}] size={d},count={d}\n", .{ 230 | i, 231 | free_region.size, 232 | free_region.count, 233 | }); 234 | } 235 | } 236 | std.debug.print("==========================\n", .{}); 237 | } 238 | }; 239 | 240 | pub fn storageReport(self: Self) StorageReport { 241 | var report: StorageReport = .{ .free_regions = undefined }; 242 | for (0..leaf_bin_count) |i| { 243 | var count: u32 = 0; 244 | var node_index = self.bin_indices[i]; 245 | while (node_index != .unused) : (node_index = self.nodes[node_index.asInt()].bin_list_next) { 246 | count += 1; 247 | } 248 | report.free_regions[i] = .{ .size = sizeFromFloat(@bitCast(@as(u8, @intCast(i)))), .count = count }; 249 | } 250 | return report; 251 | } 252 | 253 | fn insertNodeIntoBin(self: *Self, size: Size, data_offset: Size) Node.Index { 254 | // Round down to bin index to ensure that bin >= alloc 255 | const bin_index = floatFromSize(.floor, size); 256 | 257 | const top_bin_index = bin_index.exponent; 258 | const leaf_bin_index = bin_index.mantissa; 259 | 260 | // Bin was empty before? 261 | if (self.bin_indices[bin_index.asInt()] == .unused) { 262 | // Set bin mask bits 263 | self.used_bins[top_bin_index].set(leaf_bin_index); 264 | self.used_bins_top.set(top_bin_index); 265 | } 266 | 267 | // Take a freelist node and insert on top of the bin linked list (next = old top) 268 | const top_node_index = self.bin_indices[bin_index.asInt()]; 269 | const node_index = self.free_nodes[self.free_offset.asInt()]; 270 | log.debug("Getting node {d} from freelist[{d}] (insertNodeIntoBin)", .{ node_index.asInt(), self.free_offset.asInt() }); 271 | self.free_offset.decr(); 272 | 273 | self.nodes[node_index.asInt()] = .{ 274 | .data_offset = data_offset, 275 | .data_size = size, 276 | .bin_list_prev = .unused, 277 | .bin_list_next = top_node_index, 278 | .neighbor_prev = .unused, 279 | .neighbor_next = .unused, 280 | }; 281 | if (top_node_index != .unused) { 282 | self.nodes[top_node_index.asInt()].bin_list_prev = node_index; 283 | } 284 | self.bin_indices[bin_index.asInt()] = node_index; 285 | 286 | self.free_storage += size; 287 | 288 | log.debug("Free storage: {d} (+{d}) (insertNodeIntoBin)", .{ self.free_storage, size }); 289 | 290 | return node_index; 291 | } 292 | 293 | fn removeNodeFromBin(self: *Self, node_index: Node.Index) void { 294 | const node: *Node = &self.nodes[node_index.asInt()]; 295 | 296 | if (node.bin_list_prev != .unused) { 297 | // Easy case: We have previous node. Just remove this node from the middle of the list. 298 | self.nodes[node.bin_list_prev.asInt()].bin_list_next = node.bin_list_next; 299 | if (node.bin_list_next != .unused) { 300 | self.nodes[node.bin_list_next.asInt()].bin_list_prev = node.bin_list_prev; 301 | } 302 | } else { 303 | // Hard case: We are the first node in a bin. Find the bin. 304 | 305 | // Round down to bin index to ensure that bin >= alloc 306 | const bin_index = floatFromSize(.floor, node.data_size); 307 | 308 | const top_bin_index = bin_index.exponent; 309 | const leaf_bin_index = bin_index.mantissa; 310 | 311 | self.bin_indices[bin_index.asInt()] = node.bin_list_next; 312 | if (node.bin_list_next != .unused) { 313 | self.nodes[node.bin_list_next.asInt()].bin_list_prev = .unused; 314 | } 315 | 316 | // Bin empty? 317 | if (self.bin_indices[bin_index.asInt()] == .unused) { 318 | // Remove a leaf bin mask bit 319 | self.used_bins[top_bin_index].unset(leaf_bin_index); 320 | 321 | // All leaf bins empty? 
322 | if (self.used_bins[top_bin_index].mask == 0) { 323 | // Remove a top bin mask bit 324 | self.used_bins_top.unset(top_bin_index); 325 | } 326 | } 327 | 328 | // Insert the node to freelist 329 | self.free_offset.incr(); 330 | log.debug("Putting node {d} into freelist[{d}] (removeNodeFromBin)", .{ node_index.asInt(), self.free_offset.asInt() }); 331 | self.free_nodes[self.free_offset.asInt()] = node_index; 332 | 333 | self.free_storage -= node.data_size; 334 | 335 | log.debug("Free storage: {d} (-{d}) (removeNodeFromBin)", .{ self.free_storage, node.data_size }); 336 | } 337 | } 338 | 339 | pub const AllocationKind = enum { 340 | pointer, 341 | slice, 342 | }; 343 | pub const AllocationKindWithAlignment = union(AllocationKind) { 344 | pointer, 345 | slice: ?Alignment, 346 | 347 | pub fn selfAligned(kind: AllocationKind) AllocationKindWithAlignment { 348 | return switch (kind) { 349 | .pointer => .pointer, 350 | .slice => .{ .slice = null }, 351 | }; 352 | } 353 | }; 354 | pub fn Allocation(comptime T: type, comptime kind: AllocationKindWithAlignment) type { 355 | return switch (kind) { 356 | .pointer => struct { 357 | ptr: *T, 358 | metadata: InnerAllocation.Metadata, 359 | 360 | pub inline fn get(self: @This()) *T { 361 | return self.ptr; 362 | } 363 | 364 | pub inline fn slice(self: @This()) []T { 365 | return @as([*]T, @ptrCast(self.ptr))[0..1]; 366 | } 367 | 368 | pub fn toGeneric(self: @This()) GenericAllocation { 369 | return GenericAllocation{ 370 | .ptr = @ptrCast(self.ptr), 371 | .size = @sizeOf(T), 372 | .metadata = self.metadata, 373 | }; 374 | } 375 | }, 376 | .slice => |alignment| slice: { 377 | const alignment_in_bytes = if (alignment) |a| a.toByteUnits() else @alignOf(T); 378 | break :slice struct { 379 | ptr: [*]align(alignment_in_bytes) T, 380 | len: Size, 381 | metadata: InnerAllocation.Metadata, 382 | 383 | pub inline fn get(self: @This()) []align(alignment_in_bytes) T { 384 | return self.slice(); 385 | } 386 | 387 | pub inline fn slice(self: @This()) []align(alignment_in_bytes) T { 388 | return self.ptr[0..self.len]; 389 | } 390 | 391 | pub fn toGeneric(self: @This()) GenericAllocation { 392 | return GenericAllocation{ 393 | .ptr = @ptrCast(self.ptr), 394 | .size = (@sizeOf(T) * self.len), 395 | .metadata = self.metadata, 396 | }; 397 | } 398 | }; 399 | }, 400 | }; 401 | } 402 | 403 | /// Type-erased `Allocation` type for easier generic 404 | /// storage of different kinds of allocations. 405 | /// Passing this type to any `...WithMetadata` functions 406 | /// without casting first results in undefined behavior! 
407 | pub const GenericAllocation = struct { 408 | ptr: [*]u8, 409 | size: Size, 410 | metadata: InnerAllocation.Metadata, 411 | 412 | pub const CastError = math.AlignCastError || error{InvalidSize}; 413 | 414 | pub fn cast(self: GenericAllocation, comptime T: type, comptime kind: AllocationKind) CastError!Allocation(T, .selfAligned(kind)) { 415 | return switch (kind) { 416 | .pointer => if (self.size != @sizeOf(T)) { 417 | return CastError.InvalidSize; 418 | } else .{ 419 | .ptr = @ptrCast(try math.alignCast(.of(T), self.ptr)), 420 | .metadata = self.metadata, 421 | }, 422 | .slice => self.castAligned(T, null), 423 | }; 424 | } 425 | pub fn castAligned(self: GenericAllocation, comptime T: type, comptime alignment: ?Alignment) CastError!Allocation(T, .{ .slice = alignment }) { 426 | return .{ 427 | .ptr = @ptrCast(try math.alignCast(alignment orelse .of(T), self.ptr)), 428 | .len = math.divExact(Size, self.size, @sizeOf(T)) catch return CastError.InvalidSize, 429 | .metadata = self.metadata, 430 | }; 431 | } 432 | 433 | pub fn raw(self: GenericAllocation) []u8 { 434 | return self.ptr[0..self.size]; 435 | } 436 | }; 437 | 438 | pub fn createWithMetadata(self: *Self, comptime T: type) Allocator.Error!Allocation(T, .pointer) { 439 | const size: Size = @sizeOf(T); 440 | const inner_allocation = self.innerAlloc(.external, size, .of(T)); 441 | if (inner_allocation.isOutOfMemory()) { 442 | return Allocator.Error.OutOfMemory; 443 | } 444 | const ptr: *T = @ptrCast(@alignCast(self.ptr[inner_allocation.offset..][0..@sizeOf(T)])); 445 | return .{ 446 | .ptr = ptr, 447 | .metadata = inner_allocation.metadata, 448 | }; 449 | } 450 | 451 | pub fn destroyWithMetadata(self: *Self, allocation: anytype) void { 452 | const ptr_info = @typeInfo(@FieldType(@TypeOf(allocation), "ptr")).pointer; 453 | if (ptr_info.size != .one) @compileError("allocation must be of type pointer"); 454 | 455 | assert(@inComptime() or self.ownsSlice(mem.asBytes(allocation.ptr))); 456 | self.innerFree(allocation.metadata); 457 | } 458 | 459 | pub fn allocWithMetadata(self: *Self, comptime T: type, n: Size) Allocator.Error!Allocation(T, .{ .slice = null }) { 460 | return self.alignedAllocWithMetadata(T, null, n); 461 | } 462 | 463 | pub fn alignedAllocWithMetadata( 464 | self: *Self, 465 | comptime T: type, 466 | comptime alignment: ?Alignment, 467 | n: Size, 468 | ) Allocator.Error!Allocation(T, .{ .slice = alignment }) { 469 | const alignment_resolved = alignment orelse Alignment.of(T); 470 | const size: Size = math.cast(Size, (n * @sizeOf(T))) orelse return Allocator.Error.OutOfMemory; 471 | const inner_allocation = self.innerAlloc(.external, size, alignment_resolved); 472 | if (inner_allocation.isOutOfMemory()) { 473 | return Allocator.Error.OutOfMemory; 474 | } 475 | const ptr: [*]align(alignment_resolved.toByteUnits()) T = @ptrCast(@alignCast(self.ptr[inner_allocation.offset..])); 476 | return .{ 477 | .ptr = ptr, 478 | .len = n, 479 | .metadata = inner_allocation.metadata, 480 | }; 481 | } 482 | 483 | pub fn resizeWithMetadata(self: *Self, allocation_ptr: anytype, new_len: Size) bool { 484 | const ptr_info = @typeInfo(@FieldType(std.meta.Child(@TypeOf(allocation_ptr)), "ptr")).pointer; 485 | if (ptr_info.size != .many) @compileError("allocation must be of type slice"); 486 | 487 | assert(@inComptime() or self.ownsSlice(mem.sliceAsBytes(allocation_ptr.ptr[0..allocation_ptr.len]))); 488 | 489 | const old_size = (allocation_ptr.len * @sizeOf(ptr_info.child)); 490 | const new_size = (new_len * @sizeOf(ptr_info.child)); 491 | const 
ok = self.innerResize(allocation_ptr.metadata, old_size, new_size); 492 | if (ok) { 493 | allocation_ptr.len = new_len; 494 | } 495 | return ok; 496 | } 497 | 498 | pub fn freeWithMetadata(self: *Self, allocation: anytype) void { 499 | const ptr_info = @typeInfo(@FieldType(@TypeOf(allocation), "ptr")).pointer; 500 | if (ptr_info.size != .many) @compileError("allocation must be of type slice"); 501 | 502 | assert(@inComptime() or self.ownsSlice(mem.sliceAsBytes(allocation.ptr[0..allocation.len]))); 503 | self.innerFree(allocation.metadata); 504 | } 505 | 506 | const InnerAllocation = struct { 507 | offset: Size, 508 | metadata: Metadata, 509 | 510 | pub const oom = InnerAllocation{ 511 | .offset = undefined, 512 | .metadata = .unused, 513 | }; 514 | 515 | pub const Metadata = Node.Index; 516 | 517 | pub inline fn isOutOfMemory(inner_allocation: InnerAllocation) bool { 518 | return (inner_allocation.metadata == .unused); 519 | } 520 | }; 521 | 522 | const MetadataKind = enum { external, embedded }; 523 | 524 | fn innerAlloc(self: *Self, comptime metadata_kind: MetadataKind, size: Size, alignment: Alignment) InnerAllocation { 525 | if (self.free_offset == .unused) { 526 | @branchHint(.unlikely); 527 | return .oom; 528 | } 529 | 530 | const effective_alignment = switch (metadata_kind) { 531 | .external => alignment, 532 | .embedded => Alignment.max(alignment, .of(InnerAllocation.Metadata)), 533 | }; 534 | const effective_alignment_in_bytes = math.cast(Size, effective_alignment.toByteUnits()) orelse { 535 | @branchHint(.cold); 536 | return .oom; 537 | }; 538 | const effective_size = switch (metadata_kind) { 539 | .external => size, 540 | .embedded => (effective_alignment_in_bytes + size), 541 | }; 542 | const size_alignable = (effective_size + effective_alignment_in_bytes - 1); 543 | 544 | // Round up to bin index to ensure that alloc >= bin 545 | // Gives us min bin index that fits the size 546 | const min_bin_index = floatFromSize(.ceil, size_alignable); 547 | 548 | const min_top_bin_index = min_bin_index.exponent; 549 | const min_leaf_bin_index = min_bin_index.mantissa; 550 | 551 | const top_bin_index: f8.Exponent, const leaf_bin_index: f8.Mantissa = indices: { 552 | // If top bin exists, scan its leaf bin. This can fail. 553 | if (self.used_bins_top.isSet(min_top_bin_index)) { 554 | if (findFirstSetAfter(self.used_bins[min_top_bin_index].mask, min_leaf_bin_index)) |leaf_bin_index| { 555 | break :indices .{ min_top_bin_index, leaf_bin_index }; 556 | } 557 | } 558 | 559 | // If we didn't find space in top bin, we search top bin from +1 560 | const top_bin_index = findFirstSetAfter(self.used_bins_top.mask, (min_top_bin_index + 1)) orelse { 561 | // OOM 562 | return .oom; 563 | }; 564 | 565 | // All leaf bins here fit the alloc, since the top bin was rounded up. Start leaf search from bit 0. 566 | // NOTE: This search can't fail since at least one leaf bit was set because the top bit was set. 567 | const leaf_bin_index: f8.Mantissa = @intCast(self.used_bins[top_bin_index].findFirstSet().?); 568 | 569 | break :indices .{ top_bin_index, leaf_bin_index }; 570 | }; 571 | 572 | const bin_index = f8.asInt(.{ 573 | .mantissa = leaf_bin_index, 574 | .exponent = top_bin_index, 575 | }); 576 | 577 | // Pop the top node of the bin. Bin top = node.next. 578 | // We also need to account for alignment by offsetting 579 | // into the actual allocation until we are aligned. 
580 | const node_index = self.bin_indices[bin_index]; 581 | const node: *Node = &self.nodes[node_index.asInt()]; 582 | const node_total_size = node.data_size; 583 | 584 | const alignment_padding = padding: { 585 | const base_addr = (@intFromPtr(self.ptr) + node.data_offset); 586 | const aligned_addr = effective_alignment.forward(base_addr); 587 | const padding: Size = @intCast(aligned_addr - base_addr); 588 | if (metadata_kind == .embedded and padding < @sizeOf(InnerAllocation.Metadata)) { 589 | // Metadata does not fit into alignment padding yet 590 | break :padding (padding + effective_alignment_in_bytes); 591 | } else { 592 | break :padding padding; 593 | } 594 | }; 595 | const size_aligned = (alignment_padding + size); 596 | assert(size_aligned <= node_total_size); 597 | 598 | self.is_used.set(node_index.asInt()); 599 | self.bin_indices[bin_index] = node.bin_list_next; 600 | if (node.bin_list_next != .unused) { 601 | self.nodes[node.bin_list_next.asInt()].bin_list_prev = .unused; 602 | } 603 | self.free_storage -= node_total_size; 604 | 605 | log.debug("Free storage: {d} (-{d}) (alloc)", .{ self.free_storage, node_total_size }); 606 | 607 | // Bin empty? 608 | if (self.bin_indices[bin_index] == .unused) { 609 | // Remove a leaf bin mask bit 610 | self.used_bins[top_bin_index].unset(leaf_bin_index); 611 | 612 | // All leaf bins empty? 613 | if (self.used_bins[top_bin_index].mask == 0) { 614 | // Remove a top bin mask bit 615 | self.used_bins_top.unset(top_bin_index); 616 | } 617 | } 618 | 619 | const offset = (node.data_offset + alignment_padding); 620 | 621 | // Push back remainder N elements to a lower bin 622 | const remainder_size_rhs = (node_total_size - size_aligned); 623 | if (remainder_size_rhs > 0) { 624 | const new_node_index = self.insertNodeIntoBin(remainder_size_rhs, (node.data_offset + size_aligned)); 625 | node.data_size -= remainder_size_rhs; 626 | 627 | // Link nodes next to each other so that we can merge them later if both are free 628 | // And update the old next neighbor to point to the new node (in middle) 629 | if (node.neighbor_next != .unused) { 630 | self.nodes[node.neighbor_next.asInt()].neighbor_prev = new_node_index; 631 | self.nodes[new_node_index.asInt()].neighbor_next = node.neighbor_next; 632 | } 633 | self.nodes[new_node_index.asInt()].neighbor_prev = node_index; 634 | node.neighbor_next = new_node_index; 635 | } 636 | 637 | const remainder_size_lhs = switch (metadata_kind) { 638 | .external => alignment_padding, 639 | .embedded => (alignment_padding - @sizeOf(InnerAllocation.Metadata)), 640 | }; 641 | if (remainder_size_lhs > 0) { 642 | const new_node_index = self.insertNodeIntoBin(remainder_size_lhs, node.data_offset); 643 | node.data_offset += remainder_size_lhs; 644 | node.data_size -= remainder_size_lhs; 645 | 646 | // Link nodes next to each other so that we can merge them later if both are free 647 | // And update the old next neighbor to point to the new node (in middle) 648 | if (node.neighbor_prev != .unused) { 649 | self.nodes[node.neighbor_prev.asInt()].neighbor_next = new_node_index; 650 | self.nodes[new_node_index.asInt()].neighbor_prev = node.neighbor_prev; 651 | } 652 | self.nodes[new_node_index.asInt()].neighbor_next = node_index; 653 | node.neighbor_prev = new_node_index; 654 | } 655 | 656 | return InnerAllocation{ 657 | .offset = offset, 658 | .metadata = node_index, 659 | }; 660 | } 661 | 662 | fn innerResize(self: *Self, metadata: InnerAllocation.Metadata, old_size: Size, new_size: Size) bool { 663 | const node_index = metadata; 664 
| 665 | assert(self.is_used.isSet(node_index.asInt())); 666 | const node: *Node = &self.nodes[node_index.asInt()]; 667 | 668 | const size_diff = (@as(i64, new_size) - @as(i64, old_size)); 669 | 670 | if (size_diff > 0) { 671 | if (node.neighbor_next == .unused or self.is_used.isSet(node.neighbor_next.asInt())) { 672 | return false; 673 | } 674 | const next_node: *Node = &self.nodes[node.neighbor_next.asInt()]; 675 | 676 | // Check if the neighbor node can fit the requested size 677 | if (next_node.data_size < @as(Size, @intCast(size_diff))) { 678 | return false; 679 | } 680 | log.debug("Resizing node {d} from {d} to {d} (resize)", .{ 681 | node_index.asInt(), 682 | node.data_size, 683 | (node.data_size + @as(Size, @intCast(size_diff))), 684 | }); 685 | node.data_size += @as(Size, @intCast(size_diff)); 686 | 687 | const remainder_size = (next_node.data_size - @as(Size, @intCast(size_diff))); 688 | const neighbor_next = next_node.neighbor_next; 689 | 690 | // Remove node from the bin linked list 691 | self.removeNodeFromBin(node.neighbor_next); 692 | 693 | if (remainder_size > 0) { 694 | const new_node_index = self.insertNodeIntoBin(remainder_size, (node.data_offset + node.data_size)); 695 | 696 | if (neighbor_next != .unused) { 697 | self.nodes[neighbor_next.asInt()].neighbor_prev = new_node_index; 698 | self.nodes[new_node_index.asInt()].neighbor_next = neighbor_next; 699 | } 700 | self.nodes[new_node_index.asInt()].neighbor_prev = node_index; 701 | node.neighbor_next = new_node_index; 702 | } 703 | 704 | assert(next_node.neighbor_prev == Node.Index.from(node_index.asInt())); 705 | node.neighbor_next = next_node.neighbor_next; 706 | } else if (size_diff < 0) { 707 | const remainder_size: Size = @intCast(-size_diff); 708 | log.debug("Resizing node {d} from {d} to {d} (resize)", .{ 709 | node_index.asInt(), 710 | node.data_size, 711 | (node.data_size - @as(Size, @intCast(-size_diff))), 712 | }); 713 | node.data_size -= remainder_size; 714 | 715 | const new_node_index = self.insertNodeIntoBin(remainder_size, (node.data_offset + node.data_size)); 716 | 717 | if (node.neighbor_next != .unused) { 718 | self.nodes[node.neighbor_next.asInt()].neighbor_prev = new_node_index; 719 | self.nodes[new_node_index.asInt()].neighbor_next = node.neighbor_next; 720 | } 721 | self.nodes[new_node_index.asInt()].neighbor_prev = node_index; 722 | node.neighbor_next = new_node_index; 723 | } 724 | 725 | return true; 726 | } 727 | 728 | fn innerFree(self: *Self, metadata: InnerAllocation.Metadata) void { 729 | const node_index = metadata; 730 | 731 | assert(self.is_used.isSet(node_index.asInt())); 732 | const node: *Node = &self.nodes[node_index.asInt()]; 733 | 734 | // Merge with neighbors... 735 | var offset = node.data_offset; 736 | var size = node.data_size; 737 | 738 | if (node.neighbor_prev != .unused and !self.is_used.isSet(node.neighbor_prev.asInt())) { 739 | // Previous (contiguous) free node: Change offset to previous node offset. 
Sum sizes 740 | const prev_node: *Node = &self.nodes[node.neighbor_prev.asInt()]; 741 | offset = prev_node.data_offset; 742 | size += prev_node.data_size; 743 | 744 | // Remove node from the bin linked list and put it in the freelist 745 | self.removeNodeFromBin(node.neighbor_prev); 746 | 747 | assert(prev_node.neighbor_next == Node.Index.from(node_index.asInt())); 748 | node.neighbor_prev = prev_node.neighbor_prev; 749 | } 750 | 751 | if (node.neighbor_next != .unused and self.is_used.isSet(node.neighbor_next.asInt()) == false) { 752 | // Next (contiguous) free node: Offset remains the same. Sum sizes. 753 | const next_node: *Node = &self.nodes[node.neighbor_next.asInt()]; 754 | size += next_node.data_size; 755 | 756 | // Remove node from the bin linked list and put it in the freelist 757 | self.removeNodeFromBin(node.neighbor_next); 758 | 759 | assert(next_node.neighbor_prev == Node.Index.from(node_index.asInt())); 760 | node.neighbor_next = next_node.neighbor_next; 761 | } 762 | 763 | const neighbor_next = node.neighbor_next; 764 | const neighbor_prev = node.neighbor_prev; 765 | 766 | // Insert the removed node to freelist 767 | self.free_offset.incr(); 768 | log.debug("Putting node {d} into freelist[{d}] (free)", .{ node_index.asInt(), self.free_offset.asInt() }); 769 | self.free_nodes[self.free_offset.asInt()] = node_index; 770 | 771 | // Insert the (combined) free node to bin 772 | const combined_node_index = self.insertNodeIntoBin(size, offset); 773 | 774 | // Connect neighbors with the new combined node 775 | if (neighbor_next != .unused) { 776 | self.nodes[combined_node_index.asInt()].neighbor_next = neighbor_next; 777 | self.nodes[neighbor_next.asInt()].neighbor_prev = combined_node_index; 778 | } 779 | if (neighbor_prev != .unused) { 780 | self.nodes[combined_node_index.asInt()].neighbor_prev = neighbor_prev; 781 | self.nodes[neighbor_prev.asInt()].neighbor_next = combined_node_index; 782 | } 783 | } 784 | 785 | /// To conform to Zig's `Allocator` interface this allocator uses 786 | /// embedded metadata and might not be suitable for some use cases. 787 | /// If you need externally stored metadata, use the `...WithMetadata` 788 | /// functions this type provides. 789 | /// Note that the effective size of all allocations with embedded 790 | /// metadata will be at least `@sizeOf(Node.Index) + alloc_size`. 
791 | pub fn allocator(self: *Self) Allocator { 792 | return Allocator{ 793 | .ptr = @ptrCast(self), 794 | .vtable = &.{ 795 | .alloc = alloc, 796 | .resize = resize, 797 | .remap = Allocator.noRemap, 798 | .free = free, 799 | }, 800 | }; 801 | } 802 | 803 | fn alloc(context: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 { 804 | _ = ret_addr; 805 | 806 | const size = math.cast(Size, len) orelse { 807 | @branchHint(.cold); 808 | return null; 809 | }; 810 | 811 | const self: *Self = @ptrCast(@alignCast(context)); 812 | const inner_allocation = self.innerAlloc(.embedded, size, alignment); 813 | if (inner_allocation.isOutOfMemory()) { 814 | @branchHint(.unlikely); 815 | return null; 816 | } 817 | 818 | const ptr = self.ptr[inner_allocation.offset..]; 819 | const metadata_ptr: *InnerAllocation.Metadata = 820 | @ptrCast(@alignCast(self.ptr[(inner_allocation.offset - @sizeOf(InnerAllocation.Metadata))..][0..@sizeOf(InnerAllocation.Metadata)])); 821 | metadata_ptr.* = inner_allocation.metadata; 822 | 823 | return ptr; 824 | } 825 | 826 | fn resize(context: *anyopaque, memory: []u8, alignment: Alignment, new_len: usize, ret_addr: usize) bool { 827 | _ = .{ alignment, ret_addr }; 828 | 829 | const old_size: Size = @intCast(memory.len); 830 | const new_size = math.cast(Size, new_len) orelse { 831 | @branchHint(.cold); 832 | return false; 833 | }; 834 | 835 | const self: *Self = @ptrCast(@alignCast(context)); 836 | 837 | assert(memory.len >= @sizeOf(InnerAllocation.Metadata)); 838 | assert(self.ownsSlice(memory)); 839 | 840 | const metadata_ptr: *InnerAllocation.Metadata = @ptrFromInt(@intFromPtr(memory.ptr) - @sizeOf(InnerAllocation.Metadata)); 841 | assert(self.ownsPtr(@ptrCast(metadata_ptr))); 842 | const metadata = metadata_ptr.*; 843 | assert(metadata != .unused); 844 | 845 | return self.innerResize(metadata, old_size, new_size); 846 | } 847 | 848 | fn free(context: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void { 849 | _ = .{ alignment, ret_addr }; 850 | 851 | const self: *Self = @ptrCast(@alignCast(context)); 852 | 853 | assert(memory.len >= @sizeOf(InnerAllocation.Metadata)); 854 | assert(self.ownsSlice(memory)); 855 | 856 | const metadata_ptr: *InnerAllocation.Metadata = @ptrFromInt(@intFromPtr(memory.ptr) - @sizeOf(InnerAllocation.Metadata)); 857 | assert(self.ownsPtr(@ptrCast(metadata_ptr))); 858 | const metadata = metadata_ptr.*; 859 | assert(metadata != .unused); 860 | 861 | self.innerFree(metadata); 862 | } 863 | 864 | fn sliceContainsPtr(container: []const u8, ptr: [*]const u8) bool { 865 | return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and 866 | @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len); 867 | } 868 | 869 | fn sliceContainsSlice(container: []const u8, slice: []const u8) bool { 870 | return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and 871 | (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len); 872 | } 873 | 874 | comptime { 875 | if (mem.byte_size_in_bits != 8) { 876 | @compileError("this allocator depends on byte size being 8 bits"); 877 | } 878 | } 879 | 880 | const builtin = @import("builtin"); 881 | const std = @import("std"); 882 | const math = std.math; 883 | const mem = std.mem; 884 | const testing = std.testing; 885 | const Alignment = mem.Alignment; 886 | const Allocator = mem.Allocator; 887 | const assert = std.debug.assert; 888 | const log = std.log.scoped(.suballocator); 889 | 890 | // TESTS 891 | 892 | const testing_log_level = std.log.Level.debug; 893 | 
894 | fn testFloatSizeConversion(size: Size, size_floor: Size, size_ceil: Size) !void { 895 | const floored = floatFromSize(.floor, size); 896 | const ceiled = floatFromSize(.ceil, size); 897 | try testing.expect(sizeFromFloat(floored) == size_floor); 898 | try testing.expect(sizeFromFloat(ceiled) == size_ceil); 899 | } 900 | 901 | test "f8<->Size conversion" { 902 | // From known bins 903 | try testing.expect(sizeFromFloat(@bitCast(@as(u8, 3))) == 3); 904 | try testing.expect(sizeFromFloat(@bitCast(@as(u8, 92))) == 12288); 905 | try testing.expect(sizeFromFloat(@bitCast(@as(u8, 211))) == 369098752); 906 | 907 | // To known bins 908 | try testing.expect(floatFromSize(.floor, 3).asInt() == 3); 909 | try testing.expect(floatFromSize(.floor, 11688920).asInt() == 171); 910 | try testing.expect(floatFromSize(.ceil, 11688920).asInt() == 172); 911 | 912 | // Exact 913 | for (0..17) |i| { 914 | const size: Size = @intCast(i); 915 | try testFloatSizeConversion(size, size, size); 916 | } 917 | try testFloatSizeConversion(180224, 180224, 180224); 918 | try testFloatSizeConversion(2952790016, 2952790016, 2952790016); 919 | 920 | // Between bins 921 | try testFloatSizeConversion(19, 18, 20); 922 | try testFloatSizeConversion(21267, 20480, 22528); 923 | try testFloatSizeConversion(24678495, 23068672, 25165824); 924 | } 925 | 926 | test findFirstSetAfter { 927 | const n: u8 = 0b0001_0010; 928 | 929 | const b1 = findFirstSetAfter(n, 0); 930 | const b2 = findFirstSetAfter(n, 3); 931 | const b3 = findFirstSetAfter(n, 4); 932 | const b4 = findFirstSetAfter(n, 6); 933 | 934 | try testing.expect(b1 != null and b1.? == 1); 935 | try testing.expect(b2 != null and b2.? == 4); 936 | try testing.expect(b3 != null and b3.? == 4); 937 | try testing.expect(b4 == null); 938 | } 939 | 940 | var test_memory: [800000 * @sizeOf(u64)]u8 = undefined; 941 | const test_max_allocs = 800000; 942 | 943 | test "basic usage with external metadata" { 944 | testing.log_level = testing_log_level; 945 | 946 | var self = try Self.init(testing.allocator, &test_memory, test_max_allocs); 947 | defer self.deinit(testing.allocator); 948 | 949 | const ptr = try self.createWithMetadata(u16); 950 | defer self.destroyWithMetadata(ptr); 951 | 952 | try testing.expect(self.totalFreeSpace() == (test_memory.len - @sizeOf(u16))); 953 | 954 | const slice = try self.allocWithMetadata(u16, 3); 955 | defer self.freeWithMetadata(slice); 956 | 957 | try testing.expect(slice.len == 3); 958 | try testing.expect(self.totalFreeSpace() == (test_memory.len - (4 * @sizeOf(u16)))); 959 | 960 | const big_slice = try self.allocWithMetadata(u16, 600000); 961 | defer self.freeWithMetadata(big_slice); 962 | 963 | try testing.expect(big_slice.len == 600000); 964 | try testing.expect(self.totalFreeSpace() == (test_memory.len - (600004 * @sizeOf(u16)))); 965 | 966 | ptr.get().* = 0xBABA; 967 | try testing.expect(ptr.ptr.* == 0xBABA); 968 | 969 | slice.get()[0] = 0xBABA; 970 | try testing.expect(slice.slice()[0] == 0xBABA); 971 | } 972 | 973 | test "basic usage with embedded metadata" { 974 | testing.log_level = testing_log_level; 975 | 976 | var self = try Self.init(testing.allocator, &test_memory, test_max_allocs); 977 | defer self.deinit(testing.allocator); 978 | 979 | const a = self.allocator(); 980 | 981 | try std.heap.testAllocator(a); 982 | try std.heap.testAllocatorAligned(a); 983 | try std.heap.testAllocatorLargeAlignment(a); 984 | try std.heap.testAllocatorAlignedShrink(a); 985 | } 986 | 987 | test "skewed allocation parameters" { 988 | testing.log_level = 
testing_log_level; 989 | 990 | const buffer = try testing.allocator.alignedAlloc(u8, .@"16", 1001); 991 | defer testing.allocator.free(buffer); 992 | 993 | // 1000 bytes capacity 994 | var self = try Self.init(testing.allocator, buffer[1..buffer.len], 32); 995 | defer self.deinit(testing.allocator); 996 | 997 | // Since buffer is 15 bytes out of alignment padding is required. 998 | // This padding should be released back to the allocator as a node. 999 | const a1 = try self.alignedAllocWithMetadata(u8, .@"16", 500); 1000 | defer self.freeWithMetadata(a1); 1001 | 1002 | try testing.expect(self.totalFreeSpace() == 500); 1003 | 1004 | // Should result in two nodes with 15 and 485 bytes respectively. 1005 | // We cannot allocate the full 485 bits here because of fragmentation. 1006 | const a2 = try self.allocWithMetadata(u8, 460); 1007 | defer self.freeWithMetadata(a2); 1008 | 1009 | const a3 = try self.allocWithMetadata(u8, 15); 1010 | defer self.freeWithMetadata(a3); 1011 | 1012 | try testing.expect(self.totalFreeSpace() == 25); 1013 | } 1014 | 1015 | test "aligned allocations" { 1016 | testing.log_level = testing_log_level; 1017 | 1018 | var self = try Self.init(testing.allocator, &test_memory, test_max_allocs); 1019 | defer self.deinit(testing.allocator); 1020 | 1021 | const max_align_pow2 = 16; 1022 | 1023 | var allocations: [max_align_pow2]GenericAllocation = undefined; 1024 | inline for (0..max_align_pow2) |i| { 1025 | const alignment: Alignment = @enumFromInt(i); 1026 | const allocation = try self.alignedAllocWithMetadata(u8, alignment, 32); 1027 | allocations[i] = allocation.toGeneric(); 1028 | } 1029 | inline for (0..max_align_pow2) |i| { 1030 | const allocation = try allocations[i].castAligned(u8, @enumFromInt(i)); 1031 | self.freeWithMetadata(allocation); 1032 | } 1033 | } 1034 | 1035 | test "resize" { 1036 | testing.log_level = testing_log_level; 1037 | 1038 | var self = try Self.init(testing.allocator, &test_memory, test_max_allocs); 1039 | defer self.deinit(testing.allocator); 1040 | 1041 | const a1 = try self.allocWithMetadata(u8, 12); 1042 | defer self.freeWithMetadata(a1); 1043 | 1044 | const a2 = try self.allocWithMetadata(u8, 12); 1045 | 1046 | var a3 = try self.allocWithMetadata(u8, 12); 1047 | defer self.freeWithMetadata(a3); 1048 | 1049 | try testing.expect(self.totalFreeSpace() == (test_memory.len - 12 - 12 - 12)); 1050 | 1051 | self.freeWithMetadata(a2); 1052 | 1053 | try testing.expect(self.totalFreeSpace() == (test_memory.len - 12 - 12)); 1054 | 1055 | const ok = self.resizeWithMetadata(&a3, 20); 1056 | try testing.expect(ok); 1057 | 1058 | try testing.expect(self.totalFreeSpace() == (test_memory.len - 12 - 20)); 1059 | } 1060 | 1061 | test reset { 1062 | testing.log_level = testing_log_level; 1063 | 1064 | var buffer: [8]u8 = undefined; 1065 | var self = try Self.init(testing.allocator, @ptrCast(&buffer), 32); 1066 | defer self.deinit(testing.allocator); 1067 | 1068 | var allocation = try self.allocWithMetadata(u8, 8); 1069 | defer self.freeWithMetadata(allocation); 1070 | 1071 | try testing.expectError(Allocator.Error.OutOfMemory, self.allocWithMetadata(u8, 1)); 1072 | 1073 | self.reset(); 1074 | 1075 | allocation = try self.allocWithMetadata(u8, 8); 1076 | } 1077 | 1078 | test GenericAllocation { 1079 | testing.log_level = testing_log_level; 1080 | 1081 | var self = try Self.init(testing.allocator, &test_memory, 256); 1082 | defer self.deinit(testing.allocator); 1083 | 1084 | const a1 = try self.createWithMetadata(u64); 1085 | const a2 = try 
self.allocWithMetadata(u32, 128); 1086 | const a3 = try self.alignedAllocWithMetadata(u16, .@"8", 32); 1087 | 1088 | const g1 = a1.toGeneric(); 1089 | const g2 = a2.toGeneric(); 1090 | const g3 = a3.toGeneric(); 1091 | 1092 | try testing.expect(self.ownsSlice(g1.raw())); 1093 | try testing.expect(self.ownsSlice(g2.raw())); 1094 | try testing.expect(self.ownsSlice(g3.raw())); 1095 | 1096 | try testing.expect(g1.size == @sizeOf(u64)); 1097 | try testing.expect(g2.size == (a2.len * @sizeOf(u32))); 1098 | try testing.expect(g3.size == (a3.len * @sizeOf(u16))); 1099 | 1100 | const c1 = try g1.cast(u64, .pointer); 1101 | const c2 = try g2.cast(u32, .slice); 1102 | const c3 = try g3.castAligned(u16, .@"8"); 1103 | 1104 | try testing.expectEqualDeep(c1, a1); 1105 | try testing.expectEqualDeep(c2, a2); 1106 | try testing.expectEqualDeep(c3, a3); 1107 | } 1108 | -------------------------------------------------------------------------------- /src/zuballoc.zig: -------------------------------------------------------------------------------- 1 | pub const SubAllocator = @import("SubAllocator.zig"); 2 | 3 | test { 4 | @import("std").testing.refAllDecls(SubAllocator); 5 | } 6 | --------------------------------------------------------------------------------