├── .github └── FUNDING.yml ├── .gitignore ├── LICENSE ├── README.md ├── gpda.zig └── test ├── double-free.zig ├── fuzz.zig ├── invalid-free.zig ├── leak.zig └── use-after-free.zig /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [andrewrk] 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (Expat) 2 | 3 | Copyright (c) 2019 Andrew Kelley 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # This Project Has Been Merged Upstream 2 | 3 | This code was integrated into the Zig Standard Library in 4 | [Pull Request #5998](https://github.com/ziglang/zig/pull/5998). 5 | 6 | **This repository is no longer maintained.** 7 | 8 | # GeneralPurposeDebugAllocator 9 | 10 | This is the code for [my Zig Live Coding Stream](https://www.twitch.tv/andrewrok). 11 | 12 | This is a work-in-progress general purpose allocator intended to be eventually merged 13 | into the [Zig](https://ziglang.org/) standard library, with a focus on these goals: 14 | 15 | * Detect double free, and print stack traces of: 16 | - Where it was first allocated 17 | - Where it was freed the first time 18 | - Where it was freed the second time 19 | 20 | * Detect leaks and print the stack trace of: 21 | - Where it was allocated 22 | 23 | * When a page of memory is no longer needed, give the physical memory back to the OS 24 | (so it no longer counts as resident memory), but keep it mapped with no permissions 25 | (read/write/exec) so that any use causes a page fault. 26 | 27 | * Make pointer math errors unlikely to harm memory from 28 | unrelated allocations. 29 | 30 | * It's OK for these mechanisms to cost some extra bytes and for 31 | memory to become a little fragmented. 32 | 33 | * It's OK for these mechanisms to have a performance cost. 34 | 35 | * Rogue memory writes should not harm the allocator's state. 36 | 37 | * Cross platform. 
Allowed to take advantage of a specific operating system's 38 | features, but should work everywhere, even freestanding, by wrapping an 39 | existing allocator. 40 | 41 | * Compile-time configuration, including: 42 | - Whether the allocator is to be thread-safe. If thread-safety is disabled, 43 | the debug allocator will detect invalid usage from multiple threads. 44 | - How many stack frames to collect. 45 | 46 | ## Goals for Other General Purpose Allocators But Not This One 47 | 48 | ReleaseFast and ReleaseSmall Modes: 49 | 50 | * Low fragmentation is the primary concern 51 | * Performance of worst-case latency is a secondary concern 52 | * Performance of average-case latency is next 53 | * Finally, having freed memory unmapped, and making pointer math errors unlikely to 54 | harm memory from unrelated allocations, are nice-to-haves. 55 | 56 | ReleaseSafe Mode: 57 | 58 | * Low fragmentation is the primary concern 59 | * All the safety mechanisms from Debug Mode are the next concern. 60 | * It's OK for these mechanisms to take up some percent overhead 61 | of memory, but not at the cost of fragmentation, which can cause 62 | the equivalent of memory leaks. 63 | 64 | ## Current Status 65 | 66 | * POSIX-only so far. 67 | * Most basic functionality works. See Roadmap below for what's left to do. 68 | * Not well tested yet. 69 | 70 | Memory leak detection: 71 | 72 | ![](https://i.imgur.com/KufxrKm.png) 73 | 74 | Double free detection: 75 | 76 | ![](https://i.imgur.com/5M5xS95.png) 77 | 78 | ### Current Design 79 | 80 | Small allocations are divided into buckets: 81 | 82 | ``` 83 | index obj_size 84 | 0 1 85 | 1 2 86 | 2 4 87 | 3 8 88 | 4 16 89 | 5 32 90 | 6 64 91 | 7 128 92 | 8 256 93 | 9 512 94 | 10 1024 95 | 11 2048 96 | ``` 97 | 98 | The main allocator state has an array of all the "current" buckets for each 99 | size class. Each slot in the array can be null, meaning the bucket for that 100 | size class is not allocated. When the first object is allocated for a given 101 | size class, the allocator requests one page of memory from the OS. This page is 102 | divided into "slots" - one per allocated object. Along with the page of memory 103 | for object slots, as many pages as necessary are allocated to store the 104 | BucketHeader, followed by "used bits", and two stack traces for each slot 105 | (allocation trace and free trace). 106 | 107 | The "used bits" are 1 bit per slot representing whether the slot is used. 108 | Allocations iterate over these bits to find a free slot. Frees assert that the 109 | corresponding bit is 1 and set it to 0. 110 | 111 | The allocator's state lives on its own page, with no write permissions. 112 | On each call to reallocFn and shrinkFn, the allocator uses mprotect to make its own state 113 | writable, and then removes write permissions before returning. However, bucket 114 | metadata is not protected in this way yet. 115 | 116 | Buckets have prev and next pointers. When there is only one bucket for a given 117 | size class, both prev and next point to the bucket itself. When all slots of a bucket are 118 | used, a new bucket is allocated and inserted into the doubly linked list. The main 119 | allocator state tracks the "current" bucket for each size class. Leak detection 120 | currently only checks the current bucket. 121 | 122 | Reallocation detects whether the size class is unchanged, in which case the same 123 | pointer is returned unmodified. If a different size class is required, the 124 | allocator attempts to allocate a new slot, copy the bytes, and then free the 125 | old slot. 
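To make the size-class machinery above concrete, here is a minimal, illustrative sketch of how a request maps to a size class and bucket index: the requested size (or the alignment, whichever is larger) is rounded up to the next power of two, and the bucket index is the log2 of that size class. `upToNearestPowerOf2` below is a local stand-in for the `up_to_nearest_power_of_2` helper in `gpda.zig`; the snippet is not part of the allocator itself.

```
const std = @import("std");
const assert = std.debug.assert;
const page_size = std.mem.page_size;

// Stand-in for `up_to_nearest_power_of_2` in gpda.zig.
fn upToNearestPowerOf2(comptime T: type, n: T) T {
    var power: T = 1;
    while (power < n)
        power *= 2;
    return power;
}

test "size class selection (illustrative)" {
    const request_size: usize = 20;
    const request_align: usize = 8;
    // The allocator considers max(size, alignment) before picking a size class.
    const aligned_size = std.math.max(request_size, request_align);
    // A 20-byte request rounds up to the 32-byte size class...
    const size_class = upToNearestPowerOf2(usize, aligned_size);
    assert(size_class == 32);
    // ...which is bucket index 5 in the table above...
    assert(std.math.log2(size_class) == 5);
    // ...and one page of that bucket is divided into page_size / 32 slots.
    assert(@divExact(page_size, size_class) == page_size / 32);
}
```

For instance, the tests in this repository allocate 513 `u64` values to overflow one bucket: with 4096-byte pages, the 8-byte size class has 512 slots per page, so the 513th allocation forces a second bucket.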
126 | 127 | Large objects are allocated directly using `mmap` and their metadata is stored 128 | in a `std.HashMap` backed by a simple direct allocator. The hash map's data 129 | is memory protected with the same strategy as the allocator's state. 130 | 131 | ## Roadmap 132 | 133 | * Port to Windows 134 | - Make sure that use after free tests work. 135 | * Test mmap hints on other platforms: 136 | - macOS 137 | - FreeBSD 138 | * Ability to print stats 139 | - Requested Bytes Allocated (total of n for every alloc minus n for every free) 140 | - Resident Bytes (pagesize * number of pages mmapped for slots) 141 | - Overhead Bytes (how much memory the allocator state is using) 142 | * Validation fuzz testing 143 | - vary the size and alignment of allocations 144 | - vary the number of and kind of operations in between allocations and 145 | corresponding frees 146 | - vary whether or not the backing allocator succeeds 147 | - how much memory capacity it goes up to 148 | * When allocating new pages for small objects, if virtual address space is 149 | exhausted, fall back to using the oldest freed memory, whether that be 150 | unused pages, or freed slots. 151 | * When falling back to old unused pages, if we get an error from the OS from 152 | reactivating the page, then fall back to a freed slot. 153 | * Implement handling of multiple threads. 154 | * On invalid free, print nearest allocation/deallocation stack trace 155 | * Do the memory protection for bucket metadata too 156 | * Catch the error when wrong size or wrong alignment is given to free or realloc/shrink. 157 | * Performance benchmarking 158 | - Do we need meta-buckets? 159 | * Iterate over usize instead of u8 for used bits 160 | * When configured to be non-thread-safe, then detect usage with multiple threads, 161 | and print stack traces showing where it was used in each thread. 162 | * Write unit tests / regression tests 163 | * Make `std.HashMap` return bytes back to the allocator when the hash map gets 164 | smaller. 165 | * Make deallocated but still mapped bytes be `0xdd`. 166 | * Detect when client uses the wrong `old_align` or `old_mem.len`. 167 | * Keep track of first allocation stack trace as well as reallocation stack trace 168 | for large objects. 169 | * Test whether it is an improvement to try to use an mmap hint when growing 170 | a large object and it has to mmap more. 171 | * Once a bucket becomes full, remove it from the linked list of buckets that are 172 | used to find allocation slots. 173 | -------------------------------------------------------------------------------- /gpda.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const os = std.os; 3 | const builtin = @import("builtin"); 4 | const assert = std.debug.assert; 5 | const Allocator = std.mem.Allocator; 6 | const page_size = std.mem.page_size; 7 | 8 | /// Integer type for pointing to slots in a small allocation 9 | const SlotIndex = @IntType(false, std.math.log2(page_size) + 1); 10 | 11 | pub const Config = struct { 12 | /// Number of stack frames to capture. 13 | stack_trace_frames: usize = 4, 14 | 15 | /// Whether the allocator is configured to accept a backing 16 | /// allocator used for the underlying memory. 17 | /// false means it will make syscalls directly, and 18 | /// the create() function takes no arguments. 19 | /// If this is set to true, create() takes a *Allocator parameter. 
20 | backing_allocator: bool = false, 21 | 22 | /// Whether to use mprotect to take away write permission 23 | /// from allocator internal state to prevent allocator state 24 | /// corruption. Enabling this catches bugs but is slower. 25 | memory_protection: bool = true, 26 | }; 27 | 28 | pub fn GeneralPurposeDebugAllocator(comptime config: Config) type { 29 | return struct { 30 | allocator: Allocator, 31 | backing_allocator: BackingAllocator, 32 | buckets: [small_bucket_count]?*BucketHeader, 33 | simple_allocator: SimpleAllocatorType, 34 | large_allocations: LargeAllocTable, 35 | 36 | total_requested_bytes: usize, 37 | requested_memory_limit: usize, 38 | 39 | comptime { 40 | if (config.backing_allocator and config.memory_protection) { 41 | @compileError("Memory protection is unavailable when using a backing allocator"); 42 | } 43 | } 44 | 45 | const Self = @This(); 46 | 47 | const BackingAllocator = if (config.backing_allocator) *Allocator else void; 48 | const SimpleAllocatorType = if (config.backing_allocator) void else SimpleAllocator; 49 | 50 | const stack_n = config.stack_trace_frames; 51 | const one_trace_size = @sizeOf(usize) * stack_n; 52 | const traces_per_slot = 2; 53 | 54 | pub const Error = std.mem.Allocator.Error; 55 | 56 | const small_bucket_count = std.math.log2(page_size); 57 | const largest_bucket_object_size = 1 << (small_bucket_count - 1); 58 | 59 | const LargeAlloc = struct { 60 | bytes: []u8, 61 | stack_addresses: [stack_n]usize, 62 | 63 | fn dumpStackTrace(self: *LargeAlloc) void { 64 | var len: usize = 0; 65 | while (len < stack_n and self.stack_addresses[len] != 0) { 66 | len += 1; 67 | } 68 | const stack_trace = builtin.StackTrace{ 69 | .instruction_addresses = &self.stack_addresses, 70 | .index = len, 71 | }; 72 | std.debug.dumpStackTrace(stack_trace); 73 | } 74 | }; 75 | const LargeAllocTable = std.HashMap(usize, LargeAlloc, hash_addr, eql_addr); 76 | 77 | pub fn createWithAllocator(backing_allocator: BackingAllocator) !*Self { 78 | const self = blk: { 79 | if (config.backing_allocator) { 80 | break :blk try backing_allocator.create(Self); 81 | } else { 82 | const self_bytes = try sysAlloc(undefined, @sizeOf(Self)); 83 | break :blk @ptrCast(*Self, self_bytes.ptr); 84 | } 85 | }; 86 | self.* = Self{ 87 | .allocator = Allocator{ 88 | .reallocFn = realloc, 89 | .shrinkFn = shrink, 90 | }, 91 | .backing_allocator = backing_allocator, 92 | .buckets = [1]?*BucketHeader{null} ** small_bucket_count, 93 | .simple_allocator = if (config.backing_allocator) {} else SimpleAllocator.init(), 94 | .large_allocations = LargeAllocTable.init(if (config.backing_allocator) 95 | backing_allocator 96 | else 97 | &self.simple_allocator.allocator), 98 | 99 | .total_requested_bytes = 0, 100 | .requested_memory_limit = std.math.maxInt(usize), 101 | }; 102 | try self.mprotectInit(os.PROT_READ); 103 | return self; 104 | } 105 | 106 | pub fn create() !*Self { 107 | if (config.backing_allocator) { 108 | @compileError("GeneralPurposeDebugAllocator has backing_allocator enabled therefore client must call createWithAllocator()"); 109 | } 110 | return createWithAllocator({}); 111 | } 112 | 113 | // Bucket: In memory, in order: 114 | // * BucketHeader 115 | // * bucket_used_bits: [N]u8, // 1 bit for every slot; 1 byte for every 8 slots 116 | // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation 117 | 118 | const BucketHeader = struct { 119 | prev: *BucketHeader, 120 | next: *BucketHeader, 121 | page: [*]align(page_size) u8, 122 | alloc_cursor: SlotIndex, 123 | used_count: 
SlotIndex, 124 | 125 | fn usedBits(bucket: *BucketHeader, index: usize) *u8 { 126 | return @intToPtr(*u8, @ptrToInt(bucket) + @sizeOf(BucketHeader) + index); 127 | } 128 | 129 | fn stackTracePtr( 130 | bucket: *BucketHeader, 131 | size_class: usize, 132 | slot_index: SlotIndex, 133 | trace_kind: TraceKind, 134 | ) *[stack_n]usize { 135 | const start_ptr = @ptrCast([*]u8, bucket) + bucketStackFramesStart(size_class); 136 | const addr = start_ptr + one_trace_size * traces_per_slot * slot_index + 137 | @enumToInt(trace_kind) * usize(one_trace_size); 138 | return @ptrCast(*[stack_n]usize, addr); 139 | } 140 | 141 | fn captureStackTrace( 142 | bucket: *BucketHeader, 143 | return_address: usize, 144 | size_class: usize, 145 | slot_index: SlotIndex, 146 | trace_kind: TraceKind, 147 | ) void { 148 | // Initialize them to 0. When determining the count we must look 149 | // for non zero addresses. 150 | const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind); 151 | collectStackTrace(return_address, stack_addresses); 152 | } 153 | }; 154 | 155 | fn bucketStackTrace( 156 | bucket: *BucketHeader, 157 | size_class: usize, 158 | slot_index: SlotIndex, 159 | trace_kind: TraceKind, 160 | ) builtin.StackTrace { 161 | const stack_addresses = bucket.stackTracePtr(size_class, slot_index, trace_kind); 162 | var len: usize = 0; 163 | while (len < stack_n and stack_addresses[len] != 0) { 164 | len += 1; 165 | } 166 | return builtin.StackTrace{ 167 | .instruction_addresses = stack_addresses, 168 | .index = len, 169 | }; 170 | } 171 | 172 | fn bucketStackFramesStart(size_class: usize) usize { 173 | return std.mem.alignForward( 174 | @sizeOf(BucketHeader) + usedBitsCount(size_class), 175 | @alignOf(usize), 176 | ); 177 | } 178 | 179 | fn bucketSize(size_class: usize) usize { 180 | const slot_count = @divExact(page_size, size_class); 181 | return bucketStackFramesStart(size_class) + one_trace_size * traces_per_slot * slot_count; 182 | } 183 | 184 | fn usedBitsCount(size_class: usize) usize { 185 | const slot_count = @divExact(page_size, size_class); 186 | if (slot_count < 8) return 1; 187 | return @divExact(slot_count, 8); 188 | } 189 | 190 | fn mprotectInit(self: *Self, protection: u32) Error!void { 191 | if (!config.memory_protection) return; 192 | const slice = @intToPtr([*]align(page_size) u8, @ptrToInt(self))[0..page_size]; 193 | os.mprotect(slice, protection) catch |e| switch (e) { 194 | error.AccessDenied => unreachable, 195 | error.OutOfMemory => return error.OutOfMemory, 196 | error.Unexpected => return error.OutOfMemory, 197 | }; 198 | } 199 | 200 | fn mprotect(self: *Self, protection: u32) void { 201 | if (!config.memory_protection) return; 202 | const slice = @intToPtr([*]align(page_size) u8, @ptrToInt(self))[0..page_size]; 203 | os.mprotect(slice, protection) catch unreachable; 204 | } 205 | 206 | fn detectLeaksInBucket( 207 | bucket: *BucketHeader, 208 | size_class: usize, 209 | used_bits_count: usize, 210 | ) void { 211 | var used_bits_byte: usize = 0; 212 | while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) { 213 | const used_byte = bucket.usedBits(used_bits_byte).*; 214 | if (used_byte != 0) { 215 | var bit_index: u3 = 0; 216 | while (true) : (bit_index += 1) { 217 | const is_used = @truncate(u1, used_byte >> bit_index) != 0; 218 | if (is_used) { 219 | std.debug.warn("\nMemory leak detected:\n"); 220 | const slot_index = @intCast(SlotIndex, used_bits_byte * 8 + bit_index); 221 | const stack_trace = bucketStackTrace( 222 | bucket, 223 | size_class, 224 | 
slot_index, 225 | .Alloc, 226 | ); 227 | std.debug.dumpStackTrace(stack_trace); 228 | } 229 | if (bit_index == std.math.maxInt(u3)) 230 | break; 231 | } 232 | } 233 | } 234 | } 235 | 236 | pub fn destroy(self: *Self) void { 237 | for (self.buckets) |optional_bucket, bucket_i| { 238 | const first_bucket = optional_bucket orelse continue; 239 | const size_class = usize(1) << @intCast(u6, bucket_i); 240 | const used_bits_count = usedBitsCount(size_class); 241 | var bucket = first_bucket; 242 | while (true) { 243 | detectLeaksInBucket(bucket, size_class, used_bits_count); 244 | bucket = bucket.next; 245 | if (bucket == first_bucket) 246 | break; 247 | } 248 | } 249 | var large_it = self.large_allocations.iterator(); 250 | while (large_it.next()) |large_alloc| { 251 | std.debug.warn("\nMemory leak detected:\n"); 252 | large_alloc.value.dumpStackTrace(); 253 | } 254 | if (!config.backing_allocator) 255 | self.simple_allocator.deinit(); // Free large_allocations memory. 256 | self.sysFree(@ptrCast([*]u8, self)[0..@sizeOf(Self)]); 257 | } 258 | 259 | fn directAlloc( 260 | self: *Self, 261 | n: usize, 262 | alignment: u29, 263 | first_trace_addr: usize, 264 | ) Error![]u8 { 265 | const alloc_size = if (alignment <= page_size) n else n + alignment; 266 | const slice = try sysAlloc(self, alloc_size); 267 | errdefer self.sysFree(slice); 268 | 269 | if (alloc_size == n) { 270 | try self.trackLargeAlloc(slice, first_trace_addr); 271 | return slice; 272 | } 273 | 274 | const addr = @ptrToInt(slice.ptr); 275 | const aligned_addr = std.mem.alignForward(addr, alignment); 276 | 277 | // Unmap the extra bytes that were only requested in order to guarantee 278 | // that the range of memory we were provided had a proper alignment in 279 | // it somewhere. The extra bytes could be at the beginning, or end, or both. 280 | const unused_start = slice[0 .. 
aligned_addr - addr]; 281 | if (unused_start.len != 0) { 282 | self.sysFree(unused_start); 283 | } 284 | const aligned_end_addr = std.mem.alignForward(aligned_addr + n, page_size); 285 | const unused_end_len = @ptrToInt(slice.ptr + slice.len) - aligned_end_addr; 286 | const unused_end = @intToPtr([*]u8, aligned_end_addr)[0..unused_end_len]; 287 | if (unused_end.len != 0) { 288 | self.sysFree(unused_end); 289 | } 290 | 291 | const result = @intToPtr([*]u8, aligned_addr)[0..n]; 292 | try self.trackLargeAlloc(result, first_trace_addr); 293 | return result; 294 | } 295 | 296 | fn mprotectLargeAllocs(self: *Self, flags: u32) void { 297 | if (!config.memory_protection) return; 298 | if (config.backing_allocator) return; 299 | self.simple_allocator.mprotect(flags); 300 | } 301 | 302 | fn trackLargeAlloc( 303 | self: *Self, 304 | bytes: []u8, 305 | first_trace_addr: usize, 306 | ) !void { 307 | self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ); 308 | defer self.mprotectLargeAllocs(os.PROT_READ); 309 | 310 | const gop = try self.large_allocations.getOrPut(@ptrToInt(bytes.ptr)); 311 | if (gop.found_existing) { 312 | @panic("OS provided unexpected memory address"); 313 | } 314 | gop.kv.value.bytes = bytes; 315 | collectStackTrace(first_trace_addr, &gop.kv.value.stack_addresses); 316 | } 317 | 318 | fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void { 319 | std.mem.set(usize, addresses, 0); 320 | var stack_trace = builtin.StackTrace{ 321 | .instruction_addresses = addresses, 322 | .index = 0, 323 | }; 324 | std.debug.captureStackTrace(first_trace_addr, &stack_trace); 325 | } 326 | 327 | fn allocSlot( 328 | self: *Self, 329 | size_class: usize, 330 | trace_addr: usize, 331 | ) Error![*]u8 { 332 | const bucket_index = std.math.log2(size_class); 333 | const first_bucket = self.buckets[bucket_index] orelse try self.createBucket( 334 | size_class, 335 | bucket_index, 336 | ); 337 | var bucket = first_bucket; 338 | const slot_count = @divExact(page_size, size_class); 339 | while (bucket.alloc_cursor == slot_count) { 340 | const prev_bucket = bucket; 341 | bucket = prev_bucket.next; 342 | if (bucket == first_bucket) { 343 | // make a new one 344 | bucket = try self.createBucket(size_class, bucket_index); 345 | bucket.prev = prev_bucket; 346 | bucket.next = prev_bucket.next; 347 | prev_bucket.next = bucket; 348 | bucket.next.prev = bucket; 349 | } 350 | } 351 | // change the allocator's current bucket to be this one 352 | self.buckets[bucket_index] = bucket; 353 | 354 | const slot_index = bucket.alloc_cursor; 355 | bucket.alloc_cursor += 1; 356 | 357 | var used_bits_byte = bucket.usedBits(slot_index / 8); 358 | const used_bit_index: u3 = @intCast(u3, slot_index % 8); // TODO cast should be unnecessary 359 | used_bits_byte.* |= (u8(1) << used_bit_index); 360 | bucket.used_count += 1; 361 | bucket.captureStackTrace(trace_addr, size_class, slot_index, .Alloc); 362 | return bucket.page + slot_index * size_class; 363 | } 364 | 365 | fn searchBucket( 366 | self: *Self, 367 | bucket_index: usize, 368 | addr: usize, 369 | ) ?*BucketHeader { 370 | const first_bucket = self.buckets[bucket_index] orelse return null; 371 | var bucket = first_bucket; 372 | while (true) { 373 | const in_bucket_range = (addr >= @ptrToInt(bucket.page) and 374 | addr < @ptrToInt(bucket.page) + page_size); 375 | if (in_bucket_range) return bucket; 376 | bucket = bucket.prev; 377 | if (bucket == first_bucket) { 378 | return null; 379 | } 380 | self.buckets[bucket_index] = bucket; 381 | } 382 | } 383 | 384 | fn 
freeSlot( 385 | self: *Self, 386 | bucket: *BucketHeader, 387 | bucket_index: usize, 388 | size_class: usize, 389 | slot_index: SlotIndex, 390 | used_byte: *u8, 391 | used_bit_index: u3, 392 | trace_addr: usize, 393 | ) void { 394 | // Capture stack trace to be the "first free", in case a double free happens. 395 | bucket.captureStackTrace(@returnAddress(), size_class, slot_index, .Free); 396 | 397 | used_byte.* &= ~(u8(1) << used_bit_index); 398 | bucket.used_count -= 1; 399 | if (bucket.used_count == 0) { 400 | if (bucket.next == bucket) { 401 | // it's the only bucket and therefore the current one 402 | self.buckets[bucket_index] = null; 403 | } else { 404 | bucket.next.prev = bucket.prev; 405 | bucket.prev.next = bucket.next; 406 | self.buckets[bucket_index] = bucket.prev; 407 | } 408 | self.sysFree(bucket.page[0..page_size]); 409 | const bucket_size = bucketSize(size_class); 410 | const aligned_bucket_size = std.mem.alignForward(bucket_size, page_size); 411 | self.sysFree(@ptrCast([*]u8, bucket)[0..aligned_bucket_size]); 412 | } 413 | } 414 | 415 | const ResizeBehavior = enum { 416 | shrink, 417 | realloc, 418 | }; 419 | 420 | fn directRealloc( 421 | self: *Self, 422 | old_mem: []u8, 423 | new_size: usize, 424 | return_addr: usize, 425 | behavior: ResizeBehavior, 426 | ) Error![]u8 { 427 | self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ); 428 | defer self.mprotectLargeAllocs(os.PROT_READ); 429 | 430 | const old_kv = self.large_allocations.get(@ptrToInt(old_mem.ptr)).?; 431 | const result = old_mem.ptr[0..new_size]; 432 | // TODO test if the old_mem.len is correct 433 | old_kv.value.bytes = result; 434 | collectStackTrace(return_addr, &old_kv.value.stack_addresses); 435 | const old_end_page = std.mem.alignForward(old_mem.len, page_size); 436 | const new_end_page = std.mem.alignForward(new_size, page_size); 437 | if (new_end_page < old_end_page) { 438 | self.sysFree(old_mem.ptr[new_end_page..old_end_page]); 439 | } else if (behavior == .realloc) { 440 | return error.OutOfMemory; 441 | } 442 | return result; 443 | } 444 | 445 | /// This function assumes the object is in the large object storage regardless 446 | /// of the parameters. 
447 | fn resizeLarge( 448 | self: *Self, 449 | old_mem: []u8, 450 | old_align: u29, 451 | new_size: usize, 452 | new_align: u29, 453 | return_addr: usize, 454 | behavior: ResizeBehavior, 455 | ) Error![]u8 { 456 | if (new_size == 0) { 457 | self.directFree(old_mem); 458 | return old_mem[0..0]; 459 | } else if (new_size > old_mem.len or new_align > old_align) { 460 | self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ); 461 | defer self.mprotectLargeAllocs(os.PROT_READ); 462 | 463 | const old_kv = self.large_allocations.get(@ptrToInt(old_mem.ptr)).?; 464 | const end_page = std.mem.alignForward(old_kv.value.bytes.len, page_size); 465 | if (new_size <= end_page and (new_align <= old_align or 466 | isAligned(@ptrToInt(old_mem.ptr), new_align))) 467 | { 468 | const result = old_mem.ptr[0..new_size]; 469 | // TODO test if the old_mem.len is correct 470 | old_kv.value.bytes = result; 471 | collectStackTrace(return_addr, &old_kv.value.stack_addresses); 472 | return result; 473 | } 474 | const new_mem = try self.directAlloc(new_size, new_align, return_addr); 475 | @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(old_mem.len, new_mem.len)); 476 | self.directFree(old_mem); 477 | return new_mem; 478 | } else { 479 | const new_aligned_size = std.math.max(new_size, new_align); 480 | if (new_aligned_size > largest_bucket_object_size) { 481 | return self.directRealloc(old_mem, new_size, return_addr, behavior); 482 | } else { 483 | const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size); 484 | const ptr = self.allocSlot(new_size_class, return_addr) catch |e| switch (e) { 485 | error.OutOfMemory => return self.directRealloc( 486 | old_mem, 487 | new_size, 488 | return_addr, 489 | behavior, 490 | ), 491 | }; 492 | @memcpy(ptr, old_mem.ptr, new_size); 493 | self.directFree(old_mem); 494 | return ptr[0..new_size]; 495 | } 496 | } 497 | } 498 | 499 | pub fn setRequestedMemoryLimit(self: *Self, limit: usize) void { 500 | self.mprotect(os.PROT_WRITE | os.PROT_READ); 501 | defer self.mprotect(os.PROT_READ); 502 | 503 | self.requested_memory_limit = limit; 504 | } 505 | 506 | fn reallocOrShrink( 507 | allocator: *Allocator, 508 | old_mem: []u8, 509 | old_align: u29, 510 | new_size: usize, 511 | new_align: u29, 512 | return_addr: usize, 513 | behavior: ResizeBehavior, 514 | ) Error![]u8 { 515 | const self = @fieldParentPtr(Self, "allocator", allocator); 516 | self.mprotect(os.PROT_WRITE | os.PROT_READ); 517 | defer self.mprotect(os.PROT_READ); 518 | 519 | const prev_req_bytes = self.total_requested_bytes; 520 | const new_req_bytes = prev_req_bytes + new_size - old_mem.len; 521 | if (new_req_bytes > prev_req_bytes and 522 | new_req_bytes > self.requested_memory_limit) 523 | { 524 | return error.OutOfMemory; 525 | } 526 | 527 | self.total_requested_bytes = new_req_bytes; 528 | errdefer self.total_requested_bytes = prev_req_bytes; 529 | 530 | if (old_mem.len == 0) { 531 | assert(behavior == .realloc); 532 | const new_aligned_size = std.math.max(new_size, new_align); 533 | if (new_aligned_size > largest_bucket_object_size) { 534 | return self.directAlloc(new_size, new_align, return_addr); 535 | } else { 536 | const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size); 537 | const ptr = try self.allocSlot(new_size_class, return_addr); 538 | return ptr[0..new_size]; 539 | } 540 | } 541 | 542 | const aligned_size = std.math.max(old_mem.len, old_align); 543 | if (aligned_size > largest_bucket_object_size) { 544 | return self.resizeLarge(old_mem, old_align, new_size, new_align, return_addr, 
behavior); 545 | } 546 | const size_class = up_to_nearest_power_of_2(usize, aligned_size); 547 | 548 | var bucket_index = std.math.log2(size_class); 549 | const bucket = while (bucket_index < small_bucket_count) : (bucket_index += 1) { 550 | if (self.searchBucket(bucket_index, @ptrToInt(old_mem.ptr))) |bucket| { 551 | break bucket; 552 | } 553 | } else { 554 | return self.resizeLarge(old_mem, old_align, new_size, new_align, return_addr, behavior); 555 | }; 556 | const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page); 557 | const slot_index = @intCast(SlotIndex, byte_offset / size_class); 558 | const used_byte_index = slot_index / 8; 559 | const used_bit_index = @intCast(u3, slot_index % 8); 560 | const used_byte = bucket.usedBits(used_byte_index); 561 | const is_used = @truncate(u1, used_byte.* >> used_bit_index) != 0; 562 | if (!is_used) { 563 | // print allocation stack trace 564 | std.debug.warn("\nDouble free detected, allocated here:\n"); 565 | const alloc_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .Alloc); 566 | std.debug.dumpStackTrace(alloc_stack_trace); 567 | std.debug.warn("\nFirst free here:\n"); 568 | const free_stack_trace = bucketStackTrace(bucket, size_class, slot_index, .Free); 569 | std.debug.dumpStackTrace(free_stack_trace); 570 | @panic("\nSecond free here:"); 571 | } 572 | if (new_size == 0) { 573 | self.freeSlot( 574 | bucket, 575 | bucket_index, 576 | size_class, 577 | slot_index, 578 | used_byte, 579 | used_bit_index, 580 | return_addr, 581 | ); 582 | return old_mem[0..0]; 583 | } 584 | const new_aligned_size = std.math.max(new_size, new_align); 585 | const new_size_class = up_to_nearest_power_of_2(usize, new_aligned_size); 586 | if (size_class == new_size_class) { 587 | return old_mem.ptr[0..new_size]; 588 | } 589 | if (new_aligned_size > largest_bucket_object_size) { 590 | self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ); 591 | defer self.mprotectLargeAllocs(os.PROT_READ); 592 | 593 | const new_mem = try self.directAlloc(new_size, new_align, return_addr); 594 | @memcpy(new_mem.ptr, old_mem.ptr, old_mem.len); 595 | self.freeSlot( 596 | bucket, 597 | bucket_index, 598 | size_class, 599 | slot_index, 600 | used_byte, 601 | used_bit_index, 602 | return_addr, 603 | ); 604 | return new_mem; 605 | } 606 | // Migrating to a smaller size class. 607 | const ptr = self.allocSlot(new_size_class, return_addr) catch |e| switch (e) { 608 | error.OutOfMemory => switch (behavior) { 609 | .realloc => return error.OutOfMemory, 610 | .shrink => return old_mem.ptr[0..new_size], 611 | }, 612 | }; 613 | @memcpy(ptr, old_mem.ptr, new_size); 614 | self.freeSlot( 615 | bucket, 616 | bucket_index, 617 | size_class, 618 | slot_index, 619 | used_byte, 620 | used_bit_index, 621 | return_addr, 622 | ); 623 | return ptr[0..new_size]; 624 | } 625 | 626 | fn directFree(self: *Self, bytes: []u8) void { 627 | self.mprotectLargeAllocs(os.PROT_WRITE | os.PROT_READ); 628 | defer self.mprotectLargeAllocs(os.PROT_READ); 629 | 630 | var kv = self.large_allocations.remove(@ptrToInt(bytes.ptr)).?; 631 | if (bytes.len != kv.value.bytes.len) { 632 | std.debug.warn( 633 | "\nAllocation size {} bytes does not match free size {}. 
Allocated here:\n", 634 | kv.value.bytes.len, 635 | bytes.len, 636 | ); 637 | kv.value.dumpStackTrace(); 638 | 639 | @panic("\nFree here:"); 640 | } 641 | 642 | self.sysFree(bytes); 643 | } 644 | 645 | fn shrink( 646 | allocator: *Allocator, 647 | old_mem: []u8, 648 | old_align: u29, 649 | new_size: usize, 650 | new_align: u29, 651 | ) []u8 { 652 | return reallocOrShrink( 653 | allocator, 654 | old_mem, 655 | old_align, 656 | new_size, 657 | new_align, 658 | @returnAddress(), 659 | .shrink, 660 | ) catch unreachable; 661 | } 662 | 663 | fn realloc( 664 | allocator: *Allocator, 665 | old_mem: []u8, 666 | old_align: u29, 667 | new_size: usize, 668 | new_align: u29, 669 | ) Error![]u8 { 670 | return reallocOrShrink( 671 | allocator, 672 | old_mem, 673 | old_align, 674 | new_size, 675 | new_align, 676 | @returnAddress(), 677 | .realloc, 678 | ); 679 | } 680 | 681 | fn createBucket( 682 | self: *Self, 683 | size_class: usize, 684 | bucket_index: usize, 685 | ) Error!*BucketHeader { 686 | const page = try sysAlloc(self, page_size); 687 | errdefer self.sysFree(page); 688 | 689 | const bucket_size = bucketSize(size_class); 690 | const aligned_bucket_size = std.mem.alignForward(bucket_size, page_size); 691 | const bucket_bytes = try sysAlloc(self, aligned_bucket_size); 692 | const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr); 693 | ptr.* = BucketHeader{ 694 | .prev = ptr, 695 | .next = ptr, 696 | .page = page.ptr, 697 | .alloc_cursor = 0, 698 | .used_count = 0, 699 | }; 700 | self.buckets[bucket_index] = ptr; 701 | // Set the used bits to all zeroes 702 | @memset((*[1]u8)(ptr.usedBits(0)), 0, usedBitsCount(size_class)); 703 | return ptr; 704 | } 705 | 706 | var next_addr_hint: ?[*]align(page_size) u8 = null; 707 | 708 | fn sysAlloc(self: *Self, len: usize) Error![]align(page_size) u8 { 709 | if (config.backing_allocator) { 710 | return self.backing_allocator.alignedAlloc(u8, page_size, len); 711 | } else { 712 | const perms = os.PROT_READ | os.PROT_WRITE; 713 | const flags = os.MAP_PRIVATE | os.MAP_ANONYMOUS; 714 | const hint = @atomicLoad(@typeOf(next_addr_hint), &next_addr_hint, .SeqCst); 715 | const result = os.mmap(hint, len, perms, flags, -1, 0) catch return error.OutOfMemory; 716 | const new_hint = result.ptr + std.mem.alignForward(result.len, page_size); 717 | _ = @cmpxchgStrong(@typeOf(next_addr_hint), &next_addr_hint, hint, new_hint, .SeqCst, .SeqCst); 718 | return result; 719 | } 720 | } 721 | 722 | fn sysFree(self: *Self, old_mem: []u8) void { 723 | if (config.backing_allocator) { 724 | return self.backing_allocator.free(old_mem); 725 | } else { 726 | // This call cannot fail because we are giving the full memory back (not splitting a 727 | // vm page). 
728 | os.munmap(@alignCast(page_size, old_mem)); 729 | } 730 | } 731 | 732 | const SimpleAllocator = struct { 733 | allocator: Allocator, 734 | active_allocation: []u8, 735 | 736 | fn init() SimpleAllocator { 737 | return SimpleAllocator{ 738 | .allocator = Allocator{ 739 | .reallocFn = realloc, 740 | .shrinkFn = shrink, 741 | }, 742 | .active_allocation = (([*]u8)(undefined))[0..0], 743 | }; 744 | } 745 | 746 | fn deinit(self: SimpleAllocator) void { 747 | if (self.active_allocation.len == 0) return; 748 | comptime assert(!config.backing_allocator); 749 | sysFree(undefined, self.active_allocation); 750 | } 751 | 752 | fn realloc( 753 | allocator: *Allocator, 754 | old_mem: []u8, 755 | old_align: u29, 756 | new_size: usize, 757 | new_align: u29, 758 | ) error{OutOfMemory}![]u8 { 759 | assert(old_mem.len == 0); 760 | assert(new_align < page_size); 761 | comptime assert(!config.backing_allocator); 762 | const self = @fieldParentPtr(SimpleAllocator, "allocator", allocator); 763 | const result = try sysAlloc(undefined, new_size); 764 | self.active_allocation = result; 765 | return result; 766 | } 767 | 768 | fn shrink( 769 | allocator: *Allocator, 770 | old_mem: []u8, 771 | old_align: u29, 772 | new_size: usize, 773 | new_align: u29, 774 | ) []u8 { 775 | assert(new_size == 0); 776 | comptime assert(!config.backing_allocator); 777 | sysFree(undefined, old_mem); 778 | return old_mem[0..0]; 779 | } 780 | 781 | /// Applies to all of the bytes in the entire allocator. 782 | pub fn mprotect(self: *SimpleAllocator, protection: u32) void { 783 | if (!config.memory_protection) return; 784 | if (self.active_allocation.len == 0) return; 785 | const aligned_ptr = @alignCast(page_size, self.active_allocation.ptr); 786 | const aligned_len = std.mem.alignForward(self.active_allocation.len, page_size); 787 | const slice = aligned_ptr[0..aligned_len]; 788 | os.mprotect(slice, protection) catch unreachable; 789 | } 790 | }; 791 | }; 792 | } 793 | 794 | const TraceKind = enum { 795 | Alloc, 796 | Free, 797 | }; 798 | 799 | fn up_to_nearest_power_of_2(comptime T: type, n: T) T { 800 | var power: T = 1; 801 | while (power < n) 802 | power *= 2; 803 | return power; 804 | } 805 | 806 | fn hash_addr(addr: usize) u32 { 807 | // TODO ignore the least significant bits because addr is guaranteed 808 | // to be page aligned 809 | if (@sizeOf(usize) == @sizeOf(u32)) 810 | return addr; 811 | comptime assert(@sizeOf(usize) == 8); 812 | return @intCast(u32, addr >> 32) ^ @truncate(u32, addr); 813 | } 814 | 815 | fn eql_addr(a: usize, b: usize) bool { 816 | return a == b; 817 | } 818 | 819 | const test_config = Config{}; 820 | 821 | const test_config_nomprotect = Config{ .memory_protection = false }; 822 | 823 | test "small allocations - free in same order" { 824 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 825 | defer gpda.destroy(); 826 | const allocator = &gpda.allocator; 827 | 828 | var list = std.ArrayList(*u64).init(std.debug.global_allocator); 829 | 830 | var i: usize = 0; 831 | while (i < 513) : (i += 1) { 832 | const ptr = try allocator.create(u64); 833 | try list.append(ptr); 834 | } 835 | 836 | for (list.toSlice()) |ptr| { 837 | allocator.destroy(ptr); 838 | } 839 | } 840 | 841 | test "small allocations - free in reverse order" { 842 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 843 | defer gpda.destroy(); 844 | const allocator = &gpda.allocator; 845 | 846 | var list = std.ArrayList(*u64).init(std.debug.global_allocator); 847 | 848 | var i: usize = 0; 849 | while (i 
< 513) : (i += 1) { 850 | const ptr = try allocator.create(u64); 851 | try list.append(ptr); 852 | } 853 | 854 | while (list.popOrNull()) |ptr| { 855 | allocator.destroy(ptr); 856 | } 857 | } 858 | 859 | test "large allocations" { 860 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 861 | defer gpda.destroy(); 862 | const allocator = &gpda.allocator; 863 | 864 | const ptr1 = try allocator.alloc(u64, 42768); 865 | const ptr2 = try allocator.alloc(u64, 52768); 866 | allocator.free(ptr1); 867 | const ptr3 = try allocator.alloc(u64, 62768); 868 | allocator.free(ptr3); 869 | allocator.free(ptr2); 870 | } 871 | 872 | test "realloc" { 873 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 874 | defer gpda.destroy(); 875 | const allocator = &gpda.allocator; 876 | 877 | var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1); 878 | defer allocator.free(slice); 879 | slice[0] = 0x12; 880 | 881 | // This reallocation should keep its pointer address. 882 | const old_slice = slice; 883 | slice = try allocator.realloc(slice, 2); 884 | assert(old_slice.ptr == slice.ptr); 885 | assert(slice[0] == 0x12); 886 | slice[1] = 0x34; 887 | 888 | // This requires upgrading to a larger size class 889 | slice = try allocator.realloc(slice, 17); 890 | assert(slice[0] == 0x12); 891 | assert(slice[1] == 0x34); 892 | } 893 | 894 | test "shrink" { 895 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 896 | defer gpda.destroy(); 897 | const allocator = &gpda.allocator; 898 | 899 | var slice = try allocator.alloc(u8, 20); 900 | defer allocator.free(slice); 901 | 902 | std.mem.set(u8, slice, 0x11); 903 | 904 | slice = allocator.shrink(slice, 17); 905 | 906 | for (slice) |b| { 907 | assert(b == 0x11); 908 | } 909 | 910 | slice = allocator.shrink(slice, 16); 911 | 912 | for (slice) |b| { 913 | assert(b == 0x11); 914 | } 915 | } 916 | 917 | test "large object - grow" { 918 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 919 | defer gpda.destroy(); 920 | const allocator = &gpda.allocator; 921 | 922 | var slice1 = try allocator.alloc(u8, page_size * 2 - 20); 923 | defer allocator.free(slice1); 924 | 925 | var old = slice1; 926 | slice1 = try allocator.realloc(slice1, page_size * 2 - 10); 927 | assert(slice1.ptr == old.ptr); 928 | 929 | slice1 = try allocator.realloc(slice1, page_size * 2); 930 | assert(slice1.ptr == old.ptr); 931 | 932 | slice1 = try allocator.realloc(slice1, page_size * 2 + 1); 933 | } 934 | 935 | test "realloc small object to large object" { 936 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 937 | defer gpda.destroy(); 938 | const allocator = &gpda.allocator; 939 | 940 | var slice = try allocator.alloc(u8, 70); 941 | defer allocator.free(slice); 942 | slice[0] = 0x12; 943 | slice[60] = 0x34; 944 | 945 | // This requires upgrading to a large object 946 | const large_object_size = page_size * 2 + 50; 947 | slice = try allocator.realloc(slice, large_object_size); 948 | assert(slice[0] == 0x12); 949 | assert(slice[60] == 0x34); 950 | } 951 | 952 | test "shrink large object to large object" { 953 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 954 | defer gpda.destroy(); 955 | const allocator = &gpda.allocator; 956 | 957 | var slice = try allocator.alloc(u8, page_size * 2 + 50); 958 | defer allocator.free(slice); 959 | slice[0] = 0x12; 960 | slice[60] = 0x34; 961 | 962 | if (allocator.realloc(slice, page_size * 2 + 1)) |_| { 963 | @panic("expected failure"); 964 | } else |e| 
assert(e == error.OutOfMemory); 965 | 966 | slice = allocator.shrink(slice, page_size * 2 + 1); 967 | assert(slice[0] == 0x12); 968 | assert(slice[60] == 0x34); 969 | 970 | slice = try allocator.realloc(slice, page_size * 2); 971 | assert(slice[0] == 0x12); 972 | assert(slice[60] == 0x34); 973 | } 974 | 975 | test "shrink large object to large object with larger alignment" { 976 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 977 | defer gpda.destroy(); 978 | const allocator = &gpda.allocator; 979 | 980 | var debug_buffer: [1000]u8 = undefined; 981 | const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator; 982 | 983 | const alloc_size = page_size * 2 + 50; 984 | var slice = try allocator.alignedAlloc(u8, 16, alloc_size); 985 | defer allocator.free(slice); 986 | 987 | var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); 988 | while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) { 989 | try stuff_to_free.append(slice); 990 | slice = try allocator.alignedAlloc(u8, 16, alloc_size); 991 | } 992 | while (stuff_to_free.popOrNull()) |item| { 993 | allocator.free(item); 994 | } 995 | slice[0] = 0x12; 996 | slice[60] = 0x34; 997 | 998 | slice = try allocator.alignedRealloc(slice, page_size * 2, alloc_size / 2); 999 | assert(slice[0] == 0x12); 1000 | assert(slice[60] == 0x34); 1001 | } 1002 | 1003 | test "realloc large object to small object" { 1004 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 1005 | defer gpda.destroy(); 1006 | const allocator = &gpda.allocator; 1007 | 1008 | var slice = try allocator.alloc(u8, page_size * 2 + 50); 1009 | defer allocator.free(slice); 1010 | slice[0] = 0x12; 1011 | slice[16] = 0x34; 1012 | 1013 | slice = try allocator.realloc(slice, 19); 1014 | assert(slice[0] == 0x12); 1015 | assert(slice[16] == 0x34); 1016 | } 1017 | 1018 | test "backing allocator" { 1019 | const gpda = try GeneralPurposeDebugAllocator(Config{ 1020 | .backing_allocator = true, 1021 | .memory_protection = false, 1022 | }).createWithAllocator(std.debug.global_allocator); 1023 | defer gpda.destroy(); 1024 | const allocator = &gpda.allocator; 1025 | 1026 | const ptr = try allocator.create(i32); 1027 | defer allocator.destroy(ptr); 1028 | } 1029 | 1030 | test "realloc large object to larger alignment" { 1031 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 1032 | defer gpda.destroy(); 1033 | const allocator = &gpda.allocator; 1034 | 1035 | var debug_buffer: [1000]u8 = undefined; 1036 | const debug_allocator = &std.heap.FixedBufferAllocator.init(&debug_buffer).allocator; 1037 | 1038 | var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50); 1039 | defer allocator.free(slice); 1040 | 1041 | var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator); 1042 | while (isAligned(@ptrToInt(slice.ptr), page_size * 2)) { 1043 | try stuff_to_free.append(slice); 1044 | slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50); 1045 | } 1046 | while (stuff_to_free.popOrNull()) |item| { 1047 | allocator.free(item); 1048 | } 1049 | slice[0] = 0x12; 1050 | slice[16] = 0x34; 1051 | 1052 | slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 100); 1053 | assert(slice[0] == 0x12); 1054 | assert(slice[16] == 0x34); 1055 | 1056 | slice = try allocator.alignedRealloc(slice, 32, page_size * 2 + 25); 1057 | assert(slice[0] == 0x12); 1058 | assert(slice[16] == 0x34); 1059 | 1060 | slice = try allocator.alignedRealloc(slice, page_size * 2, page_size * 2 + 100); 1061 | 
assert(slice[0] == 0x12); 1062 | assert(slice[16] == 0x34); 1063 | } 1064 | 1065 | fn isAligned(addr: usize, alignment: usize) bool { 1066 | // 000010000 // example addr 1067 | // 000001111 // subtract 1 1068 | // 111110000 // binary not 1069 | const aligned_addr = (addr & ~(alignment - 1)); 1070 | return aligned_addr == addr; 1071 | } 1072 | 1073 | test "isAligned works" { 1074 | assert(isAligned(0, 4)); 1075 | assert(isAligned(1, 1)); 1076 | assert(isAligned(2, 1)); 1077 | assert(isAligned(2, 2)); 1078 | assert(!isAligned(2, 4)); 1079 | assert(isAligned(3, 1)); 1080 | assert(!isAligned(3, 2)); 1081 | assert(!isAligned(3, 4)); 1082 | assert(isAligned(4, 4)); 1083 | assert(isAligned(4, 2)); 1084 | assert(isAligned(4, 1)); 1085 | assert(!isAligned(4, 8)); 1086 | assert(!isAligned(4, 16)); 1087 | } 1088 | 1089 | test "large object shrinks to small but allocation fails during shrink" { 1090 | var failing_allocator = std.debug.FailingAllocator.init(std.heap.direct_allocator, 3); 1091 | const gpda = try GeneralPurposeDebugAllocator(Config{ 1092 | .backing_allocator = true, 1093 | .memory_protection = false, 1094 | }).createWithAllocator(&failing_allocator.allocator); 1095 | defer gpda.destroy(); 1096 | const allocator = &gpda.allocator; 1097 | 1098 | var slice = try allocator.alloc(u8, page_size * 2 + 50); 1099 | defer allocator.free(slice); 1100 | slice[0] = 0x12; 1101 | slice[3] = 0x34; 1102 | 1103 | // Next allocation will fail in the backing allocator of the GeneralPurposeDebugAllocator 1104 | 1105 | slice = allocator.shrink(slice, 4); 1106 | assert(slice[0] == 0x12); 1107 | assert(slice[3] == 0x34); 1108 | } 1109 | 1110 | test "objects of size 1024 and 2048" { 1111 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 1112 | defer gpda.destroy(); 1113 | const allocator = &gpda.allocator; 1114 | 1115 | const slice = try allocator.alloc(u8, 1025); 1116 | const slice2 = try allocator.alloc(u8, 3000); 1117 | 1118 | allocator.free(slice); 1119 | allocator.free(slice2); 1120 | } 1121 | 1122 | test "setting a memory cap" { 1123 | const gpda = try GeneralPurposeDebugAllocator(test_config).create(); 1124 | defer gpda.destroy(); 1125 | const allocator = &gpda.allocator; 1126 | 1127 | gpda.setRequestedMemoryLimit(1010); 1128 | 1129 | const small = try allocator.create(i32); 1130 | assert(gpda.total_requested_bytes == 4); 1131 | 1132 | const big = try allocator.alloc(u8, 1000); 1133 | assert(gpda.total_requested_bytes == 1004); 1134 | 1135 | std.testing.expectError(error.OutOfMemory, allocator.create(u64)); 1136 | 1137 | allocator.destroy(small); 1138 | assert(gpda.total_requested_bytes == 1000); 1139 | 1140 | allocator.free(big); 1141 | assert(gpda.total_requested_bytes == 0); 1142 | 1143 | const exact = try allocator.alloc(u8, 1010); 1144 | assert(gpda.total_requested_bytes == 1010); 1145 | allocator.free(exact); 1146 | } 1147 | -------------------------------------------------------------------------------- /test/double-free.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpda_module = @import("../gpda.zig"); 3 | 4 | const test_config = gpda_module.Config{}; 5 | 6 | test "double free" { 7 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 8 | defer gpda.destroy(); 9 | const allocator = &gpda.allocator; 10 | 11 | std.debug.warn("\n"); 12 | 13 | const alloc1 = try allocator.create(i32); 14 | std.debug.warn("alloc1 = {}\n", alloc1); 15 | 16 | const alloc2 = try 
allocator.create(i32); 17 | std.debug.warn("alloc2 = {}\n", alloc2); 18 | 19 | allocator.destroy(alloc1); 20 | allocator.destroy(alloc1); 21 | } 22 | -------------------------------------------------------------------------------- /test/fuzz.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpda_module = @import("../gpda.zig"); 3 | 4 | const test_config = gpda_module.Config{}; 5 | 6 | test "fuzz testing" { 7 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 8 | defer gpda.destroy(); 9 | const allocator = &gpda.allocator; 10 | 11 | const seed = 0x1234; 12 | var prng = std.rand.DefaultPrng.init(seed); 13 | const rand = &prng.random; 14 | 15 | var allocated_n: u64 = 0; 16 | var freed_n: u64 = 0; 17 | 18 | const Free = struct { 19 | slice: []u8, 20 | it_index: usize, 21 | }; 22 | 23 | var free_queue = std.ArrayList(Free).init(allocator); 24 | var it_index: usize = 0; 25 | 26 | while (true) : (it_index += 1) { 27 | const is_small = rand.boolean(); 28 | const size = if (is_small) 29 | rand.uintLessThanBiased(usize, std.mem.page_size) 30 | else 31 | std.mem.page_size + rand.uintLessThanBiased(usize, 10 * 1024 * 1024); 32 | 33 | const iterations_until_free = rand.uintLessThanBiased(usize, 100); 34 | const slice = allocator.alloc(u8, size) catch unreachable; 35 | allocated_n += size; 36 | free_queue.append(Free{ 37 | .slice = slice, 38 | .it_index = it_index + iterations_until_free, 39 | }) catch unreachable; 40 | 41 | var free_i: usize = 0; 42 | while (free_i < free_queue.len) { 43 | const item = &free_queue.toSlice()[free_i]; 44 | if (item.it_index <= it_index) { 45 | // free time 46 | allocator.free(item.slice); 47 | freed_n += item.slice.len; 48 | _ = free_queue.swapRemove(free_i); 49 | continue; 50 | } 51 | free_i += 1; 52 | } 53 | std.debug.warn("index={} allocated: {Bi:2} freed: {Bi:2}\n", it_index, allocated_n, freed_n); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /test/invalid-free.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpda_module = @import("../gpda.zig"); 3 | 4 | const test_config = gpda_module.Config{}; 5 | 6 | test "invalid free" { 7 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 8 | defer gpda.destroy(); 9 | const allocator = &gpda.allocator; 10 | 11 | std.debug.warn("\n"); 12 | 13 | const alloc1 = try allocator.create(i32); 14 | std.debug.warn("alloc1 = {}\n", alloc1); 15 | 16 | allocator.destroy(@intToPtr(*i32, 0x12345)); 17 | } 18 | -------------------------------------------------------------------------------- /test/leak.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpda_module = @import("../gpda.zig"); 3 | 4 | const test_config = gpda_module.Config{}; 5 | 6 | test "basic leaks" { 7 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 8 | defer gpda.destroy(); 9 | const allocator = &gpda.allocator; 10 | 11 | std.debug.warn("\n"); 12 | var i: usize = 0; 13 | while (i < 4) : (i += 1) { 14 | const alloc1 = try allocator.create(i32); 15 | std.debug.warn("alloc1 = {}\n", alloc1); 16 | defer allocator.destroy(alloc1); 17 | 18 | const alloc2 = try allocator.create(i32); 19 | std.debug.warn("alloc2 = {}\n", alloc2); 20 | //defer allocator.destroy(alloc2); 21 | } 22 | } 23 | 24 | test "leak 
in the first bucket" { 25 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 26 | defer gpda.destroy(); 27 | const allocator = &gpda.allocator; 28 | 29 | var buffer: [8000]u8 = undefined; 30 | const fixed = &std.heap.FixedBufferAllocator.init(&buffer).allocator; 31 | 32 | var list = std.ArrayList(*u64).init(fixed); 33 | 34 | std.debug.warn("\n"); 35 | 36 | { 37 | // fill up a whole bucket, plus 1 extra 38 | var i: usize = 0; 39 | while (i < 513) : (i += 1) { 40 | const ptr = allocator.create(u64) catch unreachable; 41 | list.append(ptr) catch unreachable; 42 | } 43 | } 44 | 45 | // grab the extra one which should be in its own bucket 46 | // but forget to free it 47 | const leaky_pointer1 = list.pop(); 48 | const leaky_pointer2 = list.pop(); 49 | 50 | while (list.popOrNull()) |ptr| { 51 | allocator.destroy(ptr); 52 | } 53 | 54 | // now we expect to see 2 memory leaks 55 | } 56 | -------------------------------------------------------------------------------- /test/use-after-free.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const testing = std.testing; 3 | const gpda_module = @import("../gpda.zig"); 4 | 5 | const test_config = gpda_module.Config{}; 6 | 7 | test "use after free - large" { 8 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 9 | defer gpda.destroy(); 10 | const allocator = &gpda.allocator; 11 | 12 | const first = try allocator.alloc(u8, 3000); 13 | allocator.free(first); 14 | 15 | const second = try allocator.alloc(f32, 1000); 16 | second[0] = 3.14; 17 | std.mem.copy(u8, first, "hello this is dog"); 18 | testing.expect(second[0] == 3.14); 19 | 20 | allocator.free(second); 21 | } 22 | 23 | test "use after free - small" { 24 | const gpda = try gpda_module.GeneralPurposeDebugAllocator(test_config).create(); 25 | defer gpda.destroy(); 26 | const allocator = &gpda.allocator; 27 | 28 | const first = try allocator.alloc(u8, 4); 29 | std.debug.warn("first = {*}\n", first.ptr); 30 | 31 | // this one keeps the page mapped for size class 4 32 | const anchor = try allocator.create(i32); 33 | std.debug.warn("anchor = {*}\n", anchor); 34 | defer allocator.destroy(anchor); 35 | 36 | allocator.free(first); 37 | 38 | const second = try allocator.create(f32); 39 | std.debug.warn("second = {*}\n", second); 40 | second.* = 3.14; 41 | std.mem.copy(u8, first, "hell"); 42 | testing.expect(second.* == 3.14); 43 | 44 | allocator.destroy(second); 45 | } 46 | --------------------------------------------------------------------------------