├── .gitignore ├── DebugHeap.c ├── DebugHeap.h ├── README.md ├── demo.c └── tundra.lua /.gitignore: -------------------------------------------------------------------------------- 1 | t2-output 2 | -------------------------------------------------------------------------------- /DebugHeap.c: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2014, Insomniac Games 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | Redistributions in binary form must reproduce the above copyright notice, this 12 | list of conditions and the following disclaimer in the documentation and/or 13 | other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | #include "DebugHeap.h" 28 | #include <assert.h> 29 | #include <stdint.h> 30 | #include <string.h> 31 | 32 | #if defined(_WIN32) 33 | #include <windows.h> 34 | #elif defined(__APPLE__) || defined(linux) 35 | #include <sys/mman.h> 36 | #else 37 | # error What are you?! 38 | #endif 39 | 40 | //----------------------------------------------------------------------------- 41 | // Preliminaries 42 | 43 | // An assert macro that kills the program. 44 | // Substitute your own assert macro that takes formatted text. 45 | #define ASSERT_FATAL(expr, message, ...) \ 46 | assert(expr && message) 47 | 48 | // Routines that wrap platform-specific virtual memory functionality. 49 | 50 | static void* VmAllocate(size_t size); 51 | static void VmFree(void* ptr, size_t size); 52 | static void VmCommit(void* ptr, size_t size); 53 | static void VmDecommit(void* ptr, size_t size); 54 | 55 | // Windows virtual memory support.
56 | #if defined(_WIN32) 57 | typedef volatile LONG DebugHeapAtomicType; 58 | 59 | static void* VmAllocate(size_t size) 60 | { 61 | void* result = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_READWRITE); 62 | ASSERT_FATAL(result, "Couldn't allocate address space"); 63 | return result; 64 | } 65 | 66 | static void VmFree(void* ptr, size_t size) 67 | { 68 | BOOL result = VirtualFree(ptr, 0, MEM_RELEASE); 69 | ASSERT_FATAL(result, "Failed to free memory"); 70 | (void) size; 71 | } 72 | 73 | static void VmCommit(void* ptr, size_t size) 74 | { 75 | LPVOID result = VirtualAlloc(ptr, size, MEM_COMMIT, PAGE_READWRITE); 76 | ASSERT_FATAL(result, "Failed to commit memory"); 77 | } 78 | 79 | static void VmDecommit(void* ptr, size_t size) 80 | { 81 | BOOL result = VirtualFree(ptr, size, MEM_DECOMMIT); 82 | ASSERT_FATAL(result, "Failed to decommit memory"); 83 | } 84 | 85 | static DebugHeapAtomicType AtomicInc32(DebugHeapAtomicType *var) 86 | { 87 | return InterlockedIncrement(var); 88 | } 89 | 90 | static DebugHeapAtomicType AtomicDec32(DebugHeapAtomicType *var) 91 | { 92 | return InterlockedDecrement(var); 93 | } 94 | #endif 95 | 96 | #if defined(__APPLE__) || defined(linux) 97 | typedef volatile uint32_t DebugHeapAtomicType; 98 | 99 | static void* VmAllocate(size_t size) 100 | { 101 | void* result = mmap(NULL, size, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0); 102 | ASSERT_FATAL(result != MAP_FAILED, "Couldn't allocate address space"); 103 | return result; 104 | } 105 | 106 | static void VmFree(void* ptr, size_t size) 107 | { 108 | int result = munmap(ptr, size); 109 | ASSERT_FATAL(0 == result, "Failed to free memory"); 110 | } 111 | 112 | static void VmCommit(void* ptr, size_t size) 113 | { 114 | int result = mprotect(ptr, size, PROT_READ|PROT_WRITE); 115 | ASSERT_FATAL(0 == result, "Failed to commit memory"); 116 | } 117 | 118 | static void VmDecommit(void* ptr, size_t size) 119 | { 120 | int result = madvise(ptr, size, MADV_DONTNEED); 121 | ASSERT_FATAL(0 == result, "madvise() failed"); 122 | result = mprotect(ptr, size, PROT_NONE); 123 | ASSERT_FATAL(0 == result, "Failed to decommit memory"); 124 | } 125 | 126 | static DebugHeapAtomicType AtomicInc32(DebugHeapAtomicType *var) 127 | { 128 | return __sync_add_and_fetch(var, 1); 129 | } 130 | 131 | static DebugHeapAtomicType AtomicDec32(DebugHeapAtomicType *var) 132 | { 133 | return __sync_sub_and_fetch(var, 1); 134 | } 135 | #endif 136 | 137 | 138 | // We want to use the smallest page size possible, and that happens to be 4k on x86/x64. 139 | // Using larger page sizes would waste enormous amounts of memory.
140 | enum 141 | { 142 | kPageSize = 4096, 143 | }; 144 | 145 | typedef struct DebugBlockInfo 146 | { 147 | uint32_t m_Allocated : 1; 148 | uint32_t m_PageCount : 31; 149 | uint32_t m_PendingFree : 1; 150 | uint32_t m_PageIndex : 31; 151 | struct DebugBlockInfo *m_Prev; 152 | struct DebugBlockInfo *m_Next; 153 | } DebugBlockInfo; 154 | 155 | struct DebugHeap 156 | { 157 | uint32_t m_MaxAllocs; 158 | uint32_t m_PageCount; 159 | 160 | char* m_BaseAddress; 161 | 162 | uint32_t m_FreeListSize; 163 | DebugBlockInfo** m_FreeList; 164 | 165 | uint32_t m_PendingListSize; 166 | DebugBlockInfo** m_PendingList; 167 | 168 | DebugBlockInfo** m_BlockLookup; 169 | DebugBlockInfo* m_FirstUnusedBlockInfo; 170 | 171 | DebugBlockInfo* m_Blocks; 172 | 173 | DebugHeapAtomicType m_ReentrancyGuard; 174 | }; 175 | 176 | #define DEBUG_THREAD_GUARD_ENTER(heap) \ 177 | ASSERT_FATAL(1 == AtomicInc32(&heap->m_ReentrancyGuard), "Unsynchronized MT usage detected") 178 | 179 | #define DEBUG_THREAD_GUARD_LEAVE(heap) \ 180 | ASSERT_FATAL(0 == AtomicDec32(&heap->m_ReentrancyGuard), "Unsynchronized MT usage detected") 181 | 182 | static void* AdvancePtr(void* src, size_t amount) 183 | { 184 | return (char*)src + amount; 185 | } 186 | 187 | DebugBlockInfo* AllocBlockInfo(DebugHeap* heap) 188 | { 189 | DebugBlockInfo* result = heap->m_FirstUnusedBlockInfo; 190 | ASSERT_FATAL((uint32_t)result->m_Allocated, "Block info corrupted"); 191 | ASSERT_FATAL((uint32_t)result->m_PendingFree, "Block info corrupted"); 192 | heap->m_FirstUnusedBlockInfo = result->m_Next; 193 | 194 | memset(result, 0, sizeof *result); 195 | 196 | return result; 197 | } 198 | 199 | void FreeBlockInfo(DebugHeap* heap, DebugBlockInfo* block_info) 200 | { 201 | block_info->m_Allocated = 1; 202 | block_info->m_PendingFree = 1; 203 | block_info->m_Prev = NULL; 204 | block_info->m_Next = heap->m_FirstUnusedBlockInfo; 205 | heap->m_FirstUnusedBlockInfo = block_info; 206 | } 207 | 208 | DebugHeap* DebugHeapInit(size_t mem_size_bytes) 209 | { 210 | DebugHeap* self; 211 | 212 | const size_t mem_page_count = mem_size_bytes / kPageSize; 213 | const size_t max_allocs = mem_page_count / 2; 214 | 215 | const size_t bookkeeping_bytes = 216 | sizeof(DebugHeap) + 217 | (3 * mem_page_count * sizeof(DebugBlockInfo*)) + 218 | sizeof(DebugBlockInfo) * mem_page_count; 219 | 220 | const size_t bookkeeping_pages = (bookkeeping_bytes + kPageSize - 1) / kPageSize; 221 | const size_t total_pages = bookkeeping_pages + mem_page_count; 222 | const size_t total_bytes = total_pages * kPageSize; 223 | 224 | char* range = (char *)VmAllocate(total_bytes); 225 | if (!range) 226 | { 227 | return NULL; 228 | } 229 | 230 | VmCommit(range, bookkeeping_pages * kPageSize); 231 | 232 | self = (DebugHeap*) range; 233 | 234 | self->m_MaxAllocs = (uint32_t) max_allocs; 235 | self->m_BaseAddress = range + kPageSize * bookkeeping_pages; 236 | self->m_PageCount = (uint32_t) mem_page_count; 237 | self->m_FreeList = (DebugBlockInfo**) AdvancePtr(range, sizeof(DebugHeap)); 238 | self->m_PendingList = (DebugBlockInfo**) AdvancePtr(self->m_FreeList, sizeof(DebugBlockInfo*) * mem_page_count); 239 | self->m_BlockLookup = (DebugBlockInfo**) AdvancePtr(self->m_PendingList, sizeof(DebugBlockInfo*) * mem_page_count); 240 | self->m_Blocks = (DebugBlockInfo*) AdvancePtr(self->m_BlockLookup, sizeof(DebugBlockInfo*) * mem_page_count); 241 | self->m_FreeListSize = 1; 242 | self->m_PendingListSize = 0; 243 | self->m_ReentrancyGuard = 0; 244 | 245 | // Initialize block allocation linked list 246 | { 247 | uint32_t i, count; 
248 | for (i = 0, count = self->m_MaxAllocs; i < count; ++i) 249 | { 250 | self->m_Blocks[i].m_Allocated = 1; // flag invalid 251 | self->m_Blocks[i].m_PendingFree = 1; // flag invalid 252 | self->m_Blocks[i].m_Prev = NULL; 253 | self->m_Blocks[i].m_Next = (i + 1) < count ? &self->m_Blocks[i+1] : NULL; 254 | } 255 | } 256 | 257 | self->m_FirstUnusedBlockInfo = &self->m_Blocks[0]; 258 | 259 | { 260 | DebugBlockInfo* root_block = AllocBlockInfo(self); 261 | 262 | root_block->m_PageIndex = 0; 263 | root_block->m_Allocated = 0; 264 | root_block->m_PendingFree = 0; 265 | root_block->m_PageCount = (uint32_t) mem_page_count; 266 | root_block->m_Prev = NULL; 267 | root_block->m_Next = NULL; 268 | 269 | self->m_FreeList[0] = root_block; 270 | } 271 | 272 | return self; 273 | } 274 | 275 | void DebugHeapDestroy(DebugHeap* heap) 276 | { 277 | VmFree(heap, heap->m_PageCount * kPageSize); 278 | } 279 | 280 | static void* AllocFromFreeList(DebugHeap* heap, size_t page_req) 281 | { 282 | // Cache in register to avoid repeated memory derefs 283 | DebugBlockInfo** const free_list = heap->m_FreeList; 284 | 285 | // Keep track of the best fitting block so far. 286 | DebugBlockInfo* best_block = NULL; 287 | uint32_t best_block_size = ~0u; 288 | uint32_t best_freelist_index = 0; 289 | uint32_t i, count; 290 | 291 | // First try the free list. This is slow. That's OK. It's a debug heap. 292 | for (i = 0, count = heap->m_FreeListSize; i < count; ++i) 293 | { 294 | DebugBlockInfo* block = free_list[i]; 295 | uint32_t block_count = block->m_PageCount; 296 | ASSERT_FATAL(!block->m_Allocated, "block info corrupted"); 297 | ASSERT_FATAL(!block->m_PendingFree, "block info corrupted"); 298 | 299 | if (block_count >= page_req && block_count < best_block_size) 300 | { 301 | best_block = block; 302 | best_block_size = block_count; 303 | best_freelist_index = i; 304 | } 305 | } 306 | 307 | if (!best_block) 308 | return NULL; 309 | 310 | // Take this block off the free list. 311 | if (heap->m_FreeListSize > 1) 312 | { 313 | heap->m_FreeList[best_freelist_index] = heap->m_FreeList[heap->m_FreeListSize - 1]; 314 | } 315 | heap->m_FreeListSize--; 316 | 317 | // Carve out the number of pages we need from our best block. 318 | { 319 | uint32_t unused_page_count = (uint32_t) (best_block_size - page_req); 320 | 321 | if (unused_page_count > 0) 322 | { 323 | // Allocate a new block to keep track of the tail end. 324 | DebugBlockInfo* tail_block = AllocBlockInfo(heap); 325 | tail_block->m_Allocated = 0; 326 | tail_block->m_PendingFree = 0; 327 | tail_block->m_PageIndex = best_block->m_PageIndex + best_block->m_PageCount - unused_page_count; 328 | tail_block->m_PageCount = unused_page_count; 329 | 330 | // Link it in to the chain. 
331 | tail_block->m_Next = best_block->m_Next; 332 | tail_block->m_Prev = best_block; 333 | best_block->m_Next = tail_block; 334 | 335 | // Add it to the free list 336 | heap->m_FreeList[heap->m_FreeListSize++] = tail_block; 337 | 338 | // Patch up this block 339 | best_block->m_PageCount = (uint32_t) page_req; 340 | } 341 | } 342 | 343 | best_block->m_Allocated = 1; 344 | 345 | ASSERT_FATAL(heap->m_BlockLookup[best_block->m_PageIndex] == NULL, "block lookup corrupted"); 346 | heap->m_BlockLookup[best_block->m_PageIndex] = best_block; 347 | 348 | { 349 | uint32_t i, max; 350 | for (i = 1, max = best_block->m_PageCount; i < max; ++i) 351 | { 352 | ASSERT_FATAL(heap->m_BlockLookup[best_block->m_PageIndex + i] == NULL, "block lookup corrupted"); 353 | } 354 | } 355 | 356 | return heap->m_BaseAddress + ((uint64_t)(best_block->m_PageIndex)) * kPageSize; 357 | } 358 | 359 | static void* FinalizeAlloc(void* ptr_in, size_t user_size, size_t pages_allocated, size_t user_alignment) 360 | { 361 | char* ptr = (char*) ptr_in; 362 | uint32_t ideal_offset, aligned_offset; 363 | 364 | // Commit pages in user-accessible section. 365 | VmCommit(ptr, (pages_allocated - 1) * kPageSize); 366 | 367 | // Decommit guard page to force crashes for stepping over bounds 368 | VmDecommit(ptr + (pages_allocated - 1) * kPageSize, kPageSize); 369 | 370 | // Align user allocation towards end of page, respecting user alignment. 371 | 372 | // Ideally the offset would be (kPageSize - user_size) % kPageSize. 373 | ideal_offset = ((uint32_t)(kPageSize - user_size)) % kPageSize; 374 | 375 | // Align down to meet user minimum alignment. 376 | aligned_offset = ideal_offset & ~((uint32_t)(user_alignment-1)); 377 | 378 | // Garbage fill start of page. 379 | memset(ptr, 0xfc, aligned_offset); 380 | 381 | return ptr + aligned_offset; 382 | } 383 | 384 | static void FlushPendingFrees(DebugHeap* heap) 385 | { 386 | uint32_t i, count; 387 | for (i = 0, count = heap->m_PendingListSize; i < count; ++i) 388 | { 389 | int block_removed = 0; 390 | 391 | DebugBlockInfo* block = heap->m_PendingList[i]; 392 | DebugBlockInfo* prev; 393 | DebugBlockInfo* next; 394 | 395 | // Attempt to merge into an adjacent block to the left. 396 | // We can only merge with blocks that are free and not on this same pending list. 397 | if (NULL != (prev = block->m_Prev)) 398 | { 399 | if (!prev->m_Allocated && !prev->m_PendingFree && prev->m_PageIndex + prev->m_PageCount == block->m_PageIndex) 400 | { 401 | // Linked list setup. 402 | prev->m_Next = block->m_Next; 403 | 404 | if (block->m_Next) 405 | block->m_Next->m_Prev = prev; 406 | 407 | // Increase size of left neighbor. 408 | prev->m_PageCount += block->m_PageCount; 409 | 410 | // Kill this pending block. 411 | FreeBlockInfo(heap, block); 412 | 413 | // Attempt to do right side coalescing with this other block instead. 414 | block = prev; 415 | 416 | // Don't try to delete this block later - we've already done that. 417 | block_removed = 1; 418 | } 419 | } 420 | 421 | // Attempt to merge into an adjacent block to the right. 422 | if (NULL != (next = block->m_Next)) 423 | { 424 | if (!next->m_Allocated && !next->m_PendingFree && next->m_PageIndex == block->m_PageIndex + block->m_PageCount) 425 | { 426 | uint32_t fi, fcount; 427 | // Linked list setup. 428 | block->m_Next = next->m_Next; 429 | if (block->m_Next) 430 | block->m_Next->m_Prev = block; 431 | block->m_PageCount += next->m_PageCount; 432 | 433 | // Find this thing on the free list and remove it. This is slow.
434 | for (fi = 0, fcount = heap->m_FreeListSize; fi < fcount; ++fi) 435 | { 436 | if (heap->m_FreeList[fi] == next) 437 | { 438 | heap->m_FreeList[fi] = heap->m_FreeList[heap->m_FreeListSize-1]; 439 | --heap->m_FreeListSize; 440 | break; 441 | } 442 | } 443 | 444 | // Free the right neighbor block now that we're done with it. 445 | FreeBlockInfo(heap, next); 446 | } 447 | } 448 | 449 | if (!block_removed) 450 | { 451 | // This block goes on the free list. 452 | block->m_PendingFree = 0; 453 | heap->m_FreeList[heap->m_FreeListSize++] = block; 454 | } 455 | } 456 | 457 | heap->m_PendingListSize = 0; 458 | } 459 | 460 | void* DebugHeapAllocate(DebugHeap* heap, size_t size, size_t alignment) 461 | { 462 | void* ptr; 463 | uint32_t page_req; 464 | 465 | DEBUG_THREAD_GUARD_ENTER(heap); 466 | 467 | // Figure out how many pages we're going to need. 468 | // Always increment by one so we have room for a guard page at the end. 469 | page_req = 1 + (uint32_t) ((size + kPageSize - 1) / kPageSize); 470 | 471 | if (NULL != (ptr = AllocFromFreeList(heap, page_req))) 472 | { 473 | void* result = FinalizeAlloc(ptr, size, page_req, alignment); 474 | DEBUG_THREAD_GUARD_LEAVE(heap); 475 | return result; 476 | } 477 | 478 | // We couldn't find a block off the free list. Consolidate pending frees. 479 | FlushPendingFrees(heap); 480 | 481 | // Try again. 482 | if (NULL != (ptr = AllocFromFreeList(heap, page_req))) 483 | { 484 | void* result = FinalizeAlloc(ptr, size, page_req, alignment); 485 | DEBUG_THREAD_GUARD_LEAVE(heap); 486 | return result; 487 | } 488 | 489 | // Out of memory. 490 | DEBUG_THREAD_GUARD_LEAVE(heap); 491 | return NULL; 492 | } 493 | 494 | void DebugHeapFree(DebugHeap* heap, void* ptr_in) 495 | { 496 | uintptr_t ptr; 497 | uintptr_t relative_offset; 498 | uint32_t page_index; 499 | DebugBlockInfo *block; 500 | char *block_base; 501 | 502 | DEBUG_THREAD_GUARD_ENTER(heap); 503 | 504 | // Figure out what page this belongs to. 505 | ptr = (uintptr_t) ptr_in; 506 | 507 | relative_offset = ptr - (uintptr_t) heap->m_BaseAddress; 508 | page_index = (uint32_t) (relative_offset / kPageSize); 509 | 510 | ASSERT_FATAL(page_index < heap->m_PageCount, "Invalid pointer %p freed", ptr_in); 511 | 512 | block = heap->m_BlockLookup[page_index]; 513 | 514 | ASSERT_FATAL(block, "Double free of %p", ptr_in); 515 | 516 | ASSERT_FATAL((uint32_t)block->m_Allocated, "Block state corrupted"); 517 | ASSERT_FATAL(!block->m_PendingFree, "Block state corrupted"); 518 | 519 | // TODO: Check the fill pattern before the user pointer. 520 | 521 | block->m_Allocated = 0; 522 | block->m_PendingFree = 1; 523 | 524 | // Zero out this block in the lookup to catch double frees. 525 | heap->m_BlockLookup[page_index] = NULL; 526 | 527 | { 528 | uint32_t i, max; 529 | for (i = 1, max = block->m_PageCount; i < max; ++i) 530 | { 531 | ASSERT_FATAL(heap->m_BlockLookup[page_index + i] == NULL, "block lookup corrupted"); 532 | } 533 | } 534 | 535 | // Add the block to the pending free list 536 | heap->m_PendingList[heap->m_PendingListSize++] = block; 537 | 538 | // Completely protect these blocks from reading or writing by decommitting the pages. 539 | // The last page is already inaccessible.
540 | block_base = heap->m_BaseAddress + ((uint64_t)block->m_PageIndex) * kPageSize; 541 | VmDecommit(block_base, ((uint64_t)(block->m_PageCount - 1)) * kPageSize); 542 | 543 | DEBUG_THREAD_GUARD_LEAVE(heap); 544 | } 545 | 546 | size_t DebugHeapGetAllocSize(DebugHeap* heap, void* ptr_in) 547 | { 548 | uintptr_t ptr; 549 | uintptr_t relative_offset; 550 | uint32_t page_index; 551 | size_t result; 552 | DebugBlockInfo* block; 553 | 554 | DEBUG_THREAD_GUARD_ENTER(heap); 555 | 556 | // Figure out what page this belongs to. 557 | ptr = (uintptr_t) ptr_in; 558 | 559 | relative_offset = ptr - (uintptr_t) heap->m_BaseAddress; 560 | page_index = (uint32_t) (relative_offset / kPageSize); 561 | 562 | ASSERT_FATAL(page_index < heap->m_PageCount, "Invalid pointer %p", ptr_in); 563 | 564 | block = heap->m_BlockLookup[page_index]; 565 | 566 | result = (block->m_PageCount - 1) * kPageSize - ptr % kPageSize; 567 | 568 | DEBUG_THREAD_GUARD_LEAVE(heap); 569 | 570 | return result; 571 | } 572 | 573 | int DebugHeapOwns(DebugHeap* heap, void* buffer) 574 | { 575 | uintptr_t ptr; 576 | uintptr_t base; 577 | uintptr_t end; 578 | int status; 579 | 580 | DEBUG_THREAD_GUARD_ENTER(heap); 581 | 582 | ptr = (uintptr_t) buffer; 583 | base = (uintptr_t) heap->m_BaseAddress; 584 | end = base + ((uint64_t)heap->m_PageCount) * kPageSize; 585 | status = ptr >= base && ptr <= end; 586 | 587 | DEBUG_THREAD_GUARD_LEAVE(heap); 588 | return status; 589 | } 590 | -------------------------------------------------------------------------------- /DebugHeap.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <stddef.h> 4 | 5 | #if defined(__cplusplus) 6 | extern "C" { 7 | #endif 8 | 9 | /* 10 | Copyright (c) 2014, Insomniac Games 11 | All rights reserved. 12 | 13 | Redistribution and use in source and binary forms, with or without 14 | modification, are permitted provided that the following conditions are met: 15 | 16 | Redistributions of source code must retain the above copyright notice, this 17 | list of conditions and the following disclaimer. 18 | 19 | Redistributions in binary form must reproduce the above copyright notice, this 20 | list of conditions and the following disclaimer in the documentation and/or 21 | other materials provided with the distribution. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 24 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 27 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 30 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | */ 34 | 35 | //----------------------------------------------------------------------------- 36 | // Debug heap functionality 37 | // 38 | // This set of functions implements a debug heap. It provides the following 39 | // features: 40 | // 41 | // - Array indexing errors (positive) trigger crashes, because allocations are 42 | // aligned as closely as possible up to an inaccessible virtual memory page.
43 | // 44 | // - Using memory after freeing it triggers crashes most of the time. 45 | // 46 | // - Double frees are detected most of the time. 47 | // 48 | // - Unsynchronized multi-threaded access is detected. 49 | // 50 | // To improve the chances of crashing on use-after-free or double frees, 51 | // increase the size of the heap. Freed blocks are kept on an "observation 52 | // list" for as long as possible to flush out these error classes, but they will 53 | // eventually be reused. 54 | // 55 | // This heap is terribly slow, and wastes tons of memory. You only want to use 56 | // it to track down memory errors. One neat way of doing that is to provide a 57 | // heap interface that can dynamically switch to this heap, maybe with a 58 | // configuration option. You can then hunt for memory errors without recompiling. 59 | 60 | typedef struct DebugHeap DebugHeap; 61 | 62 | // Create and initialize a debug heap. 63 | // The size must be a multiple of the page size (4k), and should be generously padded. 64 | // At the very least you need 2 pages per sub-4k allocation, but the more the better. 65 | // The implementation is 64-bit clean and you can throw more than 4 GB at it just fine. 66 | DebugHeap* DebugHeapInit(size_t size); 67 | 68 | // Nuke a debug heap. All memory is returned to the OS. 69 | void DebugHeapDestroy(DebugHeap* heap); 70 | 71 | // Allocate memory from a debug heap. 72 | // Size can be any value, except zero. 73 | // Alignment must be a power of two. 74 | // Returns NULL if the heap is full. 75 | void* DebugHeapAllocate(DebugHeap* heap, size_t size, size_t alignment); 76 | 77 | // Free memory in a debug heap. 78 | void DebugHeapFree(DebugHeap* heap, void* ptr); 79 | 80 | // Return the allocation size for a previously allocated block. 81 | size_t DebugHeapGetAllocSize(DebugHeap* heap, void* ptr); 82 | 83 | // A quick and dirty range check to see if a buffer could have come from a debug heap. 84 | // Doesn't validate that the buffer is actually allocated. 85 | int DebugHeapOwns(DebugHeap* heap, void* buffer); 86 | 87 | #if defined(__cplusplus) 88 | } 89 | #endif 90 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ig-debugheap - A debugging heap 2 | ============================================================================= 3 | 4 | This is a debug heap useful when trying to track down memory errors (especially 5 | on Windows, where there's no Valgrind). It is written in C, and works on Mac, 6 | Linux and Windows. 7 | 8 | This package provides the following features: 9 | 10 | - Array indexing errors (positive) trigger crashes, because allocations are 11 | aligned as closely as possible up to an inaccessible virtual memory page. 12 | 13 | - Using memory after freeing it triggers a crash most of the time. 14 | 15 | - Double frees are detected most of the time. 16 | 17 | - Unsynchronized multi-threaded access is detected. 18 | 19 | To improve the chances of crashing on use-after-free or double frees, 20 | increase the size of the heap. Freed blocks are kept on an "observation 21 | list" for as long as possible to flush out these error classes, but they will 22 | eventually be reused. 23 | 24 | This heap is terribly slow, and wastes tons of memory. You only want to use 25 | it to track down memory errors. One neat way of doing that is to provide a 26 | heap interface that can dynamically switch to this heap, maybe with a 27 | configuration option.
You can then hunt for memory errors without recompiling. 28 | 29 | License 30 | ----------------------------------------------------------------------------- 31 | Copyright (c) 2014, Insomniac Games 32 | All rights reserved. 33 | 34 | Redistribution and use in source and binary forms, with or without 35 | modification, are permitted provided that the following conditions are met: 36 | 37 | Redistributions of source code must retain the above copyright notice, this 38 | list of conditions and the following disclaimer. 39 | 40 | Redistributions in binary form must reproduce the above copyright notice, this 41 | list of conditions and the following disclaimer in the documentation and/or 42 | other materials provided with the distribution. 43 | 44 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 45 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 46 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 47 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 48 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 49 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 50 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 51 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 52 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 | -------------------------------------------------------------------------------- /demo.c: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2014, Insomniac Games 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | Redistributions in binary form must reproduce the above copyright notice, this 12 | list of conditions and the following disclaimer in the documentation and/or 13 | other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 19 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | */ 26 | 27 | // demo.c - quick demo of debug heap functionality 28 | 29 | #include <stdio.h> 30 | #include <stdlib.h> 31 | 32 | #include "DebugHeap.h" 33 | 34 | int main(int argc, char* argv[]) 35 | { 36 | DebugHeap *heap; 37 | 38 | if (argc < 2) { 39 | fprintf(stderr, "Usage: demo <test case>\n"); 40 | fprintf(stderr, "\nTest cases:\n"); 41 | fprintf(stderr, "0: setup+teardown\n"); 42 | fprintf(stderr, "1: array overrun (should crash)\n"); 43 | fprintf(stderr, "2: double free (should assert)\n"); 44 | fprintf(stderr, "3: use after free (should crash)\n"); 45 | exit(1); 46 | } 47 | 48 | heap = DebugHeapInit(2 * 1024 * 1024); 49 | 50 | switch (atoi(argv[1])) { 51 | case 0: 52 | { 53 | char* ptr; 54 | ptr = DebugHeapAllocate(heap, 128, 4); 55 | ptr[127] = 'a'; 56 | DebugHeapFree(heap, ptr); 57 | } 58 | break; 59 | 60 | case 1: 61 | { 62 | char* ptr; 63 | ptr = DebugHeapAllocate(heap, 128, 4); 64 | ptr[128] = 'a'; // should crash here 65 | } 66 | break; 67 | 68 | case 2: 69 | { 70 | char* ptr; 71 | ptr = DebugHeapAllocate(heap, 128, 4); 72 | DebugHeapFree(heap, ptr); 73 | DebugHeapFree(heap, ptr); // should assert here 74 | ptr[127] = 'a'; 75 | } 76 | break; 77 | 78 | case 3: 79 | { 80 | char* ptr; 81 | ptr = DebugHeapAllocate(heap, 128, 4); 82 | DebugHeapFree(heap, ptr); 83 | ptr[0] = 'a'; // should crash here 84 | } 85 | break; 86 | 87 | default: 88 | fprintf(stderr, "Unsupported test case\n"); 89 | break; 90 | } 91 | 92 | DebugHeapDestroy(heap); 93 | 94 | return 0; 95 | } 96 | -------------------------------------------------------------------------------- /tundra.lua: -------------------------------------------------------------------------------- 1 | local common = { 2 | Env = { 3 | CCOPTS = { 4 | -- clang and GCC 5 | { "-g"; Config = { "*-gcc-debug", "*-clang-debug" } }, 6 | { "-g -O2"; Config = { "*-gcc-production", "*-clang-production" } }, 7 | { "-O3"; Config = { "*-gcc-release", "*-clang-release" } }, 8 | { "-Wall", "-Werror", "-Wextra", "-Wno-unused-parameter", "-Wno-unused-function" 9 | ; Config = { "*-gcc-*", "*-clang-*" } 10 | }, 11 | { "/W4"; Config = { "*-msvc-*" } }, 12 | }, 13 | 14 | CPPDEFS = { 15 | { "NDEBUG"; Config = "*-*-release" }, 16 | }, 17 | }, 18 | } 19 | 20 | Build { 21 | Units = function () 22 | 23 | local demo = Program { 24 | Name = "demo", 25 | Sources = { 26 | "DebugHeap.c", 27 | "demo.c", 28 | }, 29 | } 30 | 31 | Default(demo) 32 | end, 33 | 34 | Configs = { 35 | Config { 36 | Name = "macosx-clang", 37 | Inherit = common, 38 | DefaultOnHost = "macosx", 39 | Tools = { "clang-osx", }, 40 | }, 41 | Config { 42 | Name = "linux-gcc", 43 | Inherit = common, 44 | DefaultOnHost = "linux", 45 | Tools = { "gcc", }, 46 | }, 47 | Config { 48 | Name = "win64-msvc", 49 | Inherit = common, 50 | DefaultOnHost = "windows", 51 | Tools = { { "msvc-vs2012"; TargetArch = "x64" }, }, 52 | }, 53 | }, 54 | 55 | Variants = { 56 | { Name = "debug", Options = { GeneratePdb = true } }, 57 | { Name = "release" }, 58 | }, 59 | DefaultVariant = "debug", 60 | } 61 | --------------------------------------------------------------------------------
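The README and DebugHeap.h both suggest hiding this heap behind an allocator interface that can switch to the debug heap at runtime, so memory errors can be hunted without recompiling. Below is a minimal sketch of that idea. The wrapper names (Allocator, AllocatorInit, AllocatorAlloc, AllocatorFree, AllocatorDestroy) are illustrative only and not part of this package; the fallback path simply uses malloc/free.

#include <stdlib.h>

#include "DebugHeap.h"

// Hypothetical switchable allocator: routes requests to the debug heap when it
// is enabled, and to the regular C runtime heap otherwise.
typedef struct Allocator
{
  DebugHeap* m_DebugHeap;   // non-NULL when debug mode is active
} Allocator;

static void AllocatorInit(Allocator* alloc, int use_debug_heap, size_t debug_heap_size)
{
  alloc->m_DebugHeap = use_debug_heap ? DebugHeapInit(debug_heap_size) : NULL;
}

static void* AllocatorAlloc(Allocator* alloc, size_t size, size_t alignment)
{
  if (alloc->m_DebugHeap)
    return DebugHeapAllocate(alloc->m_DebugHeap, size, alignment); // NULL when the debug heap is full
  return malloc(size); // alignment beyond malloc's default is not honored in this sketch
}

static void AllocatorFree(Allocator* alloc, void* ptr)
{
  if (!ptr)
    return;
  // DebugHeapOwns() is only a range check, but it is enough to route frees to
  // the right heap if both heaps were handing out pointers.
  if (alloc->m_DebugHeap && DebugHeapOwns(alloc->m_DebugHeap, ptr))
    DebugHeapFree(alloc->m_DebugHeap, ptr);
  else
    free(ptr);
}

static void AllocatorDestroy(Allocator* alloc)
{
  if (alloc->m_DebugHeap)
    DebugHeapDestroy(alloc->m_DebugHeap);
}

With a wrapper along these lines, the failure cases exercised by demo.c can be reproduced in a real application just by flipping use_debug_heap, at the cost of the speed and memory overhead described in the README.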