├── android └── jni │ ├── Application.mk │ └── Android.mk ├── misc ├── iso_alloc_logo.png ├── isoalloc_design.png └── commands.gdb ├── .clang-format ├── include ├── os │ ├── freebsd.h │ ├── macos.h │ ├── android.h │ └── linux.h ├── compiler.h ├── iso_alloc_util.h ├── iso_alloc_profiler.h ├── iso_alloc_sanity.h ├── iso_alloc.h ├── conf.h ├── iso_alloc_ds.h └── iso_alloc_internal.h ├── .cirrus.yml ├── tests ├── wild_free.c ├── double_free.c ├── unaligned_free.c ├── zero_alloc.c ├── sized_free.c ├── incorrect_chunk_size_multiple.c ├── uninit_read.c ├── big_double_free.c ├── memset_sanity.c ├── heap_underflow.c ├── bzero_sanity.c ├── init_destroy.c ├── leaks_test.c ├── memmove_sanity.c ├── memcpy_sanity.c ├── big_canary_test.c ├── bad_tag_ptr_test.c ├── heap_overflow.c ├── tagged_ptr_test.c ├── uaf.c ├── verify_tag_ptr_test.c ├── rand_freelist.c ├── uaf_tag_ptr_test.c ├── big_tests.c ├── pool_test.c ├── tagged_ptr_test.cpp ├── thread_tests.c ├── tests.cpp ├── interfaces_test.c ├── tests.c └── alloc_fuzz.c ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── testsuite.yml ├── CREDIT.md ├── src ├── iso_alloc_signal.c ├── libc_hook.c ├── iso_alloc_mte.c ├── iso_alloc_random.c ├── iso_alloc.cpp ├── iso_alloc_mem_tags.c ├── iso_alloc_printf.c ├── malloc_hook.c ├── iso_alloc_search.c ├── iso_alloc_util.c ├── iso_alloc_interfaces.c └── iso_alloc_profiler.c ├── utils ├── run_tagging_tests.sh └── run_tests.sh ├── FUZZING.md ├── CONTRIBUTE.md ├── MEMORY_TAGGING.md ├── PROFILER.md ├── SECURITY_COMPARISON.MD └── LICENSE /android/jni/Application.mk: -------------------------------------------------------------------------------- 1 | APP_ABI := arm64-v8a 2 | APP_PLATFORM := android-28 3 | -------------------------------------------------------------------------------- /misc/iso_alloc_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/struct/isoalloc/HEAD/misc/iso_alloc_logo.png -------------------------------------------------------------------------------- /misc/isoalloc_design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/struct/isoalloc/HEAD/misc/isoalloc_design.png -------------------------------------------------------------------------------- /misc/commands.gdb: -------------------------------------------------------------------------------- 1 | set env LD_LIBRARY_PATH=build/ 2 | r 3 | i r 4 | x/i $pc 5 | thread apply all bt 6 | thread apply all info locals 7 | p *_root 8 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | { 2 | BasedOnStyle: llvm, 3 | SortIncludes: false, 4 | SpaceBeforeParens: Never, 5 | IndentWidth: 4, 6 | ColumnLimit: 0, 7 | SpaceAfterCStyleCast: true} 8 | -------------------------------------------------------------------------------- /include/os/freebsd.h: -------------------------------------------------------------------------------- 1 | /* freebsd.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #define MAP_HUGETLB MAP_ALIGNED_SUPER 7 | -------------------------------------------------------------------------------- /.cirrus.yml: -------------------------------------------------------------------------------- 1 | env: 2 | CIRRUS_CLONE_DEPTH: 1 3 | ARCH: amd64 4 | 5 | freebsd_instance: 6 | image_family: freebsd-14-3 7 | 8 | task: 9 | name: testsuite-freebsd-amd64 10 | install_script: 11 | - pkg install -y bash gmake 12 | script: 13 | - gmake tests 14 | - gmake cpp_tests 15 | -------------------------------------------------------------------------------- /tests/wild_free.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc wild_free.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | int64_t *p = (int64_t *) 0x7fffffffffff; 9 | iso_free(p); 10 | return OK; 11 | } 12 | -------------------------------------------------------------------------------- /tests/double_free.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc double_free.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | void *p = iso_alloc(1024); 9 | iso_free(p); 10 | iso_free(p); 11 | return OK; 12 | } 13 | -------------------------------------------------------------------------------- /tests/unaligned_free.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc unaligned_free.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | void *p = iso_alloc(128); 9 | p += 1; 10 | iso_free(p); 11 | return OK; 12 | } 13 | -------------------------------------------------------------------------------- /tests/zero_alloc.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc zero_alloc.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | void *p = iso_alloc(0); 9 | memcpy(p, "0x41", 1); 10 | iso_free(p); 11 | return 0; 12 | } 13 | -------------------------------------------------------------------------------- /tests/sized_free.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc sized_free.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | size_t size = 1024; 9 | uint8_t *p = iso_alloc(size); 10 | iso_free_size(p, size * 4); 11 | return 0; 12 | } 13 | -------------------------------------------------------------------------------- /tests/incorrect_chunk_size_multiple.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc incorrect_chunk_size_multiple.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | int64_t *p = (int64_t *) iso_alloc(128); 9 | p += 8; 10 | iso_free(p); 11 | return OK; 12 | } 13 | -------------------------------------------------------------------------------- /tests/uninit_read.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc uninit_read.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | while(1) { 9 | uint8_t *p = iso_alloc(1024); 10 | uint8_t drf = p[128]; 11 | p[256] = drf; 12 | iso_free(p); 13 | } 14 | 15 | return OK; 16 | } 17 | -------------------------------------------------------------------------------- /tests/big_double_free.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc big_double_free.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | void *p = iso_alloc(SMALL_SIZE_MAX + 1); 9 | iso_free(p); 10 | void *z = iso_alloc(SMALL_SIZE_MAX + 1); 11 | iso_free(p); 12 | iso_free(z); 13 | return OK; 14 | } 15 | -------------------------------------------------------------------------------- /include/os/macos.h: -------------------------------------------------------------------------------- 1 | /* macos.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | #include 9 | #define bswap_32(x) OSSwapInt32(x) 10 | #define bswap_64(x) OSSwapInt64(x) 11 | #define ENVIRON NULL 12 | 13 | /* 14 | * On pthread_exit, it expects to be dealing with libmalloc 15 | * rather than isoalloc at releasing time 16 | */ 17 | __attribute__((weak)) void _pthread_tsd_cleanup(pthread_t); 18 | void _pthread_tsd_cleanup(pthread_t p) 19 | { 20 | (void)p; 21 | } 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a bug in IsoAlloc 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Overview** 11 | A clear and concise description of what the problem is. 12 | 13 | **Reproduction Steps** 14 | A clear step by step guide for reproducing the issue. 15 | 16 | **Build / Configuration Options** 17 | Please provide any build or configuration options needed. 18 | 19 | **Remediation** 20 | If possible provide a fix for the issue you've found. 21 | 22 | **Stack Trace** 23 | If the issue is reproducible please provide a stack trace. 24 | -------------------------------------------------------------------------------- /tests/memset_sanity.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc memset_sanity.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if !MEMSET_SANITY 8 | #error "This test intended to be run with -DMEMSET_SANITY=1" 9 | #endif 10 | 11 | int main(int argc, char *argv[]) { 12 | uint8_t *p = NULL; 13 | 14 | for(int32_t i = 0; i < 1024; i++) { 15 | p = (uint8_t *) iso_alloc(32); 16 | iso_free(p); 17 | } 18 | 19 | p = (uint8_t *) iso_alloc(32); 20 | memset(p, 0x41, 65535); 21 | 22 | iso_free(p); 23 | iso_verify_zones(); 24 | 25 | return OK; 26 | } 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /tests/heap_underflow.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc heap_underflow.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | uint8_t *p = NULL; 9 | 10 | for(int32_t i = 0; i < 128; i++) { 11 | p = (uint8_t *) iso_alloc(32); 12 | iso_free(p); 13 | } 14 | 15 | p = (uint8_t *) iso_alloc(32); 16 | 17 | #if MEMSET_SANITY 18 | uint8_t *p_dest = p - 65535; 19 | size_t n = 65535; 20 | 21 | while(n--) { 22 | *p_dest++ = 0; 23 | } 24 | #else 25 | memset(p - 65535, 0x42, 65535); 26 | #endif 27 | 28 | iso_free(p); 29 | iso_verify_zones(); 30 | 31 | return OK; 32 | } 33 | -------------------------------------------------------------------------------- /tests/bzero_sanity.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc memset_sanity.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if !MEMSET_SANITY 8 | #error "This test intended to be run with -DMEMSET_SANITY=1" 9 | #endif 10 | 11 | #if !(__FreeBSD__ || __NetBSD__ || __OpenBSD__ || __DragonFly__) 12 | #error "This test intended for BSD systems" 13 | #endif 14 | 15 | int main(int argc, char *argv[]) { 16 | uint8_t *p = NULL; 17 | 18 | for(int32_t i = 0; i < 1024; i++) { 19 | p = (uint8_t *) iso_alloc(32); 20 | iso_free(p); 21 | } 22 | 23 | bzero(p, 65535); 24 | 25 | iso_free(p); 26 | iso_verify_zones(); 27 | 28 | return OK; 29 | } 30 | -------------------------------------------------------------------------------- /tests/init_destroy.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc init_destroy.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if AUTO_CTOR_DTOR 8 | #error "This test should have AUTO_CTOR_DTOR disabled" 9 | #endif 10 | 11 | #if !ISO_DTOR_CLEANUP 12 | #error "Enable ISO_DTOR_CLEANUP before running this test" 13 | #endif 14 | 15 | int main(int argc, char *argv[]) { 16 | /* Manually initialize IsoAlloc root */ 17 | iso_alloc_initialize(); 18 | 19 | void *p = iso_alloc(1024); 20 | 21 | if(p == NULL) { 22 | LOG_AND_ABORT("iso_alloc failed") 23 | } 24 | 25 | iso_free(p); 26 | 27 | /* Manually destroy IsoAlloc root */ 28 | iso_alloc_destroy(); 29 | return 0; 30 | } 31 | -------------------------------------------------------------------------------- /tests/leaks_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc leaks.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | void *p[16]; 9 | int32_t leak = 0; 10 | 11 | for(int32_t i = 0; i < 16; i++) { 12 | p[i] = iso_alloc(i * i); 13 | 14 | /* Free a single chunk */ 15 | if(i == 1) { 16 | iso_free(p[i]); 17 | } else { 18 | leak++; 19 | } 20 | } 21 | 22 | for(int32_t i = 0; i < 16; i++) { 23 | LOG("p[%d] (%p) = %p", i, &p[i], p[i]); 24 | } 25 | 26 | iso_verify_zones(); 27 | int32_t r = iso_alloc_detect_leaks(); 28 | 29 | LOG("Total leaks detected: %d %p of %d", r, p, leak); 30 | 31 | return r; 32 | } 33 | -------------------------------------------------------------------------------- /tests/memmove_sanity.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc memmove_sanity.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if !MEMCPY_SANITY 8 | #error "This test intended to be run with -DMEMCPY_SANITY=1" 9 | #endif 10 | 11 | int main(int argc, char *argv[]) { 12 | uint8_t *p = NULL; 13 | p = (uint8_t *) iso_alloc(SMALLEST_CHUNK_SZ); 14 | 15 | const char *A = "ABCABCABCABCABCABCABCABCABCABCABCABCABCAA" 16 | "ABCABCABCABCABCABCABCABCABCABCABCABCABCAA" 17 | "ABCABCABCABCABCABCABCABCABCABCABCABCABCAA"; 18 | size_t len = strlen(A); 19 | memcpy(p, A, SMALLEST_CHUNK_SZ); 20 | 21 | memmove(&p[0], &p[64], len - 64); 22 | 23 | iso_free(p); 24 | iso_verify_zones(); 25 | 26 | return OK; 27 | } 28 | -------------------------------------------------------------------------------- /tests/memcpy_sanity.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc memcpy_sanity.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if !MEMCPY_SANITY 8 | #error "This test intended to be run with -DMEMCPY_SANITY=1" 9 | #endif 10 | 11 | int main(int argc, char *argv[]) { 12 | uint8_t *p = NULL; 13 | 14 | for(int32_t i = 0; i < 1024; i++) { 15 | p = (uint8_t *) iso_alloc(8); 16 | iso_free(p); 17 | } 18 | 19 | p = (uint8_t *) iso_alloc(8); 20 | 21 | const char *A = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" 22 | "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" 23 | "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; 24 | memcpy(p, A, strlen(A)); 25 | 26 | iso_free(p); 27 | iso_verify_zones(); 28 | 29 | return OK; 30 | } 31 | -------------------------------------------------------------------------------- /CREDIT.md: -------------------------------------------------------------------------------- 1 | # Thanks 2 | 3 | The following list of people have contributed to IsoAlloc through code, issues, documentation, or ideas: 4 | 5 | * [Oscar Reparaz](https://github.com/struct/isoalloc/pull/5) 6 | * [Silvio Cesare](https://github.com/struct/isoalloc/commit/6cef89ce907a037e70440ee7225a88e260ef1e63) 7 | * [Max Blachman](https://github.com/struct/isoalloc/issues/3) 8 | * [Filippo Valsorda](https://github.com/struct/isoalloc/pull/2) 9 | * [Insu Yun](https://github.com/struct/isoalloc/issues/7) 10 | * [Jean-Michaël Celerier](https://github.com/struct/isoalloc/issues/8) 11 | * [theimpostor](https://github.com/struct/isoalloc/issues/10) 12 | * [jvoisin](https://github.com/struct/isoalloc/search?q=jvoisin&type=commits) 13 | * [devnexen](https://github.com/struct/isoalloc/pull/82) 14 | 15 | If you belong on this list and I have forgotten you just reach out! 16 | -------------------------------------------------------------------------------- /tests/big_canary_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc big_canary_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | 9 | void *r = iso_alloc(ZONE_USER_SIZE + (ZONE_USER_SIZE / 4)); 10 | 11 | if(r == NULL) { 12 | LOG_AND_ABORT("Failed to allocate a big zone of %d bytes", ZONE_USER_SIZE + (ZONE_USER_SIZE / 4)); 13 | } 14 | 15 | iso_alloc_root *root = _get_root(); 16 | void *p = ((iso_alloc_big_zone_t *) ((uintptr_t) root->big_zone_next_mask ^ (uintptr_t) root->big_zone_used)); 17 | 18 | if(p == NULL) { 19 | LOG_AND_ABORT("Big zone list is empty, %p must not be a big zone!", r); 20 | } 21 | 22 | memset(p, 0x41, sizeof(iso_alloc_big_zone_t)); 23 | 24 | iso_free_permanently(r); 25 | 26 | return 0; 27 | } 28 | -------------------------------------------------------------------------------- /tests/bad_tag_ptr_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc bad_tag_ptr_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | /* This test should successfully fail with or 5 | * without MEMORY_TAGGING support */ 6 | 7 | #include 8 | #include 9 | #include "iso_alloc.h" 10 | 11 | #define SIZE 256 12 | 13 | int main(int argc, char *argv[]) { 14 | iso_alloc_zone_handle *_zone_handle = iso_alloc_new_zone(SIZE); 15 | 16 | if(_zone_handle == NULL) { 17 | abort(); 18 | } 19 | 20 | char buffer[1024]; 21 | void *p = &buffer; 22 | 23 | /* This should crash because p is not allocated from this zone */ 24 | uint8_t tag = iso_alloc_get_mem_tag(p, _zone_handle); 25 | printf("Tag = %x\n", tag); 26 | iso_alloc_destroy_zone(_zone_handle); 27 | 28 | #if !MEMORY_TAGGING 29 | return -1; 30 | #endif 31 | 32 | return 0; 33 | } 34 | -------------------------------------------------------------------------------- /src/iso_alloc_signal.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_signal.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | 6 | #if SIGNAL_HANDLER 7 | INTERNAL_HIDDEN void handle_signal(int sig, siginfo_t *si, void *ctx) { 8 | void *crash_addr = si->si_addr; 9 | 10 | #if UAF_PTR_PAGE 11 | if(si->si_addr == NULL) { 12 | LOG_AND_ABORT("si->si_addr == NULL"); 13 | } 14 | 15 | /* Check for the address within 2 pages in 16 | * either direction of _root->uaf_ptr_page */ 17 | if(crash_addr >= _root->uaf_ptr_page - (g_page_size * 2) && 18 | crash_addr <= _root->uaf_ptr_page + (g_page_size * 2)) { 19 | LOG_AND_ABORT("Use after free detected! Crashed at _root->uaf_ptr_page 0x%x", si->si_addr); 20 | } 21 | #endif 22 | 23 | LOG_AND_ABORT("Unknown segmentation fault @ 0x%p", crash_addr); 24 | } 25 | #endif 26 | -------------------------------------------------------------------------------- /tests/heap_overflow.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc heap_overflow.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | int main(int argc, char *argv[]) { 8 | uint8_t *p = NULL; 9 | 10 | for(int32_t i = 0; i < 1024; i++) { 11 | p = (uint8_t *) iso_alloc(32); 12 | iso_free(p); 13 | } 14 | 15 | p = (uint8_t *) iso_alloc(32); 16 | 17 | #if MEMCPY_SANITY 18 | const char *A = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" 19 | "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" 20 | "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; 21 | memcpy(p, A, strlen(A)); 22 | #else 23 | size_t n = 65536; 24 | uint8_t *pw = (uint8_t *) p; 25 | while(n--) { 26 | *pw = 'A'; 27 | pw++; 28 | } 29 | #endif 30 | 31 | iso_free(p); 32 | iso_verify_zones(); 33 | 34 | return OK; 35 | } 36 | -------------------------------------------------------------------------------- /src/libc_hook.c: -------------------------------------------------------------------------------- 1 | /* libc_hook.c - Provides low level hooks for libc functions 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | #include "iso_alloc_sanity.h" 6 | #include "iso_alloc_util.h" 7 | 8 | #if MEMCPY_SANITY 9 | EXTERNAL_API void *memcpy(void *restrict dest, const void *restrict src, size_t n) { 10 | return _iso_alloc_memcpy(dest, src, n); 11 | } 12 | 13 | EXTERNAL_API void *memmove(void *dest, const void *src, size_t n) { 14 | return _iso_alloc_memmove(dest, src, n); 15 | } 16 | #endif 17 | 18 | #if MEMSET_SANITY 19 | EXTERNAL_API void *memset(void *dest, int b, size_t n) { 20 | return _iso_alloc_memset(dest, b, n); 21 | } 22 | 23 | /* 24 | * bzero is removed from the POSIX standard in IEEE Std 1003.1-2008, but still a valid *BSD extension 25 | */ 26 | #if(__FreeBSD__ || __NetBSD__ || __OpenBSD__ || __DragonFly__) 27 | EXTERNAL_API void bzero(void *dest, size_t n) { 28 | (void) _iso_alloc_memset(dest, 0, n); 29 | } 30 | #endif 31 | #endif 32 | -------------------------------------------------------------------------------- /tests/tagged_ptr_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc tagged_ptr_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | #include 4 | #include 5 | #include "iso_alloc.h" 6 | #include "iso_alloc_internal.h" 7 | 8 | #if !MEMORY_TAGGING 9 | #error "This test intended to be run with -DMEMORY_TAGGING=1" 10 | #endif 11 | 12 | #define SIZE 256 13 | 14 | int main(int argc, char *argv[]) { 15 | iso_alloc_zone_handle *_zone_handle = iso_alloc_new_zone(SIZE); 16 | 17 | if(_zone_handle == NULL) { 18 | abort(); 19 | } 20 | 21 | void *p = iso_alloc_from_zone_tagged(_zone_handle); 22 | void *up = iso_alloc_untag_ptr(p, _zone_handle); 23 | 24 | uint8_t tag = ((uintptr_t) p >> 56); 25 | uint8_t itag = iso_alloc_get_mem_tag(up, _zone_handle); 26 | 27 | if(tag != itag) { 28 | LOG_AND_ABORT("Tags %d and %d do not match", tag, itag); 29 | } 30 | 31 | /* We can pass a tagged or untagged pointer to iso_free_from_zone */ 32 | iso_free_from_zone(p, _zone_handle); 33 | 34 | iso_alloc_destroy_zone(_zone_handle); 35 | 36 | return 0; 37 | } 38 | -------------------------------------------------------------------------------- /tests/uaf.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc uaf.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if UAF_PTR_PAGE && !ALLOC_SANITY 8 | /* This test should be run manually after enabling UAF_PTR_PAGE 9 | * and disabling the sampling mechanism before the call to 10 | * _iso_alloc_ptr_search in _iso_free_internal_unlocked */ 11 | typedef struct test { 12 | char *str; 13 | } test_t; 14 | 15 | int main(int argc, char *argv[]) { 16 | void *str = iso_alloc(32); 17 | test_t *test = (test_t *) iso_alloc(1024); 18 | test->str = str; 19 | 20 | const char *s = "a string!"; 21 | memcpy(str, s, strlen(s)); 22 | 23 | /* We free the chunk permanently because 24 | * it bypasses the quarantine */ 25 | iso_free_permanently(str); 26 | 27 | /* Dereference a pointer that should have been 28 | * detected and overwritten with UAF_PTR_PAGE */ 29 | iso_alloc_root *root = _get_root(); 30 | fprintf(stdout, "Dereferencing test->str @ %p. Fault address will be %p\n", test->str, root->uaf_ptr_page); 31 | fprintf(stdout, "[%s]\n", test->str); 32 | iso_free_permanently(test); 33 | 34 | return OK; 35 | } 36 | #else 37 | int main(int argc, char *argv[]) { 38 | return 0; 39 | } 40 | #endif 41 | -------------------------------------------------------------------------------- /tests/verify_tag_ptr_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc verify_tag_ptr_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include 5 | #include 6 | #include "iso_alloc.h" 7 | #include "iso_alloc_internal.h" 8 | 9 | #if !MEMORY_TAGGING 10 | #error "This test intended to be run with -DMEMORY_TAGGING=1" 11 | #endif 12 | 13 | #define SIZE 256 14 | 15 | int main(int argc, char *argv[]) { 16 | iso_alloc_zone_handle *_zone_handle = iso_alloc_new_zone(SIZE); 17 | 18 | if(_zone_handle == NULL) { 19 | abort(); 20 | } 21 | 22 | /* Allocate a chunk, and assign a tagged pointer to p */ 23 | void *p = iso_alloc_from_zone_tagged(_zone_handle); 24 | 25 | /* Remove the tag from the pointer */ 26 | void *up = iso_alloc_untag_ptr(p, _zone_handle); 27 | 28 | /* Free the underlying chunk with the untagged pointer */ 29 | iso_free_from_zone(up, _zone_handle); 30 | 31 | /* Flush all caches includes the delayed free list. When 32 | * the chunk is free'd its tag will be changed */ 33 | iso_flush_caches(); 34 | 35 | /* Verify the tag on our stale tagged pointer. This should 36 | * abort because the tag was changed during free() */ 37 | iso_alloc_verify_ptr_tag(p, _zone_handle); 38 | 39 | iso_alloc_destroy_zone(_zone_handle); 40 | 41 | return 0; 42 | } 43 | -------------------------------------------------------------------------------- /tests/rand_freelist.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc rand_freelist.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | /* Number of allocation requests to test */ 8 | #define ALLOCATIONS 32 9 | 10 | /* Size of each allocation */ 11 | #define CHUNK_SIZE 32 12 | 13 | /* Failure threshold */ 14 | #define FAIL 4 15 | 16 | /* This test requires that RANDOMIZE_FREELIST is 17 | * enabled or the test will return 0. 18 | * The test will look for returned chunks 19 | * that are adjacent and increment a counter when 20 | * they're found. This randomization is probabilistic 21 | * so this test may fail from time to time. */ 22 | int main(int argc, char *argv[]) { 23 | uint8_t *p; 24 | uint8_t *q; 25 | size_t adj_count = 0; 26 | 27 | for(int32_t i = 0; i < ALLOCATIONS; i++) { 28 | p = (uint8_t *) iso_alloc(CHUNK_SIZE); 29 | q = (uint8_t *) iso_alloc(CHUNK_SIZE); 30 | 31 | if(q > p && (q - p) == CHUNK_SIZE) { 32 | adj_count++; 33 | } else if(p - q == CHUNK_SIZE) { 34 | adj_count++; 35 | } 36 | 37 | #if RANDOMIZE_FREELIST 38 | if(adj_count > FAIL) { 39 | LOG_AND_ABORT("First allocation %p adjacent to second %p", p, q); 40 | } 41 | #endif 42 | iso_free(p); 43 | iso_free(q); 44 | } 45 | 46 | return 0; 47 | } 48 | -------------------------------------------------------------------------------- /include/os/android.h: -------------------------------------------------------------------------------- 1 | /* android.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | 9 | /* This magic number is usually defined by Android Bionic: 10 | * https://android.googlesource.com/platform/bionic/+/263325d/libc/include/sys/prctl.h#42 */ 11 | #ifndef PR_SET_VMA 12 | #define PR_SET_VMA 0x53564d41 13 | #endif 14 | 15 | #ifndef PR_SET_VMA_ANON_NAME 16 | #define PR_SET_VMA_ANON_NAME 0 17 | #endif 18 | 19 | #if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) && ARM_MTE == 1 20 | /* TODO: these belong in an aarch specific header */ 21 | #ifndef PROT_MTE 22 | #define PROT_MTE 0x20 23 | #endif 24 | #ifndef PR_SET_TAGGED_ADDR_CTRL 25 | #define PR_SET_TAGGED_ADDR_CTRL 54 26 | #endif 27 | #ifndef PR_GET_TAGGED_ADDR_CTRL 28 | #define PR_GET_TAGGED_ADDR_CTRL 56 29 | #endif 30 | #ifndef PR_TAGGED_ADDR_ENABLE 31 | #define PR_TAGGED_ADDR_ENABLE (1UL << 0) 32 | #endif 33 | #ifndef PR_MTE_TCF_SHIFT 34 | #define PR_MTE_TCF_SHIFT 1 35 | #endif 36 | #ifndef PR_MTE_TAG_SHIFT 37 | #define PR_MTE_TAG_SHIFT 3 38 | #endif 39 | #ifndef PR_MTE_TCF_NONE 40 | #define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) 41 | #endif 42 | #ifndef PR_MTE_TCF_SYNC 43 | #define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) 44 | #endif 45 | #ifndef PR_MTE_TCF_MASK 46 | #define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) 47 | #endif 48 | #endif 49 | -------------------------------------------------------------------------------- /tests/uaf_tag_ptr_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc uaf_tag_ptr_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include 5 | #include 6 | #include "iso_alloc.h" 7 | #include "iso_alloc_internal.h" 8 | 9 | #if !MEMORY_TAGGING 10 | #error "This test intended to be run with -DMEMORY_TAGGING=1" 11 | #endif 12 | 13 | #define SIZE 256 14 | 15 | int main(int argc, char *argv[]) { 16 | iso_alloc_zone_handle *_zone_handle = iso_alloc_new_zone(SIZE); 17 | 18 | if(_zone_handle == NULL) { 19 | abort(); 20 | } 21 | 22 | void *p = iso_alloc_from_zone_tagged(_zone_handle); 23 | void *up = iso_alloc_untag_ptr(p, _zone_handle); 24 | iso_free_from_zone(up, _zone_handle); 25 | iso_flush_caches(); 26 | p = iso_alloc_untag_ptr(p, _zone_handle); 27 | 28 | /* This should crash on systems without TBI as p is 29 | * already free and the untagging operation should 30 | * result in a bad pointer */ 31 | memset(p, 0x41, SIZE); 32 | 33 | #if __aarch64__ 34 | /* aarch64 systems with TBI enabled will succeed in 35 | * using the tagged pointer p. If p is still tagged 36 | * we abort here */ 37 | if((uintptr_t) p & IS_TAGGED_PTR_MASK) { 38 | LOG_AND_ABORT("Write to tagged ptr %p succeeded. TBI may be enabled", p); 39 | } 40 | #else 41 | if((uintptr_t) p & IS_TAGGED_PTR_MASK) { 42 | LOG_AND_ABORT("Write to tagged ptr %p succeeded on x86_64 ?!", p); 43 | } 44 | #endif 45 | 46 | iso_alloc_destroy_zone(_zone_handle); 47 | return 0; 48 | } 49 | -------------------------------------------------------------------------------- /include/compiler.h: -------------------------------------------------------------------------------- 1 | /* compiler.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #define INTERNAL_HIDDEN __attribute__((visibility("hidden"))) 7 | #define ASSUME_ALIGNED __attribute__((assume_aligned(8))) 8 | #define CONST __attribute__((const)) 9 | 10 | /* This isn't standard in C as [[nodiscard]] until C23 */ 11 | #define NO_DISCARD __attribute__((warn_unused_result)) 12 | 13 | #if UNIT_TESTING 14 | #define EXTERNAL_API __attribute__((visibility("default"))) 15 | #endif 16 | 17 | #if PERF_TEST_BUILD 18 | #define INLINE 19 | #define FLATTEN 20 | #else 21 | #if __clang__ 22 | #define INLINE __attribute__((always_inline)) 23 | #else 24 | #define INLINE __attribute__((inline)) 25 | #endif 26 | #define FLATTEN __attribute__((flatten)) 27 | #endif 28 | 29 | #define LIKELY(x) __builtin_expect(!!(x), 1) 30 | #define UNLIKELY(x) __builtin_expect(!!(x), 0) 31 | 32 | #if __clang__ 33 | #if __has_feature(address_sanitizer) 34 | #define __SANITIZE_ADDRESS__ 1 35 | #endif 36 | #if __has_feature(memory_sanitizer) 37 | #define __SANITIZE_MEMORY__ 1 38 | #endif 39 | #endif 40 | 41 | #if DONT_USE_NEON == 0 && __ARM_NEON 42 | #include 43 | #define USE_NEON 1 44 | #else 45 | #define USE_NEON 0 46 | #endif 47 | 48 | #if defined(__SANITIZE_ADDRESS__) 49 | static_assert(ENABLE_ASAN == 1, "ENABLE_ASAN should be 1 to enable asan instead"); 50 | #endif 51 | 52 | #if defined(__SANITIZE_MEMORY__) 53 | static_assert(ENABLE_MSAN == 1, "ENABLE_MSAN should be 1 to enable msan instead"); 54 | #endif 55 | -------------------------------------------------------------------------------- /android/jni/Android.mk: -------------------------------------------------------------------------------- 1 | LOCAL_PATH := $(call my-dir) 2 | include $(CLEAR_VARS) 3 | 4 | LOCAL_CFLAGS := -DTHREAD_SUPPORT=1 -pthread \ 5 | -DPRE_POPULATE_PAGES=0 -DSMALL_MEM_STARTUP=0 -DSANITIZE_CHUNKS=0 \ 6 | -DFUZZ_MODE=0 -DPERM_FREE_REALLOC=0 -DDISABLE_CANARY=0 -Werror \ 7 | -pedantic -Wno-pointer-arith -Wno-gnu-zero-variadic-macro-arguments \ 8 | -Wno-format-pedantic -DMALLOC_HOOK=1 -fvisibility=hidden -std=c11 \ 9 | -DALLOC_SANITY=0 -DUNINIT_READ_SANITY=0 -DCPU_PIN=0 -DEXPERIMENTAL=0 \ 10 | -DUAF_PTR_PAGE=0 -DVERIFY_FREE_BIT_SLOTS=0 -DNAMED_MAPPINGS=1 -fPIC \ 11 | -shared -DDEBUG=1 -DLEAK_DETECTOR=1 -DMEM_USAGE=1 -DUSE_MLOCK=1 \ 12 | -DMEMORY_TAGGING=0 -DSCHED_GETCPU -g -ggdb3 -fno-omit-frame-pointer \ 13 | -DRANDOMIZE_FREELIST=1 -DBIG_ZONE_META_DATA_GUARD=0 -DBIG_ZONE_GUARD=0 \ 14 | -DPROTECT_FREE_BIG_ZONES=0 -DMASK_PTRS=1 -DSIGNAL_HANDLER=0 \ 15 | -DUSE_MLOCK=1 -DNO_ZERO_ALLOCATIONS=1 -DABORT_ON_NULL=0 \ 16 | -DABORT_NO_ENTROPY=1 -DMEMCPY_SANITY=0 -DMEMSET_SANITY=0 \ 17 | -DSTRONG_SIZE_ISOLATION=0 -DISO_DTOR_CLEANUP=0 -DARM_MTE=1 \ 18 | -march=armv8.5-a+memtag 19 | 20 | LOCAL_SRC_FILES := ../../src/iso_alloc.c ../../src/iso_alloc_printf.c ../../src/iso_alloc_random.c \ 21 | ../../src/iso_alloc_search.c ../../src/iso_alloc_interfaces.c ../../src/iso_alloc_profiler.c \ 22 | ../../src/iso_alloc_sanity.c ../../src/iso_alloc_util.c ../../src/malloc_hook.c \ 23 | ../../src/libc_hook.c ../../src/iso_alloc_mem_tags.c ../../src/iso_alloc_mte.c 24 | 25 | LOCAL_C_INCLUDES := ../../include/ 26 | 27 | LOCAL_MODULE := libisoalloc 28 | 29 | include $(BUILD_SHARED_LIBRARY) 30 | -------------------------------------------------------------------------------- /utils/run_tagging_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | $(echo '' > tagging_test_output.txt) 3 | 4 | tests=("tagged_ptr_test" "tagged_ptr_test_cpp") 5 | failure=0 6 | succeeded=0 7 | 8 | $(ulimit -c 0) 9 | 10 | export LD_LIBRARY_PATH=build/ 11 | 12 | for t in "${tests[@]}"; do 13 | echo -n "Running $t test" 14 | echo "Running $t test" >> tagging_test_output.txt 2>&1 15 | $(build/$t >> tagging_test_output.txt 2>&1) 16 | ret=$? 17 | 18 | if [ $ret -ne 0 ]; then 19 | echo "... Failed" 20 | echo "... Failed" >> tagging_test_output.txt 2>&1 21 | failure=$((failure+1)) 22 | else 23 | echo "... Succeeded" 24 | echo "... Succeeded" >> tagging_test_output.txt 2>&1 25 | succeeded=$((succeeded+1)) 26 | fi 27 | done 28 | 29 | fail_tests=("bad_tag_ptr_test" "verify_tag_ptr_test" "uaf_tag_ptr_test") 30 | 31 | for t in "${fail_tests[@]}"; do 32 | echo -n "Running $t test" 33 | echo "Running $t test" >> tagging_test_output.txt 2>&1 34 | $(build/$t >> tagging_test_output.txt 2>&1) 35 | ret=$? 36 | 37 | if [ $ret -ne 0 ]; then 38 | echo "... Succeeded" 39 | echo "... Succeeded" >> tagging_test_output.txt 2>&1 40 | succeeded=$((succeeded+1)) 41 | else 42 | echo "... Failed" 43 | echo "... Failed" >> tagging_test_output.txt 2>&1 44 | failure=$((failure+1)) 45 | fi 46 | done 47 | 48 | echo "$succeeded Tests passed" 49 | echo "$failure Tests failed" 50 | 51 | unset LD_LIBRARY_PATH 52 | unset LD_PRELOAD 53 | 54 | if [ $failure -ne 0 ]; then 55 | cat tagging_test_output.txt 56 | exit -1 57 | else 58 | exit 0 59 | fi 60 | -------------------------------------------------------------------------------- /.github/workflows/testsuite.yml: -------------------------------------------------------------------------------- 1 | name: testsuite 2 | on: [push, pull_request] 3 | jobs: 4 | testsuite-osx: 5 | runs-on: macos-latest 6 | steps: 7 | - uses: actions/checkout@v2 8 | - run: make tests 9 | - run: make cpp_tests 10 | testsuite-clang: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | version: [15, 16, 17, 18] 15 | steps: 16 | - uses: actions/checkout@v4 17 | - run: | 18 | sudo apt install clang-${{ matrix.version }} 19 | sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-${{ matrix.version }} 100 20 | sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${{ matrix.version }} 100 21 | - run: make tests 22 | - run: make cpp_tests 23 | testsuite-clang18-perf: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - run: sudo apt install clang-18 28 | - run: make malloc_cmp_test 29 | testsuite-clang-format: 30 | runs-on: ubuntu-latest 31 | steps: 32 | - uses: actions/checkout@v4 33 | - run: sudo apt install clang clang-format-18 34 | - run: make format-ci 35 | testsuite-gcc: 36 | runs-on: ubuntu-latest 37 | steps: 38 | - uses: actions/checkout@v4 39 | - run: make tests CC=gcc CXX=g++ 40 | - run: make cpp_tests CC=gcc CXX=g++ 41 | testsuite-musl: 42 | runs-on: ubuntu-latest 43 | container: 44 | image: alpine:latest 45 | steps: 46 | - uses: actions/checkout@v4 47 | - run: apk update && apk add build-base bash musl-dev linux-headers 48 | - run: make tests CC=gcc CXX=g++ 49 | - run: make cpp_tests CC=gcc CXX=g++ 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /tests/big_tests.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc big_tests.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | #include 7 | 8 | int main(int argc, char *argv[]) { 9 | 10 | void *p = iso_alloc(SMALL_SIZE_MAX + 1); 11 | 12 | if(p == NULL) { 13 | LOG_AND_ABORT("Failed to allocate %d bytes", SMALL_SIZE_MAX + 1); 14 | } 15 | 16 | iso_free(p); 17 | 18 | p = iso_alloc(ZONE_USER_SIZE * 2); 19 | 20 | if(p == NULL) { 21 | LOG_AND_ABORT("Failed to allocate a big zone of %d bytes", ZONE_USER_SIZE * 2); 22 | } 23 | 24 | iso_free(p); 25 | 26 | void *q = iso_alloc(ZONE_USER_SIZE + (ZONE_USER_SIZE / 2)); 27 | 28 | if(q == NULL) { 29 | LOG_AND_ABORT("Failed to allocate a big zone of %d bytes", ZONE_USER_SIZE + (ZONE_USER_SIZE / 2)); 30 | } 31 | 32 | void *r = iso_alloc(ZONE_USER_SIZE + (ZONE_USER_SIZE / 4)); 33 | 34 | if(r == NULL) { 35 | LOG_AND_ABORT("Failed to allocate a big zone of %d bytes", ZONE_USER_SIZE + (ZONE_USER_SIZE / 4)); 36 | } 37 | 38 | iso_free_permanently(r); 39 | iso_free(q); 40 | 41 | void *ptrs[64]; 42 | srand(time(NULL)); 43 | 44 | iso_verify_zones(); 45 | 46 | for(int32_t i = 0; i < 64; i++) { 47 | ptrs[i] = iso_alloc(ZONE_USER_SIZE + (rand() % ZONE_USER_SIZE)); 48 | 49 | /* Randomly free some allocations */ 50 | if((rand() % 5) > 1) { 51 | iso_free(ptrs[i]); 52 | ptrs[i] = NULL; 53 | } 54 | } 55 | 56 | iso_verify_zones(); 57 | 58 | LOG("[Big Zone Test] Megabytes used: %lu", iso_alloc_mem_usage()); 59 | 60 | for(int32_t i = 0; i < 64; i++) { 61 | iso_free(ptrs[i]); 62 | } 63 | 64 | return 0; 65 | } 66 | -------------------------------------------------------------------------------- /include/iso_alloc_util.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_util.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | #include "compiler.h" 6 | 7 | #if !NAMED_MAPPINGS 8 | #define SAMPLED_ALLOC_NAME "" 9 | #define BIG_ZONE_UD_NAME "" 10 | #define BIG_ZONE_MD_NAME "" 11 | #define GUARD_PAGE_NAME "" 12 | #define ROOT_NAME "" 13 | #define ZONE_BITMAP_NAME "" 14 | #define INTERNAL_UZ_NAME "" 15 | #define PRIVATE_UZ_NAME "" 16 | #endif 17 | 18 | #if USE_MLOCK 19 | #define MLOCK(p, s) mlock(p, s) 20 | #else 21 | #define MLOCK(p, s) 22 | #endif 23 | 24 | INTERNAL_HIDDEN void *create_guard_page(void *p); 25 | INTERNAL_HIDDEN INLINE void darwin_reuse(void *p, size_t size); 26 | INTERNAL_HIDDEN void unmap_guarded_pages(void *p, size_t size); 27 | INTERNAL_HIDDEN ASSUME_ALIGNED void *mmap_guarded_rw_pages(size_t size, bool populate, const char *name); 28 | INTERNAL_HIDDEN ASSUME_ALIGNED void *mmap_guarded_rw_mte_pages(size_t size, bool populate, const char *name); 29 | INTERNAL_HIDDEN ASSUME_ALIGNED void *mmap_rw_pages(size_t size, bool populate, const char *name); 30 | INTERNAL_HIDDEN ASSUME_ALIGNED void *mmap_rw_mte_pages(size_t size, bool populate, const char *name); 31 | INTERNAL_HIDDEN ASSUME_ALIGNED void *mmap_pages(size_t size, bool populate, const char *name, int32_t prot); 32 | INTERNAL_HIDDEN void mprotect_pages(void *p, size_t size, int32_t protection); 33 | INTERNAL_HIDDEN int32_t name_mapping(void *p, size_t sz, const char *name); 34 | INTERNAL_HIDDEN size_t next_pow2(size_t sz); 35 | INTERNAL_HIDDEN uint32_t _log2(uint32_t v); 36 | 37 | INTERNAL_HIDDEN int8_t *_fmt(uint64_t n, uint32_t base); 38 | INTERNAL_HIDDEN void _iso_alloc_printf(int32_t fd, const char *f, ...); 39 | 40 | #if CPU_PIN 41 | INTERNAL_HIDDEN INLINE int _iso_getcpu(void); 42 | #endif 43 | -------------------------------------------------------------------------------- /tests/pool_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc pool_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | static const uint32_t allocation_sizes[] = {ZONE_16, ZONE_32, ZONE_64, ZONE_128, 8 | ZONE_256, ZONE_512, ZONE_1024, 9 | ZONE_2048, ZONE_4096, ZONE_8192}; 10 | 11 | static const uint32_t array_sizes[] = {16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192}; 12 | 13 | int allocate(size_t array_size, size_t allocation_size) { 14 | iso_alloc_zone_handle *zone = iso_alloc_new_zone(allocation_size); 15 | size_t total_chunks = iso_zone_chunk_count(zone); 16 | 17 | /* We can treat private zones like pools of chunks 18 | * that don't need to be freed. Instead we can just 19 | * destroy the whole zone when we are done. We get 20 | * the benefits of pools with all of the security 21 | * properties of an IsoAlloc zone */ 22 | for(int i = 0; i < total_chunks; i++) { 23 | void *p = iso_alloc_from_zone(zone); 24 | 25 | if(p == NULL) { 26 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations from zone with %d total chunks", 27 | allocation_size, i, total_chunks); 28 | } 29 | } 30 | 31 | iso_alloc_destroy_zone(zone); 32 | 33 | return OK; 34 | } 35 | 36 | int main(int argc, char *argv[]) { 37 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 38 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 39 | allocate(array_sizes[i], allocation_sizes[z]); 40 | } 41 | } 42 | 43 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 44 | allocate(array_sizes[i], 0); 45 | } 46 | 47 | return 0; 48 | } 49 | -------------------------------------------------------------------------------- /utils/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script runs all debug tests including vulnerable 3 | # examples of code that should crash 4 | $(echo '' > test_output.txt) 5 | 6 | tests=("tests" "big_tests" "interfaces_test" "thread_tests" "pool_test" 7 | "rand_freelist") 8 | failure=0 9 | succeeded=0 10 | 11 | $(ulimit -c 0) 12 | 13 | export LD_LIBRARY_PATH=build/ 14 | 15 | for t in "${tests[@]}"; do 16 | echo -n "Running $t test" 17 | echo "Running $t test" >> test_output.txt 2>&1 18 | $(build/$t >> test_output.txt 2>&1) 19 | ret=$? 20 | 21 | if [ $ret -ne 0 ]; then 22 | echo "... Failed" 23 | echo "... Failed" >> test_output.txt 2>&1 24 | failure=$((failure+1)) 25 | else 26 | echo "... Succeeded" 27 | echo "... Succeeded" >> test_output.txt 2>&1 28 | succeeded=$((succeeded+1)) 29 | fi 30 | done 31 | 32 | fail_tests=("double_free" "big_double_free" "heap_overflow" "heap_underflow" 33 | "leaks_test" "wild_free" "unaligned_free" "incorrect_chunk_size_multiple" 34 | "big_canary_test" "zero_alloc" "sized_free") 35 | 36 | for t in "${fail_tests[@]}"; do 37 | echo -n "Running $t test" 38 | echo "Running $t test" >> test_output.txt 2>&1 39 | $(build/$t >> test_output.txt 2>&1) 40 | ret=$? 41 | 42 | if [ $ret -ne 0 ]; then 43 | echo "... Succeeded" 44 | echo "... Succeeded" >> test_output.txt 2>&1 45 | succeeded=$((succeeded+1)) 46 | else 47 | echo "... Failed" 48 | echo "... Failed" >> test_output.txt 2>&1 49 | failure=$((failure+1)) 50 | fi 51 | done 52 | 53 | echo "$succeeded Tests passed" 54 | echo "$failure Tests failed" 55 | 56 | unset LD_LIBRARY_PATH 57 | unset LD_PRELOAD 58 | 59 | if [ $failure -ne 0 ]; then 60 | cat test_output.txt 61 | exit -1 62 | else 63 | exit 0 64 | fi 65 | -------------------------------------------------------------------------------- /include/os/linux.h: -------------------------------------------------------------------------------- 1 | /* linux.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | #include 9 | /* Get linux kernel version */ 10 | #include 11 | 12 | #if defined(CPU_PIN) && defined(_GNU_SOURCE) && defined(__linux__) 13 | #include 14 | #endif 15 | 16 | /* In Linux kernel versions greater than 5.17.0, it is also possible 17 | * to name anonymous VMA. See 18 | * https://kernelnewbies.org/Linux_5.17#Support_giving_names_to_anonymous_memory */ 19 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) 20 | /* Use this macro can allow for compatibility with older versions while 21 | * introducing new functionality */ 22 | #define KERNEL_VERSION_SEQ_5_17 1 23 | /* Same as android.h. */ 24 | #ifndef PR_SET_VMA 25 | #define PR_SET_VMA 0x53564d41 26 | #endif 27 | #ifndef PR_SET_VMA_ANON_NAME 28 | #define PR_SET_VMA_ANON_NAME 0 29 | #endif 30 | #endif 31 | 32 | #define ENVIRON environ 33 | 34 | #if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) && ARM_MTE == 1 35 | /* TODO: these belong in an aarch specific header */ 36 | #ifndef PROT_MTE 37 | #define PROT_MTE 0x20 38 | #endif 39 | #ifndef PR_SET_TAGGED_ADDR_CTRL 40 | #define PR_SET_TAGGED_ADDR_CTRL 54 41 | #endif 42 | #ifndef PR_GET_TAGGED_ADDR_CTRL 43 | #define PR_GET_TAGGED_ADDR_CTRL 56 44 | #endif 45 | #ifndef PR_TAGGED_ADDR_ENABLE 46 | #define PR_TAGGED_ADDR_ENABLE (1UL << 0) 47 | #endif 48 | #ifndef PR_MTE_TCF_SHIFT 49 | #define PR_MTE_TCF_SHIFT 1 50 | #endif 51 | #ifndef PR_MTE_TAG_SHIFT 52 | #define PR_MTE_TAG_SHIFT 3 53 | #endif 54 | #ifndef PR_MTE_TCF_NONE 55 | #define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) 56 | #endif 57 | #ifndef PR_MTE_TCF_SYNC 58 | #define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) 59 | #endif 60 | #ifndef PR_MTE_TCF_MASK 61 | #define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) 62 | #endif 63 | #endif 64 | -------------------------------------------------------------------------------- /FUZZING.md: -------------------------------------------------------------------------------- 1 | # Fuzzing 2 | 3 | [Fuzzing](https://en.wikipedia.org/wiki/Fuzzing) is the automated process of discovering realibility bugs and security vulnerabilities in software by creating random or structure aware inputs that a program was not built to handle. Fuzzing is extremely powerful at discovering subtle issues in your code but it requires a comprehensive set of tools and instrumentation in order to be successful. Runtime instrumentation like Address Sanitizer and Memory Sanitizer have enabled even the simplest of fuzzers to uncover these kinds of issues. 4 | 5 | ## Fuzzing with IsoAlloc 6 | 7 | Memory allocation libraries like IsoAlloc can also help uncover security vulnerabilities by enabling the right configurations and calling the right APIs. It's also useful for fuzzing in environments where the sanitizers cannot easily run, or when the target is binary-only. A good example of this is [libdislocator](https://github.com/mirrorer/afl/tree/master/libdislocator) which has shipped with the AFL fuzzer for many years. If you're finding value in libdislocator then switching to IsoAlloc will likely uncover even more issues for you. You can do this today by simply using `LD_PRELOAD=libisoalloc.so` and starting your fuzzer. 8 | 9 | ## Fuzzing Configuration 10 | 11 | Below is a list of configurations which will help you get the most out of fuzzing with IsoAlloc. Each of these can be enabled at compile time by modifying the Makefile. All of these flags should be documented in the [README](README.md#security-properties). 12 | 13 | * FUZZ_MODE - Verifies the internal state of all zones on each alloc/free operation 14 | * SANITIZE_CHUNKS - Over write chunk contents upon free with 0xDE 15 | * PERM_FREE_REALLOC - Any chunk passed to realloc will be permanently free'd 16 | * ALLOC_SANITY - Samples allocations and places them on individual pages to detection UAF and out of bounds r/w 17 | * UAF_PTR_PAGE - Calls to `iso_free` will be sampled to search for dangling references to the chunk being free'd 18 | * ABORT_ON_NULL - Aborts instead of returning NULL whenever malloc() fails 19 | -------------------------------------------------------------------------------- /CONTRIBUTE.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | If you're looking to contribute to IsoAlloc then you will want to start with this guide. It contains some steps you will want to follow before making a pull request, and a basic style guide. 4 | 5 | ## Testing Your Changes 6 | 7 | Contributing to IsoAlloc is a pretty standard process of forking the repo, making a pull request, and optionally linking it to an existing issue. Before you make your pull request please run the following commands on both Linux and MacOS: 8 | 9 | `make tests` - Make sure all tests still pass 10 | 11 | `make malloc_cmp_test` - Check for major performance regressions 12 | 13 | `make cpp_tests` - Make sure all C++ tests still pass 14 | 15 | `make library` - Build a release version of the library 16 | 17 | `make cpp_library` - Build a release version of the library with C++ support 18 | 19 | Repeat the steps above using gcc/g++ as your compiler. e.g. `make tests CC=gcc CXX=g++` 20 | 21 | Compile a debug version of the library with `make cpp_library_debug` and then run a basic test using `LD_PRELOAD` and another binary. 22 | 23 | If you're making changes that are handled differently between Clang and GCC then please run the tests above but also set the `CC` and `CXX` environment variables approriately. 24 | 25 | ## Style Guide 26 | 27 | Before you make a PR please run the following: 28 | 29 | `make format` - Run the clang formatter to ensure your changes conform to the rest of the project style 30 | 31 | The clang-format Makefile target should cleanup a lot of your commit but please ensure you conform to the following style guide: 32 | 33 | - Open braces on same line as if/function start 34 | - No space between if conditional and parantheses 35 | - Use a define for any int or string constants 36 | - Comments should be C style unless its a .cpp file 37 | - Declare counter local types within for loop declarations if possible 38 | 39 | ``` 40 | /* Check the value of some flag */ 41 | if(flag == SOME_VALUE) { 42 | ... 43 | } 44 | 45 | for(uint64_t i = 0; i < canary_count; i++) { 46 | ... 47 | } 48 | ``` 49 | -------------------------------------------------------------------------------- /src/iso_alloc_mte.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_mte.c - A secure memory allocator 2 | * Copyright 2024 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | 6 | /* The majority of this code is adapted from Scudos implementation 7 | * of ARM MTE support. That code can be found here: 8 | * https://android.googlesource.com/platform/external/scudo/+/refs/tags/android-14.0.0_r1/standalone/ 9 | * Its license (The LLVM Project is under the Apache License v2.0 with LLVM Exceptions) can be found here 10 | * https://android.googlesource.com/platform/external/scudo/+/refs/tags/android-14.0.0_r1/LICENSE.TXT */ 11 | 12 | #if ARM_MTE == 1 13 | 14 | #define MTE_GRANULE 16 15 | #define UNTAGGED_BITS 56 16 | 17 | #ifndef HWCAP2_MTE 18 | #define HWCAP2_MTE (1 << 18) 19 | #endif 20 | 21 | void *iso_mte_untag_ptr(void *p) { 22 | return (void *) ((uintptr_t) p & ((1ULL << UNTAGGED_BITS) - 1)); 23 | } 24 | 25 | uint8_t iso_mte_extract_tag(void *p) { 26 | return ((uintptr_t) p >> UNTAGGED_BITS) & 0xF; 27 | } 28 | 29 | /* Check for the MTE bit in the ELF Auxv */ 30 | bool iso_is_mte_supported(void) { 31 | return getauxval(AT_HWCAP2) & HWCAP2_MTE; 32 | } 33 | 34 | void *iso_mte_set_tag_range(void *p, size_t size) { 35 | void *tagged_ptr = iso_mte_create_tag(iso_mte_untag_ptr(p), 0x0); 36 | 37 | for(int i = 0; i < size; i += MTE_GRANULE) { 38 | iso_mte_set_tag(tagged_ptr + i); 39 | } 40 | 41 | return tagged_ptr; 42 | } 43 | 44 | /* Uses IRG to create a random tag */ 45 | void *iso_mte_create_tag(void *p, uint64_t exclusion_mask) { 46 | exclusion_mask |= 1; 47 | void *tagged_ptr; 48 | __asm__ __volatile__( 49 | ".arch_extension memtag\n" 50 | "irg %[tagged_ptr], %[p], %[exclusion_mask]\n" 51 | : [tagged_ptr] "=r"(tagged_ptr) 52 | : [p] "r"(p), [exclusion_mask] "r"(exclusion_mask)); 53 | return tagged_ptr; 54 | } 55 | 56 | /* Uses STG to lock a tag to an address */ 57 | void iso_mte_set_tag(void *p) { 58 | __asm__ __volatile__( 59 | ".arch_extension memtag\n" 60 | "stg %0, [%0]\n" 61 | : 62 | : "r"(p) 63 | : "memory"); 64 | } 65 | 66 | /* Uses LDG to load a tag */ 67 | void *iso_mte_get_tag(void *p) { 68 | void *tagged_ptr = p; 69 | __asm__ __volatile__( 70 | ".arch_extension memtag\n" 71 | "ldg %0, [%0]\n" 72 | : "+r"(tagged_ptr) 73 | : 74 | : "memory"); 75 | return tagged_ptr; 76 | } 77 | #endif 78 | -------------------------------------------------------------------------------- /include/iso_alloc_profiler.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_profiler.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #if HEAP_PROFILER 7 | #include "compiler.h" 8 | #include "conf.h" 9 | 10 | #define PROFILER_ODDS 10000 11 | #define HG_SIZE 65535 12 | #define CHUNK_USAGE_THRESHOLD 75 13 | #define PROFILER_ENV_STR "ISO_ALLOC_PROFILER_FILE_PATH" 14 | #define PROFILER_FILE_PATH "iso_alloc_profiler.data" 15 | #define BACKTRACE_DEPTH 8 16 | #define BACKTRACE_DEPTH_SZ 128 17 | 18 | /* The IsoAlloc profiler is not thread local but these 19 | * globals should only ever be touched by internal 20 | * allocator functions when the root is locked */ 21 | uint64_t _alloc_count; 22 | uint64_t _free_count; 23 | uint64_t _alloc_sampled_count; 24 | uint64_t _free_sampled_count; 25 | 26 | int32_t profiler_fd; 27 | 28 | typedef struct { 29 | uint64_t total; 30 | uint64_t count; 31 | } zone_profiler_map_t; 32 | 33 | zone_profiler_map_t _zone_profiler_map[SMALL_SIZE_MAX]; 34 | 35 | /* iso_alloc_traces_t is a public structure, and 36 | * is defined in the public header iso_alloc.h */ 37 | iso_alloc_traces_t _alloc_bts[BACKTRACE_DEPTH_SZ]; 38 | size_t _alloc_bts_count; 39 | 40 | /* iso_free_traces_t is a public structure, and 41 | * is defined in the public header iso_alloc.h */ 42 | iso_free_traces_t _free_bts[BACKTRACE_DEPTH_SZ]; 43 | size_t _free_bts_count; 44 | 45 | INTERNAL_HIDDEN INLINE uint64_t _get_backtrace_hash(void); 46 | INTERNAL_HIDDEN INLINE void _save_backtrace(iso_alloc_traces_t *abts); 47 | INTERNAL_HIDDEN INLINE uint64_t _call_count_from_hash(uint16_t hash); 48 | INTERNAL_HIDDEN void _iso_output_profile(void); 49 | INTERNAL_HIDDEN void _initialize_profiler(void); 50 | INTERNAL_HIDDEN void _iso_alloc_profile(size_t size); 51 | INTERNAL_HIDDEN void _iso_free_profile(void); 52 | INTERNAL_HIDDEN size_t _iso_get_alloc_traces(iso_alloc_traces_t *traces_out); 53 | INTERNAL_HIDDEN size_t _iso_get_free_traces(iso_free_traces_t *traces_out); 54 | INTERNAL_HIDDEN void _iso_alloc_reset_traces(void); 55 | #endif 56 | 57 | INTERNAL_HIDDEN uint64_t _iso_alloc_zone_leak_detector(iso_alloc_zone_t *zone, bool profile); 58 | INTERNAL_HIDDEN uint64_t _iso_alloc_detect_leaks_in_zone(iso_alloc_zone_t *zone); 59 | INTERNAL_HIDDEN uint64_t _iso_alloc_detect_leaks(void); 60 | INTERNAL_HIDDEN uint64_t _iso_alloc_zone_mem_usage(iso_alloc_zone_t *zone); 61 | INTERNAL_HIDDEN uint64_t __iso_alloc_zone_mem_usage(iso_alloc_zone_t *zone); 62 | INTERNAL_HIDDEN uint64_t _iso_alloc_big_zone_mem_usage(void); 63 | INTERNAL_HIDDEN uint64_t __iso_alloc_big_zone_mem_usage(iso_alloc_big_zone_t *head); 64 | INTERNAL_HIDDEN uint64_t _iso_alloc_mem_usage(void); 65 | INTERNAL_HIDDEN uint64_t __iso_alloc_mem_usage(void); 66 | -------------------------------------------------------------------------------- /include/iso_alloc_sanity.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_sanity.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #include "iso_alloc_util.h" 7 | 8 | #if ALLOC_SANITY 9 | #if UNINIT_READ_SANITY 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #endif 16 | 17 | #define SANITY_SAMPLE_ODDS 10000 18 | #define MAX_SANE_SAMPLES 1024 19 | #define SANE_CACHE_SIZE 65535 20 | #define SANE_CACHE_IDX(p) (((uint64_t) p >> 8) & 0xffff) 21 | #define SANITY_CANARY_VALIDATE_MASK 0xffffffffffffff00 22 | #define SANITY_CANARY_SIZE 8 23 | 24 | #if THREAD_SUPPORT 25 | #if USE_SPINLOCK 26 | extern atomic_flag sane_cache_flag; 27 | #define LOCK_SANITY_CACHE() \ 28 | do { \ 29 | } while(atomic_flag_test_and_set(&sane_cache_flag)); 30 | 31 | #define UNLOCK_SANITY_CACHE() \ 32 | atomic_flag_clear(&sane_cache_flag); 33 | #else 34 | extern pthread_mutex_t sane_cache_mutex; 35 | #define LOCK_SANITY_CACHE() \ 36 | pthread_mutex_lock(&sane_cache_mutex); 37 | 38 | #define UNLOCK_SANITY_CACHE() \ 39 | pthread_mutex_unlock(&sane_cache_mutex); 40 | #endif 41 | #else 42 | #define LOCK_SANITY_CACHE() 43 | #define UNLOCK_SANITY_CACHE() 44 | #endif 45 | 46 | #if UNINIT_READ_SANITY 47 | extern pthread_t _page_fault_thread; 48 | extern struct uffdio_api _uffd_api; 49 | extern int64_t _uf_fd; 50 | #endif 51 | 52 | extern int32_t _sane_sampled; 53 | extern uint8_t _sane_cache[SANE_CACHE_SIZE]; 54 | 55 | typedef struct { 56 | void *address; 57 | size_t orig_size; 58 | bool right_aligned; 59 | } _sane_allocation_t; 60 | 61 | extern _sane_allocation_t _sane_allocations[MAX_SANE_SAMPLES]; 62 | extern uint64_t _sanity_canary; 63 | 64 | #if UNINIT_READ_SANITY 65 | INTERNAL_HIDDEN void _iso_alloc_setup_userfaultfd(void); 66 | INTERNAL_HIDDEN void *_page_fault_thread_handler(void *uf_fd); 67 | #endif 68 | 69 | INTERNAL_HIDDEN INLINE void write_sanity_canary(void *p); 70 | INTERNAL_HIDDEN INLINE void check_sanity_canary(_sane_allocation_t *sane_alloc); 71 | INTERNAL_HIDDEN void *_iso_alloc_sample(const size_t size); 72 | INTERNAL_HIDDEN int32_t _iso_alloc_free_sane_sample(void *p); 73 | INTERNAL_HIDDEN int32_t _remove_from_sane_trace(void *p); 74 | INTERNAL_HIDDEN _sane_allocation_t *_get_sane_alloc(void *p); 75 | #endif 76 | 77 | INTERNAL_HIDDEN INLINE void *__iso_memcpy(void *dest, const void *src, size_t n); 78 | INTERNAL_HIDDEN void *_iso_alloc_memcpy(void *dest, const void *src, size_t n); 79 | INTERNAL_HIDDEN INLINE void *__iso_memmove(void *dest, const void *src, size_t n); 80 | INTERNAL_HIDDEN void *_iso_alloc_memmove(void *dest, const void *src, size_t n); 81 | INTERNAL_HIDDEN INLINE void *__iso_memset(void *dest, int b, size_t n); 82 | INTERNAL_HIDDEN void *_iso_alloc_memset(void *dest, int b, size_t n); 83 | -------------------------------------------------------------------------------- /src/iso_alloc_random.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_random.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | /* Contributed by Oscar Reparaz (@oreparaz) 5 | * https://github.com/struct/isoalloc/pull/5 */ 6 | 7 | #include "iso_alloc_internal.h" 8 | 9 | #define OLD_GLIBC (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 24) 10 | 11 | #if OLD_GLIBC 12 | #include 13 | #include 14 | #elif __APPLE__ 15 | #include 16 | #elif __FreeBSD__ || __DragonFly__ || __linux__ || __ANDROID__ 17 | #include 18 | #elif __NetBSD__ 19 | #include 20 | #elif __sun 21 | #include 22 | #else 23 | #error "unknown OS" 24 | #endif 25 | 26 | /* Adapted from Daniel Lemire (@lemire). The code can be found at 27 | * https://github.com/lemire/testingRNG/blob/master/source/wyhash.h 28 | * Which is adapted from wyhash from @wangyi-fudan 29 | * 30 | * This is not suitable for anything cryptographic. We use it in 31 | * IsoAlloc for generating pointer masks, shuffling arrays etc, 32 | * and the initial seed is refreshed with rand_uint64() each time 33 | * we create a new zone. 34 | * 35 | * The magic values below are just random 64 bit primes. These values 36 | * are combined with our seed and then run through the mum function. 37 | * See the original wyhash paper for more details. 38 | * https://github.com/wangyi-fudan/wyhash/ */ 39 | INTERNAL_HIDDEN INLINE uint64_t us_rand_uint64(uint64_t *seed) { 40 | *seed += 0x5d6447c8df9375b5; 41 | __uint128_t tmp = (__uint128_t) *seed * 0x3c5829f2fb3c3eef; 42 | uint64_t m1 = (tmp >> 64) ^ tmp; 43 | tmp = (__uint128_t) m1 * 0x4f38e4ccc1b0ea59; 44 | return (tmp >> 64) ^ tmp; 45 | } 46 | 47 | INTERNAL_HIDDEN INLINE uint64_t rand_uint64(void) { 48 | uint64_t val = 0; 49 | int ret = 0; 50 | 51 | /* In modern versions of glibc (>=2.25) we can call getrandom(), 52 | * but older versions of glibc are still in use as of writing this. 53 | * Use the raw system call as a lower common denominator. 54 | * We give up on checking the return value. The alternative would be 55 | * to crash. We prefer here to keep going with degraded randomness. */ 56 | #if OLD_GLIBC 57 | ret = syscall(SYS_getrandom, &val, sizeof(val), GRND_NONBLOCK) != sizeof(val); 58 | #elif __APPLE__ 59 | ret = SecRandomCopyBytes(kSecRandomDefault, sizeof(val), &val); 60 | #elif __FreeBSD__ || __DragonFly__ || __linux__ || __ANDROID__ || __sun 61 | ret = getrandom(&val, sizeof(val), GRND_NONBLOCK) != sizeof(val); 62 | #elif __NetBSD__ 63 | /* Temporary solution until NetBSD 10 released with getrandom support */ 64 | arc4random_buf(&val, sizeof(val)); 65 | #endif 66 | 67 | #if ABORT_NO_ENTROPY 68 | if(ret != 0) { 69 | LOG_AND_ABORT("Unable to gather enough entropy"); 70 | } 71 | #endif 72 | 73 | return val; 74 | } 75 | -------------------------------------------------------------------------------- /src/iso_alloc.cpp: -------------------------------------------------------------------------------- 1 | // iso_alloc.cpp - A secure memory allocator 2 | // Copyright 2023 - chris.rohlf@gmail.com 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | #if CPP_SUPPORT 8 | #if MALLOC_HOOK 9 | 10 | #include 11 | 12 | #if !ABORT_ON_NULL 13 | #define NEW_EXCEPT noexcept(false) 14 | 15 | void iso_cpp_handler() { 16 | throw std::bad_alloc(); 17 | } 18 | 19 | extern "C" __attribute__((constructor(FIRST_CTOR + 1))) void iso_cpp(void) { 20 | std::set_new_handler(iso_cpp_handler); 21 | } 22 | #else 23 | #define NEW_EXCEPT 24 | #endif 25 | 26 | // These hooks override the basic new/delete 27 | // operators to use the iso_alloc API 28 | 29 | EXTERNAL_API FLATTEN void *operator new(size_t size) NEW_EXCEPT { 30 | return iso_alloc(size); 31 | } 32 | 33 | EXTERNAL_API FLATTEN void operator delete(void *p) noexcept { 34 | iso_free(p); 35 | } 36 | 37 | EXTERNAL_API FLATTEN void *operator new[](size_t size) NEW_EXCEPT { 38 | return iso_alloc(size); 39 | } 40 | 41 | EXTERNAL_API FLATTEN void operator delete[](void *p) noexcept { 42 | iso_free(p); 43 | } 44 | 45 | EXTERNAL_API FLATTEN void *operator new(size_t size, const std::nothrow_t &) noexcept { 46 | return iso_alloc(size); 47 | } 48 | 49 | EXTERNAL_API FLATTEN void *operator new[](size_t size, const std::nothrow_t &) noexcept { 50 | return iso_alloc(size); 51 | } 52 | 53 | FLATTEN void operator delete(void *ptr, size_t size) noexcept { 54 | return iso_free_size(ptr, size); 55 | } 56 | 57 | FLATTEN void operator delete[](void *ptr, size_t size) noexcept { 58 | return iso_free_size(ptr, size); 59 | } 60 | 61 | EXTERNAL_API FLATTEN void operator delete(void *ptr, const std::nothrow_t &) noexcept { 62 | return iso_free(ptr); 63 | } 64 | 65 | EXTERNAL_API FLATTEN void operator delete[](void *ptr, const std::nothrow_t &) noexcept { 66 | return iso_free(ptr); 67 | } 68 | 69 | EXTERNAL_API FLATTEN void *operator new(size_t size, std::align_val_t val) NEW_EXCEPT { 70 | return iso_alloc(size); 71 | } 72 | 73 | EXTERNAL_API FLATTEN void *operator new[](size_t size, std::align_val_t val) NEW_EXCEPT { 74 | return iso_alloc(size); 75 | } 76 | 77 | EXTERNAL_API FLATTEN void operator delete(void *p, std::align_val_t val) noexcept { 78 | iso_free(p); 79 | } 80 | 81 | EXTERNAL_API FLATTEN void operator delete[](void *p, std::align_val_t val) noexcept { 82 | iso_free(p); 83 | } 84 | 85 | EXTERNAL_API FLATTEN void *operator new(size_t size, std::align_val_t val, std::nothrow_t &) noexcept { 86 | return iso_alloc(size); 87 | } 88 | 89 | EXTERNAL_API FLATTEN void *operator new[](size_t size, std::align_val_t val, std::nothrow_t &) noexcept { 90 | return iso_alloc(size); 91 | } 92 | 93 | EXTERNAL_API FLATTEN void operator delete(void *p, std::align_val_t val, std::nothrow_t &) noexcept { 94 | iso_free(p); 95 | } 96 | 97 | EXTERNAL_API FLATTEN void operator delete[](void *p, std::align_val_t val, std::nothrow_t &) noexcept { 98 | iso_free(p); 99 | } 100 | 101 | #endif 102 | #endif 103 | -------------------------------------------------------------------------------- /src/iso_alloc_mem_tags.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_mem_tags.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | /* This file contains the software only implementation of 5 | * of memory tagging. See MEMORY_TAGGING.md for more info */ 6 | #include "iso_alloc_internal.h" 7 | 8 | /* Returns a tag for a pointer p, which must be untagged 9 | * when passed to this function */ 10 | INTERNAL_HIDDEN uint8_t _iso_alloc_get_mem_tag(void *p, iso_alloc_zone_t *zone) { 11 | #if MEMORY_TAGGING 12 | void *user_pages_start = UNMASK_USER_PTR(zone); 13 | 14 | if(user_pages_start > p || (user_pages_start + ZONE_USER_SIZE) < p) { 15 | LOG_AND_ABORT("Cannot get tag for pointer %p with wrong zone %d %p - %p", p, zone->index, user_pages_start, user_pages_start + ZONE_USER_SIZE); 16 | } 17 | 18 | uint8_t *_mtp = (user_pages_start - g_page_size - ROUND_UP_PAGE(zone->chunk_count * MEM_TAG_SIZE)); 19 | const uint64_t chunk_offset = (uint64_t) (p - user_pages_start); 20 | 21 | /* Ensure the pointer is a multiple of chunk size */ 22 | if(UNLIKELY((chunk_offset & (zone->chunk_size - 1)) != 0)) { 23 | LOG_AND_ABORT("Chunk offset %d not an alignment of %d", chunk_offset, zone->chunk_size); 24 | } 25 | 26 | _mtp += (chunk_offset / zone->chunk_size); 27 | 28 | return *_mtp; 29 | #else 30 | return 0; 31 | #endif 32 | } 33 | 34 | INTERNAL_HIDDEN void _iso_alloc_verify_tag(void *p, iso_alloc_zone_t *zone) { 35 | #if MEMORY_TAGGING 36 | if(UNLIKELY(p == NULL || zone == NULL)) { 37 | return; 38 | } 39 | 40 | void *untagged_p = (void *) ((uintptr_t) p & TAGGED_PTR_MASK); 41 | const uint64_t tag = _iso_alloc_get_mem_tag(untagged_p, zone); 42 | 43 | if(tag != ((uintptr_t) p & IS_TAGGED_PTR_MASK)) { 44 | LOG_AND_ABORT("Pointer %p has wrong tag %x, expected %x", p, ((uintptr_t) p & IS_TAGGED_PTR_MASK), tag); 45 | } 46 | #endif 47 | return; 48 | } 49 | 50 | INTERNAL_HIDDEN void *_tag_ptr(void *p, iso_alloc_zone_t *zone) { 51 | #if MEMORY_TAGGING 52 | if(UNLIKELY(p == NULL || zone == NULL)) { 53 | return NULL; 54 | } 55 | 56 | const uint64_t tag = _iso_alloc_get_mem_tag(p, zone); 57 | return (void *) ((tag << UNTAGGED_BITS) | (uintptr_t) p); 58 | #else 59 | return p; 60 | #endif 61 | } 62 | 63 | INTERNAL_HIDDEN void *_untag_ptr(void *p, iso_alloc_zone_t *zone) { 64 | #if MEMORY_TAGGING 65 | if(UNLIKELY(p == NULL || zone == NULL)) { 66 | return NULL; 67 | } 68 | 69 | void *untagged_p = (void *) ((uintptr_t) p & TAGGED_PTR_MASK); 70 | const uint64_t tag = _iso_alloc_get_mem_tag(untagged_p, zone); 71 | return (void *) ((tag << UNTAGGED_BITS) ^ (uintptr_t) p); 72 | #else 73 | return p; 74 | #endif 75 | } 76 | 77 | INTERNAL_HIDDEN bool _refresh_zone_mem_tags(iso_alloc_zone_t *zone) { 78 | #if MEMORY_TAGGING 79 | /* This implements a similar policy to zone retirement. 80 | * The only difference is that we refresh all tags at 81 | * %25 of the configured zone retirement age */ 82 | if(UNLIKELY(zone->af_count == 0 && zone->alloc_count > (zone->chunk_count << _root->zone_retirement_shf)) >> 2) { 83 | size_t s = ROUND_UP_PAGE(zone->chunk_count * MEM_TAG_SIZE); 84 | uint64_t *_mtp = (zone->user_pages_start - g_page_size - s); 85 | size_t tms = s / sizeof(uint64_t); 86 | 87 | for(uint64_t o = 0; o < tms; o++) { 88 | _mtp[o] = us_rand_uint64(&_root->seed); 89 | } 90 | 91 | return true; 92 | } 93 | #endif 94 | return false; 95 | } 96 | -------------------------------------------------------------------------------- /src/iso_alloc_printf.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_printf.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | #include 6 | 7 | #if __GNUC__ && !__clang__ 8 | #pragma GCC diagnostic ignored "-Wstringop-truncation" 9 | #pragma GCC diagnostic ignored "-Wstringop-overflow" 10 | #endif 11 | 12 | #define INTERNAL_HIDDEN __attribute__((visibility("hidden"))) 13 | 14 | /* This primitive printf implementation is only ever called 15 | * called from the LOG and LOG_AND_ABORT macros. We need to 16 | * be able to print basic log messages without invoking 17 | * malloc() or we run the risk of using a corrupted heap */ 18 | static int8_t fmt_buf[64]; 19 | static const int8_t asc_hex[] = "0123456789abcdef"; 20 | 21 | INTERNAL_HIDDEN int8_t *_fmt(uint64_t n, uint32_t base) { 22 | int8_t *ptr; 23 | uint32_t count = 0; 24 | 25 | __builtin_memset(fmt_buf, 0x0, sizeof(fmt_buf)); 26 | ptr = &fmt_buf[63]; 27 | 28 | while(n != 0) { 29 | *--ptr = asc_hex[n % base]; 30 | n /= base; 31 | count++; 32 | }; 33 | 34 | if(count == 0) { 35 | ptr = (int8_t *) "0"; 36 | } 37 | 38 | return ptr; 39 | } 40 | 41 | INTERNAL_HIDDEN void _iso_alloc_printf(int32_t fd, const char *f, ...) { 42 | if(UNLIKELY(f == NULL)) { 43 | return; 44 | } 45 | 46 | uint64_t i; 47 | uint32_t j; 48 | char *s; 49 | va_list arg; 50 | va_start(arg, f); 51 | char out[65535]; 52 | char *p = out; 53 | __builtin_memset(p, 0x0, sizeof(out)); 54 | 55 | for(const char *idx = f; *idx != '\0'; idx++) { 56 | if(p >= (char *) (out + sizeof(out))) { 57 | break; 58 | } 59 | 60 | while(*idx != '%' && *idx != '\0') { 61 | *p = *idx; 62 | p++; 63 | 64 | if(*idx == '\n') { 65 | break; 66 | } 67 | 68 | idx++; 69 | } 70 | 71 | idx++; 72 | 73 | if(*idx == '\0') { 74 | break; 75 | } 76 | 77 | if(*idx == 'x' || *idx == 'p') { 78 | i = va_arg(arg, int64_t); 79 | s = (char *) _fmt(i, 16); 80 | __builtin_strncpy(p, s, strlen(s)); 81 | p += strlen(s); 82 | } else if(*idx == 'd' || *idx == 'u') { 83 | j = va_arg(arg, int32_t); 84 | 85 | if(0 > j) { 86 | j = -j; 87 | *p = '-'; 88 | p++; 89 | } 90 | 91 | s = (char *) _fmt(j, 10); 92 | 93 | __builtin_strncpy(p, s, strlen(s)); 94 | p += strlen(s); 95 | } else if(*idx == 'l') { 96 | if(*(idx + 1) == 'd' || *(idx + 1) == 'u') { 97 | idx++; 98 | } 99 | 100 | i = va_arg(arg, int64_t); 101 | 102 | if(0 > i) { 103 | i = -i; 104 | *p = '-'; 105 | p++; 106 | } 107 | 108 | s = (char *) _fmt(i, 10); 109 | 110 | __builtin_strncpy(p, s, strlen(s)); 111 | p += strlen(s); 112 | } else if(*idx == 's') { 113 | s = va_arg(arg, char *); 114 | 115 | if(s == NULL) { 116 | break; 117 | } 118 | 119 | __builtin_strncpy(p, s, strlen(s)); 120 | p += strlen(s); 121 | } 122 | } 123 | 124 | (void) !write(fd, out, strlen(out)); 125 | va_end(arg); 126 | } 127 | -------------------------------------------------------------------------------- /MEMORY_TAGGING.md: -------------------------------------------------------------------------------- 1 | # Isolation Alloc Memory Tagging 2 | 3 | IsoAlloc supports a software only memory tagging model that is very similar to Chromes [MTECheckedPtr](https://docs.google.com/document/d/1ph7iOorkGqTuETFZp-xvHV4L2rYootuz1ThzAAoGe30/edit?usp=sharing). This technique for pointer protection is inspired by ARM's upcoming Memory Tagging Extension (MTE) due in ARM v8.5-A. ARM MTE is a comprehensive hardware based solution for detecting memory safety issues in release builds of software with very little overhead. ARM MTE uses the Top Byte Ignore (TBI) feature to transparently tag pointers with metadata or a 'tag'. With ARM MTE this tag is mostly transparently checked and removed in hardware. The feature implemented here in IsoAlloc is conceptually very similar except that tagging and untagging of pointers happens in software. 4 | 5 | You can read more details about this feature [here](https://struct.github.io/pointer_tagging.html). 6 | 7 | Note that this feature is experimental, off by default, and the APIs are subject to change! 8 | 9 | ## Overview 10 | 11 | We can't achieve the granularity provided by ARM MTE in software alone but we can implement a pointer protection mechanism by generating 1 byte of meta data per chunk managed by a private IsoAlloc zone, adding that tag to the pointer, and verifying it before dereferencing it. This feature is enabled or disabled with `MEMORY_TAGGING` in the Makefile. 12 | 13 | This 1 byte tag will be added to the LSB of the pointer returned by calling `iso_alloc_from_zone_tagged`. IsoAlloc also provides a C API for tagging and untagging pointers retrieved by `iso_alloc_from_zone`. These functions are `iso_alloc_tag_ptr` and `iso_alloc_untag_ptr`. 14 | 15 | Using these primivite operations we can build a simple C++ smart pointer that transparently tags, untags, and dereferences a tagged pointer. 16 | 17 | ``` 18 | template 19 | class IsoAllocPtr { 20 | public: 21 | IsoAllocPtr(iso_alloc_zone_handle *handle, T *ptr) : eptr(nullptr), zone(handle) { 22 | eptr = iso_alloc_tag_ptr((void *) ptr, zone); 23 | } 24 | 25 | T *operator->() { 26 | T *p = reinterpret_cast(iso_alloc_untag_ptr(eptr, zone)); 27 | return p; 28 | } 29 | 30 | void *eptr; 31 | iso_alloc_zone_handle *zone; 32 | }; 33 | ``` 34 | 35 | These APIs can also be used in C, but this requires tagging and untagging pointers manually before using them. 36 | 37 | ## Implementation Details 38 | 39 | * Currently only private zones can make use of memory tagging in IsoAlloc 40 | * All tag data is stored below user pages with a guard page allocated in between 41 | * A single 1 byte tag is generated per chunk in a private zone, this means the memory required to hold tags is larger for private zones holding smaller chunk sizes. For zones holding chunks 1024 byte or larger only a single of page of memory is required for tags as there are only 4096 possible 1024 byte chunks in 4mb IsoAlloc zone. The maximum amount of memory needed is for 16 byte chunks which requires 64 pages because there are 262144 possible chunks with a 4mb zone. 42 | * Tags are 1 byte in size and randomly chosen, they are added to the LSB of a pointer (e.g. tag value `0xed`, tagged pointer `0xed000b8066c1a000`, untagged pointer `0xb8066c1a000`) 43 | * Tags are refreshed whenever the private zone has reached %25 of 'retirement age' (defined in conf.h as `ZONE_ALLOC_RETIRE`) with 0 current allocations 44 | 45 | ## Examples 46 | 47 | * The C API test can be found [here](tests/tagged_ptr_test.c) 48 | * The C++ smart pointer test can be found [here](tests/tagged_ptr_test.cpp) 49 | -------------------------------------------------------------------------------- /tests/tagged_ptr_test.cpp: -------------------------------------------------------------------------------- 1 | // iso_alloc tagged_ptr_test.cpp 2 | // Copyright 2023 - chris.rohlf@gmail.com 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "iso_alloc.h" 9 | 10 | #if !MEMORY_TAGGING 11 | #error "This test intended to be run with -DMEMORY_TAGGING=1" 12 | #endif 13 | 14 | iso_alloc_zone_handle *_zone_handle; 15 | constexpr uint32_t _str_size = 32; 16 | 17 | class Base { 18 | public: 19 | int32_t type; 20 | char *str; 21 | }; 22 | 23 | class Derived : Base { 24 | public: 25 | Derived(int32_t i) { 26 | count = i; 27 | type = count * count; 28 | str = (char *) iso_alloc(_str_size); 29 | memset(str, 0x41, _str_size); 30 | } 31 | 32 | ~Derived() { 33 | count = 0; 34 | type = 0; 35 | iso_free(str); 36 | } 37 | 38 | char *GetStr() { 39 | return str; 40 | } 41 | 42 | static Derived *Create(int32_t i) { 43 | // Allocate a chunk of memory from a private zone 44 | // Only IsoAlloc private zones have memory tags associated 45 | // with each chunk in the zone. Thats why we use this interface 46 | // and not the standard overloaded operator new or malloc 47 | void *b = iso_alloc_from_zone(_zone_handle); 48 | 49 | if(b == nullptr) { 50 | return nullptr; 51 | } 52 | 53 | // Construct an object of type Derived in that chunk 54 | auto d = new(b) Derived(i); 55 | 56 | // Return a pointer to the new Derived object instance 57 | return d; 58 | } 59 | 60 | uint32_t count; 61 | }; 62 | 63 | // This is a working example of how to construct a C++ 64 | // smart pointer that utilizes memory tagging applied 65 | // to each IsoAlloc private zone 66 | template 67 | class IsoAllocPtr { 68 | public: 69 | IsoAllocPtr(iso_alloc_zone_handle *handle, T *ptr) : eptr(nullptr), zone(handle) { 70 | eptr = iso_alloc_tag_ptr((void *) ptr, zone); 71 | } 72 | 73 | T *operator->() { 74 | T *p = reinterpret_cast(iso_alloc_untag_ptr(eptr, zone)); 75 | return p; 76 | } 77 | 78 | void *eptr; 79 | iso_alloc_zone_handle *zone; 80 | }; 81 | 82 | int main(int argc, char *argv[]) { 83 | // Create a private IsoAlloc zone 84 | _zone_handle = iso_alloc_new_zone(sizeof(Derived)); 85 | 86 | for(int32_t i = 0; i < 65535; i++) { 87 | auto d = Derived::Create(i); 88 | 89 | if(d == nullptr) { 90 | abort(); 91 | } 92 | 93 | // Wrap the new object in an IsoAllocPtr. The template 94 | // needs to know about our zone handle and the object 95 | // we want to manipulate through this pointer 96 | IsoAllocPtr e(_zone_handle, d); 97 | 98 | // Use the IsoAllocPtr operator -> the same way we 99 | // would a raw pointer to the Derived object 100 | if(e->GetStr() == nullptr) { 101 | abort(); 102 | } 103 | 104 | #if ENABLE_ASAN 105 | // If you need to make ASAN happy this ugly code will work 106 | // because it invokes the destructor manually and then calls 107 | // the appropriate IsoAlloc free function 108 | d->~Derived(); 109 | iso_free_from_zone(d, _zone_handle); 110 | #else 111 | // Tools like AddressSanitizer will throw an error 112 | // here because it doesn't think the underlying chunk was 113 | // allocated with malloc() because we used placement new 114 | // but IsoAlloc properly handles the iso_free() call 115 | // invoked from operator delete 116 | 117 | delete d; 118 | #endif 119 | } 120 | 121 | // Destroy the private zone we created 122 | iso_alloc_destroy_zone(_zone_handle); 123 | 124 | return 0; 125 | } 126 | -------------------------------------------------------------------------------- /src/malloc_hook.c: -------------------------------------------------------------------------------- 1 | /* malloc_hook.c - Provides low level hooks for malloc/free 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | /* The MALLOC_HOOK configuration allows us to hook the usual 8 | * malloc interfaces and redirect them to the iso_alloc API. 9 | * This may not be desired, especially if you intend to call 10 | * iso_alloc interfaces directly. These hook points are 11 | * useful because they allow us to use iso_alloc even in 12 | * existing and closed source programs that call malloc/free 13 | */ 14 | #if MALLOC_HOOK 15 | 16 | EXTERNAL_API void *__libc_malloc(size_t s) { 17 | return iso_alloc(s); 18 | } 19 | 20 | EXTERNAL_API void *malloc(size_t s) { 21 | return iso_alloc(s); 22 | } 23 | 24 | EXTERNAL_API void __libc_free(void *p) { 25 | iso_free(p); 26 | } 27 | 28 | EXTERNAL_API void free(void *p) { 29 | iso_free(p); 30 | } 31 | 32 | EXTERNAL_API void *__libc_calloc(size_t n, size_t s) { 33 | return iso_calloc(n, s); 34 | } 35 | 36 | EXTERNAL_API void *calloc(size_t n, size_t s) { 37 | return iso_calloc(n, s); 38 | } 39 | 40 | EXTERNAL_API void *__libc_realloc(void *p, size_t s) { 41 | return iso_realloc(p, s); 42 | } 43 | 44 | EXTERNAL_API void *realloc(void *p, size_t s) { 45 | return iso_realloc(p, s); 46 | } 47 | 48 | EXTERNAL_API void *__libc_reallocarray(void *p, size_t n, size_t s) { 49 | return iso_reallocarray(p, n, s); 50 | } 51 | 52 | EXTERNAL_API void *reallocarray(void *p, size_t n, size_t s) { 53 | return iso_reallocarray(p, n, s); 54 | } 55 | 56 | EXTERNAL_API int __posix_memalign(void **r, size_t a, size_t s) { 57 | if(is_pow2(a) == false) { 58 | *r = NULL; 59 | return EINVAL; 60 | } 61 | 62 | if(s < a) { 63 | s = a; 64 | } 65 | 66 | *r = iso_alloc(s); 67 | 68 | if(*r != NULL) { 69 | return 0; 70 | } else { 71 | return ENOMEM; 72 | } 73 | } 74 | 75 | EXTERNAL_API int posix_memalign(void **r, size_t alignment, size_t s) { 76 | return __posix_memalign(r, alignment, s); 77 | } 78 | 79 | EXTERNAL_API void *__libc_memalign(size_t alignment, size_t s) { 80 | /* All iso_alloc allocations are 8 byte aligned */ 81 | return iso_alloc(s); 82 | } 83 | 84 | EXTERNAL_API void *aligned_alloc(size_t alignment, size_t s) { 85 | /* All iso_alloc allocations are 8 byte aligned */ 86 | return iso_alloc(s); 87 | } 88 | 89 | EXTERNAL_API void *memalign(size_t alignment, size_t s) { 90 | /* All iso_alloc allocations are 8 byte aligned */ 91 | return iso_alloc(s); 92 | } 93 | 94 | #if __ANDROID__ || __FreeBSD__ 95 | EXTERNAL_API size_t malloc_usable_size(const void *ptr) { 96 | return iso_chunksz((void *) ptr); 97 | } 98 | #elif __APPLE__ 99 | EXTERNAL_API size_t malloc_size(const void *ptr) { 100 | return iso_chunksz((void *) ptr); 101 | } 102 | 103 | EXTERNAL_API size_t malloc_good_size(size_t size) { 104 | return ALIGN_SZ_UP(size); 105 | } 106 | #else 107 | EXTERNAL_API size_t malloc_usable_size(void *ptr) { 108 | return iso_chunksz(ptr); 109 | } 110 | #endif 111 | 112 | static void *libc_malloc(size_t s, const void *caller) { 113 | return iso_alloc(s); 114 | } 115 | static void *libc_realloc(void *ptr, size_t s, const void *caller) { 116 | return iso_realloc(ptr, s); 117 | } 118 | static void libc_free(void *ptr, const void *caller) { 119 | iso_free(ptr); 120 | } 121 | static void *libc_memalign(size_t alignment, size_t s, const void *caller) { 122 | return iso_alloc(s); 123 | } 124 | 125 | #if !__ANDROID__ 126 | void *(*__malloc_hook)(size_t, const void *) = &libc_malloc; 127 | void *(*__realloc_hook)(void *, size_t, const void *) = &libc_realloc; 128 | void (*__free_hook)(void *, const void *) = &libc_free; 129 | void *(*__memalign_hook)(size_t, size_t, const void *) = &libc_memalign; 130 | #endif 131 | #endif 132 | -------------------------------------------------------------------------------- /tests/thread_tests.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc thread_tests.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | 7 | static const uint32_t allocation_sizes[] = {ZONE_16, ZONE_32, ZONE_64, ZONE_128, 8 | ZONE_256, ZONE_512, ZONE_1024, 9 | ZONE_2048, ZONE_4096, ZONE_8192}; 10 | 11 | static const uint32_t array_sizes[] = {16, 32, 256, 512, 1024, 2048, 8192, 16384}; 12 | 13 | /* This test can be repurposed for benchmarking 14 | * against other allocators using LD_PRELOAD */ 15 | #if MALLOC_PERF_TEST 16 | #define alloc_mem malloc 17 | #define calloc_mem calloc 18 | #define realloc_mem realloc 19 | #define free_mem free 20 | #else 21 | #define alloc_mem iso_alloc 22 | #define calloc_mem iso_calloc 23 | #define realloc_mem iso_realloc 24 | #define free_mem iso_free 25 | #endif 26 | 27 | const int32_t ALLOC = 0; 28 | const int32_t REALLOC = 1; 29 | const int32_t CALLOC = 2; 30 | 31 | uint32_t times; 32 | 33 | void *allocate(void *_type) { 34 | size_t array_size; 35 | size_t allocation_size; 36 | int32_t alloc_count = 0; 37 | int32_t type = *((int32_t *) _type); 38 | 39 | for(int o = 0; o < times; o++) { 40 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 41 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 42 | array_size = array_sizes[i]; 43 | allocation_size = allocation_sizes[z]; 44 | void *p[array_size]; 45 | memset(p, 0x0, array_size); 46 | 47 | for(int y = 0; y < array_size; y++) { 48 | if(allocation_size == 0) { 49 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 50 | } 51 | 52 | void *d = NULL; 53 | 54 | if(type == ALLOC) { 55 | p[y] = alloc_mem(allocation_size); 56 | } else if(type == REALLOC) { 57 | d = (void *) alloc_mem(allocation_size / 2); 58 | p[y] = realloc_mem(d, allocation_size); 59 | } else if(type == CALLOC) { 60 | p[y] = calloc_mem(1, allocation_size); 61 | } 62 | 63 | if(p[y] == NULL) { 64 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations", allocation_size, alloc_count); 65 | } 66 | 67 | alloc_count++; 68 | memset(p[y], 0x41, allocation_size); 69 | 70 | /* Randomly free some allocations */ 71 | if((rand() % 5) > 1) { 72 | free_mem(p[y]); 73 | p[y] = NULL; 74 | } 75 | } 76 | 77 | /* Free the remaining allocations */ 78 | for(int r = 0; r < array_size; r++) { 79 | if(p[r] != NULL) { 80 | free_mem(p[r]); 81 | } 82 | } 83 | } 84 | } 85 | } 86 | 87 | iso_flush_caches(); 88 | 89 | return OK; 90 | } 91 | 92 | void run_test_threads(void) { 93 | #if THREAD_SUPPORT 94 | pthread_t t; 95 | pthread_t tt; 96 | pthread_t ttt; 97 | pthread_create(&t, NULL, allocate, (void *) &ALLOC); 98 | pthread_create(&tt, NULL, allocate, (void *) &REALLOC); 99 | pthread_create(&ttt, NULL, allocate, (void *) &CALLOC); 100 | pthread_join(t, NULL); 101 | pthread_join(tt, NULL); 102 | pthread_join(ttt, NULL); 103 | #endif 104 | } 105 | 106 | int main(int argc, char *argv[]) { 107 | if(argc != 2) { 108 | times = 1; 109 | } else { 110 | times = atol(argv[1]); 111 | } 112 | 113 | run_test_threads(); 114 | iso_alloc_detect_leaks(); 115 | iso_verify_zones(); 116 | return OK; 117 | } 118 | -------------------------------------------------------------------------------- /tests/tests.cpp: -------------------------------------------------------------------------------- 1 | /* iso_alloc tests.cpp 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include 5 | #include 6 | #include 7 | #if THREAD_SUPPORT 8 | #include 9 | #endif 10 | #include "iso_alloc.h" 11 | #include "iso_alloc_internal.h" 12 | 13 | using namespace std; 14 | 15 | static const uint32_t allocation_sizes[] = {ZONE_16, ZONE_32, ZONE_64, ZONE_128, 16 | ZONE_256, ZONE_512, ZONE_1024, 17 | ZONE_2048, ZONE_4096, ZONE_8192}; 18 | 19 | static const uint32_t array_sizes[] = {16, 32, 64, 128, 256, 512, 1024, 20 | 2048, 4096, 8192, 16384, 32768, 65536}; 21 | 22 | int32_t alloc_count; 23 | 24 | class Base { 25 | public: 26 | int32_t type; 27 | char *str; 28 | }; 29 | 30 | class Derived : Base { 31 | public: 32 | Derived(int32_t i) { 33 | count = i; 34 | type = count * count; 35 | str = (char *) iso_alloc(1024); 36 | memcpy(str, "AAAAA", 5); 37 | } 38 | 39 | ~Derived() { 40 | count = 0; 41 | type = 0; 42 | iso_free(str); 43 | } 44 | 45 | void operator delete(void *p) {} 46 | 47 | void operator delete(void *p, void *h) { 48 | Derived *d = static_cast(p); 49 | d->~Derived(); 50 | uint8_t *plc = static_cast(h); 51 | delete[] plc; 52 | } 53 | 54 | #if __cplusplus >= 202002L 55 | /* Note this need to be class member */ 56 | void operator delete(Derived *ptr, std::destroying_delete_t) { 57 | ptr->~Derived(); 58 | ::operator delete(ptr); 59 | } 60 | #endif 61 | 62 | uint32_t count; 63 | }; 64 | 65 | int allocate(size_t array_size, size_t allocation_size) { 66 | std::vector p(array_size); 67 | memset(p.data(), 0x0, p.size()); 68 | 69 | for(size_t i = 0; i < array_size; i++) { 70 | if(allocation_size == 0) { 71 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 72 | } 73 | 74 | p[i] = new uint8_t[allocation_size]; 75 | 76 | if(p[i] == NULL) { 77 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations", allocation_size, alloc_count); 78 | } 79 | 80 | alloc_count++; 81 | 82 | /* Randomly free some allocations */ 83 | if((rand() % 2) == 1) { 84 | delete[](uint8_t *) p[i]; 85 | p[i] = NULL; 86 | } 87 | } 88 | 89 | /* Free the remaining allocations */ 90 | for(size_t i = 0; i < array_size; i++) { 91 | if(p[i] != NULL) { 92 | delete[](uint8_t *) p[i]; 93 | } 94 | } 95 | 96 | return OK; 97 | } 98 | 99 | int main(int argc, char *argv[]) { 100 | char *a = (char *) iso_alloc(100); 101 | iso_free(a); 102 | auto d = std::make_unique(100); 103 | constexpr size_t array_sizeslen = sizeof(array_sizes) / sizeof(uint32_t); 104 | 105 | for(size_t i = 0; i < array_sizeslen; i++) { 106 | for(size_t z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 107 | allocate(array_sizes[i], allocation_sizes[z]); 108 | } 109 | } 110 | 111 | for(size_t i = 0; i < array_sizeslen; i++) { 112 | allocate(array_sizes[i], 0); 113 | Base *b = new Base(); 114 | delete b; 115 | auto d = std::make_unique(i); 116 | } 117 | 118 | for(size_t i = 0; i < array_sizeslen; i++) { 119 | allocate(array_sizes[i], 0); 120 | auto *ptr = new uint8_t[sizeof(Derived)]; 121 | auto *d = new(ptr) Derived(i); 122 | Derived::operator delete(d, ptr); 123 | } 124 | 125 | #if THREAD_SUPPORT && __linux__ 126 | for(size_t i = 0; i < 4; i++) { 127 | std::array t; 128 | for(size_t z = 0; z < 4; z++) { 129 | t[i] = std::thread(allocate, array_sizes[i], allocation_sizes[z]); 130 | t[i].join(); 131 | } 132 | } 133 | #endif 134 | 135 | iso_verify_zones(); 136 | 137 | return 0; 138 | } 139 | -------------------------------------------------------------------------------- /tests/interfaces_test.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc interfaces_test.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | #define _POSIX_C_SOURCE 200809L 4 | #include 5 | #include 6 | #include "iso_alloc.h" 7 | #include "iso_alloc_internal.h" 8 | 9 | #if HEAP_PROFILER 10 | #include "iso_alloc_profiler.h" 11 | #endif 12 | 13 | int main(int argc, char *argv[]) { 14 | /* Test iso_calloc() */ 15 | void *p = iso_calloc(10, 2); 16 | 17 | if(p == NULL) { 18 | LOG_AND_ABORT("iso_calloc failed") 19 | } 20 | 21 | p = iso_calloc(INT_MAX - 1, 4); 22 | 23 | if(p != NULL) { 24 | LOG_AND_ABORT("iso_calloc over check failed") 25 | } 26 | 27 | iso_free(p); 28 | 29 | /* Test iso_alloc() */ 30 | p = iso_alloc(128); 31 | 32 | if(p == NULL) { 33 | LOG_AND_ABORT("iso_alloc failed") 34 | } 35 | 36 | memset(p, 0x41, 128); 37 | 38 | uint8_t *pv = p; 39 | 40 | if(pv[10] != 0x41 || pv[100] != 0x41) { 41 | LOG_AND_ABORT("Chunk allocated at %p does not contain expected data! %x %x", p, pv[10], pv[100]); 42 | } 43 | 44 | /* Test iso_realloc */ 45 | p = iso_realloc(p, 1024); 46 | 47 | if(p == NULL) { 48 | LOG_AND_ABORT("iso_realloc failed") 49 | } 50 | 51 | /* Test iso_reallocarray */ 52 | if(iso_reallocarray(NULL, SIZE_MAX, SIZE_MAX) != NULL) { 53 | LOG_AND_ABORT("iso_reallocarray should have overflown"); 54 | } 55 | 56 | p = iso_reallocarray(p, 16, 16); 57 | 58 | if(p == NULL) { 59 | LOG_AND_ABORT("iso_reallocarray failed") 60 | } 61 | 62 | iso_free(p); 63 | 64 | p = iso_alloc(1024); 65 | 66 | assert((iso_chunksz(p)) >= 1024); 67 | 68 | iso_free_permanently(p); 69 | 70 | iso_alloc_zone_handle *zone = iso_alloc_new_zone(256); 71 | 72 | if(zone == NULL) { 73 | LOG_AND_ABORT("Could not create a zone"); 74 | } 75 | 76 | p = iso_alloc_from_zone(zone); 77 | 78 | if(p == NULL) { 79 | LOG_AND_ABORT("Could not allocate from private zone"); 80 | } 81 | 82 | iso_free_from_zone(p, zone); 83 | 84 | iso_alloc_destroy_zone(zone); 85 | 86 | p = iso_alloc(1024); 87 | 88 | if(p == NULL) { 89 | LOG_AND_ABORT("iso_alloc failed"); 90 | } 91 | 92 | memset(p, 0x0, 1024); 93 | 94 | void *r = iso_strdup(p); 95 | 96 | if(r == NULL) { 97 | LOG_AND_ABORT("iso_strdup failed"); 98 | } 99 | 100 | iso_free(p); 101 | iso_free(r); 102 | 103 | void *sz = iso_alloc(8192); 104 | iso_free_size(sz, 8192); 105 | 106 | uint8_t *ap = NULL; 107 | int rr = 0; 108 | rr = posix_memalign((void **) &ap, 16, 64); 109 | if(ap == NULL || rr != 0 || ((uintptr_t) ap % 16) != 0) { 110 | LOG_AND_ABORT("ap %p | %d != 0", ap, (uintptr_t) ap % 16); 111 | } 112 | free(ap); 113 | 114 | rr = posix_memalign((void **) &ap, 256, 16); 115 | if(ap == NULL || rr != 0 || ((uintptr_t) ap % 256) != 0) { 116 | LOG_AND_ABORT("ap %p | %d != 0", ap, (uintptr_t) ap % 256); 117 | } 118 | free(ap); 119 | 120 | #if HEAP_PROFILER 121 | iso_alloc_traces_t at[BACKTRACE_DEPTH_SZ]; 122 | size_t alloc_trace_count = iso_get_alloc_traces(at); 123 | 124 | for(int32_t i = 0; i < alloc_trace_count; i++) { 125 | iso_alloc_traces_t *abts = &at[i]; 126 | LOG("alloc_backtrace=%d,backtrace_hash=0x%x,calls=%d,lower_bound_size=%d,upper_bound_size=%d,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n", 127 | i, abts->backtrace_hash, abts->call_count, abts->lower_bound_size, abts->upper_bound_size, abts->callers[0], abts->callers[1], 128 | abts->callers[2], abts->callers[3], abts->callers[4], abts->callers[5], abts->callers[6], abts->callers[7]); 129 | } 130 | 131 | iso_free_traces_t ft[BACKTRACE_DEPTH_SZ]; 132 | size_t free_trace_count = iso_get_free_traces(ft); 133 | 134 | for(int32_t i = 0; i < free_trace_count; i++) { 135 | iso_free_traces_t *fbts = &ft[i]; 136 | LOG("free_backtrace=%d,backtrace_hash=0x%x,calls=%d,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n", 137 | i, fbts->backtrace_hash, fbts->call_count, fbts->callers[0], fbts->callers[1], fbts->callers[2], fbts->callers[3], 138 | fbts->callers[4], fbts->callers[5], fbts->callers[6], fbts->callers[7]); 139 | } 140 | 141 | iso_alloc_reset_traces(); 142 | #endif 143 | 144 | iso_flush_caches(); 145 | iso_verify_zones(); 146 | 147 | return 0; 148 | } 149 | -------------------------------------------------------------------------------- /src/iso_alloc_search.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_search.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | 6 | /* Search all zones for either the first instance of a pointer 7 | * value and return it or overwrite the first potentially 8 | * dangling pointer with the address of an unmapped page */ 9 | INTERNAL_HIDDEN void *_iso_alloc_ptr_search(void *n, bool poison) { 10 | uint8_t *search = NULL; 11 | uint8_t *end = NULL; 12 | const size_t zones_used = _root->zones_used; 13 | 14 | #if MEMORY_TAGGING || (ARM_MTE == 1) 15 | /* It should be safe to clear these upper bits even 16 | * if the pointer wasn't returned by IsoAlloc. */ 17 | n = (void *) ((uintptr_t) n & TAGGED_PTR_MASK); 18 | #endif 19 | 20 | for(int32_t i = 0; i < zones_used; i++) { 21 | iso_alloc_zone_t *zone = &_root->zones[i]; 22 | 23 | search = UNMASK_USER_PTR(zone); 24 | end = search + ZONE_USER_SIZE; 25 | 26 | while(search <= (uint8_t *) (end - sizeof(uint64_t))) { 27 | if(LIKELY((uint64_t) * (uint64_t *) search != (uint64_t) n)) { 28 | search++; 29 | } else { 30 | if(poison == false) { 31 | return search; 32 | } else { 33 | #if UAF_PTR_PAGE 34 | *(uint64_t *) search = (uint64_t) (_root->uaf_ptr_page); 35 | return search; 36 | #endif 37 | } 38 | } 39 | } 40 | } 41 | 42 | return NULL; 43 | } 44 | 45 | #if EXPERIMENTAL 46 | /* These functions are all experimental and subject to change */ 47 | 48 | /* Search the stack for pointers into IsoAlloc zones. If 49 | * stack_start is NULL then this function starts searching 50 | * from the environment variables which should be mapped 51 | * just below the stack */ 52 | INTERNAL_HIDDEN void _iso_alloc_search_stack(uint8_t *stack_start) { 53 | if(stack_start == NULL) { 54 | stack_start = (uint8_t *) ENVIRON; 55 | 56 | if(stack_start == NULL) { 57 | return; 58 | } 59 | } 60 | 61 | /* The end of our stack is the address of this local */ 62 | uint8_t *stack_end; 63 | stack_end = (uint8_t *) &stack_end; 64 | const uint64_t tps = UINT32_MAX; 65 | 66 | uint8_t *current = stack_start; 67 | uint64_t max_ptr = 0x800000000000; 68 | 69 | while(current > stack_end) { 70 | /* Iterating through zones is expensive so this quickly 71 | * decides on values that are unlikely to be pointers 72 | * into zone user pages */ 73 | if(*(int64_t *) current <= tps || *(int64_t *) current >= max_ptr || (*(int64_t *) current & 0xffffff) == 0) { 74 | // LOG("Ignoring pointer start=%p end=%p stack_ptr=%p value=%lx", stack_start, stack_end, current, *(int64_t *)current); 75 | current--; 76 | continue; 77 | } 78 | 79 | uint64_t *p = (uint64_t *) *(int64_t *) current; 80 | iso_alloc_zone_t *zone = iso_find_zone_range(p); 81 | current--; 82 | 83 | if(zone != NULL) { 84 | UNMASK_ZONE_PTRS(zone); 85 | 86 | /* Ensure the pointer is properly aligned */ 87 | if(UNLIKELY(IS_ALIGNED((uintptr_t) p) != 0)) { 88 | LOG_AND_ABORT("Chunk at 0x%p of zone[%d] is not %d byte aligned", p, zone->index, SZ_ALIGNMENT); 89 | } 90 | 91 | uint64_t chunk_offset = (uint64_t) (p - (uint64_t *) zone->user_pages_start); 92 | LOG("zone[%d] user_pages_start=%p value=%p %lu %d", zone->index, zone->user_pages_start, p, chunk_offset, zone->chunk_size); 93 | 94 | /* Ensure the pointer is a multiple of chunk size */ 95 | if(UNLIKELY((chunk_offset % zone->chunk_size) != 0)) { 96 | LOG("Chunk at %p is not a multiple of zone[%d] chunk size %d. Off by %" PRIu64 " bits", p, zone->index, zone->chunk_size, (chunk_offset % zone->chunk_size)); 97 | MASK_ZONE_PTRS(zone); 98 | continue; 99 | } 100 | 101 | size_t chunk_number = (chunk_offset / zone->chunk_size); 102 | bit_slot_t bit_slot = (chunk_number * BITS_PER_CHUNK); 103 | bit_slot_t dwords_to_bit_slot = (bit_slot / BITS_PER_QWORD); 104 | 105 | if(UNLIKELY((zone->bitmap_start + dwords_to_bit_slot) >= (zone->bitmap_start + zone->bitmap_size))) { 106 | LOG("Cannot calculate this chunks location in the bitmap %p", p); 107 | MASK_ZONE_PTRS(zone); 108 | continue; 109 | } 110 | 111 | int64_t which_bit = (bit_slot % BITS_PER_QWORD); 112 | bitmap_index_t *bm = (bitmap_index_t *) zone->bitmap_start; 113 | bitmap_index_t b = bm[dwords_to_bit_slot]; 114 | 115 | if(UNLIKELY((GET_BIT(b, which_bit)) == 0)) { 116 | LOG("Chunk at %p is in-use", p); 117 | } else { 118 | LOG("Chunk at %p is free", p); 119 | } 120 | 121 | MASK_ZONE_PTRS(zone); 122 | } 123 | 124 | zone = iso_find_zone_bitmap_range(p); 125 | 126 | if(zone != NULL) { 127 | LOG_AND_ABORT("Pointer to bitmap for zone[%d] found in stack @ %p", zone->index, p); 128 | } 129 | } 130 | } 131 | #endif 132 | -------------------------------------------------------------------------------- /include/iso_alloc.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #if !__aarch64__ && !__x86_64__ 12 | #pragma error "IsoAlloc is untested and unsupported on 32 bit platforms" 13 | #endif 14 | 15 | static_assert(sizeof(size_t) == 8, "IsoAlloc requires 64 bit size_t"); 16 | 17 | #ifndef EXTERNAL_API 18 | #define EXTERNAL_API __attribute__((visibility("default"))) 19 | #endif 20 | 21 | #define NO_DISCARD __attribute__((warn_unused_result)) 22 | 23 | #define MALLOC_ATTR __attribute__((malloc)) 24 | #define ALLOC_SIZE __attribute__((alloc_size(1))) 25 | #define CALLOC_SIZE __attribute__((alloc_size(1, 2))) 26 | #define REALLOC_SIZE __attribute__((alloc_size(2))) 27 | #define ZONE_ALLOC_SIZE __attribute__((alloc_size(2))) 28 | #define ASSUME_ALIGNED __attribute__((assume_aligned(8))) 29 | 30 | #if MASK_PTRS 31 | #define UNMASK_ZONE_HANDLE(zone) \ 32 | zone = (iso_alloc_zone_handle *) ((uintptr_t) zone ^ (uintptr_t) _root->zone_handle_mask); 33 | #else 34 | #define UNMASK_ZONE_HANDLE(zone) zone = zone; 35 | #endif 36 | 37 | typedef void iso_alloc_zone_handle; 38 | 39 | #if CPP_SUPPORT 40 | extern "C" { 41 | #endif 42 | 43 | /* See https://github.com/struct/isoalloc/blob/master/README.md#api for 44 | * detailed information on how to use these functions */ 45 | EXTERNAL_API void iso_alloc_initialize(void); 46 | EXTERNAL_API void iso_alloc_destroy(void); 47 | EXTERNAL_API NO_DISCARD MALLOC_ATTR ALLOC_SIZE ASSUME_ALIGNED void *iso_alloc(size_t size); 48 | EXTERNAL_API NO_DISCARD MALLOC_ATTR CALLOC_SIZE ASSUME_ALIGNED void *iso_calloc(size_t nmemb, size_t size); 49 | EXTERNAL_API NO_DISCARD MALLOC_ATTR REALLOC_SIZE ASSUME_ALIGNED void *iso_realloc(void *p, size_t size); 50 | EXTERNAL_API NO_DISCARD MALLOC_ATTR REALLOC_SIZE ASSUME_ALIGNED void *iso_reallocarray(void *p, size_t nmemb, size_t size); 51 | EXTERNAL_API void iso_free(void *p); 52 | EXTERNAL_API void iso_free_size(void *p, size_t size); 53 | EXTERNAL_API void iso_free_permanently(void *p); 54 | EXTERNAL_API void iso_free_from_zone(void *p, iso_alloc_zone_handle *zone); 55 | EXTERNAL_API void iso_free_from_zone_permanently(void *p, iso_alloc_zone_handle *zone); 56 | EXTERNAL_API size_t iso_chunksz(void *p); 57 | EXTERNAL_API void iso_alloc_verify_ptr_tag(void *p, iso_alloc_zone_handle *zone); 58 | EXTERNAL_API NO_DISCARD uint8_t iso_alloc_get_mem_tag(void *p, iso_alloc_zone_handle *zone); 59 | EXTERNAL_API NO_DISCARD ASSUME_ALIGNED char *iso_strdup(const char *str); 60 | EXTERNAL_API NO_DISCARD ASSUME_ALIGNED char *iso_strdup_from_zone(iso_alloc_zone_handle *zone, const char *str); 61 | EXTERNAL_API NO_DISCARD ASSUME_ALIGNED char *iso_strndup(const char *str, size_t n); 62 | EXTERNAL_API NO_DISCARD ASSUME_ALIGNED char *iso_strndup_from_zone(iso_alloc_zone_handle *zone, const char *str, size_t n); 63 | EXTERNAL_API NO_DISCARD MALLOC_ATTR ASSUME_ALIGNED void *iso_alloc_from_zone(iso_alloc_zone_handle *zone); 64 | EXTERNAL_API NO_DISCARD MALLOC_ATTR void *iso_alloc_from_zone_tagged(iso_alloc_zone_handle *zone); 65 | EXTERNAL_API NO_DISCARD void *iso_alloc_tag_ptr(void *p, iso_alloc_zone_handle *zone); 66 | EXTERNAL_API NO_DISCARD void *iso_alloc_untag_ptr(void *p, iso_alloc_zone_handle *zone); 67 | EXTERNAL_API NO_DISCARD iso_alloc_zone_handle *iso_alloc_new_zone(size_t size); 68 | EXTERNAL_API NO_DISCARD size_t iso_zone_chunk_count(iso_alloc_zone_handle *zone); 69 | EXTERNAL_API void iso_alloc_destroy_zone(iso_alloc_zone_handle *zone); 70 | EXTERNAL_API void iso_alloc_protect_root(void); 71 | EXTERNAL_API void iso_alloc_unprotect_root(void); 72 | EXTERNAL_API uint64_t iso_alloc_detect_zone_leaks(iso_alloc_zone_handle *zone); 73 | EXTERNAL_API uint64_t iso_alloc_detect_leaks(void); 74 | EXTERNAL_API uint64_t iso_alloc_zone_mem_usage(iso_alloc_zone_handle *zone); 75 | EXTERNAL_API uint64_t iso_alloc_mem_usage(void); 76 | EXTERNAL_API void iso_verify_zones(void); 77 | EXTERNAL_API void iso_verify_zone(iso_alloc_zone_handle *zone); 78 | EXTERNAL_API int32_t iso_alloc_name_zone(iso_alloc_zone_handle *zone, char *name); 79 | EXTERNAL_API void iso_flush_caches(void); 80 | 81 | #if HEAP_PROFILER 82 | #define BACKTRACE_DEPTH 8 83 | 84 | typedef struct { 85 | /* The address of the last 8 callers as referenced by stack frames */ 86 | uint64_t callers[BACKTRACE_DEPTH]; 87 | /* The smallest allocation size requested by this call path */ 88 | size_t lower_bound_size; 89 | /* The largest allocation size requested by this call path */ 90 | size_t upper_bound_size; 91 | /* A 16 bit hash of the back trace */ 92 | uint16_t backtrace_hash; 93 | /* Call count */ 94 | size_t call_count; 95 | } iso_alloc_traces_t; 96 | 97 | typedef struct { 98 | /* The address of the last 8 callers as referenced by stack frames */ 99 | uint64_t callers[BACKTRACE_DEPTH]; 100 | /* A 16 bit hash of the back trace */ 101 | uint16_t backtrace_hash; 102 | /* Call count */ 103 | size_t call_count; 104 | } iso_free_traces_t; 105 | 106 | EXTERNAL_API size_t iso_get_alloc_traces(iso_alloc_traces_t *traces_out); 107 | EXTERNAL_API size_t iso_get_free_traces(iso_free_traces_t *traces_out); 108 | EXTERNAL_API void iso_alloc_reset_traces(void); 109 | #endif 110 | 111 | #if EXPERIMENTAL 112 | EXTERNAL_API void iso_alloc_search_stack(void *p); 113 | #endif 114 | 115 | #if CPP_SUPPORT 116 | } 117 | #endif 118 | -------------------------------------------------------------------------------- /tests/tests.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc tests.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | #include 7 | 8 | static const uint32_t allocation_sizes[] = {ZONE_16, ZONE_32, ZONE_64, ZONE_128, 9 | ZONE_256, ZONE_512, ZONE_1024, 10 | ZONE_2048, ZONE_4096, ZONE_8192, 16384, 32768, 42000}; 11 | 12 | static const uint32_t array_sizes[] = {16, 32, 64, 128, 256, 512, 1024, 13 | 2048, 4096, 8192, 16384, 32768, 65536}; 14 | 15 | int32_t alloc_count; 16 | 17 | #if MALLOC_PERF_TEST 18 | #define alloc_mem malloc 19 | #define calloc_mem calloc 20 | #define realloc_mem realloc 21 | #define free_mem free 22 | #else 23 | #define alloc_mem iso_alloc 24 | #define calloc_mem iso_calloc 25 | #define realloc_mem iso_realloc 26 | #define free_mem iso_free 27 | #endif 28 | 29 | int reallocate(size_t array_size, size_t allocation_size) { 30 | void *p[array_size]; 31 | memset(p, 0x0, array_size); 32 | 33 | for(int i = 0; i < array_size; i++) { 34 | if(allocation_size == 0) { 35 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 36 | } 37 | 38 | void *d = alloc_mem(allocation_size / 2); 39 | 40 | memset(d, 0x0, allocation_size / 2); 41 | p[i] = realloc_mem(d, allocation_size); 42 | 43 | if(p[i] == NULL) { 44 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations", allocation_size, alloc_count); 45 | } 46 | 47 | alloc_count++; 48 | 49 | /* Free every other allocation */ 50 | if(i % 2) { 51 | free_mem(p[i]); 52 | p[i] = NULL; 53 | } 54 | } 55 | 56 | /* Free the remaining allocations */ 57 | for(int i = 0; i < array_size; i++) { 58 | if(p[i] != NULL) { 59 | free_mem(p[i]); 60 | } 61 | } 62 | 63 | return OK; 64 | } 65 | 66 | int callocate(size_t array_size, size_t allocation_size) { 67 | void *p[array_size]; 68 | memset(p, 0x0, array_size); 69 | 70 | for(int i = 0; i < array_size; i++) { 71 | if(allocation_size == 0) { 72 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 73 | } 74 | 75 | p[i] = calloc_mem(1, allocation_size); 76 | 77 | if(p[i] == NULL) { 78 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations", allocation_size, alloc_count); 79 | } 80 | 81 | alloc_count++; 82 | 83 | /* Free every other allocation */ 84 | if(i % 2) { 85 | free_mem(p[i]); 86 | p[i] = NULL; 87 | } 88 | } 89 | 90 | /* Free the remaining allocations */ 91 | for(int i = 0; i < array_size; i++) { 92 | if(p[i] != NULL) { 93 | free_mem(p[i]); 94 | } 95 | } 96 | 97 | return OK; 98 | } 99 | 100 | int allocate(size_t array_size, size_t allocation_size) { 101 | void *p[array_size]; 102 | memset(p, 0x0, array_size); 103 | 104 | for(int i = 0; i < array_size; i++) { 105 | if(allocation_size == 0) { 106 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 107 | } 108 | 109 | p[i] = alloc_mem(allocation_size); 110 | 111 | if(p[i] == NULL) { 112 | LOG_AND_ABORT("Failed to allocate %ld bytes after %d total allocations", allocation_size, alloc_count); 113 | } 114 | 115 | alloc_count++; 116 | 117 | /* Free every other allocation */ 118 | if(i % 2) { 119 | free_mem(p[i]); 120 | p[i] = NULL; 121 | } 122 | } 123 | 124 | /* Free the remaining allocations */ 125 | for(int i = 0; i < array_size; i++) { 126 | if(p[i] != NULL) { 127 | free_mem(p[i]); 128 | } 129 | } 130 | 131 | return OK; 132 | } 133 | 134 | int main(int argc, char *argv[]) { 135 | clock_t start, end; 136 | srand(time(NULL)); 137 | 138 | start = clock(); 139 | 140 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 141 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 142 | allocate(array_sizes[i], allocation_sizes[z]); 143 | } 144 | } 145 | 146 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 147 | allocate(array_sizes[i], 0); 148 | } 149 | 150 | end = clock(); 151 | double total = ((double) (end - start)) / CLOCKS_PER_SEC; 152 | 153 | #if MALLOC_PERF_TEST 154 | fprintf(stdout, "malloc/free %d tests completed in %f seconds\n", alloc_count, total); 155 | #else 156 | fprintf(stdout, "iso_alloc/iso_free %d tests completed in %f seconds\n", alloc_count, total); 157 | #endif 158 | 159 | alloc_count = 0; 160 | start = clock(); 161 | 162 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 163 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 164 | callocate(array_sizes[i], allocation_sizes[z]); 165 | } 166 | } 167 | 168 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 169 | callocate(array_sizes[i], 0); 170 | } 171 | 172 | end = clock(); 173 | total = ((double) (end - start)) / CLOCKS_PER_SEC; 174 | 175 | #if MALLOC_PERF_TEST 176 | fprintf(stdout, "calloc/free %d tests completed in %f seconds\n", alloc_count, total); 177 | #else 178 | fprintf(stdout, "iso_calloc/iso_free %d tests completed in %f seconds\n", alloc_count, total); 179 | #endif 180 | 181 | alloc_count = 0; 182 | start = clock(); 183 | 184 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 185 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 186 | reallocate(array_sizes[i], allocation_sizes[z]); 187 | } 188 | } 189 | 190 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 191 | reallocate(array_sizes[i], 0); 192 | } 193 | 194 | end = clock(); 195 | total = ((double) (end - start)) / CLOCKS_PER_SEC; 196 | 197 | #if MALLOC_PERF_TEST 198 | fprintf(stdout, "realloc/free %d tests completed in %f seconds\n", alloc_count, total); 199 | #else 200 | fprintf(stdout, "iso_realloc/iso_free %d tests completed in %f seconds\n", alloc_count, total); 201 | #endif 202 | 203 | return 0; 204 | } 205 | -------------------------------------------------------------------------------- /src/iso_alloc_util.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_util.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | 6 | #if CPU_PIN 7 | /* sched_getcpu's performance depends on the 8 | * architecture/kernel version, so we lower 9 | * the cost of feature's abstraction here. */ 10 | int _iso_getcpu(void) { 11 | #if defined(SCHED_GETCPU) 12 | return sched_getcpu(); 13 | #elif defined(__x86_64__) 14 | /* rdtscp is not always available and is pretty slow 15 | * we instead load from the global descriptor table 16 | * then "mov" it to 'a' */ 17 | unsigned int a; 18 | const unsigned int cpunodesegment = 15 * 8 + 3; 19 | __asm__ volatile("lsl %1, %0" 20 | : "=r"(a) 21 | : "r"(cpunodesegment)); 22 | return (int) (a & 0xfff); 23 | #elif defined(__aarch64__) 24 | #if __APPLE__ 25 | /* unlike other operating systems, the tpidr_el0 register on macOs 26 | * is unused data stored for the current thread is instead fetchable 27 | * from "tpidrro_el0". */ 28 | uintptr_t a; 29 | __asm__ volatile("mrs %x0, tpidrro_el0" 30 | : "=r"(a)::"memory"); 31 | return (int) ((a & 0x8)); 32 | #else 33 | /* TODO most likely different register/making on other platforms */ 34 | return -1; 35 | #endif 36 | #else 37 | return -1; 38 | #endif 39 | } 40 | #endif 41 | 42 | void darwin_reuse(void *p, size_t size) { 43 | #if __APPLE__ 44 | while(madvise(p, size, MADV_FREE_REUSE) && errno == EAGAIN) { 45 | } 46 | #endif 47 | } 48 | 49 | void *create_guard_page(void *p) { 50 | if(p == NULL) { 51 | p = mmap_rw_pages(g_page_size, false, NULL); 52 | 53 | if(p == NULL) { 54 | LOG_AND_ABORT("Could not allocate guard page"); 55 | } 56 | } 57 | 58 | mprotect_pages(p, g_page_size, PROT_NONE); 59 | name_mapping(p, g_page_size, GUARD_PAGE_NAME); 60 | return p; 61 | } 62 | 63 | /* Assumes p is page aligned and surrounded by guard pages */ 64 | void unmap_guarded_pages(void *p, size_t size) { 65 | size_t sz = ROUND_UP_PAGE(size); 66 | munmap(p - g_page_size, sz + (g_page_size << 1)); 67 | } 68 | 69 | /* Assumes size for guard pages is NOT accounted for. 70 | * Returns a pointer to a contiguous set of RW pages 71 | * with guard pages mapped on top and bottom. */ 72 | void *mmap_guarded_rw_pages(size_t size, bool populate, const char *name) { 73 | size_t sz = ROUND_UP_PAGE(size); 74 | 75 | if(sz < size) { 76 | return NULL; 77 | } 78 | 79 | void *p = mmap_rw_pages(sz + (g_page_size * 2), populate, name); 80 | 81 | create_guard_page(p); 82 | create_guard_page(p + (g_page_size + sz)); 83 | return (p + g_page_size); 84 | } 85 | 86 | #if ARM_MTE 87 | void *mmap_guarded_rw_mte_pages(size_t size, bool populate, const char *name) { 88 | size_t sz = ROUND_UP_PAGE(size); 89 | 90 | if(sz < size) { 91 | return NULL; 92 | } 93 | 94 | void *p = mmap_rw_mte_pages(sz + (g_page_size * 2), populate, name); 95 | 96 | create_guard_page(p); 97 | create_guard_page(p + (g_page_size + sz)); 98 | return (p + g_page_size); 99 | } 100 | #endif 101 | 102 | void *mmap_rw_pages(size_t size, bool populate, const char *name) { 103 | return mmap_pages(size, populate, name, PROT_READ | PROT_WRITE); 104 | } 105 | 106 | #if ARM_MTE 107 | void *mmap_rw_mte_pages(size_t size, bool populate, const char *name) { 108 | return mmap_pages(size, populate, name, PROT_READ | PROT_WRITE | PROT_MTE); 109 | } 110 | #endif 111 | 112 | void *mmap_pages(size_t size, bool populate, const char *name, int32_t prot) { 113 | #if !ENABLE_ASAN 114 | /* Produce a random page address as a hint for mmap */ 115 | uint64_t hint = ROUND_DOWN_PAGE(rand_uint64()); 116 | hint &= 0x3FFFFFFFF000; 117 | void *p = (void *) hint; 118 | #else 119 | void *p = NULL; 120 | #endif 121 | size_t sz = ROUND_UP_PAGE(size); 122 | 123 | if(sz < size) { 124 | return NULL; 125 | } 126 | 127 | int32_t flags = (MAP_PRIVATE | MAP_ANONYMOUS); 128 | int fd = -1; 129 | 130 | #if __linux__ 131 | #if PRE_POPULATE_PAGES 132 | if(populate == true) { 133 | flags |= MAP_POPULATE; 134 | } 135 | #endif 136 | 137 | #if MAP_HUGETLB && HUGE_PAGES 138 | /* If we are allocating pages for a user zone 139 | * then take advantage of the huge TLB */ 140 | if(sz == ZONE_USER_SIZE || sz == (ZONE_USER_SIZE >> 1)) { 141 | flags |= MAP_HUGETLB; 142 | } 143 | #endif 144 | #elif __APPLE__ 145 | #if VM_FLAGS_SUPERPAGE_SIZE_2MB && HUGE_PAGES 146 | /* If we are allocating pages for a user zone 147 | * we are going to use the 2 MB superpage flag */ 148 | if(sz == ZONE_USER_SIZE || sz == (ZONE_USER_SIZE >> 1)) { 149 | fd = VM_FLAGS_SUPERPAGE_SIZE_2MB; 150 | } 151 | #endif 152 | #endif 153 | 154 | p = mmap(p, sz, prot, flags, fd, 0); 155 | 156 | if(p == MAP_FAILED) { 157 | LOG_AND_ABORT("Failed to mmap rw pages"); 158 | } 159 | 160 | #if __linux__ && MAP_HUGETLB && HUGE_PAGES && THP_PAGES && MADV_HUGEPAGE 161 | if(sz == ZONE_USER_SIZE || sz == (ZONE_USER_SIZE >> 1)) { 162 | madvise(p, sz, MADV_HUGEPAGE); 163 | } 164 | #endif 165 | 166 | /* All pages are mapped as if we will never need 167 | * them. This is to ensure RSS stays managable */ 168 | dont_need_pages(p, sz); 169 | 170 | if(name != NULL) { 171 | name_mapping(p, sz, name); 172 | } 173 | 174 | return p; 175 | } 176 | 177 | void dont_need_pages(void *p, size_t size) { 178 | madvise(p, size, FREE_OR_DONTNEED); 179 | 180 | #if __APPLE__ 181 | while(madvise(p, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { 182 | } 183 | #endif 184 | } 185 | 186 | void mprotect_pages(void *p, size_t size, int32_t protection) { 187 | if((mprotect(p, size, protection)) == ERR) { 188 | LOG_AND_ABORT("Failed to mprotect pages @ 0x%p (%s)", p, strerror(errno)); 189 | } 190 | } 191 | 192 | int32_t name_mapping(void *p, size_t sz, const char *name) { 193 | #if NAMED_MAPPINGS && (__ANDROID__ || KERNEL_VERSION_SEQ_5_17) 194 | return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, sz, name); 195 | #else 196 | return 0; 197 | #endif 198 | } 199 | 200 | bool is_pow2(uint64_t sz) { 201 | return (sz & (sz - 1)) == 0; 202 | } 203 | 204 | size_t next_pow2(size_t sz) { 205 | sz |= sz >> 1; 206 | sz |= sz >> 2; 207 | sz |= sz >> 4; 208 | sz |= sz >> 8; 209 | sz |= sz >> 16; 210 | sz |= sz >> 32; 211 | return sz + 1; 212 | } 213 | 214 | const uint32_t _log_table[32] = { 215 | 0, 9, 1, 10, 13, 21, 2, 29, 216 | 11, 14, 16, 18, 22, 25, 3, 30, 217 | 8, 12, 20, 28, 15, 17, 24, 7, 218 | 19, 27, 23, 6, 26, 5, 4, 31}; 219 | 220 | /* Fast log2() implementation for 32 bit integers */ 221 | uint32_t _log2(uint32_t v) { 222 | v |= v >> 1; 223 | v |= v >> 2; 224 | v |= v >> 4; 225 | v |= v >> 8; 226 | v |= v >> 16; 227 | return _log_table[(uint32_t) (v * 0x07C4ACDD) >> 27]; 228 | } 229 | -------------------------------------------------------------------------------- /tests/alloc_fuzz.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc alloc_fuzz.c 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | /* This test is not meant to be run as a part of the IsoAlloc 5 | * test suite. It should be run stand alone during development 6 | * work to catch any bugs you introduce */ 7 | 8 | #include "iso_alloc.h" 9 | #include "iso_alloc_internal.h" 10 | #include 11 | 12 | static const uint32_t allocation_sizes[] = {ZONE_16, ZONE_32, ZONE_64, ZONE_128, 13 | ZONE_256, ZONE_512, ZONE_1024, 14 | ZONE_2048, ZONE_4096, ZONE_8192, 15 | SMALL_SIZE_MAX}; 16 | 17 | static const uint32_t array_sizes[] = {16, 32, 64, 128, 256}; 18 | 19 | static __thread iso_alloc_zone_handle *private_zone; 20 | 21 | #define MAYBE_VALIDATE_ZONES() \ 22 | if((rand() % 10) == 1) { \ 23 | iso_verify_zones(); \ 24 | } 25 | 26 | int64_t reallocate(size_t array_size, size_t allocation_size) { 27 | uint64_t allocs = 0; 28 | void *p[array_size]; 29 | memset(p, 0x0, array_size); 30 | 31 | for(int i = 0; i < array_size; i++) { 32 | if(allocation_size == 0) { 33 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 34 | } 35 | 36 | void *d = iso_alloc(allocation_size / 2); 37 | allocs++; 38 | memset(d, 0x0, allocation_size / 2); 39 | p[i] = iso_realloc(d, allocation_size); 40 | allocs++; 41 | 42 | if(p[i] == NULL) { 43 | LOG_AND_ABORT("Failed to allocate %ld bytes", allocation_size); 44 | } 45 | 46 | /* Free every other allocation */ 47 | if(i % 2) { 48 | iso_free(p[i]); 49 | p[i] = NULL; 50 | } 51 | } 52 | 53 | MAYBE_VALIDATE_ZONES(); 54 | 55 | /* Free the remaining allocations */ 56 | for(int i = 0; i < array_size; i++) { 57 | iso_free(p[i]); 58 | } 59 | 60 | return allocs; 61 | } 62 | 63 | int64_t callocate(size_t array_size, size_t allocation_size) { 64 | uint64_t allocs = 0; 65 | void *p[array_size]; 66 | memset(p, 0x0, array_size); 67 | 68 | for(int i = 0; i < array_size; i++) { 69 | if(allocation_size == 0) { 70 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 71 | } 72 | 73 | p[i] = iso_calloc(1, allocation_size); 74 | allocs++; 75 | 76 | if(p[i] == NULL) { 77 | LOG_AND_ABORT("Failed to allocate %ld bytes", allocation_size); 78 | } 79 | 80 | /* Free every other allocation */ 81 | if(i % 2) { 82 | iso_free(p[i]); 83 | p[i] = NULL; 84 | } 85 | } 86 | 87 | MAYBE_VALIDATE_ZONES(); 88 | 89 | /* Free the remaining allocations */ 90 | for(int i = 0; i < array_size; i++) { 91 | iso_free(p[i]); 92 | } 93 | 94 | return allocs; 95 | } 96 | 97 | int64_t allocate(size_t array_size, size_t allocation_size) { 98 | uint64_t allocs = 0; 99 | void *p[array_size]; 100 | memset(p, 0x0, array_size); 101 | 102 | if(rand() % 100 == 1) { 103 | if(private_zone != NULL) { 104 | iso_alloc_destroy_zone(private_zone); 105 | 106 | private_zone = NULL; 107 | } 108 | 109 | if(allocation_size <= SMALL_SIZE_MAX) { 110 | private_zone = iso_alloc_new_zone(allocation_size); 111 | 112 | if(private_zone == NULL) { 113 | LOG_AND_ABORT("Could not allocate private zone!"); 114 | } 115 | } 116 | } 117 | 118 | for(int i = 0; i < array_size; i++) { 119 | if(allocation_size == 0) { 120 | allocation_size = allocation_sizes[(rand() % sizeof(allocation_sizes) / sizeof(uint32_t))] + (rand() % 32); 121 | } 122 | 123 | if(rand() % 100 == 1 && private_zone != NULL && allocation_size < SMALL_SIZE_MAX) { 124 | p[i] = iso_alloc_from_zone(private_zone); 125 | allocs++; 126 | } else { 127 | p[i] = iso_alloc(allocation_size); 128 | allocs++; 129 | } 130 | 131 | if(p[i] == NULL) { 132 | LOG_AND_ABORT("Failed to allocate %ld bytes", allocation_size); 133 | } 134 | 135 | /* Free every other allocation */ 136 | if(i % 2) { 137 | iso_free(p[i]); 138 | p[i] = NULL; 139 | } 140 | } 141 | 142 | MAYBE_VALIDATE_ZONES(); 143 | 144 | /* Free the remaining allocations */ 145 | for(int i = 0; i < array_size; i++) { 146 | /* Occasionally leak chunks */ 147 | if((rand() % 100) == 1) { 148 | iso_free(p[i]); 149 | } 150 | } 151 | 152 | if(rand() % 10 == 1) { 153 | iso_alloc_destroy_zone(private_zone); 154 | private_zone = NULL; 155 | } 156 | 157 | return allocs; 158 | } 159 | 160 | void *start(void *p) { 161 | uint64_t total_allocations = 0; 162 | uint64_t loop = 1; 163 | srand(time(NULL)); 164 | 165 | while(1) { 166 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 167 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 168 | total_allocations += allocate(array_sizes[i], allocation_sizes[z]); 169 | } 170 | } 171 | 172 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 173 | total_allocations += allocate(array_sizes[i], 0); 174 | } 175 | 176 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 177 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 178 | total_allocations += callocate(array_sizes[i], allocation_sizes[z]); 179 | } 180 | } 181 | 182 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 183 | total_allocations += callocate(array_sizes[i], 0); 184 | } 185 | 186 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 187 | for(int z = 0; z < sizeof(allocation_sizes) / sizeof(uint32_t); z++) { 188 | total_allocations += reallocate(array_sizes[i], allocation_sizes[z]); 189 | } 190 | } 191 | 192 | for(int i = 0; i < sizeof(array_sizes) / sizeof(uint32_t); i++) { 193 | total_allocations += reallocate(array_sizes[i], 0); 194 | } 195 | 196 | LOG("Thread ID (%d) looped %d times. Total allocations: %ld", pthread_self(), loop++, total_allocations); 197 | } 198 | } 199 | 200 | int main(int argc, char *argv[]) { 201 | private_zone = NULL; 202 | 203 | pthread_t t; 204 | pthread_t tt; 205 | pthread_t ttt; 206 | pthread_t tttt; 207 | pthread_create(&t, NULL, start, NULL); 208 | pthread_create(&tt, NULL, start, NULL); 209 | pthread_create(&ttt, NULL, start, NULL); 210 | pthread_create(&tttt, NULL, start, NULL); 211 | 212 | pthread_join(t, NULL); 213 | pthread_join(tt, NULL); 214 | pthread_join(ttt, NULL); 215 | pthread_join(tttt, NULL); 216 | pthread_exit(NULL); 217 | 218 | return 0; 219 | } 220 | -------------------------------------------------------------------------------- /include/conf.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_internal.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | /* Modifying the values in this configuration header 7 | * can significantly improve the performance of your 8 | * workload, or the security of your runtime. Please 9 | * read the comments and documentation carefully before 10 | * modifying these values as many of them are core to 11 | * how the underlying memory allocator functions */ 12 | 13 | /* This controls what % of chunks are canaries in a 14 | * zone. For example, if a zone holds 128 byte chunks 15 | * then it has (ZONE_USER_SIZE / 128) = 32768 total 16 | * chunks available for it. The number of canaries is 17 | * calculated as (32768 / CANARY_COUNT_DIV) = 327. 18 | * When CANARY_COUNT_DIV = 7 we set aside < %1 of user 19 | * chunks as canaries because we right shift zone 20 | * chunk count by this value, e.g. (65535 >> 7 = 511) */ 21 | #define CANARY_COUNT_DIV 7 22 | 23 | /* The minimum number of free bit slots needed in the 24 | * cache in order to randomize it. RANDOMIZE_FREELIST 25 | * must be enabled for this to be used */ 26 | #define MIN_RAND_FREELIST 4 27 | 28 | /* If you're compiling for Android and want custom names 29 | * for internal mappings that are viewable from procfs 30 | * (i.e. /proc/pid/maps) you can modify those names here */ 31 | #if NAMED_MAPPINGS 32 | #define SAMPLED_ALLOC_NAME "isoalloc sampled allocation" 33 | #define BIG_ZONE_UD_NAME "isoalloc big zone user data" 34 | #define BIG_ZONE_MD_NAME "isoalloc big zone metadata" 35 | #define GUARD_PAGE_NAME "guard page" 36 | #define ROOT_NAME "isoalloc root" 37 | #define ZONE_BITMAP_NAME "isoalloc zone bitmap" 38 | #define INTERNAL_UZ_NAME "internal isoalloc user zone" 39 | #define PRIVATE_UZ_NAME "private isoalloc user zone" 40 | #define MEM_TAG_NAME "isoalloc zone mem tags" 41 | #define PREALLOC_BITMAPS "isoalloc small bitmaps" 42 | #endif 43 | 44 | /* If you're using the UAF_PTR_PAGE functionality and 45 | * want to change the frequency it is triggered or the 46 | * magic value that is written */ 47 | #if UAF_PTR_PAGE 48 | #define UAF_PTR_PAGE_ODDS 1000000 49 | #endif 50 | 51 | /* Zones can be retired after a certain number of 52 | * allocations. This is computed as the total count 53 | * of chunks the zone can hold multiplied by this 54 | * value. The zone is replaced at that point if all 55 | * of its current chunks are free */ 56 | #define ZONE_ALLOC_RETIRE 32 57 | 58 | /* This byte value will overwrite the contents 59 | * of all free'd user chunks if -DSANITIZE_CHUNKS 60 | * is enabled in the Makefile. The value is completely 61 | * arbitrary, but non-zero since this could mask 62 | * some bugs. */ 63 | #define POISON_BYTE 0xde 64 | 65 | /* See PERFORMANCE.md for notes on huge page sizes. 66 | * If your system uses a non-default value for huge 67 | * page sizes you will need to adjust that here */ 68 | #if(__linux__ && MAP_HUGETLB) || (__APPLE__ && VM_FLAGS_SUPERPAGE_SIZE_2MB) || (__FreeBSD__ && MAP_HUGETLB) && HUGE_PAGES 69 | #define HUGE_PAGE_SZ 2097152 70 | #endif 71 | 72 | /* Size of the zone cache documented in PERFORMANCE.md */ 73 | #define ZONE_CACHE_SZ 8 74 | 75 | /* Size of the chunk quarantine cache documented in PERFORMANCE.md */ 76 | #define CHUNK_QUARANTINE_SZ 64 77 | 78 | /* This is the maximum number of zones iso_alloc can 79 | * create. This is a completely arbitrary number but 80 | * it does correspond to the size of the _root.zones 81 | * array. Currently the iso_alloc_zone_t structure is 82 | * roughly 2112 bytes so this results in 17301504 bytes 83 | * (~17 MB) for zone meta data. See PERFORMANCE.md for 84 | * more information on this value. Max is 65535 */ 85 | #define MAX_ZONES 8192 86 | 87 | /* Anything above this size will need to go through the 88 | * big zone path. Maximum value here is 131072 due to how 89 | * we construct the zone bitmap. You can think of this 90 | * value as roughly equivalent to M_MMAP_THRESHOLD. Valid 91 | * values for SMALL_SIZE_MAX are powers of 2 through 131072 */ 92 | #define SMALL_SIZE_MAX 65536 93 | 94 | /* Big zones are for any allocation bigger than SMALL_SIZE_MAX. 95 | * We reuse them when possible but not if the reuse would 96 | * result in unused memory that exceeds this value */ 97 | #define BIG_ZONE_WASTE 8192 98 | 99 | /* The maximum number of big zone free list entries. 100 | * We want to limit the number of these because they 101 | * are often backed by a large number of pages */ 102 | #define BIG_ZONE_MAX_FREE_LIST 64 103 | 104 | /* Big zones can be retired after a certain number of 105 | * allocations. Big zones pages will be unmapped and 106 | * not added to the free list after being used N times */ 107 | #define BIG_ZONE_ALLOC_RETIRE 16 108 | 109 | /* We allocate zones at startup for common sizes. 110 | * Each of these default zones is 4mb (ZONE_USER_SIZE) 111 | * so ZONE_8192 would hold less chunks than ZONE_128 */ 112 | #define ZONE_16 16 113 | #define ZONE_32 32 114 | #define ZONE_64 64 115 | #define ZONE_128 128 116 | #define ZONE_256 256 117 | #define ZONE_512 512 118 | #define ZONE_1024 1024 119 | #define ZONE_2048 2048 120 | #define ZONE_4096 4096 121 | #define ZONE_8192 8192 122 | 123 | /* Default zones should ideally never be above 8192 124 | * bytes in size. This is because the allocator makes 125 | * certain decisions based on this value such as the 126 | * number of canary values in a zone. It is safe to 127 | * modify to a larger value but you will likely be 128 | * wasting memory by doing so. */ 129 | #define MAX_DEFAULT_ZONE_SZ ZONE_8192 130 | 131 | /* If you have specific allocation pattern requirements 132 | * then you may want a custom set of default zones. These 133 | * example are provided to get you started. Zone creation 134 | * at runtime is *not* limited to these sizes, this defines 135 | * the default zones that will be created at startup time. 136 | * Each of these examples is 4 default zones which will 137 | * consume 16 mb of memory for user chunks, plus 2 guard 138 | * pages per zone, a bitmap, and 2 guard pages per bitmap. 139 | * You also need to define SMALLEST_CHUNK_SZ which should 140 | * correspond to the smallest value in your default_zones 141 | * array. It's value should never be less than 16 */ 142 | #if SMALL_MEM_STARTUP 143 | /* ZONE_USER_SIZE * sizeof(default_zones) = ~16 mb */ 144 | /* SZ_ALIGNMENT = 32 */ 145 | #define SMALLEST_CHUNK_SZ SZ_ALIGNMENT 146 | const static uint64_t default_zones[] = {ZONE_64, ZONE_256, ZONE_512, ZONE_1024}; 147 | #else 148 | /* ZONE_USER_SIZE * sizeof(default_zones) = ~40 mb */ 149 | #define SMALLEST_CHUNK_SZ SZ_ALIGNMENT 150 | const static uint64_t default_zones[] = {ZONE_32, ZONE_64, ZONE_128, ZONE_256, ZONE_512, 151 | ZONE_1024, ZONE_2048, ZONE_4096, ZONE_8192}; 152 | #endif 153 | 154 | /* Additional default zone example configurations are below */ 155 | 156 | #if 0 157 | /* Only small allocations between 16 and 28 bytes are expected */ 158 | #define SMALLEST_CHUNK_SZ ZONE_16 159 | static uint64_t default_zones[] = {ZONE_32, ZONE_32, ZONE_64, ZONE_64, ZONE_128, ZONE_128}; 160 | #endif 161 | 162 | #if 0 163 | /* Large allocations but smaller than a page */ 164 | #define SMALLEST_CHUNK_SZ ZONE_512 165 | static uint64_t default_zones[] = {ZONE_512, ZONE_1024, ZONE_2048, ZONE_4096}; 166 | #endif 167 | -------------------------------------------------------------------------------- /include/iso_alloc_ds.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_ds.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | /* This header contains the core data structures, 7 | * caches, and typedef used by the allocator */ 8 | #define ZONE_FREE_LIST_SZ 255 9 | 10 | /* The size of this table scales with SMALL_SIZE_MAX. */ 11 | #define ZONE_LOOKUP_TABLE_SZ (SMALL_SIZE_MAX >> 4) * sizeof(uint32_t) 12 | #define SZ_TO_ZONE_LOOKUP_IDX(size) size >> 4 13 | 14 | #define CHUNK_TO_ZONE_TABLE_SZ (65535 * sizeof(uint16_t)) 15 | #define ADDR_TO_CHUNK_TABLE(p) (((uintptr_t) p >> 32) & 0xffff) 16 | 17 | typedef int64_t bit_slot_t; 18 | typedef int64_t bitmap_index_t; 19 | typedef uint16_t zone_lookup_table_t; 20 | typedef uint16_t chunk_lookup_table_t; 21 | 22 | #if ZONE_FREE_LIST_SZ > 255 23 | typedef uint16_t free_bit_slot_t; 24 | #define FREE_LIST_SHF 16 25 | #else 26 | typedef uint8_t free_bit_slot_t; 27 | #define FREE_LIST_SHF 8 28 | #endif 29 | 30 | typedef struct { 31 | void *user_pages_start; /* Start of the pages backing this zone */ 32 | void *bitmap_start; /* Start of the bitmap */ 33 | int64_t next_free_bit_slot; /* The last bit slot returned by get_next_free_bit_slot */ 34 | bit_slot_t free_bit_slots[ZONE_FREE_LIST_SZ]; /* A cache of bit slots that point to freed chunks */ 35 | uint64_t canary_secret; /* Each zone has its own canary secret */ 36 | uint64_t pointer_mask; /* Each zone has its own pointer protection secret */ 37 | bitmap_index_t max_bitmap_idx; /* Max bitmap index for this bitmap */ 38 | uint32_t chunk_size; /* Size of chunks managed by this zone */ 39 | uint32_t bitmap_size; /* Size of the bitmap in bytes */ 40 | uint32_t af_count; /* Increment/Decrement with each alloc/free operation */ 41 | uint32_t chunk_count; /* Total number of chunks in this zone */ 42 | uint32_t alloc_count; /* Total number of lifetime allocations */ 43 | uint16_t index; /* Zone index */ 44 | uint16_t next_sz_index; /* What is the index of the next zone of this size */ 45 | free_bit_slot_t free_bit_slots_index; /* Tracks how many entries in the cache are filled */ 46 | free_bit_slot_t free_bit_slots_usable; /* The oldest members of the free cache are served first */ 47 | int8_t preallocated_bitmap_idx; /* The bitmap is preallocated and its index */ 48 | #if CPU_PIN 49 | uint8_t cpu_core; /* What CPU core this zone is pinned to */ 50 | #endif 51 | bool internal; /* Zones can be managed by iso_alloc or private */ 52 | bool is_full; /* Flags whether this zone is full to avoid bit slot searches */ 53 | #if MEMORY_TAGGING 54 | bool tagged; /* Zone supports memory tagging */ 55 | #endif 56 | } __attribute__((packed, aligned(sizeof(int64_t)))) iso_alloc_zone_t; 57 | 58 | /* Meta data for big allocations are allocated near the 59 | * user pages themselves but separated via guard pages. 60 | * This meta data is stored at a random offset from the 61 | * beginning of the page it resides on */ 62 | typedef struct iso_alloc_big_zone_t { 63 | uint64_t canary_a; 64 | bool free; 65 | uint64_t size; 66 | uint32_t ttl; 67 | void *user_pages_start; 68 | struct iso_alloc_big_zone_t *next; 69 | uint64_t canary_b; 70 | } __attribute__((packed, aligned(sizeof(int64_t)))) iso_alloc_big_zone_t; 71 | 72 | #define BITMAP_SIZE_16 16 73 | #define BITMAP_SIZE_32 32 74 | #define BITMAP_SIZE_64 64 75 | #define BITMAP_SIZE_128 128 76 | #define BITMAP_SIZE_256 256 77 | #define BITMAP_SIZE_512 512 78 | #define BITMAP_SIZE_1024 1024 79 | 80 | /* Small bitmap sizes are in reverse order as 81 | * smaller chunks are more likely to be needed. 82 | * The extra 0 is for alignment, we subtract it 83 | * when iterating _root->bitmaps */ 84 | const static int small_bitmap_sizes[] = { 85 | BITMAP_SIZE_1024, 86 | BITMAP_SIZE_512, 87 | BITMAP_SIZE_256, 88 | BITMAP_SIZE_128, 89 | BITMAP_SIZE_64, 90 | BITMAP_SIZE_32, 91 | BITMAP_SIZE_16, 92 | 0}; 93 | 94 | /* Preallocated pages for bitmaps are managed using 95 | * an array of these structures placed in the root */ 96 | typedef struct { 97 | void *bitmap; 98 | /* Our bitmap has a bitmap */ 99 | uint32_t in_use; 100 | /* Bucket value determines how many times we divy up 101 | * the bitmap page. eg For BITMAP_SIZE_16 its 256 times */ 102 | uint32_t bucket; 103 | } __attribute__((packed, aligned(sizeof(int64_t)))) iso_alloc_bitmap_t; 104 | 105 | /* There is only one iso_alloc root per-process. 106 | * It contains an array of zone structures. Each 107 | * Zone represents a number of contiguous pages 108 | * that hold chunks containing caller data */ 109 | typedef struct { 110 | iso_alloc_zone_t *zones; 111 | /* The chunk to zone lookup table provides a high hit 112 | * rate cache for finding which zone owns a user chunk. 113 | * It works by mapping the MSB of the chunk addressq 114 | * to a zone index. Misses are gracefully handled and 115 | * more common with a higher RSS and more mappings. */ 116 | chunk_lookup_table_t *chunk_lookup_table; 117 | uintptr_t *chunk_quarantine; 118 | iso_alloc_big_zone_t *big_zone_free; 119 | iso_alloc_big_zone_t *big_zone_used; 120 | #if NO_ZERO_ALLOCATIONS 121 | void *zero_alloc_page; 122 | #endif 123 | #if UAF_PTR_PAGE 124 | void *uaf_ptr_page; 125 | #endif 126 | /* Zones are linked by their next_sz_index member which 127 | * tells the allocator where in the _root->zones array 128 | * it can find the next zone that holds the same size 129 | * chunks. The lookup table helps us find the first zone 130 | * that holds a specific size in O(1) time */ 131 | zone_lookup_table_t zone_lookup_table[ZONE_LOOKUP_TABLE_SZ]; 132 | /* For chunk sizes >= 1024 our bitmap size is smaller 133 | * than a page. This optimization preallocates pages to 134 | * hold multiple bitmaps for these zones */ 135 | iso_alloc_bitmap_t bitmaps[sizeof(small_bitmap_sizes) / sizeof(int)]; 136 | uint64_t zone_handle_mask; 137 | uint64_t big_zone_next_mask; 138 | uint64_t big_zone_canary_secret; 139 | uint64_t seed; 140 | size_t chunk_quarantine_count; 141 | size_t zones_size; 142 | #if THREAD_SUPPORT 143 | #if USE_SPINLOCK 144 | atomic_flag big_zone_free_flag; 145 | atomic_flag big_zone_used_flag; 146 | #else 147 | pthread_mutex_t big_zone_free_mutex; 148 | pthread_mutex_t big_zone_used_mutex; 149 | #endif 150 | #endif 151 | uint32_t zone_retirement_shf; 152 | int32_t big_zone_free_count; 153 | int32_t big_zone_used_count; 154 | uint16_t zones_used; 155 | #if ARM_MTE 156 | bool arm_mte_enabled; 157 | #endif 158 | } __attribute__((aligned(sizeof(int64_t)))) iso_alloc_root; 159 | 160 | typedef struct { 161 | void *user_pages_start; 162 | void *bitmap_start; 163 | uint32_t bitmap_size; 164 | uint8_t ttl; 165 | } __attribute__((aligned(sizeof(int64_t)))) zone_quarantine_t; 166 | 167 | /* Each thread gets a local cache of the most recently 168 | * used zones. This can greatly speed up allocations 169 | * if your threads are reusing the same zones. This 170 | * cache is first in last out, and is populated during 171 | * both alloc and free operations */ 172 | typedef struct { 173 | size_t chunk_size; 174 | iso_alloc_zone_t *zone; 175 | } __attribute__((aligned(sizeof(int64_t)))) _tzc; 176 | -------------------------------------------------------------------------------- /src/iso_alloc_interfaces.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_interfaces.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc.h" 5 | #include "iso_alloc_internal.h" 6 | #include "iso_alloc_ds.h" 7 | #include "iso_alloc_sanity.h" 8 | 9 | #if HEAP_PROFILER 10 | #include "iso_alloc_profiler.h" 11 | #endif 12 | 13 | EXTERNAL_API FLATTEN void iso_alloc_initialize(void) { 14 | _iso_alloc_initialize(); 15 | } 16 | 17 | EXTERNAL_API FLATTEN void iso_alloc_destroy(void) { 18 | _iso_alloc_destroy(); 19 | } 20 | 21 | EXTERNAL_API NO_DISCARD FLATTEN MALLOC_ATTR ALLOC_SIZE ASSUME_ALIGNED void *iso_alloc(size_t size) { 22 | return _iso_alloc(NULL, size); 23 | } 24 | 25 | EXTERNAL_API NO_DISCARD FLATTEN MALLOC_ATTR CALLOC_SIZE ASSUME_ALIGNED void *iso_calloc(size_t nmemb, size_t size) { 26 | return _iso_calloc(nmemb, size); 27 | } 28 | 29 | EXTERNAL_API FLATTEN void iso_free(void *p) { 30 | _iso_free(p, false); 31 | } 32 | 33 | EXTERNAL_API FLATTEN void iso_free_size(void *p, size_t size) { 34 | _iso_free_size(p, size); 35 | } 36 | 37 | EXTERNAL_API FLATTEN void iso_free_from_zone(void *p, iso_alloc_zone_handle *zone) { 38 | UNMASK_ZONE_HANDLE(zone); 39 | _iso_free_from_zone(p, zone, false); 40 | } 41 | 42 | EXTERNAL_API FLATTEN void iso_free_from_zone_permanently(void *p, iso_alloc_zone_handle *zone) { 43 | UNMASK_ZONE_HANDLE(zone); 44 | _iso_free_from_zone(p, zone, true); 45 | } 46 | 47 | EXTERNAL_API FLATTEN void iso_free_permanently(void *p) { 48 | _iso_free(p, true); 49 | } 50 | 51 | EXTERNAL_API FLATTEN size_t iso_chunksz(void *p) { 52 | return _iso_chunk_size(p); 53 | } 54 | 55 | EXTERNAL_API FLATTEN NO_DISCARD size_t iso_zone_chunk_count(iso_alloc_zone_handle *zone) { 56 | UNMASK_ZONE_HANDLE(zone); 57 | iso_alloc_zone_t *_zone = (iso_alloc_zone_t *) zone; 58 | size_t canaries = 0; 59 | 60 | #if !DISABLE_CANARY 61 | canaries = _zone->chunk_count >> CANARY_COUNT_DIV; 62 | #endif 63 | return (_zone->chunk_count - canaries); 64 | } 65 | 66 | EXTERNAL_API FLATTEN NO_DISCARD REALLOC_SIZE ASSUME_ALIGNED void *iso_realloc(void *p, size_t size) { 67 | if(size == 0) { 68 | iso_free(p); 69 | return NULL; 70 | } 71 | 72 | void *r = iso_alloc(size); 73 | 74 | if(r == NULL) { 75 | return r; 76 | } 77 | 78 | size_t chunk_size = iso_chunksz(p); 79 | 80 | if(size > chunk_size) { 81 | size = chunk_size; 82 | } 83 | 84 | if(p != NULL) { 85 | _iso_alloc_memcpy(r, p, size); 86 | } 87 | 88 | #if PERM_FREE_REALLOC 89 | _iso_free(p, true); 90 | #else 91 | _iso_free_size(p, chunk_size); 92 | #endif 93 | 94 | return r; 95 | } 96 | 97 | EXTERNAL_API FLATTEN NO_DISCARD MALLOC_ATTR REALLOC_SIZE ASSUME_ALIGNED void *iso_reallocarray(void *p, size_t nmemb, size_t size) { 98 | unsigned int res; 99 | 100 | if(__builtin_mul_overflow(nmemb, size, &res)) { 101 | return NULL; 102 | } 103 | 104 | return iso_realloc(p, nmemb * size); 105 | } 106 | 107 | EXTERNAL_API FLATTEN NO_DISCARD ASSUME_ALIGNED char *iso_strdup(const char *str) { 108 | return iso_strdup_from_zone(NULL, str); 109 | } 110 | 111 | EXTERNAL_API FLATTEN NO_DISCARD ASSUME_ALIGNED char *iso_strdup_from_zone(iso_alloc_zone_handle *zone, const char *str) { 112 | if(str == NULL) { 113 | return NULL; 114 | } 115 | 116 | size_t size = strlen(str); 117 | 118 | if(zone != NULL) { 119 | UNMASK_ZONE_HANDLE(zone); 120 | } 121 | 122 | char *p = (char *) _iso_alloc(zone, size); 123 | 124 | if(p == NULL) { 125 | return NULL; 126 | } 127 | 128 | _iso_alloc_memcpy(p, str, size); 129 | return p; 130 | } 131 | 132 | EXTERNAL_API FLATTEN NO_DISCARD ASSUME_ALIGNED char *iso_strndup(const char *str, size_t n) { 133 | return iso_strndup_from_zone(NULL, str, n); 134 | } 135 | 136 | EXTERNAL_API FLATTEN NO_DISCARD ASSUME_ALIGNED char *iso_strndup_from_zone(iso_alloc_zone_handle *zone, const char *str, size_t n) { 137 | if(str == NULL) { 138 | return NULL; 139 | } 140 | 141 | size_t s_size = strlen(str); 142 | 143 | if(zone != NULL) { 144 | UNMASK_ZONE_HANDLE(zone); 145 | } 146 | 147 | char *p = (char *) _iso_alloc(zone, n); 148 | 149 | if(p == NULL) { 150 | return NULL; 151 | } 152 | 153 | if(s_size > n) { 154 | _iso_alloc_memcpy(p, str, n); 155 | p[n - 1] = '\0'; 156 | } else { 157 | _iso_alloc_memcpy(p, str, s_size); 158 | } 159 | 160 | return p; 161 | } 162 | 163 | EXTERNAL_API FLATTEN NO_DISCARD MALLOC_ATTR ASSUME_ALIGNED void *iso_alloc_from_zone(iso_alloc_zone_handle *zone) { 164 | if(zone == NULL) { 165 | return NULL; 166 | } 167 | 168 | UNMASK_ZONE_HANDLE(zone); 169 | iso_alloc_zone_t *_zone = (iso_alloc_zone_t *) zone; 170 | 171 | return _iso_alloc(zone, _zone->chunk_size); 172 | } 173 | 174 | EXTERNAL_API FLATTEN NO_DISCARD MALLOC_ATTR void *iso_alloc_from_zone_tagged(iso_alloc_zone_handle *zone) { 175 | if(zone == NULL) { 176 | return NULL; 177 | } 178 | 179 | UNMASK_ZONE_HANDLE(zone); 180 | iso_alloc_zone_t *_zone = (iso_alloc_zone_t *) zone; 181 | 182 | void *p = _iso_alloc(zone, _zone->chunk_size); 183 | return _tag_ptr(p, zone); 184 | } 185 | 186 | EXTERNAL_API FLATTEN NO_DISCARD void *iso_alloc_tag_ptr(void *p, iso_alloc_zone_handle *zone) { 187 | if(zone == NULL) { 188 | return NULL; 189 | } 190 | 191 | UNMASK_ZONE_HANDLE(zone); 192 | return _tag_ptr(p, zone); 193 | } 194 | 195 | EXTERNAL_API FLATTEN NO_DISCARD void *iso_alloc_untag_ptr(void *p, iso_alloc_zone_handle *zone) { 196 | if(zone == NULL) { 197 | return NULL; 198 | } 199 | 200 | UNMASK_ZONE_HANDLE(zone); 201 | return _untag_ptr(p, zone); 202 | } 203 | 204 | EXTERNAL_API FLATTEN void iso_alloc_verify_ptr_tag(void *p, iso_alloc_zone_handle *zone) { 205 | if(zone == NULL) { 206 | return; 207 | } 208 | 209 | UNMASK_ZONE_HANDLE(zone); 210 | _iso_alloc_verify_tag(p, zone); 211 | return; 212 | } 213 | 214 | EXTERNAL_API FLATTEN NO_DISCARD uint8_t iso_alloc_get_mem_tag(void *p, iso_alloc_zone_handle *zone) { 215 | if(zone == NULL || p == NULL) { 216 | return 0; 217 | } 218 | 219 | UNMASK_ZONE_HANDLE(zone); 220 | return _iso_alloc_get_mem_tag(p, zone); 221 | } 222 | 223 | EXTERNAL_API FLATTEN void iso_alloc_destroy_zone(iso_alloc_zone_handle *zone) { 224 | if(zone == NULL) { 225 | return; 226 | } 227 | 228 | UNMASK_ZONE_HANDLE(zone); 229 | _iso_alloc_destroy_zone(zone); 230 | } 231 | 232 | EXTERNAL_API FLATTEN NO_DISCARD iso_alloc_zone_handle *iso_alloc_new_zone(size_t size) { 233 | iso_alloc_zone_handle *zone = (iso_alloc_zone_handle *) iso_new_zone(size, false); 234 | UNMASK_ZONE_HANDLE(zone); 235 | return zone; 236 | } 237 | 238 | EXTERNAL_API FLATTEN int32_t iso_alloc_name_zone(iso_alloc_zone_handle *zone, char *name) { 239 | if(zone == NULL) { 240 | return 0; 241 | } else { 242 | UNMASK_ZONE_HANDLE(zone); 243 | } 244 | 245 | iso_alloc_zone_t *_zone = (iso_alloc_zone_t *) zone; 246 | return name_mapping(_zone->user_pages_start, ZONE_USER_SIZE, name); 247 | } 248 | 249 | EXTERNAL_API FLATTEN void iso_alloc_protect_root(void) { 250 | _iso_alloc_protect_root(); 251 | } 252 | 253 | EXTERNAL_API FLATTEN void iso_alloc_unprotect_root(void) { 254 | _iso_alloc_unprotect_root(); 255 | } 256 | 257 | EXTERNAL_API FLATTEN uint64_t iso_alloc_detect_zone_leaks(iso_alloc_zone_handle *zone) { 258 | if(zone == NULL) { 259 | return 0; 260 | } else { 261 | UNMASK_ZONE_HANDLE(zone); 262 | } 263 | 264 | return _iso_alloc_detect_leaks_in_zone(zone); 265 | } 266 | 267 | EXTERNAL_API FLATTEN uint64_t iso_alloc_detect_leaks(void) { 268 | return _iso_alloc_detect_leaks(); 269 | } 270 | 271 | EXTERNAL_API FLATTEN uint64_t iso_alloc_zone_mem_usage(iso_alloc_zone_handle *zone) { 272 | if(zone == NULL) { 273 | return 0; 274 | } else { 275 | UNMASK_ZONE_HANDLE(zone); 276 | } 277 | 278 | return _iso_alloc_zone_mem_usage(zone); 279 | } 280 | 281 | EXTERNAL_API FLATTEN uint64_t iso_alloc_mem_usage(void) { 282 | return _iso_alloc_mem_usage(); 283 | } 284 | 285 | EXTERNAL_API FLATTEN void iso_verify_zones(void) { 286 | verify_all_zones(); 287 | } 288 | 289 | EXTERNAL_API FLATTEN void iso_verify_zone(iso_alloc_zone_handle *zone) { 290 | if(zone == NULL) { 291 | return; 292 | } else { 293 | UNMASK_ZONE_HANDLE(zone); 294 | } 295 | 296 | verify_zone(zone); 297 | } 298 | 299 | EXTERNAL_API FLATTEN void iso_flush_caches(void) { 300 | flush_caches(); 301 | } 302 | 303 | #if HEAP_PROFILER 304 | EXTERNAL_API FLATTEN size_t iso_get_alloc_traces(iso_alloc_traces_t *traces_out) { 305 | return _iso_get_alloc_traces(traces_out); 306 | } 307 | 308 | EXTERNAL_API FLATTEN size_t iso_get_free_traces(iso_free_traces_t *traces_out) { 309 | return _iso_get_free_traces(traces_out); 310 | } 311 | 312 | EXTERNAL_API FLATTEN void iso_alloc_reset_traces(void) { 313 | _iso_alloc_reset_traces(); 314 | } 315 | #endif 316 | 317 | #if EXPERIMENTAL 318 | EXTERNAL_API FLATTEN void iso_alloc_search_stack(void *p) { 319 | _iso_alloc_search_stack(p); 320 | } 321 | #endif 322 | -------------------------------------------------------------------------------- /PROFILER.md: -------------------------------------------------------------------------------- 1 | # IsoAlloc Heap Profiler 2 | 3 | Different workloads can affect the performance of IsoAlloc. For example, if your program only ever makes allocations between 32 and 512 bytes then theres no reason to allocate zones for sizes above 512 bytes. Other workloads might make a lot of short lived allocations of one size. By sampling allocations from this target we can produce data that results in a better understanding of memory usage through the programs lifetime. This data can be used to generate more efficient configurations of IsoAlloc. 4 | 5 | The heap profiler is designed to sample heap allocations over time across your workload. This profiler emits a machine readable file throughout the lifetime of the sampled target. These files are then passed to the profiler tool where they are merged. After merging the profiler tool will emit a C header file `iso_alloc_target_config.h`. This tooling is still under development. 6 | 7 | In order to get the most out of the profiler it is recommended to compile all of your code with `-fno-omit-frame-pointer`. Without this flag IsoAlloc can't properly collect backtrace information and may even be unstable. 8 | 9 | ## Profiler Tuning 10 | 11 | You can control the file profiler data is written to with the `ISO_ALLOC_PROFILER_FILE_PATH` environment variable. The default path is `$CWD/iso_alloc_profiler.data`. 12 | 13 | Currently theres only one way to tune the profiler internals and thats by changing `PROFILER_ODDS` or `CHUNK_USAGE_THRESHOLD`. These control the rate at which we sample allocations and the % a zone must be full before being recorded as such. 14 | 15 | ## Profiler Output Format 16 | 17 | The profiler outputs a file (example below) that contains information about the state of the IsoAlloc managed heap. This information is captured by sampling allocations during runtime and when the process is exiting. 18 | 19 | ``` 20 | # Total Allocations 21 | allocated=5766465 22 | 23 | # Number of allocations sampled 24 | alloc_sampled=551 25 | 26 | # Total free's 27 | freed=4324848 28 | 29 | # Number of free's sampled 30 | free_sampled=427 31 | 32 | # Sampled unique backtraces to malloc/free 33 | # backtrace id, backtrace hash, number of calls, smallest size requested, largest size requested, backtrace 34 | 35 | alloc_backtrace=0,backtrace_hash=0x8614,calls=117,lower_bound_size=16,upper_bound_size=8192 36 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 37 | 0x400f44 -> [?] 38 | 0x401134 -> [?] 39 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 40 | 0x4008e4 -> [?] 41 | alloc_backtrace=1,backtrace_hash=0x86a0,calls=9,lower_bound_size=45,upper_bound_size=538 42 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 43 | 0x400f44 -> [?] 44 | 0x401180 -> [?] 45 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 46 | 0x4008e4 -> [?] 47 | alloc_backtrace=2,backtrace_hash=0xea18,calls=148,lower_bound_size=16,upper_bound_size=8192 48 | 0xffffab916d64 -> [?] 49 | 0xffffab91a03c -> iso_calloc build/libisoalloc.so 50 | 0x400d04 -> [?] 51 | 0x401230 -> [?] 52 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 53 | 0x4008e4 -> [?] 54 | alloc_backtrace=3,backtrace_hash=0xea54,calls=16,lower_bound_size=134,upper_bound_size=8212 55 | 0xffffab916d64 -> [?] 56 | 0xffffab91a03c -> iso_calloc build/libisoalloc.so 57 | 0x400d04 -> [?] 58 | 0x40127c -> [?] 59 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 60 | 0x4008e4 -> [?] 61 | alloc_backtrace=4,backtrace_hash=0x81dc,calls=127,lower_bound_size=8,upper_bound_size=4096 62 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 63 | 0x400a94 -> [?] 64 | 0x40132c -> [?] 65 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 66 | 0x4008e4 -> [?] 67 | alloc_backtrace=5,backtrace_hash=0x2040,calls=104,lower_bound_size=16,upper_bound_size=8192 68 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 69 | 0xffffab91a1c8 -> iso_realloc build/libisoalloc.so 70 | 0x400ac0 -> [?] 71 | 0x40132c -> [?] 72 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 73 | 0x4008e4 -> [?] 74 | alloc_backtrace=6,backtrace_hash=0x2014,calls=11,lower_bound_size=151,upper_bound_size=4124 75 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 76 | 0xffffab91a1c8 -> iso_realloc build/libisoalloc.so 77 | 0x400ac0 -> [?] 78 | 0x401378 -> [?] 79 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 80 | 0x4008e4 -> [?] 81 | alloc_backtrace=7,backtrace_hash=0x8188,calls=11,lower_bound_size=75,upper_bound_size=2062 82 | 0xffffab91a010 -> iso_alloc build/libisoalloc.so 83 | 0x400a94 -> [?] 84 | 0x401378 -> [?] 85 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 86 | 0x4008e4 -> [?] 87 | free_backtrace=0,backtrace_hash=0x86d4,calls=66 88 | 0xffffab91a068 -> iso_free build/libisoalloc.so 89 | 0x400ffc -> [?] 90 | 0x401134 -> [?] 91 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 92 | 0x4008e4 -> [?] 93 | free_backtrace=1,backtrace_hash=0x995c,calls=60 94 | 0xffffab91a068 -> iso_free build/libisoalloc.so 95 | 0x401074 -> [?] 96 | 0x401134 -> [?] 97 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 98 | 0x4008e4 -> [?] 99 | free_backtrace=2,backtrace_hash=0x99e8,calls=7 100 | 0xffffab91a068 -> iso_free build/libisoalloc.so 101 | 0x401074 -> [?] 102 | 0x401180 -> [?] 103 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 104 | 0x4008e4 -> [?] 105 | free_backtrace=3,backtrace_hash=0x8660,calls=6 106 | 0xffffab91a068 -> iso_free build/libisoalloc.so 107 | 0x400ffc -> [?] 108 | 0x401180 -> [?] 109 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 110 | 0x4008e4 -> [?] 111 | free_backtrace=4,backtrace_hash=0x8418,calls=66 112 | 0xffffab91a068 -> iso_free build/libisoalloc.so 113 | 0x400e34 -> [?] 114 | 0x401230 -> [?] 115 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 116 | 0x4008e4 -> [?] 117 | free_backtrace=5,backtrace_hash=0x8790,calls=72 118 | 0xffffab91a068 -> iso_free build/libisoalloc.so 119 | 0x400dbc -> [?] 120 | 0x401230 -> [?] 121 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 122 | 0x4008e4 -> [?] 123 | free_backtrace=6,backtrace_hash=0x87dc,calls=6 124 | 0xffffab91a068 -> iso_free build/libisoalloc.so 125 | 0x400dbc -> [?] 126 | 0x40127c -> [?] 127 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 128 | 0x4008e4 -> [?] 129 | free_backtrace=7,backtrace_hash=0x8454,calls=4 130 | 0xffffab91a068 -> iso_free build/libisoalloc.so 131 | 0x400e34 -> [?] 132 | 0x40127c -> [?] 133 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 134 | 0x4008e4 -> [?] 135 | free_backtrace=8,backtrace_hash=0x80c0,calls=64 136 | 0xffffab91a068 -> iso_free build/libisoalloc.so 137 | 0x400bf0 -> [?] 138 | 0x40132c -> [?] 139 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 140 | 0x4008e4 -> [?] 141 | free_backtrace=9,backtrace_hash=0x8048,calls=55 142 | 0xffffab91a068 -> iso_free build/libisoalloc.so 143 | 0x400b78 -> [?] 144 | 0x40132c -> [?] 145 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 146 | 0x4008e4 -> [?] 147 | free_backtrace=10,backtrace_hash=0x8094,calls=4 148 | 0xffffab91a068 -> iso_free build/libisoalloc.so 149 | 0x400bf0 -> [?] 150 | 0x401378 -> [?] 151 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 152 | 0x4008e4 -> [?] 153 | free_backtrace=11,backtrace_hash=0x801c,calls=5 154 | 0xffffab91a068 -> iso_free build/libisoalloc.so 155 | 0x400b78 -> [?] 156 | 0x401378 -> [?] 157 | 0xffffab793090 -> __libc_start_main /lib/aarch64-linux-gnu/libc.so.6 158 | 0x4008e4 -> [?] 159 | 160 | # Chunk size, number of zones holding that size, 161 | # number of times that zone was 75% full 162 | 128,1,1 163 | 256,1,6 164 | 512,1,29 165 | 1024,1,58 166 | 2048,4,116 167 | 4096,16,570 168 | 8192,65,3195 169 | 16384,33,13 170 | ``` 171 | 172 | The profiler will collect backtraces in order to produce a report about callers into IsoAlloc. This data is helpful for understanding memory allocation patterns in a program. These hashes are not immediately usable, the profiler uses them internally to track unique call stacks. If the profiler data shows a large number of backtraces then its unlikely using just a handful of memory allocation abstractions (e.g. its frequently calling malloc/new). Due to their size (16 bits) calculated backtraces may not be entirely unique. To get the most accurate results from this feature please compile IsoAlloc and your program with the `-fno-omit-frame-pointer` option. 173 | 174 | The 'Zone data' shown above is a simple CSV format that is displaying the size of chunks, the number of zones holding chunks of that size, and the number of times the zone was more than `CHUNK_USAGE_THRESHOLD` % (default=75%) full when being sampled. In the example above this program was making a high number of 16384, and 4096 byte allocations. 175 | 176 | ## Profiler Tool 177 | 178 | TODO - A CLI utility that reads the profiler output and produces an IsoAlloc configuration suited for that runtime. This tool isn't written yet. 179 | 180 | ## Allocator Based Program Profiling 181 | 182 | The profiler built into IsoAlloc is pretty simple. We sample calls to `malloc()` and `free()` and record some basic information such as the size of the allocation requested, and record unique backtraces that made the call. This information is useful for various security research (e.g. fuzzing instrumentation) and analysis of code paths that perform memory allocation and manipulation. The public API now defines a structure, `iso_alloc_traces_t`, that can be requested from the allocator that contains this information. The profiler is still experimental so please expect the API to be unstable and change frequently. 183 | -------------------------------------------------------------------------------- /SECURITY_COMPARISON.MD: -------------------------------------------------------------------------------- 1 | # Heap Allocator Security Feature Comparison 2 | 3 | Heap allocators hide incredible complexity behind `malloc` and `free`. They must maintain an acceptable level of performance for various environments with wildly different memory and CPU constraints. All the security checks in the world don't matter if they require performance regressions that slow down your program by orders of magnitude. Striking a balance between performance and security is a requirement if you want people to use your library. Most allocators have some security checks, even if they're poorly implemented and easily bypassed. It's impossible to capture the nuances of how those checks work, the corner cases of when they do and don't apply, or the environment changes that affect their efficacy etc. This table is incomplete at the moment so I welcome pull requests that improve its accuracy. 4 | 5 | :heavy_check_mark: = Yes by default 6 | 7 | :heavy_plus_sign: = Yes but requires configuration 8 | 9 | :heavy_minus_sign: = Yes but via non-standard api 10 | 11 | :x: = Not available 12 | 13 | :grey_question: = Todo 14 | 15 | 16 | | Security Feature | isoalloc | scudo | mimalloc | tcmalloc | ptmalloc3 | jemalloc | musl malloc-ng | hardened_malloc | PartitionAlloc | snmalloc | 17 | |:-----------------------------------:|:----------------:|:----------------:|:----------------:|:---------------:|:----------------:|:----------------:|:----------------:|:----------------:|:----------------:|:----------------:| 18 | |Memory Isolation |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:x: |:x: |:grey_question: |:x: |:heavy_check_mark:|:heavy_check_mark:|:grey_question: 19 | |Canaries |:heavy_check_mark:|:heavy_check_mark:|:x: |:x: |:heavy_plus_sign: |:x: |:heavy_check_mark:|:heavy_check_mark:|:grey_question: |:heavy_check_mark: 20 | |Non-global canary |:heavy_check_mark:|:x: |:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:grey_question: |:heavy_check_mark: 21 | |Guard Pages |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:x: |:x: |:x: |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark: 22 | |Randomized Allocations |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:grey_question: |:x: |:x: |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark: 23 | |Pointer Obfuscation |:heavy_check_mark:|:x: |:heavy_plus_sign: |:grey_question: |:x: |:grey_question: |:x: |:grey_question: |:heavy_check_mark:|:heavy_check_mark: 24 | |Double Free Detection |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:x: |:heavy_check_mark:|:heavy_plus_sign: |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:heavy_check_mark: 25 | |Chunk Alignment Check |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:x: |:heavy_check_mark:|:heavy_plus_sign: |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark: 26 | |Out Of Band Metadata |:heavy_check_mark:|:x: |:x: |:x: |:x: |:heavy_check_mark:|:heavy_check_mark:|:heavy_check_mark:|:x: |:heavy_check_mark: 27 | |Permanent Free API |:heavy_minus_sign:|:x: |:x: |:x: |:x: |:x: |:x: |:x: |:x: |:x: 28 | |Freed Chunk Sanitization |:heavy_plus_sign: |:heavy_plus_sign: |:x: |:x: |:x: |:heavy_plus_sign: |:x: |:heavy_check_mark:|:heavy_plus_sign: |:x: 29 | |Adjacent Chunk Verification |:heavy_check_mark:|:heavy_check_mark:|:x: |:x: |:x: |:x: |:x: |:grey_question: |:x: |:x: 30 | |Delayed Free |:heavy_check_mark:|:heavy_check_mark:|:x: |:x: |:x: |:heavy_plus_sign: |:heavy_check_mark:|:heavy_check_mark:|:heavy_plus_sign: |:heavy_check_mark: 31 | |Dangling Pointer Detection |:heavy_plus_sign: |:x: |:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:heavy_plus_sign: |:x: 32 | |GWP-ASAN Like Sampling |:heavy_plus_sign: |:heavy_plus_sign: |:x: |:heavy_plus_sign:|:x: |:x: |:x: |:heavy_check_mark:|:grey_question: |:x: 33 | |Size Mismatch Detection |:heavy_check_mark:|:heavy_check_mark:|:x: |:x: |:heavy_plus_sign: |:x: |:x: |:heavy_check_mark:|:heavy_plus_sign: |:heavy_plus_sign: 34 | |ARM Memory Tagging |:x: |:heavy_check_mark:|:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:heavy_check_mark: |:x: 35 | |Zone/Chunk CPU/Thread Pinning |:heavy_plus_sign: |:x: |:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:x: |:x: 36 | |Chunk Race Error Detection |:x: |:heavy_check_mark:|:grey_question: |:x: |:x: |:x: |:grey_question: |:grey_question: |:grey_question: |:heavy_plus_sign: 37 | |Zero Size Allocation Special Handling|:heavy_check_mark:|:x: |:grey_question: |:grey_question: |:x: |:x: |:x: |:heavy_check_mark:|:x: |:x: 38 | |Read-only global structure |:heavy_minus_sign:|:x: |:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:x: |:x: 39 | |SW Memory Tagging |:heavy_minus_sign:|:x: |:x: |:x: |:x: |:x: |:x: |:grey_question: |:heavy_check_mark:|:x: 40 | |Guarded memcpy/memmove |:heavy_check_mark:|:x: |:x: |:x: |:x: |:x: |:x: |:heavy_minus_sign:|:x: |:heavy_check_mark: 41 | |Automatic initialization |:x: |:heavy_plus_sign: |:x: |:x: |:x: |:x: |:x: |:heavy_check_mark:|:x: |:x: 42 | 43 | **Lexicon** 44 | 45 | - Non-global canary: There isn't a single global secret where all bets are off when leaked, but a different canary per slab/arena. 46 | - *Double Free Detection*: A pointer can't be freed twice. 47 | - *Chunk Alignment Check*: Pointers passed to `free` must be aligned, and multiples of the zone chunk size. 48 | - *Out Of Band Metadata*: The chunk metadata are not stored next to/in the chunk, but out of band, making them harder to corrupt. 49 | - *Freed Chunk Sanitization*: Freed memory is overwritten by some value, preventing info-leaks. 50 | - *Adjacent Chunk Verification*: When freeing a chunk the canaries in adjacent chunks above/below are verified. 51 | - *Delayed Free*: Chunks aren't immediately freed, making use-after-free exploitation without a heap-spray harder. 52 | - *Size Mismatch Detection*: In C++, type-confusing at free time of objects with *sufficiently different sizes* is detected. 53 | - *Arm Memory Tagging*: See https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/enhancing-memory-safety 54 | - *Zone/Chunk CPU Pinning*: Allocations from a given zone are restricted to the CPU core that created that zone. 55 | - *Zero Size Allocation Special Handling*: Zero size allocations are treated like a dedicated size class, 56 | and point to a non-readable and non-writable region, to ensure that the application can't use them in any way. 57 | - *Read-only global structure*: The global state structure is entirely read-only after initialization. 58 | - *SW Memory Tagging*: The ability to tag pointers with a unique tag that is later verified before pointer dereference 59 | - *Guarded Memcpy*: Ability to use allocator meta-data to protect destination of memcpy from overflow. 60 | - *Automatic initialization*: The allocator memory is always initialized to a specific value, usually `0`. Note that while this is cheaper than zero'ing on free, it is also less powerful. 61 | 62 | **Notes** 63 | 64 | Not every security feature is needed in every allocator. A missing feature in the table above may not mean the allocator needs it but may instead may not require it. The table needs a N/A qualifier. This section attempts to capture some of this nuance. 65 | 66 | - SNMalloc stores free lists in band but protects it 67 | 68 | **Sources** 69 | 70 | https://github.com/struct/IsoAlloc 71 | 72 | https://source.android.com/devices/tech/debug/scudo 73 | 74 | https://github.com/llvm/llvm-project/tree/main/compiler-rt/lib/scudo/standalone 75 | 76 | https://www.microsoft.com/en-us/research/uploads/prod/2019/06/mimalloc-tr-v1.pdf 77 | 78 | Thanks to [Kostya Kortchinsky](https://twitter.com/@crypt0ad) for reviewing the Scudo column 79 | 80 | https://dustri.org/b/security-features-of-musl.html 81 | 82 | https://github.com/GrapheneOS/hardened_malloc#security-properties 83 | 84 | https://downloads.immunityinc.com/infiltrate-archives/webkit_heap.pdf 85 | 86 | https://census-labs.com/media/shadow-infiltrate-2017.pdf 87 | 88 | https://blog.nsogroup.com/a-tale-of-two-mallocs-on-android-libc-allocators-part-3-exploitation/ 89 | 90 | https://sourceware.org/glibc/wiki/MallocInternals 91 | 92 | https://source.chromium.org/chromium/chromium/src/+/master:base/allocator/partition_allocator/PartitionAlloc.md 93 | 94 | https://source.chromium.org/chromium/chromium/src/+/master:base/allocator/partition_allocator/starscan/README.md 95 | 96 | https://google.github.io/tcmalloc/gwp-asan.html 97 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /include/iso_alloc_internal.h: -------------------------------------------------------------------------------- 1 | /* iso_alloc_internal.h - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #pragma once 5 | 6 | #ifndef _GNU_SOURCE 7 | #define _GNU_SOURCE 1 8 | #endif 9 | 10 | #if !__aarch64__ && !__x86_64__ 11 | #pragma error "IsoAlloc is untested and unsupported on 32 bit platforms" 12 | assert(sizeof(size_t) >= 64) 13 | #endif 14 | 15 | #if __linux__ 16 | #include "os/linux.h" 17 | #endif 18 | 19 | #if __APPLE__ 20 | #include "os/macos.h" 21 | #endif 22 | 23 | #if __ANDROID__ 24 | #include "os/android.h" 25 | #endif 26 | 27 | #if __FreeBSD__ 28 | #include "os/freebsd.h" 29 | #endif 30 | 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | 45 | #if MEM_USAGE 46 | #include 47 | #endif 48 | 49 | #if THREAD_SUPPORT 50 | #include 51 | #ifdef __cplusplus 52 | #include 53 | using namespace std; 54 | #else 55 | #include 56 | #endif 57 | #endif 58 | 59 | #if HEAP_PROFILER 60 | #include 61 | #endif 62 | 63 | #include "conf.h" 64 | #include "iso_alloc.h" 65 | #include "iso_alloc_sanity.h" 66 | #include "iso_alloc_util.h" 67 | #include "iso_alloc_ds.h" 68 | #include "iso_alloc_profiler.h" 69 | #include "compiler.h" 70 | 71 | #ifndef MADV_DONTNEED 72 | #define MADV_DONTNEED POSIX_MADV_DONTNEED 73 | #endif 74 | 75 | #ifndef MADV_FREE 76 | #define FREE_OR_DONTNEED MADV_DONTNEED 77 | #else 78 | #define FREE_OR_DONTNEED MADV_FREE 79 | #endif 80 | 81 | #if ENABLE_ASAN 82 | #include 83 | 84 | #define POISON_ZONE(zone) \ 85 | if(IS_POISONED_RANGE(zone->user_pages_start, ZONE_USER_SIZE) == 0) { \ 86 | ASAN_POISON_MEMORY_REGION(zone->user_pages_start, ZONE_USER_SIZE); \ 87 | } \ 88 | if(IS_POISONED_RANGE(zone->bitmap_start, zone->bitmap_size) == 0) { \ 89 | ASAN_POISON_MEMORY_REGION(zone->user_pages_start, zone->bitmap_size); \ 90 | } 91 | 92 | #define UNPOISON_ZONE(zone) \ 93 | if(IS_POISONED_RANGE(zone->user_pages_start, ZONE_USER_SIZE) != 0) { \ 94 | ASAN_UNPOISON_MEMORY_REGION(zone->user_pages_start, ZONE_USER_SIZE); \ 95 | } \ 96 | if(IS_POISONED_RANGE(zone->bitmap_start, zone->bitmap_size) != 0) { \ 97 | ASAN_UNPOISON_MEMORY_REGION(zone->bitmap_start, zone->bitmap_size); \ 98 | } 99 | 100 | #define POISON_ZONE_CHUNK(zone, ptr) \ 101 | if(IS_POISONED_RANGE(ptr, zone->chunk_size) == 0) { \ 102 | ASAN_POISON_MEMORY_REGION(ptr, zone->chunk_size); \ 103 | } 104 | 105 | #define UNPOISON_ZONE_CHUNK(zone, ptr) \ 106 | if(IS_POISONED_RANGE(ptr, zone->chunk_size) != 0) { \ 107 | ASAN_UNPOISON_MEMORY_REGION(ptr, zone->chunk_size); \ 108 | } 109 | 110 | #define POISON_BIG_ZONE(zone) \ 111 | if(IS_POISONED_RANGE(zone->user_pages_start, zone->size) == 0) { \ 112 | ASAN_POISON_MEMORY_REGION(zone->user_pages_start, zone->size); \ 113 | } 114 | 115 | #define UNPOISON_BIG_ZONE(zone) \ 116 | if(IS_POISONED_RANGE(zone->user_pages_start, zone->size) != 0) { \ 117 | ASAN_UNPOISON_MEMORY_REGION(zone->user_pages_start, zone->size); \ 118 | } 119 | 120 | #define IS_POISONED_RANGE(ptr, size) \ 121 | __asan_region_is_poisoned(ptr, size) 122 | #else 123 | #define POISON_ZONE(zone) 124 | #define UNPOISON_ZONE(zone) 125 | #define POISON_ZONE_CHUNK(ptr, zone) 126 | #define UNPOISON_ZONE_CHUNK(ptr, zone) 127 | #define POISON_BIG_ZONE(zone) 128 | #define UNPOISON_BIG_ZONE(zone) 129 | #define IS_POISONED_RANGE(ptr, size) 0 130 | #endif 131 | 132 | #define OK 0 133 | #define ERR -1 134 | 135 | /* GCC complains if your constructor priority is 136 | * 0-100 but Clang does not. We need the lowest 137 | * priority constructor for MALLOC_HOOK */ 138 | #define FIRST_CTOR 101 139 | #define LAST_DTOR 65535 140 | 141 | #if DEBUG 142 | #define LOG(msg, ...) \ 143 | _iso_alloc_printf(STDOUT_FILENO, "[LOG][%d](%s:%d %s()) " msg "\n", getpid(), __FILE__, __LINE__, __func__, ##__VA_ARGS__); 144 | #else 145 | #define LOG(msg, ...) 146 | #endif 147 | 148 | #define LOG_AND_ABORT(msg, ...) \ 149 | _iso_alloc_printf(STDOUT_FILENO, "[ABORTING][%d](%s:%d %s()) " msg "\n", getpid(), __FILE__, __LINE__, __func__, ##__VA_ARGS__); \ 150 | abort(); 151 | 152 | /* The number of bits in the bitmap that correspond 153 | * to a user chunk. We use 2 bits: 154 | * 00 free, never used 155 | * 10 currently in use 156 | * 01 was used, now free 157 | * 11 canary chunk / permanently free'd */ 158 | #define BITS_PER_CHUNK 2 159 | #define BITS_PER_CHUNK_SHIFT 1 160 | 161 | #define BITS_PER_BYTE 8 162 | #define BITS_PER_BYTE_SHIFT 3 163 | 164 | #define BITS_PER_QWORD 64 165 | #define BITS_PER_QWORD_SHIFT 6 166 | 167 | #define BITS_PER_ODWORD 128 168 | #define BITS_PER_ODWORD_SHIFT 7 169 | 170 | #define CANARY_SIZE 8 171 | 172 | #define USED_BIT_VECTOR 0x5555555555555555 173 | 174 | /* All chunks are 8 byte aligned */ 175 | #define CHUNK_ALIGNMENT 8 176 | 177 | #define SZ_ALIGNMENT 32 178 | 179 | #define WHICH_BIT(bit_slot) \ 180 | (bit_slot & (BITS_PER_QWORD - 1)) 181 | 182 | #define IS_ALIGNED(v) \ 183 | (v & (CHUNK_ALIGNMENT - 1)) 184 | 185 | #define IS_PAGE_ALIGNED(v) \ 186 | (v & (g_page_size - 1)) 187 | 188 | #define GET_BIT(n, k) \ 189 | (n >> k) & 1UL 190 | 191 | #define SET_BIT(n, k) \ 192 | n |= 1UL << k; 193 | 194 | #define UNSET_BIT(n, k) \ 195 | n &= ~(1UL << k); 196 | 197 | #define ALIGN_SZ_UP(n) \ 198 | ((n + SZ_ALIGNMENT - 1) & ~(SZ_ALIGNMENT - 1)) 199 | 200 | #define ALIGN_SZ_DOWN(n) \ 201 | (((n + SZ_ALIGNMENT - 1) & ~(SZ_ALIGNMENT - 1)) - SZ_ALIGNMENT) 202 | 203 | #define ROUND_UP_PAGE(n) \ 204 | ((((n + g_page_size) - 1) >> g_page_size_shift) * (g_page_size)) 205 | 206 | #define ROUND_DOWN_PAGE(n) \ 207 | (ROUND_UP_PAGE(n) - g_page_size) 208 | 209 | #if MASK_PTRS 210 | #define MASK_ZONE_PTRS(zone) \ 211 | MASK_BITMAP_PTRS(zone); \ 212 | MASK_USER_PTRS(zone); 213 | 214 | #define UNMASK_ZONE_PTRS(zone) \ 215 | MASK_ZONE_PTRS(zone); 216 | 217 | #define MASK_BITMAP_PTRS(zone) \ 218 | zone->bitmap_start = (void *) ((uintptr_t) zone->bitmap_start ^ (uintptr_t) zone->pointer_mask); 219 | 220 | #define MASK_USER_PTRS(zone) \ 221 | zone->user_pages_start = (void *) ((uintptr_t) zone->user_pages_start ^ (uintptr_t) zone->pointer_mask); 222 | 223 | #define UNMASK_USER_PTR(zone) \ 224 | (void *) ((uintptr_t) zone->user_pages_start ^ (uintptr_t) zone->pointer_mask) 225 | 226 | #define UNMASK_BITMAP_PTR(zone) \ 227 | (void *) ((uintptr_t) zone->bitmap_start ^ (uintptr_t) zone->pointer_mask) 228 | 229 | #define MASK_BIG_ZONE_NEXT(bnp) \ 230 | UNMASK_BIG_ZONE_NEXT(bnp) 231 | 232 | #define UNMASK_BIG_ZONE_NEXT(bnp) \ 233 | ((iso_alloc_big_zone_t *) ((uintptr_t) _root->big_zone_next_mask ^ (uintptr_t) bnp)) 234 | #else 235 | #define MASK_ZONE_PTRS(zone) 236 | #define UNMASK_ZONE_PTRS(zone) 237 | #define MASK_BITMAP_PTRS(zone) 238 | #define MASK_USER_PTRS(zone) 239 | #define UNMASK_USER_PTR(zone) (void *) zone->user_pages_start 240 | #define UNMASK_BITMAP_PTR(zone) (void *) zone->bitmap_start 241 | #define MASK_BIG_ZONE_NEXT(bnp) bnp 242 | #define UNMASK_BIG_ZONE_NEXT(bnp) bnp 243 | #endif 244 | 245 | /* Cap our big zones at 4GB of memory */ 246 | #define BIG_SZ_MAX 4294967296 247 | 248 | #define MIN_BITMAP_IDX 8 249 | 250 | #define WASTED_SZ_MULTIPLIER 8 251 | #define WASTED_SZ_MULTIPLIER_SHIFT 3 252 | 253 | #define BIG_ZONE_META_DATA_PAGE_COUNT 1 254 | #define BIG_ZONE_USER_PAGE_COUNT 2 255 | #define BIG_ZONE_USER_PAGE_COUNT_SHIFT 1 256 | 257 | #if MEMORY_TAGGING || (ARM_MTE == 1) 258 | #define TAGGED_PTR_MASK 0x00ffffffffffffff 259 | #define IS_TAGGED_PTR_MASK 0xff00000000000000 260 | #define UNTAGGED_BITS 56 261 | #define MEM_TAG_SIZE 1 262 | #endif 263 | 264 | #define MEGABYTE_SIZE 1048576 265 | #define KILOBYTE_SIZE 1024 266 | 267 | /* We don't validate the last byte of the canary. 268 | * It is always 0 to prevent an out of bounds read 269 | * from exposing it's value */ 270 | #define CANARY_VALIDATE_MASK 0xffffffffffffff00 271 | 272 | #define BAD_BIT_SLOT -1 273 | 274 | /* Calculate the user pointer given a zone and a bit slot */ 275 | #define POINTER_FROM_BITSLOT(zone, bit_slot) \ 276 | ((void *) zone->user_pages_start + ((bit_slot >> 1) * zone->chunk_size)); 277 | 278 | /* This global is used by the page rounding macros. 279 | * The value stored in _root->system_page_size is 280 | * preferred but we need this to setup the root. */ 281 | extern uint32_t g_page_size; 282 | 283 | /* We need to know what power of 2 the page size is */ 284 | extern uint32_t g_page_size_shift; 285 | 286 | /* iso_alloc makes a number of default zones for common 287 | * allocation sizes. Allocations are 'first fit' up until 288 | * ZONE_1024 at which point a new zone is created for that 289 | * specific size request. */ 290 | #define DEFAULT_ZONE_COUNT sizeof(default_zones) >> 3 291 | 292 | /* Each user allocation zone we make is 4MB in size. 293 | * With MAX_ZONES at 8192 this means we top out at 294 | * about 32~ gb of heap. If you adjust this then 295 | * you need to make sure that SMALL_SIZE_MAX is correctly 296 | * adjusted or you will calculate chunks outside of 297 | * the zone user memory! */ 298 | #define ZONE_USER_SIZE 4194304 299 | 300 | static_assert(SMALLEST_CHUNK_SZ >= 16, "SMALLEST_CHUNK_SZ is too small, must be at least 16"); 301 | static_assert(SMALL_SIZE_MAX <= 131072, "SMALL_SIZE_MAX is too big, cannot exceed 131072"); 302 | 303 | #if THREAD_SUPPORT 304 | #if USE_SPINLOCK 305 | extern atomic_flag root_busy_flag; 306 | #define LOCK_ROOT() \ 307 | do { \ 308 | } while(atomic_flag_test_and_set(&root_busy_flag)); 309 | 310 | #define UNLOCK_ROOT() \ 311 | atomic_flag_clear(&root_busy_flag); 312 | 313 | #define LOCK_BIG_ZONE_FREE() \ 314 | do { \ 315 | } while(atomic_flag_test_and_set(&_root->big_zone_free_flag)); 316 | 317 | #define UNLOCK_BIG_ZONE_FREE() \ 318 | atomic_flag_clear(&_root->big_zone_free_flag); 319 | 320 | #define LOCK_BIG_ZONE_USED() \ 321 | do { \ 322 | } while(atomic_flag_test_and_set(&_root->big_zone_used_flag)); 323 | 324 | #define UNLOCK_BIG_ZONE_USED() \ 325 | atomic_flag_clear(&_root->big_zone_used_flag); 326 | 327 | #else 328 | extern pthread_mutex_t root_busy_mutex; 329 | #define LOCK_ROOT() \ 330 | pthread_mutex_lock(&root_busy_mutex); 331 | 332 | #define UNLOCK_ROOT() \ 333 | pthread_mutex_unlock(&root_busy_mutex); 334 | 335 | #define LOCK_BIG_ZONE_FREE() \ 336 | pthread_mutex_lock(&_root->big_zone_free_mutex); 337 | 338 | #define UNLOCK_BIG_ZONE_FREE() \ 339 | pthread_mutex_unlock(&_root->big_zone_free_mutex); 340 | 341 | #define LOCK_BIG_ZONE_USED() \ 342 | pthread_mutex_lock(&_root->big_zone_used_mutex); 343 | 344 | #define UNLOCK_BIG_ZONE_USED() \ 345 | pthread_mutex_unlock(&_root->big_zone_used_mutex); 346 | 347 | #endif 348 | #else 349 | #define LOCK_ROOT() 350 | #define UNLOCK_ROOT() 351 | #define LOCK_BIG_ZONE() 352 | #define UNLOCK_BIG_ZONE() 353 | #define LOCK_BIG_ZONE_FREE() 354 | #define UNLOCK_BIG_ZONE_FREE() 355 | #define LOCK_BIG_ZONE_USED() 356 | #define UNLOCK_BIG_ZONE_USED() 357 | #endif 358 | 359 | /* The global root */ 360 | extern iso_alloc_root *_root; 361 | 362 | INTERNAL_HIDDEN INLINE void check_big_canary(iso_alloc_big_zone_t *big); 363 | INTERNAL_HIDDEN INLINE void check_canary(iso_alloc_zone_t *zone, const void *p); 364 | INTERNAL_HIDDEN INLINE void iso_clear_user_chunk(uint8_t *p, size_t size); 365 | INTERNAL_HIDDEN INLINE void insert_free_bit_slot(iso_alloc_zone_t *zone, int64_t bit_slot); 366 | INTERNAL_HIDDEN INLINE void write_canary(iso_alloc_zone_t *zone, void *p); 367 | INTERNAL_HIDDEN INLINE void populate_zone_cache(iso_alloc_zone_t *zone); 368 | INTERNAL_HIDDEN INLINE void flush_chunk_quarantine(void); 369 | INTERNAL_HIDDEN INLINE void clear_zone_cache(void); 370 | INTERNAL_HIDDEN iso_alloc_big_zone_t *iso_find_big_zone(void *p, bool remove); 371 | INTERNAL_HIDDEN iso_alloc_zone_t *is_zone_usable(iso_alloc_zone_t *zone, size_t size); 372 | INTERNAL_HIDDEN iso_alloc_zone_t *find_suitable_zone(size_t size); 373 | INTERNAL_HIDDEN iso_alloc_zone_t *iso_new_zone(size_t size, bool internal); 374 | INTERNAL_HIDDEN iso_alloc_zone_t *_iso_new_zone(size_t size, bool internal, int32_t index); 375 | INTERNAL_HIDDEN iso_alloc_zone_t *iso_find_zone_bitmap_range(const void *p); 376 | INTERNAL_HIDDEN iso_alloc_zone_t *iso_find_zone_range(void *p); 377 | INTERNAL_HIDDEN iso_alloc_zone_t *search_chunk_lookup_table(const void *p); 378 | INTERNAL_HIDDEN bit_slot_t iso_scan_zone_free_slot_slow(iso_alloc_zone_t *zone); 379 | INTERNAL_HIDDEN bit_slot_t iso_scan_zone_free_slot(iso_alloc_zone_t *zone); 380 | INTERNAL_HIDDEN bit_slot_t get_next_free_bit_slot(iso_alloc_zone_t *zone); 381 | INTERNAL_HIDDEN iso_alloc_root *iso_alloc_new_root(void); 382 | INTERNAL_HIDDEN bool is_pow2(uint64_t sz); 383 | INTERNAL_HIDDEN bool _is_zone_retired(iso_alloc_zone_t *zone); 384 | INTERNAL_HIDDEN bool _refresh_zone_mem_tags(iso_alloc_zone_t *zone); 385 | INTERNAL_HIDDEN iso_alloc_zone_t *_iso_free_internal_unlocked(void *p, bool permanent, iso_alloc_zone_t *zone); 386 | INTERNAL_HIDDEN void fill_free_bit_slots(iso_alloc_zone_t *zone); 387 | INTERNAL_HIDDEN void flush_caches(void); 388 | INTERNAL_HIDDEN void iso_free_chunk_from_zone(iso_alloc_zone_t *zone, void *p, bool permanent); 389 | INTERNAL_HIDDEN void create_canary_chunks(iso_alloc_zone_t *zone); 390 | INTERNAL_HIDDEN void iso_alloc_initialize_global_root(void); 391 | INTERNAL_HIDDEN void _iso_alloc_destroy_zone_unlocked(iso_alloc_zone_t *zone, bool flush_caches, bool replace); 392 | INTERNAL_HIDDEN void _iso_alloc_destroy_zone(iso_alloc_zone_t *zone); 393 | INTERNAL_HIDDEN void _verify_zone(iso_alloc_zone_t *zone); 394 | INTERNAL_HIDDEN void _verify_all_zones(void); 395 | INTERNAL_HIDDEN void verify_zone(iso_alloc_zone_t *zone); 396 | INTERNAL_HIDDEN void verify_all_zones(void); 397 | INTERNAL_HIDDEN void _iso_free(void *p, bool permanent); 398 | INTERNAL_HIDDEN void _iso_free_internal(void *p, bool permanent); 399 | INTERNAL_HIDDEN void _iso_free_size(void *p, size_t size); 400 | INTERNAL_HIDDEN void _iso_free_from_zone(void *p, iso_alloc_zone_t *zone, bool permanent); 401 | INTERNAL_HIDDEN void iso_free_big_zone(iso_alloc_big_zone_t *big_zone, bool permanent); 402 | INTERNAL_HIDDEN void _iso_alloc_protect_root(void); 403 | INTERNAL_HIDDEN void _iso_free_quarantine(void *p); 404 | INTERNAL_HIDDEN void _iso_alloc_unprotect_root(void); 405 | INTERNAL_HIDDEN INLINE void dont_need_pages(void *p, size_t size); 406 | INTERNAL_HIDDEN void *_tag_ptr(void *p, iso_alloc_zone_t *zone); 407 | INTERNAL_HIDDEN void *_untag_ptr(void *p, iso_alloc_zone_t *zone); 408 | INTERNAL_HIDDEN void _free_big_zone_list(iso_alloc_big_zone_t *head); 409 | INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_big_alloc(size_t size); 410 | INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc(iso_alloc_zone_t *zone, size_t size); 411 | INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_alloc_bitslot_from_zone(bit_slot_t bitslot, iso_alloc_zone_t *zone); 412 | INTERNAL_HIDDEN ASSUME_ALIGNED void *_iso_calloc(size_t nmemb, size_t size); 413 | INTERNAL_HIDDEN void *_iso_alloc_ptr_search(void *n, bool poison); 414 | INTERNAL_HIDDEN INLINE uint64_t us_rand_uint64(uint64_t *seed); 415 | INTERNAL_HIDDEN INLINE uint64_t rand_uint64(void); 416 | INTERNAL_HIDDEN uint8_t _iso_alloc_get_mem_tag(void *p, iso_alloc_zone_t *zone); 417 | INTERNAL_HIDDEN void _iso_alloc_verify_tag(void *p, iso_alloc_zone_t *zone); 418 | INTERNAL_HIDDEN size_t _iso_alloc_print_stats(void); 419 | INTERNAL_HIDDEN size_t _iso_chunk_size(void *p); 420 | INTERNAL_HIDDEN int64_t check_canary_no_abort(iso_alloc_zone_t *zone, const void *p); 421 | INTERNAL_HIDDEN void _iso_alloc_initialize(void); 422 | INTERNAL_HIDDEN void _iso_alloc_destroy(void); 423 | 424 | #if ARM_MTE 425 | INLINE void *iso_mte_untag_ptr(void *p); 426 | INLINE uint8_t iso_mte_extract_tag(void *p); 427 | INLINE bool iso_is_mte_supported(void); 428 | INLINE void *iso_mte_set_tag_range(void *p, size_t size); 429 | INLINE void *iso_mte_create_tag(void *p, uint64_t exclusion_mask); 430 | INLINE void iso_mte_set_tag(void *p); 431 | INLINE void *iso_mte_get_tag(void *p); 432 | #endif 433 | 434 | #if SIGNAL_HANDLER 435 | INTERNAL_HIDDEN void handle_signal(int sig, siginfo_t *si, void *ctx); 436 | #endif 437 | 438 | #if EXPERIMENTAL 439 | INTERNAL_HIDDEN void _iso_alloc_search_stack(uint8_t *stack_start); 440 | #endif 441 | 442 | #if UNIT_TESTING 443 | EXTERNAL_API iso_alloc_root *_get_root(void); 444 | #endif 445 | -------------------------------------------------------------------------------- /src/iso_alloc_profiler.c: -------------------------------------------------------------------------------- 1 | /* iso_alloc_profiler.c - A secure memory allocator 2 | * Copyright 2023 - chris.rohlf@gmail.com */ 3 | 4 | #include "iso_alloc_internal.h" 5 | 6 | #if MEMCPY_SANITY || MEMSET_SANITY 7 | #include "iso_alloc_sanity.h" 8 | #endif 9 | 10 | #if HEAP_PROFILER 11 | #include "iso_alloc_profiler.h" 12 | #endif 13 | 14 | #include 15 | 16 | INTERNAL_HIDDEN uint64_t _iso_alloc_detect_leaks_in_zone(iso_alloc_zone_t *zone) { 17 | LOCK_ROOT(); 18 | uint64_t leaks = _iso_alloc_zone_leak_detector(zone, false); 19 | UNLOCK_ROOT(); 20 | return leaks; 21 | } 22 | 23 | INTERNAL_HIDDEN uint64_t _iso_alloc_mem_usage(void) { 24 | LOCK_ROOT(); 25 | uint64_t mem_usage = __iso_alloc_mem_usage(); 26 | mem_usage += _iso_alloc_big_zone_mem_usage(); 27 | UNLOCK_ROOT(); 28 | return mem_usage; 29 | } 30 | 31 | INTERNAL_HIDDEN uint64_t _iso_alloc_big_zone_mem_usage(void) { 32 | LOCK_BIG_ZONE_USED(); 33 | uint64_t mem_usage = __iso_alloc_big_zone_mem_usage(_root->big_zone_used); 34 | UNLOCK_BIG_ZONE_USED(); 35 | 36 | LOCK_BIG_ZONE_FREE(); 37 | mem_usage += __iso_alloc_big_zone_mem_usage(_root->big_zone_free); 38 | UNLOCK_BIG_ZONE_FREE(); 39 | 40 | return mem_usage; 41 | } 42 | 43 | INTERNAL_HIDDEN uint64_t _iso_alloc_zone_mem_usage(iso_alloc_zone_t *zone) { 44 | LOCK_ROOT(); 45 | uint64_t zone_mem_usage = __iso_alloc_zone_mem_usage(zone); 46 | UNLOCK_ROOT(); 47 | return zone_mem_usage; 48 | } 49 | 50 | #if DEBUG && MEM_USAGE 51 | INTERNAL_HIDDEN size_t _iso_alloc_print_stats(void) { 52 | struct rusage _rusage = {0}; 53 | 54 | int32_t ret = getrusage(RUSAGE_SELF, &_rusage); 55 | 56 | if(ret == ERR) { 57 | return ERR; 58 | } 59 | 60 | #if __linux__ || __FreeBSD__ 61 | LOG("RSS: %d (mb)", (_rusage.ru_maxrss / KILOBYTE_SIZE)); 62 | #elif __APPLE__ 63 | LOG("RSS: %d (mb)", (_rusage.ru_maxrss / MEGABYTE_SIZE)); 64 | #endif 65 | LOG("Soft Page Faults: %d", _rusage.ru_minflt); 66 | LOG("Hard Page Faults: %d", _rusage.ru_majflt); 67 | return OK; 68 | } 69 | #endif 70 | 71 | INTERNAL_HIDDEN uint64_t _iso_alloc_detect_leaks(void) { 72 | uint64_t total_leaks = 0; 73 | uint64_t big_leaks = 0; 74 | 75 | LOCK_ROOT(); 76 | 77 | for(uint16_t i = 0; i < _root->zones_used; i++) { 78 | iso_alloc_zone_t *zone = &_root->zones[i]; 79 | total_leaks += _iso_alloc_zone_leak_detector(zone, false); 80 | } 81 | 82 | UNLOCK_ROOT(); 83 | LOCK_BIG_ZONE_USED(); 84 | 85 | iso_alloc_big_zone_t *big = _root->big_zone_used; 86 | 87 | if(big != NULL) { 88 | big = UNMASK_BIG_ZONE_NEXT(_root->big_zone_used); 89 | } 90 | 91 | /* All allocations on the used list are 'leaked' */ 92 | while(big != NULL) { 93 | big_leaks += big->size; 94 | LOG("Big zone leaked %lu bytes", big->size); 95 | 96 | if(big->next != NULL) { 97 | big = UNMASK_BIG_ZONE_NEXT(big->next); 98 | } else { 99 | big = NULL; 100 | } 101 | } 102 | 103 | UNLOCK_BIG_ZONE_USED(); 104 | 105 | LOG("Total leaked in big zones: bytes (%lu) megabytes (%lu)", big_leaks, (big_leaks / MEGABYTE_SIZE)); 106 | return total_leaks + big_leaks; 107 | } 108 | 109 | /* This is the built-in leak detector. It works by scanning 110 | * the bitmap for every allocated zone and looking for 111 | * uncleared bits. This does not search for references from 112 | * a root like a GC, so if you purposefully did not free a 113 | * chunk then expect it to show up as leaked! */ 114 | INTERNAL_HIDDEN uint64_t _iso_alloc_zone_leak_detector(iso_alloc_zone_t *zone, bool profile) { 115 | uint32_t in_use = 0; 116 | 117 | #if LEAK_DETECTOR || HEAP_PROFILER 118 | if(zone == NULL) { 119 | return 0; 120 | } 121 | 122 | UNMASK_ZONE_PTRS(zone); 123 | 124 | bitmap_index_t *bm = (bitmap_index_t *) zone->bitmap_start; 125 | uint32_t was_used = 0; 126 | int64_t bms = zone->bitmap_size / sizeof(bitmap_index_t); 127 | 128 | for(bitmap_index_t i = 0; i < bms; i++) { 129 | for(int j = 0; j < BITS_PER_QWORD; j += BITS_PER_CHUNK) { 130 | 131 | if(bm[i] == 0) { 132 | continue; 133 | } 134 | 135 | int64_t bit = GET_BIT(bm[i], j); 136 | int64_t bit_two = GET_BIT(bm[i], (j + 1)); 137 | 138 | /* Chunk was used but is now free */ 139 | if(bit == 0 && bit_two == 1) { 140 | was_used++; 141 | } 142 | 143 | if(bit == 1) { 144 | /* Theres no difference between a leaked and previously 145 | * used chunk (11) and a canary chunk (11). So in order 146 | * to accurately report on leaks we need to verify the 147 | * canary value. If it doesn't validate then we assume 148 | * its a true leak and increment the in_use counter */ 149 | bit_slot_t bit_slot = (i * BITS_PER_QWORD) + j; 150 | const void *leak = (zone->user_pages_start + ((bit_slot >> 1) * zone->chunk_size)); 151 | 152 | if(bit_two == 1 && (check_canary_no_abort(zone, leak) != ERR)) { 153 | continue; 154 | } else { 155 | in_use++; 156 | 157 | if(profile == false) { 158 | LOG("Leaked chunk (%d) in zone[%d] of %d bytes detected at 0x%p (bit position = %d)", in_use, zone->index, zone->chunk_size, leak, bit_slot); 159 | } 160 | } 161 | } 162 | } 163 | } 164 | 165 | if(profile == false) { 166 | LOG("Zone[%d] Total number of %d byte chunks(%d) used and free'd (%d) (%d percent), in use = %d", zone->index, zone->chunk_size, zone->chunk_count, 167 | was_used, (int32_t) ((float) was_used / zone->chunk_count) * 100, in_use); 168 | } 169 | 170 | MASK_ZONE_PTRS(zone); 171 | #endif 172 | 173 | #if HEAP_PROFILER 174 | /* When profiling this zone we want to capture 175 | * the total number of allocations both currently 176 | * in use and previously used by this zone */ 177 | if(profile == true) { 178 | uint64_t total = (in_use + was_used); 179 | return (uint64_t) ((float) (total / zone->chunk_count) * 100.0); 180 | } 181 | #endif 182 | return in_use; 183 | } 184 | 185 | INTERNAL_HIDDEN uint64_t __iso_alloc_zone_mem_usage(iso_alloc_zone_t *zone) { 186 | uint64_t mem_usage = 0; 187 | mem_usage += zone->bitmap_size; 188 | mem_usage += ZONE_USER_SIZE; 189 | LOG("Zone[%d] holds %d byte chunks. Total bytes (%lu), megabytes (%lu)", zone->index, zone->chunk_size, 190 | mem_usage, (mem_usage / MEGABYTE_SIZE)); 191 | return (mem_usage / MEGABYTE_SIZE); 192 | } 193 | 194 | INTERNAL_HIDDEN uint64_t __iso_alloc_mem_usage(void) { 195 | uint64_t mem_usage = 0; 196 | 197 | for(uint16_t i = 0; i < _root->zones_used; i++) { 198 | iso_alloc_zone_t *zone = &_root->zones[i]; 199 | mem_usage += zone->bitmap_size; 200 | mem_usage += ZONE_USER_SIZE; 201 | LOG("Zone[%d] holds %d byte chunks, megabytes (%d) next zone = %d, total allocations = %d, in use = %d", zone->index, zone->chunk_size, 202 | (ZONE_USER_SIZE / MEGABYTE_SIZE), zone->next_sz_index, zone->alloc_count, zone->af_count); 203 | } 204 | 205 | return (mem_usage / MEGABYTE_SIZE); 206 | } 207 | 208 | INTERNAL_HIDDEN uint64_t __iso_alloc_big_zone_mem_usage(iso_alloc_big_zone_t *head) { 209 | uint64_t mem_usage = 0; 210 | iso_alloc_big_zone_t *big = head; 211 | 212 | if(big != NULL) { 213 | big = UNMASK_BIG_ZONE_NEXT(head); 214 | } 215 | 216 | while(big != NULL) { 217 | size_t total_size = big->size + (g_page_size * BIG_ZONE_META_DATA_PAGE_COUNT); 218 | /* Meta data has 2 guard pages, user data has 2 guard pages */ 219 | #if BIG_ZONE_META_DATA_GUARD 220 | total_size += (g_page_size * 4); 221 | #else 222 | total_size += (g_page_size * 2); 223 | #endif 224 | LOG("Big Zone Total bytes mapped (%lu), megabytes (%lu)", big->size, (big->size / MEGABYTE_SIZE)); 225 | mem_usage += total_size; 226 | if(big->next != NULL) { 227 | big = UNMASK_BIG_ZONE_NEXT(big->next); 228 | } else { 229 | big = NULL; 230 | } 231 | } 232 | 233 | if(mem_usage != 0) { 234 | LOG("Total megabytes allocated (%lu)", (mem_usage / MEGABYTE_SIZE)); 235 | return (mem_usage / MEGABYTE_SIZE); 236 | } else { 237 | return 0; 238 | } 239 | } 240 | 241 | #if HEAP_PROFILER 242 | 243 | /* Returns a documented data structure that can 244 | * be used to interpret allocation patterns */ 245 | INTERNAL_HIDDEN size_t _iso_get_alloc_traces(iso_alloc_traces_t *traces_out) { 246 | LOCK_ROOT(); 247 | __iso_memcpy(traces_out, _alloc_bts, sizeof(iso_alloc_traces_t)); 248 | size_t sz = _alloc_bts_count; 249 | UNLOCK_ROOT(); 250 | return sz; 251 | } 252 | 253 | INTERNAL_HIDDEN size_t _iso_get_free_traces(iso_free_traces_t *traces_out) { 254 | LOCK_ROOT(); 255 | __iso_memcpy(traces_out, _free_bts, sizeof(iso_free_traces_t)); 256 | size_t sz = _free_bts_count; 257 | UNLOCK_ROOT(); 258 | return sz; 259 | } 260 | 261 | INTERNAL_HIDDEN void _iso_alloc_reset_traces(void) { 262 | LOCK_ROOT(); 263 | __iso_memset(_alloc_bts, 0x0, sizeof(_alloc_bts)); 264 | __iso_memset(_free_bts, 0x0, sizeof(_free_bts)); 265 | _alloc_bts_count = 0; 266 | _free_bts_count = 0; 267 | UNLOCK_ROOT(); 268 | } 269 | 270 | #define UPDATE_BT_HASH(frame, hash) \ 271 | if(__builtin_frame_address(frame)) { \ 272 | hash ^= (uint64_t) __builtin_return_address(frame); \ 273 | } else { \ 274 | return hash; \ 275 | } 276 | 277 | INTERNAL_HIDDEN INLINE uint64_t _get_backtrace_hash(void) { 278 | uint64_t hash = 0; 279 | UPDATE_BT_HASH(1, hash); 280 | UPDATE_BT_HASH(2, hash); 281 | UPDATE_BT_HASH(3, hash); 282 | UPDATE_BT_HASH(4, hash); 283 | UPDATE_BT_HASH(5, hash); 284 | UPDATE_BT_HASH(6, hash); 285 | UPDATE_BT_HASH(7, hash); 286 | UPDATE_BT_HASH(8, hash); 287 | return hash; 288 | } 289 | 290 | #define SAVE_BACKTRACE_FRAME(frame, bts) \ 291 | if(__builtin_frame_address(frame)) { \ 292 | uint64_t r = (uint64_t) __builtin_return_address(frame); \ 293 | if(r > 0x1000) { \ 294 | bts->callers[frame - 1] = (uint64_t) __builtin_return_address(frame); \ 295 | } \ 296 | } else { \ 297 | return; \ 298 | } 299 | 300 | INTERNAL_HIDDEN INLINE void _save_alloc_backtrace(iso_alloc_traces_t *abts) { 301 | SAVE_BACKTRACE_FRAME(1, abts); 302 | SAVE_BACKTRACE_FRAME(2, abts); 303 | SAVE_BACKTRACE_FRAME(3, abts); 304 | SAVE_BACKTRACE_FRAME(4, abts); 305 | SAVE_BACKTRACE_FRAME(5, abts); 306 | SAVE_BACKTRACE_FRAME(6, abts); 307 | SAVE_BACKTRACE_FRAME(7, abts); 308 | SAVE_BACKTRACE_FRAME(8, abts); 309 | return; 310 | } 311 | 312 | INTERNAL_HIDDEN INLINE void _save_free_backtrace(iso_free_traces_t *fbts) { 313 | SAVE_BACKTRACE_FRAME(1, fbts); 314 | SAVE_BACKTRACE_FRAME(2, fbts); 315 | SAVE_BACKTRACE_FRAME(3, fbts); 316 | SAVE_BACKTRACE_FRAME(4, fbts); 317 | SAVE_BACKTRACE_FRAME(5, fbts); 318 | SAVE_BACKTRACE_FRAME(6, fbts); 319 | SAVE_BACKTRACE_FRAME(7, fbts); 320 | SAVE_BACKTRACE_FRAME(8, fbts); 321 | return; 322 | } 323 | 324 | INTERNAL_HIDDEN void _iso_output_profile() { 325 | _iso_alloc_printf(profiler_fd, "allocated=%d\n", _alloc_count); 326 | _iso_alloc_printf(profiler_fd, "alloc_sampled=%d\n", _alloc_sampled_count); 327 | _iso_alloc_printf(profiler_fd, "freed=%d\n", _free_count); 328 | _iso_alloc_printf(profiler_fd, "free_sampled=%d\n", _free_sampled_count); 329 | 330 | for(uint16_t i = 0; i < _root->zones_used; i++) { 331 | iso_alloc_zone_t *zone = &_root->zones[i]; 332 | _zone_profiler_map[zone->chunk_size].total++; 333 | } 334 | 335 | for(size_t i = 0; i < _alloc_bts_count; i++) { 336 | iso_alloc_traces_t *abts = &_alloc_bts[i]; 337 | _iso_alloc_printf(profiler_fd, "alloc_backtrace=%d,backtrace_hash=0x%x,calls=%d,lower_bound_size=%d,upper_bound_size=%d\n", 338 | i, abts->backtrace_hash, abts->call_count, abts->lower_bound_size, abts->upper_bound_size); 339 | 340 | for(int j = 0; j < BACKTRACE_DEPTH; j++) { 341 | if(abts->callers[j] < 0x1000) { 342 | continue; 343 | } 344 | 345 | Dl_info dl; 346 | dladdr((void *) abts->callers[j], &dl); 347 | 348 | if(dl.dli_sname != NULL) { 349 | _iso_alloc_printf(profiler_fd, "\t0x%x -> %s %s\n", abts->callers[j], dl.dli_sname, dl.dli_fname); 350 | } else { 351 | _iso_alloc_printf(profiler_fd, "\t0x%x -> [?]\n", abts->callers[j]); 352 | } 353 | } 354 | } 355 | 356 | for(size_t i = 0; i < _free_bts_count; i++) { 357 | iso_free_traces_t *fbts = &_free_bts[i]; 358 | _iso_alloc_printf(profiler_fd, "free_backtrace=%d,backtrace_hash=0x%x,calls=%d\n", 359 | i, fbts->backtrace_hash, fbts->call_count); 360 | 361 | for(int j = 0; j < BACKTRACE_DEPTH; j++) { 362 | if(fbts->callers[j] < 0x1000) { 363 | continue; 364 | } 365 | 366 | Dl_info dl; 367 | dladdr((void *) fbts->callers[j], &dl); 368 | 369 | if(dl.dli_sname != NULL) { 370 | _iso_alloc_printf(profiler_fd, "\t0x%x -> %s %s\n", fbts->callers[j], dl.dli_sname, dl.dli_fname); 371 | } else { 372 | _iso_alloc_printf(profiler_fd, "\t0x%x -> [?]\n", fbts->callers[j]); 373 | } 374 | } 375 | } 376 | 377 | for(int i = 0; i < SMALL_SIZE_MAX; i++) { 378 | if(_zone_profiler_map[i].count != 0) { 379 | _iso_alloc_printf(profiler_fd, "%d,%d,%d\n", i, _zone_profiler_map[i].total, _zone_profiler_map[i].count); 380 | } 381 | } 382 | 383 | if(profiler_fd != ERR) { 384 | close(profiler_fd); 385 | profiler_fd = ERR; 386 | } 387 | } 388 | 389 | INTERNAL_HIDDEN void _iso_alloc_profile(size_t size) { 390 | _alloc_count++; 391 | 392 | /* Don't run the profiler on every allocation */ 393 | if(LIKELY((us_rand_uint64(&_root->seed) % PROFILER_ODDS) != 1)) { 394 | return; 395 | } 396 | 397 | _alloc_sampled_count++; 398 | 399 | for(uint16_t i = 0; i < _root->zones_used; i++) { 400 | uint32_t used = 0; 401 | iso_alloc_zone_t *zone = &_root->zones[i]; 402 | 403 | /* For the purposes of the profiler we don't care about 404 | * the differences between canary and leaked chunks. 405 | * So lets just use the full count */ 406 | if(zone->is_full) { 407 | used = zone->chunk_count; 408 | } else { 409 | used = _iso_alloc_zone_leak_detector(zone, true); 410 | } 411 | 412 | used = (int32_t) ((float) (used / zone->chunk_count) * 100.0); 413 | 414 | if(used > CHUNK_USAGE_THRESHOLD) { 415 | _zone_profiler_map[zone->chunk_size].count++; 416 | } 417 | } 418 | 419 | if(_alloc_bts_count < BACKTRACE_DEPTH_SZ) { 420 | iso_alloc_traces_t *abts = NULL; 421 | uint16_t hash = (_get_backtrace_hash() & HG_SIZE); 422 | 423 | /* Take the backtrace hash and determine if its already been seen */ 424 | for(size_t i = 0; i < _alloc_bts_count; i++) { 425 | if(_alloc_bts[i].backtrace_hash == hash) { 426 | abts = &_alloc_bts[i]; 427 | 428 | if(abts->lower_bound_size == 0 || size < abts->lower_bound_size) { 429 | abts->lower_bound_size = size; 430 | } 431 | 432 | if(abts->upper_bound_size == 0 || size > abts->upper_bound_size) { 433 | abts->upper_bound_size = size; 434 | } 435 | 436 | abts->call_count++; 437 | break; 438 | } 439 | } 440 | 441 | /* We haven't seen this backtrace before */ 442 | if(abts == NULL) { 443 | abts = &_alloc_bts[_alloc_bts_count]; 444 | abts->backtrace_hash = hash; 445 | 446 | _save_alloc_backtrace(abts); 447 | _alloc_bts_count++; 448 | } 449 | } 450 | } 451 | 452 | INTERNAL_HIDDEN void _iso_free_profile(void) { 453 | _free_count++; 454 | 455 | /* Don't run the profiler on every allocation */ 456 | if(LIKELY((us_rand_uint64(&_root->seed) % PROFILER_ODDS) != 1)) { 457 | return; 458 | } 459 | 460 | _free_sampled_count++; 461 | 462 | if(_free_bts_count < BACKTRACE_DEPTH_SZ) { 463 | iso_free_traces_t *fbts = NULL; 464 | uint16_t hash = (_get_backtrace_hash() & HG_SIZE); 465 | 466 | /* Take the backtrace hash and determine if its already been seen */ 467 | for(size_t i = 0; i < _free_bts_count; i++) { 468 | if(_free_bts[i].backtrace_hash == hash) { 469 | fbts = &_free_bts[i]; 470 | fbts->call_count++; 471 | break; 472 | } 473 | } 474 | 475 | /* We haven't seen this backtrace before */ 476 | if(fbts == NULL) { 477 | fbts = &_free_bts[_free_bts_count]; 478 | fbts->backtrace_hash = hash; 479 | 480 | _save_free_backtrace(fbts); 481 | _free_bts_count++; 482 | } 483 | } 484 | } 485 | 486 | INTERNAL_HIDDEN void _initialize_profiler(void) { 487 | /* We don't need thread safety for this file descriptor 488 | * as long as we guarantee to never use it if the root 489 | * is not locked */ 490 | if(getenv(PROFILER_ENV_STR) != NULL) { 491 | profiler_fd = open(getenv(PROFILER_ENV_STR), O_RDWR | O_CREAT | O_SYNC, 0666); 492 | } else { 493 | profiler_fd = open(PROFILER_FILE_PATH, O_RDWR | O_CREAT | O_SYNC, 0666); 494 | } 495 | 496 | if(profiler_fd == ERR) { 497 | LOG_AND_ABORT("Cannot open file descriptor for %s", PROFILER_FILE_PATH); 498 | } 499 | } 500 | #endif 501 | --------------------------------------------------------------------------------