├── test ├── __init__.py ├── uninitialized_free.c ├── impossibly_large_malloc.c ├── uninitialized_malloc_usable_size.c ├── test_util.h ├── uninitialized_realloc.c ├── write_zero_size.c ├── unaligned_free_small.c ├── unaligned_free_large.c ├── double_free_small.c ├── double_free_large.c ├── read_zero_size.c ├── unaligned_malloc_usable_size_small.c ├── invalid_free_small_region.c ├── write_after_free_large.c ├── invalid_free_small_region_far.c ├── delete_type_size_mismatch.cc ├── invalid_malloc_usable_size_small_quarantine.c ├── invalid_malloc_usable_size_small.c ├── uninitialized_read_small.c ├── malloc_object_size.c ├── malloc_object_size_offset.c ├── overflow_small_1_byte.c ├── uninitialized_read_large.c ├── invalid_malloc_object_size_small_quarantine.c ├── overflow_large_1_byte.c ├── invalid_malloc_object_size_small.c ├── overflow_large_8_byte.c ├── double_free_small_delayed.c ├── double_free_large_delayed.c ├── invalid_free_protected.c ├── write_after_free_large_reuse.c ├── invalid_free_unprotected.c ├── write_after_free_small.c ├── overflow_small_8_byte.c ├── large_array_growth.c ├── string_overflow.c ├── write_after_free_small_reuse.c ├── read_after_free_small.c ├── read_after_free_large.c ├── realloc_init.c ├── malloc_info.c ├── .gitignore ├── mallinfo2.c ├── mallinfo.c ├── offset.c ├── Makefile └── test_smc.py ├── .gitignore ├── debian ├── source │ ├── format │ ├── options │ └── lintian-overrides ├── hardened-malloc.maintscript ├── hardened_malloc.conf ├── ci_test ├── hardened-malloc-light-enable.install ├── watch ├── hardened-malloc.install ├── hardened-malloc.links ├── rules ├── make-helper-overrides.bsh ├── copyright ├── hardened-malloc-light-enable.prerm ├── hardened-malloc.postinst ├── hardened-malloc-light-enable.postinst ├── control └── changelog ├── .github ├── dependabot.yml └── workflows │ └── build-and-test.yml ├── preload.sh ├── chacha.h ├── .clang-tidy ├── mutex.h ├── random.h ├── memory.h ├── config ├── default.mk └── light.mk ├── pages.h ├── 
util.c ├── LICENSE ├── util.h ├── KERNEL_FEATURE_WISHLIST.md ├── Android.bp ├── calculate_waste.py ├── README_generic.md ├── pages.c ├── CREDITS ├── memory.c ├── random.c ├── include └── h_malloc.h ├── new.cc ├── chacha.c ├── Makefile └── README.md /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | out/ 2 | out-light/ 3 | -------------------------------------------------------------------------------- /debian/source/format: -------------------------------------------------------------------------------- 1 | 3.0 (quilt) 2 | -------------------------------------------------------------------------------- /debian/source/options: -------------------------------------------------------------------------------- 1 | extend-diff-ignore = "^\.travis\.yml$" 2 | -------------------------------------------------------------------------------- /debian/source/lintian-overrides: -------------------------------------------------------------------------------- 1 | ## https://phabricator.whonix.org/T277 2 | debian-watch-does-not-check-openpgp-signature 3 | -------------------------------------------------------------------------------- /debian/hardened-malloc.maintscript: -------------------------------------------------------------------------------- 1 | rm_conffile /etc/X11/Xsession.d/50hardened-malloc 2 | rm_conffile /etc/sudoers.d/hardened-malloc-kicksecure 3 | -------------------------------------------------------------------------------- /debian/hardened_malloc.conf: -------------------------------------------------------------------------------- 1 | ## https://github.com/GrapheneOS/hardened_malloc#traditional-linux-based-operating-systems 2 | vm.max_map_count = 1048576 3 | 
-------------------------------------------------------------------------------- /test/uninitialized_free.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | free((void *)1); 7 | return 0; 8 | } 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | target-branch: main 8 | -------------------------------------------------------------------------------- /test/impossibly_large_malloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(-8); 7 | return !(p == NULL); 8 | } 9 | -------------------------------------------------------------------------------- /test/uninitialized_malloc_usable_size.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | malloc_usable_size((void *)1); 7 | return 0; 8 | } 9 | -------------------------------------------------------------------------------- /preload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 4 | [[ $LD_PRELOAD ]] && LD_PRELOAD+=" " 5 | export LD_PRELOAD+="$dir/libhardened_malloc.so" 6 | exec "$@" 7 | -------------------------------------------------------------------------------- /test/test_util.h: -------------------------------------------------------------------------------- 1 | #ifndef TEST_UTIL_H 2 | #define TEST_UTIL_H 3 | 4 | #ifdef __clang__ 5 | #define OPTNONE __attribute__((optnone)) 6 | #else 7 | 
#define OPTNONE __attribute__((optimize(0))) 8 | #endif 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /debian/ci_test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Copyright (C) 2012 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 5 | 6 | set -x 7 | set -e 8 | 9 | dpkg-buildpackage -b --no-sign 10 | -------------------------------------------------------------------------------- /test/uninitialized_realloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = realloc((void *)1, 16); 7 | if (!p) { 8 | return 1; 9 | } 10 | return 0; 11 | } 12 | -------------------------------------------------------------------------------- /test/write_zero_size.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(0); 7 | if (!p) { 8 | return 1; 9 | } 10 | *p = 5; 11 | return 0; 12 | } 13 | -------------------------------------------------------------------------------- /test/unaligned_free_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p + 1); 11 | return 0; 12 | } 13 | -------------------------------------------------------------------------------- /test/unaligned_free_large.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(256 * 1024); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p + 1); 11 | return 0; 12 | } 13 | 
-------------------------------------------------------------------------------- /test/double_free_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p); 11 | free(p); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /debian/hardened-malloc-light-enable.install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/dh-exec 2 | 3 | ## Copyright (C) 2020 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 5 | 6 | debian/hardened_malloc.conf => /etc/sysctl.d/hardened_malloc.conf 7 | -------------------------------------------------------------------------------- /test/double_free_large.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = malloc(256 * 1024); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p); 11 | free(p); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/read_zero_size.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(0); 8 | if (!p) { 9 | return 1; 10 | } 11 | printf("%c\n", *p); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/unaligned_malloc_usable_size_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | malloc_usable_size(p + 1); 11 | return 0; 12 | } 13 | 
-------------------------------------------------------------------------------- /test/invalid_free_small_region.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | char *q = p + 4096 * 4; 11 | free(q); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/write_after_free_large.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(256 * 1024); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p); 11 | p[64 * 1024 + 1] = 'a'; 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/invalid_free_small_region_far.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | char *q = p + 1024 * 1024 * 1024; 11 | free(q); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/delete_type_size_mismatch.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | struct foo { 6 | uint64_t a, b, c, d; 7 | }; 8 | 9 | OPTNONE int main(void) { 10 | void *p = new char; 11 | struct foo *c = (struct foo *)p; 12 | delete c; 13 | return 0; 14 | } 15 | -------------------------------------------------------------------------------- /test/invalid_malloc_usable_size_small_quarantine.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = malloc(16); 7 | if (!p) { 8 | 
return 1; 9 | } 10 | free(p); 11 | malloc_usable_size(p); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /test/invalid_malloc_usable_size_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | char *q = p + 4096 * 4; 11 | malloc_usable_size(q); 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /debian/watch: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2012 - 2023 ENCRYPTED SUPPORT LP 2 | ## See the file COPYING for copying conditions. 3 | 4 | version=4 5 | opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/hardened_malloc-$1\.tar\.gz/ \ 6 | https://github.com/Whonix/hardened_malloc/tags .*/v?(\d\S+)\.tar\.gz 7 | -------------------------------------------------------------------------------- /test/uninitialized_read_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(8); 7 | for (unsigned i = 0; i < 8; i++) { 8 | if (p[i] != 0) { 9 | return 1; 10 | } 11 | } 12 | free(p); 13 | return 0; 14 | } 15 | -------------------------------------------------------------------------------- /test/malloc_object_size.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | size_t malloc_object_size(void *ptr); 7 | 8 | OPTNONE int main(void) { 9 | char *p = malloc(16); 10 | size_t size = malloc_object_size(p); 11 | return size != (SLAB_CANARY ? 
24 : 32); 12 | } 13 | -------------------------------------------------------------------------------- /test/malloc_object_size_offset.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | size_t malloc_object_size(void *ptr); 7 | 8 | OPTNONE int main(void) { 9 | char *p = malloc(16); 10 | size_t size = malloc_object_size(p + 5); 11 | return size != (SLAB_CANARY ? 19 : 27); 12 | } 13 | -------------------------------------------------------------------------------- /test/overflow_small_1_byte.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(8); 8 | if (!p) { 9 | return 1; 10 | } 11 | size_t size = malloc_usable_size(p); 12 | *(p + size) = 1; 13 | free(p); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /test/uninitialized_read_large.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(256 * 1024); 7 | for (unsigned i = 0; i < 256 * 1024; i++) { 8 | if (p[i] != 0) { 9 | return 1; 10 | } 11 | } 12 | free(p); 13 | return 0; 14 | } 15 | -------------------------------------------------------------------------------- /test/invalid_malloc_object_size_small_quarantine.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | size_t malloc_object_size(void *ptr); 6 | 7 | OPTNONE int main(void) { 8 | void *p = malloc(16); 9 | if (!p) { 10 | return 1; 11 | } 12 | free(p); 13 | malloc_object_size(p); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /test/overflow_large_1_byte.c: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(256 * 1024); 8 | if (!p) { 9 | return 1; 10 | } 11 | size_t size = malloc_usable_size(p); 12 | *(p + size) = 0; 13 | free(p); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /test/invalid_malloc_object_size_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | size_t malloc_object_size(void *ptr); 6 | 7 | OPTNONE int main(void) { 8 | char *p = malloc(16); 9 | if (!p) { 10 | return 1; 11 | } 12 | char *q = p + 4096 * 4; 13 | malloc_object_size(q); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /test/overflow_large_8_byte.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(256 * 1024); 8 | if (!p) { 9 | return 1; 10 | } 11 | size_t size = malloc_usable_size(p); 12 | *(p + size + 7) = 0; 13 | free(p); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /debian/hardened-malloc.install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/dh-exec 2 | 3 | ## Copyright (C) 2019 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 
5 | 6 | out/libhardened_malloc.so => /usr/lib/x86_64-linux-gnu/libhardened_malloc.so 7 | out-light/libhardened_malloc-light.so => /usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so 8 | -------------------------------------------------------------------------------- /test/double_free_small_delayed.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = malloc(16); 7 | if (!p) { 8 | return 1; 9 | } 10 | void *q = malloc(16); 11 | if (!q) { 12 | return 1; 13 | } 14 | free(p); 15 | free(q); 16 | free(p); 17 | return 0; 18 | } 19 | -------------------------------------------------------------------------------- /test/double_free_large_delayed.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | void *p = malloc(256 * 1024); 7 | if (!p) { 8 | return 1; 9 | } 10 | void *q = malloc(256 * 1024); 11 | if (!q) { 12 | return 1; 13 | } 14 | free(p); 15 | free(q); 16 | free(p); 17 | return 0; 18 | } 19 | -------------------------------------------------------------------------------- /test/invalid_free_protected.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #include "test_util.h" 6 | 7 | OPTNONE int main(void) { 8 | free(malloc(16)); 9 | char *p = mmap(NULL, 4096 * 16, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 10 | if (p == MAP_FAILED) { 11 | return 1; 12 | } 13 | free(p + 4096 * 8); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /debian/hardened-malloc.links: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2023 - 2023 ENCRYPTED SUPPORT LP 2 | ## See the file COPYING for copying conditions. 
3 | 4 | ## legacy 5 | /usr/lib/x86_64-linux-gnu/libhardened_malloc.so /usr/lib/libhardened_malloc.so/libhardened_malloc.so 6 | /usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so /usr/lib/libhardened_malloc.so/libhardened_malloc-light.so 7 | -------------------------------------------------------------------------------- /test/write_after_free_large_reuse.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | #include "../util.h" 6 | 7 | OPTNONE int main(void) { 8 | char *p = malloc(256 * 1024); 9 | if (!p) { 10 | return 1; 11 | } 12 | free(p); 13 | UNUSED char *q = malloc(256 * 1024); 14 | p[64 * 1024 + 1] = 'a'; 15 | return 0; 16 | } 17 | -------------------------------------------------------------------------------- /test/invalid_free_unprotected.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #include "test_util.h" 6 | 7 | OPTNONE int main(void) { 8 | free(malloc(16)); 9 | char *p = mmap(NULL, 4096 * 16, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 10 | if (p == MAP_FAILED) { 11 | return 1; 12 | } 13 | free(p + 4096 * 8); 14 | return 0; 15 | } 16 | -------------------------------------------------------------------------------- /chacha.h: -------------------------------------------------------------------------------- 1 | #ifndef CHACHA_H 2 | #define CHACHA_H 3 | 4 | #include "util.h" 5 | 6 | #define CHACHA_KEY_SIZE 32 7 | #define CHACHA_IV_SIZE 8 8 | 9 | typedef struct { 10 | u32 input[16]; 11 | } chacha_ctx; 12 | 13 | void chacha_keysetup(chacha_ctx *x, const u8 *k); 14 | void chacha_ivsetup(chacha_ctx *x, const u8 *iv); 15 | void chacha_keystream_bytes(chacha_ctx *x, u8 *c, u32 bytes); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /test/write_after_free_small.c: 
-------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | 5 | OPTNONE int main(void) { 6 | char *p = malloc(128); 7 | if (!p) { 8 | return 1; 9 | } 10 | free(p); 11 | 12 | p[65] = 'a'; 13 | 14 | // trigger reuse of the allocation 15 | for (size_t i = 0; i < 100000; i++) { 16 | free(malloc(128)); 17 | } 18 | return 0; 19 | } 20 | -------------------------------------------------------------------------------- /test/overflow_small_8_byte.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(8); 8 | if (!p) { 9 | return 1; 10 | } 11 | size_t size = malloc_usable_size(p); 12 | // XOR is used to avoid the test having a 1/256 chance to fail 13 | *(p + size + 7) ^= 1; 14 | free(p); 15 | return 0; 16 | } 17 | -------------------------------------------------------------------------------- /test/large_array_growth.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "test_util.h" 5 | 6 | OPTNONE int main(void) { 7 | void *p = NULL; 8 | size_t size = 256 * 1024; 9 | 10 | for (unsigned i = 0; i < 20; i++) { 11 | p = realloc(p, size); 12 | if (!p) { 13 | return 1; 14 | } 15 | memset(p, 'a', size); 16 | size = size * 3 / 2; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /test/string_overflow.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include "test_util.h" 8 | 9 | OPTNONE int main(void) { 10 | char *p = malloc(16); 11 | if (!p) { 12 | return 1; 13 | } 14 | 15 | size_t size = malloc_usable_size(p); 16 | memset(p, 'a', size); 17 | printf("overflow by %zu bytes\n", strlen(p) - size); 18 | 19 | return 0; 20 | } 21 | 
-------------------------------------------------------------------------------- /test/write_after_free_small_reuse.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "test_util.h" 4 | #include "../util.h" 5 | 6 | OPTNONE int main(void) { 7 | char *p = malloc(128); 8 | if (!p) { 9 | return 1; 10 | } 11 | free(p); 12 | UNUSED char *q = malloc(128); 13 | 14 | p[65] = 'a'; 15 | 16 | // trigger reuse of the allocation 17 | for (size_t i = 0; i < 100000; i++) { 18 | free(malloc(128)); 19 | } 20 | return 0; 21 | } 22 | -------------------------------------------------------------------------------- /test/read_after_free_small.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "test_util.h" 6 | 7 | OPTNONE int main(void) { 8 | char *p = malloc(16); 9 | if (!p) { 10 | return 1; 11 | } 12 | memset(p, 'a', 16); 13 | free(p); 14 | for (size_t i = 0; i < 16; i++) { 15 | printf("%x\n", p[i]); 16 | if (p[i] != '\0') { 17 | return 1; 18 | } 19 | } 20 | return 0; 21 | } 22 | -------------------------------------------------------------------------------- /test/read_after_free_large.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "test_util.h" 6 | 7 | OPTNONE int main(void) { 8 | char *p = malloc(256 * 1024); 9 | if (!p) { 10 | return 1; 11 | } 12 | memset(p, 'a', 16); 13 | free(p); 14 | for (size_t i = 0; i < 256 * 1024; i++) { 15 | printf("%x\n", p[i]); 16 | if (p[i] != '\0') { 17 | return 1; 18 | } 19 | } 20 | return 0; 21 | } 22 | -------------------------------------------------------------------------------- /.clang-tidy: -------------------------------------------------------------------------------- 1 | Checks: 
'bugprone-*,-bugprone-easily-swappable-parameters,-bugprone-macro-parentheses,-bugprone-too-small-loop-variable,cert-*,-cert-err33-c,clang-analyzer-*,-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,-clang-diagnostic-constant-logical-operand,readability-*,-readability-function-cognitive-complexity,-readability-identifier-length,-readability-inconsistent-declaration-parameter-name,-readability-magic-numbers,-readability-named-parameter,llvm-include-order,misc-*' 2 | WarningsAsErrors: '*' 3 | -------------------------------------------------------------------------------- /mutex.h: -------------------------------------------------------------------------------- 1 | #ifndef MUTEX_H 2 | #define MUTEX_H 3 | 4 | #include 5 | 6 | #include "util.h" 7 | 8 | struct mutex { 9 | pthread_mutex_t lock; 10 | }; 11 | 12 | #define MUTEX_INITIALIZER (struct mutex){PTHREAD_MUTEX_INITIALIZER} 13 | 14 | static inline void mutex_init(struct mutex *m) { 15 | if (unlikely(pthread_mutex_init(&m->lock, NULL))) { 16 | fatal_error("mutex initialization failed"); 17 | } 18 | } 19 | 20 | static inline void mutex_lock(struct mutex *m) { 21 | pthread_mutex_lock(&m->lock); 22 | } 23 | 24 | static inline void mutex_unlock(struct mutex *m) { 25 | pthread_mutex_unlock(&m->lock); 26 | } 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /test/realloc_init.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | static void *thread_func(void *arg) { 5 | arg = realloc(arg, 1024); 6 | if (!arg) { 7 | exit(EXIT_FAILURE); 8 | } 9 | 10 | free(arg); 11 | 12 | return NULL; 13 | } 14 | 15 | int main(void) { 16 | void *mem = realloc(NULL, 12); 17 | if (!mem) { 18 | return EXIT_FAILURE; 19 | } 20 | 21 | pthread_t thread; 22 | int r = pthread_create(&thread, NULL, thread_func, mem); 23 | if (r != 0) { 24 | return EXIT_FAILURE; 25 | } 26 | 27 | r = pthread_join(thread, NULL); 28 | 
if (r != 0) { 29 | return EXIT_FAILURE; 30 | } 31 | 32 | return EXIT_SUCCESS; 33 | } 34 | -------------------------------------------------------------------------------- /random.h: -------------------------------------------------------------------------------- 1 | #ifndef RANDOM_H 2 | #define RANDOM_H 3 | 4 | #include "chacha.h" 5 | #include "util.h" 6 | 7 | #define RANDOM_CACHE_SIZE 256U 8 | #define RANDOM_RESEED_SIZE (256U * 1024) 9 | 10 | struct random_state { 11 | unsigned index; 12 | unsigned reseed; 13 | chacha_ctx ctx; 14 | u8 cache[RANDOM_CACHE_SIZE]; 15 | }; 16 | 17 | void random_state_init(struct random_state *state); 18 | void random_state_init_from_random_state(struct random_state *state, struct random_state *source); 19 | void get_random_bytes(struct random_state *state, void *buf, size_t size); 20 | u16 get_random_u16(struct random_state *state); 21 | u16 get_random_u16_uniform(struct random_state *state, u16 bound); 22 | u64 get_random_u64(struct random_state *state); 23 | u64 get_random_u64_uniform(struct random_state *state, u64 bound); 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /memory.h: -------------------------------------------------------------------------------- 1 | #ifndef MEMORY_H 2 | #define MEMORY_H 3 | 4 | #include 5 | #include 6 | 7 | #ifdef __linux__ 8 | #define HAVE_COMPATIBLE_MREMAP 9 | #endif 10 | 11 | int get_metadata_key(void); 12 | 13 | void *memory_map(size_t size); 14 | bool memory_map_fixed(void *ptr, size_t size); 15 | bool memory_unmap(void *ptr, size_t size); 16 | bool memory_protect_ro(void *ptr, size_t size); 17 | bool memory_protect_rw(void *ptr, size_t size); 18 | bool memory_protect_rw_metadata(void *ptr, size_t size); 19 | #ifdef HAVE_COMPATIBLE_MREMAP 20 | bool memory_remap(void *old, size_t old_size, size_t new_size); 21 | bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size); 22 | #endif 23 | bool memory_purge(void *ptr, size_t 
size); 24 | bool memory_set_name(void *ptr, size_t size, const char *name); 25 | 26 | #endif 27 | -------------------------------------------------------------------------------- /test/malloc_info.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #if defined(__GLIBC__) || defined(__ANDROID__) 5 | #include 6 | #endif 7 | 8 | #include "test_util.h" 9 | #include "../util.h" 10 | 11 | OPTNONE static void leak_memory(void) { 12 | (void)!malloc(1024 * 1024 * 1024); 13 | (void)!malloc(16); 14 | (void)!malloc(32); 15 | (void)!malloc(4096); 16 | } 17 | 18 | static void *do_work(UNUSED void *p) { 19 | leak_memory(); 20 | return NULL; 21 | } 22 | 23 | int main(void) { 24 | pthread_t thread[4]; 25 | for (int i = 0; i < 4; i++) { 26 | pthread_create(&thread[i], NULL, do_work, NULL); 27 | } 28 | for (int i = 0; i < 4; i++) { 29 | pthread_join(thread[i], NULL); 30 | } 31 | 32 | #if defined(__GLIBC__) || defined(__ANDROID__) 33 | malloc_info(0, stdout); 34 | #endif 35 | } 36 | -------------------------------------------------------------------------------- /config/default.mk: -------------------------------------------------------------------------------- 1 | CONFIG_WERROR := true 2 | CONFIG_NATIVE := true 3 | CONFIG_CXX_ALLOCATOR := true 4 | CONFIG_UBSAN := false 5 | CONFIG_SEAL_METADATA := false 6 | CONFIG_ZERO_ON_FREE := true 7 | CONFIG_WRITE_AFTER_FREE_CHECK := true 8 | CONFIG_SLOT_RANDOMIZE := true 9 | CONFIG_SLAB_CANARY := true 10 | CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 1 11 | CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 1 12 | CONFIG_EXTENDED_SIZE_CLASSES := true 13 | CONFIG_LARGE_SIZE_CLASSES := true 14 | CONFIG_GUARD_SLABS_INTERVAL := 1 15 | CONFIG_GUARD_SIZE_DIVISOR := 2 16 | CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256 17 | CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024 18 | CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB 19 | CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32 20 | 
CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB 21 | CONFIG_N_ARENA := 4 22 | CONFIG_STATS := false 23 | CONFIG_SELF_INIT := true 24 | -------------------------------------------------------------------------------- /config/light.mk: -------------------------------------------------------------------------------- 1 | CONFIG_WERROR := true 2 | CONFIG_NATIVE := true 3 | CONFIG_CXX_ALLOCATOR := true 4 | CONFIG_UBSAN := false 5 | CONFIG_SEAL_METADATA := false 6 | CONFIG_ZERO_ON_FREE := true 7 | CONFIG_WRITE_AFTER_FREE_CHECK := false 8 | CONFIG_SLOT_RANDOMIZE := false 9 | CONFIG_SLAB_CANARY := true 10 | CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH := 0 11 | CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH := 0 12 | CONFIG_EXTENDED_SIZE_CLASSES := true 13 | CONFIG_LARGE_SIZE_CLASSES := true 14 | CONFIG_GUARD_SLABS_INTERVAL := 8 15 | CONFIG_GUARD_SIZE_DIVISOR := 2 16 | CONFIG_REGION_QUARANTINE_RANDOM_LENGTH := 256 17 | CONFIG_REGION_QUARANTINE_QUEUE_LENGTH := 1024 18 | CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD := 33554432 # 32MiB 19 | CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH := 32 20 | CONFIG_CLASS_REGION_SIZE := 34359738368 # 32GiB 21 | CONFIG_N_ARENA := 4 22 | CONFIG_STATS := false 23 | CONFIG_SELF_INIT := true 24 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | 3 | ## Copyright (C) 2016 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 
5 | 6 | #export DH_VERBOSE=1 7 | 8 | ## -gdwarf-4 requried because of: 9 | ## dwz: Unknown debugging section .debug_addr causes some builds to fail 10 | ## https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1016936 11 | export DEB_CFLAGS_MAINT_APPEND += -gdwarf-4 12 | export DEB_CXXFLAGS_MAINT_APPEND += -gdwarf-4 13 | 14 | CC := clang 15 | 16 | %: 17 | dh $@ 18 | 19 | override_dh_installchangelogs: 20 | dh_installchangelogs changelog.upstream upstream 21 | 22 | override_dh_auto_build: 23 | dh_auto_build -- CONFIG_NATIVE=false CC=$(CC) 24 | dh_auto_build -- CONFIG_NATIVE=false CC=$(CC) VARIANT=light 25 | 26 | ## Tests failing inside (cowbuilder) chroot. 27 | ## https://github.com/GrapheneOS/hardened_malloc/issues/119 28 | # override_dh_auto_test: 29 | # true 30 | -------------------------------------------------------------------------------- /pages.h: -------------------------------------------------------------------------------- 1 | #ifndef PAGES_H 2 | #define PAGES_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "util.h" 9 | 10 | #define PAGE_SHIFT 12 11 | #ifndef PAGE_SIZE 12 | #define PAGE_SIZE ((size_t)1 << PAGE_SHIFT) 13 | #endif 14 | 15 | void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name); 16 | void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name); 17 | void deallocate_pages(void *usable, size_t usable_size, size_t guard_size); 18 | 19 | static inline size_t page_align(size_t size) { 20 | return align(size, PAGE_SIZE); 21 | } 22 | 23 | static inline size_t hash_page(const void *p) { 24 | uintptr_t u = (uintptr_t)p >> PAGE_SHIFT; 25 | size_t sum = u; 26 | sum = (sum << 7) - sum + (u >> 16); 27 | sum = (sum << 7) - sum + (u >> 32); 28 | sum = (sum << 7) - sum + (u >> 48); 29 | return sum; 30 | } 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /debian/make-helper-overrides.bsh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Copyright (C) 2019 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 5 | 6 | version_numbers_by_upstream=true 7 | 8 | ## https://github.com/GrapheneOS/hardened_malloc/issues/86 9 | genmkfile_lintian_post_opts+=" --suppress-tags sharedobject-in-library-directory-missing-soname" 10 | 11 | genmkfile_file_has_been_copied=true 12 | 13 | ## This is only used when manually compiling and installing. 14 | make_install_hook_post() { 15 | if [ ! -d "$DESTDIR/usr/lib" ]; then 16 | mkdir --parents "$DESTDIR/usr/lib/x86_64-linux-gnu" 17 | fi 18 | 19 | cp libhardened_malloc.so "$DESTDIR/usr/lib/x86_64-linux-gnu/libhardened_malloc.so" 20 | chmod u+s "$DESTDIR/usr/lib/x86_64-linux-gnu/libhardened_malloc.so" 21 | 22 | cp libhardened_malloc-light.so "$DESTDIR/usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so" 23 | chmod u+s "$DESTDIR/usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so" 24 | } 25 | -------------------------------------------------------------------------------- /util.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #ifdef __ANDROID__ 8 | #include 9 | #endif 10 | 11 | #include "util.h" 12 | 13 | static int write_full(int fd, const char *buf, size_t length) { 14 | do { 15 | ssize_t bytes_written = write(fd, buf, length); 16 | if (bytes_written == -1) { 17 | if (errno == EINTR) { 18 | continue; 19 | } 20 | return -1; 21 | } 22 | buf += bytes_written; 23 | length -= bytes_written; 24 | } while (length); 25 | 26 | return 0; 27 | } 28 | 29 | COLD noreturn void fatal_error(const char *s) { 30 | const char *prefix = "fatal allocator error: "; 31 | (void)(write_full(STDERR_FILENO, prefix, strlen(prefix)) != -1 && 32 | write_full(STDERR_FILENO, s, strlen(s)) != -1 && 33 | write_full(STDERR_FILENO, "\n", 1)); 34 | #ifdef __ANDROID__ 35 | 
async_safe_format_log(ANDROID_LOG_FATAL, "hardened_malloc", "fatal allocator error: %s", s); 36 | #endif 37 | abort(); 38 | } 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright © 2018-2023 GrapheneOS 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | -------------------------------------------------------------------------------- /test/.gitignore: -------------------------------------------------------------------------------- 1 | large_array_growth 2 | mallinfo 3 | mallinfo2 4 | malloc_info 5 | offset 6 | delete_type_size_mismatch 7 | double_free_large 8 | double_free_large_delayed 9 | double_free_small 10 | double_free_small_delayed 11 | invalid_free_protected 12 | invalid_free_small_region 13 | invalid_free_small_region_far 14 | invalid_free_unprotected 15 | read_after_free_large 16 | read_after_free_small 17 | read_zero_size 18 | string_overflow 19 | unaligned_free_large 20 | unaligned_free_small 21 | uninitialized_free 22 | uninitialized_malloc_usable_size 23 | uninitialized_realloc 24 | write_after_free_large 25 | write_after_free_large_reuse 26 | write_after_free_small 27 | write_after_free_small_reuse 28 | write_zero_size 29 | unaligned_malloc_usable_size_small 30 | invalid_malloc_usable_size_small 31 | invalid_malloc_usable_size_small_quarantine 32 | malloc_object_size 33 | malloc_object_size_offset 34 | invalid_malloc_object_size_small 35 | invalid_malloc_object_size_small_quarantine 36 | impossibly_large_malloc 37 | overflow_large_1_byte 38 | overflow_large_8_byte 39 | overflow_small_1_byte 40 | overflow_small_8_byte 41 | uninitialized_read_large 42 | uninitialized_read_small 43 | realloc_init 44 | __pycache__/ 45 | -------------------------------------------------------------------------------- /test/mallinfo2.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #if defined(__GLIBC__) 5 | #include 6 | #endif 7 | 8 | #include "test_util.h" 9 | 10 | static void print_mallinfo2(void) { 11 | #if defined(__GLIBC__) 12 | struct mallinfo2 info = mallinfo2(); 13 | printf("mallinfo2:\n"); 14 | printf("arena: %zu\n", (size_t)info.arena); 15 | printf("ordblks: %zu\n", (size_t)info.ordblks); 16 | printf("smblks: %zu\n", (size_t)info.smblks); 
17 | printf("hblks: %zu\n", (size_t)info.hblks); 18 | printf("hblkhd: %zu\n", (size_t)info.hblkhd); 19 | printf("usmblks: %zu\n", (size_t)info.usmblks); 20 | printf("fsmblks: %zu\n", (size_t)info.fsmblks); 21 | printf("uordblks: %zu\n", (size_t)info.uordblks); 22 | printf("fordblks: %zu\n", (size_t)info.fordblks); 23 | printf("keepcost: %zu\n", (size_t)info.keepcost); 24 | #endif 25 | } 26 | 27 | OPTNONE int main(void) { 28 | void *a[4]; 29 | 30 | a[0] = malloc(1024 * 1024 * 1024); 31 | a[1] = malloc(16); 32 | a[2] = malloc(32); 33 | a[3] = malloc(64); 34 | 35 | print_mallinfo2(); 36 | 37 | free(a[0]); 38 | free(a[1]); 39 | free(a[2]); 40 | free(a[3]); 41 | 42 | printf("\n"); 43 | print_mallinfo2(); 44 | } 45 | -------------------------------------------------------------------------------- /.github/workflows/build-and-test.yml: -------------------------------------------------------------------------------- 1 | name: Build and run tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | schedule: 7 | - cron: '0 2 * * *' 8 | 9 | jobs: 10 | build-ubuntu-gcc: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Build 15 | run: make test 16 | build-ubuntu-clang: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Build 21 | run: CC=clang CXX=clang++ make test 22 | build-musl: 23 | runs-on: ubuntu-latest 24 | container: 25 | image: alpine:latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Install dependencies 29 | run: apk update && apk add build-base python3 30 | - name: Build 31 | run: make test 32 | build-ubuntu-gcc-aarch64: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@v4 36 | - name: Install dependencies 37 | run: sudo apt-get update && sudo apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgcc-s1-arm64-cross cpp-aarch64-linux-gnu 38 | - name: Build 39 | run: CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-gcc++ make 
CONFIG_NATIVE=false 40 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | 3 | Files: * 4 | Copyright: Copyright (c) 2019 Daniel Micay 5 | License: MIT 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | . 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | . 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 
23 | -------------------------------------------------------------------------------- /test/mallinfo.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #if defined(__GLIBC__) || defined(__ANDROID__) 5 | #include 6 | #endif 7 | 8 | #include "test_util.h" 9 | 10 | static void print_mallinfo(void) { 11 | #if defined(__GLIBC__) || defined(__ANDROID__) 12 | struct mallinfo info = mallinfo(); 13 | printf("mallinfo:\n"); 14 | printf("arena: %zu\n", (size_t)info.arena); 15 | printf("ordblks: %zu\n", (size_t)info.ordblks); 16 | printf("smblks: %zu\n", (size_t)info.smblks); 17 | printf("hblks: %zu\n", (size_t)info.hblks); 18 | printf("hblkhd: %zu\n", (size_t)info.hblkhd); 19 | printf("usmblks: %zu\n", (size_t)info.usmblks); 20 | printf("fsmblks: %zu\n", (size_t)info.fsmblks); 21 | printf("uordblks: %zu\n", (size_t)info.uordblks); 22 | printf("fordblks: %zu\n", (size_t)info.fordblks); 23 | printf("keepcost: %zu\n", (size_t)info.keepcost); 24 | #endif 25 | } 26 | 27 | OPTNONE int main(void) { 28 | void *a[4]; 29 | 30 | a[0] = malloc(1024 * 1024 * 1024); 31 | a[1] = malloc(16); 32 | a[2] = malloc(32); 33 | a[3] = malloc(64); 34 | 35 | print_mallinfo(); 36 | 37 | free(a[0]); 38 | free(a[1]); 39 | free(a[2]); 40 | free(a[3]); 41 | 42 | printf("\n"); 43 | print_mallinfo(); 44 | } 45 | -------------------------------------------------------------------------------- /test/offset.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | static size_t size_classes[] = { 7 | /* large */ 4 * 1024 * 1024, 8 | /* 0 */ 0, 9 | /* 16 */ 16, 32, 48, 64, 80, 96, 112, 128, 10 | /* 32 */ 160, 192, 224, 256, 11 | /* 64 */ 320, 384, 448, 512, 12 | /* 128 */ 640, 768, 896, 1024, 13 | /* 256 */ 1280, 1536, 1792, 2048, 14 | /* 512 */ 2560, 3072, 3584, 4096, 15 | /* 1024 */ 5120, 6144, 7168, 8192, 16 | /* 2048 */ 10240, 12288, 14336, 16384, 17 | #if 
CONFIG_EXTENDED_SIZE_CLASSES 18 | /* 4096 */ 20480, 24576, 28672, 32768, 19 | /* 8192 */ 40960, 49152, 57344, 65536, 20 | /* 16384 */ 81920, 98304, 114688, 131072, 21 | #endif 22 | }; 23 | 24 | #define N_SIZE_CLASSES (sizeof(size_classes) / sizeof(size_classes[0])) 25 | 26 | static const size_t canary_size = SLAB_CANARY ? sizeof(uint64_t) : 0; 27 | 28 | int main(void) { 29 | for (unsigned i = 2; i < N_SIZE_CLASSES; i++) { 30 | size_classes[i] -= canary_size; 31 | } 32 | 33 | void *p[N_SIZE_CLASSES]; 34 | for (unsigned i = 0; i < N_SIZE_CLASSES; i++) { 35 | size_t size = size_classes[i]; 36 | p[i] = malloc(size); 37 | if (!p[i]) { 38 | return 1; 39 | } 40 | void *q = malloc(size); 41 | if (!q) { 42 | return 1; 43 | } 44 | if (i != 0) { 45 | printf("%zu to %zu: %zd\n", size_classes[i - 1], size, p[i] - p[i - 1]); 46 | } 47 | printf("%zu to %zu: %zd\n", size, size, q - p[i]); 48 | } 49 | return 0; 50 | } 51 | -------------------------------------------------------------------------------- /util.h: -------------------------------------------------------------------------------- 1 | #ifndef UTIL_H 2 | #define UTIL_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | // C11 noreturn doesn't work in C++ 9 | #define noreturn __attribute__((noreturn)) 10 | 11 | #define likely(x) __builtin_expect(!!(x), 1) 12 | #define unlikely(x) __builtin_expect(!!(x), 0) 13 | 14 | #define min(x, y) ({ \ 15 | __typeof__(x) _x = (x); \ 16 | __typeof__(y) _y = (y); \ 17 | (void) (&_x == &_y); \ 18 | _x < _y ? _x : _y; }) 19 | 20 | #define max(x, y) ({ \ 21 | __typeof__(x) _x = (x); \ 22 | __typeof__(y) _y = (y); \ 23 | (void) (&_x == &_y); \ 24 | _x > _y ? 
_x : _y; }) 25 | 26 | #define COLD __attribute__((cold)) 27 | #define UNUSED __attribute__((unused)) 28 | #define EXPORT __attribute__((visibility("default"))) 29 | 30 | #define STRINGIFY(s) #s 31 | #define ALIAS(f) __attribute__((alias(STRINGIFY(f)))) 32 | 33 | typedef uint8_t u8; 34 | typedef uint16_t u16; 35 | typedef uint32_t u32; 36 | typedef uint64_t u64; 37 | typedef unsigned __int128 u128; 38 | 39 | #define U64_WIDTH 64 40 | 41 | static inline int ffz64(u64 x) { 42 | return __builtin_ffsll(~x); 43 | } 44 | 45 | // parameter must not be 0 46 | static inline int clz64(u64 x) { 47 | return __builtin_clzll(x); 48 | } 49 | 50 | // parameter must not be 0 51 | static inline u64 log2u64(u64 x) { 52 | return U64_WIDTH - clz64(x) - 1; 53 | } 54 | 55 | static inline size_t align(size_t size, size_t align) { 56 | size_t mask = align - 1; 57 | return (size + mask) & ~mask; 58 | } 59 | 60 | COLD noreturn void fatal_error(const char *s); 61 | 62 | #if CONFIG_SEAL_METADATA 63 | 64 | #ifdef __GLIBC__ 65 | #define USE_PKEY 66 | #else 67 | #error "CONFIG_SEAL_METADATA requires Memory Protection Key support" 68 | #endif 69 | 70 | #endif // CONFIG_SEAL_METADATA 71 | 72 | #endif 73 | -------------------------------------------------------------------------------- /KERNEL_FEATURE_WISHLIST.md: -------------------------------------------------------------------------------- 1 | Very important and should be an easy sell: 2 | 3 | * improved robustness for high vma count on high memory machines 4 | * much higher `vm.max_map_count` by default 5 | * work on improving performance and resource usage with high vma count 6 | * add a way to disable the brk heap and have mmap grow upwards like it did in 7 | the past (preserving the same high base entropy) 8 | 9 | Somewhat important and an easy sell: 10 | 11 | * alternative to `RLIMIT_AS` for accountable mappings only 12 | * memory control groups are sometimes a better option but there are still 13 | users of `RLIMIT_AS` that are problematic 
for mitigations or simply fast 14 | garbage collector implementations, etc. mapping lots of `PROT_NONE` memory 15 | * mremap flag to disable unmapping the source mapping 16 | * also needed by jemalloc for different reasons 17 | * not needed if the kernel gets first class support for arbitrarily sized 18 | guard pages and a virtual memory quarantine feature 19 | * `MREMAP_DONTUNMAP` is now available but doesn't support expanding the 20 | mapping which may be an issue due to VMA merging being unreliable 21 | 22 | Fairly infeasible to land but could reduce overhead and extend coverage of 23 | security features to other code directly using mmap: 24 | 25 | * first class support for arbitrarily sized guard pages for mmap and mremap to 26 | eliminate half of the resulting VMAs and reduce 2 system calls to 1 27 | * not usable if it doesn't support mremap (shrink, grow, grow via move) 28 | * not usable if the guard page size is static 29 | * should support changing guard size for mremap growth via move 30 | * must be possible to set it up from the process 31 | * virtual memory quarantine 32 | * must be possible to set it up from the process 33 | * first-class support for aligned mappings with mmap and ideally mremap 34 | * not usable unless guard page support is provided and of course it has to 35 | work with this too 36 | -------------------------------------------------------------------------------- /debian/hardened-malloc-light-enable.prerm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Copyright (C) 2020 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 
5 | 6 | if [ -f /usr/libexec/helper-scripts/pre.bsh ]; then 7 | source /usr/libexec/helper-scripts/pre.bsh 8 | fi 9 | 10 | set -e 11 | 12 | true " 13 | ##################################################################### 14 | ## INFO: BEGIN: $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 15 | ##################################################################### 16 | " 17 | 18 | ## feature request: /etc/ld.so.preload.d drop-in configuration folder support 19 | ## https://sourceware.org/bugzilla/show_bug.cgi?id=24913 20 | 21 | if [ "$1" = "purge" ] || [ "$1" = "remove" ]; then 22 | if test -r /etc/ld.so.preload ; then 23 | if grep -q libhardened_malloc-light.so /etc/ld.so.preload ; then 24 | if LANG=C str_replace libhardened_malloc-light.so "" /etc/ld.so.preload >/dev/null ; then 25 | echo "INFO: $0: Removed libhardened_malloc-light.so from /etc/ld.so.preload, OK." 26 | else 27 | echo "ERROR: $0: Removal of libhardened_malloc-light.so from /etc/ld.so.preload failed." >&2 28 | fi 29 | else 30 | echo "INFO: $0: libhardened_malloc-light.so was already removed from /etc/ld.so.preload, OK." 31 | fi 32 | else 33 | echo "INFO: $0: /etc/ld.so.preload does not exist, therefore no need to remove libhardened_malloc-light.so, OK." 34 | fi 35 | 36 | rm -f /var/lib/hardened-malloc-kicksecure-enable/enabled 37 | fi 38 | 39 | true "INFO: debhelper beginning here." 40 | 41 | #DEBHELPER# 42 | 43 | true "INFO: Done with debhelper." 44 | 45 | true " 46 | ##################################################################### 47 | ## INFO: END : $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 48 | ##################################################################### 49 | " 50 | 51 | ## Explicitly "exit 0", so eventually trapped errors can be ignored. 
52 | exit 0 53 | -------------------------------------------------------------------------------- /Android.bp: -------------------------------------------------------------------------------- 1 | common_cflags = [ 2 | "-pipe", 3 | "-O3", 4 | //"-flto", 5 | "-fPIC", 6 | "-fvisibility=hidden", 7 | //"-fno-plt", 8 | "-Wall", 9 | "-Wextra", 10 | "-Wcast-align", 11 | "-Wcast-qual", 12 | "-Wwrite-strings", 13 | "-Werror", 14 | "-DH_MALLOC_PREFIX", 15 | "-DZERO_ON_FREE=true", 16 | "-DWRITE_AFTER_FREE_CHECK=true", 17 | "-DSLOT_RANDOMIZE=true", 18 | "-DSLAB_CANARY=true", 19 | "-DSLAB_QUARANTINE_RANDOM_LENGTH=1", 20 | "-DSLAB_QUARANTINE_QUEUE_LENGTH=1", 21 | "-DCONFIG_EXTENDED_SIZE_CLASSES=true", 22 | "-DCONFIG_LARGE_SIZE_CLASSES=true", 23 | "-DGUARD_SLABS_INTERVAL=1", 24 | "-DGUARD_SIZE_DIVISOR=2", 25 | "-DREGION_QUARANTINE_RANDOM_LENGTH=256", 26 | "-DREGION_QUARANTINE_QUEUE_LENGTH=1024", 27 | "-DREGION_QUARANTINE_SKIP_THRESHOLD=33554432", // 32MiB 28 | "-DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=32", 29 | "-DCONFIG_CLASS_REGION_SIZE=34359738368", // 32GiB 30 | "-DN_ARENA=1", 31 | "-DCONFIG_STATS=true", 32 | "-DCONFIG_SELF_INIT=false", 33 | ] 34 | 35 | cc_defaults { 36 | name: "hardened_malloc_defaults", 37 | defaults: ["linux_bionic_supported"], 38 | cflags: common_cflags, 39 | conlyflags: ["-std=c17", "-Wmissing-prototypes"], 40 | stl: "none", 41 | } 42 | 43 | lib_src_files = [ 44 | "chacha.c", 45 | "h_malloc.c", 46 | "memory.c", 47 | "pages.c", 48 | "random.c", 49 | "util.c", 50 | ] 51 | 52 | cc_library { 53 | name: "libhardened_malloc", 54 | ramdisk_available: true, 55 | vendor_ramdisk_available: true, 56 | recovery_available: true, 57 | defaults: ["hardened_malloc_defaults"], 58 | srcs: lib_src_files, 59 | export_include_dirs: ["include"], 60 | static_libs: ["libasync_safe"], 61 | target: { 62 | android: { 63 | shared: { 64 | enabled: false, 65 | }, 66 | system_shared_libs: [], 67 | }, 68 | linux_bionic: { 69 | system_shared_libs: [], 70 | }, 71 | }, 72 | product_variables: 
{ 73 | debuggable: { 74 | cflags: ["-DLABEL_MEMORY"], 75 | }, 76 | }, 77 | apex_available: [ 78 | "com.android.runtime", 79 | ], 80 | } 81 | -------------------------------------------------------------------------------- /test/Makefile: -------------------------------------------------------------------------------- 1 | CONFIG_SLAB_CANARY := true 2 | CONFIG_EXTENDED_SIZE_CLASSES := true 3 | 4 | ifneq ($(VARIANT),) 5 | $(error testing non-default variants not yet supported) 6 | endif 7 | 8 | ifeq (,$(filter $(CONFIG_SLAB_CANARY),true false)) 9 | $(error CONFIG_SLAB_CANARY must be true or false) 10 | endif 11 | 12 | dir=$(dir $(realpath $(firstword $(MAKEFILE_LIST)))) 13 | 14 | CPPFLAGS := \ 15 | -D_GNU_SOURCE \ 16 | -DSLAB_CANARY=$(CONFIG_SLAB_CANARY) \ 17 | -DCONFIG_EXTENDED_SIZE_CLASSES=$(CONFIG_EXTENDED_SIZE_CLASSES) 18 | 19 | SHARED_FLAGS := -O3 20 | 21 | CFLAGS := -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes 22 | CXXFLAGS := -std=c++17 -fsized-deallocation $(SHARED_FLAGS) 23 | LDFLAGS := -Wl,-L$(dir)../out,-R,$(dir)../out 24 | 25 | LDLIBS := -lpthread -lhardened_malloc 26 | 27 | EXECUTABLES := \ 28 | offset \ 29 | mallinfo \ 30 | mallinfo2 \ 31 | malloc_info \ 32 | large_array_growth \ 33 | double_free_large \ 34 | double_free_large_delayed \ 35 | double_free_small \ 36 | double_free_small_delayed \ 37 | unaligned_free_large \ 38 | unaligned_free_small \ 39 | read_after_free_large \ 40 | read_after_free_small \ 41 | write_after_free_large \ 42 | write_after_free_large_reuse \ 43 | write_after_free_small \ 44 | write_after_free_small_reuse \ 45 | read_zero_size \ 46 | write_zero_size \ 47 | invalid_free_protected \ 48 | invalid_free_unprotected \ 49 | invalid_free_small_region \ 50 | invalid_free_small_region_far \ 51 | uninitialized_read_small \ 52 | uninitialized_read_large \ 53 | uninitialized_free \ 54 | uninitialized_realloc \ 55 | uninitialized_malloc_usable_size \ 56 | overflow_large_1_byte \ 57 | overflow_large_8_byte \ 58 | overflow_small_1_byte \ 59 
| overflow_small_8_byte \ 60 | string_overflow \ 61 | delete_type_size_mismatch \ 62 | unaligned_malloc_usable_size_small \ 63 | invalid_malloc_usable_size_small \ 64 | invalid_malloc_usable_size_small_quarantine \ 65 | malloc_object_size \ 66 | malloc_object_size_offset \ 67 | invalid_malloc_object_size_small \ 68 | invalid_malloc_object_size_small_quarantine \ 69 | impossibly_large_malloc \ 70 | realloc_init 71 | 72 | all: $(EXECUTABLES) 73 | 74 | clean: 75 | rm -f $(EXECUTABLES) 76 | rm -fr ./__pycache__ 77 | -------------------------------------------------------------------------------- /debian/hardened-malloc.postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Copyright (C) 2020 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 5 | 6 | if [ -f /usr/libexec/helper-scripts/pre.bsh ]; then 7 | source /usr/libexec/helper-scripts/pre.bsh 8 | fi 9 | 10 | set -e 11 | 12 | true " 13 | ##################################################################### 14 | ## INFO: BEGIN: $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 15 | ##################################################################### 16 | " 17 | 18 | ## legacy 19 | if test -f /etc/ld.so.preload ; then 20 | ## forked libhardened_malloc_kicksecure.so was renamed to 21 | ## original (unforked) upstream provided libhardened_malloc-light.so 22 | search="/usr/lib/libhardened_malloc.so/libhardened_malloc_kicksecure.so" 23 | replace="/usr/lib/libhardened_malloc.so/libhardened_malloc-light.so" 24 | file_name="/etc/ld.so.preload" 25 | LANG=C str_replace "$search" "$replace" "$file_name" || true 26 | 27 | search="libhardened_malloc_kicksecure.so" 28 | replace="libhardened_malloc-light.so" 29 | file_name="/etc/ld.so.preload" 30 | LANG=C str_replace "$search" "$replace" "$file_name" || true 31 | 32 | ## Should not use full path. 
33 | ## https://gist.github.com/SkewedZeppelin/7f293d64c1c651bdc21526519d9e192b 34 | search="/usr/lib/libhardened_malloc.so/libhardened_malloc-light.so" 35 | replace="libhardened_malloc-light.so" 36 | file_name="/etc/ld.so.preload" 37 | LANG=C str_replace "$search" "$replace" "$file_name" || true 38 | 39 | search="/usr/lib/libhardened_malloc.so/libhardened_malloc.so" 40 | replace="libhardened_malloc.so" 41 | file_name="/etc/ld.so.preload" 42 | LANG=C str_replace "$search" "$replace" "$file_name" || true 43 | fi 44 | 45 | chmod u+s /usr/lib/x86_64-linux-gnu/libhardened_malloc.so 46 | chmod u+s /usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so 47 | 48 | true "INFO: debhelper beginning here." 49 | 50 | #DEBHELPER# 51 | 52 | true "INFO: Done with debhelper." 53 | 54 | true " 55 | ##################################################################### 56 | ## INFO: END : $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 57 | ##################################################################### 58 | " 59 | 60 | ## Explicitly "exit 0", so eventually trapped errors can be ignored. 61 | exit 0 62 | -------------------------------------------------------------------------------- /debian/hardened-malloc-light-enable.postinst: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Copyright (C) 2020 - 2023 ENCRYPTED SUPPORT LP 4 | ## See the file COPYING for copying conditions. 
5 | 6 | if [ -f /usr/libexec/helper-scripts/pre.bsh ]; then 7 | source /usr/libexec/helper-scripts/pre.bsh 8 | fi 9 | 10 | set -e 11 | 12 | true " 13 | ##################################################################### 14 | ## INFO: BEGIN: $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 15 | ##################################################################### 16 | " 17 | 18 | ## feature request: /etc/ld.so.preload.d drop-in configuration folder support 19 | ## https://sourceware.org/bugzilla/show_bug.cgi?id=24913 20 | 21 | create_hardened_malloc_light_enabled_status_file() { 22 | mkdir --parents /var/lib/hardened-malloc-kicksecure-enable 23 | touch /var/lib/hardened-malloc-kicksecure-enable/enabled 24 | } 25 | 26 | if test -r /usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so ; then 27 | if grep -q libhardened_malloc-light.so /etc/ld.so.preload ; then 28 | echo "INFO: $0: libhardened_malloc-light.so already enabled in /etc/ld.so.preload, OK." 29 | create_hardened_malloc_light_enabled_status_file 30 | else 31 | if test -f /var/lib/hardened-malloc-kicksecure-enable/enabled ; then 32 | echo "INFO: $0: libhardened_malloc-light.so was already previously enabled by this package, but currently is disabled. Leaving it as is, OK." 33 | elif echo "libhardened_malloc-light.so" | tee "/etc/ld.so.preload" >/dev/null ; then 34 | echo "INFO: $0: enabled libhardened_malloc-light.so in /etc/ld.so.preload, OK." 35 | create_hardened_malloc_light_enabled_status_file 36 | else 37 | echo "ERROR: $0: could not write libhardened_malloc-light.so to /etc/ld.so.preload." >&2 38 | fi 39 | fi 40 | else 41 | echo "ERROR: $0: file /usr/lib/x86_64-linux-gnu/libhardened_malloc-light.so does not exist." >&2 42 | fi 43 | 44 | true "INFO: debhelper beginning here." 45 | 46 | #DEBHELPER# 47 | 48 | true "INFO: Done with debhelper." 
49 | 50 | true " 51 | ##################################################################### 52 | ## INFO: END : $DPKG_MAINTSCRIPT_PACKAGE $DPKG_MAINTSCRIPT_NAME $@ 53 | ##################################################################### 54 | " 55 | 56 | ## Explicitly "exit 0", so eventually trapped errors can be ignored. 57 | exit 0 58 | -------------------------------------------------------------------------------- /calculate_waste.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from sys import argv 4 | 5 | size_classes = [ 6 | 16, 32, 48, 64, 80, 96, 112, 128, 7 | 160, 192, 224, 256, 8 | 320, 384, 448, 512, 9 | 640, 768, 896, 1024, 10 | 1280, 1536, 1792, 2048, 11 | 2560, 3072, 3584, 4096, 12 | 5120, 6144, 7168, 8192, 13 | 10240, 12288, 14336, 16384, 14 | 20480, 24576, 28672, 32768, 15 | 40960, 49152, 57344, 65536, 16 | 81920, 98304, 114688, 131072, 17 | ] 18 | 19 | size_class_slots = [ 20 | 256, 128, 85, 64, 51, 42, 36, 64, 21 | 51, 64, 54, 64, 22 | 64, 64, 64, 64, 23 | 64, 64, 64, 64, 24 | 16, 16, 16, 16, 25 | 8, 8, 8, 8, 26 | 8, 8, 8, 8, 27 | 6, 5, 4, 4, 28 | 2, 2, 2, 2, 29 | 1, 1, 1, 1, 30 | 1, 1, 1, 1, 31 | ] 32 | 33 | fragmentation = [100 - 1 / 16 * 100] 34 | 35 | for i in range(len(size_classes) - 1): 36 | size_class = size_classes[i + 1] 37 | worst_case = size_classes[i] + 1 38 | used = worst_case / size_class 39 | fragmentation.append(100 - used * 100); 40 | 41 | def page_align(size): 42 | return (size + 4095) & ~4095 43 | 44 | print("| ", end="") 45 | print("size class", "worst case internal fragmentation", "slab slots", "slab size", "internal fragmentation for slabs", sep=" | ", end=" |\n") 46 | print("| ", end='') 47 | print("-", "-", "-", "-", "-", sep=" | ", end=" |\n") 48 | for size, slots, fragmentation in zip(size_classes, size_class_slots, fragmentation): 49 | used = size * slots 50 | real = page_align(used) 51 | print("| ", end='') 52 | print(size, f"{fragmentation:.4}%", 
slots, real, str(100 - used / real * 100) + "%", sep=" | ", end=" |\n") 53 | 54 | if len(argv) < 2: 55 | exit() 56 | 57 | max_bits = 256 58 | max_page_span = 16 59 | 60 | print() 61 | 62 | print("maximum bitmap size is {}-bit".format(max_bits)) 63 | print("maximum page span size is {} ({})".format(max_page_span, max_page_span * 4096)) 64 | 65 | for size_class in size_classes: 66 | choices = [] 67 | for bits in range(1, max_bits + 1): 68 | used = size_class * bits 69 | real = page_align(used) 70 | if real > 65536: 71 | continue 72 | pages = real / 4096 73 | efficiency = used / real * 100 74 | choices.append((bits, used, real, pages, efficiency)) 75 | 76 | choices.sort(key=lambda x: x[4], reverse=True) 77 | 78 | print() 79 | print("size_class:", size_class) 80 | for choice in choices[:10]: 81 | print(choice) 82 | -------------------------------------------------------------------------------- /README_generic.md: -------------------------------------------------------------------------------- 1 | # security-focused general purpose memory allocator # 2 | 3 | This is a security-focused general purpose memory allocator providing the 4 | malloc API along with various extensions. It provides substantial hardening 5 | against heap corruption vulnerabilities. The security-focused design also 6 | leads to much less metadata overhead and memory waste from fragmentation than 7 | a more traditional allocator design. It aims to provide decent overall 8 | performance with a focus on long-term performance and memory usage rather than 9 | allocator micro-benchmarks. It offers scalability via a configurable number of 10 | entirely independently arenas, with the internal locking within arenas further 11 | divided up per size class. 12 | 13 | It can be added as a preloaded library using /etc/ld.so.preload. 
14 | 15 | Ships two files: 16 | 17 | * [1] 18 | /usr/lib/x86_64-linux-gnu/libhardened_malloc.so/libhardened_malloc.so 19 | * [2] 20 | /usr/lib/x86_64-linux-gnu/libhardened_malloc.so/libhardened_malloc-light.so 21 | 22 | [1] Was compiled with Hardened Malloc Default compilation parameters. 23 | 24 | [2] Was compiled with Hardened Malloc Light compilation parameters. 25 | 26 | ## How to install `hardened_malloc` using apt-get ## 27 | 28 | 1\. Download the APT Signing Key. 29 | 30 | ``` 31 | wget https://www.kicksecure.com/keys/derivative.asc 32 | ``` 33 | 34 | Users can [check the Signing Key](https://www.kicksecure.com/wiki/Signing_Key) for better security. 35 | 36 | 2\. Add the APT Signing Key. 37 | 38 | ``` 39 | sudo cp ~/derivative.asc /usr/share/keyrings/derivative.asc 40 | ``` 41 | 42 | 3\. Add the derivative repository. 43 | 44 | ``` 45 | echo "deb [signed-by=/usr/share/keyrings/derivative.asc] https://deb.kicksecure.com bookworm main contrib non-free" | sudo tee /etc/apt/sources.list.d/derivative.list 46 | ``` 47 | 48 | 4\. Update your package lists. 49 | 50 | ``` 51 | sudo apt-get update 52 | ``` 53 | 54 | 5\. Install `hardened_malloc`. 55 | 56 | ``` 57 | sudo apt-get install hardened_malloc 58 | ``` 59 | 60 | ## How to Build deb Package from Source Code ## 61 | 62 | Can be build using standard Debian package build tools such as: 63 | 64 | ``` 65 | dpkg-buildpackage -b 66 | ``` 67 | 68 | See instructions. 69 | 70 | NOTE: Replace `generic-package` with the actual name of this package `hardened_malloc`. 
71 | 72 | * **A)** [easy](https://www.kicksecure.com/wiki/Dev/Build_Documentation/generic-package/easy), _OR_ 73 | * **B)** [including verifying software signatures](https://www.kicksecure.com/wiki/Dev/Build_Documentation/generic-package) 74 | 75 | ## Contact ## 76 | 77 | * [Free Forum Support](https://forums.kicksecure.com) 78 | * [Premium Support](https://www.kicksecure.com/wiki/Premium_Support) 79 | 80 | ## Donate ## 81 | 82 | `hardened_malloc` requires [donations](https://www.kicksecure.com/wiki/Donate) to stay alive! 83 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019 - 2023 ENCRYPTED SUPPORT LP 2 | ## See the file COPYING for copying conditions. 3 | 4 | Source: hardened-malloc 5 | Section: libs 6 | Priority: optional 7 | Maintainer: Patrick Schleizer 8 | Build-Depends: clang, 9 | clang-tidy, 10 | debhelper (>= 13), 11 | debhelper-compat (= 13), 12 | dh-exec, 13 | llvm-15-dev 14 | Homepage: https://github.com/GrapheneOS/hardened_malloc 15 | Vcs-Browser: https://github.com/GrapheneOS/hardened_malloc 16 | Vcs-Git: https://github.com/GrapheneOS/hardened_malloc.git 17 | Standards-Version: 4.6.2 18 | Rules-Requires-Root: no 19 | 20 | Package: hardened-malloc 21 | Architecture: amd64 22 | Depends: ${misc:Depends}, 23 | ${shlibs:Depends} 24 | Description: security-focused general purpose memory allocator 25 | This is a security-focused general purpose memory allocator providing the 26 | malloc API along with various extensions. It provides substantial hardening 27 | against heap corruption vulnerabilities. The security-focused design also 28 | leads to much less metadata overhead and memory waste from fragmentation than 29 | a more traditional allocator design. It aims to provide decent overall 30 | performance with a focus on long-term performance and memory usage rather than 31 | allocator micro-benchmarks. 
It offers scalability via a configurable number of 32 | entirely independently arenas, with the internal locking within arenas further 33 | divided up per size class. 34 | . 35 | It can be added as a preloaded library using /etc/ld.so.preload. 36 | . 37 | Ships two files: 38 | . 39 | * [1] 40 | /usr/lib/x86_64-linux-gnu/libhardened_malloc.so/libhardened_malloc.so 41 | * [2] 42 | /usr/lib/x86_64-linux-gnu/libhardened_malloc.so/libhardened_malloc-light.so 43 | . 44 | [1] Was compiled with Hardened Malloc Default compilation parameters. 45 | . 46 | [2] Was compiled with Hardened Malloc Light compilation parameters. 47 | 48 | Package: hardened-malloc-light-enable 49 | ## Actually is 'Architecture: all' but genmkfile cannot handle that a package 50 | ## being a mix of 'Architecture: any' and 'Architecture: all' yet. 51 | Architecture: amd64 52 | Depends: hardened-malloc, 53 | helper-scripts, 54 | ${misc:Depends}, 55 | ${shlibs:Depends} 56 | Replaces: hardened-malloc-kicksecure-enable 57 | Provides: hardened-malloc-kicksecure-enable 58 | Description: enables Hardened Malloc Light 59 | Adds libhardened_malloc-light.so to 60 | /etc/ld.so.preload systemd wide configuration file. 61 | . 62 | Does this only once per installation. The user is free to undo changes to 63 | /etc/ld.so.preload. Once this package is purged and re-installed it will 64 | re-enable the Hardened Malloc Light. 65 | . 66 | Other than doing that this is an empty package. 
67 | -------------------------------------------------------------------------------- /pages.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "memory.h" 4 | #include "pages.h" 5 | #include "util.h" 6 | 7 | static bool add_guards(size_t size, size_t guard_size, size_t *total_size) { 8 | return __builtin_add_overflow(size, guard_size, total_size) || 9 | __builtin_add_overflow(*total_size, guard_size, total_size); 10 | } 11 | 12 | void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotect, const char *name) { 13 | size_t real_size; 14 | if (unlikely(add_guards(usable_size, guard_size, &real_size))) { 15 | errno = ENOMEM; 16 | return NULL; 17 | } 18 | void *real = memory_map(real_size); 19 | if (unlikely(real == NULL)) { 20 | return NULL; 21 | } 22 | memory_set_name(real, real_size, name); 23 | void *usable = (char *)real + guard_size; 24 | if (unprotect && unlikely(memory_protect_rw(usable, usable_size))) { 25 | memory_unmap(real, real_size); 26 | return NULL; 27 | } 28 | return usable; 29 | } 30 | 31 | void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t guard_size, const char *name) { 32 | usable_size = page_align(usable_size); 33 | if (unlikely(!usable_size)) { 34 | errno = ENOMEM; 35 | return NULL; 36 | } 37 | 38 | size_t alloc_size; 39 | if (unlikely(__builtin_add_overflow(usable_size, alignment - PAGE_SIZE, &alloc_size))) { 40 | errno = ENOMEM; 41 | return NULL; 42 | } 43 | 44 | size_t real_alloc_size; 45 | if (unlikely(add_guards(alloc_size, guard_size, &real_alloc_size))) { 46 | errno = ENOMEM; 47 | return NULL; 48 | } 49 | 50 | void *real = memory_map(real_alloc_size); 51 | if (unlikely(real == NULL)) { 52 | return NULL; 53 | } 54 | memory_set_name(real, real_alloc_size, name); 55 | 56 | void *usable = (char *)real + guard_size; 57 | 58 | size_t lead_size = align((uintptr_t)usable, alignment) - (uintptr_t)usable; 59 | size_t trail_size = alloc_size - 
lead_size - usable_size; 60 | void *base = (char *)usable + lead_size; 61 | 62 | if (unlikely(memory_protect_rw(base, usable_size))) { 63 | memory_unmap(real, real_alloc_size); 64 | return NULL; 65 | } 66 | 67 | if (lead_size) { 68 | if (unlikely(memory_unmap(real, lead_size))) { 69 | memory_unmap(real, real_alloc_size); 70 | return NULL; 71 | } 72 | } 73 | 74 | if (trail_size) { 75 | if (unlikely(memory_unmap((char *)base + usable_size + guard_size, trail_size))) { 76 | memory_unmap(real, real_alloc_size); 77 | return NULL; 78 | } 79 | } 80 | 81 | return base; 82 | } 83 | 84 | void deallocate_pages(void *usable, size_t usable_size, size_t guard_size) { 85 | if (unlikely(memory_unmap((char *)usable - guard_size, usable_size + guard_size * 2))) { 86 | memory_purge(usable, usable_size); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /CREDITS: -------------------------------------------------------------------------------- 1 | chacha.c is a simple conversion of chacha-merged.c to a keystream-only implementation: 2 | 3 | chacha-merged.c version 20080118 4 | D. J. Bernstein 5 | Public domain. 6 | 7 | h_malloc.c open-addressed hash table (regions_grow, regions_insert, regions_find, regions_delete): 8 | 9 | Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek 10 | Copyright (c) 2012 Matthew Dempsky 11 | Copyright (c) 2008 Damien Miller 12 | Copyright (c) 2000 Poul-Henning Kamp 13 | 14 | Permission to use, copy, modify, and distribute this software for any 15 | purpose with or without fee is hereby granted, provided that the above 16 | copyright notice and this permission notice appear in all copies. 17 | 18 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 19 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 20 | MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 21 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 22 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 23 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 24 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 25 | 26 | libdivide: 27 | 28 | Copyright (C) 2010 - 2019 ridiculous_fish, 29 | Copyright (C) 2016 - 2019 Kim Walisch, 30 | 31 | Boost Software License - Version 1.0 - August 17th, 2003 32 | 33 | Permission is hereby granted, free of charge, to any person or organization 34 | obtaining a copy of the software and accompanying documentation covered by 35 | this license (the "Software") to use, reproduce, display, distribute, 36 | execute, and transmit the Software, and to prepare derivative works of the 37 | Software, and to permit third-parties to whom the Software is furnished to 38 | do so, all subject to the following: 39 | 40 | The copyright notices in the Software and this entire statement, including 41 | the above license grant, this restriction and the following disclaimer, 42 | must be included in all copies of the Software, in whole or in part, and 43 | all derivative works of the Software, unless such copies or derivative 44 | works are solely in the form of machine-executable object code generated by 45 | a source language processor. 46 | 47 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 48 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 49 | FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT 50 | SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE 51 | FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, 52 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 53 | DEALINGS IN THE SOFTWARE. 
54 | 55 | random.c get_random_{type}_uniform functions are based on Fast Random Integer 56 | Generation in an Interval by Daniel Lemire 57 | -------------------------------------------------------------------------------- /memory.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #ifdef LABEL_MEMORY 6 | #include 7 | #endif 8 | 9 | #ifndef PR_SET_VMA 10 | #define PR_SET_VMA 0x53564d41 11 | #endif 12 | 13 | #ifndef PR_SET_VMA_ANON_NAME 14 | #define PR_SET_VMA_ANON_NAME 0 15 | #endif 16 | 17 | #include "memory.h" 18 | #include "util.h" 19 | 20 | void *memory_map(size_t size) { 21 | void *p = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 22 | if (unlikely(p == MAP_FAILED)) { 23 | if (errno != ENOMEM) { 24 | fatal_error("non-ENOMEM mmap failure"); 25 | } 26 | return NULL; 27 | } 28 | return p; 29 | } 30 | 31 | bool memory_map_fixed(void *ptr, size_t size) { 32 | void *p = mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); 33 | bool ret = p == MAP_FAILED; 34 | if (unlikely(ret) && errno != ENOMEM) { 35 | fatal_error("non-ENOMEM MAP_FIXED mmap failure"); 36 | } 37 | return ret; 38 | } 39 | 40 | bool memory_unmap(void *ptr, size_t size) { 41 | bool ret = munmap(ptr, size); 42 | if (unlikely(ret) && errno != ENOMEM) { 43 | fatal_error("non-ENOMEM munmap failure"); 44 | } 45 | return ret; 46 | } 47 | 48 | static bool memory_protect_prot(void *ptr, size_t size, int prot, UNUSED int pkey) { 49 | #ifdef USE_PKEY 50 | bool ret = pkey_mprotect(ptr, size, prot, pkey); 51 | #else 52 | bool ret = mprotect(ptr, size, prot); 53 | #endif 54 | if (unlikely(ret) && errno != ENOMEM) { 55 | fatal_error("non-ENOMEM mprotect failure"); 56 | } 57 | return ret; 58 | } 59 | 60 | bool memory_protect_ro(void *ptr, size_t size) { 61 | return memory_protect_prot(ptr, size, PROT_READ, -1); 62 | } 63 | 64 | bool memory_protect_rw(void *ptr, size_t size) { 65 | return memory_protect_prot(ptr, 
size, PROT_READ|PROT_WRITE, -1); 66 | } 67 | 68 | bool memory_protect_rw_metadata(void *ptr, size_t size) { 69 | return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE, get_metadata_key()); 70 | } 71 | 72 | #ifdef HAVE_COMPATIBLE_MREMAP 73 | bool memory_remap(void *old, size_t old_size, size_t new_size) { 74 | void *ptr = mremap(old, old_size, new_size, 0); 75 | bool ret = ptr == MAP_FAILED; 76 | if (unlikely(ret) && errno != ENOMEM) { 77 | fatal_error("non-ENOMEM mremap failure"); 78 | } 79 | return ret; 80 | } 81 | 82 | bool memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) { 83 | void *ptr = mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new); 84 | bool ret = ptr == MAP_FAILED; 85 | if (unlikely(ret) && errno != ENOMEM) { 86 | fatal_error("non-ENOMEM MREMAP_FIXED mremap failure"); 87 | } 88 | return ret; 89 | } 90 | #endif 91 | 92 | bool memory_purge(void *ptr, size_t size) { 93 | int ret = madvise(ptr, size, MADV_DONTNEED); 94 | if (unlikely(ret) && errno != ENOMEM) { 95 | fatal_error("non-ENOMEM MADV_DONTNEED madvise failure"); 96 | } 97 | return ret; 98 | } 99 | 100 | bool memory_set_name(UNUSED void *ptr, UNUSED size_t size, UNUSED const char *name) { 101 | #ifdef LABEL_MEMORY 102 | return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name); 103 | #else 104 | return false; 105 | #endif 106 | } 107 | -------------------------------------------------------------------------------- /random.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "chacha.h" 5 | #include "random.h" 6 | #include "util.h" 7 | 8 | #include 9 | 10 | static void get_random_seed(void *buf, size_t size) { 11 | while (size) { 12 | ssize_t r; 13 | 14 | do { 15 | r = getrandom(buf, size, 0); 16 | } while (r == -1 && errno == EINTR); 17 | 18 | if (r <= 0) { 19 | fatal_error("getrandom failed"); 20 | } 21 | 22 | buf = (char *)buf + r; 23 | size -= r; 24 | } 25 | } 26 | 27 | 
void random_state_init(struct random_state *state) { 28 | u8 rnd[CHACHA_KEY_SIZE + CHACHA_IV_SIZE]; 29 | get_random_seed(rnd, sizeof(rnd)); 30 | chacha_keysetup(&state->ctx, rnd); 31 | chacha_ivsetup(&state->ctx, rnd + CHACHA_KEY_SIZE); 32 | state->index = RANDOM_CACHE_SIZE; 33 | state->reseed = 0; 34 | } 35 | 36 | void random_state_init_from_random_state(struct random_state *state, struct random_state *source) { 37 | u8 rnd[CHACHA_KEY_SIZE + CHACHA_IV_SIZE]; 38 | get_random_bytes(source, rnd, sizeof(rnd)); 39 | chacha_keysetup(&state->ctx, rnd); 40 | chacha_ivsetup(&state->ctx, rnd + CHACHA_KEY_SIZE); 41 | state->index = RANDOM_CACHE_SIZE; 42 | state->reseed = 0; 43 | } 44 | 45 | static void refill(struct random_state *state) { 46 | if (state->reseed >= RANDOM_RESEED_SIZE) { 47 | random_state_init(state); 48 | } 49 | chacha_keystream_bytes(&state->ctx, state->cache, RANDOM_CACHE_SIZE); 50 | state->index = 0; 51 | state->reseed += RANDOM_CACHE_SIZE; 52 | } 53 | 54 | void get_random_bytes(struct random_state *state, void *buf, size_t size) { 55 | // avoid needless copying to and from the cache as an optimization 56 | if (size > RANDOM_CACHE_SIZE / 2) { 57 | chacha_keystream_bytes(&state->ctx, buf, size); 58 | return; 59 | } 60 | 61 | while (size) { 62 | if (state->index == RANDOM_CACHE_SIZE) { 63 | refill(state); 64 | } 65 | 66 | size_t remaining = RANDOM_CACHE_SIZE - state->index; 67 | size_t copy_size = min(size, remaining); 68 | memcpy(buf, state->cache + state->index, copy_size); 69 | state->index += copy_size; 70 | 71 | buf = (char *)buf + copy_size; 72 | size -= copy_size; 73 | } 74 | } 75 | 76 | u16 get_random_u16(struct random_state *state) { 77 | u16 value; 78 | unsigned remaining = RANDOM_CACHE_SIZE - state->index; 79 | if (remaining < sizeof(value)) { 80 | refill(state); 81 | } 82 | memcpy(&value, state->cache + state->index, sizeof(value)); 83 | state->index += sizeof(value); 84 | return value; 85 | } 86 | 87 | // See Fast Random Integer Generation in an 
Interval by Daniel Lemire 88 | u16 get_random_u16_uniform(struct random_state *state, u16 bound) { 89 | u32 random = get_random_u16(state); 90 | u32 multiresult = random * bound; 91 | u16 leftover = multiresult; 92 | if (leftover < bound) { 93 | u16 threshold = -bound % bound; 94 | while (leftover < threshold) { 95 | random = get_random_u16(state); 96 | multiresult = random * bound; 97 | leftover = (u16)multiresult; 98 | } 99 | } 100 | return multiresult >> 16; 101 | } 102 | 103 | u64 get_random_u64(struct random_state *state) { 104 | u64 value; 105 | unsigned remaining = RANDOM_CACHE_SIZE - state->index; 106 | if (remaining < sizeof(value)) { 107 | refill(state); 108 | } 109 | memcpy(&value, state->cache + state->index, sizeof(value)); 110 | state->index += sizeof(value); 111 | return value; 112 | } 113 | 114 | // See Fast Random Integer Generation in an Interval by Daniel Lemire 115 | u64 get_random_u64_uniform(struct random_state *state, u64 bound) { 116 | u128 random = get_random_u64(state); 117 | u128 multiresult = random * bound; 118 | u64 leftover = multiresult; 119 | if (leftover < bound) { 120 | u64 threshold = -bound % bound; 121 | while (leftover < threshold) { 122 | random = get_random_u64(state); 123 | multiresult = random * bound; 124 | leftover = multiresult; 125 | } 126 | } 127 | return multiresult >> 64; 128 | } 129 | -------------------------------------------------------------------------------- /include/h_malloc.h: -------------------------------------------------------------------------------- 1 | #ifndef ALLOCATOR_H 2 | #define ALLOCATOR_H 3 | 4 | #include 5 | 6 | #include 7 | 8 | #ifdef __cplusplus 9 | extern "C" { 10 | #endif 11 | 12 | #ifndef H_MALLOC_PREFIX 13 | #define h_malloc malloc 14 | #define h_calloc calloc 15 | #define h_realloc realloc 16 | #define h_aligned_alloc aligned_alloc 17 | #define h_free free 18 | 19 | #define h_posix_memalign posix_memalign 20 | 21 | #define h_malloc_usable_size malloc_usable_size 22 | #define h_mallopt 
mallopt 23 | #define h_malloc_trim malloc_trim 24 | #define h_malloc_stats malloc_stats 25 | #define h_mallinfo mallinfo 26 | #define h_mallinfo2 mallinfo2 27 | #define h_malloc_info malloc_info 28 | 29 | #define h_memalign memalign 30 | #define h_valloc valloc 31 | #define h_pvalloc pvalloc 32 | #define h_cfree cfree 33 | #define h_malloc_get_state malloc_get_state 34 | #define h_malloc_set_state malloc_set_state 35 | 36 | #define h_mallinfo_narenas mallinfo_narenas 37 | #define h_mallinfo_nbins mallinfo_nbins 38 | #define h_mallinfo_arena_info mallinfo_arena_info 39 | #define h_mallinfo_bin_info mallinfo_bin_info 40 | 41 | #define h_malloc_iterate malloc_iterate 42 | #define h_malloc_disable malloc_disable 43 | #define h_malloc_enable malloc_enable 44 | 45 | #define h_malloc_object_size malloc_object_size 46 | #define h_malloc_object_size_fast malloc_object_size_fast 47 | #define h_free_sized free_sized 48 | #endif 49 | 50 | // C standard 51 | __attribute__((malloc)) __attribute__((alloc_size(1))) void *h_malloc(size_t size); 52 | __attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *h_calloc(size_t nmemb, size_t size); 53 | __attribute__((alloc_size(2))) void *h_realloc(void *ptr, size_t size); 54 | __attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1))) 55 | void *h_aligned_alloc(size_t alignment, size_t size); 56 | void h_free(void *ptr); 57 | 58 | // POSIX 59 | int h_posix_memalign(void **memptr, size_t alignment, size_t size); 60 | 61 | #ifdef __ANDROID__ 62 | #define H_MALLOC_USABLE_SIZE_CONST const 63 | #else 64 | #define H_MALLOC_USABLE_SIZE_CONST 65 | #endif 66 | 67 | // glibc extensions 68 | size_t h_malloc_usable_size(H_MALLOC_USABLE_SIZE_CONST void *ptr); 69 | int h_mallopt(int param, int value); 70 | int h_malloc_trim(size_t pad); 71 | void h_malloc_stats(void); 72 | #if defined(__GLIBC__) || defined(__ANDROID__) 73 | struct mallinfo h_mallinfo(void); 74 | #endif 75 | #ifndef __ANDROID__ 76 | int 
h_malloc_info(int options, FILE *fp); 77 | #endif 78 | 79 | // obsolete glibc extensions 80 | __attribute__((malloc)) __attribute__((alloc_size(2))) __attribute__((alloc_align(1))) 81 | void *h_memalign(size_t alignment, size_t size); 82 | #ifndef __ANDROID__ 83 | __attribute__((malloc)) __attribute__((alloc_size(1))) void *h_valloc(size_t size); 84 | __attribute__((malloc)) void *h_pvalloc(size_t size); 85 | #endif 86 | #ifdef __GLIBC__ 87 | void h_cfree(void *ptr) __THROW; 88 | void *h_malloc_get_state(void); 89 | int h_malloc_set_state(void *state); 90 | #endif 91 | 92 | // Android extensions 93 | #ifdef __ANDROID__ 94 | size_t h_mallinfo_narenas(void); 95 | size_t h_mallinfo_nbins(void); 96 | struct mallinfo h_mallinfo_arena_info(size_t arena); 97 | struct mallinfo h_mallinfo_bin_info(size_t arena, size_t bin); 98 | int h_malloc_iterate(uintptr_t base, size_t size, void (*callback)(uintptr_t ptr, size_t size, void *arg), 99 | void *arg); 100 | void h_malloc_disable(void); 101 | void h_malloc_enable(void); 102 | #endif 103 | 104 | // hardened_malloc extensions 105 | 106 | // return an upper bound on object size for any pointer based on malloc metadata 107 | size_t h_malloc_object_size(const void *ptr); 108 | 109 | // similar to malloc_object_size, but avoiding locking so the results are much more limited 110 | size_t h_malloc_object_size_fast(const void *ptr); 111 | 112 | // The free function with an extra parameter for passing the size requested at 113 | // allocation time. 114 | // 115 | // This offers the same functionality as C++14 sized deallocation and can be 116 | // used to implement it. 117 | // 118 | // A performance-oriented allocator would use this as a performance 119 | // enhancement with undefined behavior on a mismatch. Instead, this hardened 120 | // allocator implementation uses it to improve security by checking that the 121 | // passed size matches the allocated size. 
122 | void h_free_sized(void *ptr, size_t expected_size); 123 | 124 | #ifdef __cplusplus 125 | } 126 | #endif 127 | 128 | #endif 129 | -------------------------------------------------------------------------------- /new.cc: -------------------------------------------------------------------------------- 1 | // needed with libstdc++ but not libc++ 2 | #if __has_include() 3 | #include 4 | #endif 5 | 6 | #include 7 | 8 | #include "h_malloc.h" 9 | #include "util.h" 10 | 11 | COLD static void *handle_out_of_memory(size_t size, bool nothrow) { 12 | void *ptr = nullptr; 13 | 14 | do { 15 | std::new_handler handler = std::get_new_handler(); 16 | if (handler == nullptr) { 17 | break; 18 | } 19 | 20 | try { 21 | handler(); 22 | } catch (const std::bad_alloc &) { 23 | break; 24 | } 25 | 26 | ptr = h_malloc(size); 27 | } while (ptr == nullptr); 28 | 29 | if (ptr == nullptr && !nothrow) { 30 | std::__throw_bad_alloc(); 31 | } 32 | return ptr; 33 | } 34 | 35 | static inline void *new_impl(size_t size, bool nothrow) { 36 | void *ptr = h_malloc(size); 37 | if (likely(ptr != nullptr)) { 38 | return ptr; 39 | } 40 | return handle_out_of_memory(size, nothrow); 41 | } 42 | 43 | EXPORT void *operator new(size_t size) { 44 | return new_impl(size, false); 45 | } 46 | 47 | EXPORT void *operator new[](size_t size) { 48 | return new_impl(size, false); 49 | } 50 | 51 | EXPORT void *operator new(size_t size, const std::nothrow_t &) noexcept { 52 | return new_impl(size, true); 53 | } 54 | 55 | EXPORT void *operator new[](size_t size, const std::nothrow_t &) noexcept { 56 | return new_impl(size, true); 57 | } 58 | 59 | EXPORT void operator delete(void *ptr) noexcept { 60 | h_free(ptr); 61 | } 62 | 63 | EXPORT void operator delete[](void *ptr) noexcept { 64 | h_free(ptr); 65 | } 66 | 67 | EXPORT void operator delete(void *ptr, const std::nothrow_t &) noexcept { 68 | h_free(ptr); 69 | } 70 | 71 | EXPORT void operator delete[](void *ptr, const std::nothrow_t &) noexcept { 72 | h_free(ptr); 73 | } 
74 | 75 | EXPORT void operator delete(void *ptr, size_t size) noexcept { 76 | h_free_sized(ptr, size); 77 | } 78 | 79 | EXPORT void operator delete[](void *ptr, size_t size) noexcept { 80 | h_free_sized(ptr, size); 81 | } 82 | 83 | COLD static void *handle_out_of_memory(size_t size, size_t alignment, bool nothrow) { 84 | void *ptr = nullptr; 85 | 86 | do { 87 | std::new_handler handler = std::get_new_handler(); 88 | if (handler == nullptr) { 89 | break; 90 | } 91 | 92 | try { 93 | handler(); 94 | } catch (const std::bad_alloc &) { 95 | break; 96 | } 97 | 98 | ptr = h_aligned_alloc(alignment, size); 99 | } while (ptr == nullptr); 100 | 101 | if (ptr == nullptr && !nothrow) { 102 | std::__throw_bad_alloc(); 103 | } 104 | return ptr; 105 | } 106 | 107 | static inline void *new_impl(size_t size, size_t alignment, bool nothrow) { 108 | void *ptr = h_aligned_alloc(alignment, size); 109 | if (likely(ptr != nullptr)) { 110 | return ptr; 111 | } 112 | return handle_out_of_memory(size, alignment, nothrow); 113 | } 114 | 115 | EXPORT void *operator new(size_t size, std::align_val_t alignment) { 116 | return new_impl(size, static_cast(alignment), false); 117 | } 118 | 119 | EXPORT void *operator new[](size_t size, std::align_val_t alignment) { 120 | return new_impl(size, static_cast(alignment), false); 121 | } 122 | 123 | EXPORT void *operator new(size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept { 124 | return new_impl(size, static_cast(alignment), true); 125 | } 126 | 127 | EXPORT void *operator new[](size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept { 128 | return new_impl(size, static_cast(alignment), true); 129 | } 130 | 131 | EXPORT void operator delete(void *ptr, std::align_val_t) noexcept { 132 | h_free(ptr); 133 | } 134 | 135 | EXPORT void operator delete[](void *ptr, std::align_val_t) noexcept { 136 | h_free(ptr); 137 | } 138 | 139 | EXPORT void operator delete(void *ptr, std::align_val_t, const std::nothrow_t &) 
noexcept { 140 | h_free(ptr); 141 | } 142 | 143 | EXPORT void operator delete[](void *ptr, std::align_val_t, const std::nothrow_t &) noexcept { 144 | h_free(ptr); 145 | } 146 | 147 | EXPORT void operator delete(void *ptr, size_t size, std::align_val_t) noexcept { 148 | h_free_sized(ptr, size); 149 | } 150 | 151 | EXPORT void operator delete[](void *ptr, size_t size, std::align_val_t) noexcept { 152 | h_free_sized(ptr, size); 153 | } 154 | -------------------------------------------------------------------------------- /chacha.c: -------------------------------------------------------------------------------- 1 | // Based on chacha-merged.c version 20080118 2 | // D. J. Bernstein 3 | // Public domain. 4 | 5 | #include "chacha.h" 6 | 7 | // ChaCha8 8 | static const unsigned rounds = 8; 9 | 10 | #define U8C(v) (v##U) 11 | #define U32C(v) (v##U) 12 | 13 | #define U8V(v) ((u8)(v) & U8C(0xFF)) 14 | #define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF)) 15 | 16 | #define ROTL32(v, n) \ 17 | (U32V((v) << (n)) | ((v) >> (32 - (n)))) 18 | 19 | #define U8TO32_LITTLE(p) \ 20 | (((u32)((p)[0])) | \ 21 | ((u32)((p)[1]) << 8) | \ 22 | ((u32)((p)[2]) << 16) | \ 23 | ((u32)((p)[3]) << 24)) 24 | 25 | #define U32TO8_LITTLE(p, v) \ 26 | do { \ 27 | (p)[0] = U8V((v)); \ 28 | (p)[1] = U8V((v) >> 8); \ 29 | (p)[2] = U8V((v) >> 16); \ 30 | (p)[3] = U8V((v) >> 24); \ 31 | } while (0) 32 | 33 | #define ROTATE(v, c) (ROTL32(v, c)) 34 | #define XOR(v, w) ((v) ^ (w)) 35 | #define PLUS(v, w) (U32V((v) + (w))) 36 | #define PLUSONE(v) (PLUS((v), 1)) 37 | 38 | #define QUARTERROUND(a, b, c, d) \ 39 | a = PLUS(a, b); d = ROTATE(XOR(d, a), 16); \ 40 | c = PLUS(c, d); b = ROTATE(XOR(b, c), 12); \ 41 | a = PLUS(a, b); d = ROTATE(XOR(d, a), 8); \ 42 | c = PLUS(c, d); b = ROTATE(XOR(b, c), 7); 43 | 44 | static const char sigma[16] = "expand 32-byte k"; 45 | 46 | void chacha_keysetup(chacha_ctx *x, const u8 *k) { 47 | x->input[0] = U8TO32_LITTLE(sigma + 0); 48 | x->input[1] = U8TO32_LITTLE(sigma + 4); 49 | 
x->input[2] = U8TO32_LITTLE(sigma + 8); 50 | x->input[3] = U8TO32_LITTLE(sigma + 12); 51 | x->input[4] = U8TO32_LITTLE(k + 0); 52 | x->input[5] = U8TO32_LITTLE(k + 4); 53 | x->input[6] = U8TO32_LITTLE(k + 8); 54 | x->input[7] = U8TO32_LITTLE(k + 12); 55 | x->input[8] = U8TO32_LITTLE(k + 16); 56 | x->input[9] = U8TO32_LITTLE(k + 20); 57 | x->input[10] = U8TO32_LITTLE(k + 24); 58 | x->input[11] = U8TO32_LITTLE(k + 28); 59 | } 60 | 61 | void chacha_ivsetup(chacha_ctx *x, const u8 *iv) { 62 | x->input[12] = 0; 63 | x->input[13] = 0; 64 | x->input[14] = U8TO32_LITTLE(iv + 0); 65 | x->input[15] = U8TO32_LITTLE(iv + 4); 66 | } 67 | 68 | void chacha_keystream_bytes(chacha_ctx *x, u8 *c, u32 bytes) { 69 | if (!bytes) { 70 | return; 71 | } 72 | 73 | u8 *ctarget; 74 | u8 tmp[64]; 75 | 76 | u32 j0 = x->input[0]; 77 | u32 j1 = x->input[1]; 78 | u32 j2 = x->input[2]; 79 | u32 j3 = x->input[3]; 80 | u32 j4 = x->input[4]; 81 | u32 j5 = x->input[5]; 82 | u32 j6 = x->input[6]; 83 | u32 j7 = x->input[7]; 84 | u32 j8 = x->input[8]; 85 | u32 j9 = x->input[9]; 86 | u32 j10 = x->input[10]; 87 | u32 j11 = x->input[11]; 88 | u32 j12 = x->input[12]; 89 | u32 j13 = x->input[13]; 90 | u32 j14 = x->input[14]; 91 | u32 j15 = x->input[15]; 92 | 93 | for (;;) { 94 | if (bytes < 64) { 95 | ctarget = c; 96 | c = tmp; 97 | } 98 | u32 x0 = j0; 99 | u32 x1 = j1; 100 | u32 x2 = j2; 101 | u32 x3 = j3; 102 | u32 x4 = j4; 103 | u32 x5 = j5; 104 | u32 x6 = j6; 105 | u32 x7 = j7; 106 | u32 x8 = j8; 107 | u32 x9 = j9; 108 | u32 x10 = j10; 109 | u32 x11 = j11; 110 | u32 x12 = j12; 111 | u32 x13 = j13; 112 | u32 x14 = j14; 113 | u32 x15 = j15; 114 | for (unsigned i = rounds; i > 0; i -= 2) { 115 | QUARTERROUND(x0, x4, x8, x12) 116 | QUARTERROUND(x1, x5, x9, x13) 117 | QUARTERROUND(x2, x6, x10, x14) 118 | QUARTERROUND(x3, x7, x11, x15) 119 | QUARTERROUND(x0, x5, x10, x15) 120 | QUARTERROUND(x1, x6, x11, x12) 121 | QUARTERROUND(x2, x7, x8, x13) 122 | QUARTERROUND(x3, x4, x9, x14) 123 | } 124 | x0 = PLUS(x0, j0); 
125 | x1 = PLUS(x1, j1); 126 | x2 = PLUS(x2, j2); 127 | x3 = PLUS(x3, j3); 128 | x4 = PLUS(x4, j4); 129 | x5 = PLUS(x5, j5); 130 | x6 = PLUS(x6, j6); 131 | x7 = PLUS(x7, j7); 132 | x8 = PLUS(x8, j8); 133 | x9 = PLUS(x9, j9); 134 | x10 = PLUS(x10, j10); 135 | x11 = PLUS(x11, j11); 136 | x12 = PLUS(x12, j12); 137 | x13 = PLUS(x13, j13); 138 | x14 = PLUS(x14, j14); 139 | x15 = PLUS(x15, j15); 140 | 141 | j12 = PLUSONE(j12); 142 | if (!j12) { 143 | j13 = PLUSONE(j13); 144 | // stopping at 2^70 bytes per nonce is user's responsibility 145 | } 146 | 147 | U32TO8_LITTLE(c + 0, x0); 148 | U32TO8_LITTLE(c + 4, x1); 149 | U32TO8_LITTLE(c + 8, x2); 150 | U32TO8_LITTLE(c + 12, x3); 151 | U32TO8_LITTLE(c + 16, x4); 152 | U32TO8_LITTLE(c + 20, x5); 153 | U32TO8_LITTLE(c + 24, x6); 154 | U32TO8_LITTLE(c + 28, x7); 155 | U32TO8_LITTLE(c + 32, x8); 156 | U32TO8_LITTLE(c + 36, x9); 157 | U32TO8_LITTLE(c + 40, x10); 158 | U32TO8_LITTLE(c + 44, x11); 159 | U32TO8_LITTLE(c + 48, x12); 160 | U32TO8_LITTLE(c + 52, x13); 161 | U32TO8_LITTLE(c + 56, x14); 162 | U32TO8_LITTLE(c + 60, x15); 163 | 164 | if (bytes <= 64) { 165 | if (bytes < 64) { 166 | for (unsigned i = 0; i < bytes; ++i) { 167 | ctarget[i] = c[i]; 168 | } 169 | } 170 | x->input[12] = j12; 171 | x->input[13] = j13; 172 | return; 173 | } 174 | bytes -= 64; 175 | c += 64; 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VARIANT := default 2 | 3 | ifneq ($(VARIANT),) 4 | CONFIG_FILE := config/$(VARIANT).mk 5 | include config/$(VARIANT).mk 6 | endif 7 | 8 | ifeq ($(VARIANT),default) 9 | SUFFIX := 10 | else 11 | SUFFIX := -$(VARIANT) 12 | endif 13 | 14 | OUT := out$(SUFFIX) 15 | 16 | define safe_flag 17 | $(shell $(CC) $(if $(filter clang%,$(CC)),-Werror=unknown-warning-option) -E $1 - /dev/null 2>&1 && echo $1 || echo $2) 18 | endef 19 | 20 | CPPFLAGS := $(CPPFLAGS) -D_GNU_SOURCE -I 
include 21 | SHARED_FLAGS := -pipe -O3 -flto -fPIC -fvisibility=hidden -fno-plt \ 22 | -fstack-clash-protection $(call safe_flag,-fcf-protection) -fstack-protector-strong \ 23 | -Wall -Wextra $(call safe_flag,-Wcast-align=strict,-Wcast-align) -Wcast-qual -Wwrite-strings \ 24 | -Wundef 25 | 26 | ifeq ($(CONFIG_WERROR),true) 27 | SHARED_FLAGS += -Werror 28 | endif 29 | 30 | ifeq ($(CONFIG_NATIVE),true) 31 | SHARED_FLAGS += -march=native 32 | endif 33 | 34 | ifeq ($(CONFIG_UBSAN),true) 35 | SHARED_FLAGS += -fsanitize=undefined -fno-sanitize-recover=undefined 36 | endif 37 | 38 | CFLAGS := $(CFLAGS) -std=c17 $(SHARED_FLAGS) -Wmissing-prototypes -Wstrict-prototypes 39 | CXXFLAGS := $(CXXFLAGS) -std=c++17 -fsized-deallocation $(SHARED_FLAGS) 40 | LDFLAGS := $(LDFLAGS) -Wl,-O1,--as-needed,-z,defs,-z,relro,-z,now,-z,nodlopen,-z,text 41 | 42 | SOURCES := chacha.c h_malloc.c memory.c pages.c random.c util.c 43 | OBJECTS := $(SOURCES:.c=.o) 44 | 45 | ifeq ($(CONFIG_CXX_ALLOCATOR),true) 46 | # make sure LTO is compatible in case CC and CXX don't match (such as clang and g++) 47 | CXX := $(CC) 48 | LDLIBS += -lstdc++ 49 | 50 | SOURCES += new.cc 51 | OBJECTS += new.o 52 | endif 53 | 54 | OBJECTS := $(addprefix $(OUT)/,$(OBJECTS)) 55 | 56 | ifeq (,$(filter $(CONFIG_SEAL_METADATA),true false)) 57 | $(error CONFIG_SEAL_METADATA must be true or false) 58 | endif 59 | 60 | ifeq (,$(filter $(CONFIG_ZERO_ON_FREE),true false)) 61 | $(error CONFIG_ZERO_ON_FREE must be true or false) 62 | endif 63 | 64 | ifeq (,$(filter $(CONFIG_WRITE_AFTER_FREE_CHECK),true false)) 65 | $(error CONFIG_WRITE_AFTER_FREE_CHECK must be true or false) 66 | endif 67 | 68 | ifeq (,$(filter $(CONFIG_SLOT_RANDOMIZE),true false)) 69 | $(error CONFIG_SLOT_RANDOMIZE must be true or false) 70 | endif 71 | 72 | ifeq (,$(filter $(CONFIG_SLAB_CANARY),true false)) 73 | $(error CONFIG_SLAB_CANARY must be true or false) 74 | endif 75 | 76 | ifeq (,$(filter $(CONFIG_EXTENDED_SIZE_CLASSES),true false)) 77 | $(error 
CONFIG_EXTENDED_SIZE_CLASSES must be true or false) 78 | endif 79 | 80 | ifeq (,$(filter $(CONFIG_LARGE_SIZE_CLASSES),true false)) 81 | $(error CONFIG_LARGE_SIZE_CLASSES must be true or false) 82 | endif 83 | 84 | ifeq (,$(filter $(CONFIG_STATS),true false)) 85 | $(error CONFIG_STATS must be true or false) 86 | endif 87 | 88 | ifeq (,$(filter $(CONFIG_SELF_INIT),true false)) 89 | $(error CONFIG_SELF_INIT must be true or false) 90 | endif 91 | 92 | CPPFLAGS += \ 93 | -DCONFIG_SEAL_METADATA=$(CONFIG_SEAL_METADATA) \ 94 | -DZERO_ON_FREE=$(CONFIG_ZERO_ON_FREE) \ 95 | -DWRITE_AFTER_FREE_CHECK=$(CONFIG_WRITE_AFTER_FREE_CHECK) \ 96 | -DSLOT_RANDOMIZE=$(CONFIG_SLOT_RANDOMIZE) \ 97 | -DSLAB_CANARY=$(CONFIG_SLAB_CANARY) \ 98 | -DSLAB_QUARANTINE_RANDOM_LENGTH=$(CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH) \ 99 | -DSLAB_QUARANTINE_QUEUE_LENGTH=$(CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH) \ 100 | -DCONFIG_EXTENDED_SIZE_CLASSES=$(CONFIG_EXTENDED_SIZE_CLASSES) \ 101 | -DCONFIG_LARGE_SIZE_CLASSES=$(CONFIG_LARGE_SIZE_CLASSES) \ 102 | -DGUARD_SLABS_INTERVAL=$(CONFIG_GUARD_SLABS_INTERVAL) \ 103 | -DGUARD_SIZE_DIVISOR=$(CONFIG_GUARD_SIZE_DIVISOR) \ 104 | -DREGION_QUARANTINE_RANDOM_LENGTH=$(CONFIG_REGION_QUARANTINE_RANDOM_LENGTH) \ 105 | -DREGION_QUARANTINE_QUEUE_LENGTH=$(CONFIG_REGION_QUARANTINE_QUEUE_LENGTH) \ 106 | -DREGION_QUARANTINE_SKIP_THRESHOLD=$(CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD) \ 107 | -DFREE_SLABS_QUARANTINE_RANDOM_LENGTH=$(CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH) \ 108 | -DCONFIG_CLASS_REGION_SIZE=$(CONFIG_CLASS_REGION_SIZE) \ 109 | -DN_ARENA=$(CONFIG_N_ARENA) \ 110 | -DCONFIG_STATS=$(CONFIG_STATS) \ 111 | -DCONFIG_SELF_INIT=$(CONFIG_SELF_INIT) 112 | 113 | $(OUT)/libhardened_malloc$(SUFFIX).so: $(OBJECTS) | $(OUT) 114 | $(CC) $(CFLAGS) $(LDFLAGS) -shared $^ $(LDLIBS) -o $@ 115 | 116 | $(OUT): 117 | mkdir -p $(OUT) 118 | 119 | $(OUT)/chacha.o: chacha.c chacha.h util.h $(CONFIG_FILE) | $(OUT) 120 | $(COMPILE.c) $(OUTPUT_OPTION) $< 121 | $(OUT)/h_malloc.o: h_malloc.c 
include/h_malloc.h mutex.h memory.h pages.h random.h util.h $(CONFIG_FILE) | $(OUT) 122 | $(COMPILE.c) $(OUTPUT_OPTION) $< 123 | $(OUT)/memory.o: memory.c memory.h util.h $(CONFIG_FILE) | $(OUT) 124 | $(COMPILE.c) $(OUTPUT_OPTION) $< 125 | $(OUT)/new.o: new.cc include/h_malloc.h util.h $(CONFIG_FILE) | $(OUT) 126 | $(COMPILE.cc) $(OUTPUT_OPTION) $< 127 | $(OUT)/pages.o: pages.c pages.h memory.h util.h $(CONFIG_FILE) | $(OUT) 128 | $(COMPILE.c) $(OUTPUT_OPTION) $< 129 | $(OUT)/random.o: random.c random.h chacha.h util.h $(CONFIG_FILE) | $(OUT) 130 | $(COMPILE.c) $(OUTPUT_OPTION) $< 131 | $(OUT)/util.o: util.c util.h $(CONFIG_FILE) | $(OUT) 132 | $(COMPILE.c) $(OUTPUT_OPTION) $< 133 | 134 | check: tidy 135 | 136 | tidy: 137 | clang-tidy --extra-arg=-std=c17 $(filter %.c,$(SOURCES)) -- $(CPPFLAGS) 138 | clang-tidy --extra-arg=-std=c++17 $(filter %.cc,$(SOURCES)) -- $(CPPFLAGS) 139 | 140 | clean: 141 | rm -f $(OUT)/libhardened_malloc.so $(OBJECTS) 142 | $(MAKE) -C test/ clean 143 | 144 | test: $(OUT)/libhardened_malloc$(SUFFIX).so 145 | $(MAKE) -C test/ 146 | python3 -m unittest discover --start-directory test/ 147 | 148 | .PHONY: check clean tidy test 149 | -------------------------------------------------------------------------------- /test/test_smc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import unittest 4 | 5 | 6 | class TestSimpleMemoryCorruption(unittest.TestCase): 7 | 8 | @classmethod 9 | def setUpClass(self): 10 | self.dir = os.path.dirname(os.path.realpath(__file__)) 11 | 12 | def run_test(self, test_name): 13 | sub = subprocess.Popen(self.dir + "/" + test_name, 14 | stdout=subprocess.PIPE, stderr=subprocess.PIPE) 15 | stdout, stderr = sub.communicate() 16 | return stdout, stderr, sub.returncode 17 | 18 | def test_delete_type_size_mismatch(self): 19 | _stdout, stderr, returncode = self.run_test( 20 | "delete_type_size_mismatch") 21 | self.assertEqual(returncode, -6) 22 | 
self.assertEqual(stderr.decode( 23 | "utf-8"), "fatal allocator error: sized deallocation mismatch (small)\n") 24 | 25 | def test_double_free_large_delayed(self): 26 | _stdout, stderr, returncode = self.run_test( 27 | "double_free_large_delayed") 28 | self.assertEqual(returncode, -6) 29 | self.assertEqual(stderr.decode("utf-8"), 30 | "fatal allocator error: invalid free\n") 31 | 32 | def test_double_free_large(self): 33 | _stdout, stderr, returncode = self.run_test("double_free_large") 34 | self.assertEqual(returncode, -6) 35 | self.assertEqual(stderr.decode("utf-8"), 36 | "fatal allocator error: invalid free\n") 37 | 38 | def test_double_free_small_delayed(self): 39 | _stdout, stderr, returncode = self.run_test( 40 | "double_free_small_delayed") 41 | self.assertEqual(returncode, -6) 42 | self.assertEqual(stderr.decode("utf-8"), 43 | "fatal allocator error: double free (quarantine)\n") 44 | 45 | def test_double_free_small(self): 46 | _stdout, stderr, returncode = self.run_test("double_free_small") 47 | self.assertEqual(returncode, -6) 48 | self.assertEqual(stderr.decode("utf-8"), 49 | "fatal allocator error: double free (quarantine)\n") 50 | 51 | def test_overflow_large_1_byte(self): 52 | _stdout, _stderr, returncode = self.run_test( 53 | "overflow_large_1_byte") 54 | self.assertEqual(returncode, -11) 55 | 56 | def test_overflow_large_8_byte(self): 57 | _stdout, _stderr, returncode = self.run_test( 58 | "overflow_large_8_byte") 59 | self.assertEqual(returncode, -11) 60 | 61 | def test_overflow_small_1_byte(self): 62 | _stdout, stderr, returncode = self.run_test( 63 | "overflow_small_1_byte") 64 | self.assertEqual(returncode, -6) 65 | self.assertEqual(stderr.decode("utf-8"), 66 | "fatal allocator error: canary corrupted\n") 67 | 68 | def test_overflow_small_8_byte(self): 69 | _stdout, stderr, returncode = self.run_test( 70 | "overflow_small_8_byte") 71 | self.assertEqual(returncode, -6) 72 | self.assertEqual(stderr.decode("utf-8"), 73 | "fatal allocator error: 
canary corrupted\n") 74 | 75 | def test_invalid_free_protected(self): 76 | _stdout, stderr, returncode = self.run_test("invalid_free_protected") 77 | self.assertEqual(returncode, -6) 78 | self.assertEqual(stderr.decode("utf-8"), 79 | "fatal allocator error: invalid free\n") 80 | 81 | def test_invalid_free_small_region_far(self): 82 | _stdout, stderr, returncode = self.run_test( 83 | "invalid_free_small_region_far") 84 | self.assertEqual(returncode, -6) 85 | self.assertEqual(stderr.decode( 86 | "utf-8"), "fatal allocator error: invalid free within a slab yet to be used\n") 87 | 88 | def test_invalid_free_small_region(self): 89 | _stdout, stderr, returncode = self.run_test( 90 | "invalid_free_small_region") 91 | self.assertEqual(returncode, -6) 92 | self.assertEqual(stderr.decode("utf-8"), 93 | "fatal allocator error: double free\n") 94 | 95 | def test_invalid_free_unprotected(self): 96 | _stdout, stderr, returncode = self.run_test("invalid_free_unprotected") 97 | self.assertEqual(returncode, -6) 98 | self.assertEqual(stderr.decode("utf-8"), 99 | "fatal allocator error: invalid free\n") 100 | 101 | def test_invalid_malloc_usable_size_small_quarantene(self): 102 | _stdout, stderr, returncode = self.run_test( 103 | "invalid_malloc_usable_size_small_quarantine") 104 | self.assertEqual(returncode, -6) 105 | self.assertEqual(stderr.decode( 106 | "utf-8"), "fatal allocator error: invalid malloc_usable_size (quarantine)\n") 107 | 108 | def test_invalid_malloc_usable_size_small(self): 109 | _stdout, stderr, returncode = self.run_test( 110 | "invalid_malloc_usable_size_small") 111 | self.assertEqual(returncode, -6) 112 | self.assertEqual(stderr.decode( 113 | "utf-8"), "fatal allocator error: invalid malloc_usable_size\n") 114 | 115 | def test_read_after_free_large(self): 116 | _stdout, _stderr, returncode = self.run_test("read_after_free_large") 117 | self.assertEqual(returncode, -11) 118 | 119 | def test_read_after_free_small(self): 120 | stdout, _stderr, returncode = 
self.run_test("read_after_free_small") 121 | self.assertEqual(returncode, 0) 122 | self.assertEqual(stdout.decode("utf-8"), 123 | "0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n") 124 | 125 | def test_read_zero_size(self): 126 | _stdout, _stderr, returncode = self.run_test("read_zero_size") 127 | self.assertEqual(returncode, -11) 128 | 129 | def test_string_overflow(self): 130 | stdout, _stderr, returncode = self.run_test("string_overflow") 131 | self.assertEqual(returncode, 0) 132 | self.assertEqual(stdout.decode("utf-8"), "overflow by 0 bytes\n") 133 | 134 | def test_unaligned_free_large(self): 135 | _stdout, stderr, returncode = self.run_test("unaligned_free_large") 136 | self.assertEqual(returncode, -6) 137 | self.assertEqual(stderr.decode("utf-8"), 138 | "fatal allocator error: invalid free\n") 139 | 140 | def test_unaligned_free_small(self): 141 | _stdout, stderr, returncode = self.run_test("unaligned_free_small") 142 | self.assertEqual(returncode, -6) 143 | self.assertEqual(stderr.decode("utf-8"), 144 | "fatal allocator error: invalid unaligned free\n") 145 | 146 | def test_unaligned_malloc_usable_size_small(self): 147 | _stdout, stderr, returncode = self.run_test( 148 | "unaligned_malloc_usable_size_small") 149 | self.assertEqual(returncode, -6) 150 | self.assertEqual(stderr.decode("utf-8"), 151 | "fatal allocator error: invalid unaligned malloc_usable_size\n") 152 | 153 | def test_uninitialized_free(self): 154 | _stdout, stderr, returncode = self.run_test("uninitialized_free") 155 | self.assertEqual(returncode, -6) 156 | self.assertEqual(stderr.decode("utf-8"), 157 | "fatal allocator error: invalid free\n") 158 | 159 | def test_uninitialized_malloc_usable_size(self): 160 | _stdout, stderr, returncode = self.run_test( 161 | "uninitialized_malloc_usable_size") 162 | self.assertEqual(returncode, -6) 163 | self.assertEqual(stderr.decode("utf-8"), 164 | "fatal allocator error: invalid malloc_usable_size\n") 165 | 166 | def test_uninitialized_realloc(self): 
167 | _stdout, stderr, returncode = self.run_test("uninitialized_realloc") 168 | self.assertEqual(returncode, -6) 169 | self.assertEqual(stderr.decode("utf-8"), 170 | "fatal allocator error: invalid realloc\n") 171 | 172 | def test_write_after_free_large_reuse(self): 173 | _stdout, _stderr, returncode = self.run_test( 174 | "write_after_free_large_reuse") 175 | self.assertEqual(returncode, -11) 176 | 177 | def test_write_after_free_large(self): 178 | _stdout, _stderr, returncode = self.run_test("write_after_free_large") 179 | self.assertEqual(returncode, -11) 180 | 181 | def test_write_after_free_small_reuse(self): 182 | _stdout, stderr, returncode = self.run_test( 183 | "write_after_free_small_reuse") 184 | self.assertEqual(returncode, -6) 185 | self.assertEqual(stderr.decode("utf-8"), 186 | "fatal allocator error: detected write after free\n") 187 | 188 | def test_write_after_free_small(self): 189 | _stdout, stderr, returncode = self.run_test("write_after_free_small") 190 | self.assertEqual(returncode, -6) 191 | self.assertEqual(stderr.decode("utf-8"), 192 | "fatal allocator error: detected write after free\n") 193 | 194 | def test_write_zero_size(self): 195 | _stdout, _stderr, returncode = self.run_test("write_zero_size") 196 | self.assertEqual(returncode, -11) 197 | 198 | def test_malloc_object_size(self): 199 | _stdout, _stderr, returncode = self.run_test("malloc_object_size") 200 | self.assertEqual(returncode, 0) 201 | 202 | def test_malloc_object_size_offset(self): 203 | _stdout, _stderr, returncode = self.run_test( 204 | "malloc_object_size_offset") 205 | self.assertEqual(returncode, 0) 206 | 207 | def test_invalid_malloc_object_size_small(self): 208 | _stdout, stderr, returncode = self.run_test( 209 | "invalid_malloc_object_size_small") 210 | self.assertEqual(returncode, -6) 211 | self.assertEqual(stderr.decode( 212 | "utf-8"), "fatal allocator error: invalid malloc_object_size\n") 213 | 214 | def test_invalid_malloc_object_size_small_quarantine(self): 215 
| _stdout, stderr, returncode = self.run_test( 216 | "invalid_malloc_object_size_small_quarantine") 217 | self.assertEqual(returncode, -6) 218 | self.assertEqual(stderr.decode( 219 | "utf-8"), "fatal allocator error: invalid malloc_object_size (quarantine)\n") 220 | 221 | def test_impossibly_large_malloc(self): 222 | _stdout, stderr, returncode = self.run_test( 223 | "impossibly_large_malloc") 224 | self.assertEqual(returncode, 0) 225 | 226 | def test_uninitialized_read_small(self): 227 | _stdout, stderr, returncode = self.run_test( 228 | "uninitialized_read_small") 229 | self.assertEqual(returncode, 0) 230 | 231 | def test_uninitialized_read_large(self): 232 | _stdout, stderr, returncode = self.run_test( 233 | "uninitialized_read_large") 234 | self.assertEqual(returncode, 0) 235 | 236 | def test_realloc_init(self): 237 | _stdout, _stderr, returncode = self.run_test( 238 | "realloc_init") 239 | self.assertEqual(returncode, 0) 240 | 241 | if __name__ == '__main__': 242 | unittest.main() 243 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | hardened-malloc (0:12.5-1) unstable; urgency=medium 2 | 3 | * New upstream version (local package). 4 | 5 | -- Patrick Schleizer Thu, 18 Jan 2024 14:09:29 +0000 6 | 7 | hardened-malloc (0:12.4-1) unstable; urgency=medium 8 | 9 | * New upstream version (local package). 10 | 11 | -- Patrick Schleizer Mon, 25 Dec 2023 17:32:34 +0000 12 | 13 | hardened-malloc (0:12.3-1) unstable; urgency=medium 14 | 15 | * New upstream version (local package). 16 | 17 | -- Patrick Schleizer Mon, 25 Dec 2023 17:15:38 +0000 18 | 19 | hardened-malloc (0:12.2-1) unstable; urgency=medium 20 | 21 | * New upstream version (local package). 22 | 23 | -- Patrick Schleizer Mon, 25 Dec 2023 16:26:59 +0000 24 | 25 | hardened-malloc (0:12.1-1) unstable; urgency=medium 26 | 27 | * New upstream version (local package). 
28 | 29 | -- Patrick Schleizer Fri, 22 Dec 2023 16:30:58 +0000 30 | 31 | hardened-malloc (0:12.0-1) unstable; urgency=medium 32 | 33 | * New upstream version (local package). 34 | 35 | -- Patrick Schleizer Sun, 01 Oct 2023 14:37:52 +0000 36 | 37 | hardened-malloc (0:11.3-1) unstable; urgency=medium 38 | 39 | * New upstream version (local package). 40 | 41 | -- Patrick Schleizer Mon, 17 Jul 2023 15:44:11 +0000 42 | 43 | hardened-malloc (0:11.2-1) unstable; urgency=medium 44 | 45 | * New upstream version (local package). 46 | 47 | -- Patrick Schleizer Wed, 21 Jun 2023 09:28:07 +0000 48 | 49 | hardened-malloc (0:11.1-7) unstable; urgency=medium 50 | 51 | * New upstream version (local package). 52 | 53 | -- Patrick Schleizer Wed, 21 Jun 2023 09:18:09 +0000 54 | 55 | hardened-malloc (0:11.1-6) unstable; urgency=medium 56 | 57 | * New upstream version (local package). 58 | 59 | -- Patrick Schleizer Fri, 16 Jun 2023 10:57:32 +0000 60 | 61 | hardened-malloc (0:11.1-5) unstable; urgency=medium 62 | 63 | * New upstream version (local package). 64 | 65 | -- Patrick Schleizer Mon, 12 Jun 2023 17:52:19 +0000 66 | 67 | hardened-malloc (0:11.1-4) unstable; urgency=medium 68 | 69 | * New upstream version (local package). 70 | 71 | -- Patrick Schleizer Mon, 12 Jun 2023 15:19:21 +0000 72 | 73 | hardened-malloc (0:11.1-3) unstable; urgency=medium 74 | 75 | * New upstream version (local package). 76 | 77 | -- Patrick Schleizer Sun, 10 Jul 2022 10:59:33 +0000 78 | 79 | hardened-malloc (0:11.1-2) unstable; urgency=medium 80 | 81 | * New upstream version (local package). 82 | 83 | -- Patrick Schleizer Wed, 08 Jun 2022 14:38:53 +0000 84 | 85 | hardened-malloc (0:11.1-1) unstable; urgency=medium 86 | 87 | * New upstream version (local package). 88 | 89 | -- Patrick Schleizer Wed, 25 May 2022 10:06:31 +0000 90 | 91 | hardened-malloc (0:11-1) unstable; urgency=medium 92 | 93 | * New upstream version (local package). 
94 | 95 | -- Patrick Schleizer Thu, 27 Jan 2022 13:06:46 +0000 96 | 97 | hardened-malloc (0:10-4) unstable; urgency=medium 98 | 99 | * New upstream version (local package). 100 | 101 | -- Patrick Schleizer Thu, 27 Jan 2022 12:55:12 +0000 102 | 103 | hardened-malloc (0:10-3) unstable; urgency=medium 104 | 105 | * New upstream version (local package). 106 | 107 | -- Patrick Schleizer Sat, 15 Jan 2022 16:24:06 +0000 108 | 109 | hardened-malloc (0:10-2) unstable; urgency=medium 110 | 111 | * New upstream version (local package). 112 | 113 | -- Patrick Schleizer Sat, 15 Jan 2022 15:30:30 +0000 114 | 115 | hardened-malloc (0:10-1) unstable; urgency=medium 116 | 117 | * New upstream version (local package). 118 | 119 | -- Patrick Schleizer Sat, 15 Jan 2022 14:35:26 +0000 120 | 121 | hardened-malloc (0:8-9) unstable; urgency=medium 122 | 123 | * New upstream version (local package). 124 | 125 | -- Patrick Schleizer Sat, 15 Jan 2022 14:26:06 +0000 126 | 127 | hardened-malloc (0:8-8) unstable; urgency=medium 128 | 129 | * New upstream version (local package). 130 | 131 | -- Patrick Schleizer Sat, 15 Jan 2022 14:06:31 +0000 132 | 133 | hardened-malloc (0:8-7) unstable; urgency=medium 134 | 135 | * New upstream version (local package). 136 | 137 | -- Patrick Schleizer Sat, 15 Jan 2022 13:05:01 +0000 138 | 139 | hardened-malloc (0:8-6) unstable; urgency=medium 140 | 141 | * New upstream version (local package). 142 | 143 | -- Patrick Schleizer Fri, 14 Jan 2022 16:23:49 +0000 144 | 145 | hardened-malloc (0:8-5) unstable; urgency=medium 146 | 147 | * New upstream version (local package). 148 | 149 | -- Patrick Schleizer Thu, 06 Jan 2022 20:26:00 +0000 150 | 151 | hardened-malloc (0:8-4) unstable; urgency=medium 152 | 153 | * New upstream version (local package). 154 | 155 | -- Patrick Schleizer Sat, 11 Sep 2021 22:43:35 +0000 156 | 157 | hardened-malloc (0:8-3) unstable; urgency=medium 158 | 159 | * New upstream version (local package). 
160 | 161 | -- Patrick Schleizer Sat, 04 Sep 2021 15:58:49 +0000 162 | 163 | hardened-malloc (0:8-2) unstable; urgency=medium 164 | 165 | * New upstream version (local package). 166 | 167 | -- Patrick Schleizer Sun, 22 Aug 2021 09:28:39 +0000 168 | 169 | hardened-malloc (0:8-1) unstable; urgency=medium 170 | 171 | * New upstream version (local package). 172 | 173 | -- Patrick Schleizer Tue, 17 Aug 2021 18:10:54 +0000 174 | 175 | hardened-malloc (0:3.0.13-2) unstable; urgency=medium 176 | 177 | * New upstream version (local package). 178 | 179 | -- Patrick Schleizer Thu, 05 Aug 2021 20:43:41 +0000 180 | 181 | hardened-malloc (0:3.0.13-1) unstable; urgency=medium 182 | 183 | * New upstream version (local package). 184 | 185 | -- Patrick Schleizer Sun, 23 May 2021 18:47:07 +0000 186 | 187 | hardened-malloc (0:3.0.12-1) unstable; urgency=medium 188 | 189 | * New upstream version (local package). 190 | 191 | -- Patrick Schleizer Sat, 10 Apr 2021 21:23:03 +0000 192 | 193 | hardened-malloc (0:3.0.11-1) unstable; urgency=medium 194 | 195 | * New upstream version (local package). 196 | 197 | -- Patrick Schleizer Wed, 17 Mar 2021 15:31:34 +0000 198 | 199 | hardened-malloc (0:3.0.10-1) unstable; urgency=medium 200 | 201 | * New upstream version (local package). 202 | 203 | -- Patrick Schleizer Sun, 21 Feb 2021 16:31:03 +0000 204 | 205 | hardened-malloc (0:3.0.9-1) unstable; urgency=medium 206 | 207 | * New upstream version (local package). 208 | 209 | -- Patrick Schleizer Sat, 06 Feb 2021 11:29:17 +0000 210 | 211 | hardened-malloc (0:3.0.8-1) unstable; urgency=medium 212 | 213 | * New upstream version (local package). 214 | 215 | -- Patrick Schleizer Sat, 09 Jan 2021 04:51:35 +0000 216 | 217 | hardened-malloc (0:3.0.7-1) unstable; urgency=medium 218 | 219 | * New upstream version (local package). 220 | 221 | -- Patrick Schleizer Sun, 20 Dec 2020 12:56:30 +0000 222 | 223 | hardened-malloc (0:3.0.6-1) unstable; urgency=medium 224 | 225 | * New upstream version (local package). 
226 | 227 | -- Patrick Schleizer Tue, 15 Dec 2020 11:52:40 +0000 228 | 229 | hardened-malloc (0:3.0.5-1) unstable; urgency=medium 230 | 231 | * New upstream version (local package). 232 | 233 | -- Patrick Schleizer Fri, 13 Nov 2020 13:55:08 +0000 234 | 235 | hardened-malloc (0:3.0.4-1) unstable; urgency=medium 236 | 237 | * New upstream version (local package). 238 | 239 | -- Patrick Schleizer Thu, 08 Oct 2020 17:39:32 +0000 240 | 241 | hardened-malloc (0:3.0.3-1) unstable; urgency=medium 242 | 243 | * New upstream version (local package). 244 | 245 | -- Patrick Schleizer Thu, 08 Oct 2020 09:40:09 +0000 246 | 247 | hardened-malloc (0:3.0.2-1) unstable; urgency=medium 248 | 249 | * New upstream version (local package). 250 | 251 | -- Patrick Schleizer Wed, 07 Oct 2020 15:41:49 +0000 252 | 253 | hardened-malloc (0:3.0.1-1) unstable; urgency=medium 254 | 255 | * New upstream version (local package). 256 | 257 | -- Patrick Schleizer Wed, 07 Oct 2020 14:13:43 +0000 258 | 259 | hardened-malloc (0:3.0.0-1) unstable; urgency=medium 260 | 261 | * New upstream version (local package). 262 | 263 | -- Patrick Schleizer Sat, 19 Sep 2020 13:44:47 +0000 264 | 265 | hardened-malloc (0:2.0.5-1) unstable; urgency=medium 266 | 267 | * New upstream version (local package). 268 | 269 | -- Patrick Schleizer Sat, 19 Sep 2020 13:26:19 +0000 270 | 271 | hardened-malloc (0:2.0.4-1) unstable; urgency=medium 272 | 273 | * New upstream version (local package). 274 | 275 | -- Patrick Schleizer Thu, 16 Apr 2020 12:33:08 +0000 276 | 277 | hardened-malloc (0:2.0.3-1) unstable; urgency=medium 278 | 279 | * New upstream version (local package). 280 | 281 | -- Patrick Schleizer Thu, 02 Apr 2020 11:47:06 +0000 282 | 283 | hardened-malloc (0:2.0.2-1) unstable; urgency=medium 284 | 285 | * New upstream version (local package). 
286 | 287 | -- Patrick Schleizer Wed, 01 Apr 2020 14:40:53 +0000 288 | 289 | hardened-malloc (0:2.0.1-1) unstable; urgency=medium 290 | 291 | * New upstream version (local package). 292 | 293 | -- Patrick Schleizer Mon, 25 Nov 2019 08:44:22 +0000 294 | 295 | hardened-malloc (0:2.0.0-1) unstable; urgency=medium 296 | 297 | * New upstream version (local package). 298 | 299 | -- Patrick Schleizer Mon, 25 Nov 2019 08:35:39 +0000 300 | 301 | hardened-malloc (0:0.7.0-1) unstable; urgency=medium 302 | 303 | * New upstream version (local package). 304 | 305 | -- Patrick Schleizer Mon, 25 Nov 2019 07:33:40 +0000 306 | 307 | hardened-malloc (0:0.6-1) unstable; urgency=medium 308 | 309 | * New upstream version (local package). 310 | 311 | -- Patrick Schleizer Tue, 27 Aug 2019 08:56:09 +0000 312 | 313 | hardened-malloc (0:0.5-1) unstable; urgency=medium 314 | 315 | * New upstream version (local package). 316 | 317 | -- Patrick Schleizer Fri, 23 Aug 2019 16:54:48 +0000 318 | 319 | hardened-malloc (0:0.4-1) unstable; urgency=medium 320 | 321 | * New upstream version (local package). 322 | 323 | -- Patrick Schleizer Thu, 01 Aug 2019 11:41:37 +0000 324 | 325 | hardened-malloc (0:0.3-1) unstable; urgency=medium 326 | 327 | * New upstream version (local package). 328 | 329 | -- Patrick Schleizer Mon, 29 Jul 2019 13:09:15 +0000 330 | 331 | hardened-malloc (0:0.2-1) unstable; urgency=medium 332 | 333 | * New upstream version (local package). 334 | 335 | -- Patrick Schleizer Sun, 28 Jul 2019 19:30:10 +0000 336 | 337 | hardened-malloc (0:0.1-1) unstable; urgency=medium 338 | 339 | * New upstream version (local package). 340 | 341 | -- Patrick Schleizer Sun, 21 Jul 2019 00:19:12 +0000 342 | 343 | hardened-malloc (0:0.0-1) unstable; urgency=medium 344 | 345 | * Initial release (local package). 
346 | 347 | -- Patrick Schleizer Sat, 20 Jul 2019 23:55:32 +0000 348 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hardened malloc 2 | 3 | * [Introduction](#introduction) 4 | * [Dependencies](#dependencies) 5 | * [Testing](#testing) 6 | * [Individual Applications](#individual-applications) 7 | * [Automated Test Framework](#automated-test-framework) 8 | * [Compatibility](#compatibility) 9 | * [OS integration](#os-integration) 10 | * [Android-based operating systems](#android-based-operating-systems) 11 | * [Traditional Linux-based operating systems](#traditional-linux-based-operating-systems) 12 | * [Configuration](#configuration) 13 | * [Core design](#core-design) 14 | * [Security properties](#security-properties) 15 | * [Randomness](#randomness) 16 | * [Size classes](#size-classes) 17 | * [Scalability](#scalability) 18 | * [Small (slab) allocations](#small-slab-allocations) 19 | * [Thread caching (or lack thereof)](#thread-caching-or-lack-thereof) 20 | * [Large allocations](#large-allocations) 21 | * [Memory tagging](#memory-tagging) 22 | * [API extensions](#api-extensions) 23 | * [Stats](#stats) 24 | * [System calls](#system-calls) 25 | 26 | ## Introduction 27 | 28 | This is a security-focused general purpose memory allocator providing the 29 | malloc API along with various extensions. It provides substantial hardening 30 | against heap corruption vulnerabilities. The security-focused design also leads 31 | to much less metadata overhead and memory waste from fragmentation than a more 32 | traditional allocator design. It aims to provide decent overall performance 33 | with a focus on long-term performance and memory usage rather than allocator 34 | micro-benchmarks. It offers scalability via a configurable number of entirely 35 | independent arenas, with the internal locking within arenas further divided 36 | up per size class. 
37 | 38 | This project currently supports Bionic (Android), musl and glibc. It may 39 | support other non-Linux operating systems in the future. For Android, there's 40 | custom integration and other hardening features which is also planned for musl 41 | in the future. The glibc support will be limited to replacing the malloc 42 | implementation because musl is a much more robust and cleaner base to build on 43 | and can cover the same use cases. 44 | 45 | This allocator is intended as a successor to a previous implementation based on 46 | extending OpenBSD malloc with various additional security features. It's still 47 | heavily based on the OpenBSD malloc design, albeit not on the existing code 48 | other than reusing the hash table implementation. The main differences in the 49 | design are that it's solely focused on hardening rather than finding bugs, uses 50 | finer-grained size classes along with slab sizes going beyond 4k to reduce 51 | internal fragmentation, doesn't rely on the kernel having fine-grained mmap 52 | randomization and only targets 64-bit to make aggressive use of the large 53 | address space. There are lots of smaller differences in the implementation 54 | approach. It incorporates the previous extensions made to OpenBSD malloc 55 | including adding padding to allocations for canaries (distinct from the current 56 | OpenBSD malloc canaries), write-after-free detection tied to the existing 57 | clearing on free, queues alongside the existing randomized arrays for 58 | quarantining allocations and proper double-free detection for quarantined 59 | allocations. The per-size-class memory regions with their own random bases were 60 | loosely inspired by the size and type-based partitioning in PartitionAlloc. The 61 | planned changes to OpenBSD malloc ended up being too extensive and invasive so 62 | this project was started as a fresh implementation better able to accomplish 63 | the goals. 
For 32-bit, a port of OpenBSD malloc with small extensions can be 64 | used instead as this allocator fundamentally doesn't support that environment. 65 | 66 | ## Dependencies 67 | 68 | Debian stable (currently Debian 12) determines the most ancient set of 69 | supported dependencies: 70 | 71 | * glibc 2.36 72 | * Linux 6.1 73 | * Clang 14.0.6 or GCC 12.2.0 74 | 75 | For Android, the Linux GKI 5.10, 5.15 and 6.1 branches are supported. 76 | 77 | However, using more recent releases is highly recommended. Older versions of 78 | the dependencies may be compatible at the moment but are not tested and will 79 | explicitly not be supported. 80 | 81 | For external malloc replacement with musl, musl 1.1.20 is required. However, 82 | there will be custom integration offering better performance in the future 83 | along with other hardening for the C standard library implementation. 84 | 85 | For Android, only the current generation, actively developed maintenance branch of the Android 86 | Open Source Project will be supported, which currently means `android13-qpr2-release`. 87 | 88 | ## Testing 89 | 90 | ### Individual Applications 91 | 92 | The `preload.sh` script can be used for testing with dynamically linked 93 | executables using glibc or musl: 94 | 95 | ./preload.sh krita --new-image RGBA,U8,500,500 96 | 97 | It can be necessary to substantially increase the `vm.max_map_count` sysctl to 98 | accommodate the large number of mappings caused by guard slabs and large 99 | allocation guard regions. The number of mappings can also be drastically 100 | reduced via a significant increase to `CONFIG_GUARD_SLABS_INTERVAL` but the 101 | feature has a low performance and memory usage cost so that isn't recommended. 102 | 103 | It can offer slightly better performance when integrated into the C standard 104 | library and there are other opportunities for similar hardening within C 105 | standard library and dynamic linker implementations. 
For example, a library 106 | region can be implemented to offer similar isolation for dynamic libraries as 107 | this allocator offers across different size classes. The intention is that this 108 | will be offered as part of hardened variants of the Bionic and musl C standard 109 | libraries. 110 | 111 | ### Automated Test Framework 112 | 113 | A collection of simple, automated tests are provided and can be run with the 114 | make command as follows: 115 | 116 | make test 117 | 118 | ## Compatibility 119 | 120 | OpenSSH 8.1 or higher is required to allow the mprotect `PROT_READ|PROT_WRITE` 121 | system calls in the seccomp-bpf filter rather than killing the process. 122 | 123 | ## OS integration 124 | 125 | ### Android-based operating systems 126 | 127 | On GrapheneOS, hardened\_malloc is integrated into the standard C library as 128 | the standard malloc implementation. Other Android-based operating systems can 129 | reuse [the integration 130 | code](https://github.com/GrapheneOS/platform_bionic/commit/20160b81611d6f2acd9ab59241bebeac7cf1d71c) 131 | to provide it. If desired, jemalloc can be left as a runtime configuration 132 | option by only conditionally using hardened\_malloc to give users the choice 133 | between performance and security. However, this reduces security for threat 134 | models where persistent state is untrusted, i.e. verified boot and attestation 135 | (see the [attestation sister project](https://attestation.app/about)). 136 | 137 | Make sure to raise `vm.max_map_count` substantially too to accommodate the very 138 | large number of guard pages created by hardened\_malloc. This can be done in 139 | `init.rc` (`system/core/rootdir/init.rc`) near the other virtual memory 140 | configuration: 141 | 142 | write /proc/sys/vm/max_map_count 1048576 143 | 144 | This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large 145 | value in the build configuration. 
146 | 147 | ### Traditional Linux-based operating systems 148 | 149 | On traditional Linux-based operating systems, hardened\_malloc can either be 150 | integrated into the libc implementation as a replacement for the standard 151 | malloc implementation or loaded as a dynamic library. Rather than rebuilding 152 | each executable to be linked against it, it can be added as a preloaded 153 | library to `/etc/ld.so.preload`. For example, with `libhardened_malloc.so` 154 | installed to `/usr/local/lib/libhardened_malloc.so`, add that full path as a 155 | line to the `/etc/ld.so.preload` configuration file: 156 | 157 | /usr/local/lib/libhardened_malloc.so 158 | 159 | The format of this configuration file is a whitespace-separated list, so it's 160 | good practice to put each library on a separate line. 161 | 162 | Using the `LD_PRELOAD` environment variable to load it on a case-by-case basis 163 | will not work when `AT_SECURE` is set such as with setuid binaries. It's also 164 | generally not a recommended approach for production usage. The recommendation 165 | is to enable it globally and make exceptions for performance critical cases by 166 | running the application in a container / namespace without it enabled. 167 | 168 | Make sure to raise `vm.max_map_count` substantially too to accommodate the very 169 | large number of guard pages created by hardened\_malloc. As an example, in 170 | `/etc/sysctl.d/hardened_malloc.conf`: 171 | 172 | vm.max_map_count = 1048576 173 | 174 | This is unnecessary if you set `CONFIG_GUARD_SLABS_INTERVAL` to a very large 175 | value in the build configuration. 176 | 177 | On arm64, make sure your kernel is configured to use 4k pages since we haven't 178 | yet added support for 16k and 64k pages. The kernel also has to be configured 179 | to use 4 level page tables for the full 48 bit address space instead of only 180 | having a 39 bit address space for the default hardened\_malloc configuration. 
181 | It's possible to reduce the class region size substantially to make a 39 bit 182 | address space workable but the defaults won't work. 183 | 184 | ## Configuration 185 | 186 | You can set some configuration options at compile-time via arguments to the 187 | make command as follows: 188 | 189 | make CONFIG_EXAMPLE=false 190 | 191 | Configuration options are provided when there are significant compromises 192 | between portability, performance, memory usage or security. The core design 193 | choices are not configurable and the allocator remains very security-focused 194 | even with all the optional features disabled. 195 | 196 | The configuration system supports a configuration template system with two 197 | standard presets: the default configuration (`config/default.mk`) and a light 198 | configuration (`config/light.mk`). Packagers are strongly encouraged to ship 199 | both the standard `default` and `light` configuration. You can choose the 200 | configuration to build using `make VARIANT=light` where `make VARIANT=default` 201 | is the same as `make`. Non-default configuration templates will build a library 202 | with the suffix `-variant` such as `libhardened_malloc-light.so` and will use 203 | an `out-variant` directory instead of `out` for the build. 204 | 205 | The `default` configuration template has all normal optional security features 206 | enabled (just not the niche `CONFIG_SEAL_METADATA`) and is quite aggressive in 207 | terms of sacrificing performance and memory usage for security. The `light` 208 | configuration template disables the slab quarantines, write after free check, 209 | slot randomization and raises the guard slab interval from 1 to 8 but leaves 210 | zero-on-free and slab canaries enabled. The `light` configuration has solid 211 | performance and memory usage while still being far more secure than mainstream 212 | allocators with much better security properties. 
Disabling zero-on-free would 213 | gain more performance but doesn't make much difference for small allocations 214 | without also disabling slab canaries. Slab canaries slightly raise memory use 215 | and slightly slow down performance but are quite important to mitigate small 216 | overflows and C string overflows. Disabling slab canaries is not recommended 217 | in most cases since it would no longer be a strict upgrade over traditional 218 | allocators with headers on allocations and basic consistency checks for them. 219 | 220 | For reduced memory usage at the expense of performance (this will also reduce 221 | the size of the empty slab caches and quarantines, saving a lot of memory, 222 | since those are currently based on the size of the largest size class): 223 | 224 | make \ 225 | N_ARENA=1 \ 226 | CONFIG_EXTENDED_SIZE_CLASSES=false 227 | 228 | The following boolean configuration options are available: 229 | 230 | * `CONFIG_WERROR`: `true` (default) or `false` to control whether compiler 231 | warnings are treated as errors. This is highly recommended, but it can be 232 | disabled to avoid patching the Makefile if a compiler version not tested by 233 | the project is being used and has warnings. Investigating these warnings is 234 | still recommended and the intention is to always be free of any warnings. 235 | * `CONFIG_NATIVE`: `true` (default) or `false` to control whether the code is 236 | optimized for the detected CPU on the host. If this is disabled, setting up a 237 | custom `-march` higher than the baseline architecture is highly recommended 238 | due to substantial performance benefits for this code. 239 | * `CONFIG_CXX_ALLOCATOR`: `true` (default) or `false` to control whether the 240 | C++ allocator is replaced for slightly improved performance and detection of 241 | mismatched sizes for sized deallocation (often type confusion bugs). This 242 | will result in linking against the C++ standard library. 
243 | * `CONFIG_ZERO_ON_FREE`: `true` (default) or `false` to control whether small 244 | allocations are zeroed on free, to mitigate use-after-free and uninitialized 245 | use vulnerabilities along with purging lots of potentially sensitive data 246 | from the process as soon as possible. This has a performance cost scaling to 247 | the size of the allocation, which is usually acceptable. This is not relevant 248 | to large allocations because the pages are given back to the kernel. 249 | * `CONFIG_WRITE_AFTER_FREE_CHECK`: `true` (default) or `false` to control 250 | sanity checking that new small allocations contain zeroed memory. This can 251 | detect writes caused by a write-after-free vulnerability and mixes well with 252 | the features for making memory reuse randomized / delayed. This has a 253 | performance cost scaling to the size of the allocation, which is usually 254 | acceptable. This is not relevant to large allocations because they're always 255 | a fresh memory mapping from the kernel. 256 | * `CONFIG_SLOT_RANDOMIZE`: `true` (default) or `false` to randomize selection 257 | of free slots within slabs. This has a measurable performance cost and isn't 258 | one of the important security features, but the cost has been deemed more 259 | than acceptable to be enabled by default. 260 | * `CONFIG_SLAB_CANARY`: `true` (default) or `false` to enable support for 261 | adding 8 byte canaries to the end of memory allocations. The primary purpose 262 | of the canaries is to render small fixed size buffer overflows harmless by 263 | absorbing them. The first byte of the canary is always zero, containing 264 | overflows caused by a missing C string NUL terminator. The other 7 bytes are 265 | a per-slab random value. On free, integrity of the canary is checked to 266 | detect attacks like linear overflows or other forms of heap corruption caused 267 | by imprecise exploit primitives. 
However, checking on free will often be too 268 | late to prevent exploitation so it's not the main purpose of the canaries. 269 | * `CONFIG_SEAL_METADATA`: `true` or `false` (default) to control whether Memory 270 | Protection Keys are used to disable access to all writable allocator state 271 | outside of the memory allocator code. It's currently disabled by default due 272 | to a significant performance cost for this use case on current generation 273 | hardware, which may become drastically lower in the future. Whether or not 274 | this feature is enabled, the metadata is all contained within an isolated 275 | memory region with high entropy random guard regions around it. 276 | 277 | The following integer configuration options are available: 278 | 279 | * `CONFIG_SLAB_QUARANTINE_RANDOM_LENGTH`: `1` (default) to control the number 280 | of slots in the random array used to randomize reuse for small memory 281 | allocations. This sets the length for the largest size class (either 16kiB 282 | or 128kiB based on `CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length 283 | for smaller size classes is scaled to match the total memory of the 284 | quarantined allocations (1 becomes 1024 for 16 byte allocations with 16kiB 285 | as the largest size class, or 8192 with 128kiB as the largest). 286 | * `CONFIG_SLAB_QUARANTINE_QUEUE_LENGTH`: `1` (default) to control the number of 287 | slots in the queue used to delay reuse for small memory allocations. This 288 | sets the length for the largest size class (either 16kiB or 128kiB based on 289 | `CONFIG_EXTENDED_SIZE_CLASSES`) and the quarantine length for smaller size 290 | classes is scaled to match the total memory of the quarantined allocations (1 291 | becomes 1024 for 16 byte allocations with 16kiB as the largest size class, or 292 | 8192 with 128kiB as the largest). 
293 | * `CONFIG_GUARD_SLABS_INTERVAL`: `1` (default) to control the number of slabs 294 | before a slab is skipped and left as an unused memory protected guard slab. 295 | The default of `1` leaves a guard slab between every slab. This feature does 296 | not have a *direct* performance cost, but it makes the address space usage 297 | sparser which can indirectly hurt performance. The kernel also needs to track 298 | a lot more memory mappings, which uses a bit of extra memory and slows down 299 | memory mapping and memory protection changes in the process. The kernel uses 300 | O(log n) algorithms for this and system calls are already fairly slow anyway, 301 | so having many extra mappings doesn't usually add up to a significant cost. 302 | * `CONFIG_GUARD_SIZE_DIVISOR`: `2` (default) to control the maximum size of the 303 | guard regions placed on both sides of large memory allocations, relative to 304 | the usable size of the memory allocation. 305 | * `CONFIG_REGION_QUARANTINE_RANDOM_LENGTH`: `256` (default) to control the 306 | number of slots in the random array used to randomize region reuse for large 307 | memory allocations. 308 | * `CONFIG_REGION_QUARANTINE_QUEUE_LENGTH`: `1024` (default) to control the 309 | number of slots in the queue used to delay region reuse for large memory 310 | allocations. 311 | * `CONFIG_REGION_QUARANTINE_SKIP_THRESHOLD`: `33554432` (default) to control 312 | the size threshold where large allocations will not be quarantined. 313 | * `CONFIG_FREE_SLABS_QUARANTINE_RANDOM_LENGTH`: `32` (default) to control the 314 | number of slots in the random array used to randomize free slab reuse. 315 | * `CONFIG_CLASS_REGION_SIZE`: `34359738368` (default) to control the size of 316 | the size class regions. 317 | * `CONFIG_N_ARENA`: `4` (default) to control the number of arenas 318 | * `CONFIG_STATS`: `false` (default) to control whether stats on allocation / 319 | deallocation count and active allocations are tracked. 
See the [section on 320 | stats](#stats) for more details. 321 | * `CONFIG_EXTENDED_SIZE_CLASSES`: `true` (default) to control whether small 322 | size class go up to 128kiB instead of the minimum requirement for avoiding 323 | memory waste of 16kiB. The option to extend it even further will be offered 324 | in the future when better support for larger slab allocations is added. See 325 | the [section on size classes](#size-classes) below for details. 326 | * `CONFIG_LARGE_SIZE_CLASSES`: `true` (default) to control whether large 327 | allocations use the slab allocation size class scheme instead of page size 328 | granularity. See the [section on size classes](#size-classes) below for 329 | details. 330 | 331 | There will be more control over enabled features in the future along with 332 | control over fairly arbitrarily chosen values like the size of empty slab 333 | caches (making them smaller improves security and reduces memory usage while 334 | larger caches can substantially improves performance). 335 | 336 | ## Core design 337 | 338 | The core design of the allocator is very simple / minimalist. The allocator is 339 | exclusive to 64-bit platforms in order to take full advantage of the abundant 340 | address space without being constrained by needing to keep the design 341 | compatible with 32-bit. 342 | 343 | The mutable allocator state is entirely located within a dedicated metadata 344 | region, and the allocator is designed around this approach for both small 345 | (slab) allocations and large allocations. This provides reliable, deterministic 346 | protections against invalid free including double frees, and protects metadata 347 | from attackers. Traditional allocator exploitation techniques do not work with 348 | the hardened\_malloc implementation. 349 | 350 | Small allocations are always located in a large memory region reserved for slab 351 | allocations. 
On free, it can be determined that an allocation is one of the 352 | small size classes from the address range. If arenas are enabled, the arena is 353 | also determined from the address range as each arena has a dedicated sub-region 354 | in the slab allocation region. Arenas provide totally independent slab 355 | allocators with their own allocator state and no coordination between them. 356 | Once the base region is determined (simply the slab allocation region as a 357 | whole without any arenas enabled), the size class is determined from the 358 | address range too, since it's divided up into a sub-region for each size class. 359 | There's a top level slab allocation region, divided up into arenas, with each 360 | of those divided up into size class regions. The size class regions each have a 361 | random base within a large guard region. Once the size class is determined, the 362 | slab size is known, and the index of the slab is calculated and used to obtain 363 | the slab metadata for the slab from the slab metadata array. Finally, the index 364 | of the slot within the slab provides the index of the bit tracking the slot in 365 | the bitmap. Every slab allocation slot has a dedicated bit in a bitmap tracking 366 | whether it's free, along with a separate bitmap for tracking allocations in the 367 | quarantine. The slab metadata entries in the array have intrusive lists 368 | threaded through them to track partial slabs (partially filled, and these are 369 | the first choice for allocation), empty slabs (limited amount of cached free 370 | memory) and free slabs (purged / memory protected). 371 | 372 | Large allocations are tracked via a global hash table mapping their address to 373 | their size and random guard size. They're simply memory mappings and get mapped 374 | on allocation and then unmapped on free. 
Large allocations are the only dynamic 375 | memory mappings made by the allocator, since the address space for allocator 376 | state (including both small / large allocation metadata) and slab allocations 377 | is statically reserved. 378 | 379 | This allocator is aimed at production usage, not aiding with finding and fixing 380 | memory corruption bugs for software development. It does find many latent bugs 381 | but won't include features like the option of generating and storing stack 382 | traces for each allocation to include the allocation site in related error 383 | messages. The design choices are based around minimizing overhead and 384 | maximizing security which often leads to different decisions than a tool 385 | attempting to find bugs. For example, it uses zero-based sanitization on free 386 | and doesn't minimize slack space from size class rounding between the end of an 387 | allocation and the canary / guard region. Zero-based filling has the least 388 | chance of uncovering latent bugs, but also the best chance of mitigating 389 | vulnerabilities. The canary feature is primarily meant to act as padding 390 | absorbing small overflows to render them harmless, so slack space is helpful 391 | rather than harmful despite not detecting the corruption on free. The canary 392 | needs detection on free in order to have any hope of stopping other kinds of 393 | issues like a sequential overflow, which is why it's included. It's assumed 394 | that an attacker can figure out the allocator is in use so the focus is 395 | explicitly not on detecting bugs that are impossible to exploit with it in use 396 | like an 8 byte overflow. The design choices would be different if performance 397 | was a bit less important and if a core goal was finding latent bugs. 
398 | 399 | ## Security properties 400 | 401 | * Fully out-of-line metadata/state with protection from corruption 402 | * Address space for allocator state is entirely reserved during 403 | initialization and never reused for allocations or anything else 404 | * State within global variables is entirely read-only after initialization 405 | with pointers to the isolated allocator state so leaking the address of 406 | the library doesn't leak the address of writable state 407 | * Allocator state is located within a dedicated region with high entropy 408 | randomly sized guard regions around it 409 | * Protection via Memory Protection Keys (MPK) on x86\_64 (disabled by 410 | default due to low benefit-cost ratio on top of baseline protections) 411 | * [future] Protection via MTE on ARMv8.5+ 412 | * Deterministic detection of any invalid free (unallocated, unaligned, etc.) 413 | * Validation of the size passed for C++14 sized deallocation by `delete` 414 | even for code compiled with earlier standards (detects type confusion if 415 | the size is different) and by various containers using the allocator API 416 | directly 417 | * Isolated memory region for slab allocations 418 | * Top-level isolated regions for each arena 419 | * Divided up into isolated inner regions for each size class 420 | * High entropy random base for each size class region 421 | * No deterministic / low entropy offsets between allocations with 422 | different size classes 423 | * Metadata is completely outside the slab allocation region 424 | * No references to metadata within the slab allocation region 425 | * No deterministic / low entropy offsets to metadata 426 | * Entire slab region starts out non-readable and non-writable 427 | * Slabs beyond the cache limit are purged and become non-readable and 428 | non-writable memory again 429 | * Placed into a queue for reuse in FIFO order to maximize the time 430 | spent memory protected 431 | * Randomized array is used to add a random delay for reuse 
432 | * Fine-grained randomization within memory regions 433 | * Randomly sized guard regions for large allocations 434 | * Random slot selection within slabs 435 | * Randomized delayed free for small and large allocations along with slabs 436 | themselves 437 | * [in-progress] Randomized choice of slabs 438 | * [in-progress] Randomized allocation of slabs 439 | * Slab allocations are zeroed on free 440 | * Detection of write-after-free for slab allocations by verifying zero filling 441 | is intact at allocation time 442 | * Delayed free via a combination of FIFO and randomization for slab allocations 443 | * Large allocations are purged and memory protected on free with the memory 444 | mapping kept reserved in a quarantine to detect use-after-free 445 | * The quarantine is primarily based on a FIFO ring buffer, with the oldest 446 | mapping in the quarantine being unmapped to make room for the most 447 | recently freed mapping 448 | * Another layer of the quarantine swaps with a random slot in an array to 449 | randomize the number of large deallocations required to push mappings out 450 | of the quarantine 451 | * Memory in fresh allocations is consistently zeroed due to it either being 452 | fresh pages or zeroed on free after previous usage 453 | * Random canaries placed after each slab allocation to *absorb* 454 | and then later detect overflows/underflows 455 | * High entropy per-slab random values 456 | * Leading byte is zeroed to contain C string overflows 457 | * Possible slab locations are skipped and remain memory protected, leaving slab 458 | size class regions interspersed with guard pages 459 | * Zero size allocations are a dedicated size class with the entire region 460 | remaining non-readable and non-writable 461 | * Extension for retrieving the size of allocations with fallback to a sentinel 462 | for pointers not managed by the allocator [in-progress, full implementation 463 | needs to be ported from the previous OpenBSD malloc-based allocator] 
464 | * Can also return accurate values for pointers *within* small allocations 465 | * The same applies to pointers within the first page of large allocations, 466 | otherwise it currently has to return a sentinel 467 | * No alignment tricks interfering with ASLR like jemalloc, PartitionAlloc, etc. 468 | * No usage of the legacy brk heap 469 | * Aggressive sanity checks 470 | * Errors other than ENOMEM from mmap, munmap, mprotect and mremap treated 471 | as fatal, which can help to detect memory management gone wrong elsewhere 472 | in the process. 473 | * [future] Memory tagging for slab allocations via MTE on ARMv8.5+ 474 | * random memory tags as the baseline, providing probabilistic protection 475 | against various forms of memory corruption 476 | * dedicated tag for free slots, set on free, for deterministic protection 477 | against accessing freed memory 478 | * store previous random tag within freed slab allocations, and increment it 479 | to get the next tag for that slot to provide deterministic use-after-free 480 | detection through multiple cycles of memory reuse 481 | * guarantee distinct tags for adjacent memory allocations by incrementing 482 | past matching values for deterministic detection of linear overflows 483 | 484 | ## Randomness 485 | 486 | The current implementation of random number generation for randomization-based 487 | mitigations is based on generating a keystream from a stream cipher (ChaCha8) 488 | in small chunks. Separate CSPRNGs are used for each small size class in each 489 | arena, large allocations and initialization in order to fit into the 490 | fine-grained locking model without needing to waste memory per thread by 491 | having the CSPRNG state in Thread Local Storage. Similarly, it's protected via 492 | the same approach taken for the rest of the metadata. The stream cipher is 493 | regularly reseeded from the OS to provide backtracking and prediction 494 | resistance with a negligible cost. 
The reseed interval simply needs to be 495 | adjusted to the point that it stops registering as having any significant 496 | performance impact. The performance impact on recent Linux kernels is 497 | primarily from the high cost of system calls and locking since the 498 | implementation is quite efficient (ChaCha20), especially for just generating 499 | the key and nonce for another stream cipher (ChaCha8). 500 | 501 | ChaCha8 is a great fit because it's extremely fast across platforms without 502 | relying on hardware support or complex platform-specific code. The security 503 | margins of ChaCha20 would be completely overkill for the use case. Using 504 | ChaCha8 avoids needing to resort to a non-cryptographically secure PRNG or 505 | something without a lot of scrutiny. The current implementation is simply the 506 | reference implementation of ChaCha8 converted into a pure keystream by ripping 507 | out the XOR of the message into the keystream. 508 | 509 | The random range generation functions are a highly optimized implementation 510 | too. Traditional uniform random number generation within a range is very high 511 | overhead and can easily dwarf the cost of an efficient CSPRNG. 512 | 513 | ## Size classes 514 | 515 | The zero byte size class is a special case of the smallest regular size class. 516 | It's allocated in a dedicated region like other size classes but with the slabs 517 | never being made readable and writable so the only memory usage is for the slab 518 | metadata. 519 | 520 | The choice of size classes for slab allocation is the same as jemalloc, which 521 | is a careful balance between minimizing internal and external fragmentation. If 522 | there are more size classes, more memory is wasted on free slots available only 523 | to allocation requests of those sizes (external fragmentation). 
If there are 524 | fewer size classes, the spacing between them is larger and more memory is 525 | wasted due to rounding up to the size classes (internal fragmentation). There 526 | are 4 special size classes for the smallest sizes (16, 32, 48, 64) that are 527 | simply spaced out by the minimum spacing (16). Afterwards, there are four size 528 | classes for every power of two spacing which results in bounding the internal 529 | fragmentation below 20% for each size class. This also means there are 4 size 530 | classes for each doubling in size. 531 | 532 | The slot counts tied to the size classes are specific to this allocator rather 533 | than being taken from jemalloc. Slabs are always a span of pages so the slot 534 | count needs to be tuned to minimize waste due to rounding to the page size. For 535 | now, this allocator is set up only for 4096 byte pages as a small page size is 536 | desirable for finer-grained memory protection and randomization. It could be 537 | ported to larger page sizes in the future. The current slot counts are only a 538 | preliminary set of values. 
539 | 540 | | size class | worst case internal fragmentation | slab slots | slab size | internal fragmentation for slabs | 541 | | - | - | - | - | - | 542 | | 16 | 93.75% | 256 | 4096 | 0.0% | 543 | | 32 | 46.88% | 128 | 4096 | 0.0% | 544 | | 48 | 31.25% | 85 | 4096 | 0.390625% | 545 | | 64 | 23.44% | 64 | 4096 | 0.0% | 546 | | 80 | 18.75% | 51 | 4096 | 0.390625% | 547 | | 96 | 15.62% | 42 | 4096 | 1.5625% | 548 | | 112 | 13.39% | 36 | 4096 | 1.5625% | 549 | | 128 | 11.72% | 64 | 8192 | 0.0% | 550 | | 160 | 19.38% | 51 | 8192 | 0.390625% | 551 | | 192 | 16.15% | 64 | 12288 | 0.0% | 552 | | 224 | 13.84% | 54 | 12288 | 1.5625% | 553 | | 256 | 12.11% | 64 | 16384 | 0.0% | 554 | | 320 | 19.69% | 64 | 20480 | 0.0% | 555 | | 384 | 16.41% | 64 | 24576 | 0.0% | 556 | | 448 | 14.06% | 64 | 28672 | 0.0% | 557 | | 512 | 12.3% | 64 | 32768 | 0.0% | 558 | | 640 | 19.84% | 64 | 40960 | 0.0% | 559 | | 768 | 16.54% | 64 | 49152 | 0.0% | 560 | | 896 | 14.17% | 64 | 57344 | 0.0% | 561 | | 1024 | 12.4% | 64 | 65536 | 0.0% | 562 | | 1280 | 19.92% | 16 | 20480 | 0.0% | 563 | | 1536 | 16.6% | 16 | 24576 | 0.0% | 564 | | 1792 | 14.23% | 16 | 28672 | 0.0% | 565 | | 2048 | 12.45% | 16 | 32768 | 0.0% | 566 | | 2560 | 19.96% | 8 | 20480 | 0.0% | 567 | | 3072 | 16.63% | 8 | 24576 | 0.0% | 568 | | 3584 | 14.26% | 8 | 28672 | 0.0% | 569 | | 4096 | 12.48% | 8 | 32768 | 0.0% | 570 | | 5120 | 19.98% | 8 | 40960 | 0.0% | 571 | | 6144 | 16.65% | 8 | 49152 | 0.0% | 572 | | 7168 | 14.27% | 8 | 57344 | 0.0% | 573 | | 8192 | 12.49% | 8 | 65536 | 0.0% | 574 | | 10240 | 19.99% | 6 | 61440 | 0.0% | 575 | | 12288 | 16.66% | 5 | 61440 | 0.0% | 576 | | 14336 | 14.28% | 4 | 57344 | 0.0% | 577 | | 16384 | 12.49% | 4 | 65536 | 0.0% | 578 | 579 | The slab allocation size classes end at 16384 since that's the final size for 580 | 2048 byte spacing and the next spacing class matches the page size of 4096 581 | bytes on the target platforms. 
This is the minimum set of small size classes 582 | required to avoid substantial waste from rounding. 583 | 584 | The `CONFIG_EXTENDED_SIZE_CLASSES` option extends the size classes up to 585 | 131072, with a final spacing class of 16384. This offers improved performance 586 | compared to the minimum set of size classes. The security story is complicated, 587 | since the slab allocation has both advantages like size class isolation 588 | completely avoiding reuse of any of the address space for any other size 589 | classes or other data. It also has disadvantages like caching a small number of 590 | empty slabs and deterministic guard sizes. The cache will be configurable in 591 | the future, making it possible to disable slab caching for the largest slab 592 | allocation sizes, to force unmapping them immediately and putting them in the 593 | slab quarantine, which eliminates most of the security disadvantage at the 594 | expense of also giving up most of the performance advantage, but while 595 | retaining the isolation. 596 | 597 | | size class | worst case internal fragmentation | slab slots | slab size | internal fragmentation for slabs | 598 | | - | - | - | - | - | 599 | | 20480 | 20.0% | 1 | 20480 | 0.0% | 600 | | 24576 | 16.66% | 1 | 24576 | 0.0% | 601 | | 28672 | 14.28% | 1 | 28672 | 0.0% | 602 | | 32768 | 12.5% | 1 | 32768 | 0.0% | 603 | | 40960 | 20.0% | 1 | 40960 | 0.0% | 604 | | 49152 | 16.66% | 1 | 49152 | 0.0% | 605 | | 57344 | 14.28% | 1 | 57344 | 0.0% | 606 | | 65536 | 12.5% | 1 | 65536 | 0.0% | 607 | | 81920 | 20.0% | 1 | 81920 | 0.0% | 608 | | 98304 | 16.67% | 1 | 98304 | 0.0% | 609 | | 114688 | 14.28% | 1 | 114688 | 0.0% | 610 | | 131072 | 12.5% | 1 | 131072 | 0.0% | 611 | 612 | The `CONFIG_LARGE_SIZE_CLASSES` option controls whether large allocations use 613 | the same size class scheme providing 4 size classes for every doubling of size. 
614 | It increases virtual memory consumption but drastically improves performance 615 | where realloc is used without proper growth factors, which is fairly common and 616 | destroys performance in some commonly used programs. If large size classes are 617 | disabled, the granularity is instead the page size, which is currently always 618 | 4096 bytes on supported platforms. 619 | 620 | ## Scalability 621 | 622 | ### Small (slab) allocations 623 | 624 | As a baseline form of fine-grained locking, the slab allocator has entirely 625 | separate allocators for each size class. Each size class has a dedicated lock, 626 | CSPRNG and other state. 627 | 628 | The slab allocator's scalability primarily comes from dividing up the slab 629 | allocation region into independent arenas assigned to threads. The arenas are 630 | just entirely separate slab allocators with their own sub-regions for each size 631 | class. Using 4 arenas reserves a region 4 times as large and the relevant slab 632 | allocator metadata is determined based on address, as part of the same approach 633 | to finding the per-size-class metadata. The part that's still open to different 634 | design choices is how arenas are assigned to threads. One approach is 635 | statically assigning arenas via round-robin like the standard jemalloc 636 | implementation, or statically assigning to a random arena which is essentially 637 | the current implementation. Another option is dynamic load balancing via a 638 | heuristic like `sched_getcpu` for per-CPU arenas, which would offer better 639 | performance than randomly choosing an arena each time while being more 640 | predictable for an attacker. There are actually some security benefits from 641 | this assignment being completely static, since it isolates threads from each 642 | other. Static assignment can also reduce memory usage since threads may have 643 | varying usage of size classes. 
644 | 645 | When there's substantial allocation or deallocation pressure, the allocator 646 | does end up calling into the kernel to purge / protect unused slabs by 647 | replacing them with fresh `PROT_NONE` regions along with unprotecting slabs 648 | when partially filled and cached empty slabs are depleted. There will be 649 | configuration over the amount of cached empty slabs, but it's not entirely a 650 | performance vs. memory trade-off since memory protecting unused slabs is a nice 651 | opportunistic boost to security. However, it's not really part of the core 652 | security model or features so it's quite reasonable to use much larger empty 653 | slab caches when the memory usage is acceptable. It would also be reasonable to 654 | attempt to use heuristics for dynamically tuning the size, but there's not a 655 | great one size fits all approach so it isn't currently part of this allocator 656 | implementation. 657 | 658 | #### Thread caching (or lack thereof) 659 | 660 | Thread caches are a commonly implemented optimization in modern allocators but 661 | aren't very suitable for a hardened allocator even when implemented via arrays 662 | like jemalloc rather than free lists. They would prevent the allocator from 663 | having perfect knowledge about which memory is free in a way that's both race 664 | free and works with fully out-of-line metadata. It would also interfere with 665 | the quality of fine-grained randomization even with randomization support in 666 | the thread caches. The caches would also end up with much weaker protection 667 | than the dedicated metadata region. Potentially worst of all, it's inherently 668 | incompatible with the important quarantine feature. 669 | 670 | The primary benefit from a thread cache is performing batches of allocations 671 | and batches of deallocations to amortize the cost of the synchronization used 672 | by locking. The issue is not contention but rather the cost of synchronization 673 | itself. 
Performing operations in large batches isn't necessarily a good thing 674 | in terms of reducing contention to improve scalability. Large thread caches 675 | like TCMalloc are a legacy design choice and aren't a good approach for a 676 | modern allocator. In jemalloc, thread caches are fairly small and have a form 677 | of garbage collection to clear them out when they aren't being heavily used. 678 | Since this is a hardened allocator with a bunch of small costs for the security 679 | features, the synchronization is already a smaller percentage of the overall 680 | time compared to a much leaner performance-oriented allocator. These benefits 681 | could be obtained via allocation queues and deallocation queues which would 682 | avoid bypassing the quarantine and wouldn't have as much of an impact on 683 | randomization. However, deallocation queues would also interfere with having 684 | global knowledge about what is free. An allocation queue alone wouldn't have 685 | many drawbacks, but it isn't currently planned even as an optional feature 686 | since it probably wouldn't be enabled by default and isn't worth the added 687 | complexity. 688 | 689 | The secondary benefit of thread caches is being able to avoid the underlying 690 | allocator implementation entirely for some allocations and deallocations when 691 | they're mixed together rather than many allocations being done together or many 692 | frees being done together. The value of this depends a lot on the application 693 | and it's entirely unsuitable / incompatible with a hardened allocator since it 694 | bypasses all of the underlying security and would destroy much of the security 695 | value. 696 | 697 | ### Large allocations 698 | 699 | The expectation is that the allocator does not need to perform well for large 700 | allocations, especially in terms of scalability. 
When the performance for large 701 | allocations isn't good enough, the approach will be to enable more slab 702 | allocation size classes. Doubling the maximum size of slab allocations only 703 | requires adding 4 size classes while keeping internal waste bounded below 20%. 704 | 705 | Large allocations are implemented as a wrapper on top of the kernel memory 706 | mapping API. The addresses and sizes are tracked in a global data structure 707 | with a global lock. The current implementation is a hash table and could easily 708 | use fine-grained locking, but it would have little benefit since most of the 709 | locking is in the kernel. Most of the contention will be on the `mmap_sem` lock 710 | for the process in the kernel. Ideally, it could simply map memory when 711 | allocating and unmap memory when freeing. However, this is a hardened allocator 712 | and the security features require extra system calls due to lack of direct 713 | support for this kind of hardening in the kernel. Randomly sized guard regions 714 | are placed around each allocation which requires mapping a `PROT_NONE` region 715 | including the guard regions and then unprotecting the usable area between them. 716 | The quarantine implementation requires clobbering the mapping with a fresh 717 | `PROT_NONE` mapping using `MAP_FIXED` on free to hold onto the region while 718 | it's in the quarantine, until it's eventually unmapped when it's pushed out of 719 | the quarantine. This means there are 2x as many system calls for allocating and 720 | freeing as there would be if the kernel supported these features directly. 721 | 722 | ## Memory tagging 723 | 724 | Integrating extensive support for ARMv8.5 memory tagging is planned and this 725 | section will be expanded to cover the details on the chosen design. The approach 726 | for slab allocations is currently covered, but it can also be used for the 727 | allocator metadata region and large allocations. 
728 | 729 | Memory allocations are already always multiples of naturally aligned 16 byte 730 | units, so memory tags are a natural fit into a malloc implementation due to the 731 | 16 byte alignment requirement. The only extra memory consumption will come from 732 | the hardware supported storage for the tag values (4 bits per 16 bytes). 733 | 734 | The baseline policy will be to generate random tags for each slab allocation 735 | slot on first use. The highest value will be reserved for marking freed memory 736 | allocations to detect any accesses to freed memory so it won't be part of the 737 | generated range. Adjacent slots will be guaranteed to have distinct memory tags 738 | in order to guarantee that linear overflows are detected. There are a few ways 739 | of implementing this and it will end up depending on the performance costs of 740 | different approaches. If there's an efficient way to fetch the adjacent tag 741 | values without wasting extra memory, it will be possible to check for them and 742 | skip them either by generating a new random value in a loop or incrementing 743 | past them since the tiny bit of bias wouldn't matter. Another approach would be 744 | alternating odd and even tag values but that would substantially reduce the 745 | overall randomness of the tags and there's very little entropy from the start. 746 | 747 | Once a slab allocation has been freed, the tag will be set to the reserved 748 | value for free memory and the previous tag value will be stored inside the 749 | allocation itself. The next time the slot is allocated, the chosen tag value 750 | will be the previous value incremented by one to provide use-after-free 751 | detection between generations of allocations. The stored tag will be wiped 752 | before retagging the memory, to avoid leaking it and as part of preserving the 753 | security property of newly allocated memory being zeroed due to zero-on-free. 
754 | It will eventually wrap all the way around, but this ends up providing a strong 755 | guarantee for many allocation cycles due to the combination of 4 bit tags with 756 | the FIFO quarantine feature providing delayed free. It also benefits from 757 | random slot allocation and the randomized portion of delayed free, which result 758 | in a further delay along with preventing a deterministic bypass by forcing a 759 | reuse after a certain number of allocation cycles. Similarly to the initial tag 760 | generation, tag values for adjacent allocations will be skipped by incrementing 761 | past them. 762 | 763 | For example, consider this slab of allocations that are not yet used with 15 764 | representing the tag for free memory. For the sake of simplicity, there will be 765 | no quarantine or other slabs for this example: 766 | 767 | | 15 | 15 | 15 | 15 | 15 | 15 | 768 | 769 | Three slots are randomly chosen for allocations, with random tags assigned (2, 770 | 7, 14) since these slots haven't ever been used and don't have saved values: 771 | 772 | | 15 | 2 | 15 | 7 | 14 | 15 | 773 | 774 | The 2nd allocation slot is freed, and is set back to the tag for free memory 775 | (15), but with the previous tag value stored in the freed space: 776 | 777 | | 15 | 15 | 15 | 7 | 14 | 15 | 778 | 779 | The first slot is allocated for the first time, receiving the random value 3: 780 | 781 | | 3 | 15 | 15 | 7 | 14 | 15 | 782 | 783 | The 2nd slot is randomly chosen again, so the previous tag (2) is retrieved and 784 | incremented to 3 as part of the use-after-free mitigation. An adjacent 785 | allocation already uses the tag 3, so the tag is further incremented to 4 (it 786 | would be incremented to 5 if one of the adjacent tags was 4): 787 | 788 | | 3 | 4 | 15 | 7 | 14 | 15 | 789 | 790 | The last slot is randomly chosen for the next allocation, and is assigned the 791 | random value 14. 
However, it's placed next to an allocation with the tag 14 so
792 | the tag is incremented and wraps around to 0:
793 | 
794 | | 3 | 4 | 15 | 7 | 14 | 0 |
795 | 
796 | ## API extensions
797 | 
798 | The `void free_sized(void *ptr, size_t expected_size)` function exposes the
799 | sized deallocation sanity checks for C. A performance-oriented allocator could
800 | use the same API as an optimization to avoid a potential cache miss from
801 | reading the size from metadata.
802 | 
803 | The `size_t malloc_object_size(void *ptr)` function returns an *upper bound* on
804 | the accessible size of the relevant object (if any) by querying the malloc
805 | implementation. It's similar to the `__builtin_object_size` intrinsic used by
806 | `_FORTIFY_SOURCE` but via dynamically querying the malloc implementation rather
807 | than determining constant sizes at compile-time. The current implementation is
808 | just a naive placeholder returning much looser upper bounds than the intended
809 | implementation. It's a valid implementation of the API already, but it will
810 | become fully accurate once it's finished. This function is **not** currently
811 | safe to call from signal handlers, but another API will be provided to make
812 | that possible with a compile-time configuration option to avoid the necessary
813 | overhead if the functionality isn't being used (in a way that doesn't break
814 | API compatibility based on the configuration).
815 | 
816 | The `size_t malloc_object_size_fast(void *ptr)` is comparable, but avoids
817 | expensive operations like locking or even atomics. It provides significantly
818 | less useful results, falling back to higher upper bounds, but is very fast. In
819 | this implementation, it retrieves an upper bound on the size for small memory
820 | allocations based on calculating the size class region. This function is safe
821 | to use from signal handlers already.
822 | 
823 | ## Stats
824 | 
825 | If stats are enabled, hardened\_malloc keeps track of allocator statistics in
826 | order to provide implementations of `mallinfo` and `malloc_info`.
827 | 
828 | On Android, `mallinfo` is used for [mallinfo-based garbage collection
829 | triggering](https://developer.android.com/preview/features#mallinfo) so
830 | hardened\_malloc enables `CONFIG_STATS` by default. The `malloc_info`
831 | implementation on Android is the standard one in Bionic, with the information
832 | provided to Bionic via Android's internal extended `mallinfo` API with support
833 | for arenas and size class bins. This means the `malloc_info` output is fully
834 | compatible, including still having `jemalloc-1` as the version of the data
835 | format to retain compatibility with existing tooling.
836 | 
837 | On non-Android Linux, `mallinfo` has zeroed fields even with `CONFIG_STATS`
838 | enabled because glibc `mallinfo` is inherently broken. It defines the fields as
839 | `int` instead of `size_t`, resulting in undefined signed overflows. It also
840 | misuses the fields and provides a strange, idiosyncratic set of values rather
841 | than following the SVID/XPG `mallinfo` definition. The `malloc_info` function
842 | is still provided, with a similar format as what Android uses, with tweaks for
843 | hardened\_malloc and the version set to `hardened_malloc-1`. The data format
844 | may be changed in the future.
845 | 
846 | As an example, consider the following program from the hardened\_malloc tests:
847 | 
848 | ```c
849 | #include <pthread.h>
850 | 
851 | #include <malloc.h>
852 | 
853 | __attribute__((optimize(0)))
854 | void leak_memory(void) {
855 |     (void)malloc(1024 * 1024 * 1024);
856 |     (void)malloc(16);
857 |     (void)malloc(32);
858 |     (void)malloc(4096);
859 | }
860 | 
861 | void *do_work(void *p) {
862 |     leak_memory();
863 |     return NULL;
864 | }
865 | 
866 | int main(void) {
867 |     pthread_t thread[4];
868 |     for (int i = 0; i < 4; i++) {
869 |         pthread_create(&thread[i], NULL, do_work, NULL);
870 |     }
871 |     for (int i = 0; i < 4; i++) {
872 |         pthread_join(thread[i], NULL);
873 |     }
874 | 
875 |     malloc_info(0, stdout);
876 | }
877 | ```
878 | 
879 | This produces the following output when piped through `xmllint --format -`:
880 | 
881 | ```xml
882 | <?xml version="1.0" encoding="UTF-8"?>
883 | <malloc version="hardened_malloc-1">
884 |   <heap nr="0">
885 |     <bin nr="2">
886 |       <nmalloc>1</nmalloc>
887 |       <ndalloc>0</ndalloc>
888 |       <slab_allocated>4096</slab_allocated>
889 |       <allocated>32</allocated>
890 |     </bin>
891 |     <bin nr="3">
892 |       <nmalloc>1</nmalloc>
893 |       <ndalloc>0</ndalloc>
894 |       <slab_allocated>4096</slab_allocated>
895 |       <allocated>48</allocated>
896 |     </bin>
897 |     <bin nr="13">
898 |       <nmalloc>4</nmalloc>
899 |       <ndalloc>0</ndalloc>
900 |       <slab_allocated>20480</slab_allocated>
901 |       <allocated>1280</allocated>
902 |     </bin>
903 |     <bin nr="29">
904 |       <nmalloc>2</nmalloc>
905 |       <ndalloc>0</ndalloc>
906 |       <slab_allocated>40960</slab_allocated>
907 |       <allocated>10240</allocated>
908 |     </bin>
909 |     <bin nr="45">
910 |       <nmalloc>1</nmalloc>
911 |       <ndalloc>0</ndalloc>
912 |       <slab_allocated>81920</slab_allocated>
913 |       <allocated>81920</allocated>
914 |     </bin>
915 |   </heap>
916 |   <heap nr="1">
917 |     <bin nr="2">
918 |       <nmalloc>1</nmalloc>
919 |       <ndalloc>0</ndalloc>
920 |       <slab_allocated>4096</slab_allocated>
921 |       <allocated>32</allocated>
922 |     </bin>
923 |     <bin nr="3">
924 |       <nmalloc>1</nmalloc>
925 |       <ndalloc>0</ndalloc>
926 |       <slab_allocated>4096</slab_allocated>
927 |       <allocated>48</allocated>
928 |     </bin>
929 |     <bin nr="29">
930 |       <nmalloc>1</nmalloc>
931 |       <ndalloc>0</ndalloc>
932 |       <slab_allocated>40960</slab_allocated>
933 |       <allocated>5120</allocated>
934 |     </bin>
935 |   </heap>
936 |   <heap nr="2">
937 |     <bin nr="2">
938 |       <nmalloc>1</nmalloc>
939 |       <ndalloc>0</ndalloc>
940 |       <slab_allocated>4096</slab_allocated>
941 |       <allocated>32</allocated>
942 |     </bin>
943 |     <bin nr="3">
944 |       <nmalloc>1</nmalloc>
945 |       <ndalloc>0</ndalloc>
946 |       <slab_allocated>4096</slab_allocated>
947 |       <allocated>48</allocated>
948 |     </bin>
949 |     <bin nr="29">
950 |       <nmalloc>1</nmalloc>
951 |       <ndalloc>0</ndalloc>
952 |       <slab_allocated>40960</slab_allocated>
953 |       <allocated>5120</allocated>
954 |     </bin>
955 |   </heap>
956 |   <heap nr="3">
957 |     <bin nr="2">
958 |       <nmalloc>1</nmalloc>
959 |       <ndalloc>0</ndalloc>
960 |       <slab_allocated>4096</slab_allocated>
961 |       <allocated>32</allocated>
962 |     </bin>
963 |     <bin nr="3">
964 |       <nmalloc>1</nmalloc>
965 |       <ndalloc>0</ndalloc>
966 |       <slab_allocated>4096</slab_allocated>
967 |       <allocated>48</allocated>
968 |     </bin>
969 |     <bin nr="29">
970 |       <nmalloc>1</nmalloc>
971 |       <ndalloc>0</ndalloc>
972 |       <slab_allocated>40960</slab_allocated>
973 |       <allocated>5120</allocated>
974 |     </bin>
975 |   </heap>
976 |   <heap nr="4">
977 |     <allocated_large>4294967296</allocated_large>
978 |   </heap>
979 | </malloc>
980 | ```
981 | 
982 | The heap entries correspond to the arenas. Unlike jemalloc, hardened\_malloc
983 | doesn't handle large allocations within the arenas, so it presents those in the
984 | `malloc_info` statistics as a separate arena dedicated to large allocations.
985 | For example, with 4 arenas enabled, there will be a 5th arena in the statistics
986 | for the large allocations.
987 | 988 | The `nmalloc` / `ndalloc` fields are 64-bit integers tracking allocation and 989 | deallocation count. These are defined as wrapping on overflow, per the jemalloc 990 | implementation. 991 | 992 | See the [section on size classes](#size-classes) to map the size class bin 993 | number to the corresponding size class. The bin index begins at 0, mapping to 994 | the 0 byte size class, followed by 1 for the 16 bytes, 2 for 32 bytes, etc. and 995 | large allocations are treated as one group. 996 | 997 | When stats aren't enabled, the `malloc_info` output will be an empty `malloc` 998 | element. 999 | 1000 | ## System calls 1001 | 1002 | This is intended to aid with creating system call whitelists via seccomp-bpf 1003 | and will change over time. 1004 | 1005 | System calls used by all build configurations: 1006 | 1007 | * `futex(uaddr, FUTEX_WAIT_PRIVATE, val, NULL)` (via `pthread_mutex_lock`) 1008 | * `futex(uaddr, FUTEX_WAKE_PRIVATE, val)` (via `pthread_mutex_unlock`) 1009 | * `getrandom(buf, buflen, 0)` (to seed and regularly reseed the CSPRNG) 1010 | * `mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)` 1011 | * `mmap(ptr, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0)` 1012 | * `mprotect(ptr, size, PROT_READ)` 1013 | * `mprotect(ptr, size, PROT_READ|PROT_WRITE)` 1014 | * `mremap(old, old_size, new_size, 0)` 1015 | * `mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new)` 1016 | * `munmap` 1017 | * `write(STDERR_FILENO, buf, len)` (before aborting due to memory corruption) 1018 | * `madvise(ptr, size, MADV_DONTNEED)` 1019 | 1020 | The main distinction from a typical malloc implementation is the use of 1021 | getrandom. A common compatibility issue is that existing system call whitelists 1022 | often omit getrandom partly due to older code using the legacy `/dev/urandom` 1023 | interface along with the overall lack of security features in mainstream libc 1024 | implementations. 
1025 | 1026 | Additional system calls when `CONFIG_SEAL_METADATA=true` is set: 1027 | 1028 | * `pkey_alloc` 1029 | * `pkey_mprotect` instead of `mprotect` with an additional `pkey` parameter, 1030 | but otherwise the same (regular `mprotect` is never called) 1031 | 1032 | Additional system calls for Android builds with `LABEL_MEMORY`: 1033 | 1034 | * `prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, size, name)` 1035 | --------------------------------------------------------------------------------