├── .gitmodules ├── misc ├── run_gdb.sh ├── commands.gdb └── debug_ld.gdb ├── .clang-format ├── run_fuzz_tests.sh ├── run_tests.sh ├── Makefile ├── run_perf_tests.sh ├── include └── mapguard.h ├── README.md ├── tests ├── mapguard_thread_test.c ├── mapguard_perf_test.c └── mapguard_test.c ├── LICENSE └── src └── mapguard.c /.gitmodules: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /misc/run_gdb.sh: -------------------------------------------------------------------------------- 1 | gdb -x debug_ld.gdb build/mapguard_test 2 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | { 2 | BasedOnStyle: llvm, 3 | SpaceBeforeParens: Never, 4 | IndentWidth: 4, 5 | ColumnLimit: 0, 6 | SpaceAfterCStyleCast: true} 7 | -------------------------------------------------------------------------------- /misc/commands.gdb: -------------------------------------------------------------------------------- 1 | set env MG_PANIC_ON_VIOLATION=0 2 | set env MG_USE_MAPPING_CACHE=1 3 | set env MG_PREVENT_RWX=1 4 | set env MG_PREVENT_TRANSITION_FROM_X=1 5 | set env MG_PREVENT_STATIC_ADDRESS=1 6 | set env MG_ENABLE_GUARD_PAGES=1 7 | set env MG_PREVENT_X_TRANSITION=1 8 | set env MG_POISON_ON_ALLOCATION=1 9 | set env MG_ENABLE_SYSLOG=1 10 | set env LD_LIBRARY_PATH=build/ 11 | set env LD_PRELOAD=build/libmapguard.so 12 | r 13 | i r 14 | x/i $rip 15 | bt 16 | info locals 17 | -------------------------------------------------------------------------------- /misc/debug_ld.gdb: -------------------------------------------------------------------------------- 1 | set pagination off 2 | set breakpoint pending on 3 | set logging enabled off 4 | set verbose off 5 | set trace-commands off 6 | 7 | set environ MG_USE_MAPPING_CACHE=1 8 | break main 9 | r 10 | break 
munmap 11 | c 12 | 13 | # Break just before the call to 14 | # the unmap_guard_pages 15 | break *munmap+604 16 | c 17 | # We need the second 'c' 18 | # because otherwise it is on the plt given 19 | # it is the first call 20 | c 21 | 22 | # This is the PLT stub break 23 | # for the call to unmap_guard_pages 24 | break *0x7ffff7fa4120 25 | 26 | # This continues and should stop in the 27 | # PLT stub 28 | c 29 | disas 30 | -------------------------------------------------------------------------------- /run_fuzz_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | ## Copyright Chris Rohlf - 2025 3 | 4 | make tests 5 | 6 | tests=("mapguard_test" "mapguard_thread_test") 7 | 8 | export MG_USE_MAPPING_CACHE=1 9 | 10 | while true; do 11 | # Randomize environment variables 12 | env_vars=( 13 | MG_PANIC_ON_VIOLATION 14 | MG_PREVENT_RWX 15 | MG_PREVENT_TRANSITION_FROM_X 16 | MG_PREVENT_TRANSITION_TO_X 17 | MG_PREVENT_STATIC_ADDRESS 18 | MG_ENABLE_GUARD_PAGES 19 | MG_POISON_ON_ALLOCATION 20 | MG_ENABLE_SYSLOG 21 | ) 22 | 23 | for var in "${env_vars[@]}"; do 24 | export "$var=$((RANDOM % 2))" 25 | done 26 | 27 | # Ensure correct library path 28 | export LD_LIBRARY_PATH=build/ 29 | 30 | # Run each test until we detect a segfault 31 | for t in "${tests[@]}"; do 32 | ./build/$t 33 | ret=$? 34 | if [ "$ret" -eq 139 ]; then 35 | echo "Segmentation fault detected in $t. Exiting."
36 | for var in "${env_vars[@]}"; do 37 | env | grep $var 38 | done 39 | echo build/$t 40 | exit 1 41 | fi 42 | done 43 | done -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## This runs all unit tests and reports the results to stdout 4 | ## Copyright Chris Rohlf - 2025 5 | 6 | make tests 7 | 8 | export MG_PANIC_ON_VIOLATION=0 9 | export MG_USE_MAPPING_CACHE=1 10 | export MG_PREVENT_RWX=1 11 | export MG_PREVENT_TRANSITION_FROM_X=1 12 | export MG_PREVENT_STATIC_ADDRESS=1 13 | export MG_ENABLE_GUARD_PAGES=1 14 | export MG_PREVENT_X_TRANSITION=1 15 | export MG_POISON_ON_ALLOCATION=1 16 | export MG_ENABLE_SYSLOG=0 17 | 18 | tests=("mapguard_test" "mapguard_thread_test") 19 | failure=0 20 | succeeded=0 21 | 22 | mmap_min_addr=`sysctl vm.mmap_min_addr |cut -f3 -d" "` 23 | 24 | if [ "$mmap_min_addr" -ne "0" ] 2>/dev/null 25 | then 26 | echo "vm.mmap_min_addr should be 0 for some of the tests to work" 27 | fi 28 | 29 | for t in "${tests[@]}"; do 30 | echo -n "Running $t test" 31 | echo "Running $t test" >> test_output.txt 2>&1 32 | LD_PRELOAD=build/libmapguard.so build/$t >> test_output.txt 2>&1 33 | ret=$? 34 | 35 | if [ $ret -ne 0 ]; then 36 | echo "... Failed" 37 | echo "... Failed" >> test_output.txt 2>&1 38 | failure=$((failure+1)) 39 | else 40 | echo "... Succeeded" 41 | echo "... 
Succeeded" >> test_output.txt 2>&1 42 | succeeded=$((succeeded+1)) 43 | fi 44 | done 45 | 46 | unset MG_PANIC_ON_VIOLATION 47 | unset MG_USE_MAPPING_CACHE 48 | unset MG_PREVENT_RWX 49 | unset MG_PREVENT_TRANSITION_FROM_X 50 | unset MG_PREVENT_STATIC_ADDRESS 51 | unset MG_ENABLE_GUARD_PAGES 52 | unset MG_PREVENT_X_TRANSITION 53 | unset MG_POISON_ON_ALLOCATION 54 | unset MG_ENABLE_SYSLOG 55 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ## Map Guard Makefile 2 | 3 | CC = clang 4 | SHELL := /bin/bash 5 | 6 | ## Support for multithreaded programs 7 | THREADS = -DTHREAD_SUPPORT=1 8 | 9 | CFLAGS = -Wall -std=c11 -O2 10 | EXE_CFLAGS = -fPIE -pie 11 | DEBUG_FLAGS = -DDEBUG -ggdb 12 | LIBRARY = -fPIC -shared -ldl 13 | ASAN = -fsanitize=address 14 | TEST_FLAGS = 15 | SRC = src 16 | INCLUDE = include 17 | TEST_SRC = tests 18 | SRC_FILES = *.c 19 | BUILD_DIR = build 20 | STRIP = strip -s $(BUILD_DIR)/libmapguard.so 21 | 22 | UNAME := $(shell uname) 23 | ifeq ($(UNAME), Darwin) 24 | $(error MacOS not supported, mapguard is Linux only) 25 | endif 26 | 27 | ifeq ($(THREADS), -DTHREAD_SUPPORT=1) 28 | CFLAGS += -lpthread $(THREADS) 29 | endif 30 | 31 | all: library tests 32 | 33 | ## Build the library 34 | library: clean 35 | @echo "make clean" 36 | mkdir -p $(BUILD_DIR)/ 37 | $(CC) $(CFLAGS) $(LIBRARY) $(SRC)/$(SRC_FILES) -I $(INCLUDE) -o $(BUILD_DIR)/libmapguard.so 38 | $(STRIP) 39 | 40 | ## Build a debug version of the library 41 | library_debug: clean 42 | @echo "make library_debug" 43 | mkdir -p $(BUILD_DIR)/ 44 | $(CC) $(CFLAGS) $(LIBRARY) $(DEBUG_FLAGS) $(SRC)/$(SRC_FILES) -I $(INCLUDE) -o $(BUILD_DIR)/libmapguard.so 45 | 46 | ## Build the unit tests 47 | tests: clean library_debug 48 | @echo "make tests" 49 | mkdir -p $(BUILD_DIR)/ 50 | $(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) $(TEST_SRC)/mapguard_test.c -I $(INCLUDE) -o 
$(BUILD_DIR)/mapguard_test -L build/ -ldl 51 | $(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) $(TEST_SRC)/mapguard_thread_test.c -I $(INCLUDE) -o $(BUILD_DIR)/mapguard_thread_test -L build/ -lpthread -ldl 52 | 53 | perf_tests: clean library 54 | $(CC) $(CFLAGS) $(EXE_CFLAGS) -o build/mapguard_perf_test tests/mapguard_perf_test.c 55 | 56 | format: 57 | clang-format $(INCLUDE)/*.* $(SRC)/*.* $(TEST_SRC)/*.* -i 58 | 59 | clean: 60 | rm -rf build/* test_output.txt core 61 | -------------------------------------------------------------------------------- /run_perf_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | echo "Building performance test..." 6 | make perf_tests 7 | 8 | echo "================================================================" 9 | echo "Running baseline performance (no MapGuard)..." 10 | echo "================================================================" 11 | ./build/mapguard_perf_test > /tmp/baseline_perf.csv 12 | cat /tmp/baseline_perf.csv 13 | 14 | echo "" 15 | echo "================================================================" 16 | echo "Running with MapGuard (no config)..." 17 | echo "================================================================" 18 | LD_PRELOAD=build/libmapguard.so ./build/mapguard_perf_test > /tmp/minimal_perf.csv 19 | cat /tmp/minimal_perf.csv 20 | 21 | echo "" 22 | echo "================================================================" 23 | echo "Running with MapGuard (cache enabled)..." 24 | echo "================================================================" 25 | MG_USE_MAPPING_CACHE=1 LD_PRELOAD=build/libmapguard.so ./build/mapguard_perf_test > /tmp/cache_perf.csv 26 | cat /tmp/cache_perf.csv 27 | 28 | echo "" 29 | echo "================================================================" 30 | echo "Running with MapGuard (full protection)..." 
31 | echo "================================================================" 32 | MG_USE_MAPPING_CACHE=1 MG_ENABLE_GUARD_PAGES=1 \ 33 | MG_PREVENT_RWX=1 MG_PREVENT_TRANSITION_TO_X=1 \ 34 | MG_PREVENT_TRANSITION_FROM_X=1 MG_POISON_ON_ALLOCATION=1 \ 35 | LD_PRELOAD=build/libmapguard.so ./build/mapguard_perf_test > /tmp/full_perf.csv 36 | cat /tmp/full_perf.csv 37 | 38 | echo "" 39 | echo "================================================================" 40 | echo "Performance Summary" 41 | echo "================================================================" 42 | 43 | python3 - <<'EOF' 44 | import csv 45 | 46 | configs = [ 47 | ("Baseline", "/tmp/baseline_perf.csv"), 48 | ("Minimal", "/tmp/minimal_perf.csv"), 49 | ("Cache", "/tmp/cache_perf.csv"), 50 | ("Full", "/tmp/full_perf.csv") 51 | ] 52 | 53 | def iter_perf_rows(filepath): 54 | with open(filepath, 'r') as f: 55 | lines = f.readlines() 56 | 57 | header_idx = next((i for i, line in enumerate(lines) 58 | if line.strip().lower().startswith('test_name,')), None) 59 | if header_idx is None: 60 | return 61 | 62 | for row in csv.DictReader(lines[header_idx:]): 63 | test = row.get('test_name') 64 | ops = row.get('ops_per_sec') 65 | if not test or ops is None: 66 | continue 67 | try: 68 | yield test, float(ops) 69 | except ValueError: 70 | continue 71 | 72 | results = {} 73 | for config_name, filepath in configs: 74 | for test, ops in iter_perf_rows(filepath): 75 | results.setdefault(test, {})[config_name] = ops 76 | 77 | print(f"{'Test':<30} {'Baseline':<11} {'Minimal':<9} {'Cache':<12} {'Full':<11} {'Overhead %':<12}") 78 | print("=" * 100) 79 | 80 | for test in results: 81 | baseline = results[test].get('Baseline') 82 | if baseline is None: 83 | continue 84 | print(f"{test:<25} {baseline:>11.0f}", end='') 85 | for config in ['Minimal', 'Cache', 'Full']: 86 | ops = results[test].get(config) 87 | print(f" {ops:>11.0f}" if ops is not None else f" {'N/A':>11}", end='') 88 | full_ops = results[test].get('Full') 89 | if 
full_ops is None: 90 | print(f" {'N/A':>11}") 91 | else: 92 | overhead = ((baseline - full_ops) / baseline) * 100 93 | print(f" {overhead:>11.1f}%") 94 | EOF 95 | 96 | echo "" 97 | echo "Raw CSV files saved in /tmp/*_perf.csv" 98 | -------------------------------------------------------------------------------- /include/mapguard.h: -------------------------------------------------------------------------------- 1 | /* MapGuard - Copyright Chris Rohlf - 2025 */ 2 | #pragma once 3 | #define _GNU_SOURCE 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #if THREAD_SUPPORT 19 | #include 20 | #endif 21 | 22 | #include 23 | #include 24 | 25 | #define OK 0 26 | #define ERROR -1 27 | #define GUARD_PAGE_COUNT 2 28 | 29 | /* If you want to log security policy violations then 30 | * modifying this macro is the easiest way to do it */ 31 | #if DEBUG 32 | #define LOG_ERROR(msg, ...) \ 33 | fprintf(stderr, "[LOG][%d](%s) (%s) - " msg "\n", getpid(), __FUNCTION__, strerror(errno), ##__VA_ARGS__); \ 34 | fflush(stderr); 35 | 36 | #define LOG(msg, ...) \ 37 | fprintf(stdout, "[LOG][%d](%s) " msg "\n", getpid(), __FUNCTION__, ##__VA_ARGS__); \ 38 | fflush(stdout); 39 | #else 40 | #define LOG_ERROR(msg, ...) SYSLOG(msg, ##__VA_ARGS__) 41 | #define LOG(msg, ...) SYSLOG(msg, ##__VA_ARGS__) 42 | #endif 43 | 44 | #define LOG_AND_ABORT(msg, ...) \ 45 | fprintf(stderr, "[LOG][%d](%s) (%s) - " msg "\n", getpid(), __FUNCTION__, strerror(errno), ##__VA_ARGS__); \ 46 | fflush(stderr); \ 47 | abort(); 48 | 49 | #define SYSLOG(msg, ...) 
\ 50 | if(g_mapguard_policy.enable_syslog) { \ 51 | syslog(LOG_ALERT, msg, ##__VA_ARGS__); \ 52 | } 53 | 54 | /* MapGuard Environment variable configurations */ 55 | 56 | /* Prevent PROT_READ, PROT_WRITE, PROT_EXEC mappings */ 57 | #define MG_PREVENT_RWX "MG_PREVENT_RWX" 58 | /* Prevent RW- allocations to ever transition to PROT_EXEC */ 59 | #define MG_PREVENT_TRANSITION_TO_X "MG_PREVENT_TRANSITION_TO_X" 60 | /* Prevent R-X allocations to ever transition to PROT_WRITE */ 61 | #define MG_PREVENT_TRANSITION_FROM_X "MG_PREVENT_TRANSITION_FROM_X" 62 | /* Prevent page allocations at a set address (enforces ASLR) */ 63 | #define MG_PREVENT_STATIC_ADDRESS "MG_PREVENT_STATIC_ADDRESS" 64 | /* Force top and bottom guard page allocations */ 65 | #define MG_ENABLE_GUARD_PAGES "MG_ENABLE_GUARD_PAGES" 66 | /* Abort the process when security policies are violated */ 67 | #define MG_PANIC_ON_VIOLATION "MG_PANIC_ON_VIOLATION" 68 | /* Fill all allocated pages with a byte pattern 0xde */ 69 | #define MG_POISON_ON_ALLOCATION "MG_POISON_ON_ALLOCATION" 70 | /* Enable the mapping cache, required for guard page allocation */ 71 | #define MG_USE_MAPPING_CACHE "MG_USE_MAPPING_CACHE" 72 | /* Enable telemetry via syslog */ 73 | #define MG_ENABLE_SYSLOG "MG_ENABLE_SYSLOG" 74 | 75 | #define ENV_TO_INT(env, config) \ 76 | if(env_to_int(env)) { \ 77 | config = 1; \ 78 | } 79 | 80 | #define MAYBE_PANIC() \ 81 | if(g_mapguard_policy.panic_on_violation) { \ 82 | abort(); \ 83 | } 84 | 85 | #define ROUND_UP_PAGE(N) (((N) + g_page_size - 1) & ~(g_page_size - 1)) 86 | #define ROUND_DOWN_PAGE(N) ((N) & ~(g_page_size - 1)) 87 | 88 | /* Branch prediction hints for hot paths */ 89 | #define likely(x) __builtin_expect(!!(x), 1) 90 | #define unlikely(x) __builtin_expect(!!(x), 0) 91 | 92 | extern pthread_mutex_t _mg_mutex; 93 | 94 | #if THREAD_SUPPORT 95 | #define LOCK_MG() \ 96 | pthread_mutex_lock(&_mg_mutex); 97 | 98 | #define UNLOCK_MG() \ 99 | pthread_mutex_unlock(&_mg_mutex); 100 | #else 101 | #define 
LOCK_MG() 102 | #define UNLOCK_MG() 103 | #endif 104 | 105 | #define MG_POISON_BYTE 0xde 106 | 107 | typedef struct { 108 | uint8_t prevent_rwx; 109 | uint8_t prevent_transition_to_x; 110 | uint8_t prevent_transition_from_x; 111 | uint8_t prevent_static_address; 112 | uint8_t prevent_policies_enabled; 113 | uint8_t enable_guard_pages; 114 | uint8_t panic_on_violation; 115 | uint8_t poison_on_allocation; 116 | uint8_t use_mapping_cache; 117 | uint8_t actions_enabled; 118 | uint8_t enable_syslog; 119 | } mapguard_policy_t; 120 | 121 | typedef struct { 122 | void *next; /* Points to the next [mapguard_cache_metadata_t ... mapguard_cache_entry_t ... n] */ 123 | bool full; 124 | uint32_t total; 125 | uint32_t free; 126 | } mapguard_cache_metadata_t; 127 | 128 | /* TODO - This structure is not thread safe */ 129 | typedef struct mapguard_cache_entry { 130 | void *start; 131 | /* Tracks which entry this is, uint16_t because pages could be 16k */ 132 | uint16_t idx; 133 | size_t size; 134 | bool guarded_b; 135 | bool guarded_t; 136 | int32_t immutable_prot; 137 | int32_t current_prot; 138 | struct mapguard_cache_entry *hash_next; /* For hash table chaining */ 139 | } mapguard_cache_entry_t; 140 | 141 | mapguard_cache_metadata_t *new_mce_page(); 142 | mapguard_cache_metadata_t *get_mce_metadata_page(mapguard_cache_entry_t *mce); 143 | mapguard_cache_entry_t *find_free_mce(); 144 | mapguard_cache_entry_t *get_cache_entry(void *addr); 145 | void *is_mapguard_entry_cached(void *p, void *data); 146 | int32_t env_to_int(char *string); 147 | uint64_t rand_uint64(void); 148 | void mark_guard_page(void *p); 149 | void *allocate_guard_page(void *p); 150 | void make_guard_page(void *p); 151 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MapGuard 2 | 3 | MapGuard is a page allocation proxy and cache that aims to mitigate some memory safety exploits by 
intercepting, modifying, and logging `mmap` based page allocations. It enforces a simple set of allocation security policies configurable via environment variables. It works transparently on open and closed source programs with no source modifications in the target required. It can be used alongside any mmap based memory allocator. 4 | 5 | ## Implementation 6 | 7 | MapGuard uses the dynamic linker interface via `dlsym` to hook libc functions. When calls to those functions are intercepted MapGuard will inspect their arguments and then consult runtime policies for whether that behavior should be allowed, denied, or logged. 8 | 9 | The library requires hooking `mmap`, `munmap`, `mprotect`, and `mremap`. Enabling all protections may introduce some performance and memory overhead, especially if guard pages are enabled. 10 | 11 | ## Performance 12 | 13 | MapGuard can introduce performance overhead when allocating many raw pages. This is particularly true when `MG_USE_MAPPING_CACHE` is enabled because it has to manage metadata for each page allocation and tracking this data introduces CPU and memory overhead. Faster data structures are available for managing this metadata but they all rely on `malloc` which makes it easier to bypass the security controls the library introduces.
14 | 15 | ## Configuration 16 | 17 | The following functionality can be enabled/disabled via environment variables: 18 | 19 | * `MG_PREVENT_RWX` - Prevent PROT_READ, PROT_WRITE, PROT_EXEC mappings 20 | * `MG_PREVENT_TRANSITION_TO_X` - Prevent RW- allocations to ever transition to PROT_EXEC 21 | * `MG_PREVENT_TRANSITION_FROM_X` - Prevent R-X allocations to ever transition to PROT_WRITE 22 | * `MG_PREVENT_STATIC_ADDRESS` - Prevent page allocations at a set address (enforces ASLR) 23 | * `MG_ENABLE_GUARD_PAGES` - Force guard page allocations on either side of all mappings 24 | * `MG_PANIC_ON_VIOLATION` - Abort the process when any policies are violated 25 | * `MG_POISON_ON_ALLOCATION` - Fill all allocated pages with a byte pattern 0xde 26 | * `MG_USE_MAPPING_CACHE` - Enable the mapping cache, required for guard pages and other protections 27 | * `MG_ENABLE_SYSLOG` - Enable logging of policy violations to syslog 28 | 29 | ## Compiling 30 | 31 | `make library` - Compiles the library 32 | 33 | `make tests` - Compiles a debug version of the library and the unit tests 34 | 35 | `make perf_tests` - Compiles the performance tests 36 | 37 | `make format` - Run clang format on the code base 38 | 39 | Now run your own program with the library: 40 | 41 | ``` 42 | MG_PANIC_ON_VIOLATION=0 \ 43 | MG_USE_MAPPING_CACHE=1 \ 44 | MG_PREVENT_RWX=1 \ 45 | MG_PREVENT_STATIC_ADDRESS=1 \ 46 | MG_ENABLE_GUARD_PAGES=1 \ 47 | MG_PREVENT_TRANSITION_TO_X=1 \ 48 | MG_PREVENT_TRANSITION_FROM_X=1 \ 49 | MG_POISON_ON_ALLOCATION=1 \ 50 | LD_PRELOAD=build/libmapguard.so ./your_program 51 | ``` 52 | 53 | ## Testing 54 | 55 | You can test MapGuard by running `./run_tests.sh`: 56 | 57 | ``` 58 | # ./run_tests.sh 59 | Running mapguard_test test... Succeeded 60 | Running mapguard_thread_test test... Succeeded 61 | 62 | # ./run_perf_tests.sh 63 | ================================================================ 64 | Running baseline performance (no MapGuard)...
65 | ================================================================ 66 | test_name,iterations,time_ms,ops_per_sec 67 | simple_alloc_free,10000,8.28,1207522.82 68 | alloc_write_free,10000,27.56,362884.76 69 | batch_alloc_then_free,10000,7.53,1327830.27 70 | varied_sizes,1000,0.75,1325820.35 71 | mprotect_transitions,1000,1.29,777756.17 72 | large_allocations,1000,0.74,1347180.55 73 | partial_munmap,1000,1.36,734686.38 74 | 75 | ================================================================ 76 | Running with MapGuard (no config)... 77 | ================================================================ 78 | test_name,iterations,time_ms,ops_per_sec 79 | simple_alloc_free,10000,8.00,1249934.85 80 | alloc_write_free,10000,26.70,374585.62 81 | batch_alloc_then_free,10000,7.58,1319130.69 82 | varied_sizes,1000,0.81,1228689.91 83 | mprotect_transitions,1000,1.33,753130.01 84 | large_allocations,1000,0.83,1198442.98 85 | partial_munmap,1000,1.31,764696.12 86 | 87 | ================================================================ 88 | Running with MapGuard (cache enabled)... 89 | ================================================================ 90 | test_name,iterations,time_ms,ops_per_sec 91 | simple_alloc_free,10000,8.40,1190452.52 92 | alloc_write_free,10000,27.73,360684.77 93 | batch_alloc_then_free,10000,12.23,817661.49 94 | varied_sizes,1000,0.85,1170561.32 95 | mprotect_transitions,1000,1.46,685674.95 96 | large_allocations,1000,0.84,1190239.56 97 | partial_munmap,1000,1.39,721478.86 98 | 99 | ================================================================ 100 | Running with MapGuard (full protection)... 
101 | ================================================================ 102 | test_name,iterations,time_ms,ops_per_sec 103 | simple_alloc_free,10000,38.66,258667.52 104 | alloc_write_free,10000,38.82,257607.74 105 | batch_alloc_then_free,10000,56.12,178201.50 106 | varied_sizes,1000,5.17,193604.62 107 | mprotect_transitions,1000,4.84,206417.86 108 | large_allocations,1000,97.63,10243.05 109 | partial_munmap,1000,6.01,166516.33 110 | 111 | ================================================================ 112 | Performance Summary 113 | ================================================================ 114 | Test Baseline Minimal Cache Full Overhead % 115 | ==================================================================================================== 116 | simple_alloc_free 1207523 1249935 1190453 258668 78.6% 117 | alloc_write_free 362885 374586 360685 257608 29.0% 118 | batch_alloc_then_free 1327830 1319131 817661 178202 86.6% 119 | varied_sizes 1325820 1228690 1170561 193605 85.4% 120 | mprotect_transitions 777756 753130 685675 206418 73.5% 121 | large_allocations 1347181 1198443 1190240 10243 99.2% 122 | partial_munmap 734686 764696 721479 166516 77.3% 123 | 124 | # ./run_fuzz_tests 125 | ... 126 | (see if anything crashes!) 
127 | 128 | ``` 129 | 130 | ## Who 131 | 132 | MapGuard is written and maintained by Chris Rohlf - chris.rohlf@gmail.com 133 | -------------------------------------------------------------------------------- /tests/mapguard_thread_test.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "mapguard.h" 14 | 15 | #define NUM_THREADS 4 16 | #define ITERATIONS_PER_THREAD 1000 17 | #define MAX_ALLOCATIONS 100 18 | 19 | typedef struct { 20 | int thread_id; 21 | atomic_int *total_allocs; 22 | atomic_int *total_frees; 23 | atomic_int *errors; 24 | } thread_data_t; 25 | 26 | typedef struct { 27 | void *addr; 28 | size_t size; 29 | } allocation_t; 30 | 31 | static inline uint64_t xorshift64(uint64_t *state) { 32 | uint64_t x = *state; 33 | x ^= x << 13; 34 | x ^= x >> 7; 35 | x ^= x << 17; 36 | *state = x; 37 | return x; 38 | } 39 | 40 | void *worker_thread(void *arg) { 41 | thread_data_t *data = (thread_data_t *) arg; 42 | allocation_t allocs[MAX_ALLOCATIONS]; 43 | int num_allocs = 0; 44 | 45 | /* Per-thread random state seeded with thread ID and time */ 46 | uint64_t rng_state = data->thread_id + time(NULL); 47 | 48 | for(int i = 0; i < ITERATIONS_PER_THREAD; i++) { 49 | uint64_t choice = xorshift64(&rng_state) % 100; 50 | 51 | if(choice < 60 && num_allocs < MAX_ALLOCATIONS) { 52 | /* Allocate memory (60% probability) */ 53 | size_t sizes[] = {4096, 8192, 16384, 32768}; 54 | size_t size = sizes[xorshift64(&rng_state) % 4]; 55 | 56 | void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, 57 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 58 | 59 | if(ptr == MAP_FAILED) { 60 | fprintf(stderr, "[Thread %d] mmap failed at iteration %d\n", 61 | data->thread_id, i); 62 | atomic_fetch_add(data->errors, 1); 63 | continue; 64 | } 65 | 66 | /* Write to the memory to ensure it's accessible */ 
67 | memset(ptr, 0xAA + data->thread_id, size); 68 | 69 | allocs[num_allocs].addr = ptr; 70 | allocs[num_allocs].size = size; 71 | num_allocs++; 72 | 73 | atomic_fetch_add(data->total_allocs, 1); 74 | 75 | } else if(num_allocs > 0) { 76 | /* Free memory (40% probability, if we have allocations) */ 77 | int idx = xorshift64(&rng_state) % num_allocs; 78 | 79 | /* Verify memory still contains our pattern before freeing */ 80 | uint8_t *check = (uint8_t *) allocs[idx].addr; 81 | if(*check != (0xAA + data->thread_id)) { 82 | fprintf(stderr, "[Thread %d] Memory corruption detected at %p\n", 83 | data->thread_id, allocs[idx].addr); 84 | atomic_fetch_add(data->errors, 1); 85 | } 86 | 87 | if(munmap(allocs[idx].addr, allocs[idx].size) != 0) { 88 | fprintf(stderr, "[Thread %d] munmap failed\n", data->thread_id); 89 | atomic_fetch_add(data->errors, 1); 90 | } 91 | 92 | atomic_fetch_add(data->total_frees, 1); 93 | 94 | /* Remove from tracking array by moving last element */ 95 | allocs[idx] = allocs[num_allocs - 1]; 96 | num_allocs--; 97 | } 98 | 99 | /* Occasionally do mprotect operations */ 100 | if(num_allocs > 0 && choice < 10) { 101 | int idx = xorshift64(&rng_state) % num_allocs; 102 | 103 | if(mprotect(allocs[idx].addr, allocs[idx].size, PROT_READ) != 0) { 104 | fprintf(stderr, "[Thread %d] mprotect to PROT_READ failed\n", 105 | data->thread_id); 106 | atomic_fetch_add(data->errors, 1); 107 | } else { 108 | if(mprotect(allocs[idx].addr, allocs[idx].size, 109 | PROT_READ | PROT_WRITE) != 0) { 110 | fprintf(stderr, "[Thread %d] mprotect to PROT_RW failed\n", 111 | data->thread_id); 112 | atomic_fetch_add(data->errors, 1); 113 | } 114 | } 115 | } 116 | } 117 | 118 | /* Clean up remaining allocations */ 119 | for(int i = 0; i < num_allocs; i++) { 120 | munmap(allocs[i].addr, allocs[i].size); 121 | atomic_fetch_add(data->total_frees, 1); 122 | } 123 | 124 | printf("[Thread %d] Completed %d iterations\n", 125 | data->thread_id, ITERATIONS_PER_THREAD); 126 | 127 | return NULL; 
128 | } 129 | 130 | void *secret_data; 131 | 132 | int main(int argc, char *argv[]) { 133 | pthread_t threads[NUM_THREADS]; 134 | thread_data_t thread_data[NUM_THREADS]; 135 | 136 | atomic_int total_allocs = 0; 137 | atomic_int total_frees = 0; 138 | atomic_int errors = 0; 139 | 140 | printf("Starting multi-threaded stress test:\n"); 141 | printf(" Threads: %d\n", NUM_THREADS); 142 | printf(" Iterations per thread: %d\n", ITERATIONS_PER_THREAD); 143 | printf(" Max concurrent allocations per thread: %d\n", MAX_ALLOCATIONS); 144 | printf("\n"); 145 | 146 | struct timespec start, end; 147 | clock_gettime(CLOCK_MONOTONIC, &start); 148 | 149 | /* Create threads */ 150 | for(int i = 0; i < NUM_THREADS; i++) { 151 | thread_data[i].thread_id = i; 152 | thread_data[i].total_allocs = &total_allocs; 153 | thread_data[i].total_frees = &total_frees; 154 | thread_data[i].errors = &errors; 155 | 156 | if(pthread_create(&threads[i], NULL, worker_thread, &thread_data[i]) != 0) { 157 | fprintf(stderr, "Failed to create thread %d\n", i); 158 | return 1; 159 | } 160 | } 161 | 162 | /* Wait for all threads to complete */ 163 | for(int i = 0; i < NUM_THREADS; i++) { 164 | if(pthread_join(threads[i], NULL) != 0) { 165 | fprintf(stderr, "Failed to join thread %d\n", i); 166 | return 1; 167 | } 168 | } 169 | 170 | clock_gettime(CLOCK_MONOTONIC, &end); 171 | 172 | double elapsed = (end.tv_sec - start.tv_sec) + 173 | (end.tv_nsec - start.tv_nsec) / 1000000000.0; 174 | 175 | printf("\n=== Test Results ===\n"); 176 | printf("Total allocations: %d\n", atomic_load(&total_allocs)); 177 | printf("Total frees: %d\n", atomic_load(&total_frees)); 178 | printf("Errors: %d\n", atomic_load(&errors)); 179 | printf("Elapsed time: %.2f seconds\n", elapsed); 180 | printf("Operations per second: %.0f\n", 181 | (atomic_load(&total_allocs) + atomic_load(&total_frees)) / elapsed); 182 | 183 | if(atomic_load(&errors) > 0) { 184 | printf("\n*** TEST FAILED: %d errors detected ***\n", 185 | atomic_load(&errors)); 
186 | return 1; 187 | } 188 | 189 | printf("\n*** TEST PASSED ***\n"); 190 | return 0; 191 | } 192 | -------------------------------------------------------------------------------- /tests/mapguard_perf_test.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #define NUM_ITERATIONS 10000 10 | #define ALLOCATION_SIZE (4096 * 4) 11 | #define VARIED_SIZE_COUNT 1000 12 | 13 | typedef struct { 14 | void *ptr; 15 | size_t size; 16 | } allocation_t; 17 | 18 | static inline double timespec_diff_ms(struct timespec *start, struct timespec *end) { 19 | return (end->tv_sec - start->tv_sec) * 1000.0 + 20 | (end->tv_nsec - start->tv_nsec) / 1000000.0; 21 | } 22 | 23 | void test_simple_alloc_free(void) { 24 | struct timespec start, end; 25 | 26 | clock_gettime(CLOCK_MONOTONIC, &start); 27 | 28 | for(int i = 0; i < NUM_ITERATIONS; i++) { 29 | void *ptr = mmap(NULL, ALLOCATION_SIZE, PROT_READ | PROT_WRITE, 30 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 31 | if(ptr == MAP_FAILED) { 32 | fprintf(stderr, "mmap failed at iteration %d\n", i); 33 | exit(1); 34 | } 35 | 36 | munmap(ptr, ALLOCATION_SIZE); 37 | } 38 | 39 | clock_gettime(CLOCK_MONOTONIC, &end); 40 | 41 | double elapsed = timespec_diff_ms(&start, &end); 42 | printf("simple_alloc_free,%d,%.2f,%.2f\n", 43 | NUM_ITERATIONS, elapsed, NUM_ITERATIONS / (elapsed / 1000.0)); 44 | } 45 | 46 | void test_alloc_write_free(void) { 47 | struct timespec start, end; 48 | 49 | clock_gettime(CLOCK_MONOTONIC, &start); 50 | 51 | for(int i = 0; i < NUM_ITERATIONS; i++) { 52 | void *ptr = mmap(NULL, ALLOCATION_SIZE, PROT_READ | PROT_WRITE, 53 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 54 | if(ptr == MAP_FAILED) { 55 | fprintf(stderr, "mmap failed at iteration %d\n", i); 56 | exit(1); 57 | } 58 | 59 | memset(ptr, 0xAA, ALLOCATION_SIZE); 60 | 61 | munmap(ptr, ALLOCATION_SIZE); 62 | } 63 | 64 | 
clock_gettime(CLOCK_MONOTONIC, &end); 65 | 66 | double elapsed = timespec_diff_ms(&start, &end); 67 | printf("alloc_write_free,%d,%.2f,%.2f\n", 68 | NUM_ITERATIONS, elapsed, NUM_ITERATIONS / (elapsed / 1000.0)); 69 | } 70 | 71 | void test_batch_alloc_then_free(void) { 72 | struct timespec start, end; 73 | allocation_t *allocs = malloc(NUM_ITERATIONS * sizeof(allocation_t)); 74 | 75 | clock_gettime(CLOCK_MONOTONIC, &start); 76 | 77 | for(int i = 0; i < NUM_ITERATIONS; i++) { 78 | allocs[i].ptr = mmap(NULL, ALLOCATION_SIZE, PROT_READ | PROT_WRITE, 79 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 80 | allocs[i].size = ALLOCATION_SIZE; 81 | if(allocs[i].ptr == MAP_FAILED) { 82 | fprintf(stderr, "mmap failed at iteration %d\n", i); 83 | exit(1); 84 | } 85 | } 86 | 87 | for(int i = 0; i < NUM_ITERATIONS; i++) { 88 | munmap(allocs[i].ptr, allocs[i].size); 89 | } 90 | 91 | clock_gettime(CLOCK_MONOTONIC, &end); 92 | 93 | double elapsed = timespec_diff_ms(&start, &end); 94 | printf("batch_alloc_then_free,%d,%.2f,%.2f\n", 95 | NUM_ITERATIONS, elapsed, NUM_ITERATIONS / (elapsed / 1000.0)); 96 | 97 | free(allocs); 98 | } 99 | 100 | void test_varied_sizes(void) { 101 | struct timespec start, end; 102 | size_t sizes[] = {4096, 8192, 16384, 32768, 65536}; 103 | int num_sizes = sizeof(sizes) / sizeof(sizes[0]); 104 | 105 | clock_gettime(CLOCK_MONOTONIC, &start); 106 | 107 | for(int i = 0; i < VARIED_SIZE_COUNT; i++) { 108 | size_t size = sizes[i % num_sizes]; 109 | void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, 110 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 111 | if(ptr == MAP_FAILED) { 112 | fprintf(stderr, "mmap failed at iteration %d\n", i); 113 | exit(1); 114 | } 115 | 116 | munmap(ptr, size); 117 | } 118 | 119 | clock_gettime(CLOCK_MONOTONIC, &end); 120 | 121 | double elapsed = timespec_diff_ms(&start, &end); 122 | printf("varied_sizes,%d,%.2f,%.2f\n", 123 | VARIED_SIZE_COUNT, elapsed, VARIED_SIZE_COUNT / (elapsed / 1000.0)); 124 | } 125 | 126 | void test_mprotect_transitions(void) 
{ 127 | struct timespec start, end; 128 | 129 | clock_gettime(CLOCK_MONOTONIC, &start); 130 | 131 | for(int i = 0; i < NUM_ITERATIONS / 10; i++) { 132 | void *ptr = mmap(NULL, ALLOCATION_SIZE, PROT_READ | PROT_WRITE, 133 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 134 | if(ptr == MAP_FAILED) { 135 | fprintf(stderr, "mmap failed at iteration %d\n", i); 136 | exit(1); 137 | } 138 | 139 | mprotect(ptr, ALLOCATION_SIZE, PROT_READ); 140 | mprotect(ptr, ALLOCATION_SIZE, PROT_READ | PROT_WRITE); 141 | 142 | munmap(ptr, ALLOCATION_SIZE); 143 | } 144 | 145 | clock_gettime(CLOCK_MONOTONIC, &end); 146 | 147 | double elapsed = timespec_diff_ms(&start, &end); 148 | printf("mprotect_transitions,%d,%.2f,%.2f\n", 149 | NUM_ITERATIONS / 10, elapsed, (NUM_ITERATIONS / 10) / (elapsed / 1000.0)); 150 | } 151 | 152 | void test_large_allocations(void) { 153 | struct timespec start, end; 154 | size_t large_size = 1024 * 1024; 155 | 156 | clock_gettime(CLOCK_MONOTONIC, &start); 157 | 158 | for(int i = 0; i < 1000; i++) { 159 | void *ptr = mmap(NULL, large_size, PROT_READ | PROT_WRITE, 160 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 161 | if(ptr == MAP_FAILED) { 162 | fprintf(stderr, "mmap failed at iteration %d\n", i); 163 | exit(1); 164 | } 165 | 166 | munmap(ptr, large_size); 167 | } 168 | 169 | clock_gettime(CLOCK_MONOTONIC, &end); 170 | 171 | double elapsed = timespec_diff_ms(&start, &end); 172 | printf("large_allocations,1000,%.2f,%.2f\n", 173 | elapsed, 1000 / (elapsed / 1000.0)); 174 | } 175 | 176 | void test_partial_munmap(void) { 177 | struct timespec start, end; 178 | size_t size = 16384; /* 4 pages */ 179 | 180 | clock_gettime(CLOCK_MONOTONIC, &start); 181 | 182 | for(int i = 0; i < NUM_ITERATIONS / 10; i++) { 183 | void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, 184 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 185 | if(ptr == MAP_FAILED) { 186 | fprintf(stderr, "mmap failed at iteration %d\n", i); 187 | exit(1); 188 | } 189 | 190 | /* Test different unmap patterns */ 191 | if(i % 3 == 0) 
{ 192 | /* Unmap from beginning */ 193 | munmap(ptr, 4096); 194 | munmap(ptr + 4096, 12288); 195 | } else if(i % 3 == 1) { 196 | /* Unmap from end */ 197 | munmap(ptr + 12288, 4096); 198 | munmap(ptr, 12288); 199 | } else { 200 | /* Unmap from middle - creates split! */ 201 | munmap(ptr + 4096, 8192); 202 | munmap(ptr, 4096); 203 | munmap(ptr + 12288, 4096); 204 | } 205 | } 206 | 207 | clock_gettime(CLOCK_MONOTONIC, &end); 208 | 209 | double elapsed = timespec_diff_ms(&start, &end); 210 | printf("partial_munmap,%d,%.2f,%.2f\n", 211 | NUM_ITERATIONS / 10, elapsed, (NUM_ITERATIONS / 10) / (elapsed / 1000.0)); 212 | } 213 | 214 | int main(void) { 215 | printf("test_name,iterations,time_ms,ops_per_sec\n"); 216 | 217 | test_simple_alloc_free(); 218 | test_alloc_write_free(); 219 | test_batch_alloc_then_free(); 220 | test_varied_sizes(); 221 | test_mprotect_transitions(); 222 | test_large_allocations(); 223 | test_partial_munmap(); 224 | 225 | return 0; 226 | } 227 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tests/mapguard_test.c: -------------------------------------------------------------------------------- 1 | /* MapGuard tests 2 | * Copyright Chris Rohlf - 2025 */ 3 | 4 | #define _GNU_SOURCE 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "mapguard.h" 14 | 15 | #define STATIC_ADDRESS 0x7f3bffaaa000 16 | int page_size; 17 | int alloc_size; 18 | 19 | void *map_memory(char *desc, int prot) { 20 | return mmap(0, alloc_size, prot, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 21 | } 22 | 23 | int32_t unmap_memory(void *ptr) { 24 | return munmap(ptr, alloc_size); 25 | } 26 | 27 | int32_t unmap_remapped_memory(void *ptr) { 28 | return munmap(ptr, alloc_size); 29 | } 30 | 31 | void *remap_memory_test(char *desc, void *ptr) { 32 | void *mptr = mremap(ptr, alloc_size, alloc_size * 2, MREMAP_MAYMOVE); 33 | if(mptr != MAP_FAILED) { 34 | LOG("Success: remapped %s memory %p @ %p", desc, ptr, mptr); 35 | } else { 36 | LOG("Failure: remap %s memory", desc); 37 | } 38 | 39 | return mptr; 40 | } 41 | 42 | 
void map_rw_memory_test() { 43 | void *ptr = map_memory("RW", PROT_READ | PROT_WRITE); 44 | 45 | if(ptr == MAP_FAILED) { 46 | LOG("Failure: map RW memory"); 47 | } else { 48 | LOG("Success: mapped RW memory"); 49 | } 50 | 51 | unmap_memory(ptr); 52 | } 53 | 54 | void map_rwx_memory_test() { 55 | void *ptr = map_memory("RWX", PROT_READ | PROT_WRITE | PROT_EXEC); 56 | 57 | if(ptr != MAP_FAILED) { 58 | LOG("Failure: mapped RWX memory"); 59 | } else { 60 | LOG("Success: failed to map RWX memory"); 61 | } 62 | } 63 | 64 | void check_x_to_w_test() { 65 | void *ptr = map_memory("R-X", PROT_READ | PROT_EXEC); 66 | 67 | if(ptr == MAP_FAILED) { 68 | LOG("Failure: to map R-X memory"); 69 | } 70 | 71 | int32_t ret = mprotect(ptr, page_size * 16, PROT_READ | PROT_WRITE); 72 | 73 | if(ret != ERROR) { 74 | LOG("Failure: allowed mprotect R-X to RW-"); 75 | } else { 76 | LOG("Success: prevented R-X to R-W"); 77 | } 78 | 79 | unmap_memory(ptr); 80 | } 81 | 82 | void map_rw_then_x_memory_test() { 83 | void *ptr = map_memory("RW", PROT_READ | PROT_WRITE); 84 | 85 | if(ptr == MAP_FAILED) { 86 | LOG("Failure: to map RW memory"); 87 | } 88 | 89 | int32_t ret = mprotect(ptr, page_size * 16, PROT_READ | PROT_WRITE | PROT_EXEC); 90 | 91 | if(ret != ERROR) { 92 | LOG("Failure: allowed mprotect of RWX"); 93 | } else { 94 | LOG("Success: prevented RWX mprotect"); 95 | } 96 | 97 | unmap_memory(ptr); 98 | } 99 | 100 | void map_then_mremap_test() { 101 | void *ptr = map_memory("RW", PROT_READ | PROT_WRITE); 102 | 103 | if(ptr == MAP_FAILED) { 104 | LOG("Failure: to map RW memory"); 105 | } 106 | LOG("mapped RW memory %p", ptr); 107 | ptr = remap_memory_test("Remap", ptr); 108 | 109 | if(ptr == MAP_FAILED) { 110 | LOG("Failure: to remap memory"); 111 | } else { 112 | LOG("Success: remapped memory"); 113 | } 114 | 115 | unmap_remapped_memory(ptr); 116 | } 117 | 118 | void map_static_address_test() { 119 | uint8_t *ptr = mmap((void *) STATIC_ADDRESS, page_size * 16, PROT_READ | PROT_WRITE, 
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 120 | 121 | if(ptr != MAP_FAILED) { 122 | LOG("Failure: mmapped memory at static address @ %lx", STATIC_ADDRESS); 123 | } else { 124 | LOG("Success: prevented mmap at static address"); 125 | } 126 | } 127 | 128 | void check_poison_bytes_test() { 129 | void *ptr = map_memory("Poison Bytes", PROT_READ | PROT_WRITE); 130 | 131 | if(ptr == MAP_FAILED) { 132 | LOG("Failure: to map poison bytes memory"); 133 | } 134 | 135 | uint8_t *byte = &ptr[128]; 136 | 137 | if(*byte != MG_POISON_BYTE) { 138 | LOG("Failure: to find poison byte 0x%x, found 0x%x", MG_POISON_BYTE, *byte); 139 | } else { 140 | LOG("Success: mapped memory with poison bytes") 141 | } 142 | 143 | unmap_memory(ptr); 144 | } 145 | 146 | void unmap_partial_rw_memory_test() { 147 | void *ptr = mmap(0, page_size * 3, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 148 | 149 | if(ptr != MAP_FAILED) { 150 | LOG("Success: mmapped memory @ %p", ptr); 151 | } else { 152 | LOG("Failure: to map memory"); 153 | } 154 | 155 | int ret = munmap(ptr + page_size, page_size); 156 | 157 | if(ret != 0) { 158 | LOG("Failure: to unmap bottom page"); 159 | } else { 160 | LOG("Success: unmapped bottom page"); 161 | } 162 | 163 | munmap(ptr, page_size); 164 | munmap(ptr + (page_size * 2), 4096); 165 | } 166 | 167 | void check_map_partial_unmap_bottom_test() { 168 | uint8_t *ptr = mmap(0, 8192, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 169 | 170 | if(ptr != MAP_FAILED) { 171 | LOG("Success: mmapped memory @ %p", ptr); 172 | } else { 173 | LOG("Failure: to map memory"); 174 | } 175 | 176 | int ret = munmap(ptr, 4096); 177 | 178 | if(ret != 0) { 179 | LOG("Failure: to unmap bottom page"); 180 | } else { 181 | LOG("Success: unmapped bottom page"); 182 | } 183 | 184 | munmap(ptr + 4096, 4096); 185 | } 186 | 187 | void check_map_partial_unmap_top_test() { 188 | uint8_t *ptr = mmap(0, 8192, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 189 | 190 | if(ptr != 
MAP_FAILED) { 191 | LOG("Success: mmapped memory @ %p", ptr); 192 | } else { 193 | LOG("Failure: to map memory"); 194 | } 195 | 196 | int ret = munmap(ptr + 4096, 4096); 197 | 198 | if(ret != 0) { 199 | LOG("Failure: to unmap top page"); 200 | } else { 201 | LOG("Success: unmapped top page"); 202 | } 203 | 204 | munmap(ptr, 4096); 205 | } 206 | 207 | /* Case 1: Full unmap test */ 208 | void check_full_unmap_test() { 209 | uint8_t *ptr = mmap(0, page_size * 8, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 210 | 211 | if(ptr == MAP_FAILED) { 212 | LOG("Failure: to map memory for full unmap test"); 213 | return; 214 | } 215 | 216 | LOG("Case 1 Test: Full unmap of %p (size %d)", ptr, page_size * 8); 217 | 218 | int ret = munmap(ptr, page_size * 8); 219 | 220 | if(ret != 0) { 221 | LOG("Failure: to fully unmap memory"); 222 | } else { 223 | LOG("Success: fully unmapped memory"); 224 | } 225 | } 226 | 227 | /* Case 2: Unmap from beginning */ 228 | void check_unmap_from_beginning_test() { 229 | uint8_t *ptr = mmap(0, page_size * 8, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 230 | 231 | if(ptr == MAP_FAILED) { 232 | LOG("Failure: to map memory for beginning unmap test"); 233 | return; 234 | } 235 | 236 | LOG("Case 2 Test: Unmap first 3 pages from %p", ptr); 237 | 238 | /* Unmap first 3 pages */ 239 | int ret = munmap(ptr, page_size * 3); 240 | 241 | if(ret != 0) { 242 | LOG("Failure: to unmap from beginning"); 243 | } else { 244 | LOG("Success: unmapped from beginning, remaining region starts at %p", ptr + (page_size * 3)); 245 | } 246 | 247 | /* Clean up remaining pages */ 248 | munmap(ptr + (page_size * 3), page_size * 5); 249 | } 250 | 251 | /* Case 3: Unmap from middle to end */ 252 | void check_unmap_middle_to_end_test() { 253 | uint8_t *ptr = mmap(0, page_size * 8, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 254 | 255 | if(ptr == MAP_FAILED) { 256 | LOG("Failure: to map memory for middle-to-end unmap test"); 257 | 
return; 258 | } 259 | 260 | LOG("Case 3 Test: Unmap from middle (page 4) to end from %p", ptr); 261 | 262 | /* Unmap from page 4 to the end */ 263 | int ret = munmap(ptr + (page_size * 3), page_size * 5); 264 | 265 | if(ret != 0) { 266 | LOG("Failure: to unmap from middle to end"); 267 | } else { 268 | LOG("Success: unmapped from middle to end, remaining region is %p (size %d)", ptr, page_size * 3); 269 | } 270 | 271 | /* Clean up remaining pages */ 272 | munmap(ptr, page_size * 3); 273 | } 274 | 275 | /* Case 4: Unmap single page from middle (creates split with 1 page hole) */ 276 | void check_unmap_single_page_middle_test() { 277 | uint8_t *ptr = mmap(0, page_size * 8, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 278 | 279 | if(ptr == MAP_FAILED) { 280 | LOG("Failure: to map memory for single page middle unmap test"); 281 | return; 282 | } 283 | 284 | LOG("Case 4a Test: Unmap single page (page 4) from middle of %p", ptr); 285 | 286 | /* Unmap page 4 (creates a 1-page hole) */ 287 | int ret = munmap(ptr + (page_size * 3), page_size); 288 | 289 | if(ret != 0) { 290 | LOG("Failure: to unmap single page from middle"); 291 | } else { 292 | LOG("Success: unmapped single page from middle, created 2 regions"); 293 | LOG(" Lower region: %p (size %d)", ptr, page_size * 3); 294 | LOG(" Upper region: %p (size %d)", ptr + (page_size * 4), page_size * 4); 295 | } 296 | 297 | /* Clean up both regions */ 298 | munmap(ptr, page_size * 3); 299 | munmap(ptr + (page_size * 4), page_size * 4); 300 | } 301 | 302 | /* Case 4: Unmap 2 pages from middle (reuse both as guards) */ 303 | void check_unmap_two_pages_middle_test() { 304 | uint8_t *ptr = mmap(0, page_size * 10, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 305 | 306 | if(ptr == MAP_FAILED) { 307 | LOG("Failure: to map memory for two page middle unmap test"); 308 | return; 309 | } 310 | 311 | LOG("Case 4b Test: Unmap 2 pages (pages 4-5) from middle of %p", ptr); 312 | 313 | /* Unmap 2 pages (pages 
4-5) */ 314 | int ret = munmap(ptr + (page_size * 3), page_size * 2); 315 | 316 | if(ret != 0) { 317 | LOG("Failure: to unmap 2 pages from middle"); 318 | } else { 319 | LOG("Success: unmapped 2 pages from middle, created 2 regions with guards"); 320 | LOG(" Lower region: %p (size %d)", ptr, page_size * 3); 321 | LOG(" Upper region: %p (size %d)", ptr + (page_size * 5), page_size * 5); 322 | } 323 | 324 | /* Clean up both regions */ 325 | munmap(ptr, page_size * 3); 326 | munmap(ptr + (page_size * 5), page_size * 5); 327 | } 328 | 329 | /* Case 4: Unmap 3 pages from middle (reuse first/last as guards, unmap middle) */ 330 | void check_unmap_three_pages_middle_test() { 331 | uint8_t *ptr = mmap(0, page_size * 10, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 332 | 333 | if(ptr == MAP_FAILED) { 334 | LOG("Failure: to map memory for three page middle unmap test"); 335 | return; 336 | } 337 | 338 | LOG("Case 4c Test: Unmap 3 pages (pages 4-6) from middle of %p", ptr); 339 | 340 | /* Unmap 3 pages (pages 4-6) */ 341 | int ret = munmap(ptr + (page_size * 3), page_size * 3); 342 | 343 | if(ret != 0) { 344 | LOG("Failure: to unmap 3 pages from middle"); 345 | } else { 346 | LOG("Success: unmapped 3 pages from middle, created 2 regions with guards"); 347 | LOG(" Lower region: %p (size %d)", ptr, page_size * 3); 348 | LOG(" Upper region: %p (size %d)", ptr + (page_size * 6), page_size * 4); 349 | } 350 | 351 | /* Clean up both regions */ 352 | munmap(ptr, page_size * 3); 353 | munmap(ptr + (page_size * 6), page_size * 4); 354 | } 355 | 356 | /* Case 4: Unmap 5 pages from middle (stress test with more pages) */ 357 | void check_unmap_five_pages_middle_test() { 358 | uint8_t *ptr = mmap(0, alloc_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 359 | 360 | if(ptr == MAP_FAILED) { 361 | LOG("Failure: to map memory for five page middle unmap test"); 362 | return; 363 | } 364 | 365 | LOG("Case 4d Test: Unmap 5 pages (pages 6-10) from middle of %p", 
ptr); 366 | 367 | /* Unmap 5 pages from the middle */ 368 | int ret = munmap(ptr + (page_size * 5), page_size * 5); 369 | 370 | if(ret != 0) { 371 | LOG("Failure: to unmap 5 pages from middle"); 372 | } else { 373 | LOG("Success: unmapped 5 pages from middle, created 2 regions with guards"); 374 | LOG(" Lower region: %p (size %d)", ptr, page_size * 5); 375 | LOG(" Upper region: %p (size %d)", ptr + (page_size * 10), page_size * 6); 376 | } 377 | 378 | /* Clean up both regions */ 379 | munmap(ptr, page_size * 5); 380 | munmap(ptr + (page_size * 10), page_size * 6); 381 | } 382 | 383 | /* Stress test: Multiple sequential partial unmaps */ 384 | void check_multiple_partial_unmaps_test() { 385 | uint8_t *ptr = mmap(0, page_size * 20, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 386 | 387 | if(ptr == MAP_FAILED) { 388 | LOG("Failure: to map memory for multiple partial unmap test"); 389 | return; 390 | } 391 | 392 | LOG("Stress Test: Multiple partial unmaps on %p", ptr); 393 | 394 | /* Unmap from beginning (Case 2) */ 395 | int ret = munmap(ptr, page_size * 3); 396 | if(ret != 0) { 397 | LOG("Failure: first partial unmap"); 398 | munmap(ptr + (page_size * 3), page_size * 17); 399 | return; 400 | } 401 | LOG(" Step 1: Unmapped first 3 pages"); 402 | 403 | /* Unmap from middle (Case 4) - creates split */ 404 | ret = munmap(ptr + (page_size * 8), page_size * 4); 405 | if(ret != 0) { 406 | LOG("Failure: middle partial unmap"); 407 | munmap(ptr + (page_size * 3), page_size * 5); 408 | munmap(ptr + (page_size * 12), page_size * 8); 409 | return; 410 | } 411 | LOG(" Step 2: Unmapped 4 pages from middle, created split"); 412 | 413 | /* Clean up remaining regions */ 414 | munmap(ptr + (page_size * 3), page_size * 5); /* Lower region after split */ 415 | munmap(ptr + (page_size * 12), page_size * 8); /* Upper region after split */ 416 | 417 | LOG("Success: completed multiple partial unmaps"); 418 | } 419 | 420 | int main(int argc, char *argv[]) { 421 | page_size = 
sysconf(_SC_PAGESIZE); 422 | alloc_size = page_size * 16; 423 | 424 | for(int i = 0; i < 16; i++) { 425 | map_rw_memory_test(); 426 | map_rwx_memory_test(); 427 | map_rw_then_x_memory_test(); 428 | map_then_mremap_test(); 429 | map_static_address_test(); 430 | check_poison_bytes_test(); 431 | check_x_to_w_test(); 432 | check_map_partial_unmap_bottom_test(); 433 | check_map_partial_unmap_top_test(); 434 | unmap_partial_rw_memory_test(); 435 | check_full_unmap_test(); 436 | check_unmap_from_beginning_test(); 437 | check_unmap_middle_to_end_test(); 438 | check_unmap_single_page_middle_test(); 439 | check_unmap_two_pages_middle_test(); 440 | check_unmap_three_pages_middle_test(); 441 | check_unmap_five_pages_middle_test(); 442 | check_multiple_partial_unmaps_test(); 443 | } 444 | 445 | LOG("Done testing"); 446 | return OK; 447 | } -------------------------------------------------------------------------------- /src/mapguard.c: -------------------------------------------------------------------------------- 1 | /* MapGuard - Copyright Chris Rohlf - 2025 */ 2 | 3 | #include "mapguard.h" 4 | 5 | pthread_mutex_t _mg_mutex; 6 | 7 | mapguard_cache_metadata_t *mce_head; 8 | 9 | #define HASH_TABLE_SIZE 16384 10 | #define HASH_ADDR(addr) (((uintptr_t) (addr) >> 12) & (HASH_TABLE_SIZE - 1)) 11 | 12 | mapguard_cache_entry_t *g_hash_table[HASH_TABLE_SIZE]; 13 | 14 | /* Globals */ 15 | static size_t g_page_size; 16 | static uint32_t g_page_shift; 17 | 18 | /* Global policy configuration object */ 19 | mapguard_policy_t g_mapguard_policy; 20 | 21 | /* Pointers to hooked libc functions */ 22 | void *(*g_real_mmap)(void *addr, size_t length, int prot, int flags, int fd, off_t offset); 23 | int (*g_real_munmap)(void *addr, size_t length); 24 | int (*g_real_mprotect)(void *addr, size_t len, int prot); 25 | void *(*g_real_mremap)(void *__addr, size_t __old_len, size_t __new_len, int __flags, ...); 26 | 27 | __attribute__((constructor)) void mapguard_ctor() { 28 | #if THREAD_SUPPORT 29 | 
pthread_mutex_init(&_mg_mutex, NULL); 30 | #endif 31 | 32 | /* Enable configuration of mapguard via environment 33 | * variables during DSO load time only */ 34 | ENV_TO_INT(MG_PREVENT_RWX, g_mapguard_policy.prevent_rwx); 35 | ENV_TO_INT(MG_PREVENT_TRANSITION_TO_X, g_mapguard_policy.prevent_transition_to_x); 36 | ENV_TO_INT(MG_PREVENT_TRANSITION_FROM_X, g_mapguard_policy.prevent_transition_from_x); 37 | ENV_TO_INT(MG_PREVENT_STATIC_ADDRESS, g_mapguard_policy.prevent_static_address); 38 | ENV_TO_INT(MG_ENABLE_GUARD_PAGES, g_mapguard_policy.enable_guard_pages); 39 | ENV_TO_INT(MG_PANIC_ON_VIOLATION, g_mapguard_policy.panic_on_violation); 40 | ENV_TO_INT(MG_POISON_ON_ALLOCATION, g_mapguard_policy.poison_on_allocation); 41 | ENV_TO_INT(MG_USE_MAPPING_CACHE, g_mapguard_policy.use_mapping_cache); 42 | ENV_TO_INT(MG_ENABLE_SYSLOG, g_mapguard_policy.enable_syslog); 43 | 44 | /* Allow for fast paths to ignore policies and actions when disabled */ 45 | g_mapguard_policy.prevent_policies_enabled = g_mapguard_policy.prevent_rwx || 46 | g_mapguard_policy.prevent_transition_to_x || 47 | g_mapguard_policy.prevent_transition_from_x || 48 | g_mapguard_policy.prevent_static_address; 49 | 50 | g_mapguard_policy.actions_enabled = g_mapguard_policy.enable_guard_pages || 51 | g_mapguard_policy.panic_on_violation || 52 | g_mapguard_policy.poison_on_allocation || 53 | g_mapguard_policy.use_mapping_cache; 54 | 55 | /* In order for guard pages to work we need MCE */ 56 | if(g_mapguard_policy.enable_guard_pages == 1 && g_mapguard_policy.use_mapping_cache == 0) { 57 | LOG_AND_ABORT("MG_ENABLE_GUARD_PAGES == 1 but MG_USE_MAPPING_CACHE == 0"); 58 | } 59 | 60 | g_real_mmap = dlsym(RTLD_NEXT, "mmap"); 61 | g_real_munmap = dlsym(RTLD_NEXT, "munmap"); 62 | g_real_mprotect = dlsym(RTLD_NEXT, "mprotect"); 63 | g_real_mremap = dlsym(RTLD_NEXT, "mremap"); 64 | 65 | if(g_mapguard_policy.enable_syslog) { 66 | openlog("mapguard", LOG_CONS | LOG_PID, LOG_AUTH); 67 | } 68 | 69 | g_page_size = getpagesize(); 
70 | /* Calculate shift amount for fast page alignment (assumes power of 2) */ 71 | g_page_shift = __builtin_ctzl(g_page_size); 72 | mce_head = new_mce_page(); 73 | LOG("Allocated mce_head at %p", mce_head); 74 | } 75 | 76 | mapguard_cache_metadata_t *new_mce_page() { 77 | /* Produce a random page address as a hint for mmap */ 78 | uint64_t hint = ROUND_DOWN_PAGE(rand_uint64()); 79 | hint &= 0x3FFFFFFFF000; 80 | 81 | void *ptr = g_real_mmap((void *) hint, g_page_size * 3, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 82 | 83 | if(ptr == MAP_FAILED) { 84 | LOG_AND_ABORT("mmap failed in new_mce_page"); 85 | abort(); 86 | } 87 | 88 | make_guard_page((void *) ptr); 89 | make_guard_page((void *) ptr + (g_page_size * 2)); 90 | 91 | mapguard_cache_metadata_t *t = (mapguard_cache_metadata_t *) (ptr + g_page_size); 92 | t->total = (g_page_size - sizeof(mapguard_cache_metadata_t)) / sizeof(mapguard_cache_entry_t); 93 | t->free = t->total; 94 | t->next = NULL; 95 | return t; 96 | } 97 | 98 | /* Attempts to allocate a guard page at a given address */ 99 | void *allocate_guard_page(void *p) { 100 | return g_real_mmap(p, g_page_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 101 | } 102 | 103 | void make_guard_page(void *p) { 104 | g_real_mprotect(p, g_page_size, PROT_NONE); 105 | madvise(p, g_page_size, MADV_DONTNEED); 106 | LOG("Mapped guard page %p", p); 107 | } 108 | 109 | void unmap_top_guard_page(mapguard_cache_entry_t *mce) { 110 | #if DEBUG 111 | if(mce->guarded_t == false) { 112 | LOG_AND_ABORT("Attempting to unmap missing top guard page") 113 | } 114 | #endif 115 | g_real_munmap(mce->start + mce->size, g_page_size); 116 | mce->guarded_t = false; 117 | LOG("Unmapped top guard page %p", mce->start + mce->size); 118 | } 119 | 120 | void unmap_bottom_guard_page(mapguard_cache_entry_t *mce) { 121 | #if DEBUG 122 | if(mce->guarded_b == false) { 123 | LOG_AND_ABORT("Attempting to unmap missing bottom guard page") 124 | } 125 | #endif 126 | 
g_real_munmap(mce->start - g_page_size, g_page_size); 127 | mce->guarded_b = false; 128 | LOG("Unmapped bottom guard page %p", mce->start - g_page_size); 129 | } 130 | 131 | void unmap_guard_pages(mapguard_cache_entry_t *mce) { 132 | if(NULL == mce) { 133 | LOG_AND_ABORT("This should never happen: mce == NULL"); 134 | } 135 | 136 | if(mce->guarded_b) { 137 | unmap_bottom_guard_page(mce); 138 | } 139 | 140 | if(mce->guarded_t) { 141 | unmap_top_guard_page(mce); 142 | } 143 | } 144 | 145 | void map_bottom_guard_page(mapguard_cache_entry_t *mce) { 146 | make_guard_page(mce->start - g_page_size); 147 | mce->guarded_b = true; 148 | } 149 | 150 | void map_top_guard_page(mapguard_cache_entry_t *mce) { 151 | make_guard_page(mce->start + mce->size); 152 | mce->guarded_t = true; 153 | } 154 | 155 | void mark_guard_pages(mapguard_cache_entry_t *mce) { 156 | map_bottom_guard_page(mce); 157 | map_top_guard_page(mce); 158 | } 159 | 160 | mapguard_cache_entry_t *find_free_mce() { 161 | mapguard_cache_metadata_t *current = mce_head; 162 | mapguard_cache_metadata_t *previous = NULL; 163 | 164 | while(current) { 165 | if(current->free) { 166 | mapguard_cache_entry_t *entries = (mapguard_cache_entry_t *) ((uint8_t *) current + sizeof(mapguard_cache_metadata_t)); 167 | 168 | for(uint32_t i = 0; i < current->total; i++) { 169 | mapguard_cache_entry_t *candidate = entries + i; 170 | 171 | if(candidate->start == NULL) { 172 | current->free--; 173 | return candidate; 174 | } 175 | } 176 | 177 | current->free = 0; 178 | } 179 | 180 | previous = current; 181 | current = current->next; 182 | } 183 | 184 | mapguard_cache_metadata_t *new_page = new_mce_page(); 185 | 186 | if(previous) { 187 | previous->next = new_page; 188 | } else { 189 | mce_head = new_page; 190 | } 191 | 192 | new_page->free--; 193 | return (mapguard_cache_entry_t *) ((uint8_t *) new_page + sizeof(mapguard_cache_metadata_t)); 194 | } 195 | 196 | __attribute__((destructor)) void mapguard_dtor() { 197 | LOCK_MG(); 198 | 199 | 
if(g_mapguard_policy.enable_syslog) { 200 | closelog(); 201 | } 202 | 203 | mapguard_cache_metadata_t *current = mce_head; 204 | 205 | while(current != NULL) { 206 | #if DEBUG 207 | if(current->free != current->total) { 208 | LOG("Memory leak detected: MCE page at %p has %d/%d used entries", 209 | current, current->total - current->free, current->total); 210 | } 211 | #endif 212 | mapguard_cache_metadata_t *tmp = current->next; 213 | uint8_t *base = (uint8_t *) current - g_page_size; 214 | g_real_munmap(base, g_page_size * 3); 215 | current = tmp; 216 | } 217 | 218 | UNLOCK_MG(); 219 | } 220 | 221 | uint64_t rand_uint64(void) { 222 | uint64_t val = 0; 223 | syscall(SYS_getrandom, &val, sizeof(val), GRND_NONBLOCK); 224 | return val; 225 | } 226 | 227 | int32_t env_to_int(char *string) { 228 | char *p = getenv(string); 229 | 230 | if(p == NULL) { 231 | return 0; 232 | } 233 | 234 | char *endptr; 235 | errno = 0; 236 | unsigned long val = strtoul(p, &endptr, 0); 237 | 238 | if(errno != 0 || *endptr != '\0' || val > INT32_MAX) { 239 | return 0; 240 | } 241 | 242 | return (int32_t) val; 243 | } 244 | 245 | mapguard_cache_entry_t *get_cache_entry(void *addr) { 246 | /* Round down to page boundary for initial lookup */ 247 | void *page_addr = (void *) ROUND_DOWN_PAGE((uintptr_t) addr); 248 | uint32_t bucket = HASH_ADDR(page_addr); 249 | mapguard_cache_entry_t *mce = g_hash_table[bucket]; 250 | 251 | while(mce != NULL) { 252 | /* Check if addr falls within this mapping's range */ 253 | if(addr >= mce->start && addr < (mce->start + mce->size)) { 254 | return mce; 255 | } 256 | 257 | mce = mce->hash_next; 258 | } 259 | 260 | /* Still not found, now backward search for up to 256 pages */ 261 | for(uint32_t i = 1; i < 256; i++) { 262 | void *probe_addr = page_addr - (i << g_page_shift); /* Use shift instead of multiply */ 263 | uint32_t probe_bucket = HASH_ADDR(probe_addr); 264 | 265 | if(probe_bucket == bucket) { 266 | continue; 267 | } 268 | 269 | mce = 
g_hash_table[probe_bucket]; 270 | while(mce != NULL) { 271 | if(addr >= mce->start && addr < (mce->start + mce->size)) { 272 | return mce; 273 | } 274 | mce = mce->hash_next; 275 | } 276 | } 277 | 278 | return NULL; 279 | } 280 | 281 | void cache_entry_insert(mapguard_cache_entry_t *mce) { 282 | uint32_t bucket = HASH_ADDR(mce->start); 283 | 284 | /* Insert at head of chain */ 285 | mce->hash_next = g_hash_table[bucket]; 286 | g_hash_table[bucket] = mce; 287 | } 288 | 289 | void cache_entry_remove(mapguard_cache_entry_t *mce) { 290 | uint32_t bucket = HASH_ADDR(mce->start); 291 | mapguard_cache_entry_t *current = g_hash_table[bucket]; 292 | mapguard_cache_entry_t *prev = NULL; 293 | 294 | while(current != NULL) { 295 | if(current == mce) { 296 | /* Found it - remove from chain */ 297 | if(prev == NULL) { 298 | /* Removing head of chain */ 299 | g_hash_table[bucket] = current->hash_next; 300 | } else { 301 | /* Removing from middle/end of chain */ 302 | prev->hash_next = current->hash_next; 303 | } 304 | mce->hash_next = NULL; 305 | return; 306 | } 307 | prev = current; 308 | current = current->hash_next; 309 | } 310 | 311 | LOG_AND_ABORT("Failed to find cache entry to remove"); 312 | } 313 | 314 | /* Hook mmap in libc */ 315 | void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { 316 | /* We don't intercept file backed mappings */ 317 | if(fd != -1) { 318 | return g_real_mmap(addr, length, prot, flags, fd, offset); 319 | } 320 | 321 | /* Precompute values before taking lock */ 322 | size_t rounded_length = ROUND_UP_PAGE(length); 323 | const bool is_rwx = (prot & PROT_WRITE) && (prot & PROT_EXEC); 324 | 325 | /* Fast path: no policies enabled, no tracking needed */ 326 | if(g_mapguard_policy.prevent_policies_enabled == 0 && g_mapguard_policy.actions_enabled == 0) { 327 | return g_real_mmap(addr, rounded_length, prot, flags, fd, offset); 328 | } 329 | 330 | /* Policy checks can happen before lock - they only read immutable config */ 331 | 
if(g_mapguard_policy.prevent_rwx && is_rwx) { 332 | LOG("Preventing RWX memory allocation"); 333 | MAYBE_PANIC(); 334 | return MAP_FAILED; 335 | } 336 | 337 | if(g_mapguard_policy.prevent_static_address && addr != 0) { 338 | LOG("Preventing memory allocation at static address %p", addr); 339 | MAYBE_PANIC(); 340 | return MAP_FAILED; 341 | } 342 | 343 | /* Perform the actual mmap */ 344 | void *map_ptr; 345 | size_t alloc_length = rounded_length; 346 | 347 | if(g_mapguard_policy.enable_guard_pages) { 348 | alloc_length += (g_page_size << 1); 349 | } 350 | 351 | map_ptr = g_real_mmap(addr, alloc_length, prot, flags, fd, offset); 352 | 353 | if(map_ptr == MAP_FAILED) { 354 | return MAP_FAILED; 355 | } 356 | 357 | if(g_mapguard_policy.enable_guard_pages) { 358 | make_guard_page(map_ptr); 359 | make_guard_page(map_ptr + g_page_size + rounded_length); 360 | } 361 | 362 | /* Handle cache and poisoning together to reduce branches */ 363 | if(g_mapguard_policy.use_mapping_cache) { 364 | LOCK_MG(); 365 | mapguard_cache_entry_t *mce = find_free_mce(); 366 | 367 | if(mce == NULL) { 368 | LOG_AND_ABORT("Failed to find free MCE entry in mmap"); 369 | } 370 | 371 | /* Setup MCE based on guard page configuration */ 372 | if(g_mapguard_policy.enable_guard_pages) { 373 | mce->start = map_ptr + g_page_size; 374 | mce->guarded_b = true; 375 | mce->guarded_t = true; 376 | } else { 377 | mce->start = map_ptr; 378 | } 379 | 380 | mce->size = rounded_length; 381 | mce->immutable_prot = prot; 382 | mce->current_prot = prot; 383 | cache_entry_insert(mce); 384 | 385 | /* Poison if needed and writable */ 386 | if(g_mapguard_policy.poison_on_allocation && (prot & PROT_WRITE)) { 387 | memset(mce->start, MG_POISON_BYTE, length); 388 | } 389 | 390 | UNLOCK_MG(); 391 | return mce->start; 392 | } 393 | 394 | if(g_mapguard_policy.poison_on_allocation && (prot & PROT_WRITE)) { 395 | memset(map_ptr, MG_POISON_BYTE, length); 396 | } 397 | 398 | return map_ptr; 399 | } 400 | 401 | /* Hook munmap in libc 
*/ 402 | int munmap(void *addr, size_t length) { 403 | if(length == 0) { 404 | return EINVAL; 405 | } 406 | 407 | if(((uintptr_t) addr & (g_page_size - 1)) != 0) { 408 | return EINVAL; 409 | } 410 | 411 | size_t rounded_length = ROUND_UP_PAGE(length); 412 | 413 | /* Fast path: if not tracking, just call through */ 414 | if(g_mapguard_policy.use_mapping_cache == false) { 415 | return g_real_munmap(addr, rounded_length); 416 | } 417 | 418 | mapguard_cache_entry_t *mce = NULL; 419 | int32_t ret; 420 | 421 | length = ROUND_UP_PAGE(length); 422 | 423 | /* Remove tracked pages from the cache and unmap them 424 | * The cache hash table only has to be updated when 425 | * mce->start changes because that is how the table 426 | * is indexed */ 427 | if(g_mapguard_policy.use_mapping_cache) { 428 | LOCK_MG(); 429 | mce = get_cache_entry(addr); 430 | 431 | if(mce == NULL) { 432 | UNLOCK_MG(); 433 | LOG_AND_ABORT("No mapguard cache entry found for address %p", addr); 434 | } 435 | 436 | /* Case 1: Handle full unmapping (the most common case) */ 437 | if(mce->start == addr && mce->size == length) { 438 | if(g_mapguard_policy.enable_guard_pages == true && mce->guarded_b == true) { 439 | length += g_page_size; 440 | addr -= g_page_size; 441 | } 442 | 443 | if(g_mapguard_policy.enable_guard_pages == true && mce->guarded_t == true) { 444 | length += g_page_size; 445 | } 446 | 447 | ret = g_real_munmap(addr, length); 448 | 449 | if(ret != 0) { 450 | UNLOCK_MG(); 451 | return ret; 452 | } 453 | 454 | cache_entry_remove(mce); 455 | 456 | mapguard_cache_metadata_t *metadata = (mapguard_cache_metadata_t *) ROUND_DOWN_PAGE((uintptr_t) mce); 457 | metadata->free++; 458 | 459 | memset(mce, 0, sizeof(mapguard_cache_entry_t)); 460 | UNLOCK_MG(); 461 | return ret; 462 | } 463 | 464 | /* Case 2: Partial unmapping from the beginning of the range */ 465 | if(mce->start == addr && length < mce->size) { 466 | /* 1. 
Reuse an existing page as the new bottom guard page */ 467 | if(mce->guarded_b == true) { 468 | void *unmap_addr = addr - g_page_size; 469 | 470 | ret = g_real_munmap(unmap_addr, length); 471 | 472 | if(ret != 0) { 473 | UNLOCK_MG(); 474 | return ret; 475 | } 476 | 477 | /* The new guard page is at the first page of remaining allocation */ 478 | void *new_guard_addr = unmap_addr + length; 479 | 480 | /* Temporarily make it writable to zeroize */ 481 | g_real_mprotect(new_guard_addr, g_page_size, PROT_READ | PROT_WRITE); 482 | memset(new_guard_addr, 0, g_page_size); 483 | make_guard_page(new_guard_addr); 484 | 485 | cache_entry_remove(mce); 486 | mce->start = new_guard_addr + g_page_size; 487 | mce->size -= length; 488 | } else { 489 | /* No guard page */ 490 | ret = g_real_munmap(addr, length); 491 | 492 | if(ret != 0) { 493 | UNLOCK_MG(); 494 | return ret; 495 | } 496 | 497 | cache_entry_remove(mce); 498 | mce->start = addr + length; 499 | mce->size -= length; 500 | } 501 | 502 | cache_entry_insert(mce); 503 | UNLOCK_MG(); 504 | return ret; 505 | } 506 | 507 | /* Case 3: Unmapping from middle to the end of the range */ 508 | if(addr >= mce->start && (addr + length) == (mce->start + mce->size)) { 509 | /* Reuse an existing page as the new top guard page */ 510 | if(g_mapguard_policy.enable_guard_pages == true && mce->guarded_t == true) { 511 | ret = g_real_munmap(addr + g_page_size, length); 512 | 513 | if(ret != 0) { 514 | UNLOCK_MG(); 515 | return ret; 516 | } 517 | 518 | /* Temporarily make it writable to zeroize */ 519 | g_real_mprotect(addr, g_page_size, PROT_READ | PROT_WRITE); 520 | memset(addr, 0, g_page_size); 521 | make_guard_page(addr); 522 | 523 | mce->size = addr - mce->start; 524 | } else { 525 | /* No guard page, simple case */ 526 | ret = g_real_munmap(addr, length); 527 | 528 | if(ret != 0) { 529 | UNLOCK_MG(); 530 | return ret; 531 | } 532 | 533 | /* Calculate new size */ 534 | mce->size = addr - mce->start; 535 | } 536 | 537 | UNLOCK_MG(); 538 | 
return ret; 539 | } 540 | 541 | /* Case 4: Unmap a hole in the range, split into two regions */ 542 | if(addr >= mce->start && (addr + length) < (mce->start + mce->size)) { 543 | /* Calculate upper region bounds */ 544 | void *upper_start = addr + length; 545 | size_t upper_size = (mce->start + mce->size) - upper_start; 546 | size_t lower_size = addr - mce->start; 547 | 548 | /* Allocate new cache entry for upper region */ 549 | mapguard_cache_entry_t *upper_mce = find_free_mce(); 550 | 551 | if(upper_mce == NULL) { 552 | UNLOCK_MG(); 553 | LOG_AND_ABORT("Failed to allocate MCE for split mapping"); 554 | } 555 | 556 | /* Handle guard pages if enabled */ 557 | if(g_mapguard_policy.enable_guard_pages == true) { 558 | /* If only unmapping a single page, reuse it as top guard for lower region */ 559 | if(length == g_page_size) { 560 | /* Zeroize and convert the unmapped page to a guard page */ 561 | g_real_mprotect(addr, g_page_size, PROT_READ | PROT_WRITE); 562 | memset(addr, 0, g_page_size); 563 | make_guard_page(addr); 564 | 565 | /* Lower region gets new top guard */ 566 | mce->guarded_t = true; 567 | 568 | /* Upper region has no bottom guard */ 569 | upper_mce->guarded_b = false; 570 | upper_mce->guarded_t = mce->guarded_t; 571 | } else if(length == (g_page_size * 2)) { 572 | /* If unmapping exactly 2 pages, reuse both as guards */ 573 | /* First page becomes top guard for lower region */ 574 | g_real_mprotect(addr, g_page_size, PROT_READ | PROT_WRITE); 575 | memset(addr, 0, g_page_size); 576 | make_guard_page(addr); 577 | mce->guarded_t = true; 578 | 579 | /* Second page becomes bottom guard for upper region */ 580 | void *upper_guard = addr + g_page_size; 581 | g_real_mprotect(upper_guard, g_page_size, PROT_READ | PROT_WRITE); 582 | memset(upper_guard, 0, g_page_size); 583 | make_guard_page(upper_guard); 584 | upper_mce->guarded_b = true; 585 | 586 | /* Adjust upper region to account for its new bottom guard */ 587 | upper_start = upper_guard + g_page_size; 588 
| upper_size = (mce->start + mce->size) - upper_start; 589 | upper_mce->guarded_t = true; 590 | } else { 591 | /* If unmapping 3+ pages, reuse first and last as guards, unmap middle pages */ 592 | /* First page becomes top guard for lower region */ 593 | g_real_mprotect(addr, g_page_size, PROT_READ | PROT_WRITE); 594 | memset(addr, 0, g_page_size); 595 | make_guard_page(addr); 596 | mce->guarded_t = true; 597 | 598 | /* Last page becomes bottom guard for upper region */ 599 | void *upper_guard = addr + length - g_page_size; 600 | g_real_mprotect(upper_guard, g_page_size, PROT_READ | PROT_WRITE); 601 | memset(upper_guard, 0, g_page_size); 602 | make_guard_page(upper_guard); 603 | upper_mce->guarded_b = true; 604 | 605 | /* Unmap the pages between the two new guard pages */ 606 | void *unmap_start = addr + g_page_size; 607 | size_t unmap_length = length - (g_page_size * 2); 608 | ret = g_real_munmap(unmap_start, unmap_length); 609 | 610 | if(ret != 0) { 611 | memset(upper_mce, 0, sizeof(mapguard_cache_entry_t)); 612 | mapguard_cache_metadata_t *metadata = (mapguard_cache_metadata_t *) ROUND_DOWN_PAGE((uintptr_t) upper_mce); 613 | metadata->free++; 614 | UNLOCK_MG(); 615 | return ret; 616 | } 617 | 618 | /* Adjust upper region to account for its new bottom guard */ 619 | upper_start = upper_guard + g_page_size; 620 | upper_size = (mce->start + mce->size) - upper_start; 621 | upper_mce->guarded_t = mce->guarded_t; 622 | } 623 | } else { 624 | /* No guard pages enabled, simple unmap */ 625 | ret = g_real_munmap(addr, length); 626 | 627 | if(ret != 0) { 628 | memset(upper_mce, 0, sizeof(mapguard_cache_entry_t)); 629 | mapguard_cache_metadata_t *metadata = (mapguard_cache_metadata_t *) ROUND_DOWN_PAGE((uintptr_t) upper_mce); 630 | metadata->free++; 631 | UNLOCK_MG(); 632 | return ret; 633 | } 634 | } 635 | 636 | /* Initialize upper region MCE (common for both guard/no-guard cases) */ 637 | upper_mce->start = upper_start; 638 | upper_mce->size = upper_size; 639 | 
upper_mce->immutable_prot = mce->immutable_prot; 640 | upper_mce->current_prot = mce->current_prot; 641 | cache_entry_insert(upper_mce); 642 | 643 | /* Update lower region (original MCE) */ 644 | mce->size = lower_size; 645 | 646 | UNLOCK_MG(); 647 | return 0; 648 | } 649 | 650 | UNLOCK_MG(); 651 | 652 | /* Unknown partial unmap case */ 653 | LOG_AND_ABORT("Unknown partial munmap case: addr=%p, length=%zu, mce->start=%p, mce->size=%zu", 654 | addr, length, mce->start, mce->size); 655 | } 656 | 657 | return g_real_munmap(addr, length); 658 | } 659 | 660 | /* Hook mprotect in libc */ 661 | int mprotect(void *addr, size_t len, int prot) { 662 | mapguard_cache_entry_t *mce = NULL; 663 | int32_t ret; 664 | 665 | /* Prevent RWX mappings */ 666 | if(g_mapguard_policy.prevent_rwx && (prot & PROT_WRITE) && (prot & PROT_EXEC)) { 667 | LOG("Preventing RWX mprotect"); 668 | MAYBE_PANIC(); 669 | return ERROR; 670 | } 671 | 672 | /* Prevent transition to/from X (requires the mapping cache) */ 673 | if(g_mapguard_policy.use_mapping_cache) { 674 | LOCK_MG(); 675 | mce = get_cache_entry(addr); 676 | 677 | if(mce != NULL) { 678 | if(g_mapguard_policy.prevent_transition_to_x && (prot & PROT_EXEC) && (mce->immutable_prot & PROT_WRITE)) { 679 | LOG("Cannot allow mapping %p to be set PROT_EXEC, it was previously PROT_WRITE", addr); 680 | MAYBE_PANIC(); 681 | errno = EINVAL; 682 | UNLOCK_MG(); 683 | return ERROR; 684 | } 685 | 686 | if(g_mapguard_policy.prevent_transition_from_x && (prot & PROT_WRITE) && (mce->immutable_prot & PROT_EXEC)) { 687 | LOG("Cannot allow mapping %p to transition from PROT_EXEC to PROT_WRITE", addr); 688 | MAYBE_PANIC(); 689 | errno = EINVAL; 690 | UNLOCK_MG(); 691 | return ERROR; 692 | } 693 | } 694 | 695 | ret = g_real_mprotect(addr, len, prot); 696 | 697 | if(ret == 0 && mce) { 698 | /* Its possible the caller changed the protections on 699 | * only a portion of the mapping. 
Log it but ignore it */ 700 | if(mce->size != len) { 701 | LOG("Cached mapping size %zu bytes but mprotected %zu bytes", mce->size, len); 702 | } 703 | 704 | /* Update the saved page permissions, even if the size doesn't match */ 705 | mce->immutable_prot |= prot; 706 | mce->current_prot = prot; 707 | } 708 | 709 | UNLOCK_MG(); 710 | } else { 711 | ret = g_real_mprotect(addr, len, prot); 712 | } 713 | 714 | return ret; 715 | } 716 | 717 | /* Hook mremap in libc 718 | * mremap is a complex syscall when you consider all of the flags. 719 | * Instead of trying to intelligently handle these flags we just 720 | * transparently proxy the call and do our best to handle what the 721 | * kernel decides to do with the mapping. */ 722 | void *mremap(void *__addr, size_t __old_len, size_t __new_len, int __flags, ...) { 723 | void *new_address = NULL; 724 | 725 | if((__flags & MREMAP_FIXED) || (__flags & MAP_FIXED_NOREPLACE)) { 726 | va_list vl; 727 | va_start(vl, __flags); 728 | new_address = va_arg(vl, void *); 729 | 730 | if(g_mapguard_policy.prevent_static_address) { 731 | LOG("Attempted mremap with MREMAP_FIXED at %p", new_address); 732 | MAYBE_PANIC(); 733 | errno = EINVAL; 734 | return MAP_FAILED; 735 | } 736 | } 737 | 738 | void *map_ptr = NULL; 739 | 740 | if(new_address != NULL) { 741 | map_ptr = g_real_mremap(__addr, __old_len, __new_len, __flags, new_address); 742 | } else { 743 | map_ptr = g_real_mremap(__addr, __old_len, __new_len, __flags); 744 | } 745 | 746 | if(g_mapguard_policy.use_mapping_cache && map_ptr != MAP_FAILED) { 747 | LOCK_MG(); 748 | mapguard_cache_entry_t *mce = get_cache_entry(__addr); 749 | 750 | /* We are remapping a previously tracked allocation. This 751 | * means we may have to reallocate guard pages and update 752 | * the status of our cache */ 753 | if(mce && map_ptr != MAP_FAILED) { 754 | /* mremap may just allocate new pages above the existing 755 | * allocation to resize it. 
If it does then theres no 756 | * need to unmap/remap the bottom guard page. If guard 757 | * pages are configured then its probably not possible 758 | * for mremap to grow the allocation in place anyway but 759 | * this is a cheap check regardless */ 760 | if(mce->start != map_ptr) { 761 | unmap_guard_pages(mce); 762 | mce->guarded_b = false; 763 | mce->guarded_t = false; 764 | 765 | cache_entry_remove(mce); 766 | mce->start = map_ptr; 767 | mce->size = __new_len; 768 | cache_entry_insert(mce); 769 | } else { 770 | mce->size = __new_len; 771 | } 772 | 773 | /* Best effort guard page creation */ 774 | void *expected_bottom = map_ptr - g_page_size; 775 | void *ptr = g_real_mmap(expected_bottom, g_page_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 776 | 777 | if(ptr != MAP_FAILED) { 778 | if(ptr == expected_bottom) { 779 | mce->guarded_b = true; 780 | } else { 781 | g_real_munmap(ptr, g_page_size); 782 | } 783 | } 784 | 785 | void *expected_top = map_ptr + __new_len; 786 | ptr = g_real_mmap(expected_top, g_page_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 787 | 788 | if(ptr != MAP_FAILED) { 789 | if(ptr == expected_top) { 790 | mce->guarded_t = true; 791 | } else { 792 | g_real_munmap(ptr, g_page_size); 793 | } 794 | } 795 | } 796 | 797 | UNLOCK_MG(); 798 | } 799 | 800 | return map_ptr; 801 | } 802 | --------------------------------------------------------------------------------