├── Makefile
├── README.md
├── dietlibc_fpic
│   └── dietlibc.a
├── include
│   └── misc.h
├── relros.c
├── static_to_dyn.c
├── test.c
├── test2.c
└── test3.c

/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | 	gcc -g relros.c -o relros
3 | 	gcc -g static_to_dyn.c -o static_to_dyn
4 | 	gcc -static test.c -o test
5 | 	gcc -nostdlib -c -fPIC test2.c -o test2.o
6 | 	gcc -nostdlib test2.o dietlibc_fpic/dietlibc.a -o test2
7 | 	./relros test
8 | 	./static_to_dyn test2
9 | 	@echo 'full RELRO applied to test binary'
10 | 	@echo 'ASLR requirements applied to test2 binary'
11 | clean:
12 | 	rm -f *.o relros static_to_dyn test test2
13 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | -= R&D for forcing relro and aslr on statically linked executables =-
2 | 
3 | BUILD NOTES: These tools need to be updated to work on versions of glibc that don't
4 | use generic_start_main. This code was all designed on Ubuntu 16 with libc 2.27
5 | and GNU CC version 7.3.0.
6 | 
7 | Run 'make'
8 | 
9 | This builds relros.c and static_to_dyn.c, both of which are automatically applied to
10 | the test and test2 binaries.
11 | 
12 | After running make, test will be a static executable with RELRO applied, and test2 will be a static
13 | executable with ASLR applied.
14 | 
15 | - elfmaster
16 | 
17 | 
--------------------------------------------------------------------------------
/dietlibc_fpic/dietlibc.a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/elfmaster/static_binary_mitigations/95ce4237ca7cf4e1f3ab3af1485f6b4a26132a92/dietlibc_fpic/dietlibc.a
--------------------------------------------------------------------------------
/include/misc.h:
--------------------------------------------------------------------------------
1 | #ifndef _MISC_H_
2 | #define _MISC_H_
3 | 
4 | #ifndef LIST_FOREACH_SAFE
5 | #define LIST_FOREACH_SAFE(var, head, field, tvar)		\
6 | 	for ((var) = LIST_FIRST((head));			\
7 | 	    (var) && ((tvar) = LIST_NEXT((var), field), 1);	\
8 | 	    (var) = (tvar))
9 | #endif
10 | #ifndef SLIST_FOREACH_SAFE
11 | #define SLIST_FOREACH_SAFE(var, head, field, tvar)		\
12 | 	for ((var) = SLIST_FIRST((head));			\
13 | 	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);	\
14 | 	    (var) = (tvar))
15 | #endif
16 | 
17 | #endif
18 | 
--------------------------------------------------------------------------------
/relros.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2018, Ryan O'Neill
3 |  * All rights reserved.
4 |  *
5 |  * Redistribution and use in source and binary forms, with or without
6 |  * modification, are permitted provided that the following conditions are met:
7 |  *
8 |  * 1. Redistributions of source code must retain the above copyright notice, this
9 |  *    list of conditions and the following disclaimer.
10 |  * 2. Redistributions in binary form must reproduce the above copyright notice,
11 |  *    this list of conditions and the following disclaimer in the documentation
12 |  *    and/or other materials provided with the distribution.
13 |  *
14 |  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15 |  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 |  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 |  * DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
18 |  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19 |  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 |  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21 |  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 |  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 |  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 |  */
25 | 
26 | /*
27 |  * gcc relros.c -o relros
28 |  * ./relros
29 |  */
30 | 
31 | #define _GNU_SOURCE
32 | 
33 | #include <stdio.h>
34 | #include <stdlib.h>
35 | #include <string.h>
36 | #include <stdint.h>
37 | #include <stdbool.h>
38 | #include <unistd.h>
39 | #include <errno.h>
40 | #include <fcntl.h>
41 | #include <signal.h>
42 | #include <elf.h>
43 | #include <link.h>
44 | #include <sys/types.h>
45 | #include <sys/stat.h>
46 | #include <sys/mman.h>
47 | #include <sys/time.h>
48 | #include <sys/queue.h>
49 | #define PADDING_SIZE 1024
50 | #ifndef PAGE_SIZE
51 | #define PAGE_SIZE 4096
52 | #endif
53 | #define PAGE_ALIGN(x) (x & ~(PAGE_SIZE - 1))
54 | #define PAGE_ALIGN_UP(x) (PAGE_ALIGN(x) + PAGE_SIZE)
55 | #define PAGE_ROUND(x) (PAGE_ALIGN_UP(x))
56 | 
57 | 
58 | struct segment {
59 | 	uint64_t vaddr;
60 | 	uint64_t offset;
61 | 	uint64_t memsz;
62 | 	uint64_t filesz;
63 | };
64 | 
65 | /*
66 |  * We have removed all calls to libelfmaster, since it is not yet open sourced, and
67 |  * are using a minimal set of code for resolving symbols.
68 |  */
69 | typedef struct elfobj2 {
70 | 	uint8_t *mem;
71 | 	ElfW(Ehdr) *ehdr;
72 | 	ElfW(Phdr) *phdr;
73 | 	ElfW(Shdr) *shdr;
74 | 	ElfW(Sym) *symtab;
75 | 	bool dynamic_linked;
76 | 	size_t symcount;
77 | 	int fd;
78 | 	char *shstrtab;
79 | 	char *strtab;
80 | 	struct stat st;
81 | 	uint64_t text_offset;
82 | 	uint64_t text_base;
83 | 	size_t size;
84 | 	char *path;
85 | } elfobj2_t;
86 | 
87 | bool _elf_symbol_by_name(elfobj2_t *, char *, ElfW(Sym) *);
88 | void * _elf_address_pointer(elfobj2_t *, uint64_t);
89 | bool _elf_open_object(char *, elfobj2_t *);
90 | 
91 | #define IP_RELATIVE_ADDR(target) \
92 | 	(get_rip() - ((char *)&get_rip_label - (char *)target))
93 | 
94 | extern long get_rip_label;
95 | 
96 | unsigned long get_rip(void)
97 | {
98 | 	long ret;
99 | 	__asm__ __volatile__
100 | 	(
101 | 	"call get_rip_label	\n"
102 | 	".globl get_rip_label	\n"
103 | 	"get_rip_label:		\n"
104 | 	"pop %%rax		\n"
105 | 	"mov %%rax, %0" : "=r"(ret)
106 | 	);
107 | 
108 | 	return ret;
109 | }
110 | #if DEBUG
111 | static inline __attribute__((always_inline)) long
112 | __write(long fd, char *buf, unsigned long len)
113 | {
114 | 	long ret;
115 | 	__asm__ volatile(
116 | 		"mov %0, %%rdi\n"
117 | 		"mov %1, %%rsi\n"
118 | 		"mov %2, %%rdx\n"
119 | 		"mov $1, %%rax\n"
120 | 		"syscall" : : "g"(fd), "g"(buf), "g"(len));
121 | 	asm volatile("mov %%rax, %0" : "=r"(ret));
122 | 	return ret;
123 | }
124 | #endif
125 | 
126 | void *
127 | _elf_text_pointer(elfobj2_t *obj, uint64_t addr)
128 | {
129 | 	uint64_t offset = obj->text_offset + addr - obj->text_base;
130 | 
131 | 	printf("%lx - %lx = %lx\n", addr, obj->text_base, addr - obj->text_base);
132 | 	if (offset > obj->size - 1)
133 | 		return NULL;
134 | 	return (void *)((uint8_t *)&obj->mem[offset]);
135 | }
136 | 
137 | bool
138 | _elf_symbol_by_name(elfobj2_t *obj, char *name, ElfW(Sym) *out)
139 | {
140 | 
141 | 	ElfW(Sym) *sym = obj->symtab;
142 | 	char *strtab = obj->strtab;
143 | 	char *tmp;
144 | 	int i;
145 | 
146 | 	for (i = 0; i < obj->symcount; i++) {
147 | 		if (strcmp(&strtab[sym[i].st_name], name) == 0) {
148 | 			memcpy(out, &sym[i], sizeof(*out));
149 | 			return true;
150 | 		}
151 | 	}
152 | 	return false;
153 | } 154 | 155 | bool 156 | _elf_close_object(elfobj2_t *obj) 157 | { 158 | int ret; 159 | 160 | ret = munmap(obj->mem, obj->size); 161 | return ret != 0 ? false : true; 162 | } 163 | 164 | bool 165 | _elf_open_object(char *path, struct elfobj2 *obj) 166 | { 167 | int fd, i; 168 | ElfW(Ehdr) *ehdr; 169 | ElfW(Phdr) *phdr; 170 | ElfW(Shdr) *shdr; 171 | ElfW(Sym) *symtab; 172 | char *shstrtab; 173 | char *strtab; 174 | struct stat st; 175 | uint8_t *mem; 176 | 177 | fd = open(path, O_RDWR); 178 | if (fd < 0) { 179 | perror("open"); 180 | return false; 181 | } 182 | if (fstat(fd, &st) < 0) { 183 | perror("fstat"); 184 | return false; 185 | } 186 | memset(obj, 0, sizeof(*obj)); 187 | 188 | mem = mmap(NULL, st.st_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); 189 | if (mem == MAP_FAILED) { 190 | perror("mmap"); 191 | return false; 192 | } 193 | ehdr = (ElfW(Ehdr) *)mem; 194 | phdr = (ElfW(Phdr) *)&mem[ehdr->e_phoff]; 195 | shdr = (ElfW(Shdr) *)&mem[ehdr->e_shoff]; 196 | shstrtab = (char *)&mem[shdr[ehdr->e_shstrndx].sh_offset]; 197 | 198 | /* 199 | * Grab the data segments offset and base for use in our 200 | * elf_address_pointer function 201 | */ 202 | for (i = 0; i < ehdr->e_phnum; i++) { 203 | if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset == 0) { 204 | obj->text_offset = phdr[i].p_offset; 205 | obj->text_base = phdr[i].p_vaddr; 206 | } 207 | if (phdr[i].p_type == PT_DYNAMIC) 208 | obj->dynamic_linked = true; 209 | } 210 | /* 211 | * Grab the symbol table and string table .strtab. for future 212 | * symbol resolution. 213 | */ 214 | for (i = 0; i < ehdr->e_shnum; i++) { 215 | if (strcmp(&shstrtab[shdr[i].sh_name], ".symtab") == 0) { 216 | symtab = (ElfW(Sym) *)&mem[shdr[i].sh_offset]; 217 | obj->symcount = shdr[i].sh_size / shdr[i].sh_entsize; 218 | } else if (strcmp(&shstrtab[shdr[i].sh_name], ".strtab") == 0) { 219 | strtab = (char *)&mem[shdr[i].sh_offset]; 220 | } 221 | } 222 | 223 | obj->symtab = symtab; 224 | obj->path = path; 225 | obj->size = st.st_size; 226 | obj->mem = mem; 227 | obj->st = st; 228 | obj->ehdr = ehdr; 229 | obj->phdr = phdr; 230 | obj->shdr = shdr; 231 | obj->fd = fd; 232 | obj->strtab = strtab; 233 | obj->shstrtab = shstrtab; 234 | return true; 235 | } 236 | 237 | #define PUSH_LEN 5 238 | #define PUSH_RET_LEN 6 /* push 0x00000000; ret */ 239 | /* 240 | * enable_relro() is injected into the target static executable 241 | * and is invoked instead of main() by the glibc initialization 242 | * routine known as generic_start_main(). We do it this way because 243 | * we must allow all of the initialization routines, including 244 | * generic_start_main() to issue writes to the areas that we will 245 | * eventually be mprotecting as read-only. Currently we have some 246 | * limitations that won't allow multi-threaded applications to work 247 | * right since we mark .tbss and .tdata as read-only. However .data 248 | * is not touched, and any single threaded static executable should 249 | * fair OK (Yah right; prototype, yikes!). 250 | */ 251 | volatile void 252 | unused_delta_begin(void) { volatile int esoteric; return; } 253 | #pragma GCC push_options 254 | #pragma GCC optimize ("-O0") 255 | volatile uint64_t 256 | enable_relro(void) 257 | { 258 | int i; 259 | uint8_t *mem = 260 | (uint8_t *)(sizeof(uintptr_t) == 4 ? 
0x8048000 : 0x400000);
261 | 	ElfW(Ehdr) *ehdr = (ElfW(Ehdr) *)mem;
262 | 	ElfW(Phdr) *phdr = (ElfW(Phdr) *)&mem[ehdr->e_phoff];
263 | 	uint32_t *ptr;
264 | 	uint64_t main_addr = 0, stub_vaddr = 0;
265 | 	uint64_t data_vaddr, rsi, rdi, rdx, rcx;
266 | 	uint64_t relro_vaddr = 0, relro_size;
267 | 	uint64_t retaddr;
268 | 
269 | 	bool found_data = false;
270 | 	/*
271 | 	 * Save register state for main(argc, argv, envp)
272 | 	 */
273 | 	asm volatile("mov %%rsi, %0" : "=r"(rsi));
274 | 	asm volatile("mov %%rdi, %0" : "=r"(rdi));
275 | 	asm volatile("mov %%rdx, %0" : "=r"(rdx));
276 | 	asm volatile("mov %%rcx, %0" : "=r"(rcx));
277 | 
278 | 	/*
279 | 	 * If the PT_GNU_RELRO segment exists (which until now has ironically
280 | 	 * been useless for statically linked executables), we will make use
281 | 	 * of it to determine where we want to apply the mitigation.
282 | 	 * We also want to find the 3rd loadable segment, which is where
283 | 	 * our enable_relro() code is stored as a secondary code segment.
284 | 	 */
285 | 	for (relro_vaddr = 0, i = 0; i < ehdr->e_phnum; i++) {
286 | 		if (phdr[i].p_type == PT_GNU_RELRO) {
287 | 			relro_vaddr = phdr[i].p_vaddr;
288 | 			relro_size = PAGE_ALIGN_UP(phdr[i].p_memsz); /* page-rounded size of the relro region */
289 | 		} else if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset != 0) {
290 | 			if (found_data == true) {
291 | 				stub_vaddr = phdr[i].p_vaddr;
292 | 				break;
293 | 			}
294 | 			found_data = true;
295 | 			continue;
296 | 		}
297 | 	}
298 | 	if (relro_vaddr > 0) {
299 | 		relro_vaddr = PAGE_ALIGN(relro_vaddr);
300 | 		asm volatile(
301 | 			"mov %0, %%rdi	\n"
302 | 			"mov %1, %%rsi	\n"
303 | 			"mov %2, %%rdx	\n"
304 | 			"mov $10, %%rax	\n"
305 | 			"syscall" : : "g"(relro_vaddr),
306 | 			"g"(4096), "g"(PROT_READ));
307 | 		goto process_main; /* Let's go call main() */
308 | 	}
309 | 	/*
310 | 	 * Why have we arrived at this code?
311 | 	 * If for some reason the linker script used to build the static
312 | 	 * executable didn't include the RELRO segment, then we will simply
313 | 	 * use the data segment, which isn't as reliable since it gives the
314 | 	 * p_memsz of the entire data segment, instead of just the areas that
315 | 	 * should be relro'd up until the .data section (don't confuse the .data
316 | 	 * section with the data segment). Consequently we only mprotect
317 | 	 * one page as read-only (although 99% of the time this is enough).
318 | 	 */
319 | 	for (data_vaddr = 0, i = 0; i < ehdr->e_phnum; i++) {
320 | 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset != 0) {
321 | 			if (data_vaddr == 0) {
322 | 				data_vaddr = phdr[i].p_vaddr;
323 | 			}
324 | 			else {
325 | 				/*
326 | 				 * Get the address of our stub code. Don't
327 | 				 * forget the first 4 bytes are the magic
328 | 				 * number of the address to main()
329 | 				 */
330 | 				stub_vaddr = phdr[i].p_vaddr;
331 | 				if (stub_vaddr && data_vaddr)
332 | 					break;
333 | 			}
334 | 		}
335 | 	}
336 | 	if (data_vaddr == 0) {
337 | 		asm("int3");
338 | 	}
339 | 	/*
340 | 	 * Mprotect everything from .tdata to the beginning of .data;
341 | 	 * this means that only single-threaded data sections can be
342 | 	 * written to. Eventually we will fix this with a more sophisticated
343 | 	 * solution that requires moving more things around.
344 | */ 345 | __asm__ volatile( 346 | "mov %0, %%rdi\n" 347 | "mov %1, %%rsi\n" 348 | "mov %2, %%rdx\n" 349 | "mov $10, %%rax\n" 350 | "syscall" : : "g"(data_vaddr & ~4095), "g"(4096), "g"(PROT_READ)); 351 | process_main: 352 | ptr = (uint32_t *)stub_vaddr; 353 | main_addr = *ptr; 354 | 355 | /* 356 | * Restore register state for argc, argv, envp 357 | * passed to main() 358 | */ 359 | asm volatile("mov %0, %%rsi" : : "r"(rsi)); 360 | asm volatile("mov %0, %%rdi" : : "r"(rdi)); 361 | asm volatile("mov %0, %%rcx" : : "r"(rcx)); 362 | asm volatile("mov %0, %%rdx" : : "r"(rdx)); 363 | /* 364 | * Finally lets go to main() by using pushes and rets 365 | * to pretend we are a call. This allows us to avoid 366 | * using a 'call imm' for an absolute address which 367 | * works within same segment, but is not technically correct. 368 | */ 369 | #if 0 370 | retaddr = get_rip() + PUSH_RET_LEN + PUSH_LEN; 371 | asm volatile("push %0" : : "r"(retaddr)); 372 | asm volatile("push %0" : : "r"(main_addr)); 373 | asm volatile("ret"); 374 | #endif 375 | 376 | asm volatile("call %0" : : "r"(main_addr)); 377 | asm volatile("mov $60, %rax\n" 378 | "syscall"); 379 | 380 | /* 381 | * The return value doesn't matter until we move to our 382 | * most advanced approach. This is for future use. We 383 | * exit before we hit this return. 384 | */ 385 | return main_addr; 386 | } 387 | #pragma GCC pop_options 388 | 389 | /* 390 | * NOTE: We assign esoteric to environ here, which is random. 391 | * but we had to assign it to something global to force the 392 | * linker to put delta_end() after enable_relro() so that we 393 | * can calculate enable_relro() delta (its size) 394 | */ 395 | volatile void 396 | delta_end(void) { volatile uintptr_t esoteric = (uintptr_t)&__environ; return; } 397 | 398 | #define TMP_FILE "/tmp/.xyz.static.fucker" 399 | #define GENERIC_START_MAIN_PATCH_OFFSET 580 /* glibc 2.23 - 2.25? */ 400 | #define TRAMPOLINE_OFFSET GENERIC_START_MAIN_PATCH_OFFSET 401 | 402 | bool 403 | inject_relro_code(elfobj2_t *obj) 404 | { 405 | int i, fd; 406 | size_t injection_size, old_size = obj->size; 407 | uint64_t relro_stub_vaddr; 408 | uint64_t base = sizeof(uintptr_t) == 8 ? 
0x400000 : 0x8048000; 409 | const size_t relro_stub_size = (const size_t)((char *)&delta_end - 410 | (char *)&unused_delta_begin); 411 | size_t new_map_size; 412 | ElfW(Sym) generic_start_main, main_sym; 413 | uint64_t generic_start_main_off, patch_vaddr, main_offset; 414 | uint8_t *ptr; 415 | const int magic_mark = 0xdeadbeef; 416 | 417 | if (_elf_symbol_by_name(obj, "generic_start_main", 418 | &generic_start_main) == false) { 419 | fprintf(stderr, "elf_symbol_by_name failed\n"); 420 | return false; 421 | } 422 | if (_elf_symbol_by_name(obj, "main", 423 | &main_sym) == false) { 424 | fprintf(stderr, "elf_symbol_by_name failed\n"); 425 | return false; 426 | } 427 | 428 | ptr = _elf_text_pointer(obj, generic_start_main.st_value); 429 | if (ptr == NULL) { 430 | fprintf(stderr, "%#lx could not be found in address range\n", 431 | generic_start_main.st_value); 432 | return false; 433 | } 434 | /* 435 | * Instead of calling main, lets have generic_start_main 436 | * call our enable_relro stub instead, 4 bytes into it 437 | * though since we have auxiliary info (main symbol value) 438 | * stored in the first 4 bytes hence sizeof(uint32_t); 439 | */ 440 | uint32_t enable_relro_vaddr = 0xc000000 + old_size + sizeof(uint32_t); 441 | 442 | ptr += TRAMPOLINE_OFFSET; 443 | ptr[0] = 0x68; /* push */ 444 | *(uint32_t *)&ptr[1] = 0xc000000 + old_size + sizeof(uint32_t); 445 | ptr[5] = 0xc3; /* ret */ 446 | 447 | #if 0 448 | /* 449 | * We cannot use this method because we would need a far call 450 | * aka an lcall *addr since the enable_relro() code exists in 451 | * another text segment all together created by us. Ideally we 452 | * would use a reverse text extension, and just keep the enable_relro 453 | * code within the main text segment, and use this instruction patching 454 | * instead since it is 5 bytes and not 6. Our current 6 byte technique 455 | * clobbers the next instructions and forces us to call exit() after 456 | * main() but before the .dtors/.fini_array pointers are called. 457 | */ 458 | patch_vaddr = generic_start_main.st_value + TRAMPOLINE_OFFSET; 459 | ptr += TRAMPOLINE_OFFSET; 460 | ptr[0] = 0xe8; /* call imm */ 461 | main_offset = vaddr - patch_vaddr - 5; 462 | *(int32_t *)&ptr[1] = main_offset; 463 | printf("Patching with value %#lx\n", main_offset); 464 | #endif 465 | /* 466 | * Locate NOTE segment and change its characteristics 467 | * to that of a loadable segment with an offset that 468 | * point to our enable_relro code. We must modify 469 | * these values directly since libelfmaster doesn't 470 | * support modification yet. 471 | */ 472 | for (i = 0; i < obj->ehdr->e_phnum; i++) { 473 | if (obj->phdr[i].p_type != PT_NOTE) 474 | continue; 475 | obj->phdr[i].p_type = PT_LOAD; 476 | relro_stub_vaddr = obj->phdr[i].p_vaddr = 477 | 0xc000000 + old_size; 478 | injection_size = relro_stub_size; 479 | obj->phdr[i].p_filesz = relro_stub_size + PADDING_SIZE; 480 | obj->phdr[i].p_memsz = obj->phdr[i].p_filesz; 481 | obj->phdr[i].p_flags = PF_R | PF_X; 482 | obj->phdr[i].p_align = 0x200000; 483 | obj->phdr[i].p_paddr = obj->phdr[i].p_vaddr; 484 | obj->phdr[i].p_offset = old_size; 485 | 486 | } 487 | fd = open(TMP_FILE, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU); 488 | if (fd < 0) { 489 | perror("open"); 490 | return false; 491 | } 492 | #if DEBUG 493 | /* 494 | * This extends section 1 so that we can 495 | * view our enable_relro() code with objdump 496 | * during debugging phases. 
497 | */ 498 | obj->shdr[1].sh_offset = old_size; 499 | obj->shdr[1].sh_addr = 0xc000000 + old_size; 500 | obj->shdr[1].sh_size = relro_stub_size + 16; 501 | obj->shdr[1].sh_type = SHT_PROGBITS; 502 | #endif 503 | if (write(fd, obj->mem, old_size) < 0) { 504 | perror("write1"); 505 | return false; 506 | } 507 | printf("injection size: %lu\n", injection_size); 508 | 509 | (void) write(fd, &main_sym.st_value, 4); 510 | 511 | if (write(fd, (char *)&enable_relro, injection_size) < 0) { 512 | perror("write2"); 513 | return false; 514 | } 515 | printf("main(): %#lx\n", main_sym.st_value); 516 | close(fd); 517 | if (rename(TMP_FILE, obj->path) < 0) { 518 | perror("rename"); 519 | return false; 520 | } 521 | return true; 522 | } 523 | int main(int argc, char **argv) 524 | { 525 | 526 | elfobj2_t obj; 527 | 528 | if (argc < 2) { 529 | printf("Usage: %s \n", argv[0]); 530 | exit(EXIT_FAILURE); 531 | } 532 | 533 | if (_elf_open_object(argv[1], &obj) == false) { 534 | fprintf(stderr, "_elf_open_object failed\n"); 535 | exit(EXIT_FAILURE); 536 | } 537 | 538 | if (obj.dynamic_linked == true) { 539 | /* 540 | * If there is a PT_DYNAMIC segment then we know 541 | * this isn't a static executable. 542 | */ 543 | fprintf(stderr, "%s is dynamically linked\n", argv[1]); 544 | exit(EXIT_FAILURE); 545 | } 546 | 547 | if (inject_relro_code(&obj) == false) { 548 | fprintf(stderr, "instrumentation failed\n"); 549 | exit(EXIT_FAILURE); 550 | } 551 | 552 | _elf_close_object(&obj); 553 | } 554 | 555 | -------------------------------------------------------------------------------- /static_to_dyn.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018, Ryan O'Neill 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions are met: 7 | * 8 | * 1. Redistributions of source code must retain the above copyright notice, this 9 | * list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright notice, 11 | * this list of conditions and the following disclaimer in the documentation 12 | * and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 18 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 21 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 |  */
25 | 
26 | 
27 | #define _GNU_SOURCE
28 | #include <stdio.h>
29 | #include <stdlib.h>
30 | #include <string.h>
31 | #include <stdint.h>
32 | #include <unistd.h>
33 | #include <fcntl.h>
34 | #include <elf.h>
35 | #include <link.h>
36 | #include <sys/mman.h>
37 | #include <sys/stat.h>
38 | 
39 | #define HUGE_PAGE 0x200000
40 | 
41 | int main(int argc, char **argv)
42 | {
43 | 	ElfW(Ehdr) *ehdr;
44 | 	ElfW(Phdr) *phdr;
45 | 	ElfW(Shdr) *shdr;
46 | 	uint8_t *mem;
47 | 	int fd;
48 | 	int i;
49 | 	struct stat st;
50 | 	uint64_t old_base; /* original text base */
51 | 	uint64_t new_data_base; /* new data base */
52 | 	char *StringTable;
53 | 
54 | 	fd = open(argv[1], O_RDWR);
55 | 	if (fd < 0) {
56 | 		perror("open");
57 | 		goto fail;
58 | 	}
59 | 
60 | 	fstat(fd, &st);
61 | 
62 | 	mem = mmap(NULL, st.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
63 | 	if (mem == MAP_FAILED ) {
64 | 		perror("mmap");
65 | 		goto fail;
66 | 	}
67 | 
68 | 	ehdr = (ElfW(Ehdr) *)mem;
69 | 	phdr = (ElfW(Phdr) *)&mem[ehdr->e_phoff];
70 | 	shdr = (ElfW(Shdr) *)&mem[ehdr->e_shoff];
71 | 	StringTable = (char *)&mem[shdr[ehdr->e_shstrndx].sh_offset];
72 | 
73 | 	printf("Marking e_type to ET_DYN\n");
74 | 	ehdr->e_type = ET_DYN;
75 | 
76 | 	printf("Updating PT_LOAD segments to become relocatable from base 0\n");
77 | 	for (i = 0; i < ehdr->e_phnum; i++) {
78 | 		if (phdr[i].p_type == PT_LOAD && phdr[i].p_offset == 0) {
79 | 			old_base = phdr[i].p_vaddr;
80 | 			phdr[i].p_vaddr = 0UL;
81 | 			phdr[i].p_paddr = 0UL;
82 | 			phdr[i + 1].p_vaddr = HUGE_PAGE + phdr[i + 1].p_offset;
83 | 			phdr[i + 1].p_paddr = HUGE_PAGE + phdr[i + 1].p_offset;
84 | 		} else if (phdr[i].p_type == PT_NOTE) {
85 | 			phdr[i].p_vaddr = phdr[i].p_offset;
86 | 			phdr[i].p_paddr = phdr[i].p_offset;
87 | 		} else if (phdr[i].p_type == PT_TLS) {
88 | 			phdr[i].p_vaddr = HUGE_PAGE + phdr[i].p_offset;
89 | 			phdr[i].p_paddr = HUGE_PAGE + phdr[i].p_offset;
90 | 			new_data_base = phdr[i].p_vaddr;
91 | 		}
92 | 	}
93 | 	/*
94 | 	 * If we don't update the section headers to reflect the new address
95 | 	 * space then GDB and objdump will be broken with this binary.
96 | 	 */
97 | 	for (i = 0; i < ehdr->e_shnum; i++) {
98 | 		if (!(shdr[i].sh_flags & SHF_ALLOC))
99 | 			continue;
100 | 		shdr[i].sh_addr = (shdr[i].sh_addr < old_base + HUGE_PAGE) ?
101 | 		    0UL + shdr[i].sh_offset : new_data_base + shdr[i].sh_offset;
102 | 		printf("Setting %s sh_addr to %#lx\n", &StringTable[shdr[i].sh_name],
103 | 		    shdr[i].sh_addr);
104 | 	}
105 | 
106 | 	printf("Setting new entry point: %#lx\n", ehdr->e_entry - old_base);
107 | 	ehdr->e_entry = ehdr->e_entry - old_base;
108 | 	munmap(mem, st.st_size);
109 | 	exit(0);
110 | fail:
111 | 	exit(-1);
112 | }
113 | 
--------------------------------------------------------------------------------
/test.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <unistd.h>
4 | 
5 | int main(void)
6 | {
7 | 	printf("I'm pausing while you check my RELRO status in /proc/%d/maps\n", getpid());
8 | 	pause();
9 | 	exit(EXIT_SUCCESS);
10 | }
11 | 
--------------------------------------------------------------------------------
/test2.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <unistd.h>
4 | 
5 | /* Make sure we have a data segment for testing purposes */
6 | static int test_dummy = 5;
7 | 
8 | int _start() {
9 | 	int argc;
10 | 	long **args;
11 | 	long *rbp;
12 | 	int i;
13 | 	int j = 0;
14 | 
15 | 	/* Extract argc from stack */
16 | 	asm __volatile__("mov 8(%%rbp), %%rcx " : "=c" (argc));
17 | 
18 | 	/* Extract argv from stack */
19 | 	asm __volatile__("lea 16(%%rbp), %%rcx " : "=c" (args));
20 | 
21 | 	for (i = 0; i < argc; i++) {
22 | 		sleep(1);
23 | 		printf("%s\n", (char *)args[i]);
24 | 	}
25 | 	exit(0);
26 | }
27 | 
--------------------------------------------------------------------------------
/test3.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <unistd.h>
4 | 
5 | /* Make sure we have a data segment for testing purposes */
6 | static int test_dummy = 5;
7 | 
8 | int main(int argc, char **argv) {
9 | 	int i;
10 | 	int j = 0;
11 | 
12 | 	printf("Hello world\n");
13 | 	exit(0);
14 | }
15 | 
16 | 
--------------------------------------------------------------------------------
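
As a quick sanity check to complement the /proc/<pid>/maps inspection suggested by test.c, a small standalone helper can report whether a binary's e_type has become ET_DYN (the property static_to_dyn sets so the kernel will randomize the load base) and whether a PT_GNU_RELRO segment is present (the segment relros prefers when deciding what to mprotect). The sketch below is illustrative and not part of the repo; the file name check_mitigations.c and all of its code are assumptions, and note that relros enforces RELRO at runtime via mprotect, so the memory map of the paused test binary remains the authoritative check.

/* check_mitigations.c — illustrative verification helper (not part of this repo) */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <elf.h>
#include <link.h>
#include <sys/stat.h>
#include <sys/mman.h>

int main(int argc, char **argv)
{
	struct stat st;
	uint8_t *mem;
	ElfW(Ehdr) *ehdr;
	ElfW(Phdr) *phdr;
	int fd, i, relro = 0;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s <elf>\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		exit(EXIT_FAILURE);
	}
	mem = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		exit(EXIT_FAILURE);
	}
	ehdr = (ElfW(Ehdr) *)mem;
	phdr = (ElfW(Phdr) *)&mem[ehdr->e_phoff];

	/* ET_DYN executables are loaded at a randomized base when ASLR is on */
	printf("e_type: %s\n", ehdr->e_type == ET_DYN ? "ET_DYN (ASLR-capable)" :
	    ehdr->e_type == ET_EXEC ? "ET_EXEC (fixed base)" : "other");

	/* PT_GNU_RELRO only describes the region; relros enforces it at runtime */
	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdr[i].p_type == PT_GNU_RELRO)
			relro = 1;
	printf("PT_GNU_RELRO segment: %s\n", relro ? "present" : "absent");

	munmap(mem, st.st_size);
	close(fd);
	return 0;
}

Running it against the post-make binaries (e.g. ./check_mitigations test2) should show ET_DYN for the static_to_dyn output, while the RELRO applied by relros is best observed by checking that the relevant pages show up as read-only in the maps of the paused test process.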