├── ent.plist
├── README.md
├── find_kernel_base_under_checkra1n.h
├── find_kernel_base_under_checkra1n.c
└── fda.c
/ent.plist:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
	<key>task_for_pid-allow</key>
	<true/>
	<key>run-unsigned-code</key>
	<true/>
	<key>get-task-allow</key>
	<true/>
</dict>
</plist>
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # iOSFuckDenyAttach
Tool that manually disables ptrace's "deny attach" protection by patching kernel memory.


Currently this tool only works under checkra1n on
iPhone 8, iOS 13.4 — Darwin Kernel Version 19.4.0: Mon Feb 24 22:04:29 PST 2020; root:xnu-6153.102.3~1/RELEASE_ARM64_T8015


It may support more iOS versions in the future,
if I have time.
11 |
12 |
13 |
--------------------------------------------------------------------------------
/find_kernel_base_under_checkra1n.h:
--------------------------------------------------------------------------------
1 |
2 | // find_kernel_base_under_checkra1n.h
3 | #ifndef FIND_KERNEL_BASE_UNDER_CHECKRA1N
4 | #define FIND_KERNEL_BASE_UNDER_CHECKRA1N 1
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | void kernel_task_init();
11 | uint64_t kernel_base_init();
12 | bool kernel_read(uint64_t address, void *data, size_t size);
13 | uint64_t kernel_read64(uint64_t address);
14 | bool kernel_write(uint64_t address, const void *data, size_t size);
15 |
16 | #define KERNEL_WRITE64(addr , val) kernel_write(addr, &val, sizeof(int64_t));
17 |
18 | #define KERNEL_WRITE32(addr , val) kernel_write(addr, &val, sizeof(int32_t));
19 |
20 |
21 | #endif
--------------------------------------------------------------------------------
/find_kernel_base_under_checkra1n.c:
--------------------------------------------------------------------------------
// find_kernel_base_under_checkra1n.c

#include "find_kernel_base_under_checkra1n.h"

// ---- mach_vm.h ---------------------------------------------------------------------------------
// Hand-written prototypes for the mach_vm MIG routines: <mach/mach_vm.h>
// is not shipped in the public iOS SDK, so the functions are declared
// here and resolved at link time.

// Copies kernel/task memory into a caller-supplied buffer.
extern
kern_return_t mach_vm_read_overwrite
(
	vm_map_t target_task,
	mach_vm_address_t address,
	mach_vm_size_t size,
	mach_vm_address_t data,
	mach_vm_size_t *outsize
);

// Enumerates memory regions of a task, recursing into submaps.
extern
kern_return_t mach_vm_region_recurse
(
	vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	natural_t *nesting_depth,
	vm_region_recurse_info_t info,
	mach_msg_type_number_t *infoCnt
);

// Writes a buffer into task memory. (NOTE(review): `extern` omitted here,
// which is harmless for a function declaration.)
kern_return_t mach_vm_write
(
	vm_map_t target_task,
	mach_vm_address_t address,
	vm_offset_t data,
	mach_msg_type_number_t dataCnt
);

// ---- Kernel task -------------------------------------------------------------------------------

// Send right to the kernel task, filled in by kernel_task_init().
static mach_port_t kernel_task_port;
39 |
40 | void
41 | kernel_task_init() {
42 | task_for_pid(mach_task_self(), 0, &kernel_task_port);
43 | assert(kernel_task_port != MACH_PORT_NULL);
44 | printf("kernel task: 0x%x\n", kernel_task_port);
45 | }
46 |
47 | bool
48 | kernel_read(uint64_t address, void *data, size_t size) {
49 | mach_vm_size_t size_out;
50 | kern_return_t kr = mach_vm_read_overwrite(kernel_task_port, address, size,
51 | (mach_vm_address_t) data, &size_out);
52 | return (kr == KERN_SUCCESS);
53 | }
54 |
/*
 * Read one 64-bit word of kernel memory. Logs and returns 0 when the
 * read fails, so a zero result is ambiguous with a genuine zero value.
 */
uint64_t
kernel_read64(uint64_t address) {
	uint64_t word = 0;
	if (!kernel_read(address, &word, sizeof(word))) {
		printf("error: %s(0x%016llx)\n", __func__, address);
	}
	return word;
}
64 |
65 |
66 | bool kernel_write(uint64_t address, const void *data, size_t size) {
67 | size_t offset = 0;
68 | kern_return_t kr = KERN_FAILURE;
69 | while (offset < size) {
70 | size_t chunk = 2048;
71 | if (chunk > size - offset) {
72 | chunk = size - offset;
73 | }
74 | kr = mach_vm_write(kernel_task_port, address + offset, (mach_vm_offset_t)data + offset, (int)chunk);
75 | if (kr != KERN_SUCCESS) {
76 | printf("error: %s(0x%016llx)\n",__func__, address);
77 | break;
78 | }
79 | offset += chunk;
80 | }
81 | return (kr == KERN_SUCCESS);
82 | }
83 |
84 |
// ---- Kernel base -------------------------------------------------------------------------------

// Cached kernel image base, set by the scan below.
static uint64_t kernel_base;

/*
 * Check whether `base` holds the arm64 kernel Mach-O header
 * (magic 0xfeedfacf, cputype 0x0100000c, filetype 2).
 */
bool
is_kernel_base(uint64_t base) {
	static const uint64_t expected[2] = { 0x0100000cfeedfacf, 0x0000000200000000 };
	uint64_t words[2] = { 0, 0 };
	if (!kernel_read(base, words, sizeof(words))) {
		return false;
	}
	return memcmp(words, expected, sizeof(words)) == 0;
}
99 |
/*
 * Locate the kernel base: scan kernel heap regions for the smallest
 * pointer back into the kernel image range, then walk pages downward
 * from that pointer until the Mach-O header (is_kernel_base) is found.
 * Sets the file-scope `kernel_base` and returns true on success.
 *
 * "Unsafe": the downward page walk has no lower bound, so if no header
 * exists below the found pointer this function never returns.
 */
bool
kernel_base_init_with_unsafe_heap_scan() {
	// Address range in which the arm64 kernel image can live.
	uint64_t kernel_region_base = 0xfffffff000000000;
	uint64_t kernel_region_end = 0xfffffffbffffc000;
	// Try and find a pointer in the kernel heap to data in the kernel image. We'll take the
	// smallest such pointer.
	uint64_t kernel_ptr = (uint64_t)(-1);
	mach_vm_address_t address = 0;
	for (;;) {
		// Get the next memory region.
		mach_vm_size_t size = 0;
		uint32_t depth = 2;
		struct vm_region_submap_info_64 info;
		mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kern_return_t kr = mach_vm_region_recurse(kernel_task_port, &address, &size,
				&depth, (vm_region_recurse_info_t) &info, &count);
		if (kr != KERN_SUCCESS) {
			break;
		}
		// Skip any region that is not on the heap, not in a submap, not readable and
		// writable, or not fully mapped.
		// NOTE(review): user_tag 12 presumably selects heap regions — confirm
		// against <mach/vm_statistics.h>. 0x4000 is the arm64 page size.
		int prot = VM_PROT_READ | VM_PROT_WRITE;
		if (info.user_tag != 12
				|| depth != 1
				|| (info.protection & prot) != prot
				|| info.pages_resident * 0x4000 != size) {
			goto next;
		}
		// Read the first word of each page in this region.
		for (size_t offset = 0; offset < size; offset += 0x4000) {
			uint64_t value = 0;
			bool ok = kernel_read(address + offset, &value, sizeof(value));
			if (ok
					&& kernel_region_base <= value
					&& value < kernel_region_end
					&& value < kernel_ptr) {
				kernel_ptr = value;
			}
		}
next:
		address += size;
	}
	// If we didn't find any such pointer, abort.
	if (kernel_ptr == (uint64_t)(-1)) {
		return false;
	}
	printf("found kernel pointer %p\n", (void *)kernel_ptr);
	// Now that we have a pointer, we want to scan pages until we reach the kernel's Mach-O
	// header.
	uint64_t page = kernel_ptr & ~0x3fff;	// round down to page boundary
	for (;;) {
		bool found = is_kernel_base(page);
		if (found) {
			kernel_base = page;
			return true;
		}
		// NOTE(review): no lower bound — loops forever if no header is found.
		page -= 0x4000;
	}
	return false;	// unreachable: the loop above only exits via return
}
160 |
161 | uint64_t
162 | kernel_base_init() {
163 | bool ok = kernel_base_init_with_unsafe_heap_scan();
164 | assert(ok);
165 | printf("kernel base: %p\n", (void *)kernel_base);
166 | return kernel_base;
167 | }
168 |
--------------------------------------------------------------------------------
/fda.c:
--------------------------------------------------------------------------------
1 | //fda.c
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #include "find_kernel_base_under_checkra1n.h"
10 |
11 | #include
12 | #include
13 |
14 |
15 | //#define CONFIG_DTRACE 1
16 |
17 | //#define CONFIG_PERSONAS 1
18 | //#define _PROC_HAS_SCHEDINFO_ 1
19 |
20 | typedef char command_t[MAXCOMLEN + 1];
21 | typedef char proc_name_t[2*MAXCOMLEN + 1];
22 | typedef void * lck_mtx_t;
23 | typedef void * kauth_cred_t;
24 | typedef void * lck_spin_t;
/*
 * Description of a process.
 *
 * This structure contains the information needed to manage a thread of
 * control, known in UN*X as a process; it has references to substructures
 * containing descriptions of things that the process uses, but may share
 * with related processes. The process structure and the substructures
 * are always addressible except for those marked "(PROC ONLY)" below,
 * which might be addressible only on a processor on which the process
 * is running.
 *
 * NOTE(review): hand-copied snapshot of XNU's struct proc (xnu-6153
 * era). Raw kernel memory is read directly into this struct, so the
 * field layout must match the on-device kernel exactly; only the
 * offsets of p_list, p_uniqueid, p_lflag and p_comm are actually used
 * by main(). Do not reorder or resize fields.
 */
struct proc {
	LIST_ENTRY(proc) p_list;	/* List of all processes. */

	void *		task;		/* corresponding task (static)*/
	struct proc *	p_pptr;		/* Pointer to parent process.(LL) */
	pid_t		p_ppid;		/* process's parent pid number */
	pid_t		p_original_ppid;	/* process's original parent pid number, doesn't change if reparented */
	pid_t		p_pgrpid;	/* process group id of the process (LL)*/
	uid_t		p_uid;
	gid_t		p_gid;
	uid_t		p_ruid;
	gid_t		p_rgid;
	uid_t		p_svuid;
	gid_t		p_svgid;
	uint64_t	p_uniqueid;	/* process unique ID - incremented on fork/spawn/vfork, remains same across exec. */
	uint64_t	p_puniqueid;	/* parent's unique ID - set on fork/spawn/vfork, doesn't change if reparented. */

	lck_mtx_t	p_mlock;	/* mutex lock for proc */
	pid_t		p_pid;		/* Process identifier. (static)*/
	char		p_stat;		/* S* process status. (PL)*/
	char		p_shutdownstate;
	char		p_kdebug;	/* P_KDEBUG eq (CC)*/
	char		p_btrace;	/* P_BTRACE eq (CC)*/

	LIST_ENTRY(proc) p_pglist;	/* List of processes in pgrp.(PGL) */
	LIST_ENTRY(proc) p_sibling;	/* List of sibling processes. (LL)*/
	LIST_HEAD(, proc) p_children;	/* Pointer to list of children. (LL)*/
	TAILQ_HEAD(, uthread) p_uthlist;	/* List of uthreads (PL) */

	LIST_ENTRY(proc) p_hash;	/* Hash chain. (LL)*/
	TAILQ_HEAD(, eventqelt) p_evlist;	/* (PL) */

#if CONFIG_PERSONAS
	struct persona *p_persona;
	LIST_ENTRY(proc) p_persona_list;
#endif

	lck_mtx_t	p_fdmlock;	/* proc lock to protect fdesc */
	lck_mtx_t	p_ucred_mlock;	/* mutex lock to protect p_ucred */

	/* substructures: */
	kauth_cred_t	p_ucred;	/* Process owner's identity. (PUCL) */
	struct filedesc *p_fd;		/* Ptr to open files structure. (PFDL) */
	struct pstats	*p_stats;	/* Accounting/statistics (PL). */
	struct plimit	*p_limit;	/* Process limits.(PL) */

	struct sigacts	*p_sigacts;	/* Signal actions, state (PL) */
	lck_spin_t	p_slock;	/* spin lock for itimer/profil protection */

#define p_rlimit p_limit->pl_rlimit

	void *		unknow0[7];	/* padding: offsets did not match the actual in-memory layout, so this filler was added */

	struct plimit	*p_olimit;	/* old process limits - not inherited by child (PL) */
	int		p_siglist;	/* signals captured back from threads */
	unsigned int	p_flag;		/* P_* flags. (atomic bit ops) */
	unsigned int	p_lflag;	/* local flags (PL) */
	unsigned int	p_listflag;	/* list flags (LL) */
	unsigned int	p_ladvflag;	/* local adv flags (atomic) */
	int		p_refcount;	/* number of outstanding users(LL) */
	int		p_childrencnt;	/* children holding ref on parent (LL) */
	int		p_parentref;	/* children lookup ref on parent (LL) */
	pid_t		p_oppid;	/* Save parent pid during ptrace. XXX */
	u_int		p_xstat;	/* Exit status for wait; also stop signal. */

#ifdef _PROC_HAS_SCHEDINFO_
	/* may need cleanup, not used */
	u_int		p_estcpu;	/* Time averaged value of p_cpticks.(used by aio and proc_comapre) */
	fixpt_t		p_pctcpu;	/* %cpu for this process during p_swtime (used by aio)*/
	u_int		p_slptime;	/* used by proc_compare */
#endif /* _PROC_HAS_SCHEDINFO_ */

	struct itimerval p_realtimer;	/* Alarm timer. (PSL) */
	struct timeval	p_rtime;	/* Real time.(PSL) */
	struct itimerval p_vtimer_user;	/* Virtual timers.(PSL) */
	struct itimerval p_vtimer_prof;	/* (PSL) */

	struct timeval	p_rlim_cpu;	/* Remaining rlim cpu value.(PSL) */
	int		p_debugger;	/* NU 1: can exec set-bit programs if suser */
	boolean_t	sigwait;	/* indication to suspend (PL) */
	void		*sigwait_thread;	/* 'thread' holding sigwait(PL) */
	void		*exit_thread;	/* Which thread is exiting(PL) */
	void *		p_vforkact;	/* activation running this vfork proc)(static) */
	int		p_vforkcnt;	/* number of outstanding vforks(PL) */
	int		p_fpdrainwait;	/* (PFDL) */
	/* Following fields are info from SIGCHLD (PL) */
	pid_t		si_pid;		/* (PL) */
	u_int		si_status;	/* (PL) */
	u_int		si_code;	/* (PL) */
	uid_t		si_uid;		/* (PL) */

	void *		vm_shm;		/* (SYSV SHM Lock) for sysV shared memory */

#if CONFIG_DTRACE
	user_addr_t	p_dtrace_argv;	/* (write once, read only after that) */
	user_addr_t	p_dtrace_envp;	/* (write once, read only after that) */
	lck_mtx_t	p_dtrace_sprlock;	/* sun proc lock emulation */
	uint8_t		p_dtrace_stop;	/* indicates a DTrace-desired stop */
	int		p_dtrace_probes;	/* (PL) are there probes for this proc? */
	u_int		p_dtrace_count;	/* (sprlock) number of DTrace tracepoints */
	struct dtrace_ptss_page* p_dtrace_ptss_pages;	/* (sprlock) list of user ptss pages */
	struct dtrace_ptss_page_entry* p_dtrace_ptss_free_list;	/* (atomic) list of individual ptss entries */
	struct dtrace_helpers* p_dtrace_helpers;	/* (dtrace_lock) DTrace per-proc private */
	struct dof_ioctl_data* p_dtrace_lazy_dofs;	/* (sprlock) unloaded dof_helper_t's */
#endif /* CONFIG_DTRACE */

	/* XXXXXXXXXXXXX BCOPY'ed on fork XXXXXXXXXXXXXXXX */
	/* The following fields are all copied upon creation in fork. */
#define p_startcopy p_argslen

	u_int		p_argslen;	/* Length of process arguments. */
	int		p_argc;		/* saved argc for sysctl_procargs() */
	user_addr_t	user_stack;	/* where user stack was allocated */
	struct vnode	*p_textvp;	/* Vnode of executable. */
	off_t		p_textoff;	/* offset in executable vnode */

	sigset_t	p_sigmask;	/* DEPRECATED */
	sigset_t	p_sigignore;	/* Signals being ignored. (PL) */
	sigset_t	p_sigcatch;	/* Signals being caught by user.(PL) */

	u_char		p_priority;	/* (NU) Process priority. */
	u_char		p_resv0;	/* (NU) User-priority based on p_cpu and p_nice. */
	char		p_nice;		/* Process "nice" value.(PL) */
	u_char		p_resv1;	/* (NU) User-priority based on p_cpu and p_nice. */

	// types currently in sys/param.h
	command_t	p_comm;		/* executable name (truncated to MAXCOMLEN) */
	proc_name_t	p_name;		/* can be changed by the process */
	uint8_t		p_xhighbits;	/* Stores the top byte of exit status to avoid truncation*/
	pid_t		p_contproc;	/* last PID to send us a SIGCONT (PL) */

	struct pgrp	*p_pgrp;	/* Pointer to process group. (LL) */
	uint32_t	p_csflags;	/* flags for codesign (PL) */
	uint32_t	p_pcaction;	/* action for process control on starvation */
	uint8_t		p_uuid[16];	/* from LC_UUID load command */

	/*
	 * CPU type and subtype of binary slice executed in
	 * this process. Protected by proc lock.
	 */
	cpu_type_t	p_cputype;
	cpu_subtype_t	p_cpusubtype;

	uint8_t	*syscall_filter_mask;	/* syscall filter bitmask (length: nsysent bits) */
	uint32_t	p_platform;
	uint32_t	p_sdk;

	/* End area that is copied on creation. */
	/* XXXXXXXXXXXXX End of BCOPY'ed on fork (AIOLOCK)XXXXXXXXXXXXXXXX */
#define p_endcopy p_aio_total_count
	int		p_aio_total_count;	/* all allocated AIO requests for this proc */
	int		p_aio_active_count;	/* all unfinished AIO requests for this proc */
	TAILQ_HEAD(, aio_workq_entry ) p_aio_activeq;	/* active async IO requests */
	TAILQ_HEAD(, aio_workq_entry ) p_aio_doneq;	/* completed async IO requests */

	struct klist p_klist;	/* knote list (PL ?)*/

	struct rusage_superset *p_ru;	/* Exit information. (PL) */
	thread_t	p_signalholder;
	thread_t	p_transholder;
	int		p_sigwaitcnt;
	/* DEPRECATE following field */
	u_short		p_acflag;	/* Accounting flags. */
	volatile u_short p_vfs_iopolicy;	/* VFS iopolicy flags. (atomic bit ops) */

	user_addr_t	p_threadstart;	/* pthread start fn */
	user_addr_t	p_wqthread;	/* pthread workqueue fn */
	int		p_pthsize;	/* pthread size */
	uint32_t	p_pth_tsd_offset;	/* offset from pthread_t to TSD for new threads */
	user_addr_t	p_stack_addr_hint;	/* stack allocation hint for wq threads */
	struct workqueue *_Atomic p_wqptr;	/* workq ptr */

	struct timeval	p_start;	/* starting time */
	void *		p_rcall;
	int		p_ractive;
	int		p_idversion;	/* version of process identity */
	void *		p_pthhash;	/* pthread waitqueue hash */
	volatile uint64_t was_throttled __attribute__((aligned(8)));	/* Counter for number of throttled I/Os */
	volatile uint64_t did_throttle __attribute__((aligned(8)));	/* Counter for number of I/Os this proc throttled */

#if DIAGNOSTIC
	unsigned int p_fdlock_pc[4];
	unsigned int p_fdunlock_pc[4];
#if SIGNAL_DEBUG
	unsigned int lockpc[8];
	unsigned int unlockpc[8];
#endif /* SIGNAL_DEBUG */
#endif /* DIAGNOSTIC */
	uint64_t	p_dispatchqueue_offset;
	uint64_t	p_dispatchqueue_serialno_offset;
	uint64_t	p_dispatchqueue_label_offset;
	uint64_t	p_return_to_kernel_offset;
	uint64_t	p_mach_thread_self_offset;
#if VM_PRESSURE_EVENTS
	struct timeval	vm_pressure_last_notify_tstamp;
#endif

#if CONFIG_MEMORYSTATUS
	/* Fields protected by proc list lock */
	TAILQ_ENTRY(proc) p_memstat_list;	/* priority bucket link */
	uint32_t	p_memstat_state;	/* state. Also used as a wakeup channel when the memstat's LOCKED bit changes */
	int32_t		p_memstat_effectivepriority;	/* priority after transaction state accounted for */
	int32_t		p_memstat_requestedpriority;	/* active priority */
	int32_t		p_memstat_assertionpriority;	/* assertion driven priority */
	uint32_t	p_memstat_dirty;	/* dirty state */
	uint64_t	p_memstat_userdata;	/* user state */
	uint64_t	p_memstat_idledeadline;	/* time at which process became clean */
	uint64_t	p_memstat_idle_start;	/* abstime process transitions into the idle band */
	uint64_t	p_memstat_idle_delta;	/* abstime delta spent in idle band */
	int32_t		p_memstat_memlimit;	/* cached memory limit, toggles between active and inactive limits */
	int32_t		p_memstat_memlimit_active;	/* memory limit enforced when process is in active jetsam state */
	int32_t		p_memstat_memlimit_inactive;	/* memory limit enforced when process is in inactive jetsam state */
	int32_t		p_memstat_relaunch_flags;	/* flags indicating relaunch behavior for the process */
#if CONFIG_FREEZE
	uint32_t	p_memstat_freeze_sharedanon_pages;	/* shared pages left behind after freeze */
	uint32_t	p_memstat_frozen_count;
	uint32_t	p_memstat_thaw_count;
#endif /* CONFIG_FREEZE */
#endif /* CONFIG_MEMORYSTATUS */

	/* cached proc-specific data required for corpse inspection */
	pid_t		p_responsible_pid;	/* pid resonsible for this process */
	_Atomic uint32_t p_user_faults;	/* count the number of user faults generated */

	uint32_t	p_memlimit_increase;	/* byte increase for memory limit for dyld SPI rdar://problem/49950264, structure packing 32-bit and 64-bit */

	struct os_reason *p_exit_reason;

#if !CONFIG_EMBEDDED
	uint64_t	p_user_data;	/* general-purpose storage for userland-provided data */
#endif /* !CONFIG_EMBEDDED */
};
268 |
269 |
#define P_LNOATTACH	0x00001000	/* p_lflag bit set by ptrace(PT_DENY_ATTACH); cleared by this tool */

/* Macros to clear/set/test flags. */
#define SET(t, f)	(t) |= (f)
#define CLR(t, f)	(t) &= ~(f)
#define ISSET(t, f)	((t) & (f))

// ---- Main --------------------------------------------------------------------------------------

//iphone8 ios 13.4 kernel
// Exact kernelcache version string; the hard-coded offsets in main() are
// only valid for this build.
#define TARGET_KERNELCACHE_VERSION_STRING "@(#)VERSION: Darwin Kernel Version 19.4.0: Mon Feb 24 22:04:29 PST 2020; root:xnu-6153.102.3~1/RELEASE_ARM64_T8015"
281 |
282 | int main() {
283 | kernel_task_init();
284 | uint64_t kb = kernel_base_init();
285 | for (size_t i = 0; i < 8; i++) {
286 | printf("%016llx\n", kernel_read64(kb + 8 * i));
287 | }
288 | uint64_t versionstraddr = kb + 0x2FB64;
289 | char versionstr[256];
290 | if(kernel_read(versionstraddr, (void *)&versionstr, sizeof(versionstr)))
291 | {
292 | printf("%s\n", versionstr);
293 | if(strcmp(TARGET_KERNELCACHE_VERSION_STRING,versionstr) == 0)
294 | {
295 | printf("kernel cache hit\n");
296 | //226AF60 kernproc
297 | uint64_t kernel_proc0 = kernel_read64(kb + 0x226AF60);
298 |
299 | struct proc * proc0 = (void *)malloc(sizeof(struct proc));
300 |
301 | if(!kernel_read(kernel_proc0, (void *)proc0, sizeof(struct proc)))
302 | {
303 | printf("proc0 read failed\n");
304 | return -1;
305 | }
306 | printf("uniqueid offset 0x%llx comm offset 0x%llx \n",(int64_t)&(proc0->p_uniqueid) - (int64_t)proc0, (int64_t)&(proc0->p_comm)- (int64_t)proc0);
307 |
308 | struct proc * proc1 = (struct proc *)malloc(sizeof(struct proc));
309 | uint64_t preptr = (uint64_t)(proc0->p_list.le_prev);
310 | while(preptr){
311 | if(!kernel_read(preptr, (void *)proc1, sizeof(struct proc)))
312 | {
313 | printf("procnext read failed\n");
314 | return -1;
315 | }else{
316 | if(proc1->p_list.le_prev == 0)
317 | {
318 | printf("proc1->p_list.le_prev == 0\n");
319 | break;
320 | }
321 | int64_t lflagoffset = (int64_t)&(proc1->p_lflag) - (int64_t)proc1;
322 | int lflagvalue = proc1->p_lflag;
323 | printf("(%llu)%s proc = 0x%llx lflag = 0x%x lflag offset = 0x%llx"
324 | ,proc1->p_uniqueid,
325 | proc1->p_comm,//(char *)((int64_t)proc1 + 0x258),
326 | preptr,lflagvalue,lflagoffset);
327 |
328 | if(ISSET(lflagvalue, P_LNOATTACH))
329 | {
330 | printf(" !!!P_LNOATTACH set");
331 | CLR(lflagvalue, P_LNOATTACH);
332 | KERNEL_WRITE32(preptr + lflagoffset, lflagvalue);
333 | }
334 | printf("\n");
335 |
336 | preptr = (uint64_t)(proc1->p_list.le_prev);
337 | }
338 | }
339 |
340 | printf("end\n");
341 | free(proc0);
342 | free(proc1);
343 | }else{
344 | printf("kernel cache version mismatch\n");
345 | }
346 | }else{
347 | printf("failed to read kernel version string\n");
348 | }
349 | return 0;
350 | }
--------------------------------------------------------------------------------