├── code ├── Makefile ├── proc_rss.h ├── ver_control.h ├── api_proxy.h ├── rwProcMem_module.h ├── proc_root.h ├── proc_list.h ├── proc_root_auto_offset.h ├── hide_procfs_dir.h ├── linux_kernel_api.h ├── proc_cmdline.h ├── proc_cmdline_auto_offset.h ├── proc_list_auto_offset.h ├── phy_mem_auto_offset.h ├── test.h ├── phy_mem.h ├── proc_maps_auto_offset.h ├── rwProcMem_module.c └── proc_maps.h ├── README_zh.md ├── README.md └── .github └── workflows └── main.yml /code/Makefile: -------------------------------------------------------------------------------- 1 | obj-m += rwProcMem_module.o 2 | -------------------------------------------------------------------------------- /code/proc_rss.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_RSS_H_ 2 | #define PROC_RSS_H_ 3 | //声明 4 | ////////////////////////////////////////////////////////////////////////// 5 | #include 6 | #include "api_proxy.h" 7 | #include "ver_control.h" 8 | static size_t read_proc_rss_size(struct pid* proc_pid_struct); 9 | 10 | //实现 11 | ////////////////////////////////////////////////////////////////////////// 12 | #include "proc_cmdline_auto_offset.h" 13 | static size_t read_proc_rss_size(struct pid* proc_pid_struct) { 14 | struct task_struct *task; 15 | struct mm_struct *mm; 16 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 17 | if (!task) { 18 | return 0; 19 | } 20 | mm = get_task_mm(task); 21 | if (mm) { 22 | //精确偏移 23 | size_t total_rss; 24 | ssize_t offset = g_init_arg_start_offset_success ? 
g_arg_start_offset : 0; 25 | total_rss = x_read_mm_struct_rss(mm, offset); 26 | mmput(mm); 27 | return total_rss; 28 | } 29 | return 0; 30 | 31 | } 32 | #endif /* PROC_RSS_H_ */ -------------------------------------------------------------------------------- /code/ver_control.h: -------------------------------------------------------------------------------- 1 | #ifndef VERSION_CONTROL_H_ 2 | #define VERSION_CONTROL_H_ 3 | 4 | // 独立内核模块入口模式 5 | #define CONFIG_MODULE_GUIDE_ENTRY 6 | 7 | // 生成proc用户层交互节点文件 8 | #define CONFIG_USE_PROC_FILE_NODE 9 | // 隐蔽通信密钥 10 | #define CONFIG_PROC_NODE_AUTH_KEY "c2a2b5792edd296763fdfc72cff44380" 11 | 12 | // 打印内核调试信息 13 | //#define CONFIG_DEBUG_PRINTK 14 | 15 | #ifndef KERNEL_VERSION 16 | #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) 17 | #endif 18 | #ifndef MY_LINUX_VERSION_CODE 19 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(3,10,0) 20 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(3,10,84) 21 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(3,18,71) 22 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(3,18,140) 23 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,4,21) 24 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,4,78) 25 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,4,153) 26 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,4,192) 27 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,9,112) 28 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,9,186) 29 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,14,83) 30 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,14,117) 31 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,14,141) 32 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,19,81) 33 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(4,19,113) 34 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(5,4,61) 35 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(5,10,43) 36 | #define MY_LINUX_VERSION_CODE KERNEL_VERSION(5,15,41) 37 | //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(6,1,75) 38 
| //#define MY_LINUX_VERSION_CODE KERNEL_VERSION(6,6,30) 39 | #endif 40 | 41 | #ifdef CONFIG_DEBUG_PRINTK 42 | #define printk_debug printk 43 | #else 44 | static inline void printk_debug(char *fmt, ...) {} 45 | #endif 46 | 47 | #endif /* VERSION_CONTROL_H_ */ 48 | -------------------------------------------------------------------------------- /code/api_proxy.h: -------------------------------------------------------------------------------- 1 | #ifndef API_PROXY_H_ 2 | #define API_PROXY_H_ 3 | #include "ver_control.h" 4 | #include "linux_kernel_api.h" 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | //声明 11 | ////////////////////////////////////////////////////////////////////////// 12 | static inline int x_atoi(const char arr[]); 13 | static inline bool x_isdigit(char c); 14 | static inline struct task_struct* x_get_current(void); 15 | static inline void * x_kmalloc(size_t size, gfp_t flags); 16 | static inline unsigned long x_copy_from_user(void *to, const void __user *from, unsigned long n); 17 | static inline unsigned long x_copy_to_user(void __user *to, const void *from, unsigned long n); 18 | 19 | //实现 20 | ////////////////////////////////////////////////////////////////////////// 21 | static inline bool x_isdigit(char c) { return (unsigned)(c - '0') < 10; } 22 | static inline int x_atoi(const char arr[]) { 23 | int index = 0; 24 | int flag = 1; 25 | int num = 0; 26 | 27 | if (arr == NULL) { return -1; } 28 | while (isspace(arr[index])) { index++; } 29 | if (arr[index] == '-') { flag = -1; } 30 | if (arr[index] == '-' || arr[index] == '+') { index++; } 31 | while (arr[index] >= '0' && arr[index] <= '9') { num = num * 10 + arr[index] - '0'; index++; } 32 | return flag * num; 33 | } 34 | 35 | static struct task_struct *x_get_current(void) { 36 | unsigned long sp_el0; 37 | asm ("mrs %0, sp_el0" : "=r" (sp_el0)); 38 | return (struct task_struct *)sp_el0; 39 | } 40 | 41 | static void * x_kmalloc(size_t size, gfp_t flags) { 42 | return __kmalloc(size, 
flags); 43 | } 44 | 45 | static unsigned long x_copy_from_user(void *to, const void __user *from, unsigned long n) { 46 | return __arch_copy_from_user(to, from, n); 47 | } 48 | 49 | static unsigned long x_copy_to_user(void __user *to, const void *from, unsigned long n) { 50 | return __arch_copy_to_user(to, from, n); 51 | } 52 | #endif /* API_PROXY_H_ */ 53 | 54 | 55 | -------------------------------------------------------------------------------- /code/rwProcMem_module.h: -------------------------------------------------------------------------------- 1 | #ifndef RWPROCMEM_H_ 2 | #define RWPROCMEM_H_ 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "api_proxy.h" 18 | #include "phy_mem.h" 19 | #include "proc_maps.h" 20 | #include "proc_list.h" 21 | #include "proc_root.h" 22 | #include "proc_rss.h" 23 | #include "proc_cmdline.h" 24 | #include "ver_control.h" 25 | #include "test.h" 26 | #ifdef CONFIG_USE_PROC_FILE_NODE 27 | #include 28 | #include "hide_procfs_dir.h" 29 | #endif 30 | ////////////////////////////////////////////////////////////////// 31 | 32 | enum { 33 | CMD_INIT_DEVICE_INFO = 1, // 初始化设备信息 34 | CMD_OPEN_PROCESS, // 打开进程 35 | CMD_READ_PROCESS_MEMORY, // 读取进程内存 36 | CMD_WRITE_PROCESS_MEMORY, // 写入进程内存 37 | CMD_CLOSE_PROCESS, // 关闭进程 38 | CMD_GET_PROCESS_MAPS_COUNT, // 获取进程的内存块地址数量 39 | CMD_GET_PROCESS_MAPS_LIST, // 获取进程的内存块地址列表 40 | CMD_CHECK_PROCESS_ADDR_PHY, // 检查进程内存是否有物理内存位置 41 | CMD_GET_PID_LIST, // 获取进程PID列表 42 | CMD_SET_PROCESS_ROOT, // 提升进程权限到Root 43 | CMD_GET_PROCESS_RSS, // 获取进程的物理内存占用大小 44 | CMD_GET_PROCESS_CMDLINE_ADDR, // 获取进程cmdline的内存地址 45 | CMD_HIDE_KERNEL_MODULE, // 隐藏驱动 46 | }; 47 | 48 | struct rwProcMemDev { 49 | #ifdef CONFIG_USE_PROC_FILE_NODE 50 | struct proc_dir_entry *proc_parent; 51 | struct proc_dir_entry *proc_entry; 52 | #endif 53 | bool is_hidden_module; 54 | }; 55 | static struct 
rwProcMemDev *g_rwProcMem_devp; 56 | 57 | static ssize_t rwProcMem_read(struct file* filp, char __user* buf, size_t size, loff_t* ppos); 58 | static const struct proc_ops rwProcMem_proc_ops = { 59 | .proc_read = rwProcMem_read, 60 | }; 61 | 62 | #endif /* RWPROCMEM_H_ */ -------------------------------------------------------------------------------- /code/proc_root.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_ROOT_H_ 2 | #define PROC_ROOT_H_ 3 | #include 4 | #include 5 | #include "proc_root_auto_offset.h" 6 | #include "ver_control.h" 7 | //声明 8 | ////////////////////////////////////////////////////////////////////////// 9 | static inline int set_process_root(struct pid* proc_pid_struct); 10 | 11 | 12 | //实现 13 | ////////////////////////////////////////////////////////////////////////// 14 | static uint64_t get_cap_ability_max(void) { 15 | 16 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) 17 | uint64_t cap_default = 0x3FFFFFFFFF; 18 | #elif MY_LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0) 19 | uint64_t cap_default = 0xFFFFFFFFFF; 20 | #else 21 | uint64_t cap_default = 0x1FFFFFFFFFF; 22 | #endif 23 | 24 | return cap_default; 25 | } 26 | 27 | static inline int set_process_root(struct pid* proc_pid_struct) { 28 | if (g_init_real_cred_offset_success == false) { 29 | return -ENOENT; 30 | } 31 | 32 | if (g_init_real_cred_offset_success) { 33 | struct task_struct * task = NULL; 34 | struct cred * cred = NULL; 35 | char *pCred = NULL; 36 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 37 | if (!task) { return -1; } 38 | 39 | pCred = (char*)&task->real_cred; 40 | 41 | pCred += g_real_cred_offset; 42 | pCred += sizeof(void*); 43 | cred = (struct cred *)*(size_t*)pCred; 44 | if (cred) { 45 | uint64_t cap = get_cap_ability_max(); 46 | cred->uid = cred->suid = cred->euid = cred->fsuid = GLOBAL_ROOT_UID; 47 | cred->gid = cred->sgid = cred->egid = cred->fsgid = GLOBAL_ROOT_GID; 48 | memcpy(&cred->cap_inheritable, &cap, 
sizeof(cap)); 49 | memcpy(&cred->cap_permitted, &cap, sizeof(cap)); 50 | memcpy(&cred->cap_effective, &cap, sizeof(cap)); 51 | memcpy(&cred->cap_bset, &cap, sizeof(cap)); 52 | memcpy(&cred->cap_ambient, &cap, sizeof(cap)); 53 | return 0; 54 | } 55 | return -EBADF; 56 | 57 | } 58 | return -ESPIPE; 59 | 60 | } 61 | #endif /* PROC_ROOT_H_ */ 62 | 63 | 64 | -------------------------------------------------------------------------------- /code/proc_list.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_LIST_H_ 2 | #define PROC_LIST_H_ 3 | #include "api_proxy.h" 4 | #include "proc_list_auto_offset.h" 5 | #include "ver_control.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | //声明 14 | ////////////////////////////////////////////////////////////////////////// 15 | static ssize_t get_proc_pid_list(bool is_kernel_buf, char* buf, size_t buf_size); 16 | 17 | 18 | //实现 19 | ////////////////////////////////////////////////////////////////////////// 20 | 21 | static ssize_t get_proc_pid_list(bool is_kernel_buf, 22 | char* buf, 23 | size_t buf_size) { 24 | struct task_struct *p, *next; 25 | ssize_t count = 0; 26 | size_t buf_pos = 0; 27 | 28 | if (!g_init_task_next_offset_success || !g_init_task_pid_offset_success) { 29 | return -EFAULT; 30 | } 31 | 32 | p = &init_task; 33 | while (1) { 34 | uintptr_t list_next = *(uintptr_t *)((char *)p + g_task_next_offset); 35 | next = (struct task_struct *)(list_next - g_task_next_offset); 36 | if (next == &init_task) 37 | break; 38 | 39 | count++; 40 | 41 | { 42 | pid_t pid_v = *(pid_t *)((char *)next + g_task_pid_offset); 43 | int pid_n = pid_v; 44 | printk_debug(KERN_INFO "iter_task: pid = %d\n", pid_n); 45 | if (buf_pos < buf_size) { 46 | if (is_kernel_buf) { 47 | memcpy((void*)((size_t)buf + (size_t)buf_pos), &pid_n, sizeof(pid_n)); 48 | } else { 49 | x_copy_to_user((void*)((size_t)buf + (size_t)buf_pos), &pid_n, sizeof(pid_n)); 50 | } 51 | buf_pos += 
sizeof(pid_n); 52 | } 53 | } 54 | p = next; 55 | } 56 | 57 | return count; 58 | } 59 | 60 | #endif /* PROC_LIST_H_ */ 61 | 62 | 63 | -------------------------------------------------------------------------------- /README_zh.md: -------------------------------------------------------------------------------- 1 | **其他语言版本: [English](README.md), [中文](README_zh.md).** 2 | # 自动化内核驱动构建工具 3 | 本 GitHub Action 可在云端自动化编译 Android 内核驱动程序,无需本地编译环境。解决访问 Google 源码仓库等问题,编译时间控制在 30min内。 4 | 5 | ## 核心功能 6 | 7 | - ✅ **云端编译** - 无需本地环境配置 8 | - ✅ **自动源码管理** - 从官方仓库获取 Android 内核源码 9 | - ✅ **版本感知构建** - 自动选择正确的构建系统 10 | - ✅ **参数化输入** - 通过工作流参数自定义构建 11 | - ✅ **结果打包** - 下载编译好的驱动和内核镜像 12 | 13 | ## 使用指南 14 | 15 | ### 1. 仓库设置 16 | 1. 在仓库中创建 `code` 目录 17 | 2. 将以下文件放入 `code` 目录: 18 | - 驱动源文件 (`.c` 和 `.h`) 19 | - 驱动的 `Makefile` 20 | - 其他依赖文件 21 | 22 | ### 2. 运行工作流 23 | 1. 转到 GitHub 仓库的 **Actions** 标签页 24 | 2. 选择 **Android Kernel Driver Builder** 25 | 3. 点击 **Run workflow** 26 | 4. 提供以下参数: 27 | - `android_version`: Android 版本(内核) (例如 `14`) 28 | - `kernel_version`: 内核版本 (例如 `6.1`) 29 | - `driver_name`: 驱动文件名 (例如 `mydriver.ko`) 30 | - `target_arch`: 设备架构 (默认 `aarch64`) 31 | 32 | ### 3. 获取结果 33 | 编译成功后 (30 分钟): 34 | 1. 转到完成的工作流运行 35 | 2. 下载 `kernel-driver-<架构>` 产物 36 | 3. 解压后包含: 37 | - 编译好的驱动 (`.ko` 文件) 38 | - 内核镜像 (`boot.img`) 39 | - 构建日志 40 | 41 | ## 配置参考 42 | 43 | ### 输入参数 44 | 45 | | 参数 | 说明 | 示例 | 46 | |------|------|------| 47 | | `android_version` | Android 系统版本 | `11`, `12`, `13`, `14` | 48 | | `kernel_version` | Linux 内核版本 | `5.10`, `5.15`, `6.1` | 49 | | `driver_name` | 驱动文件名 | `custom_driver.ko` | 50 | | `target_arch` | 设备 CPU 架构 | `aarch64`, `x86_64` | 51 | 52 | ### 技术说明 53 | 54 | 1. **构建系统选择**: 55 | - Android 11 及更早版本:使用传统 `build.sh` 系统 56 | - Android 12 及更新版本:使用现代 Bazel 构建系统 57 | 58 | 2. **源码管理**: 59 | - 自动从 Google 仓库获取内核源码 60 | - 使用并行下载加速同步过程 61 | 62 | 3. 
**驱动集成**: 63 | - 自动将驱动添加到内核构建系统 64 | - 注册驱动为 GKI 模块 65 | - 自动处理 Makefile 修改 66 | 67 | ## 故障排除 68 | 69 | **Q: "repo sync" 步骤失败** 70 | A: 重新运行工作流,Google 服务器偶尔会出现超时 71 | 72 | **Q: 输出产物中找不到驱动文件** 73 | A: 检查: 74 | - `driver_name` 参数是否正确(需与 Makefile 匹配) 75 | - 源文件是否在 `/code` 目录 76 | - Makefile 是否生成预期的 `.ko` 文件 77 | 78 | **Q: 出现 "Kernel configuration not found" 错误** 79 | A: 确认内核版本在 [Android 内核源码](https://android.googlesource.com/kernel/manifest/) 中存在对应分支 80 | 81 | ## 支持 82 | 83 | 问题反馈和功能请求: 84 | - [提交 Issue](https://github.com/systemnb/compile_android_driver/issues) 85 | - 请提供工作流日志和输入参数 86 | -------------------------------------------------------------------------------- /code/proc_root_auto_offset.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_ROOT_AUTO_OFFSET_H_ 2 | #define PROC_ROOT_AUTO_OFFSET_H_ 3 | #include 4 | #include "ver_control.h" 5 | 6 | static ssize_t g_real_cred_offset = 0; 7 | static bool g_init_real_cred_offset_success = false; 8 | 9 | static inline int init_proc_root_offset(const char* my_name) { 10 | const ssize_t offset_lookup_min = -100; 11 | const ssize_t offset_lookup_max = 300; 12 | const ssize_t min_real_cred_offset_limit = offset_lookup_min + sizeof(void*) * 3; 13 | if(g_init_real_cred_offset_success) { 14 | return 0; 15 | } 16 | 17 | g_init_real_cred_offset_success = false; 18 | for (g_real_cred_offset = offset_lookup_min; g_real_cred_offset <= offset_lookup_max; g_real_cred_offset++) { 19 | 20 | char* pcomm = (char*)¤t->real_cred; 21 | pcomm += g_real_cred_offset; 22 | 23 | printk_debug(KERN_EMERG "curent g_real_cred_offset:%zd, bytes:%x\n", g_real_cred_offset, *(unsigned char*)pcomm); 24 | 25 | if(g_real_cred_offset < min_real_cred_offset_limit) { 26 | continue; 27 | } 28 | if (strcmp(pcomm, my_name) == 0) { 29 | ssize_t maybe_real_cred_offset = g_real_cred_offset - sizeof(void*) * 2; 30 | char * p_test_mem1 = (char*)¤t->real_cred + maybe_real_cred_offset; 31 | char * p_test_mem2 = 
(char*)¤t->real_cred + maybe_real_cred_offset + sizeof(void*); // for get cred *cred; 32 | if(memcmp(p_test_mem1, p_test_mem2, sizeof(void*)) != 0 ) { // becasuse the real_cred is equal the cred 33 | maybe_real_cred_offset = g_real_cred_offset - sizeof(void*) * 3; 34 | p_test_mem1 = (char*)¤t->real_cred + maybe_real_cred_offset; 35 | p_test_mem2 = (char*)¤t->real_cred + maybe_real_cred_offset + sizeof(void*); // for get cred *cred; 36 | if(memcmp(p_test_mem1, p_test_mem2, sizeof(void*)) != 0 ) { // becasuse the real_cred is equal the cred 37 | break; // failed 38 | } 39 | } 40 | g_real_cred_offset = maybe_real_cred_offset; 41 | 42 | printk_debug(KERN_EMERG "strcmp found %zd\n", g_real_cred_offset); 43 | 44 | g_init_real_cred_offset_success = true; 45 | break; 46 | } 47 | 48 | } 49 | 50 | if (!g_init_real_cred_offset_success) { 51 | printk_debug(KERN_INFO "real_cred offset failed\n"); 52 | return -ESPIPE; 53 | } 54 | printk_debug(KERN_INFO "g_real_cred_offset:%zu\n", g_real_cred_offset); 55 | return 0; 56 | } 57 | #endif /* PROC_ROOT_AUTO_OFFSET_H_ */ 58 | 59 | 60 | -------------------------------------------------------------------------------- /code/hide_procfs_dir.h: -------------------------------------------------------------------------------- 1 | #ifndef _HIDE_PROCFS_DIR_H_ 2 | #define _HIDE_PROCFS_DIR_H_ 3 | 4 | #include "ver_control.h" 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | static char g_hide_dir_name[256] = {0}; 14 | 15 | static filldir_t old_filldir; 16 | 17 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0) 18 | static int my_filldir(struct dir_context *buf, 19 | const char *name, 20 | int namelen, 21 | loff_t offset, 22 | u64 ino, 23 | unsigned int d_type) 24 | { 25 | if (namelen == strlen(g_hide_dir_name) && 26 | !strncmp(name, g_hide_dir_name, namelen)) 27 | { 28 | return 0; 29 | } 30 | return old_filldir(buf, name, namelen, offset, ino, d_type); 31 | } 32 | #else 33 | static bool 
my_filldir(struct dir_context *ctx, 34 | const char *name, 35 | int namelen, 36 | loff_t offset, 37 | u64 ino, 38 | unsigned int d_type) 39 | { 40 | if (namelen == strlen(g_hide_dir_name) && 41 | !strncmp(name, g_hide_dir_name, namelen)) 42 | { 43 | return true; 44 | } 45 | return old_filldir(ctx, name, namelen, offset, ino, d_type); 46 | } 47 | #endif 48 | 49 | static int handler_pre(struct kprobe *kp, struct pt_regs *regs) 50 | { 51 | struct dir_context *ctx = (struct dir_context *)regs->regs[1]; 52 | old_filldir = ctx->actor; 53 | ctx->actor = my_filldir; 54 | return 0; 55 | } 56 | 57 | static struct kprobe kp_hide_procfs_dir = { 58 | .symbol_name = "proc_root_readdir", 59 | .pre_handler = handler_pre, 60 | }; 61 | 62 | static bool start_hide_procfs_dir(const char* hide_dir_name) 63 | { 64 | //这里原理上可以换成SKRoot的汇编写法。避免kprobe。 65 | int ret; 66 | strlcpy(g_hide_dir_name, hide_dir_name, sizeof(g_hide_dir_name)); 67 | ret = register_kprobe(&kp_hide_procfs_dir); 68 | if (ret) { 69 | printk_debug("[hide_procfs_dir] register_kprobe failed: %d\n", ret); 70 | return false; 71 | } 72 | printk_debug("[hide_procfs_dir] kprobe installed, hiding \"%s\"\n", g_hide_dir_name); 73 | return true; 74 | } 75 | 76 | static void stop_hide_procfs_dir(void) 77 | { 78 | unregister_kprobe(&kp_hide_procfs_dir); 79 | printk_debug("[hide_procfs_dir] kprobe removed\n"); 80 | } 81 | 82 | #endif // _HIDE_PROCFS_DIR_H_ 83 | -------------------------------------------------------------------------------- /code/linux_kernel_api.h: -------------------------------------------------------------------------------- 1 | #ifndef LINUX_KERNEL_API_H_ 2 | #define LINUX_KERNEL_API_H_ 3 | #include "ver_control.h" 4 | 5 | #include 6 | 7 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) 8 | 9 | long probe_kernel_read(void* dst, const void* src, size_t size); 10 | 11 | static long x_probe_kernel_read(void* bounce, const char* ptr, size_t sz) { 12 | return probe_kernel_read(bounce, ptr, sz); 13 | } 14 | 15 | 
#endif 16 | 17 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0) 18 | 19 | long copy_from_kernel_nofault(void* dst, const void* src, size_t size); 20 | 21 | static long x_probe_kernel_read(void* bounce, const char* ptr, size_t sz) { 22 | return copy_from_kernel_nofault(bounce, ptr, sz); 23 | } 24 | 25 | #endif 26 | 27 | 28 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(6,6,0) 29 | static inline pte_t x_pte_mkwrite(pte_t pte) { 30 | return pte_mkwrite(pte); 31 | } 32 | #else 33 | static inline pte_t x_pte_mkwrite(pte_t pte) { 34 | struct vm_area_struct vma = {.vm_flags = VM_READ}; 35 | return pte_mkwrite(pte, &vma); 36 | } 37 | #endif 38 | 39 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(6,6,0) 40 | static size_t x_read_mm_struct_rss(struct mm_struct * mm, ssize_t offset) { 41 | struct mm_rss_stat *rss_stat = (struct mm_rss_stat *)((size_t)&mm->rss_stat + offset); 42 | size_t total_rss; 43 | ssize_t val1, val2, val3; 44 | val1 = atomic_long_read(&rss_stat->count[MM_FILEPAGES]); 45 | val2 = atomic_long_read(&rss_stat->count[MM_ANONPAGES]); 46 | #ifdef MM_SHMEMPAGES 47 | val3 = atomic_long_read(&rss_stat->count[MM_SHMEMPAGES]); 48 | #else 49 | val3 = 0; 50 | #endif 51 | if (val1 < 0) { val1 = 0; } 52 | if (val2 < 0) { val2 = 0; } 53 | if (val3 < 0) { val3 = 0; } 54 | total_rss = val1 + val2 + val3; 55 | return total_rss; 56 | } 57 | #else 58 | static size_t x_read_mm_struct_rss(struct mm_struct * mm, ssize_t offset) { 59 | struct percpu_counter *rss_stat = (struct percpu_counter *)((size_t)&mm->rss_stat + offset); 60 | size_t total_rss; 61 | ssize_t val1, val2, val3; 62 | val1 = percpu_counter_read(&rss_stat[MM_FILEPAGES]); 63 | val2 = percpu_counter_read(&rss_stat[MM_ANONPAGES]); 64 | 65 | #ifdef MM_SHMEMPAGES 66 | val3 = percpu_counter_read(&rss_stat[MM_SHMEMPAGES]); 67 | #else 68 | val3 = 0; 69 | #endif 70 | if (val1 < 0) { val1 = 0; } 71 | if (val2 < 0) { val2 = 0; } 72 | if (val3 < 0) { val3 = 0; } 73 | total_rss = val1 + val2 + val3; 74 | return total_rss; 75 | 
} 76 | #endif 77 | 78 | #endif /* LINUX_KERNEL_API_H_ */ 79 | -------------------------------------------------------------------------------- /code/proc_cmdline.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_CMDLINE_H_ 2 | #define PROC_CMDLINE_H_ 3 | //声明 4 | ////////////////////////////////////////////////////////////////////////// 5 | #include 6 | #include "ver_control.h" 7 | 8 | static inline struct pid * get_proc_pid_struct(int pid); 9 | static inline int get_proc_pid(struct pid* proc_pid_struct); 10 | static inline void release_proc_pid_struct(struct pid* proc_pid_struct); 11 | static inline int get_proc_cmdline_addr(struct pid* proc_pid_struct, size_t * arg_start, size_t * arg_end); 12 | static inline int get_task_proc_cmdline_addr(struct task_struct *task, size_t * arg_start, size_t * arg_end); 13 | 14 | 15 | //实现 16 | ////////////////////////////////////////////////////////////////////////// 17 | #include "phy_mem.h" 18 | #include "proc_cmdline_auto_offset.h" 19 | #include "api_proxy.h" 20 | 21 | static inline struct pid * get_proc_pid_struct(int pid) { 22 | return find_get_pid(pid); 23 | } 24 | 25 | static inline int get_proc_pid(struct pid* proc_pid_struct) { 26 | return proc_pid_struct->numbers[0].nr; 27 | } 28 | 29 | static inline void release_proc_pid_struct(struct pid* proc_pid_struct) { 30 | put_pid(proc_pid_struct); 31 | } 32 | 33 | static inline int get_proc_cmdline_addr(struct pid* proc_pid_struct, size_t * arg_start, size_t * arg_end) { 34 | int ret = 0; 35 | struct task_struct *task = NULL; 36 | 37 | 38 | if (g_init_arg_start_offset_success == false) { 39 | return -ENOENT; 40 | } 41 | 42 | 43 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 44 | if (!task) { return -EFAULT; } 45 | ret = get_task_proc_cmdline_addr(task, arg_start, arg_end); 46 | return ret; 47 | } 48 | 49 | static inline int get_task_proc_cmdline_addr(struct task_struct *task, size_t * arg_start, size_t * arg_end) { 50 | if 
(g_init_arg_start_offset_success) { 51 | struct mm_struct *mm; 52 | ssize_t accurate_offset; 53 | mm = get_task_mm(task); 54 | 55 | if (!mm) { return -EFAULT; } 56 | 57 | //精确偏移 58 | accurate_offset = (ssize_t)((size_t)&mm->arg_start - (size_t)mm + g_arg_start_offset); 59 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 60 | mmput(mm); 61 | return -EFAULT; 62 | } 63 | 64 | if (down_read_mmap_lock(mm) != 0) { 65 | mmput(mm); 66 | return -EFAULT; 67 | } 68 | printk_debug(KERN_INFO "accurate_offset:%zd\n", accurate_offset); 69 | 70 | *arg_start = *(size_t*)((size_t)mm + (size_t)accurate_offset); 71 | *arg_end = *(size_t*)((size_t)mm + (size_t)accurate_offset + sizeof(unsigned long)); 72 | 73 | printk_debug(KERN_INFO "arg_start addr:0x%p\n", (void*)*arg_start); 74 | 75 | up_read_mmap_lock(mm); 76 | mmput(mm); 77 | return 0; 78 | } 79 | return -ESPIPE; 80 | } 81 | #endif /* PROC_CMDLINE_H_ */ -------------------------------------------------------------------------------- /code/proc_cmdline_auto_offset.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_CMDLINE_AUTO_OFFSET_H_ 2 | #define PROC_CMDLINE_AUTO_OFFSET_H_ 3 | //声明 4 | ////////////////////////////////////////////////////////////////////////// 5 | #include 6 | #include "ver_control.h" 7 | //实现 8 | ////////////////////////////////////////////////////////////////////////// 9 | #include "api_proxy.h" 10 | static ssize_t g_arg_start_offset = 0; 11 | static bool g_init_arg_start_offset_success = false; 12 | 13 | typedef int(*t_get_task_proc_cmdline_addr)(struct task_struct *task, size_t * arg_start, size_t * arg_end); 14 | 15 | static inline int init_proc_cmdline_offset(const char* my_cmdline, 16 | t_get_task_proc_cmdline_addr o_get_task_proc_cmdline_addr) { 17 | 18 | int is_found_cmdline_offset = 0; 19 | size_t size = 4096; 20 | char *new_cmd_line_buf = NULL; 21 | struct task_struct * mytask = x_get_current(); 22 | 23 | 
if(g_init_arg_start_offset_success) { 24 | return 0; 25 | } 26 | 27 | new_cmd_line_buf = (char*)kmalloc(size, GFP_KERNEL); 28 | g_init_arg_start_offset_success = true; 29 | for (g_arg_start_offset = -64; g_arg_start_offset <= 64; g_arg_start_offset += 1) { 30 | size_t arg_start = 0, arg_end = 0; 31 | printk_debug(KERN_INFO "get_task_proc_cmdline_addr g_arg_start_offset %zd\n", g_arg_start_offset); 32 | if (o_get_task_proc_cmdline_addr(mytask, &arg_start, &arg_end) == 0) { 33 | printk_debug(KERN_INFO "get_task_proc_cmdline_addr arg_start %p\n", (void*)arg_start); 34 | 35 | if (arg_start > 0) { 36 | 37 | size_t read_size = 0; 38 | 39 | memset(new_cmd_line_buf, 0, size); 40 | 41 | while (read_size < size) { 42 | size_t phy_addr; 43 | size_t pfn_sz; 44 | char *lpOutBuf; 45 | 46 | pte_t *pte; 47 | phy_addr = get_task_proc_phy_addr(mytask, arg_start + read_size, (pte_t*)&pte); 48 | printk_debug(KERN_INFO "phy_addr:0x%zx\n", phy_addr); 49 | if (phy_addr == 0) { 50 | break; 51 | } 52 | 53 | pfn_sz = size_inside_page(phy_addr, ((size - read_size) > PAGE_SIZE) ? 
PAGE_SIZE : (size - read_size)); 54 | printk_debug(KERN_INFO "pfn_sz:%zu\n", pfn_sz); 55 | 56 | lpOutBuf = (char*)(new_cmd_line_buf + read_size); 57 | read_ram_physical_addr(true, phy_addr, lpOutBuf, pfn_sz); 58 | read_size += pfn_sz; 59 | } 60 | 61 | printk_debug(KERN_INFO "new_cmd_line_buf:%s, len:%ld\n", new_cmd_line_buf, strlen(new_cmd_line_buf)); 62 | 63 | if (strcmp(new_cmd_line_buf, my_cmdline) == 0) { 64 | is_found_cmdline_offset = 1; 65 | break; 66 | } 67 | 68 | 69 | } 70 | } 71 | } 72 | 73 | kfree(new_cmd_line_buf); 74 | 75 | if (!is_found_cmdline_offset) { 76 | g_init_arg_start_offset_success = false; 77 | printk_debug(KERN_INFO "find cmdline offset failed\n"); 78 | return -ESPIPE; 79 | } 80 | printk_debug(KERN_INFO "g_arg_start_offset:%zu\n", g_arg_start_offset); 81 | return 0; 82 | } 83 | #endif /* PROC_CMDLINE_AUTO_OFFSET_H_ */ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **Read this in other languages: [English](README.md), [中文](README_zh.md).** 2 | # Automated Kernel Driver Builder 3 | 4 | This GitHub Action automates the process of building Android kernel drivers in the cloud, eliminating the need for local compilation environments. It solves common issues like accessing Google's source repositories and reduces compilation time to under 30min. 5 | 6 | ## Key Features 7 | 8 | - ✅ **Cloud-based compilation** - No local setup required 9 | - ✅ **Automatic source handling** - Fetches official Android kernel sources 10 | - ✅ **Version-aware building** - Automatically selects correct build system 11 | - ✅ **Parameterized inputs** - Customize builds via workflow inputs 12 | - ✅ **Artifact packaging** - Downloads compiled drivers and kernel images 13 | 14 | ## Usage Guide 15 | 16 | ### 1. Repository Setup 17 | 1. Create a `code` directory in your repository 18 | 2. 
Place these files in the `code` directory: 19 | - Driver source files (`.c` and `.h`) 20 | - `Makefile` for your driver 21 | - Any additional dependencies 22 | 23 | ### 2. Running the Workflow 24 | 1. Go to your GitHub repository's **Actions** tab 25 | 2. Select **Android Kernel Driver Builder** 26 | 3. Click **Run workflow** 27 | 4. Provide these parameters: 28 | - `android_version`: Your Android version (Kernel) (e.g., `14`) 29 | - `kernel_version`: Kernel version (e.g., `6.1`) 30 | - `driver_name`: Your driver filename (e.g., `mydriver.ko`) 31 | - `target_arch`: Device architecture (default: `aarch64`) 32 | 33 | ### 3. Retrieving Results 34 | After successful compilation (30minutes): 35 | 1. Go to the completed workflow run 36 | 2. Download the `kernel-driver-` artifact 37 | 3. Extract to find: 38 | - Compiled driver (`.ko` file) 39 | - Kernel images (`boot.img`) 40 | - Build logs 41 | 42 | ## Configuration Reference 43 | 44 | ### Input Parameters 45 | 46 | | Parameter | Description | Example | 47 | |-----------|-------------|---------| 48 | | `android_version` | Android OS version | `11`, `12`, `13`, `14` | 49 | | `kernel_version` | Linux kernel version | `5.10`, `5.15`, `6.1` | 50 | | `driver_name` | Output driver filename | `custom_driver.ko` | 51 | | `target_arch` | Device CPU architecture | `aarch64`, `x86_64` | 52 | 53 | ### Technical Notes 54 | 55 | 1. **Build System Selection**: 56 | - Android 11 and earlier: Legacy `build.sh` system 57 | - Android 12 and later: Modern Bazel build system 58 | 59 | 2. **Source Management**: 60 | - Automatically fetches kernel sources from Google's repositories 61 | - Uses parallel downloading for faster sync 62 | 63 | 3. **Driver Integration**: 64 | - Automatically adds driver to kernel build system 65 | - Registers driver as GKI module 66 | - Handles Makefile modifications 67 | 68 | ## Troubleshooting 69 | 70 | **Q: Build fails with "repo sync" errors** 71 | A: Retry the workflow. 
Google's servers can occasionally timeout. 72 | 73 | **Q: Driver not found in output artifacts** 74 | A: Verify: 75 | - Correct `driver_name` parameter (must match Makefile) 76 | - Source files are in `/code` directory 77 | - Makefile produces expected `.ko` filename 78 | 79 | **Q: "Kernel configuration not found" error** 80 | A: Confirm your kernel_version matches existing branches at [Android Kernel Sources](https://android.googlesource.com/kernel/manifest/) 81 | 82 | ## Support 83 | 84 | For issues and feature requests: 85 | - [Open an Issue](https://github.com/systemnb/compile_android_driver/issues) 86 | - Provide workflow logs and input parameters 87 | -------------------------------------------------------------------------------- /code/proc_list_auto_offset.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_LIST_AUTO_OFFSET_H_ 2 | #define PROC_LIST_AUTO_OFFSET_H_ 3 | #include 4 | #include "ver_control.h" 5 | 6 | static ssize_t g_task_next_offset = 0; 7 | static bool g_init_task_next_offset_success = false; 8 | 9 | static ssize_t g_task_pid_offset = 0; 10 | static bool g_init_task_pid_offset_success = false; 11 | 12 | static inline int init_task_next_offset(void) { 13 | struct task_struct * mytask = x_get_current(); 14 | uintptr_t addr_mytask = (uintptr_t)mytask; 15 | uintptr_t addr_mm = (uintptr_t)get_task_mm(mytask); 16 | size_t off_mm = 0; 17 | size_t off = 0; 18 | if(g_init_task_next_offset_success) { 19 | return 0; 20 | } 21 | 22 | for (off = 0; off <= sizeof(struct task_struct) - sizeof(void*); off+=4) { 23 | void *v = *(void **)(addr_mytask + off); 24 | if ((uintptr_t)v == addr_mm) { 25 | off_mm = off; 26 | break; 27 | } 28 | } 29 | 30 | if(off_mm == 0) { 31 | printk_debug(KERN_EMERG "init_task_next_offset mm_struct failed.\n"); 32 | return -EFAULT; 33 | } 34 | printk_debug(KERN_EMERG "init_task_next_offset mm_struct found:%zu.\n", off_mm); 35 | g_task_next_offset = off_mm - 
sizeof(mytask->pushable_dl_tasks) - sizeof(mytask->pushable_tasks) - sizeof(mytask->tasks); 36 | g_init_task_next_offset_success = true; 37 | printk_debug(KERN_INFO "init_task_next_offset: found tasks offset = %zu bytes\n", g_task_next_offset); 38 | return 0; 39 | } 40 | 41 | static inline int init_task_pid_offset(int pid, int tgid) { 42 | struct task_struct *mytask = x_get_current(); 43 | uintptr_t addr_mytask = (uintptr_t)mytask; 44 | uintptr_t addr_mm = (uintptr_t)get_task_mm(mytask); 45 | size_t off_mm = 0; 46 | size_t off = 0; 47 | 48 | size_t off_pid_static = (uintptr_t)&mytask->pid - addr_mytask; 49 | printk_debug(KERN_INFO 50 | "init_task_pid_offset: mytask@%p, &pid@%p, static off_pid = %zu, sizeof(%zu, %zu)\n", 51 | mytask, &mytask->pid, off_pid_static, sizeof(mytask->pid), sizeof(mytask->tgid)); 52 | 53 | if (g_init_task_pid_offset_success) { 54 | return 0; 55 | } 56 | 57 | for (off = 0; off <= sizeof(struct task_struct) - sizeof(void*); off+=4) { 58 | void *v = *(void **)(addr_mytask + off); 59 | if ((uintptr_t)v == addr_mm) { 60 | off_mm = off; 61 | break; 62 | } 63 | } 64 | 65 | if (off_mm == 0) { 66 | printk_debug(KERN_EMERG "init_task_pid_offset: mm_struct offset not found\n"); 67 | return -EFAULT; 68 | } 69 | printk_debug(KERN_INFO "init_task_pid_offset: mm_struct offset found = %zu\n", off_mm); 70 | 71 | for (off = off_mm; off <= sizeof(struct task_struct) - 2 * sizeof(pid_t); off += 4) { 72 | pid_t pid_v = *(pid_t *)(addr_mytask + off); 73 | pid_t tgid_v = *(pid_t *)(addr_mytask + off + sizeof(pid_t)); 74 | if (pid_v == pid && tgid_v == tgid) { 75 | g_task_pid_offset = off; 76 | g_init_task_pid_offset_success = true; 77 | printk_debug(KERN_INFO 78 | "init_task_pid_offset: found pid/tgid offset = %zu (pid=%d, tgid=%d)\n", 79 | g_task_pid_offset, pid, tgid); 80 | return 0; 81 | } 82 | } 83 | 84 | printk_debug(KERN_EMERG 85 | "init_task_pid_offset: failed to match pid=%d, tgid=%d\n", 86 | pid, tgid); 87 | return -ENOENT; 88 | } 89 | 90 | #endif /* 
PROC_LIST_AUTO_OFFSET_H_ */ 91 | 92 | 93 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Android Kernel Driver Builder 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | android_version: 7 | description: 'Android Version (Kernel) (e.g., 14)' 8 | required: true 9 | kernel_version: 10 | description: 'Kernel Version (e.g., 6.1)' 11 | required: true 12 | driver_name: 13 | description: 'Driver Module Name (e.g., mydriver.ko)' 14 | required: true 15 | target_arch: 16 | description: 'Target Architecture (aarch64, x86_64, etc.)' 17 | required: true 18 | default: 'aarch64' 19 | 20 | jobs: 21 | build: 22 | runs-on: ubuntu-latest 23 | 24 | steps: 25 | - name: Checkout repository 26 | uses: actions/checkout@v4.2.2 27 | 28 | - name: Prepare kerneldriver directory 29 | run: | 30 | mkdir kerneldriver 31 | mv ./code/*.h ./code/*.c ./code/Makefile kerneldriver/ 32 | 33 | - name: Install repo tool 34 | run: | 35 | sudo curl -L https://storage.googleapis.com/git-repo-downloads/repo -o /usr/local/bin/repo 36 | sudo chmod a+x /usr/local/bin/repo 37 | 38 | - name: Set up Android Kernel source 39 | run: | 40 | mkdir -p android-kernel && cd android-kernel 41 | repo init -u https://android.googlesource.com/kernel/manifest -b common-android${{ github.event.inputs.android_version }}-${{ github.event.inputs.kernel_version }} 42 | repo sync -j$(nproc) -c --no-tags --optimized-fetch --force-sync 43 | 44 | - name: Copy kerneldriver 45 | run: | 46 | cd android-kernel 47 | cp -r ../kerneldriver common/drivers 48 | 49 | - name: Modify Makefile 50 | run: | 51 | cd android-kernel 52 | echo "obj-y += kerneldriver/" >> common/drivers/Makefile 53 | 54 | - name: Add module to GKI modules list 55 | if: ${{ github.event.inputs.android_version > 13 }} 56 | run: | 57 | cd android-kernel 58 | MODULE_NAME="drivers/kerneldriver/${{ 
github.event.inputs.driver_name }}" 59 | 60 | awk -i inplace -v module="$MODULE_NAME" ' 61 | BEGIN { added=0 } 62 | /_COMMON_GKI_MODULES_LIST = \[/ { in_list=1 } 63 | in_list && /\]/ { 64 | if (!added) { 65 | print " \"" module "\"," 66 | added=1 67 | } 68 | in_list=0 69 | } 70 | in_list && !added { 71 | if (module < $0) { 72 | print " \"" module "\"," 73 | added=1 74 | } 75 | } 76 | { print } 77 | ' common/modules.bzl 78 | 79 | - name: Prepare module list for legacy builds 80 | if: ${{ github.event.inputs.android_version <= 13 }} 81 | run: | 82 | cd android-kernel 83 | echo "drivers/kerneldriver/${{ github.event.inputs.driver_name }}" >> common/android/gki_${{ github.event.inputs.target_arch }}_modules 84 | - name: Increase stack frame size limit 85 | run: | 86 | cd android-kernel 87 | find . -type f -name "Makefile*" -exec sed -i 's/-Wframe-larger-than=[0-9]*/-Wframe-larger-than=4096/g' {} + 88 | grep -q "FRAME_WARN" common/Makefile || echo 'KBUILD_CFLAGS += -Wframe-larger-than=4096' >> common/Makefile 89 | 90 | - name: Install dependencies 91 | run: | 92 | sudo apt-get update 93 | sudo apt-get install -y build-essential flex bison libssl-dev libelf-dev bc python-is-python3 94 | - name: Fix missing kprobe symbols for Android 13 95 | if: ${{ github.event.inputs.android_version == 13 }} 96 | run: | 97 | cd android-kernel 98 | for symbol in register_kprobe unregister_kprobe; do 99 | grep -q "$symbol" common/android/abi_gki_${{ github.event.inputs.target_arch }} || \ 100 | echo "$symbol" >> common/android/abi_gki_${{ github.event.inputs.target_arch }} 101 | done 102 | sed -i '/EXPORT_SYMBOL(unregister_kprobe);/a EXPORT_SYMBOL(register_kprobe);' common/kernel/kprobes.c 103 | 104 | - name: Build kernel module 105 | run: | 106 | cd android-kernel 107 | if [ ${{ github.event.inputs.android_version }} -le 13 ]; then 108 | BUILD_CONFIG=common/build.config.gki.${{ github.event.inputs.target_arch }} LTO=thin build/build.sh -j32 109 | OUTPUT_PATH="out/android${{ 
github.event.inputs.android_version }}-${{ github.event.inputs.kernel_version }}/dist" 110 | else 111 | echo "Using Bazel build system for Android ${{ github.event.inputs.android_version }}" 112 | tools/bazel run //common:kernel_${{ github.event.inputs.target_arch }}_dist 113 | OUTPUT_PATH="out/kernel_${{ github.event.inputs.target_arch }}" 114 | fi 115 | echo "OUTPUT_PATH=$OUTPUT_PATH" >> $GITHUB_ENV 116 | continue-on-error: true 117 | 118 | - name: Upload artifacts 119 | uses: actions/upload-artifact@v4.6.2 120 | with: 121 | name: kernel-driver-${{ github.event.inputs.target_arch }} 122 | path: | 123 | android-kernel/${{ env.OUTPUT_PATH }} 124 | -------------------------------------------------------------------------------- /code/phy_mem_auto_offset.h: -------------------------------------------------------------------------------- 1 | #ifndef PHY_MEM_AUTO_OFFSET_H_ 2 | #define PHY_MEM_AUTO_OFFSET_H_ 3 | #include "api_proxy.h" 4 | #include "ver_control.h" 5 | 6 | #undef pgd_offset 7 | #if MY_LINUX_VERSION_CODE <= KERNEL_VERSION(3,10,84) 8 | #define my_pgd_offset(pgd, addr) (pgd+pgd_index(addr)) 9 | #define my_pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr)))) 10 | #endif 11 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(5,10,43) 12 | #define my_pgd_offset(pgd, addr) (pgd+pgd_index(addr)) 13 | #define my_pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr)))) 14 | #endif 15 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,43) 16 | #define my_pgd_offset(pgd, address) pgd_offset_pgd(pgd, address) 17 | #endif 18 | 19 | #define my_get_fs() (current_thread_info()->addr_limit) 20 | 21 | static size_t g_phy_total_memory_size = 0; // 物理内存总大小 22 | static int init_phy_total_memory_size(void) { 23 | struct sysinfo si; 24 | unsigned long mem_total, sav_total; 25 | unsigned int bitcount = 0; 26 | unsigned int mem_unit = 0; 27 | if (g_phy_total_memory_size) { 28 | return 0; 29 | } 30 | 31 | si_meminfo(&si); 32 | mem_unit = si.mem_unit; 33 | 34 
| mem_total = si.totalram; 35 | while (mem_unit > 1) { 36 | bitcount++; 37 | mem_unit >>= 1; 38 | sav_total = mem_total; 39 | mem_total <<= 1; 40 | if (mem_total < sav_total) { 41 | return 0; 42 | } 43 | } 44 | si.totalram <<= bitcount; 45 | g_phy_total_memory_size = __pa(si.totalram); 46 | printk_debug(KERN_INFO "MemTotal si.totalram:%ld\n", si.totalram); 47 | printk_debug(KERN_INFO "g_phy_total_memory_size:%ld\n", g_phy_total_memory_size); 48 | return 0; 49 | } 50 | 51 | static ssize_t g_pgd_offset_mm_struct = 0; 52 | static bool g_init_pgd_offset_success = false; 53 | 54 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,75) 55 | 56 | static int init_pgd_offset(struct mm_struct *mm) { 57 | int is_found_pgd_offset = 0; 58 | g_init_pgd_offset_success = false; 59 | for (g_pgd_offset_mm_struct = -40; g_pgd_offset_mm_struct <= 80; g_pgd_offset_mm_struct += 1) { 60 | char *rp; 61 | size_t val; 62 | ssize_t accurate_offset = (ssize_t)((size_t)&mm->pgd - (size_t)mm + g_pgd_offset_mm_struct); 63 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 64 | return -EFAULT; 65 | } 66 | rp = (char*)((size_t)mm + (size_t)accurate_offset); 67 | val = *(size_t*)(rp); 68 | printk_debug(KERN_EMERG "init_pgd_offset %zd:%zd:%p:%ld\n", g_pgd_offset_mm_struct, accurate_offset, rp, val); 69 | 70 | if (val == TASK_SIZE) { 71 | g_pgd_offset_mm_struct += sizeof(unsigned long); 72 | printk_debug(KERN_EMERG "found g_init_pgd_offset_success:%zd\n", g_pgd_offset_mm_struct); 73 | is_found_pgd_offset = 1; 74 | break; 75 | } 76 | } 77 | if (!is_found_pgd_offset) { 78 | printk_debug(KERN_INFO "find pgd offset failed\n"); 79 | return -ESPIPE; 80 | } 81 | g_init_pgd_offset_success = true; 82 | printk_debug(KERN_INFO "g_pgd_offset_mm_struct:%zu\n", g_pgd_offset_mm_struct); 83 | return 0; 84 | } 85 | #else 86 | static int init_pgd_offset(struct mm_struct *mm) { 87 | int is_found_pgd_offset = 0; 88 | g_init_pgd_offset_success = false; 89 | for (g_pgd_offset_mm_struct = -40; 
g_pgd_offset_mm_struct <= 80; g_pgd_offset_mm_struct += 1) { 90 | char *rp; 91 | size_t val; 92 | ssize_t accurate_offset = (ssize_t)((size_t)&mm->pgd - (size_t)mm + g_pgd_offset_mm_struct); 93 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 94 | return -EFAULT; 95 | } 96 | rp = (char*)((size_t)mm + (size_t)accurate_offset); 97 | val = *(size_t*)(rp); 98 | printk_debug(KERN_EMERG "init_pgd_offset %zd:%zd:%p:%ld\n", g_pgd_offset_mm_struct, accurate_offset, rp, val); 99 | 100 | if (val == TASK_SIZE) { 101 | g_pgd_offset_mm_struct += sizeof(unsigned long); 102 | g_pgd_offset_mm_struct += sizeof(unsigned long); 103 | printk_debug(KERN_EMERG "found g_init_pgd_offset_success:%zd\n", g_pgd_offset_mm_struct); 104 | is_found_pgd_offset = 1; 105 | break; 106 | } 107 | } 108 | if (!is_found_pgd_offset) { 109 | printk_debug(KERN_INFO "find pgd offset failed\n"); 110 | return -ESPIPE; 111 | } 112 | g_init_pgd_offset_success = true; 113 | printk_debug(KERN_INFO "g_pgd_offset_mm_struct:%zu\n", g_pgd_offset_mm_struct); 114 | return 0; 115 | } 116 | #endif 117 | 118 | static inline pgd_t *x_pgd_offset(struct mm_struct *mm, size_t addr) { 119 | size_t pgd; 120 | ssize_t accurate_offset; 121 | if (g_init_pgd_offset_success == false) { 122 | if (init_pgd_offset(mm) != 0) { 123 | return NULL; 124 | } 125 | } 126 | accurate_offset = (ssize_t)((size_t)&mm->pgd - (size_t)mm + g_pgd_offset_mm_struct); 127 | printk_debug(KERN_INFO "x_pgd_offset accurate_offset:%zd\n", accurate_offset); 128 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 129 | return NULL; 130 | } 131 | 132 | //拷贝到我自己的pgd指针变量里去 133 | //写法一(可读性强) 134 | //void * rv = (size_t*)((size_t)mm + (size_t)accurate_offset); 135 | //pgd_t *pgd; 136 | //memcpy(&pgd, rv, sizeof(pgd_t *)); 137 | 138 | //写法二(快些) 139 | pgd = *(size_t*)((size_t)mm + (size_t)accurate_offset); 140 | 141 | return my_pgd_offset((pgd_t*)pgd, addr); 142 | } 143 | 144 | #endif /* PHY_MEM_AUTO_OFFSET_H_ */ 
-------------------------------------------------------------------------------- /code/test.h: -------------------------------------------------------------------------------- 1 | #ifndef TEST_H_ 2 | #define TEST_H_ 3 | #include "phy_mem.h" 4 | #include "proc_maps.h" 5 | #include "proc_list.h" 6 | #include "proc_cmdline.h" 7 | #include "proc_rss.h" 8 | #include "ver_control.h" 9 | 10 | // 11 | //static void test1(void) { 12 | // size_t phy_addr; 13 | // struct file * pFile = open_pagemap(14861); 14 | // printk(KERN_INFO "open_pagemap %d\n", pFile); 15 | // if (pFile) { 16 | // phy_addr = get_pagemap_phy_addr(pFile, 0x10106000); 17 | // printk(KERN_INFO "pagemap phy_addr 0x%llx\n", phy_addr); 18 | // 19 | // char buf[4]; 20 | // size_t ret; 21 | // memset(buf, 0, 4); 22 | // read_ram_physical_addr(true, &ret, phy_addr, buf, 4); 23 | // if (ret) { 24 | // int i; 25 | // for (i = 0; i < 4; i++) { 26 | // printk(KERN_INFO "[%d]0x%x ", i, buf[i]); 27 | // } 28 | // } 29 | // close_pagemap(pFile); 30 | // } 31 | //} 32 | // 33 | ///* 34 | //static void test2(void) 35 | //{ 36 | // struct pid * proc_pid_struct = get_proc_pid_struct(14861); 37 | // int map_count = get_proc_map_count(proc_pid_struct); 38 | // printk(KERN_INFO "map_count:%d\n", map_count); 39 | // 40 | // char test[8 + 8 + 4 + 4096] = { 0 }; 41 | // get_proc_maps_list(proc_pid_struct, 4096, &test,sizeof(test), true, NULL); 42 | // 43 | // printk("start:0x%lx,end:0x%lx,flags:%x%x%x%x,name:%s\n", 44 | // *(unsigned long*)&test[0], 45 | // *(unsigned long*)&test[8], 46 | // *(unsigned char*)&test[16], 47 | // *(unsigned char*)&test[17], 48 | // *(unsigned char*)&test[18], 49 | // *(unsigned char*)&test[19], 50 | // &test[20]); 51 | // 52 | // 53 | // release_proc_pid_struct(proc_pid_struct); 54 | // 55 | //} 56 | //*/ 57 | // 58 | //static void test3(void) { 59 | // struct pid * proc_pid_struct = get_proc_pid_struct(14861); 60 | // printk(KERN_INFO "test3 get_proc_pid_struct:%ld\n", proc_pid_struct); 61 | // if 
(proc_pid_struct) { 62 | // size_t phy_addr = 0; 63 | // pte_t *pte; 64 | // get_proc_phy_addr(&phy_addr, proc_pid_struct, 0x10106000, &pte); 65 | // printk(KERN_INFO "calc phy_addr:0x%llx\n", phy_addr); 66 | // 67 | // release_proc_pid_struct(proc_pid_struct); 68 | // } 69 | // 70 | //} 71 | // 72 | // 73 | //static void test4(void) { 74 | // struct pid * proc_pid_struct = get_proc_pid_struct(23948); 75 | // printk(KERN_INFO "test4 get_proc_pid_struct:%ld\n", proc_pid_struct); 76 | // if (proc_pid_struct) { 77 | // size_t arg_start = 0, arg_end = 0; 78 | // int res = get_proc_cmdline_addr(proc_pid_struct, -8, &arg_start, &arg_end); 79 | // printk(KERN_INFO "test4 get_proc_cmdline_addr arg_start:0x%llx arg_end:0x%llx\n", arg_start, arg_end); 80 | // 81 | // 82 | // int i = -32; 83 | // for (; i <= 32; i += 4) { 84 | // size_t accurate_offset = get_proc_cmdline_maybe_addr(proc_pid_struct, i, &arg_start); 85 | // 86 | // 87 | // size_t phy_addr = get_proc_phy_addr(proc_pid_struct, arg_start); 88 | // char name[100] = { 0 }; 89 | // 90 | // if (phy_addr) { 91 | // read_ram_physical_addr_to_kernel(phy_addr, &name, sizeof(name)); 92 | // } 93 | // printk(KERN_INFO "test4 get_proc_cmdline_maybe_addr arg_start:0x%llx 0x%llx %s\n", arg_start, phy_addr, name); 94 | // } 95 | // 96 | // 97 | // release_proc_pid_struct(proc_pid_struct); 98 | // } 99 | // 100 | //} 101 | // 102 | // 103 | //static void test5(void) { 104 | // int *pid = x_kmalloc(sizeof(int) * 100, GFP_KERNEL); 105 | // int i = 0; 106 | // int count = get_proc_pid_list(true, (char*)pid, sizeof(int) * 100); 107 | // printk(KERN_INFO "test5 count:%d\n", count); 108 | // 109 | // for (i = 0; i < 100; i++) { 110 | // if (!!pid[i]) { 111 | // printk(KERN_INFO "test5 pid[%d]:%d\n", i, pid[i]); 112 | // } 113 | // } 114 | // 115 | // 116 | // kfree(pid); 117 | // 118 | //} 119 | // 120 | //static void test6(void) { 121 | // int ret = 0; 122 | // struct pid * proc_pid_struct = get_proc_pid_struct(17597); 123 | // 124 | 
// printk(KERN_INFO "test6 get_proc_pid_struct:%ld\n", proc_pid_struct); 125 | // 126 | // ret = set_process_root(proc_pid_struct); 127 | // 128 | // printk(KERN_INFO "test6 ret:%d\n", ret); 129 | // 130 | // release_proc_pid_struct(proc_pid_struct); 131 | // 132 | //} 133 | // 134 | //static void test7(void) { 135 | // struct pid * proc_pid_struct = get_proc_pid_struct(11533); 136 | // size_t ret = read_proc_rss_size(proc_pid_struct); 137 | // 138 | // printk(KERN_INFO "test7 get_proc_pid_struct:%ld, ret:%zu\n", proc_pid_struct, ret); 139 | // 140 | // release_proc_pid_struct(proc_pid_struct); 141 | // 142 | //} 143 | // 144 | //static void test8(void) { 145 | // struct pid * proc_pid_struct = get_proc_pid_struct(17597); 146 | // printk(KERN_INFO "test8 get_proc_pid_struct:%ld\n", proc_pid_struct); 147 | // if (proc_pid_struct) { 148 | // size_t arg_start = 0, arg_end = 0; 149 | // int res = get_proc_cmdline_addr(proc_pid_struct, &arg_start, &arg_end); 150 | // printk(KERN_INFO "test8 get_proc_cmdline_addr arg_start:0x%llx arg_end:0x%llx\n", arg_start, arg_end); 151 | // release_proc_pid_struct(proc_pid_struct); 152 | // } 153 | // 154 | //} 155 | // 156 | //static void test9(void) { 157 | // struct pid * proc_pid_struct = get_proc_pid_struct(14861); 158 | // printk(KERN_INFO "test9 get_proc_pid_struct:%ld\n", proc_pid_struct); 159 | // if (proc_pid_struct) { 160 | // 161 | // //set_process_root(proc_pid_struct); 162 | // 163 | // release_proc_pid_struct(proc_pid_struct); 164 | // } 165 | // 166 | //} 167 | 168 | #endif /* TEST_H_ */ -------------------------------------------------------------------------------- /code/phy_mem.h: -------------------------------------------------------------------------------- 1 | #ifndef PHY_MEM_H_ 2 | #define PHY_MEM_H_ 3 | //声明 4 | ////////////////////////////////////////////////////////////////////////// 5 | #include 6 | #include 7 | #include 8 | #include "phy_mem_auto_offset.h" 9 | #include "api_proxy.h" 10 | #include 
"ver_control.h" 11 | 12 | static inline int is_pte_can_read(pte_t* pte); 13 | static inline int is_pte_can_write(pte_t* pte); 14 | static inline int is_pte_can_exec(pte_t* pte); 15 | static inline int change_pte_read_status(pte_t* pte, bool can_read); 16 | static inline int change_pte_write_status(pte_t* pte, bool can_write); 17 | static inline int change_pte_exec_status(pte_t* pte, bool can_exec); 18 | 19 | static inline size_t get_task_proc_phy_addr(struct task_struct* task, size_t virt_addr, pte_t* out_pte); 20 | static inline size_t get_proc_phy_addr(struct pid* proc_pid_struct, size_t virt_addr, pte_t* out_pte); 21 | static inline size_t read_ram_physical_addr(bool is_kernel_buf, size_t phy_addr, char* lpBuf, size_t read_size); 22 | static inline size_t write_ram_physical_addr(size_t phy_addr, char* lpBuf, bool is_kernel_buf, size_t write_size); 23 | 24 | //实现 25 | ////////////////////////////////////////////////////////////////////////// 26 | #include 27 | #include 28 | #include 29 | 30 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,83) 31 | #include 32 | #include 33 | #endif 34 | 35 | 36 | #define RETURN_VALUE(size_t_ptr___out_ret, size_t___value) *size_t_ptr___out_ret=size_t___value;break; 37 | 38 | #include 39 | 40 | static inline size_t get_task_proc_phy_addr(struct task_struct* task, size_t virt_addr, pte_t *out_pte) { 41 | struct mm_struct *mm; 42 | pgd_t *pgd; 43 | p4d_t *p4d; 44 | pud_t *pud; 45 | pmd_t *pmd; 46 | pte_t *pte; 47 | unsigned long paddr = 0; 48 | unsigned long page_addr = 0; 49 | unsigned long page_offset = 0; 50 | *(size_t*)out_pte = 0; 51 | 52 | if (!task) { 53 | return 0; 54 | } 55 | mm = get_task_mm(task); 56 | if (!mm) { 57 | return 0; 58 | } 59 | pgd = x_pgd_offset(mm, virt_addr); 60 | if (pgd == NULL) { 61 | printk_debug("pgd is null\n"); 62 | goto out; 63 | } 64 | //printk_debug("pgd_val = 0x%lx pgd addr:0x%p\n", (unsigned long int)pgd_val(*pgd), (void*)pgd); 65 | //printk_debug("init_mm pgd val:0x%lx,pgd addr:0x%p\n", 
(unsigned long)pgd_val(*(mm->pgd)), (void*)mm->pgd); 66 | printk_debug("pgd_index = %zu\n", pgd_index(virt_addr)); 67 | if (pgd_none(*pgd)) { 68 | printk_debug("not mapped in pgd\n"); 69 | goto out; 70 | } 71 | printk_debug("pgd_offset ok\n"); 72 | 73 | /* 74 | * (p4ds are folded into pgds so this doesn't get actually called, 75 | * but the define is needed for a generic inline function.) 76 | */ 77 | p4d = p4d_offset(pgd, virt_addr); 78 | //printk_debug("p4d_val = 0x%llx, p4d_index = %d\n", p4d_val(*p4d), p4d_index(virt_addr)); 79 | printk_debug("p4d_val = 0x%llx\n", p4d_val(*p4d)); 80 | if (p4d_none(*p4d)) 81 | { 82 | printk_debug("not mapped in p4d\n"); 83 | goto out; 84 | } 85 | 86 | pud = pud_offset(p4d, virt_addr); 87 | printk_debug("pud_val = 0x%llx \n", pud_val(*pud)); 88 | if (pud_none(*pud)) { 89 | printk_debug("not mapped in pud\n"); 90 | goto out; 91 | } 92 | printk_debug("pud_offset ok\n"); 93 | 94 | pmd = pmd_offset(pud, virt_addr); 95 | printk_debug("pmd_val = 0x%llx\n", pmd_val(*pmd)); 96 | //printk_debug("pmd_index = %d\n", pmd_index(virt_addr)); 97 | if (pmd_none(*pmd)) { 98 | printk_debug("not mapped in pmd\n"); 99 | goto out; 100 | } 101 | printk_debug("pmd_offset ok\n"); 102 | 103 | pte = pte_offset_kernel(pmd, virt_addr); 104 | printk_debug("pte_val = 0x%llx\n", pte_val(*pte)); 105 | //printk_debug("pte_index = %d\n", pte_index(virt_addr)); 106 | if (pte_none(*pte)) { 107 | printk_debug("not mapped in pte\n"); 108 | goto out; 109 | } 110 | printk_debug("pte_offset_kernel ok\n"); 111 | 112 | page_addr = page_to_phys(pte_page(*pte)); 113 | page_offset = virt_addr & ~PAGE_MASK; 114 | paddr = page_addr | page_offset; 115 | 116 | printk_debug("page_addr = %lx, page_offset = %lx\n", page_addr, page_offset); 117 | printk_debug("vaddr = %zx, paddr = %lx\n", virt_addr, paddr); 118 | 119 | *(size_t*)out_pte = (size_t)pte; 120 | 121 | out: 122 | mmput(mm); 123 | return paddr; 124 | } 125 | 126 | 127 | static inline size_t get_proc_phy_addr(struct pid* 
proc_pid_struct, size_t virt_addr, pte_t* out_pte) { 128 | struct task_struct* task = pid_task(proc_pid_struct, PIDTYPE_PID); 129 | if (!task) { return 0; } 130 | return get_task_proc_phy_addr(task, virt_addr, out_pte); 131 | } 132 | 133 | 134 | static inline int is_pte_can_read(pte_t* pte) { 135 | if (!pte) { return 0; } 136 | #ifdef pte_read 137 | if (pte_read(*pte)) { return 1; } else { return 0; } 138 | #endif 139 | return 1; 140 | } 141 | static inline int is_pte_can_write(pte_t* pte) { 142 | if (!pte) { return 0; } 143 | if (pte_write(*pte)) { return 1; } else { return 0; } 144 | } 145 | static inline int is_pte_can_exec(pte_t* pte) { 146 | if (!pte) { return 0; } 147 | #ifdef pte_exec 148 | if (pte_exec(*pte)) { return 1; } else { return 0; } 149 | #endif 150 | #ifdef pte_user_exec 151 | if (pte_user_exec(*pte)) { return 1; } else { return 0; } 152 | #endif 153 | return 0; 154 | } 155 | static inline int change_pte_read_status(pte_t* pte, bool can_read) { 156 | if (!pte) { return 0; } 157 | return 1; 158 | } 159 | static inline int change_pte_write_status(pte_t* pte, bool can_write) { 160 | if (!pte) { return 0; } 161 | if (can_write) { 162 | set_pte(pte, x_pte_mkwrite(*pte)); 163 | } else { 164 | set_pte(pte, pte_wrprotect(*pte)); 165 | } 166 | return 1; 167 | } 168 | static inline int change_pte_exec_status(pte_t* pte, bool can_exec) { 169 | if (!pte) { return 0; } 170 | if (can_exec) { 171 | #ifdef pte_mknexec 172 | set_pte(pte, x_pte_mkwrite(*pte)); 173 | #endif 174 | } else { 175 | #ifdef pte_mkexec 176 | set_pte(pte, x_pte_mkwrite(*pte)); 177 | #endif 178 | } 179 | return 1; 180 | } 181 | 182 | static inline unsigned long size_inside_page(unsigned long start, 183 | unsigned long size) { 184 | unsigned long sz; 185 | 186 | sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); 187 | 188 | return min(sz, size); 189 | } 190 | 191 | 192 | static inline int check_phys_addr_valid_range(size_t addr, size_t count) { 193 | if (g_phy_total_memory_size == 0) { 194 | 
init_phy_total_memory_size(); 195 | } 196 | return (addr + count) <= g_phy_total_memory_size; 197 | } 198 | 199 | 200 | static inline size_t read_ram_physical_addr(bool is_kernel_buf, size_t phy_addr, char* lpBuf, size_t read_size) { 201 | void *bounce; 202 | size_t realRead = 0; 203 | if (!check_phys_addr_valid_range(phy_addr, read_size)) { 204 | printk_debug(KERN_INFO "Error in check_phys_addr_valid_range:%zu,size:%zu\n", phy_addr, read_size); 205 | return 0; 206 | } 207 | bounce = x_kmalloc(PAGE_SIZE, GFP_KERNEL); 208 | if (!bounce) { 209 | return 0; 210 | } 211 | 212 | while (read_size > 0) { 213 | size_t sz = size_inside_page(phy_addr, read_size); 214 | 215 | /* 216 | * On ia64 if a page has been mapped somewhere as uncached, then 217 | * it must also be accessed uncached by the kernel or data 218 | * corruption may occur. 219 | */ 220 | 221 | char *ptr = xlate_dev_mem_ptr(phy_addr); 222 | int probe; 223 | 224 | if (!ptr) { 225 | printk_debug(KERN_INFO "Error in x_xlate_dev_mem_ptr:0x%zx\n", phy_addr); 226 | break; 227 | } 228 | probe = x_probe_kernel_read(bounce, ptr, sz); 229 | unxlate_dev_mem_ptr(phy_addr, ptr); 230 | if (probe) { 231 | break; 232 | } 233 | if (is_kernel_buf) { 234 | memcpy(lpBuf, bounce, sz); 235 | } else { 236 | unsigned long remaining = x_copy_to_user(lpBuf, bounce, sz); 237 | if (remaining) { 238 | printk_debug(KERN_INFO "Error in x_copy_to_user(\n"); 239 | break; 240 | } 241 | } 242 | lpBuf += sz; 243 | phy_addr += sz; 244 | read_size -= sz; 245 | realRead += sz; 246 | } 247 | kfree(bounce); 248 | return realRead; 249 | } 250 | 251 | static inline size_t write_ram_physical_addr(size_t phy_addr, char* lpBuf, bool is_kernel_buf, size_t write_size) { 252 | size_t realWrite = 0; 253 | if (!check_phys_addr_valid_range(phy_addr, write_size)) { 254 | printk_debug(KERN_INFO "Error in check_phys_addr_valid_range:0x%zx,size:%zu\n", phy_addr, write_size); 255 | return 0; 256 | } 257 | 258 | while (write_size > 0) { 259 | size_t sz = 
size_inside_page(phy_addr, write_size); 260 | 261 | /* 262 | * On ia64 if a page has been mapped somewhere as uncached, then 263 | * it must also be accessed uncached by the kernel or data 264 | * corruption may occur. 265 | */ 266 | 267 | char *ptr = xlate_dev_mem_ptr(phy_addr); 268 | if (!ptr) { 269 | printk_debug(KERN_INFO "Error in xlate_dev_mem_ptr:0x%zx\n", phy_addr); 270 | break; 271 | } 272 | if (is_kernel_buf) { 273 | memcpy(ptr, lpBuf, sz); 274 | } else { 275 | unsigned long copied = x_copy_from_user(ptr, lpBuf, sz); 276 | if (copied) { 277 | unxlate_dev_mem_ptr(phy_addr, ptr); 278 | realWrite += sz - copied; 279 | printk_debug(KERN_INFO "Error in x_copy_from_user(\n"); 280 | break; 281 | } 282 | } 283 | unxlate_dev_mem_ptr(phy_addr, ptr); 284 | 285 | lpBuf += sz; 286 | phy_addr += sz; 287 | write_size -= sz; 288 | realWrite += sz; 289 | } 290 | return realWrite; 291 | } 292 | #endif /* PHY_MEM_H_ */ -------------------------------------------------------------------------------- /code/proc_maps_auto_offset.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_MAPS_AUTO_OFFSET_H_ 2 | #define PROC_MAPS_AUTO_OFFSET_H_ 3 | #include "api_proxy.h" 4 | #include "ver_control.h" 5 | 6 | 7 | #ifndef MM_STRUCT_MMAP_LOCK 8 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(5,10,43) 9 | #define MM_STRUCT_MMAP_LOCK mmap_sem 10 | #endif 11 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,43) 12 | #define MM_STRUCT_MMAP_LOCK mmap_lock 13 | #endif 14 | #endif 15 | 16 | static ssize_t g_mmap_lock_offset = 0; 17 | static bool g_init_mmap_lock_offset_success = false; 18 | 19 | static ssize_t g_map_count_offset = 0; 20 | static bool g_init_map_count_offset_success = false; 21 | 22 | static ssize_t g_vm_file_offset = 0; 23 | static bool g_init_vm_file_offset_success = false; 24 | 25 | static int get_mytask_maps_cnt(void) { 26 | struct task_struct * mytask = x_get_current(); 27 | struct mm_struct * mm = get_task_mm(mytask); 28 | 
struct vm_area_struct* vma; 29 | int cnt = 0; 30 | #if MY_LINUX_VERSION_CODE < KERNEL_VERSION(6,1,0) 31 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 32 | cnt++; 33 | } 34 | #else 35 | { 36 | VMA_ITERATOR(iter, mm, 0); 37 | for_each_vma(iter, vma) { 38 | cnt++; 39 | } 40 | } 41 | #endif 42 | return cnt; 43 | } 44 | 45 | 46 | static int init_mmap_lock_offset(void) { 47 | int is_found_mmap_lock_offset = 0; 48 | struct task_struct * mytask = x_get_current(); 49 | struct mm_struct * mm = get_task_mm(mytask); 50 | int maps_cnt = get_mytask_maps_cnt(); 51 | if(g_init_mmap_lock_offset_success) { 52 | return 0; 53 | } 54 | printk_debug(KERN_EMERG "init_mmap_lock_offset maps_cnt:%d, mm->map_count:%p:%d\n", maps_cnt, &mm->map_count, (int)mm->map_count); 55 | 56 | g_init_mmap_lock_offset_success = true; 57 | for (g_mmap_lock_offset = -80; g_mmap_lock_offset <= 80; g_mmap_lock_offset += 1) { 58 | char *rp; 59 | int val; 60 | ssize_t accurate_offset = (ssize_t)((size_t)&mm->MM_STRUCT_MMAP_LOCK - (size_t)mm + g_mmap_lock_offset); 61 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 62 | mmput(mm); 63 | return -EFAULT; 64 | } 65 | rp = (char*)((size_t)mm + (size_t)accurate_offset); 66 | val = *(int*)(rp); 67 | printk_debug(KERN_EMERG "init_mmap_lock_offset %zd:%zd:%p:%d\n", g_mmap_lock_offset, accurate_offset, rp, val); 68 | 69 | if (val == maps_cnt) { 70 | printk_debug(KERN_EMERG "val == maps_cnt %zd:%zd:%p:%d\n", g_mmap_lock_offset, accurate_offset, rp, val); 71 | g_mmap_lock_offset += sizeof(val); 72 | g_mmap_lock_offset += sizeof(int); 73 | is_found_mmap_lock_offset = 1; 74 | break; 75 | } 76 | 77 | } 78 | 79 | 80 | if (!is_found_mmap_lock_offset) { 81 | g_init_mmap_lock_offset_success = false; 82 | mmput(mm); 83 | printk_debug(KERN_INFO "find mmap_lock offset failed\n"); 84 | return -ESPIPE; 85 | } 86 | mmput(mm); 87 | printk_debug(KERN_INFO "found g_mmap_lock_offset:%zu\n", g_mmap_lock_offset); 88 | return 0; 89 | } 90 | 91 | static inline int 
/* (return type "static inline int" continues from the previous line)
 * Acquire mm's mmap semaphore for reading.
 * The semaphore is not taken at its compile-time position: g_mmap_lock_offset,
 * a byte delta probed at runtime by init_mmap_lock_offset(), corrects for
 * mm_struct layouts that differ from the headers this module was built with.
 * Returns 0 on success, -ENOENT if the offset probe has not succeeded yet,
 * -ERANGE if the corrected offset falls outside mm_struct. */
down_read_mmap_lock(struct mm_struct *mm) {
	ssize_t accurate_offset;
	struct rw_semaphore *sem;
	if (g_init_mmap_lock_offset_success == false) {
		return -ENOENT;
	}

	/* Compile-time member address relative to mm, plus the probed delta. */
	accurate_offset = (ssize_t)((size_t)&mm->MM_STRUCT_MMAP_LOCK - (size_t)mm + g_mmap_lock_offset);
	printk_debug(KERN_INFO "down_read_mmap_lock accurate_offset:%zd\n", accurate_offset);
	if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) {
		return -ERANGE;
	}
	sem = (struct rw_semaphore *)((size_t)mm + (size_t)accurate_offset);
	down_read(sem);
	return 0;
}

/* Counterpart of down_read_mmap_lock(): releases the mmap semaphore found
 * at the same runtime-corrected offset. Same error contract. */
static inline int up_read_mmap_lock(struct mm_struct *mm) {
	ssize_t accurate_offset;
	struct rw_semaphore *sem;
	if (g_init_mmap_lock_offset_success == false) {
		return -ENOENT;
	}
	accurate_offset = (ssize_t)((size_t)&mm->MM_STRUCT_MMAP_LOCK - (size_t)mm + g_mmap_lock_offset);
	printk_debug(KERN_INFO "accurate_offset:%zd\n", accurate_offset);
	if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) {
		return -ERANGE;
	}
	sem = (struct rw_semaphore *)((size_t)mm + (size_t)accurate_offset);

	up_read(sem);
	return 0;
}

/* Locate the runtime offset of mm_struct.map_count by scanning +/-40 bytes
 * around its compile-time position in the CURRENT task's mm, looking for an
 * int equal to the VMA count obtained independently by get_mytask_maps_cnt().
 * Stores the delta in g_map_count_offset on success.
 * NOTE(review): g_init_map_count_offset_success is set true before the scan
 * and only cleared on failure — presumably so recursive/concurrent callers
 * short-circuit; confirm this is safe against concurrent init. */
static int init_map_count_offset(void) {
	int is_found_map_count_offset = 0;
	struct task_struct * mytask = x_get_current();
	struct mm_struct * mm = get_task_mm(mytask);
	int maps_cnt = get_mytask_maps_cnt();
	if(g_init_map_count_offset_success) {
		return 0;
	}
	printk_debug(KERN_EMERG "init_map_count_offset maps_cnt:%d, mm->map_count:%p:%d\n", maps_cnt, &mm->map_count, (int)mm->map_count);

	g_init_map_count_offset_success = true;
	for (g_map_count_offset = -40; g_map_count_offset <= 40; g_map_count_offset += 1) {
		char *rp;
		int val;
		ssize_t accurate_offset = (ssize_t)((size_t)&mm->map_count - (size_t)mm + g_map_count_offset);
		if (accurate_offset >= sizeof(struct
mm_struct) - sizeof(ssize_t)) { 141 | mmput(mm); 142 | return -EFAULT; 143 | } 144 | rp = (char*)((size_t)mm + (size_t)accurate_offset); 145 | val = *(int*)(rp); 146 | printk_debug(KERN_EMERG "init_map_count_offset %zd:%zd:%p:%d\n", g_map_count_offset, accurate_offset, rp, val); 147 | 148 | if (val == maps_cnt) { 149 | printk_debug(KERN_EMERG "val == maps_cnt %zd:%zd:%p:%d\n", g_map_count_offset, accurate_offset, rp, val); 150 | is_found_map_count_offset = 1; 151 | break; 152 | } 153 | } 154 | 155 | 156 | if (!is_found_map_count_offset) { 157 | g_init_map_count_offset_success = false; 158 | printk_debug(KERN_INFO "find map_count offset failed\n"); 159 | mmput(mm); 160 | return -ESPIPE; 161 | } 162 | 163 | mmput(mm); 164 | printk_debug(KERN_INFO "g_map_count_offset:%zu\n", g_map_count_offset); 165 | return 0; 166 | } 167 | 168 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,75) 169 | static int init_vm_file_offset(void) { 170 | int is_found_vm_file_offset = 0; 171 | struct vm_area_struct *vma; 172 | struct task_struct * mytask = x_get_current(); 173 | struct mm_struct *mm = get_task_mm(mytask); 174 | if(g_init_vm_file_offset_success) { 175 | return 0; 176 | } 177 | if (down_read_mmap_lock(mm) != 0) { 178 | mmput(mm); 179 | return -EFAULT; 180 | } 181 | 182 | g_init_vm_file_offset_success = false; 183 | { 184 | VMA_ITERATOR(iter, mm, 0); 185 | for_each_vma(iter, vma) { 186 | if (is_found_vm_file_offset == 1) { 187 | break; 188 | } 189 | for (g_vm_file_offset = -80; g_vm_file_offset <= 80; g_vm_file_offset += 1) { 190 | char *rp; 191 | size_t addr_val1; 192 | size_t addr_val2; 193 | unsigned long vm_pgoff; 194 | ssize_t accurate_offset = (ssize_t)((size_t)&vma->vm_file - (size_t)vma + g_vm_file_offset); 195 | //这里故意屏蔽,因为vm_file已经接近vm_area_struct结构体尾部了 196 | /*if (accurate_offset >= sizeof(struct vm_area_struct) - sizeof(struct file *)) 197 | { 198 | mmput(mm); 199 | return -EFAULT; 200 | }*/ 201 | rp = (char*)((size_t)vma + (size_t)accurate_offset); 202 | addr_val1 
= *(size_t*)(rp); 203 | rp += (size_t)sizeof(void*); 204 | addr_val2 = *(size_t*)(rp); 205 | printk_debug(KERN_EMERG "init_vm_file_offset %zd:%zd:%p:%zu\n", g_vm_file_offset, accurate_offset, rp, addr_val1); 206 | if (addr_val1 > 0 && addr_val2 > 0 && addr_val1 == addr_val2) //struct list_head anon_vma_chain;里面两个值一样 207 | { 208 | int vm_pgoff_offset = 0; 209 | int found_vm_pgoff = 0; 210 | 211 | printk_debug(KERN_EMERG "init_vm_file_offset addr_val1 == addr_val2 %zd:%zd:%p:%zu\n", g_vm_file_offset, accurate_offset, rp, addr_val1); 212 | rp += (size_t)sizeof(void*); 213 | for (; vm_pgoff_offset < 8 * 5; vm_pgoff_offset += 4) { 214 | vm_pgoff = *(unsigned long*)(rp); 215 | if (vm_pgoff > 0 && vm_pgoff < 1000/*这个值是vm_pgoff我见过的最大值吧,如果最大值比1000还有大再改大*/) { 216 | found_vm_pgoff = 1; 217 | break; 218 | } 219 | rp += 4; 220 | } 221 | if (found_vm_pgoff) { 222 | rp += (size_t)sizeof(unsigned long); 223 | rp += (size_t)sizeof(struct file *); 224 | 225 | addr_val1 = *(size_t*)(rp); 226 | rp += (size_t)sizeof(void*); 227 | addr_val2 = *(size_t*)(rp); 228 | 229 | if (addr_val1 == 0 && addr_val2 == 0) { 230 | g_vm_file_offset += sizeof(void*) * 2; 231 | g_vm_file_offset += vm_pgoff_offset; 232 | g_vm_file_offset += sizeof(unsigned long); 233 | printk_debug(KERN_EMERG "init_vm_file_offset ok, addr_val1 == addr_val2 == 0 %zd:%d\n", g_vm_file_offset, vm_pgoff_offset); 234 | is_found_vm_file_offset = 1; 235 | break; 236 | } 237 | 238 | } 239 | 240 | 241 | } 242 | } 243 | } 244 | } 245 | 246 | up_read_mmap_lock(mm); 247 | mmput(mm); 248 | 249 | if (!is_found_vm_file_offset) { 250 | printk_debug(KERN_INFO "find vm_file offset failed\n"); 251 | return -ESPIPE; 252 | } 253 | g_init_vm_file_offset_success = true; 254 | 255 | return 0; 256 | } 257 | #else 258 | 259 | static int init_vm_file_offset(void) { 260 | int is_found_vm_file_offset = 0; 261 | struct vm_area_struct *vma; 262 | struct task_struct * mytask = x_get_current(); 263 | struct mm_struct *mm = get_task_mm(mytask); 264 | 
if(g_init_vm_file_offset_success) { 265 | return 0; 266 | } 267 | if (down_read_mmap_lock(mm) != 0) { 268 | mmput(mm); 269 | return -EFAULT; 270 | } 271 | 272 | g_init_vm_file_offset_success = false; 273 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 274 | if (is_found_vm_file_offset == 1) { 275 | break; 276 | } 277 | for (g_vm_file_offset = -80; g_vm_file_offset <= 80; g_vm_file_offset += 1) { 278 | char *rp; 279 | size_t addr_val1; 280 | size_t addr_val2; 281 | unsigned long vm_pgoff; 282 | ssize_t accurate_offset = (ssize_t)((size_t)&vma->vm_file - (size_t)vma + g_vm_file_offset); 283 | //这里故意屏蔽,因为vm_file已经接近vm_area_struct结构体尾部了 284 | /*if (accurate_offset >= sizeof(struct vm_area_struct) - sizeof(struct file *)) 285 | { 286 | mmput(mm); 287 | return -EFAULT; 288 | }*/ 289 | rp = (char*)((size_t)vma + (size_t)accurate_offset); 290 | addr_val1 = *(size_t*)(rp); 291 | rp += (size_t)sizeof(void*); 292 | addr_val2 = *(size_t*)(rp); 293 | printk_debug(KERN_EMERG "init_vm_file_offset %zd:%zd:%p:%zu\n", g_vm_file_offset, accurate_offset, rp, addr_val1); 294 | if (addr_val1 > 0 && addr_val2 > 0 && addr_val1 == addr_val2) //struct list_head anon_vma_chain;里面两个值一样 295 | { 296 | int vm_pgoff_offset = 0; 297 | int found_vm_pgoff = 0; 298 | 299 | printk_debug(KERN_EMERG "init_vm_file_offset addr_val1 == addr_val2 %zd:%zd:%p:%zu\n", g_vm_file_offset, accurate_offset, rp, addr_val1); 300 | rp += (size_t)sizeof(void*); 301 | for (; vm_pgoff_offset < 8 * 5; vm_pgoff_offset += 4) { 302 | vm_pgoff = *(unsigned long*)(rp); 303 | if (vm_pgoff > 0 && vm_pgoff < 1000/*这个值是vm_pgoff我见过的最大值吧,如果最大值比1000还有大再改大*/) { 304 | found_vm_pgoff = 1; 305 | break; 306 | } 307 | rp += 4; 308 | } 309 | if (found_vm_pgoff) { 310 | rp += (size_t)sizeof(unsigned long); 311 | rp += (size_t)sizeof(struct file *); 312 | 313 | addr_val1 = *(size_t*)(rp); 314 | rp += (size_t)sizeof(void*); 315 | addr_val2 = *(size_t*)(rp); 316 | 317 | if (addr_val1 == 0 && addr_val2 == 0) { 318 | g_vm_file_offset += 
sizeof(void*) * 2; 319 | g_vm_file_offset += vm_pgoff_offset; 320 | g_vm_file_offset += sizeof(unsigned long); 321 | printk_debug(KERN_EMERG "init_vm_file_offset ok, addr_val1 == addr_val2 == 0 %zd:%d\n", g_vm_file_offset, vm_pgoff_offset); 322 | is_found_vm_file_offset = 1; 323 | break; 324 | } 325 | 326 | } 327 | 328 | 329 | } 330 | } 331 | } 332 | 333 | up_read_mmap_lock(mm); 334 | mmput(mm); 335 | 336 | if (!is_found_vm_file_offset) { 337 | printk_debug(KERN_INFO "find vm_file offset failed\n"); 338 | return -ESPIPE; 339 | } 340 | g_init_vm_file_offset_success = true; 341 | return 0; 342 | } 343 | #endif 344 | 345 | static inline struct file * get_vm_file(struct vm_area_struct *vma) { 346 | struct file * vm_file; 347 | ssize_t accurate_offset; 348 | if (g_init_vm_file_offset_success == false) { 349 | if (init_vm_file_offset() != 0) { 350 | return NULL; 351 | } 352 | } 353 | 354 | accurate_offset = (ssize_t)((size_t)&vma->vm_file - (size_t)vma + g_vm_file_offset); 355 | printk_debug(KERN_INFO "get_vm_file accurate_offset:%zd\n", accurate_offset); 356 | //这里故意屏蔽,因为vm_file已经接近vm_area_struct结构体尾部了 357 | //if (accurate_offset >= sizeof(struct vm_area_struct) - sizeof(struct file *)) 358 | //{ 359 | // return NULL; 360 | //} 361 | vm_file = (struct file*) *(size_t*)((size_t)vma + (size_t)accurate_offset); 362 | return vm_file; 363 | } 364 | #endif /* PROC_MAPS_AUTO_OFFSET_H_ */ -------------------------------------------------------------------------------- /code/rwProcMem_module.c: -------------------------------------------------------------------------------- 1 | #include "rwProcMem_module.h" 2 | 3 | #define MY_TASK_COMM_LEN 16 4 | 5 | #pragma pack(push,1) 6 | struct ioctl_request { 7 | char cmd; /* 1 字节命令 */ 8 | uint64_t param1; /* 参数1 */ 9 | uint64_t param2; /* 参数2 */ 10 | uint64_t param3; /* 参数3 */ 11 | uint64_t buf_size; /* 紧随其后的动态数据长度 */ 12 | }; 13 | struct init_device_info { 14 | int pid; 15 | int tgid; 16 | char my_name[MY_TASK_COMM_LEN + 1]; 17 | char 
my_cmdline[1024]; 18 | }; 19 | struct arg_info { 20 | uint64_t arg_start; 21 | uint64_t arg_end; 22 | }; 23 | #pragma pack(pop) 24 | 25 | static ssize_t OnCmdInitDeviceInfo(struct ioctl_request *hdr, char __user* buf) { 26 | long err = 0; 27 | struct init_device_info* pinit_device_info = (struct init_device_info*)x_kmalloc(sizeof(struct init_device_info), GFP_KERNEL); 28 | if (!pinit_device_info) { 29 | return -ENOMEM; 30 | } 31 | printk_debug(KERN_INFO "CMD_INIT_DEVICE_INFO\n"); 32 | memset(pinit_device_info, 0, sizeof(struct init_device_info)); 33 | if (x_copy_from_user((void*)pinit_device_info, (void*)buf, sizeof(struct init_device_info)) == 0) { 34 | printk_debug(KERN_INFO "my_cmdline:%s\n", pinit_device_info->my_cmdline); 35 | printk_debug(KERN_INFO "my_name:%s\n", pinit_device_info->my_name); 36 | printk_debug(KERN_INFO "pid:%d, tgid:%d\n", pinit_device_info->pid, pinit_device_info->tgid); 37 | do { 38 | err = init_mmap_lock_offset(); 39 | if(err) { break; } 40 | err = init_map_count_offset(); 41 | if(err) { break; } 42 | err = init_proc_cmdline_offset(pinit_device_info->my_cmdline, get_task_proc_cmdline_addr); 43 | if(err) { break; } 44 | err = init_proc_root_offset(pinit_device_info->my_name); 45 | if(err) { break; } 46 | err = init_task_next_offset(); 47 | if(err) { break; } 48 | err = init_task_pid_offset(pinit_device_info->pid, pinit_device_info->tgid); 49 | } while(0); 50 | } else { 51 | err = -EINVAL; 52 | } 53 | kfree(pinit_device_info); 54 | return err; 55 | } 56 | 57 | static ssize_t OnCmdOpenProcess(struct ioctl_request *hdr, char __user* buf) { 58 | uint64_t pid = hdr->param1, handle = 0; 59 | struct pid * proc_pid_struct = NULL; 60 | printk_debug(KERN_INFO "CMD_OPEN_PROCESS\n"); 61 | 62 | printk_debug(KERN_INFO "pid:%llu,size:%ld\n", pid, sizeof(pid)); 63 | proc_pid_struct = get_proc_pid_struct(pid); 64 | printk_debug(KERN_INFO "proc_pid_struct *:0x%p\n", (void*)proc_pid_struct); 65 | if (!proc_pid_struct) { 66 | return -EINVAL; 67 | } 68 | 
handle = (uint64_t)proc_pid_struct; 69 | 70 | printk_debug(KERN_INFO "handle:%llu,size:%ld\n", handle, sizeof(handle)); 71 | if (!!x_copy_to_user((void*)buf, (void*)&handle, sizeof(handle))) { 72 | return -EINVAL; 73 | } 74 | return 0; 75 | } 76 | 77 | static ssize_t OnCmdCloseProcess(struct ioctl_request *hdr, char __user* buf) { 78 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 79 | printk_debug(KERN_INFO "CMD_CLOSE_PROCESS\n"); 80 | printk_debug(KERN_INFO "proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 81 | release_proc_pid_struct(proc_pid_struct); 82 | return 0; 83 | } 84 | 85 | static ssize_t OnCmdReadProcessMemory(struct ioctl_request *hdr, char __user* buf) { 86 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 87 | size_t proc_virt_addr = (size_t)hdr->param2; 88 | bool is_force_read = hdr->param3 == 1 ? true : false; 89 | size_t size = (size_t)hdr->buf_size; 90 | size_t read_size = 0; 91 | 92 | printk_debug(KERN_INFO "CMD_READ_PROCESS_MEMORY\n"); 93 | printk_debug(KERN_INFO "READ proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 94 | printk_debug(KERN_INFO "READ proc_virt_addr:0x%zx,size:%ld\n", proc_virt_addr, sizeof(proc_virt_addr)); 95 | if (is_force_read == false && !check_proc_map_can_read(proc_pid_struct, proc_virt_addr, size)) { 96 | return -EFAULT; 97 | } 98 | while (read_size < size) { 99 | size_t phy_addr = 0; 100 | size_t pfn_sz = 0; 101 | char *lpOutBuf = NULL; 102 | pte_t *pte; 103 | 104 | bool old_pte_can_read; 105 | phy_addr = get_proc_phy_addr(proc_pid_struct, proc_virt_addr + read_size, (pte_t*)&pte); 106 | printk_debug(KERN_INFO "calc phy_addr:0x%zx\n", phy_addr); 107 | 108 | if (phy_addr == 0) { 109 | break; 110 | } 111 | 112 | old_pte_can_read = is_pte_can_read(pte); 113 | if (is_force_read) { 114 | if (!old_pte_can_read) { 115 | if (!change_pte_read_status(pte, true)) { break; } 116 | 117 | } 118 | } else if (!old_pte_can_read) { 119 | break; 
120 | } 121 | 122 | pfn_sz = size_inside_page(phy_addr, ((size - read_size) > PAGE_SIZE) ? PAGE_SIZE : (size - read_size)); 123 | printk_debug(KERN_INFO "pfn_sz:%zu\n", pfn_sz); 124 | 125 | 126 | lpOutBuf = (char*)(buf + read_size); 127 | read_ram_physical_addr(false, phy_addr, lpOutBuf, pfn_sz); 128 | 129 | if (is_force_read && old_pte_can_read == false) { 130 | change_pte_read_status(pte, false); 131 | } 132 | read_size += pfn_sz; 133 | } 134 | return read_size; 135 | } 136 | 137 | static ssize_t OnCmdWriteProcessMemory(struct ioctl_request *hdr, char __user* buf) { 138 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 139 | size_t proc_virt_addr = (size_t)hdr->param2; 140 | bool is_force_write = hdr->param3 == 1 ? true : false; 141 | size_t size = (size_t)hdr->buf_size; 142 | size_t write_size = 0; 143 | printk_debug(KERN_INFO "CMD_WRITE_PROCESS_MEMORY\n"); 144 | printk_debug(KERN_INFO "WRITE proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 145 | printk_debug(KERN_INFO "WRITE proc_virt_addr:0x%zx,size:%ld\n", proc_virt_addr, sizeof(proc_virt_addr)); 146 | if (is_force_write == false && !check_proc_map_can_write(proc_pid_struct, proc_virt_addr, size)) { 147 | return -EFAULT; 148 | } 149 | 150 | while (write_size < size) { 151 | size_t phy_addr = 0; 152 | size_t pfn_sz = 0; 153 | char * input_buf = NULL; 154 | 155 | pte_t *pte; 156 | bool old_pte_can_write; 157 | phy_addr = get_proc_phy_addr(proc_pid_struct, proc_virt_addr + write_size, (pte_t*)&pte); 158 | printk_debug(KERN_INFO "phy_addr:0x%zx\n", phy_addr); 159 | if (phy_addr == 0) { 160 | break; 161 | } 162 | 163 | old_pte_can_write = is_pte_can_write(pte); 164 | if (is_force_write) { 165 | if (!old_pte_can_write) { 166 | if (!change_pte_write_status(pte, true)) { break; } 167 | } 168 | } else if (!old_pte_can_write) { 169 | break; 170 | } 171 | 172 | pfn_sz = size_inside_page(phy_addr, ((size - write_size) > PAGE_SIZE) ? 
PAGE_SIZE : (size - write_size)); 173 | printk_debug(KERN_INFO "pfn_sz:%zu\n", pfn_sz); 174 | 175 | input_buf = (char*)(((size_t)buf + write_size)); 176 | write_ram_physical_addr(phy_addr, input_buf, false, pfn_sz); 177 | 178 | if (is_force_write && old_pte_can_write == false) { 179 | change_pte_write_status(pte, false); 180 | } 181 | 182 | write_size += pfn_sz; 183 | } 184 | return write_size; 185 | } 186 | 187 | static ssize_t OnCmdGetProcessMapsCount(struct ioctl_request *hdr, char __user* buf) { 188 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 189 | printk_debug(KERN_INFO "CMD_GET_PROCESS_MAPS_COUNT\n"); 190 | printk_debug(KERN_INFO "proc_pid_struct*:0x%p, size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 191 | return get_proc_map_count(proc_pid_struct); 192 | } 193 | 194 | static ssize_t OnCmdGetProcessMapsList(struct ioctl_request *hdr, char __user* buf) { 195 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 196 | printk_debug(KERN_INFO "CMD_GET_PROCESS_MAPS_LIST\n"); 197 | printk_debug(KERN_INFO "proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 198 | printk_debug(KERN_INFO "buf_size:%llu\n", hdr->buf_size); 199 | return get_proc_maps_list(false, proc_pid_struct, (void*)(buf), hdr->buf_size - 1); 200 | } 201 | 202 | static ssize_t OnCmdCheckProcessPhyAddr(struct ioctl_request *hdr, char __user* buf) { 203 | struct pid * proc_pid_struct = (struct pid *)hdr->param1; 204 | size_t proc_virt_addr = (size_t)hdr->param2; 205 | pte_t *pte; 206 | printk_debug(KERN_INFO "CMD_CHECK_PROCESS_ADDR_PHY\n"); 207 | printk_debug(KERN_INFO "proc_pid_struct *:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct)); 208 | printk_debug(KERN_INFO "proc_virt_addr :0x%zx\n", proc_virt_addr); 209 | if (get_proc_phy_addr(proc_pid_struct, proc_virt_addr, (pte_t*)&pte)) { 210 | return 1; 211 | } 212 | return 0; 213 | 214 | } 215 | 216 | static ssize_t OnCmdGetPidList(struct ioctl_request *hdr, char 
__user* buf) {
	/* CMD_GET_PID_LIST: fill the user buffer with up to buf_size bytes of
	 * pid list data (format defined by get_proc_pid_list). */
	printk_debug(KERN_INFO "CMD_GET_PID_LIST\n");
	printk_debug(KERN_INFO "buf_size:%llu\n", hdr->buf_size);
	return get_proc_pid_list(false, buf, hdr->buf_size);
}

/* CMD_SET_PROCESS_ROOT: elevate the target process identified by the
 * param1 handle (delegates entirely to set_process_root). */
static ssize_t OnCmdSetProcessRoot(struct ioctl_request *hdr, char __user* buf) {
	struct pid * proc_pid_struct = (struct pid *)hdr->param1;
	printk_debug(KERN_INFO "CMD_SET_PROCESS_ROOT\n");
	printk_debug(KERN_INFO "proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct));
	return set_process_root(proc_pid_struct);
}

/* CMD_GET_PROCESS_RSS: write the target's RSS as a uint64_t into buf.
 * NOTE(review): read_proc_rss_size() returns 0 both for "no such task"
 * and a genuine zero RSS — callers cannot tell these apart. */
static ssize_t OnCmdGetProcessRss(struct ioctl_request *hdr, char __user* buf) {
	struct pid * proc_pid_struct = (struct pid *)hdr->param1;
	uint64_t rss = 0;
	printk_debug(KERN_INFO "CMD_GET_PROCESS_RSS\n");
	printk_debug(KERN_INFO "proc_pid_struct*:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct));
	rss = read_proc_rss_size(proc_pid_struct);
	if (!!x_copy_to_user((void*)buf, &rss, sizeof(rss))) {
		return -EINVAL;
	}
	return 0;
}

/* CMD_GET_PROCESS_CMDLINE_ADDR: write the target's [arg_start, arg_end)
 * range into buf as a struct arg_info; the return value is the status of
 * get_proc_cmdline_addr (the addresses are copied out even on failure,
 * as zeros from the initializer). */
static ssize_t OnCmdGetProcessCmdlineAddr(struct ioctl_request *hdr, char __user* buf) {
	struct pid * proc_pid_struct = (struct pid *)hdr->param1;
	size_t arg_start = 0, arg_end = 0;
	int res;
	struct arg_info aginfo = {0};
	printk_debug(KERN_INFO "CMD_GET_PROCESS_CMDLINE_ADDR\n");
	printk_debug(KERN_INFO "proc_pid_struct *:0x%p,size:%ld\n", (void*)proc_pid_struct, sizeof(proc_pid_struct));
	res = get_proc_cmdline_addr(proc_pid_struct, &arg_start, &arg_end);
	aginfo.arg_start = (uint64_t)arg_start;
	aginfo.arg_end = (uint64_t)arg_end;
	if (!!x_copy_to_user((void*)buf, &aginfo, sizeof(aginfo))) {
		return -EINVAL;
	}
	return res;
}

/* CMD_HIDE_KERNEL_MODULE: one-shot removal of this module from the module
 * list and sysfs so it no longer appears in lsmod (body continues on the
 * next line of the file). */
static ssize_t OnCmdHideKernelModule(struct ioctl_request *hdr, char __user* buf) {
	printk_debug(KERN_INFO "CMD_HIDE_KERNEL_MODULE\n");
	if
(g_rwProcMem_devp->is_hidden_module == false) { 260 | g_rwProcMem_devp->is_hidden_module = true; 261 | list_del_init(&__this_module.list); 262 | kobject_del(&THIS_MODULE->mkobj.kobj); 263 | } 264 | return 0; 265 | } 266 | 267 | static inline ssize_t DispatchCommand(struct ioctl_request *hdr, char __user* buf) { 268 | switch (hdr->cmd) { 269 | case CMD_INIT_DEVICE_INFO: 270 | return OnCmdInitDeviceInfo(hdr, buf); 271 | case CMD_OPEN_PROCESS: 272 | return OnCmdOpenProcess(hdr, buf); 273 | case CMD_READ_PROCESS_MEMORY: 274 | return OnCmdReadProcessMemory(hdr, buf); 275 | case CMD_WRITE_PROCESS_MEMORY: 276 | return OnCmdWriteProcessMemory(hdr, buf); 277 | case CMD_CLOSE_PROCESS: 278 | return OnCmdCloseProcess(hdr, buf); 279 | case CMD_GET_PROCESS_MAPS_COUNT: 280 | return OnCmdGetProcessMapsCount(hdr, buf); 281 | case CMD_GET_PROCESS_MAPS_LIST: 282 | return OnCmdGetProcessMapsList(hdr, buf); 283 | case CMD_CHECK_PROCESS_ADDR_PHY: 284 | return OnCmdCheckProcessPhyAddr(hdr, buf); 285 | case CMD_GET_PID_LIST: 286 | return OnCmdGetPidList(hdr, buf); 287 | case CMD_SET_PROCESS_ROOT: 288 | return OnCmdSetProcessRoot(hdr, buf); 289 | case CMD_GET_PROCESS_RSS: 290 | return OnCmdGetProcessRss(hdr, buf); 291 | case CMD_GET_PROCESS_CMDLINE_ADDR: 292 | return OnCmdGetProcessCmdlineAddr(hdr, buf); 293 | case CMD_HIDE_KERNEL_MODULE: 294 | return OnCmdHideKernelModule(hdr, buf); 295 | default: 296 | return -EINVAL; 297 | } 298 | return -EINVAL; 299 | } 300 | 301 | static ssize_t rwProcMem_read(struct file* filp, 302 | char __user* buf, 303 | size_t size, 304 | loff_t* ppos) { 305 | struct ioctl_request hdr = {0}; 306 | size_t header_size = sizeof(hdr); 307 | if (size < header_size) { 308 | return -EINVAL; 309 | } 310 | if (x_copy_from_user(&hdr, buf, header_size)) { 311 | return -EFAULT; 312 | } 313 | if (size < header_size + hdr.buf_size) { 314 | return -EINVAL; 315 | } 316 | return DispatchCommand(&hdr, buf + header_size); 317 | } 318 | 319 | #ifdef CONFIG_MODULE_GUIDE_ENTRY 320 | 
static 321 | #endif 322 | int __init rwProcMem_dev_init(void) { 323 | printk(KERN_EMERG "Start init.\n"); 324 | 325 | g_rwProcMem_devp = x_kmalloc(sizeof(struct rwProcMemDev), GFP_KERNEL); 326 | memset(g_rwProcMem_devp, 0, sizeof(struct rwProcMemDev)); 327 | 328 | #ifdef CONFIG_USE_PROC_FILE_NODE 329 | g_rwProcMem_devp->proc_parent = proc_mkdir(CONFIG_PROC_NODE_AUTH_KEY, NULL); 330 | if(g_rwProcMem_devp->proc_parent) { 331 | g_rwProcMem_devp->proc_entry = proc_create(CONFIG_PROC_NODE_AUTH_KEY, S_IRUGO | S_IWUGO, g_rwProcMem_devp->proc_parent, &rwProcMem_proc_ops); 332 | start_hide_procfs_dir(CONFIG_PROC_NODE_AUTH_KEY); 333 | } 334 | #endif 335 | 336 | #ifdef CONFIG_DEBUG_PRINTK 337 | printk(KERN_EMERG "Hello, %s debug\n", CONFIG_PROC_NODE_AUTH_KEY); 338 | //test1(); 339 | //test2(); 340 | //test3(); 341 | //test4(); 342 | //test5(); 343 | #else 344 | printk(KERN_EMERG "Hello\n"); 345 | #endif 346 | return 0; 347 | } 348 | 349 | #ifdef CONFIG_MODULE_GUIDE_ENTRY 350 | static 351 | #endif 352 | void __exit rwProcMem_dev_exit(void) { 353 | 354 | printk(KERN_EMERG "Start exit.\n"); 355 | 356 | #ifdef CONFIG_USE_PROC_FILE_NODE 357 | if(g_rwProcMem_devp->proc_entry) { 358 | proc_remove(g_rwProcMem_devp->proc_entry); 359 | g_rwProcMem_devp->proc_entry = NULL; 360 | } 361 | 362 | if(g_rwProcMem_devp->proc_parent) { 363 | proc_remove(g_rwProcMem_devp->proc_parent); 364 | g_rwProcMem_devp->proc_parent = NULL; 365 | } 366 | stop_hide_procfs_dir(); 367 | #endif 368 | kfree(g_rwProcMem_devp); 369 | printk(KERN_EMERG "Goodbye\n"); 370 | } 371 | 372 | #ifndef CONFIG_MODULE_GUIDE_ENTRY 373 | //Hook:__cfi_check_fn 374 | unsigned char* __check_(unsigned char* result, void *ptr, void *diag) 375 | { 376 | printk_debug(KERN_EMERG "my__cfi_check_fn!!!\n"); 377 | return result; 378 | } 379 | 380 | //Hook:__cfi_check_fail 381 | unsigned char * __check_fail_(unsigned char *result) 382 | { 383 | printk_debug(KERN_EMERG "my__cfi_check_fail!!!\n"); 384 | return result; 385 | } 386 | #endif 387 
| 388 | unsigned long __stack_chk_guard; 389 | 390 | #ifdef CONFIG_MODULE_GUIDE_ENTRY 391 | module_init(rwProcMem_dev_init); 392 | module_exit(rwProcMem_dev_exit); 393 | #endif 394 | MODULE_LICENSE("GPL"); 395 | MODULE_AUTHOR("Linux"); 396 | MODULE_DESCRIPTION("Linux default module"); 397 | 398 | -------------------------------------------------------------------------------- /code/proc_maps.h: -------------------------------------------------------------------------------- 1 | #ifndef PROC_MAPS_H_ 2 | #define PROC_MAPS_H_ 3 | 4 | //声明 5 | ////////////////////////////////////////////////////////////////////////// 6 | #include 7 | #include 8 | #include 9 | #if MY_LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,83) 10 | #include 11 | #include 12 | #endif 13 | 14 | static inline int down_read_mmap_lock(struct mm_struct *mm); 15 | static inline int up_read_mmap_lock(struct mm_struct *mm); 16 | static inline size_t get_proc_map_count(struct pid* proc_pid_struct); 17 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size); 18 | 19 | //实现 20 | ////////////////////////////////////////////////////////////////////////// 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include "api_proxy.h" 29 | #include "proc_maps_auto_offset.h" 30 | #include "ver_control.h" 31 | 32 | #define MY_PATH_MAX_LEN 1024 33 | #pragma pack(push,1) 34 | struct map_entry { 35 | unsigned long start; 36 | unsigned long end; 37 | unsigned char flags[4]; 38 | char path[MY_PATH_MAX_LEN]; 39 | }; 40 | #pragma pack(pop) 41 | 42 | static inline size_t get_proc_map_count(struct pid* proc_pid_struct) { 43 | ssize_t accurate_offset; 44 | struct task_struct *task = pid_task(proc_pid_struct, PIDTYPE_PID); 45 | struct mm_struct *mm = get_task_mm(task); 46 | size_t count = 0; 47 | if (g_init_map_count_offset_success == false) { 48 | return 0; 49 | } 50 | 51 | if (down_read_mmap_lock(mm) != 0) { 52 | goto _exit; 53 | 
} 54 | 55 | accurate_offset = (ssize_t)((size_t)&mm->map_count - (size_t)mm + g_map_count_offset); 56 | printk_debug(KERN_INFO "mm->map_count accurate_offset:%zd\n", accurate_offset); 57 | if (accurate_offset >= sizeof(struct mm_struct) - sizeof(ssize_t)) { 58 | return 0; 59 | } 60 | count = *(int *)((size_t)mm + (size_t)accurate_offset); 61 | 62 | up_read_mmap_lock(mm); 63 | 64 | _exit:mmput(mm); 65 | return count; 66 | } 67 | 68 | 69 | static inline int check_proc_map_can_read(struct pid* proc_pid_struct, size_t proc_virt_addr, size_t size) { 70 | struct task_struct *task = pid_task(proc_pid_struct, PIDTYPE_PID); 71 | struct mm_struct *mm; 72 | struct vm_area_struct *vma; 73 | int res = 0; 74 | if (!task) { return res; } 75 | 76 | mm = get_task_mm(task); 77 | 78 | if (!mm) { return res; } 79 | 80 | if (down_read_mmap_lock(mm) != 0) { 81 | goto _exit; 82 | } 83 | 84 | vma = find_vma(mm, proc_virt_addr); 85 | if (vma) { 86 | if (vma->vm_flags & VM_READ) { 87 | size_t read_end = proc_virt_addr + size; 88 | if (read_end <= vma->vm_end) { 89 | res = 1; 90 | } 91 | } 92 | } 93 | up_read_mmap_lock(mm); 94 | 95 | _exit:mmput(mm); 96 | return res; 97 | } 98 | static inline int check_proc_map_can_write(struct pid* proc_pid_struct, size_t proc_virt_addr, size_t size) { 99 | struct task_struct *task = pid_task(proc_pid_struct, PIDTYPE_PID); 100 | struct mm_struct *mm; 101 | struct vm_area_struct *vma; 102 | int res = 0; 103 | 104 | if (!task) { return res; } 105 | 106 | mm = get_task_mm(task); 107 | 108 | if (!mm) { return res; } 109 | 110 | if (down_read_mmap_lock(mm) != 0) { 111 | mmput(mm); 112 | return res; 113 | } 114 | 115 | vma = find_vma(mm, proc_virt_addr); 116 | if (vma) { 117 | if (vma->vm_flags & VM_WRITE) { 118 | size_t read_end = proc_virt_addr + size; 119 | if (read_end <= vma->vm_end) { 120 | res = 1; 121 | } 122 | } 123 | } 124 | up_read_mmap_lock(mm); 125 | mmput(mm); 126 | return res; 127 | } 128 | 129 | 130 | #if MY_LINUX_VERSION_CODE == 
KERNEL_VERSION(3,10,0) 131 | /* Check if the vma is being used as a stack by this task */ 132 | static int vm_is_stack_for_task(struct task_struct *t, 133 | struct vm_area_struct *vma) { 134 | return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 135 | } 136 | 137 | /* 138 | * Check if the vma is being used as a stack. 139 | * If is_group is non-zero, check in the entire thread group or else 140 | * just check in the current task. Returns the pid of the task that 141 | * the vma is stack for. 142 | */ 143 | static pid_t my_vm_is_stack(struct task_struct *task, 144 | struct vm_area_struct *vma, int in_group) { 145 | pid_t ret = 0; 146 | 147 | if (vm_is_stack_for_task(task, vma)) 148 | return task->pid; 149 | 150 | if (in_group) { 151 | struct task_struct *t; 152 | rcu_read_lock(); 153 | if (!pid_alive(task)) 154 | goto done; 155 | 156 | t = task; 157 | do { 158 | if (vm_is_stack_for_task(t, vma)) { 159 | ret = t->pid; 160 | goto done; 161 | } 162 | } while_each_thread(task, t); 163 | done: 164 | rcu_read_unlock(); 165 | } 166 | 167 | return ret; 168 | } 169 | 170 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 171 | 172 | struct task_struct *task; 173 | struct mm_struct *mm; 174 | struct vm_area_struct *vma; 175 | char path_buf[MY_PATH_MAX_LEN]; 176 | int success_cnt = 0; 177 | 178 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 179 | if (!task) { 180 | return -2; 181 | } 182 | 183 | mm = get_task_mm(task); 184 | if (!mm) { 185 | return -3; 186 | } 187 | 188 | 189 | if (is_kernel_buf) { 190 | memset(buf, 0, buf_size); 191 | } 192 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 193 | 194 | copy_pos = (size_t)buf; 195 | end_pos = (size_t)((size_t)buf + buf_size); 196 | 197 | if (down_read_mmap_lock(mm) != 0) { 198 | mmput(mm); 199 | return -4; 200 | } 201 | 202 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 203 | struct map_entry entry; 204 | struct file * vm_file; 205 
| if (copy_pos >= end_pos) { 206 | break; 207 | } 208 | entry.start = vma->vm_start; 209 | entry.end = vma->vm_end; 210 | /* We don't show the stack guard page in /proc/maps */ 211 | if (stack_guard_page_start(vma, entry.start)) 212 | entry.start += PAGE_SIZE; 213 | if (stack_guard_page_end(vma, entry.end)) 214 | entry.end -= PAGE_SIZE; 215 | 216 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 217 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 218 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 219 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 1 : 0; 220 | memset(entry.path, 0, sizeof(entry.path)); 221 | vm_file = get_vm_file(vma); 222 | if (vm_file) { 223 | char *path; 224 | memset(path_buf, 0, sizeof(path_buf)); 225 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 226 | if (path > 0) { 227 | strncat(entry.path, path, sizeof(entry.path) - 1); 228 | } 229 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 230 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 231 | } else { 232 | if (vma->vm_start <= mm->brk && 233 | vma->vm_end >= mm->start_brk) { 234 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 235 | } else { 236 | pid_t tid = my_vm_is_stack(task, vma, 1); 237 | if (tid != 0) { 238 | /* 239 | * Thread stack in /proc/PID/task/TID/maps or 240 | * the main process stack. 
241 | */ 242 | 243 | /* Thread stack in /proc/PID/maps */ 244 | 245 | sprintf(entry.path, "[stack:%d]", tid); 246 | } 247 | } 248 | 249 | } 250 | 251 | if (is_kernel_buf) { 252 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 253 | } else { 254 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 255 | break; 256 | } 257 | } 258 | copy_pos += sizeof(entry); 259 | success_cnt++; 260 | } 261 | up_read_mmap_lock(mm); 262 | mmput(mm); 263 | 264 | return success_cnt; 265 | } 266 | 267 | 268 | 269 | #endif 270 | 271 | 272 | 273 | 274 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(3,10,84) 275 | /* Check if the vma is being used as a stack by this task */ 276 | static int vm_is_stack_for_task(struct task_struct *t, 277 | struct vm_area_struct *vma) { 278 | return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 279 | } 280 | 281 | /* 282 | * Check if the vma is being used as a stack. 283 | * If is_group is non-zero, check in the entire thread group or else 284 | * just check in the current task. Returns the pid of the task that 285 | * the vma is stack for. 
286 | */ 287 | static pid_t my_vm_is_stack(struct task_struct *task, 288 | struct vm_area_struct *vma, int in_group) { 289 | pid_t ret = 0; 290 | 291 | if (vm_is_stack_for_task(task, vma)) 292 | return task->pid; 293 | 294 | if (in_group) { 295 | struct task_struct *t; 296 | rcu_read_lock(); 297 | if (!pid_alive(task)) 298 | goto done; 299 | 300 | t = task; 301 | do { 302 | if (vm_is_stack_for_task(t, vma)) { 303 | ret = t->pid; 304 | goto done; 305 | } 306 | } while_each_thread(task, t); 307 | done: 308 | rcu_read_unlock(); 309 | } 310 | 311 | return ret; 312 | } 313 | 314 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 315 | 316 | struct task_struct *task; 317 | struct mm_struct *mm; 318 | struct vm_area_struct *vma; 319 | char path_buf[MY_PATH_MAX_LEN]; 320 | int success_cnt = 0; 321 | size_t copy_pos; 322 | size_t end_pos; 323 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 324 | if (!task) { 325 | return -2; 326 | } 327 | 328 | mm = get_task_mm(task); 329 | if (!mm) { 330 | return -3; 331 | } 332 | 333 | 334 | if (is_kernel_buf) { 335 | memset(buf, 0, buf_size); 336 | } 337 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 338 | 339 | 340 | copy_pos = (size_t)buf; 341 | end_pos = (size_t)((size_t)buf + buf_size); 342 | 343 | if (down_read_mmap_lock(mm) != 0) { 344 | mmput(mm); 345 | return -4; 346 | } 347 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 348 | struct map_entry entry; 349 | struct file * vm_file; 350 | if (copy_pos >= end_pos) { 351 | break; 352 | } 353 | entry.start = vma->vm_start; 354 | entry.end = vma->vm_end; 355 | /* We don't show the stack guard page in /proc/maps */ 356 | if (stack_guard_page_start(vma, entry.start)) 357 | entry.start += PAGE_SIZE; 358 | if (stack_guard_page_end(vma, entry.end)) 359 | entry.end -= PAGE_SIZE; 360 | 361 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 362 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 
1 : 0; 363 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 364 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 1 : 0; 365 | memset(entry.path, 0, sizeof(entry.path)); 366 | vm_file = get_vm_file(vma); 367 | if (vm_file) { 368 | char *path; 369 | memset(path_buf, 0, sizeof(path_buf)); 370 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 371 | if (path > 0) { 372 | strncat(entry.path, path, sizeof(entry.path) - 1); 373 | } 374 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 375 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 376 | } else { 377 | if (vma->vm_start <= mm->brk && 378 | vma->vm_end >= mm->start_brk) { 379 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 380 | } else { 381 | pid_t tid = my_vm_is_stack(task, vma, 1); 382 | if (tid != 0) { 383 | /* 384 | * Thread stack in /proc/PID/task/TID/maps or 385 | * the main process stack. 386 | */ 387 | 388 | /* Thread stack in /proc/PID/maps */ 389 | 390 | sprintf(entry.path, "[stack:%d]", tid); 391 | } 392 | } 393 | 394 | } 395 | 396 | 397 | if (is_kernel_buf) { 398 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 399 | } else { 400 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 401 | break; 402 | } 403 | } 404 | copy_pos += sizeof(entry); 405 | success_cnt++; 406 | } 407 | up_read_mmap_lock(mm); 408 | mmput(mm); 409 | 410 | return success_cnt; 411 | } 412 | 413 | #endif 414 | 415 | 416 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(3,18,71) 417 | /* Check if the vma is being used as a stack by this task */ 418 | static int vm_is_stack_for_task(struct task_struct *t, 419 | struct vm_area_struct *vma) { 420 | return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 421 | } 422 | 423 | 424 | /* 425 | * Check if the vma is being used as a stack. 426 | * If is_group is non-zero, check in the entire thread group or else 427 | * just check in the current task. 
Returns the task_struct of the task 428 | * that the vma is stack for. Must be called under rcu_read_lock(). 429 | */ 430 | struct task_struct *task_of_stack(struct task_struct *task, 431 | struct vm_area_struct *vma, bool in_group) { 432 | if (vm_is_stack_for_task(task, vma)) 433 | return task; 434 | 435 | if (in_group) { 436 | struct task_struct *t; 437 | 438 | for_each_thread(task, t) { 439 | if (vm_is_stack_for_task(t, vma)) 440 | return t; 441 | } 442 | } 443 | 444 | return NULL; 445 | } 446 | 447 | 448 | static pid_t pid_of_stack(struct task_struct *task, 449 | struct vm_area_struct *vma, bool is_pid) { 450 | pid_t ret = 0; 451 | 452 | rcu_read_lock(); 453 | task = task_of_stack(task, vma, is_pid); 454 | if (task) { 455 | ret = task->pid; 456 | } 457 | rcu_read_unlock(); 458 | 459 | return ret; 460 | } 461 | 462 | 463 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 464 | struct task_struct *task; 465 | struct mm_struct *mm; 466 | struct vm_area_struct *vma; 467 | char path_buf[MY_PATH_MAX_LEN]; 468 | int success_cnt = 0; 469 | size_t copy_pos; 470 | size_t end_pos; 471 | 472 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 473 | if (!task) { 474 | return -2; 475 | } 476 | 477 | mm = get_task_mm(task); 478 | if (!mm) { 479 | return -3; 480 | } 481 | 482 | if (is_kernel_buf) { 483 | memset(buf, 0, buf_size); 484 | } 485 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 486 | 487 | 488 | copy_pos = (size_t)buf; 489 | end_pos = (size_t)((size_t)buf + buf_size); 490 | 491 | if (down_read_mmap_lock(mm) != 0) { 492 | mmput(mm); 493 | return -4; 494 | } 495 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 496 | struct map_entry entry; 497 | struct file * vm_file; 498 | if (copy_pos >= end_pos) { 499 | break; 500 | } 501 | entry.start = vma->vm_start; 502 | entry.end = vma->vm_end; 503 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 504 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 
1 : 0; 505 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 506 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 1 : 0; 507 | memset(entry.path, 0, sizeof(entry.path)); 508 | vm_file = get_vm_file(vma); 509 | if (vm_file) { 510 | char *path; 511 | memset(path_buf, 0, sizeof(path_buf)); 512 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 513 | if (path > 0) { 514 | strncat(entry.path, path, sizeof(entry.path) - 1); 515 | } 516 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 517 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 518 | } else { 519 | if (vma->vm_start <= mm->brk && 520 | vma->vm_end >= mm->start_brk) { 521 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 522 | } else { 523 | pid_t tid = pid_of_stack(task, vma, 1); 524 | if (tid != 0) { 525 | /* 526 | * Thread stack in /proc/PID/task/TID/maps or 527 | * the main process stack. 528 | */ 529 | 530 | /* Thread stack in /proc/PID/maps */ 531 | if (vma->vm_start <= mm->start_stack && 532 | vma->vm_end >= mm->start_stack) { 533 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 534 | } else { 535 | snprintf(entry.path, sizeof(entry.path), "[stack:%d]", tid); 536 | } 537 | } 538 | 539 | } 540 | 541 | } 542 | if (is_kernel_buf) { 543 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 544 | } else { 545 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 546 | break; 547 | } 548 | } 549 | copy_pos += sizeof(entry); 550 | success_cnt++; 551 | } 552 | up_read_mmap_lock(mm); 553 | mmput(mm); 554 | 555 | return success_cnt; 556 | } 557 | 558 | #endif 559 | 560 | 561 | 562 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(3,18,140) 563 | /* 564 | * Indicate if the VMA is a stack for the given task; for 565 | * /proc/PID/maps that is the stack of the main task. 
566 | */ 567 | static int is_stack(struct vm_area_struct *vma) { 568 | /* 569 | * We make no effort to guess what a given thread considers to be 570 | * its "stack". It's not even well-defined for programs written 571 | * languages like Go. 572 | */ 573 | return vma->vm_start <= vma->vm_mm->start_stack && 574 | vma->vm_end >= vma->vm_mm->start_stack; 575 | } 576 | 577 | 578 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 579 | struct task_struct *task; 580 | struct mm_struct *mm; 581 | struct vm_area_struct *vma; 582 | char path_buf[MY_PATH_MAX_LEN]; 583 | int success_cnt = 0; 584 | size_t copy_pos; 585 | size_t end_pos; 586 | 587 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 588 | if (!task) { 589 | return -2; 590 | } 591 | 592 | mm = get_task_mm(task); 593 | 594 | if (!mm) { 595 | return -3; 596 | } 597 | 598 | if (is_kernel_buf) { 599 | memset(buf, 0, buf_size); 600 | } 601 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 602 | 603 | 604 | copy_pos = (size_t)buf; 605 | end_pos = (size_t)((size_t)buf + buf_size); 606 | 607 | if (down_read_mmap_lock(mm) != 0) { 608 | mmput(mm); 609 | return -4; 610 | } 611 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 612 | struct map_entry entry; 613 | struct file * vm_file; 614 | if (copy_pos >= end_pos) { 615 | break; 616 | } 617 | entry.start = vma->vm_start; 618 | entry.end = vma->vm_end; 619 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 620 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 621 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 622 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 623 | memset(entry.path, 0, sizeof(entry.path)); 624 | vm_file = get_vm_file(vma); 625 | if (vm_file) { 626 | char *path; 627 | memset(path_buf, 0, sizeof(path_buf)); 628 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 629 | if (path > 0) { 630 | strncat(entry.path, path, sizeof(entry.path) - 1); 631 | } 632 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 633 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 634 | } else { 635 | if (vma->vm_start <= mm->brk && 636 | vma->vm_end >= mm->start_brk) { 637 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 638 | } else { 639 | if (is_stack(vma)) { 640 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 641 | } 642 | } 643 | 644 | } 645 | 646 | if (is_kernel_buf) { 647 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 648 | } else { 649 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 650 | break; 651 | } 652 | } 653 | copy_pos += sizeof(entry); 654 | success_cnt++; 655 | } 656 | up_read_mmap_lock(mm); 657 | mmput(mm); 658 | 659 | return success_cnt; 660 | } 661 | 662 | 663 | #endif 664 | 665 | 666 | 667 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,4,21) 668 | /* Check if the vma is being used as a stack by this task */ 669 | int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t) { 670 | return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 671 | } 672 | 673 | /* 674 | * Indicate if the VMA is a stack for the given task; for 675 | * /proc/PID/maps that is the stack of the main task. 
676 | */ 677 | static int is_stack(struct task_struct *task, 678 | struct vm_area_struct *vma, int is_pid) { 679 | int stack = 0; 680 | 681 | if (is_pid) { 682 | stack = vma->vm_start <= vma->vm_mm->start_stack && 683 | vma->vm_end >= vma->vm_mm->start_stack; 684 | } else { 685 | rcu_read_lock(); 686 | stack = vma_is_stack_for_task(vma, task); 687 | rcu_read_unlock(); 688 | } 689 | return stack; 690 | } 691 | 692 | 693 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 694 | struct task_struct *task; 695 | struct mm_struct *mm; 696 | struct vm_area_struct *vma; 697 | char path_buf[MY_PATH_MAX_LEN]; 698 | int success_cnt = 0; 699 | size_t copy_pos; 700 | size_t end_pos; 701 | 702 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 703 | if (!task) { 704 | return -2; 705 | } 706 | 707 | mm = get_task_mm(task); 708 | 709 | if (!mm) { 710 | return -3; 711 | } 712 | 713 | if (is_kernel_buf) { 714 | memset(buf, 0, buf_size); 715 | } 716 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 717 | 718 | copy_pos = (size_t)buf; 719 | end_pos = (size_t)((size_t)buf + buf_size); 720 | 721 | if (down_read_mmap_lock(mm) != 0) { 722 | mmput(mm); 723 | return -4; 724 | } 725 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 726 | struct map_entry entry; 727 | struct file * vm_file; 728 | 729 | if (copy_pos >= end_pos) { 730 | break; 731 | } 732 | entry.start = vma->vm_start; 733 | entry.end = vma->vm_end; 734 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 735 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 736 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 737 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 738 | memset(entry.path, 0, sizeof(entry.path)); 739 | vm_file = get_vm_file(vma); 740 | if (vm_file) { 741 | char *path; 742 | memset(path_buf, 0, sizeof(path_buf)); 743 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 744 | if (path > 0) { 745 | strncat(entry.path, path, sizeof(entry.path) - 1); 746 | } 747 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 748 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 749 | } else { 750 | if (vma->vm_start <= mm->brk && 751 | vma->vm_end >= mm->start_brk) { 752 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 753 | } else { 754 | pid_t tid = is_stack(task, vma, 1); 755 | if (tid != 0) { 756 | /* 757 | * Thread stack in /proc/PID/task/TID/maps or 758 | * the main process stack. 759 | */ 760 | 761 | /* Thread stack in /proc/PID/maps */ 762 | sprintf(entry.path, "[stack:%d]", tid); 763 | } 764 | 765 | } 766 | 767 | } 768 | if (is_kernel_buf) { 769 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 770 | } else { 771 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 772 | break; 773 | } 774 | } 775 | copy_pos += sizeof(entry); 776 | success_cnt++; 777 | } 778 | up_read_mmap_lock(mm); 779 | mmput(mm); 780 | 781 | return success_cnt; 782 | } 783 | 784 | 785 | 786 | #endif 787 | 788 | 789 | 790 | 791 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,4,78) 792 | /* Check if the vma is being used as a stack by this task */ 793 | int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t) { 794 | return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 795 | } 796 | 797 | /* 798 | * Indicate if the VMA is a stack for the given task; for 799 | * /proc/PID/maps that is the stack of the main task. 
800 | */ 801 | static int is_stack(struct task_struct *task, 802 | struct vm_area_struct *vma, int is_pid) { 803 | int stack = 0; 804 | 805 | if (is_pid) { 806 | stack = vma->vm_start <= vma->vm_mm->start_stack && 807 | vma->vm_end >= vma->vm_mm->start_stack; 808 | } else { 809 | rcu_read_lock(); 810 | stack = vma_is_stack_for_task(vma, task); 811 | rcu_read_unlock(); 812 | } 813 | return stack; 814 | } 815 | 816 | 817 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 818 | struct task_struct *task; 819 | struct mm_struct *mm; 820 | struct vm_area_struct *vma; 821 | char path_buf[MY_PATH_MAX_LEN]; 822 | int success_cnt = 0; 823 | size_t copy_pos; 824 | size_t end_pos; 825 | 826 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 827 | if (!task) { 828 | return -2; 829 | } 830 | 831 | mm = get_task_mm(task); 832 | 833 | if (!mm) { 834 | return -3; 835 | } 836 | 837 | 838 | if (is_kernel_buf) { 839 | memset(buf, 0, buf_size); 840 | } 841 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 842 | 843 | 844 | copy_pos = (size_t)buf; 845 | end_pos = (size_t)((size_t)buf + buf_size); 846 | 847 | if (down_read_mmap_lock(mm) != 0) { 848 | mmput(mm); 849 | return -4; 850 | } 851 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 852 | struct map_entry entry; 853 | struct file * vm_file; 854 | if (copy_pos >= end_pos) { 855 | break; 856 | } 857 | entry.start = vma->vm_start; 858 | entry.end = vma->vm_end; 859 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 860 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 861 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 862 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 863 | memset(entry.path, 0, sizeof(entry.path)); 864 | vm_file = get_vm_file(vma); 865 | if (vm_file) { 866 | char *path; 867 | memset(path_buf, 0, sizeof(path_buf)); 868 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 869 | if (path > 0) { 870 | strncat(entry.path, path, sizeof(entry.path) - 1); 871 | } 872 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 873 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 874 | } else { 875 | if (vma->vm_start <= mm->brk && 876 | vma->vm_end >= mm->start_brk) { 877 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 878 | } else { 879 | pid_t tid = is_stack(task, vma, 1); 880 | if (tid != 0) { 881 | /* 882 | * Thread stack in /proc/PID/task/TID/maps or 883 | * the main process stack. 884 | */ 885 | 886 | /* Thread stack in /proc/PID/maps */ 887 | sprintf(entry.path, "[stack:%d]", tid); 888 | } 889 | 890 | } 891 | 892 | } 893 | if (is_kernel_buf) { 894 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 895 | } else { 896 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 897 | break; 898 | } 899 | } 900 | copy_pos += sizeof(entry); 901 | success_cnt++; 902 | } 903 | up_read_mmap_lock(mm); 904 | mmput(mm); 905 | 906 | return success_cnt; 907 | } 908 | 909 | 910 | #endif 911 | 912 | 913 | 914 | 915 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,4,153) 916 | /* 917 | * Indicate if the VMA is a stack for the given task; for 918 | * /proc/PID/maps that is the stack of the main task. 919 | */ 920 | static int is_stack(struct task_struct *task, 921 | struct vm_area_struct *vma) { 922 | /* 923 | * We make no effort to guess what a given thread considers to be 924 | * its "stack". It's not even well-defined for programs written 925 | * languages like Go. 
926 | */ 927 | return vma->vm_start <= vma->vm_mm->start_stack && 928 | vma->vm_end >= vma->vm_mm->start_stack; 929 | } 930 | 931 | 932 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 933 | struct task_struct *task; 934 | struct mm_struct *mm; 935 | struct vm_area_struct *vma; 936 | char path_buf[MY_PATH_MAX_LEN]; 937 | int success_cnt = 0; 938 | size_t copy_pos; 939 | size_t end_pos; 940 | 941 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 942 | if (!task) { 943 | return -2; 944 | } 945 | 946 | mm = get_task_mm(task); 947 | 948 | if (!mm) { 949 | return -3; 950 | } 951 | 952 | if (is_kernel_buf) { 953 | memset(buf, 0, buf_size); 954 | } 955 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 956 | 957 | copy_pos = (size_t)buf; 958 | end_pos = (size_t)((size_t)buf + buf_size); 959 | 960 | if (down_read_mmap_lock(mm) != 0) { 961 | mmput(mm); 962 | return -4; 963 | } 964 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 965 | struct map_entry entry; 966 | struct file * vm_file; 967 | if (copy_pos >= end_pos) { 968 | break; 969 | } 970 | entry.start = vma->vm_start; 971 | entry.end = vma->vm_end; 972 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 973 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 974 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 975 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 976 | memset(entry.path, 0, sizeof(entry.path)); 977 | vm_file = get_vm_file(vma); 978 | if (vm_file) { 979 | char *path; 980 | memset(path_buf, 0, sizeof(path_buf)); 981 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 982 | if (path > 0) { 983 | strncat(entry.path, path, sizeof(entry.path) - 1); 984 | } 985 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 986 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 987 | } else { 988 | if (vma->vm_start <= mm->brk && 989 | vma->vm_end >= mm->start_brk) { 990 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 991 | } else { 992 | pid_t tid = is_stack(task, vma); 993 | if (tid != 0) { 994 | /* 995 | * Thread stack in /proc/PID/task/TID/maps or 996 | * the main process stack. 997 | */ 998 | 999 | /* Thread stack in /proc/PID/maps */ 1000 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1001 | } 1002 | 1003 | } 1004 | 1005 | } 1006 | if (is_kernel_buf) { 1007 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1008 | } else { 1009 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1010 | break; 1011 | } 1012 | } 1013 | copy_pos += sizeof(entry); 1014 | success_cnt++; 1015 | } 1016 | up_read_mmap_lock(mm); 1017 | mmput(mm); 1018 | 1019 | return success_cnt; 1020 | } 1021 | 1022 | #endif 1023 | 1024 | 1025 | 1026 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,4,192) 1027 | /* 1028 | * Indicate if the VMA is a stack for the given task; for 1029 | * /proc/PID/maps that is the stack of the main task. 1030 | */ 1031 | static int is_stack(struct task_struct *task, 1032 | struct vm_area_struct *vma) { 1033 | /* 1034 | * We make no effort to guess what a given thread considers to be 1035 | * its "stack". It's not even well-defined for programs written 1036 | * languages like Go. 
1037 | */ 1038 | return vma->vm_start <= vma->vm_mm->start_stack && 1039 | vma->vm_end >= vma->vm_mm->start_stack; 1040 | } 1041 | 1042 | 1043 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1044 | struct task_struct *task; 1045 | struct mm_struct *mm; 1046 | struct vm_area_struct *vma; 1047 | char path_buf[MY_PATH_MAX_LEN]; 1048 | int success_cnt = 0; 1049 | size_t copy_pos; 1050 | size_t end_pos; 1051 | 1052 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1053 | if (!task) { 1054 | return -2; 1055 | } 1056 | 1057 | mm = get_task_mm(task); 1058 | 1059 | if (!mm) { 1060 | return -3; 1061 | } 1062 | 1063 | if (is_kernel_buf) { 1064 | memset(buf, 0, buf_size); 1065 | } 1066 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1067 | 1068 | 1069 | copy_pos = (size_t)buf; 1070 | end_pos = (size_t)((size_t)buf + buf_size); 1071 | 1072 | if (down_read_mmap_lock(mm) != 0) { 1073 | mmput(mm); 1074 | return -4; 1075 | } 1076 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1077 | struct map_entry entry; 1078 | struct file * vm_file; 1079 | if (copy_pos >= end_pos) { 1080 | break; 1081 | } 1082 | entry.start = vma->vm_start; 1083 | entry.end = vma->vm_end; 1084 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1085 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1086 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1087 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1088 | memset(entry.path, 0, sizeof(entry.path)); 1089 | vm_file = get_vm_file(vma); 1090 | if (vm_file) { 1091 | char *path; 1092 | memset(path_buf, 0, sizeof(path_buf)); 1093 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1094 | if (path > 0) { 1095 | strncat(entry.path, path, sizeof(entry.path) - 1); 1096 | } 1097 | 1098 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1099 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1100 | } else { 1101 | if (vma->vm_start <= mm->brk && 1102 | vma->vm_end >= mm->start_brk) { 1103 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1104 | } else { 1105 | pid_t tid = is_stack(task, vma); 1106 | if (tid != 0) { 1107 | /* 1108 | * Thread stack in /proc/PID/task/TID/maps or 1109 | * the main process stack. 1110 | */ 1111 | 1112 | /* Thread stack in /proc/PID/maps */ 1113 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1114 | } 1115 | 1116 | } 1117 | 1118 | } 1119 | if (is_kernel_buf) { 1120 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1121 | } else { 1122 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1123 | break; 1124 | } 1125 | } 1126 | copy_pos += sizeof(entry); 1127 | success_cnt++; 1128 | } 1129 | up_read_mmap_lock(mm); 1130 | mmput(mm); 1131 | 1132 | return success_cnt; 1133 | } 1134 | 1135 | 1136 | #endif 1137 | 1138 | 1139 | 1140 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,9,112) 1141 | /* 1142 | * Indicate if the VMA is a stack for the given task; for 1143 | * /proc/PID/maps that is the stack of the main task. 1144 | */ 1145 | static int is_stack(struct vm_area_struct *vma) { 1146 | /* 1147 | * We make no effort to guess what a given thread considers to be 1148 | * its "stack". It's not even well-defined for programs written 1149 | * languages like Go. 
1150 | */ 1151 | return vma->vm_start <= vma->vm_mm->start_stack && 1152 | vma->vm_end >= vma->vm_mm->start_stack; 1153 | } 1154 | 1155 | 1156 | 1157 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1158 | struct task_struct *task; 1159 | struct mm_struct *mm; 1160 | struct vm_area_struct *vma; 1161 | char path_buf[MY_PATH_MAX_LEN]; 1162 | int success_cnt = 0; 1163 | size_t copy_pos; 1164 | size_t end_pos; 1165 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1166 | if (!task) { 1167 | return -2; 1168 | } 1169 | 1170 | mm = get_task_mm(task); 1171 | 1172 | if (!mm) { 1173 | return -3; 1174 | } 1175 | 1176 | if (is_kernel_buf) { 1177 | memset(buf, 0, buf_size); 1178 | } 1179 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1180 | 1181 | copy_pos = (size_t)buf; 1182 | end_pos = (size_t)((size_t)buf + buf_size); 1183 | 1184 | if (down_read_mmap_lock(mm) != 0) { 1185 | mmput(mm); 1186 | return -4; 1187 | } 1188 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1189 | struct map_entry entry; 1190 | struct file * vm_file; 1191 | if (copy_pos >= end_pos) { 1192 | break; 1193 | } 1194 | entry.start = vma->vm_start; 1195 | entry.end = vma->vm_end; 1196 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1197 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1198 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1199 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1200 | memset(entry.path, 0, sizeof(entry.path)); 1201 | vm_file = get_vm_file(vma); 1202 | if (vm_file) { 1203 | char *path; 1204 | memset(path_buf, 0, sizeof(path_buf)); 1205 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1206 | if (path > 0) { 1207 | strncat(entry.path, path, sizeof(entry.path) - 1); 1208 | } 1209 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1210 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1211 | } else { 1212 | if (vma->vm_start <= mm->brk && 1213 | vma->vm_end >= mm->start_brk) { 1214 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1215 | } else { 1216 | if (is_stack(vma)) { 1217 | /* 1218 | * Thread stack in /proc/PID/task/TID/maps or 1219 | * the main process stack. 1220 | */ 1221 | 1222 | /* Thread stack in /proc/PID/maps */ 1223 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1224 | } 1225 | 1226 | } 1227 | 1228 | } 1229 | if (is_kernel_buf) { 1230 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1231 | } else { 1232 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1233 | break; 1234 | } 1235 | } 1236 | copy_pos += sizeof(entry); 1237 | success_cnt++; 1238 | } 1239 | up_read_mmap_lock(mm); 1240 | mmput(mm); 1241 | 1242 | return success_cnt; 1243 | } 1244 | 1245 | #endif 1246 | 1247 | 1248 | 1249 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,9,186) 1250 | /* 1251 | * Indicate if the VMA is a stack for the given task; for 1252 | * /proc/PID/maps that is the stack of the main task. 1253 | */ 1254 | static int is_stack(struct vm_area_struct *vma) { 1255 | /* 1256 | * We make no effort to guess what a given thread considers to be 1257 | * its "stack". It's not even well-defined for programs written 1258 | * languages like Go. 
1259 | */ 1260 | return vma->vm_start <= vma->vm_mm->start_stack && 1261 | vma->vm_end >= vma->vm_mm->start_stack; 1262 | } 1263 | 1264 | 1265 | 1266 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1267 | struct task_struct *task; 1268 | struct mm_struct *mm; 1269 | struct vm_area_struct *vma; 1270 | char path_buf[MY_PATH_MAX_LEN]; 1271 | int success_cnt = 0; 1272 | size_t copy_pos; 1273 | size_t end_pos; 1274 | 1275 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1276 | if (!task) { 1277 | return -2; 1278 | } 1279 | 1280 | mm = get_task_mm(task); 1281 | 1282 | if (!mm) { 1283 | return -3; 1284 | } 1285 | 1286 | if (is_kernel_buf) { 1287 | memset(buf, 0, buf_size); 1288 | } 1289 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1290 | 1291 | 1292 | copy_pos = (size_t)buf; 1293 | end_pos = (size_t)((size_t)buf + buf_size); 1294 | 1295 | if (down_read_mmap_lock(mm) != 0) { 1296 | mmput(mm); 1297 | return -4; 1298 | } 1299 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1300 | struct map_entry entry; 1301 | struct file * vm_file; 1302 | if (copy_pos >= end_pos) { 1303 | break; 1304 | } 1305 | entry.start = vma->vm_start; 1306 | entry.end = vma->vm_end; 1307 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1308 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1309 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1310 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1311 | memset(entry.path, 0, sizeof(entry.path)); 1312 | vm_file = get_vm_file(vma); 1313 | if (vm_file) { 1314 | char *path; 1315 | memset(path_buf, 0, sizeof(path_buf)); 1316 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1317 | if (path > 0) { 1318 | strncat(entry.path, path, sizeof(entry.path) - 1); 1319 | } 1320 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1321 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1322 | } else { 1323 | if (vma->vm_start <= mm->brk && 1324 | vma->vm_end >= mm->start_brk) { 1325 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1326 | } else { 1327 | if (is_stack(vma)) { 1328 | /* 1329 | * Thread stack in /proc/PID/task/TID/maps or 1330 | * the main process stack. 1331 | */ 1332 | 1333 | /* Thread stack in /proc/PID/maps */ 1334 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1335 | } 1336 | 1337 | } 1338 | 1339 | } 1340 | if (is_kernel_buf) { 1341 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1342 | } else { 1343 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1344 | break; 1345 | } 1346 | } 1347 | copy_pos += sizeof(entry); 1348 | success_cnt++; 1349 | } 1350 | up_read_mmap_lock(mm); 1351 | mmput(mm); 1352 | 1353 | return success_cnt; 1354 | } 1355 | 1356 | 1357 | #endif 1358 | 1359 | 1360 | 1361 | 1362 | 1363 | 1364 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,14,83) 1365 | 1366 | 1367 | /* 1368 | * Indicate if the VMA is a stack for the given task; for 1369 | * /proc/PID/maps that is the stack of the main task. 1370 | */ 1371 | static int is_stack(struct vm_area_struct *vma) { 1372 | /* 1373 | * We make no effort to guess what a given thread considers to be 1374 | * its "stack". It's not even well-defined for programs written 1375 | * languages like Go. 
1376 | */ 1377 | return vma->vm_start <= vma->vm_mm->start_stack && 1378 | vma->vm_end >= vma->vm_mm->start_stack; 1379 | } 1380 | 1381 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1382 | struct task_struct *task; 1383 | struct mm_struct *mm; 1384 | struct vm_area_struct *vma; 1385 | char path_buf[MY_PATH_MAX_LEN]; 1386 | int success_cnt = 0; 1387 | size_t copy_pos; 1388 | size_t end_pos; 1389 | 1390 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1391 | if (!task) { 1392 | return -2; 1393 | } 1394 | 1395 | mm = get_task_mm(task); 1396 | 1397 | if (!mm) { 1398 | return -3; 1399 | } 1400 | if (is_kernel_buf) { 1401 | memset(buf, 0, buf_size); 1402 | } 1403 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1404 | 1405 | copy_pos = (size_t)buf; 1406 | end_pos = (size_t)((size_t)buf + buf_size); 1407 | 1408 | if (down_read_mmap_lock(mm) != 0) { 1409 | mmput(mm); 1410 | return -4; 1411 | } 1412 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1413 | struct map_entry entry; 1414 | struct file * vm_file; 1415 | if (copy_pos >= end_pos) { 1416 | break; 1417 | } 1418 | entry.start = vma->vm_start; 1419 | entry.end = vma->vm_end; 1420 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1421 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1422 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1423 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1424 | memset(entry.path, 0, sizeof(entry.path)); 1425 | vm_file = get_vm_file(vma); 1426 | if (vm_file) { 1427 | char *path; 1428 | memset(path_buf, 0, sizeof(path_buf)); 1429 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1430 | if (path > 0) { 1431 | strncat(entry.path, path, sizeof(entry.path) - 1); 1432 | } 1433 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1434 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1435 | } else { 1436 | if (vma->vm_start <= mm->brk && 1437 | vma->vm_end >= mm->start_brk) { 1438 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1439 | } else { 1440 | if (is_stack(vma)) { 1441 | /* 1442 | * Thread stack in /proc/PID/task/TID/maps or 1443 | * the main process stack. 1444 | */ 1445 | 1446 | /* Thread stack in /proc/PID/maps */ 1447 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1448 | } 1449 | 1450 | } 1451 | 1452 | } 1453 | if (is_kernel_buf) { 1454 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1455 | } else { 1456 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1457 | break; 1458 | } 1459 | } 1460 | copy_pos += sizeof(entry); 1461 | success_cnt++; 1462 | } 1463 | up_read_mmap_lock(mm); 1464 | mmput(mm); 1465 | 1466 | return success_cnt; 1467 | } 1468 | 1469 | 1470 | #endif 1471 | 1472 | 1473 | 1474 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,14,117) 1475 | 1476 | 1477 | /* 1478 | * Indicate if the VMA is a stack for the given task; for 1479 | * /proc/PID/maps that is the stack of the main task. 1480 | */ 1481 | static int is_stack(struct vm_area_struct *vma) { 1482 | /* 1483 | * We make no effort to guess what a given thread considers to be 1484 | * its "stack". It's not even well-defined for programs written 1485 | * languages like Go. 
1486 | */ 1487 | return vma->vm_start <= vma->vm_mm->start_stack && 1488 | vma->vm_end >= vma->vm_mm->start_stack; 1489 | } 1490 | 1491 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1492 | struct task_struct *task; 1493 | struct mm_struct *mm; 1494 | struct vm_area_struct *vma; 1495 | char path_buf[MY_PATH_MAX_LEN]; 1496 | int success_cnt = 0; 1497 | size_t copy_pos; 1498 | size_t end_pos; 1499 | 1500 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1501 | if (!task) { 1502 | return -2; 1503 | } 1504 | 1505 | mm = get_task_mm(task); 1506 | 1507 | if (!mm) { 1508 | return -3; 1509 | } 1510 | 1511 | 1512 | if (is_kernel_buf) { 1513 | memset(buf, 0, buf_size); 1514 | } 1515 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1516 | 1517 | copy_pos = (size_t)buf; 1518 | end_pos = (size_t)((size_t)buf + buf_size); 1519 | 1520 | if (down_read_mmap_lock(mm) != 0) { 1521 | mmput(mm); 1522 | return -4; 1523 | } 1524 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1525 | struct map_entry entry; 1526 | struct file * vm_file; 1527 | if (copy_pos >= end_pos) { 1528 | break; 1529 | } 1530 | entry.start = vma->vm_start; 1531 | entry.end = vma->vm_end; 1532 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1533 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1534 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1535 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1536 | memset(entry.path, 0, sizeof(entry.path)); 1537 | vm_file = get_vm_file(vma); 1538 | if (vm_file) { 1539 | char *path; 1540 | memset(path_buf, 0, sizeof(path_buf)); 1541 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1542 | if (path > 0) { 1543 | strncat(entry.path, path, sizeof(entry.path) - 1); 1544 | } 1545 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1546 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1547 | } else { 1548 | if (vma->vm_start <= mm->brk && 1549 | vma->vm_end >= mm->start_brk) { 1550 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1551 | } else { 1552 | if (is_stack(vma)) { 1553 | /* 1554 | * Thread stack in /proc/PID/task/TID/maps or 1555 | * the main process stack. 1556 | */ 1557 | 1558 | /* Thread stack in /proc/PID/maps */ 1559 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1560 | } 1561 | 1562 | } 1563 | 1564 | } 1565 | 1566 | if (is_kernel_buf) { 1567 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1568 | } else { 1569 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1570 | break; 1571 | } 1572 | } 1573 | copy_pos += sizeof(entry); 1574 | success_cnt++; 1575 | } 1576 | up_read_mmap_lock(mm); 1577 | mmput(mm); 1578 | 1579 | return success_cnt; 1580 | } 1581 | 1582 | 1583 | 1584 | #endif 1585 | 1586 | 1587 | 1588 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,14,141) 1589 | 1590 | 1591 | /* 1592 | * Indicate if the VMA is a stack for the given task; for 1593 | * /proc/PID/maps that is the stack of the main task. 1594 | */ 1595 | static int is_stack(struct vm_area_struct *vma) { 1596 | /* 1597 | * We make no effort to guess what a given thread considers to be 1598 | * its "stack". It's not even well-defined for programs written 1599 | * languages like Go. 
1600 | */ 1601 | return vma->vm_start <= vma->vm_mm->start_stack && 1602 | vma->vm_end >= vma->vm_mm->start_stack; 1603 | } 1604 | 1605 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1606 | struct task_struct *task; 1607 | struct mm_struct *mm; 1608 | struct vm_area_struct *vma; 1609 | char path_buf[MY_PATH_MAX_LEN]; 1610 | int success_cnt = 0; 1611 | size_t copy_pos; 1612 | size_t end_pos; 1613 | 1614 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1615 | if (!task) { 1616 | return -2; 1617 | } 1618 | 1619 | mm = get_task_mm(task); 1620 | 1621 | if (!mm) { 1622 | return -3; 1623 | } 1624 | if (is_kernel_buf) { 1625 | memset(buf, 0, buf_size); 1626 | } 1627 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1628 | 1629 | copy_pos = (size_t)buf; 1630 | end_pos = (size_t)((size_t)buf + buf_size); 1631 | 1632 | if (down_read_mmap_lock(mm) != 0) { 1633 | mmput(mm); 1634 | return -4; 1635 | } 1636 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1637 | struct map_entry entry; 1638 | struct file * vm_file; 1639 | if (copy_pos >= end_pos) { 1640 | break; 1641 | } 1642 | entry.start = vma->vm_start; 1643 | entry.end = vma->vm_end; 1644 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1645 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1646 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1647 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1648 | memset(entry.path, 0, sizeof(entry.path)); 1649 | vm_file = get_vm_file(vma); 1650 | if (vm_file) { 1651 | char *path; 1652 | memset(path_buf, 0, sizeof(path_buf)); 1653 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1654 | if (path > 0) { 1655 | strncat(entry.path, path, sizeof(entry.path) - 1); 1656 | } 1657 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1658 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1659 | } else { 1660 | if (vma->vm_start <= mm->brk && 1661 | vma->vm_end >= mm->start_brk) { 1662 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1663 | } else { 1664 | if (is_stack(vma)) { 1665 | /* 1666 | * Thread stack in /proc/PID/task/TID/maps or 1667 | * the main process stack. 1668 | */ 1669 | 1670 | /* Thread stack in /proc/PID/maps */ 1671 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1672 | } 1673 | 1674 | } 1675 | 1676 | } 1677 | if (is_kernel_buf) { 1678 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1679 | } else { 1680 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1681 | break; 1682 | } 1683 | } 1684 | copy_pos += sizeof(entry); 1685 | success_cnt++; 1686 | } 1687 | up_read_mmap_lock(mm); 1688 | mmput(mm); 1689 | 1690 | return success_cnt; 1691 | } 1692 | 1693 | 1694 | #endif 1695 | 1696 | 1697 | 1698 | 1699 | 1700 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,19,81) 1701 | 1702 | 1703 | /* 1704 | * Indicate if the VMA is a stack for the given task; for 1705 | * /proc/PID/maps that is the stack of the main task. 1706 | */ 1707 | static int is_stack(struct vm_area_struct *vma) { 1708 | /* 1709 | * We make no effort to guess what a given thread considers to be 1710 | * its "stack". It's not even well-defined for programs written 1711 | * languages like Go. 
1712 | */ 1713 | return vma->vm_start <= vma->vm_mm->start_stack && 1714 | vma->vm_end >= vma->vm_mm->start_stack; 1715 | } 1716 | 1717 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1718 | struct task_struct *task; 1719 | struct mm_struct *mm; 1720 | struct vm_area_struct *vma; 1721 | char path_buf[MY_PATH_MAX_LEN]; 1722 | int success_cnt = 0; 1723 | size_t copy_pos; 1724 | size_t end_pos; 1725 | 1726 | 1727 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1728 | if (!task) { 1729 | return -2; 1730 | } 1731 | 1732 | mm = get_task_mm(task); 1733 | 1734 | if (!mm) { 1735 | return -3; 1736 | } 1737 | if (is_kernel_buf) { 1738 | memset(buf, 0, buf_size); 1739 | } 1740 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1741 | 1742 | copy_pos = (size_t)buf; 1743 | end_pos = (size_t)((size_t)buf + buf_size); 1744 | 1745 | if (down_read_mmap_lock(mm) != 0) { 1746 | mmput(mm); 1747 | return -4; 1748 | } 1749 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1750 | struct map_entry entry; 1751 | struct file * vm_file; 1752 | 1753 | if (copy_pos >= end_pos) { 1754 | break; 1755 | } 1756 | entry.start = vma->vm_start; 1757 | entry.end = vma->vm_end; 1758 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1759 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1760 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1761 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1762 | memset(entry.path, 0, sizeof(entry.path)); 1763 | vm_file = get_vm_file(vma); 1764 | if (vm_file) { 1765 | char *path; 1766 | memset(path_buf, 0, sizeof(path_buf)); 1767 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1768 | if (path > 0) { 1769 | strncat(entry.path, path, sizeof(entry.path) - 1); 1770 | } 1771 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1772 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1773 | } else { 1774 | if (vma->vm_start <= mm->brk && 1775 | vma->vm_end >= mm->start_brk) { 1776 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1777 | } else { 1778 | if (is_stack(vma)) { 1779 | /* 1780 | * Thread stack in /proc/PID/task/TID/maps or 1781 | * the main process stack. 1782 | */ 1783 | 1784 | /* Thread stack in /proc/PID/maps */ 1785 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1786 | } 1787 | 1788 | } 1789 | 1790 | } 1791 | if (is_kernel_buf) { 1792 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1793 | } else { 1794 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1795 | break; 1796 | } 1797 | } 1798 | copy_pos += sizeof(entry); 1799 | success_cnt++; 1800 | } 1801 | up_read_mmap_lock(mm); 1802 | mmput(mm); 1803 | 1804 | return success_cnt; 1805 | } 1806 | #endif 1807 | 1808 | 1809 | 1810 | 1811 | 1812 | 1813 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(4,19,113) 1814 | 1815 | /* 1816 | * Indicate if the VMA is a stack for the given task; for 1817 | * /proc/PID/maps that is the stack of the main task. 1818 | */ 1819 | static int is_stack(struct vm_area_struct *vma) { 1820 | /* 1821 | * We make no effort to guess what a given thread considers to be 1822 | * its "stack". It's not even well-defined for programs written 1823 | * languages like Go. 
1824 | */ 1825 | return vma->vm_start <= vma->vm_mm->start_stack && 1826 | vma->vm_end >= vma->vm_mm->start_stack; 1827 | } 1828 | 1829 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1830 | struct task_struct *task; 1831 | struct mm_struct *mm; 1832 | struct vm_area_struct *vma; 1833 | char path_buf[MY_PATH_MAX_LEN]; 1834 | int success_cnt = 0; 1835 | size_t copy_pos; 1836 | size_t end_pos; 1837 | 1838 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1839 | if (!task) { 1840 | return -2; 1841 | } 1842 | 1843 | mm = get_task_mm(task); 1844 | 1845 | if (!mm) { 1846 | return -3; 1847 | } 1848 | if (is_kernel_buf) { 1849 | memset(buf, 0, buf_size); 1850 | } 1851 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1852 | 1853 | copy_pos = (size_t)buf; 1854 | end_pos = (size_t)((size_t)buf + buf_size); 1855 | 1856 | if (down_read_mmap_lock(mm) != 0) { 1857 | mmput(mm); 1858 | return -4; 1859 | } 1860 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1861 | struct map_entry entry; 1862 | struct file * vm_file; 1863 | if (copy_pos >= end_pos) { 1864 | break; 1865 | } 1866 | entry.start = vma->vm_start; 1867 | entry.end = vma->vm_end; 1868 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1869 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1870 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1871 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1872 | memset(entry.path, 0, sizeof(entry.path)); 1873 | vm_file = get_vm_file(vma); 1874 | if (vm_file) { 1875 | char *path; 1876 | memset(path_buf, 0, sizeof(path_buf)); 1877 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1878 | if (path > 0) { 1879 | strncat(entry.path, path, sizeof(entry.path) - 1); 1880 | } 1881 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1882 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1883 | } else { 1884 | if (vma->vm_start <= mm->brk && 1885 | vma->vm_end >= mm->start_brk) { 1886 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1887 | } else { 1888 | if (is_stack(vma)) { 1889 | /* 1890 | * Thread stack in /proc/PID/task/TID/maps or 1891 | * the main process stack. 1892 | */ 1893 | 1894 | /* Thread stack in /proc/PID/maps */ 1895 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 1896 | } 1897 | 1898 | } 1899 | 1900 | } 1901 | if (is_kernel_buf) { 1902 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 1903 | } else { 1904 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 1905 | break; 1906 | } 1907 | } 1908 | copy_pos += sizeof(entry); 1909 | success_cnt++; 1910 | } 1911 | up_read_mmap_lock(mm); 1912 | mmput(mm); 1913 | 1914 | return success_cnt; 1915 | } 1916 | #endif 1917 | 1918 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(5,4,61) 1919 | 1920 | /* 1921 | * Indicate if the VMA is a stack for the given task; for 1922 | * /proc/PID/maps that is the stack of the main task. 1923 | */ 1924 | static int is_stack(struct vm_area_struct *vma) { 1925 | /* 1926 | * We make no effort to guess what a given thread considers to be 1927 | * its "stack". It's not even well-defined for programs written 1928 | * languages like Go. 
1929 | */ 1930 | return vma->vm_start <= vma->vm_mm->start_stack && 1931 | vma->vm_end >= vma->vm_mm->start_stack; 1932 | } 1933 | 1934 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 1935 | struct task_struct *task; 1936 | struct mm_struct *mm; 1937 | struct vm_area_struct *vma; 1938 | char path_buf[MY_PATH_MAX_LEN]; 1939 | int success_cnt = 0; 1940 | size_t copy_pos; 1941 | size_t end_pos; 1942 | 1943 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 1944 | if (!task) { 1945 | return -2; 1946 | } 1947 | 1948 | mm = get_task_mm(task); 1949 | 1950 | if (!mm) { 1951 | return -3; 1952 | } 1953 | if (is_kernel_buf) { 1954 | memset(buf, 0, buf_size); 1955 | } 1956 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 1957 | 1958 | copy_pos = (size_t)buf; 1959 | end_pos = (size_t)((size_t)buf + buf_size); 1960 | 1961 | if (down_read_mmap_lock(mm) != 0) { 1962 | mmput(mm); 1963 | return -4; 1964 | } 1965 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 1966 | struct map_entry entry; 1967 | struct file * vm_file; 1968 | if (copy_pos >= end_pos) { 1969 | break; 1970 | } 1971 | entry.start = vma->vm_start; 1972 | entry.end = vma->vm_end; 1973 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 1974 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 1975 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 1976 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 1977 | memset(entry.path, 0, sizeof(entry.path)); 1978 | vm_file = get_vm_file(vma); 1979 | if (vm_file) { 1980 | char *path; 1981 | memset(path_buf, 0, sizeof(path_buf)); 1982 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 1983 | if (path > 0) { 1984 | strncat(entry.path, path, sizeof(entry.path) - 1); 1985 | } 1986 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 1987 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 1988 | } else { 1989 | if (vma->vm_start <= mm->brk && 1990 | vma->vm_end >= mm->start_brk) { 1991 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 1992 | } else { 1993 | if (is_stack(vma)) { 1994 | /* 1995 | * Thread stack in /proc/PID/task/TID/maps or 1996 | * the main process stack. 1997 | */ 1998 | 1999 | /* Thread stack in /proc/PID/maps */ 2000 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 2001 | } 2002 | 2003 | } 2004 | 2005 | } 2006 | if (is_kernel_buf) { 2007 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 2008 | } else { 2009 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 2010 | break; 2011 | } 2012 | } 2013 | copy_pos += sizeof(entry); 2014 | success_cnt++; 2015 | } 2016 | up_read_mmap_lock(mm); 2017 | mmput(mm); 2018 | 2019 | return success_cnt; 2020 | } 2021 | #endif 2022 | 2023 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(5,10,43) 2024 | 2025 | /* 2026 | * Indicate if the VMA is a stack for the given task; for 2027 | * /proc/PID/maps that is the stack of the main task. 2028 | */ 2029 | static int is_stack(struct vm_area_struct* vma) { 2030 | /* 2031 | * We make no effort to guess what a given thread considers to be 2032 | * its "stack". It's not even well-defined for programs written 2033 | * languages like Go. 
2034 | */ 2035 | return vma->vm_start <= vma->vm_mm->start_stack && 2036 | vma->vm_end >= vma->vm_mm->start_stack; 2037 | } 2038 | 2039 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 2040 | struct task_struct* task; 2041 | struct mm_struct* mm; 2042 | struct vm_area_struct* vma; 2043 | char path_buf[MY_PATH_MAX_LEN]; 2044 | int success_cnt = 0; 2045 | size_t copy_pos; 2046 | size_t end_pos; 2047 | 2048 | 2049 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 2050 | if (!task) { 2051 | return -2; 2052 | } 2053 | 2054 | mm = get_task_mm(task); 2055 | 2056 | if (!mm) { 2057 | return -3; 2058 | } 2059 | if (is_kernel_buf) { 2060 | memset(buf, 0, buf_size); 2061 | } 2062 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 2063 | 2064 | copy_pos = (size_t)buf; 2065 | end_pos = (size_t)((size_t)buf + buf_size); 2066 | 2067 | if (down_read_mmap_lock(mm) != 0) { 2068 | mmput(mm); 2069 | return -4; 2070 | } 2071 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 2072 | struct map_entry entry; 2073 | struct file* vm_file; 2074 | if (copy_pos >= end_pos) { 2075 | break; 2076 | } 2077 | entry.start = vma->vm_start; 2078 | entry.end = vma->vm_end; 2079 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 2080 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 2081 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 2082 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 2083 | memset(entry.path, 0, sizeof(entry.path)); 2084 | vm_file = get_vm_file(vma); 2085 | if (vm_file) { 2086 | char* path; 2087 | memset(path_buf, 0, sizeof(path_buf)); 2088 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 2089 | if (path > 0) { 2090 | strncat(entry.path, path, sizeof(entry.path) - 1); 2091 | } 2092 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 2093 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 2094 | } else { 2095 | if (vma->vm_start <= mm->brk && 2096 | vma->vm_end >= mm->start_brk) { 2097 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 2098 | } else { 2099 | if (is_stack(vma)) { 2100 | /* 2101 | * Thread stack in /proc/PID/task/TID/maps or 2102 | * the main process stack. 2103 | */ 2104 | 2105 | /* Thread stack in /proc/PID/maps */ 2106 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 2107 | } 2108 | 2109 | } 2110 | 2111 | } 2112 | if (is_kernel_buf) { 2113 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 2114 | } else { 2115 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 2116 | break; 2117 | } 2118 | } 2119 | copy_pos += sizeof(entry); 2120 | success_cnt++; 2121 | } 2122 | up_read_mmap_lock(mm); 2123 | mmput(mm); 2124 | 2125 | return success_cnt; 2126 | } 2127 | #endif 2128 | 2129 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(5,15,41) 2130 | 2131 | /* 2132 | * Indicate if the VMA is a stack for the given task; for 2133 | * /proc/PID/maps that is the stack of the main task. 2134 | */ 2135 | static int is_stack(struct vm_area_struct* vma) { 2136 | /* 2137 | * We make no effort to guess what a given thread considers to be 2138 | * its "stack". It's not even well-defined for programs written 2139 | * languages like Go. 
2140 | */ 2141 | return vma->vm_start <= vma->vm_mm->start_stack && 2142 | vma->vm_end >= vma->vm_mm->start_stack; 2143 | } 2144 | 2145 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 2146 | struct task_struct* task; 2147 | struct mm_struct* mm; 2148 | struct vm_area_struct* vma; 2149 | char path_buf[MY_PATH_MAX_LEN]; 2150 | int success_cnt = 0; 2151 | size_t copy_pos; 2152 | size_t end_pos; 2153 | 2154 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 2155 | if (!task) { 2156 | return -2; 2157 | } 2158 | 2159 | mm = get_task_mm(task); 2160 | 2161 | if (!mm) { 2162 | return -3; 2163 | } 2164 | if (is_kernel_buf) { 2165 | memset(buf, 0, buf_size); 2166 | } 2167 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 2168 | 2169 | copy_pos = (size_t)buf; 2170 | end_pos = (size_t)((size_t)buf + buf_size); 2171 | 2172 | if (down_read_mmap_lock(mm) != 0) { 2173 | mmput(mm); 2174 | return -4; 2175 | } 2176 | for (vma = mm->mmap; vma; vma = vma->vm_next) { 2177 | struct map_entry entry; 2178 | struct file* vm_file; 2179 | if (copy_pos >= end_pos) { 2180 | break; 2181 | } 2182 | entry.start = vma->vm_start; 2183 | entry.end = vma->vm_end; 2184 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 2185 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 2186 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 2187 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 2188 | memset(entry.path, 0, sizeof(entry.path)); 2189 | vm_file = get_vm_file(vma); 2190 | if (vm_file) { 2191 | char* path; 2192 | memset(path_buf, 0, sizeof(path_buf)); 2193 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 2194 | if (path > 0) { 2195 | strncat(entry.path, path, sizeof(entry.path) - 1); 2196 | } 2197 | } else if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 2198 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 2199 | } else { 2200 | if (vma->vm_start <= mm->brk && 2201 | vma->vm_end >= mm->start_brk) { 2202 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 2203 | } else { 2204 | if (is_stack(vma)) { 2205 | /* 2206 | * Thread stack in /proc/PID/task/TID/maps or 2207 | * the main process stack. 2208 | */ 2209 | 2210 | /* Thread stack in /proc/PID/maps */ 2211 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 2212 | } 2213 | 2214 | } 2215 | 2216 | } 2217 | if (is_kernel_buf) { 2218 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 2219 | } else { 2220 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 2221 | break; 2222 | } 2223 | } 2224 | copy_pos += sizeof(entry); 2225 | success_cnt++; 2226 | } 2227 | up_read_mmap_lock(mm); 2228 | mmput(mm); 2229 | 2230 | return success_cnt; 2231 | } 2232 | #endif 2233 | 2234 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(6,1,75) 2235 | #include 2236 | struct anon_vma_name * __weak anon_vma_name(struct vm_area_struct* vma) { 2237 | return NULL; 2238 | } 2239 | 2240 | /* 2241 | * Indicate if the VMA is a stack for the given task; for 2242 | * /proc/PID/maps that is the stack of the main task. 2243 | */ 2244 | static int is_stack(struct vm_area_struct* vma) { 2245 | /* 2246 | * We make no effort to guess what a given thread considers to be 2247 | * its "stack". It's not even well-defined for programs written 2248 | * languages like Go. 
2249 | */ 2250 | return vma->vm_start <= vma->vm_mm->start_stack && 2251 | vma->vm_end >= vma->vm_mm->start_stack; 2252 | } 2253 | 2254 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 2255 | struct task_struct* task; 2256 | struct mm_struct* mm; 2257 | struct vm_area_struct* vma; 2258 | char path_buf[MY_PATH_MAX_LEN] = {0}; 2259 | int success_cnt = 0; 2260 | size_t copy_pos; 2261 | size_t end_pos; 2262 | 2263 | task = pid_task(proc_pid_struct, PIDTYPE_PID); 2264 | if (!task) { 2265 | return -2; 2266 | } 2267 | 2268 | mm = get_task_mm(task); 2269 | 2270 | if (!mm) { 2271 | return -3; 2272 | } 2273 | if (is_kernel_buf) { 2274 | memset(buf, 0, buf_size); 2275 | } 2276 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 2277 | 2278 | copy_pos = (size_t)buf; 2279 | end_pos = (size_t)((size_t)buf + buf_size); 2280 | 2281 | if (down_read_mmap_lock(mm) != 0) { 2282 | mmput(mm); 2283 | return -4; 2284 | } 2285 | 2286 | { 2287 | VMA_ITERATOR(iter, mm, 0); 2288 | for_each_vma(iter, vma) { 2289 | struct map_entry entry; 2290 | struct file* vm_file; 2291 | struct anon_vma_name *anon_name = NULL; 2292 | if (copy_pos >= end_pos) { 2293 | break; 2294 | } 2295 | entry.start = vma->vm_start; 2296 | entry.end = vma->vm_end; 2297 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 2298 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 2299 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 2300 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 2301 | memset(entry.path, 0, sizeof(entry.path)); 2302 | vm_file = get_vm_file(vma); 2303 | if (vm_file) { 2304 | char* path; 2305 | memset(path_buf, 0, sizeof(path_buf)); 2306 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 2307 | if (path > 0) { 2308 | strncat(entry.path, path, sizeof(entry.path) - 1); 2309 | } 2310 | } else if (!vma->vm_mm) { 2311 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 2312 | } else if (vma->vm_start <= mm->brk && 2313 | vma->vm_end >= mm->start_brk) { 2314 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 2315 | } else if (is_stack(vma)) { 2316 | /* 2317 | * Thread stack in /proc/PID/task/TID/maps or 2318 | * the main process stack. 2319 | */ 2320 | 2321 | /* Thread stack in /proc/PID/maps */ 2322 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 2323 | } else { 2324 | anon_name = anon_vma_name(vma); 2325 | if(anon_name) { 2326 | snprintf(entry.path, sizeof(entry.path), "[anon:%s]", anon_name->name); 2327 | } 2328 | } 2329 | 2330 | if (is_kernel_buf) { 2331 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 2332 | } else { 2333 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 2334 | break; 2335 | } 2336 | } 2337 | copy_pos += sizeof(entry); 2338 | success_cnt++; 2339 | } 2340 | } 2341 | up_read_mmap_lock(mm); 2342 | mmput(mm); 2343 | 2344 | return success_cnt; 2345 | } 2346 | #endif 2347 | 2348 | 2349 | #if MY_LINUX_VERSION_CODE == KERNEL_VERSION(6,6,30) 2350 | #include 2351 | struct anon_vma_name * __weak anon_vma_name(struct vm_area_struct* vma) { 2352 | return NULL; 2353 | } 2354 | 2355 | static int get_proc_maps_list(bool is_kernel_buf, struct pid* proc_pid_struct, char* buf, size_t buf_size) { 2356 | struct task_struct* task; 2357 | struct mm_struct* mm; 2358 | struct vm_area_struct* vma; 2359 | char path_buf[MY_PATH_MAX_LEN]; 2360 | int success_cnt = 0; 2361 | size_t copy_pos; 2362 | size_t end_pos; 2363 | 2364 | task = 
pid_task(proc_pid_struct, PIDTYPE_PID); 2365 | if (!task) { 2366 | return -2; 2367 | } 2368 | 2369 | mm = get_task_mm(task); 2370 | 2371 | if (!mm) { 2372 | return -3; 2373 | } 2374 | if (is_kernel_buf) { 2375 | memset(buf, 0, buf_size); 2376 | } 2377 | //else if (clear_user(buf, buf_size)) { return -4; } //清空用户的缓冲区 2378 | 2379 | copy_pos = (size_t)buf; 2380 | end_pos = (size_t)((size_t)buf + buf_size); 2381 | 2382 | if (down_read_mmap_lock(mm) != 0) { 2383 | mmput(mm); 2384 | return -4; 2385 | } 2386 | 2387 | { 2388 | VMA_ITERATOR(iter, mm, 0); 2389 | for_each_vma(iter, vma) { 2390 | struct map_entry entry; 2391 | struct file* vm_file; 2392 | struct anon_vma_name *anon_name = NULL; 2393 | if (copy_pos >= end_pos) { 2394 | break; 2395 | } 2396 | entry.start = vma->vm_start; 2397 | entry.end = vma->vm_end; 2398 | entry.flags[0] = (vma->vm_flags & VM_READ) ? 1 : 0; 2399 | entry.flags[1] = (vma->vm_flags & VM_WRITE) ? 1 : 0; 2400 | entry.flags[2] = (vma->vm_flags & VM_EXEC) ? 1 : 0; 2401 | entry.flags[3] = (vma->vm_flags & VM_MAYSHARE) ? 
1 : 0; 2402 | memset(entry.path, 0, sizeof(entry.path)); 2403 | vm_file = get_vm_file(vma); 2404 | if (vm_file) { 2405 | char* path; 2406 | memset(path_buf, 0, sizeof(path_buf)); 2407 | path = d_path(&vm_file->f_path, path_buf, sizeof(path_buf)); 2408 | if (path > 0) { 2409 | strncat(entry.path, path, sizeof(entry.path) - 1); 2410 | } 2411 | } else if (!vma->vm_mm) { 2412 | snprintf(entry.path, sizeof(entry.path), "%s[vdso]", entry.path); 2413 | } else if (vma_is_initial_heap(vma)) { 2414 | snprintf(entry.path, sizeof(entry.path), "%s[heap]", entry.path); 2415 | } else if (vma_is_initial_stack(vma)) { 2416 | snprintf(entry.path, sizeof(entry.path), "%s[stack]", entry.path); 2417 | } else { 2418 | anon_name = anon_vma_name(vma); 2419 | if(anon_name) { 2420 | snprintf(entry.path, sizeof(entry.path), "[anon:%s]", anon_name->name); 2421 | } 2422 | } 2423 | 2424 | if (is_kernel_buf) { 2425 | memcpy((void *)copy_pos, &entry, sizeof(entry)); 2426 | } else { 2427 | if (x_copy_to_user((void *)copy_pos, &entry, sizeof(entry))) { 2428 | break; 2429 | } 2430 | } 2431 | copy_pos += sizeof(entry); 2432 | success_cnt++; 2433 | } 2434 | } 2435 | up_read_mmap_lock(mm); 2436 | mmput(mm); 2437 | 2438 | return success_cnt; 2439 | } 2440 | #endif 2441 | 2442 | //Update: vm_is_stack\vm_is_stack_for_task: /mm/util.c 2443 | //Update: get_proc_maps_list: fs\proc\task_mmu.c 2444 | 2445 | #endif /* PROC_MAPS_H_ */ --------------------------------------------------------------------------------