├── .gitignore ├── ICE ├── ICE.c ├── README.txt ├── System.map-5.15.0-43-generic ├── find_ranges ├── find_ranges.c └── include │ ├── direct_syscall_hook.h │ ├── resolve_kallsyms.h │ └── set_page_flags.h ├── icebreaker ├── KASLR_spoof │ ├── KASLR_spoof.c │ ├── Makefile │ └── include │ │ └── resolve_kallsyms.h └── cr0_write_test │ ├── Makefile │ ├── cr0.c │ └── include │ ├── direct_syscall_hook.h │ ├── resolve_kallsyms.h │ └── set_page_flags.h ├── libvmi.conf └── ubuntu-hvm.cfg /.gitignore: -------------------------------------------------------------------------------- 1 | debian-11.5.0-amd64-netinst.iso 2 | ubuntu-22.04.1-desktop-amd64.iso 3 | vault/ 4 | libvmi/ 5 | ignore/ 6 | -------------------------------------------------------------------------------- /ICE: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/3intermute/hvICE/164a7306c9a22d429895e279890da41b5afd02bc/ICE -------------------------------------------------------------------------------- /ICE.c: -------------------------------------------------------------------------------- 1 | // adapted from libvmi mem_evet example 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | 13 | #define PAGE_SHIFT 12 14 | #define PAGESIZE 4096 15 | 16 | static bool interrupted = false; 17 | 18 | static addr_t kernel_text_start_GVA; 19 | static addr_t kernel_text_end_GVA; 20 | 21 | static void close_handler(int sig) { 22 | (void) sig; 23 | interrupted = true; 24 | } 25 | 26 | event_response_t singlestep_cb(vmi_instance_t vmi, vmi_event_t *event) { 27 | addr_t gfn = GPOINTER_TO_SIZE(event->data); 28 | if (vmi_set_mem_event(vmi, gfn, VMI_MEMACCESS_W, 0) == VMI_FAILURE) { 29 | fprintf(stderr, "ICE: singlestep_cb failed to set page permissions on gfn %llx\n", gfn); 30 | } 31 | 32 | return VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP; 33 | } 34 | 35 | // int register_singlestep_event(vmi_instance_t vmi, 
void *cb, vmi_event_t *singlestep_event_ptr) { 36 | // SETUP_SINGLESTEP_EVENT(singlestep_event_ptr, VMI_BIT_MASK(0, vmi_get_num_vcpus(vmi) - 1), cb, false); 37 | // if (VMI_FAILURE == vmi_register_event(vmi, singlestep_event_ptr)) { 38 | // return 1; 39 | // } 40 | // return 0; 41 | // } 42 | 43 | event_response_t mem_cb(vmi_instance_t vmi, vmi_event_t *event) { // EPT write-violation handler: writes from inside kernel text are let through via singlestep; anything else pauses the VM 44 | if (event->x86_regs->rip > kernel_text_start_GVA && event->x86_regs->rip < kernel_text_end_GVA) { // && (logical), not & (bitwise): this is a range test, not bit math 45 | // printf("ICE; %RIP GVA %llx in kernel text, ignoring violation...\n", event->x86_regs->rip, event->mem_event.gfn << PAGE_SHIFT); 46 | if (vmi_set_mem_event(vmi, event->mem_event.gfn, VMI_MEMACCESS_N, 0) == VMI_FAILURE) { 47 | fprintf(stderr, "ICE: mem_cb failed to set page permissions on gfn %llx\n", event->mem_event.gfn); 48 | } 49 | vmi_event_t *singlestep_event_ptr = (vmi_event_t *) event->data; 50 | singlestep_event_ptr->data = GSIZE_TO_POINTER(event->mem_event.gfn); // hand the faulting gfn to singlestep_cb so it can re-arm write protection 51 | 52 | return VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP; 53 | } 54 | else { 55 | // inject fault, then dump dmesg ?? 56 | printf("ICE: !! EPT WRITE VIOLATION @ GFN %llx\n", event->mem_event.gfn); 57 | printf("ICE: @ GPA %llx\n", event->mem_event.gfn << PAGE_SHIFT); 58 | printf("ICE: @ %%RIP GVA %llx\n", event->x86_regs->rip); // %%RIP: '%' must be escaped, bare %R is an invalid conversion specifier (UB) 59 | printf("ICE: !! 
%%RIP IS NOT IN KERNEL TEXT\n"); 60 | printf("ICE: pausing vm...\n"); 61 | vmi_pause_vm(vmi); 62 | } 63 | 64 | return VMI_EVENT_RESPONSE_NONE; 65 | } 66 | 67 | int register_mem_event_range(vmi_instance_t vmi, addr_t GVA_start, addr_t GVA_end, vmi_mem_access_t access_type, void *cb) { 68 | // printf("ICE: registering mem_event on GVA range (%llx -> %llx)\n", GVA_start, GVA_end); 69 | GVA_start = (GVA_start >> PAGE_SHIFT) << PAGE_SHIFT; 70 | GVA_end = (GVA_end >> PAGE_SHIFT) << PAGE_SHIFT; 71 | 72 | addr_t GPA_start; 73 | addr_t GPA_end; 74 | vmi_translate_kv2p(vmi, GVA_start, &GPA_start); 75 | vmi_translate_kv2p(vmi, GVA_end, &GPA_end); 76 | 77 | uint64_t gfn_start = GPA_start >> PAGE_SHIFT; 78 | uint64_t gfn_end = GPA_end >> PAGE_SHIFT; 79 | 80 | uint64_t n_frames = (GVA_end - GVA_start) / PAGESIZE; 81 | printf("ICE: range aligned to page boundaries (%llx -> %llx), %llu frames\n", GVA_start, GVA_end, n_frames); // %llu: n_frames is unsigned (uint64_t) 82 | 83 | for (uint64_t i = gfn_start; i < gfn_end; i++) { 84 | if (VMI_FAILURE == vmi_set_mem_event(vmi, i, access_type, 0)) { 85 | fprintf(stderr, "ICE: register_mem_event_range failed to set page permissions on gfn %llx\n", i); 86 | return 1; 87 | } 88 | } 89 | 90 | printf("ICE: registered mem_event on GVA range (%llx -> %llx)\n", GVA_start, GVA_end); 91 | 92 | return 0; 93 | } 94 | 95 | int main(int argc, char **argv) { 96 | vmi_instance_t vmi = {0}; 97 | vmi_mode_t mode = {0}; 98 | vmi_event_t mem_event = {0}; 99 | vmi_event_t singlestep_event = {0}; 100 | struct sigaction act = {0}; 101 | vmi_init_data_t *init_data = NULL; 102 | int status = 1; 103 | 104 | act.sa_handler = close_handler; 105 | act.sa_flags = 0; 106 | sigemptyset(&act.sa_mask); 107 | sigaction(SIGHUP, &act, NULL); 108 | sigaction(SIGTERM, &act, NULL); 109 | sigaction(SIGINT, &act, NULL); 110 | sigaction(SIGALRM, &act, NULL); 111 | 112 | if (argc < 2) { 113 | fprintf(stderr, "usage: %s \n", argv[0]); 114 | return status; 115 | } 116 | 117 | unsigned char logo[] = { 118 | 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5f, 0x5f, 119 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5f, 0x5f, 120 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5f, 0x5f, 121 | 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x5c, 0x20, 122 | 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x5c, 0x20, 123 | 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x5c, 0x20, 124 | 0x20, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5c, 125 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5c, 126 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5c, 127 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 128 | 0x2f, 0x5c, 0x2f, 0x3a, 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 129 | 0x2f, 0x5c, 0x2f, 0x3a, 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 130 | 0x2f, 0x5c, 0x2f, 0x3a, 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x0a, 0x20, 0x20, 131 | 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x3a, 0x2f, 0x5c, 0x2f, 0x5f, 0x5f, 132 | 0x2f, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x3a, 0x2f, 0x5c, 0x2f, 0x5f, 0x5f, 133 | 0x2f, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x3a, 0x2f, 0x5c, 0x2f, 0x5f, 0x5f, 134 | 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 135 | 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 136 | 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 137 | 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 138 | 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 139 | 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 140 | 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 141 | 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 142 | 0x2f, 0x3a, 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 143 | 0x2f, 0x3a, 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 
0x20, 144 | 0x2f, 0x3a, 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 145 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x2f, 0x5c, 0x3a, 0x5c, 146 | 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x2f, 0x5c, 0x3a, 0x5c, 147 | 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x2f, 0x5c, 0x3a, 0x5c, 148 | 0x5f, 0x5f, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 149 | 0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x20, 150 | 0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x20, 151 | 0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x20, 0x5c, 0x2f, 0x5f, 0x5f, 0x2f, 0x0a, 152 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 153 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x20, 0x20, 0x20, 0x5c, 154 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x20, 0x20, 0x20, 0x5c, 155 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x5f, 0x5f, 0x5f, 0x0a, 0x20, 0x20, 0x20, 156 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 157 | 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 158 | 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 159 | 0x5f, 0x2f, 0x5c, 0x20, 0x20, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 160 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3a, 161 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3a, 162 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3a, 163 | 0x3a, 0x5c, 0x20, 0x20, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 164 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x3a, 0x5c, 165 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x3a, 0x5c, 166 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x20, 0x20, 0x20, 0x2f, 0x3a, 0x3a, 0x5c, 167 | 0x3a, 0x5c, 0x5f, 0x5f, 0x5c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 168 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x3a, 169 | 0x5c, 0x2f, 0x20, 0x20, 0x2f, 0x20, 
0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x3a, 170 | 0x5c, 0x2f, 0x20, 0x20, 0x2f, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 0x3a, 171 | 0x5c, 0x2f, 0x20, 0x20, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 172 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 173 | 0x2f, 0x20, 0x20, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 174 | 0x2f, 0x20, 0x20, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x3a, 0x5c, 175 | 0x2f, 0x20, 0x20, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 176 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 177 | 0x5f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 178 | 0x5f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5c, 0x2f, 0x5f, 179 | 0x5f, 0x2f, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 180 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x48, 0x56, 0x43, 0x49, 181 | 0x63, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 182 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x72, 0x75, 183 | 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 184 | 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x20, 0x65, 0x6c, 0x65, 0x63, 185 | 0x74, 0x72, 0x6f, 0x6e, 0x69, 0x63, 0x73, 0x20, 0x76, 0x30, 0x2e, 0x34, 186 | 0x0a, 0x0 187 | }; 188 | 189 | printf("%s\n\n", logo); 190 | 191 | char *domain_name = argv[1]; 192 | printf("ICE: connecting to domain %s... 
", domain_name); 193 | 194 | if (VMI_FAILURE == vmi_get_access_mode(NULL, (void*) domain_name, VMI_INIT_DOMAINNAME | VMI_INIT_EVENTS, init_data, &mode)) { 195 | fprintf(stderr, "\nICE: failed to get access mode\n"); 196 | goto error_exit; 197 | } 198 | if (VMI_FAILURE == vmi_init_complete(&vmi, (void*) domain_name, VMI_INIT_DOMAINNAME | VMI_INIT_EVENTS, init_data, VMI_CONFIG_GLOBAL_FILE_ENTRY, NULL, NULL)) { 199 | fprintf(stderr, "\nICE: failed to init libVMI library.\n"); 200 | goto error_exit; 201 | } 202 | 203 | page_mode_t page_mode = vmi_init_paging(vmi, 0); 204 | printf("done\n"); 205 | printf("ICE: init paging mode %i\n", page_mode); 206 | 207 | printf("ICE: pausing vm... "); 208 | if (VMI_FAILURE == vmi_pause_vm(vmi)) { 209 | fprintf(stderr, "Failed to pause vm\n"); 210 | goto error_exit; 211 | } 212 | printf("done\n"); 213 | 214 | addr_t kernel_text_start_GVA; 215 | addr_t kernel_text_end_GVA; 216 | addr_t kernel_text_start_GPA; 217 | addr_t kernel_text_end_GPA; 218 | 219 | vmi_translate_ksym2v(vmi, "_text", &kernel_text_start_GVA); 220 | vmi_translate_kv2p(vmi, kernel_text_start_GVA, &kernel_text_start_GPA); 221 | printf("ICE: kernel text start @ GVA %llx\n", kernel_text_start_GVA); 222 | printf(" @ GPA %llx\n", kernel_text_start_GPA); 223 | 224 | vmi_translate_ksym2v(vmi, "_etext", &kernel_text_end_GVA); 225 | vmi_translate_kv2p(vmi, kernel_text_end_GVA, &kernel_text_end_GPA); 226 | printf("ICE: kernel text end @ GVA %llx\n", kernel_text_end_GVA); 227 | printf(" @ GPA %llx\n", kernel_text_end_GPA); 228 | 229 | 230 | addr_t kernel_rodata_start_GVA; 231 | addr_t kernel_rodata_end_GVA; 232 | addr_t kernel_rodata_start_GPA; 233 | addr_t kernel_rodata_end_GPA; 234 | 235 | vmi_translate_ksym2v(vmi, "__start_rodata", &kernel_rodata_start_GVA); 236 | vmi_translate_kv2p(vmi, kernel_text_start_GVA, &kernel_rodata_start_GPA); 237 | printf("ICE: kernel rodata start @ GVA %llx\n", kernel_rodata_start_GVA); 238 | printf(" @ GPA %llx\n", kernel_rodata_start_GPA); 239 | 
240 | vmi_translate_ksym2v(vmi, "__end_rodata", &kernel_rodata_end_GVA); 241 | vmi_translate_kv2p(vmi, kernel_text_end_GVA, &kernel_rodata_end_GPA); 242 | printf("ICE: kernel rodata end @ GVA %llx\n", kernel_rodata_end_GVA); 243 | printf(" @ GPA %llx\n", kernel_rodata_end_GPA); 244 | 245 | // if (register_singlestep_event(vmi, singlestep_cb, &singlestep_event) 246 | // != 0) { 247 | // goto error_exit; 248 | // } 249 | SETUP_SINGLESTEP_EVENT(&singlestep_event, VMI_BIT_MASK(0, vmi_get_num_vcpus(vmi) - 1), singlestep_cb, false); 250 | if (VMI_FAILURE == vmi_register_event(vmi, &singlestep_event)) { 251 | goto error_exit; 252 | } 253 | 254 | mem_event.data = (void *) &singlestep_event; 255 | SETUP_MEM_EVENT(&mem_event, ~0ULL, VMI_MEMACCESS_W, mem_cb, true); 256 | if (VMI_FAILURE == vmi_register_event(vmi, &mem_event)) { 257 | fprintf(stderr, "ICE: register_mem_event_range failed\n"); 258 | return 1; 259 | } 260 | 261 | if (register_mem_event_range(vmi, kernel_text_start_GVA, kernel_text_end_GVA, VMI_MEMACCESS_W, mem_cb) 262 | != 0) { 263 | goto error_exit; 264 | } 265 | 266 | if (register_mem_event_range(vmi, kernel_rodata_start_GVA, kernel_rodata_end_GVA, VMI_MEMACCESS_W, mem_cb) 267 | != 0) { 268 | goto error_exit; 269 | } 270 | 271 | if (VMI_FAILURE == vmi_resume_vm(vmi)) { 272 | fprintf(stderr, "ICE: failed to resume vm\n"); 273 | goto error_exit; 274 | } 275 | printf("ICE: VM resumed\n"); 276 | 277 | printf("ICE: EPT write protection set on GVA ranges (%llx -> %llx)\n", kernel_text_start_GVA, kernel_text_end_GVA); 278 | printf("ICE: (%llx -> %llx)\n", kernel_rodata_start_GVA, kernel_rodata_end_GVA); 279 | printf("ICE: waiting for violations...\n"); 280 | 281 | while (!interrupted) { 282 | vmi_events_listen(vmi,500); 283 | } 284 | 285 | status = 0; 286 | error_exit: 287 | // free all mem events and free array 288 | // unregister_mem_event_range(vmi, mem_event_arr, nframes); 289 | vmi_clear_event(vmi, &mem_event, NULL); 290 | vmi_resume_vm(vmi); 291 | 
vmi_destroy(vmi); 292 | 293 | return status; 294 | } 295 | -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | ___ ___ ___ 2 | /\ \ /\ \ /\ \ 3 | _\:\ \ _\:\ \ _\:\ \ 4 | /\/::\__\ /\/::\__\ /\/::\__\ 5 | \::/\/__/ \::/\/__/ \::/\/__/ 6 | \:\__\___ \:\__\___ \:\__\___ 7 | \/__/\ \ \/__/\ \ \/__/\ \ 8 | /::\ \ /::\ \ /::\ \ 9 | /:/\:\__\ /:/\:\__\ /:/\:\__\ 10 | \:\ \/__/ \:\ \/__/ \:\ \/__/ 11 | \:\__\___ \:\__\___ \:\__\___ 12 | \/__/\ \ \/__/\ \ \/__/\ \ 13 | /::\ \ /::\ \ /::\ \ 14 | /::\:\__\ /::\:\__\ /::\:\__\ 15 | \:\:\/ / \:\:\/ / \:\:\/ / 16 | \:\/ / \:\/ / \:\/ / 17 | \/__/ \/__/ \/__/ 18 | 19 | HVice 20 | intrusion countermeasure electronics v0.4 21 | 22 | featured on tmp.0ut vol3: https://tmpout.sh/3/ 23 | 24 | HVice is a proof of concept implementation of hypervisor enforced code/data integrity for the linux kernel using xen and libvmi. 25 | it requires no modification to the guest OS. 26 | HVice achieves this setting all pages between _text and _etext and all of kernel rodata to not writable in the guests EPT, 27 | then pausing the VM and logging the violation if an attempted write did not come from within kernel text. 28 | writes by code within kernel text are ignored to prevent false positives due to kernel self patching. 29 | 30 | 31 | example: 32 | kernel self protection is insufficiently secure. 33 | despite recent kernel versions preventing writes to cr0 and setting protected pages as writeable via kernel functions, 34 | bypassing these protections is as simple as writing to either cr0 or the PTE directly as shown in this snippet. 
35 | 36 | 37 | `` 38 | extern unsigned long __force_order ; 39 | inline void mywrite_cr0(unsigned long cr0) { 40 | asm volatile("mov %0,%%cr0" : "+r"(cr0), "+m"(__force_order)); 41 | } 42 | 43 | void disable_write_protection(void) { 44 | unsigned long cr0 = read_cr0(); 45 | clear_bit(16, &cr0); 46 | mywrite_cr0(cr0); 47 | } 48 | 49 | static int __init cr0_write_init(void) 50 | { 51 | char **text = kallsyms_lookup_name_("_text"); 52 | disable_write_protection(); 53 | 54 | text[0] = 0x90; // overwrite first byte of kernel text with nop 55 | 56 | return 0; 57 | } 58 | `` 59 | 60 | the above code will execute on an unprotected system but on a system protected with HVCIce, a violation is triggered and the vm is paused 61 | 62 | `` 63 | ICE: connecting to domain ubuntu-hvm... done 64 | ICE: init paging mode 4 65 | ICE: pausing vm... done 66 | ICE: kernel text start @ GVA ffffffff9be00000 67 | @ GPA 16a00000 68 | ICE: kernel text end @ GVA ffffffff9ce02520 69 | @ GPA 17a02520 70 | ICE: kernel rodata start @ GVA ffffffff9d000000 71 | @ GPA 16a00000 72 | ICE: kernel rodata end @ GVA ffffffff9da8c000 73 | @ GPA 17a02520 74 | ICE: range aligned to page boundaries (ffffffff9be00000 -> ffffffff9ce02000), 4098 frames 75 | ICE: registered mem_event on GVA range (ffffffff9be00000 -> ffffffff9ce02000) 76 | ICE: range aligned to page boundaries (ffffffff9d000000 -> ffffffff9da8c000), 2700 frames 77 | ICE: registered mem_event on GVA range (ffffffff9d000000 -> ffffffff9da8c000) 78 | ICE: VM resumed 79 | ICE: EPT write protection set on GVA ranges (ffffffff9be00000 -> ffffffff9ce02520) 80 | ICE: (ffffffff9d000000 -> ffffffff9da8c000) 81 | ICE: waiting for violations... 82 | ICE: !! EPT WRITE VIOLATION @ GFN 16a00 83 | ICE: @ GPA 16a00000 84 | ICE: @ %RIP GVA 7fa8d9e7895f 85 | ICE: !! %RIP IS NOT IN KERNEL TEXT 86 | ICE: pausing vm... 
87 | `` 88 | 89 | internals: 90 | libvmi exposes a simple but powerful API, first hvICE initializes libvmi and pauses the guest vm: 91 | 92 | `` 93 | if (VMI_FAILURE == vmi_get_access_mode(NULL, (void*) domain_name, VMI_INIT_DOMAINNAME | VMI_INIT_EVENTS, init_data, &mode)) { 94 | goto error_exit; 95 | } 96 | if (VMI_FAILURE == vmi_init_complete(&vmi, (void*) domain_name, VMI_INIT_DOMAINNAME | VMI_INIT_EVENTS, init_data, VMI_CONFIG_GLOBAL_FILE_ENTRY, NULL, NULL)) { 97 | goto error_exit; 98 | } 99 | 100 | page_mode_t page_mode = vmi_init_paging(vmi, 0); 101 | 102 | if (VMI_FAILURE == vmi_pause_vm(vmi)) { 103 | goto error_exit; 104 | } 105 | 106 | `` 107 | 108 | the libvmi function vmi_translate_ksym2v ** finds the guest virtual address of a kernel symbol and, 109 | the libvmi function vmi_translate_kv2p translates a guest virtual address to a guest physical address, 110 | hvICE can use these functions to find: 111 | - the start and end addresses of kernel text (via the _text and _etext symbols) 112 | - the start and end addresses of kernel rodata (via the __start_rodata and __end_rodata symbols). 
113 | 114 | `` 115 | addr_t kernel_text_start_GVA; 116 | addr_t kernel_text_end_GVA; 117 | addr_t kernel_text_start_GPA; 118 | addr_t kernel_text_end_GPA; 119 | 120 | vmi_translate_ksym2v(vmi, "_text", &kernel_text_start_GVA); 121 | vmi_translate_kv2p(vmi, kernel_text_start_GVA, &kernel_text_start_GPA); 122 | 123 | vmi_translate_ksym2v(vmi, "_etext", &kernel_text_end_GVA); 124 | vmi_translate_kv2p(vmi, kernel_text_end_GVA, &kernel_text_end_GPA); 125 | 126 | addr_t kernel_rodata_start_GVA; 127 | addr_t kernel_rodata_end_GVA; 128 | addr_t kernel_rodata_start_GPA; 129 | addr_t kernel_rodata_end_GPA; 130 | 131 | vmi_translate_ksym2v(vmi, "__start_rodata", &kernel_rodata_start_GVA); 132 | vmi_translate_kv2p(vmi, kernel_text_start_GVA, &kernel_rodata_start_GPA); 133 | 134 | vmi_translate_ksym2v(vmi, "__end_rodata", &kernel_rodata_end_GVA); 135 | vmi_translate_kv2p(vmi, kernel_text_end_GVA, &kernel_rodata_end_GPA); 136 | `` 137 | 138 | ** it is possible to spoof vmi_translate_ksym2v, see icebreaker 139 | 140 | hvICE then initializes a singlestep event and mem event: 141 | if a protected page is written to, an EPT violation occurs and libvmi calls the mem_cb callback. 
142 | to handle kernel self patching, mem_cb checks if the write came from within kernel text, 143 | and if it did, it relaxes the permissions on the protected page 144 | then toggles singlestep on, calling the singlestep_cb callback which allows the instruction that caused the violation to execute, 145 | then resets the permissions on the protected page to prevent writes once more and toggles single step off: 146 | 147 | `` 148 | SETUP_SINGLESTEP_EVENT(&singlestep_event, VMI_BIT_MASK(0, vmi_get_num_vcpus(vmi) - 1), singlestep_cb, false); 149 | if (VMI_FAILURE == vmi_register_event(vmi, &singlestep_event)) { 150 | goto error_exit; 151 | } 152 | 153 | mem_event.data = (void *) &singlestep_event; 154 | SETUP_MEM_EVENT(&mem_event, ~0ULL, VMI_MEMACCESS_W, mem_cb, true); 155 | if (VMI_FAILURE == vmi_register_event(vmi, &mem_event)) { 156 | return 1; 157 | } 158 | `` 159 | ... 160 | `` 161 | event_response_t mem_cb(vmi_instance_t vmi, vmi_event_t *event) { 162 | if (event->x86_regs->rip > kernel_text_start_GVA & event->x86_regs->rip < kernel_text_end_GVA) { 163 | if (vmi_set_mem_event(vmi, event->mem_event.gfn, VMI_MEMACCESS_N, 0) == VMI_FAILURE) { 164 | } 165 | vmi_event_t *singlestep_event_ptr = (vmi_event_t *) event->data; 166 | singlestep_event_ptr->data = GSIZE_TO_POINTER(event->mem_event.gfn); 167 | 168 | return VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP; 169 | } 170 | else { 171 | vmi_pause_vm(vmi); 172 | } 173 | 174 | return VMI_EVENT_RESPONSE_NONE; 175 | } 176 | `` 177 | ... 
178 | `` 179 | event_response_t singlestep_cb(vmi_instance_t vmi, vmi_event_t *event) { 180 | addr_t gfn = GPOINTER_TO_SIZE(event->data); 181 | if (vmi_set_mem_event(vmi, gfn, VMI_MEMACCESS_W, 0) == VMI_FAILURE) { 182 | } 183 | 184 | return VMI_EVENT_RESPONSE_TOGGLE_SINGLESTEP; 185 | } 186 | `` 187 | 188 | libvmi then registers memory events on the range of pages from _text to _etext and __start_rodata to __end_rodata: 189 | as kernel text is contiguous in guest physical memory, it is possible to find the gfns needed by- 190 | simply iterating through every gfn between the start and end gfn: 191 | 192 | 193 | `` 194 | if (register_mem_event_range(vmi, kernel_text_start_GVA, kernel_text_end_GVA, VMI_MEMACCESS_W, mem_cb) 195 | != 0) { 196 | goto error_exit; 197 | } 198 | 199 | if (register_mem_event_range(vmi, kernel_rodata_start_GVA, kernel_rodata_end_GVA, VMI_MEMACCESS_W, mem_cb) 200 | != 0) { 201 | goto error_exit; 202 | } 203 | `` 204 | ... 205 | `` 206 | int register_mem_event_range(vmi_instance_t vmi, addr_t GVA_start, addr_t GVA_end, vmi_mem_access_t access_type, void *cb) { 207 | GVA_start = (GVA_start >> PAGE_SHIFT) << PAGE_SHIFT; 208 | GVA_end = (GVA_end >> PAGE_SHIFT) << PAGE_SHIFT; 209 | 210 | addr_t GPA_start; 211 | addr_t GPA_end; 212 | vmi_translate_kv2p(vmi, GVA_start, &GPA_start); 213 | vmi_translate_kv2p(vmi, GVA_end, &GPA_end); 214 | 215 | uint64_t gfn_start = GPA_start >> PAGE_SHIFT; 216 | uint64_t gfn_end = GPA_end >> PAGE_SHIFT; 217 | 218 | uint64_t n_frames = (GVA_end - GVA_start) / PAGESIZE; 219 | 220 | for (uint64_t i = gfn_start; i < gfn_end; i++) { 221 | if (VMI_FAILURE == vmi_set_mem_event(vmi, i, access_type, 0)) { 222 | return 1; 223 | } 224 | } 225 | 226 | return 0; 227 | } 228 | `` 229 | 230 | hvICE then resumes the vm and waits for a violation: 231 | `` 232 | if (VMI_FAILURE == vmi_resume_vm(vmi)) { 233 | goto error_exit; 234 | } 235 | 236 | while (!interrupted) { 237 | vmi_events_listen(vmi,500); 238 | } 239 | `` 240 | 241 | 242 | 
icebreaker: 243 | libvmi brute forces the KASLR offset by scanning the range of possible offsets and stopping when it finds a mapped address. 244 | `` 245 | static status_t get_kaslr_offset_ia32e(vmi_instance_t vmi) 246 | { 247 | addr_t va, pa; 248 | addr_t kernel_text_start = 0xffffffff81000000; 249 | addr_t kernel_text_end = kernel_text_start + (1024*1024*1024); 250 | 251 | linux_instance_t linux_instance = vmi->os_data; 252 | 253 | vmi->init_task = linux_instance->init_task_fixed; 254 | 255 | for (va = kernel_text_start; va < kernel_text_end; va += 0x200000) { 256 | if ( vmi_translate_kv2p(vmi, va, &pa) == VMI_SUCCESS ) { 257 | linux_instance->kaslr_offset = va - kernel_text_start; 258 | vmi->init_task += linux_instance->kaslr_offset; 259 | dbprint(VMI_DEBUG_MISC, "**calculated KASLR offset in 64-bit mode: 0x%"PRIx64"\n", linux_instance->kaslr_offset); 260 | return VMI_SUCCESS; 261 | } 262 | } 263 | return VMI_FAILURE; 264 | } 265 | `` 266 | 267 | it is possible to spoof the offset with a kernel module in the guest that maps an address before the real KASLR kernel text start. 
268 | see: icebreaker/KASLR_spoof/KASLR_spoof.c 269 | `` 270 | pgd = pgd_offset(init_mm_ptr, SPOOFED_KERNEL_TEXT_START); 271 | 272 | p4d = p4d_alloc_(init_mm_ptr, pgd, SPOOFED_KERNEL_TEXT_START); 273 | if (!p4d) { 274 | return -ENOMEM; 275 | } 276 | 277 | pud = pud_alloc_(init_mm_ptr, p4d, SPOOFED_KERNEL_TEXT_START); 278 | if (!pud) { 279 | return -ENOMEM; 280 | } 281 | 282 | pmd = pmd_alloc_(init_mm_ptr, pud, SPOOFED_KERNEL_TEXT_START); 283 | if (!pmd) { 284 | return -ENOMEM; 285 | } 286 | 287 | ptep = pte_offset_map(pmd, SPOOFED_KERNEL_TEXT_START); 288 | 289 | uint64_t dummy_page = vmalloc(PAGE_SIZE); 290 | 291 | pte_t new_pte = pfn_pte(virt_to_phys(dummy_page) >> PAGE_SHIFT, PAGE_KERNEL); 292 | set_pte(ptep, new_pte); 293 | `` 294 | 295 | todo: 296 | - icebreaker: libvmi brute-forces KASLR offset or uses init_task to calculate it (DONE) 297 | - protect rodata (DONE) 298 | - protect important structures via sub-page write protection 299 | 300 | 301 | compilation: 302 | gcc ICE.c -o ICE -lvmi -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include -lglib-2.0 -g 303 | 304 | usage: 305 | sudo ./ICE 306 | -------------------------------------------------------------------------------- /find_ranges/find_ranges.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "include/resolve_kallsyms.h" 8 | 9 | MODULE_LICENSE("GPL"); 10 | MODULE_AUTHOR("wintermute#0440"); 11 | MODULE_DESCRIPTION("find kernel code start/end and libvmi offsets"); 12 | MODULE_VERSION("0.1"); 13 | 14 | static int __init find_ranges_init(void) { 15 | printk(KERN_INFO "ICE: kernel module loaded"); 16 | printk(KERN_INFO "ICE: kernel code start @ GVA %llx\n", kallsyms_lookup_name_("_text")); 17 | printk(KERN_INFO " @ GPA %llx\n", virt_to_phys(kallsyms_lookup_name_("_text"))); 18 | printk(KERN_INFO "ICE: kernel code end @ GVA %llx\n", kallsyms_lookup_name_("_etext")); 19 | 
printk(KERN_INFO " @ GPA %llx\n", virt_to_phys(kallsyms_lookup_name_("_etext"))); 20 | 21 | 22 | // adapted from libvmi offset finder 23 | struct task_struct *p = NULL; 24 | unsigned long commOffset; 25 | unsigned long tasksOffset; 26 | unsigned long mmOffset; 27 | unsigned long pidOffset; 28 | unsigned long pgdOffset; 29 | unsigned long addrOffset; 30 | 31 | p = current; 32 | 33 | if (p != NULL) { 34 | commOffset = (unsigned long) (&(p->comm)) - (unsigned long) (p); 35 | tasksOffset = 36 | (unsigned long) (&(p->tasks)) - (unsigned long) (p); 37 | mmOffset = (unsigned long) (&(p->mm)) - (unsigned long) (p); 38 | pidOffset = (unsigned long) (&(p->pid)) - (unsigned long) (p); 39 | pgdOffset = 40 | (unsigned long) (&(p->mm->pgd)) - (unsigned long) (p->mm); 41 | addrOffset = 42 | (unsigned long) (&(p->mm->start_code)) - 43 | (unsigned long) (p->mm); 44 | 45 | printk(KERN_INFO "ICE: generated libvmi config..."); 46 | printk(KERN_INFO "[domain name] {\n"); 47 | printk(KERN_INFO " ostype = \"Linux\";\n"); 48 | printk(KERN_INFO " sysmap = \"[insert path here]\";\n"); 49 | printk(KERN_INFO " linux_name = 0x%x;\n", 50 | (unsigned int) commOffset); 51 | printk(KERN_INFO " linux_tasks = 0x%x;\n", 52 | (unsigned int) tasksOffset); 53 | printk(KERN_INFO " linux_mm = 0x%x;\n", 54 | (unsigned int) mmOffset); 55 | printk(KERN_INFO " linux_pid = 0x%x;\n", 56 | (unsigned int) pidOffset); 57 | printk(KERN_INFO " linux_pgd = 0x%x;\n", 58 | (unsigned int) pgdOffset); 59 | printk(KERN_INFO "}\n"); 60 | } else { 61 | printk(KERN_INFO 62 | "ICE: found no process to populate task_struct.\n"); 63 | } 64 | 65 | return 0; 66 | 67 | return 0; 68 | } 69 | 70 | static void __exit find_ranges_exit(void) { 71 | printk(KERN_INFO "ICE: kernel module unloaded\n"); 72 | } 73 | 74 | module_init(find_ranges_init); 75 | module_exit(find_ranges_exit); 76 | -------------------------------------------------------------------------------- /find_ranges/include/direct_syscall_hook.h: 
-------------------------------------------------------------------------------- 1 | #ifndef _RESOLV_DIRECT_HOOK_H_ 2 | #define _RESOLV_DIRECT_HOOK_H_ 3 | 4 | #include 5 | #include "resolve_kallsyms.h" 6 | #include "set_page_flags.h" 7 | 8 | struct direct_syscall_hook { 9 | int number; 10 | void *new; 11 | void *orig; 12 | }; 13 | 14 | static void **sys_call_table_addr = NULL; 15 | 16 | static int resolve_syscall_table(void) { 17 | sys_call_table_addr = kallsyms_lookup_name_("sys_call_table"); 18 | 19 | pte_t *sys_call_table_ptep = page_from_virt(sys_call_table_addr); 20 | if (!sys_call_table_ptep) { 21 | pr_info("debug: page_from_virt of %pK failed\n", sys_call_table_addr); 22 | return -ENOENT; 23 | } 24 | pr_info("debug: page_from_virt of %pK success, pte @ %pK\n", sys_call_table_addr, sys_call_table_ptep); 25 | 26 | // TODO: unset write bit after hook is finished 27 | pr_info("debug: ptep @ %pK, pte_write flag (%i)\n", sys_call_table_ptep, pte_write(*sys_call_table_ptep)); 28 | pr_info("debug: flipping write protect flag...\n"); 29 | pte_flip_write_protect(sys_call_table_ptep); 30 | pr_info("debug: ptep @ %pK, pte_write flag (%i)\n", sys_call_table_ptep, pte_write(*sys_call_table_ptep)); 31 | return 0; 32 | } 33 | 34 | void hook_syscall(struct direct_syscall_hook *hook) { 35 | if (!sys_call_table_addr) { 36 | resolve_syscall_table(); 37 | } 38 | hook->orig = sys_call_table_addr[hook->number]; 39 | // pte_flip_write_protect(page_from_virt(&sys_call_table_addr[hook->number])); 40 | sys_call_table_addr[hook->number] = hook->new; 41 | pr_info("debug: hook_syscall of #%i, orig @ %pK, new @%pK, success\n", hook->number, hook->orig, hook->new); 42 | } 43 | 44 | void unhook_syscall(struct direct_syscall_hook *hook) { 45 | if (!sys_call_table_addr) { 46 | resolve_syscall_table(); 47 | } 48 | sys_call_table_addr[hook->number] = hook->orig; 49 | pr_info("debug: unhook_syscall of #%i, orig restored @ %pK, new @%pK, success\n", hook->number, hook->orig, hook->new); 50 | } 51 
| 52 | #endif 53 | -------------------------------------------------------------------------------- /find_ranges/include/resolve_kallsyms.h: -------------------------------------------------------------------------------- 1 | #ifndef _RESOLV_KALLSYMS_H_ 2 | #define _RESOLV_KALLSYMS_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | typedef uintptr_t (*kallsyms_lookup_name_t)(const char *symbol_name); 9 | static kallsyms_lookup_name_t kallsyms_lookup_name__ = NULL; 10 | 11 | uintptr_t kprobe_get_func_addr(const char *func_name) { 12 | static struct kprobe kp; 13 | kp.symbol_name = func_name; 14 | if (register_kprobe(&kp) < 0) { 15 | pr_info("debug: kprobe_get_func_addr of %s failed\n", func_name); 16 | return -ENOENT; 17 | } 18 | uintptr_t tmp = kp.addr; 19 | unregister_kprobe(&kp); 20 | pr_info("debug: kprobe_get_func_addr %s @ %pK\n", func_name, tmp); 21 | return tmp; 22 | } 23 | 24 | uintptr_t kallsyms_lookup_name_(const char *symbol_name) { 25 | if (!kallsyms_lookup_name__) { 26 | kallsyms_lookup_name__ = kprobe_get_func_addr("kallsyms_lookup_name"); 27 | } 28 | uintptr_t tmp = kallsyms_lookup_name__(symbol_name); 29 | pr_info("debug: kallsyms_lookup_name_ %s @ %pK\n", symbol_name, tmp); 30 | return tmp; 31 | } 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /find_ranges/include/set_page_flags.h: -------------------------------------------------------------------------------- 1 | #ifndef _SET_PAGE_FLAGS_H_ 2 | #define _SET_PAGE_FLAGS_H_ 3 | 4 | #include 5 | #include "resolve_kallsyms.h" 6 | 7 | pte_t *page_from_virt(uintptr_t addr) { 8 | pgd_t *pgd; 9 | pud_t *pud; 10 | pmd_t *pmd; 11 | pte_t *ptep; 12 | 13 | struct mm_struct *init_mm_ptr = kallsyms_lookup_name_("init_mm"); 14 | pgd = pgd_offset(init_mm_ptr, addr); 15 | if (pgd_none(*pgd) || pgd_bad(*pgd)) { 16 | return NULL; 17 | } 18 | 19 | pud = pud_offset(pgd, addr); 20 | if (pud_none(*pud) || pud_bad(*pud)) { 21 | return NULL; 22 | } 23 | 24 | pmd = 
pmd_offset(pud, addr); 25 | if (pmd_none(*pmd) || pmd_bad(*pmd)) { 26 | return NULL; 27 | } 28 | 29 | ptep = pte_offset_map(pmd, addr); 30 | if (!ptep) { 31 | return NULL; 32 | } 33 | 34 | return ptep; 35 | } 36 | 37 | void pte_flip_write_protect(pte_t *ptep) { 38 | if (!pte_write(*ptep)) { 39 | *ptep = pte_mkwrite(pte_mkdirty(*ptep)); 40 | *ptep = clear_pte_bit(*ptep, __pgprot((_AT(pteval_t, 1) << 7))); 41 | return; 42 | } 43 | pte_wrprotect(*ptep); 44 | } 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /icebreaker/KASLR_spoof/KASLR_spoof.c: -------------------------------------------------------------------------------- 1 | // paging mode compatability mode, libvmi brute forces https://github.com/libvmi/libvmi/blob/master/libvmi/os/linux/core.c 2 | // - spoof brute force 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "include/resolve_kallsyms.h" 13 | 14 | MODULE_LICENSE("GPL"); 15 | MODULE_AUTHOR("wintermute#0440"); 16 | MODULE_DESCRIPTION("spoof libvmi KASLR offset"); 17 | MODULE_VERSION("0.1"); 18 | 19 | #define KERNEL_TEXT_START 0xffffffff81000000 20 | #define SPOOFED_KASLR_OFFSET 0x00 21 | #define SPOOFED_KERNEL_TEXT_START KERNEL_TEXT_START + SPOOFED_KASLR_OFFSET 22 | 23 | struct mm_struct *init_mm_ptr; 24 | 25 | // int (*__pte_alloc_)(struct mm_struct *mm, pmd_t *pmd); 26 | // 27 | // #define pte_alloc_map(mm, pmd, address) \ 28 | // (__pte_alloc_(mm, pmd) ? NULL : pte_offset_map(pmd, address)) 29 | 30 | int (*__pmd_alloc_)(struct mm_struct *mm, pud_t *pud, unsigned long address); 31 | int (*__pud_alloc_)(struct mm_struct *mm, p4d_t *p4d, unsigned long address); 32 | int (*__p4d_alloc_)(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 33 | 34 | static inline p4d_t *p4d_alloc_(struct mm_struct *mm, pgd_t *pgd, 35 | unsigned long address) 36 | { 37 | return (unlikely(pgd_none(*pgd)) && __p4d_alloc_(mm, pgd, address)) ? 
38 | NULL : p4d_offset(pgd, address); 39 | } 40 | 41 | static inline pud_t *pud_alloc_(struct mm_struct *mm, p4d_t *p4d, 42 | unsigned long address) 43 | { 44 | return (unlikely(p4d_none(*p4d)) && __pud_alloc_(mm, p4d, address)) ? 45 | NULL : pud_offset(p4d, address); 46 | } 47 | 48 | static inline pmd_t *pmd_alloc_(struct mm_struct *mm, pud_t *pud, unsigned long address) 49 | { 50 | return (unlikely(pud_none(*pud)) && __pmd_alloc_(mm, pud, address)) ? 51 | NULL: pmd_offset(pud, address); 52 | } 53 | 54 | static int __init icebreaker_init(void) 55 | { 56 | struct mm_struct *init_mm_ptr = kallsyms_lookup_name_("init_mm"); 57 | // __pte_alloc_ = kallsyms_lookup_name_("__pte_alloc"); 58 | __pmd_alloc_ = kallsyms_lookup_name_("__pmd_alloc"); 59 | __pud_alloc_ = kallsyms_lookup_name_("__pud_alloc"); 60 | __p4d_alloc_ = kallsyms_lookup_name_("__p4d_alloc"); 61 | 62 | pgd_t *pgd; 63 | p4d_t *p4d; 64 | pud_t *pud; 65 | pmd_t *pmd; 66 | pte_t *ptep; 67 | 68 | pgd = pgd_offset(init_mm_ptr, SPOOFED_KERNEL_TEXT_START); 69 | 70 | p4d = p4d_alloc_(init_mm_ptr, pgd, SPOOFED_KERNEL_TEXT_START); 71 | if (!p4d) { 72 | printk(KERN_INFO "ICEBREAKER: failed to allocate p4d"); 73 | return -ENOMEM; 74 | } 75 | 76 | pud = pud_alloc_(init_mm_ptr, p4d, SPOOFED_KERNEL_TEXT_START); 77 | if (!pud) { 78 | printk(KERN_INFO "ICEBREAKER: failed to allocate pud"); 79 | return -ENOMEM; 80 | } 81 | 82 | pmd = pmd_alloc_(init_mm_ptr, pud, SPOOFED_KERNEL_TEXT_START); 83 | if (!pmd) { 84 | printk(KERN_INFO "ICEBREAKER: failed to allocate pmd"); 85 | return -ENOMEM; 86 | } 87 | 88 | ptep = pte_offset_map(pmd, SPOOFED_KERNEL_TEXT_START); 89 | printk("ICEBREAKER: spoofed kernel text start ptep @ %llx\n", ptep); 90 | 91 | uint64_t dummy_page = vmalloc(PAGE_SIZE); 92 | 93 | pte_t new_pte = pfn_pte(virt_to_phys(dummy_page) >> PAGE_SHIFT, PAGE_KERNEL); 94 | set_pte(ptep, new_pte); 95 | 96 | // if (pte_alloc_map(init_mm_ptr, pmd, SPOOFED_KERNEL_TEXT_START)) { 97 | // printk(KERN_INFO "ICEBREAKER: failed to 
allocate pte"); 98 | // return -ENOMEM; 99 | // } 100 | 101 | printk("ICEBREAKER: spoofed kernel KASLR offset to %llx, spoofed kernel text start @ %llx\n", SPOOFED_KASLR_OFFSET, SPOOFED_KERNEL_TEXT_START); 102 | 103 | return 0; 104 | } 105 | 106 | static void __exit icebreaker_exit(void) 107 | { 108 | printk(KERN_INFO "ICEBREAKER: module unloaded\n"); 109 | } 110 | 111 | module_init(icebreaker_init); 112 | module_exit(icebreaker_exit); 113 | -------------------------------------------------------------------------------- /icebreaker/KASLR_spoof/Makefile: -------------------------------------------------------------------------------- 1 | obj-m += KASLR_spoof.o 2 | 3 | all: 4 | make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules 5 | 6 | clean: 7 | make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean 8 | -------------------------------------------------------------------------------- /icebreaker/KASLR_spoof/include/resolve_kallsyms.h: -------------------------------------------------------------------------------- 1 | #ifndef _RESOLV_KALLSYMS_H_ 2 | #define _RESOLV_KALLSYMS_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | typedef uintptr_t (*kallsyms_lookup_name_t)(const char *symbol_name); 9 | static kallsyms_lookup_name_t kallsyms_lookup_name__ = NULL; 10 | 11 | uintptr_t kprobe_get_func_addr(const char *func_name) { 12 | static struct kprobe kp; 13 | kp.symbol_name = func_name; 14 | if (register_kprobe(&kp) < 0) { 15 | pr_info("debug: kprobe_get_func_addr of %s failed\n", func_name); 16 | return -ENOENT; 17 | } 18 | uintptr_t tmp = kp.addr; 19 | unregister_kprobe(&kp); 20 | pr_info("debug: kprobe_get_func_addr %s @ %pK\n", func_name, tmp); 21 | return tmp; 22 | } 23 | 24 | uintptr_t kallsyms_lookup_name_(const char *symbol_name) { 25 | if (!kallsyms_lookup_name__) { 26 | kallsyms_lookup_name__ = kprobe_get_func_addr("kallsyms_lookup_name"); 27 | } 28 | uintptr_t tmp = kallsyms_lookup_name__(symbol_name); 29 | pr_info("debug: 
kallsyms_lookup_name_ %s @ %pK\n", symbol_name, tmp); 30 | return tmp; 31 | } 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /icebreaker/cr0_write_test/Makefile: -------------------------------------------------------------------------------- 1 | obj-m += cr0.o 2 | 3 | all: 4 | make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules 5 | 6 | clean: 7 | make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean 8 | -------------------------------------------------------------------------------- /icebreaker/cr0_write_test/cr0.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "include/resolve_kallsyms.h" 5 | 6 | MODULE_LICENSE("GPL"); 7 | MODULE_AUTHOR("wintermute#0440"); 8 | MODULE_DESCRIPTION("overwrite sys_call_table write protect via setting cr0"); 9 | MODULE_VERSION("0.1"); 10 | 11 | extern unsigned long __force_order ; 12 | inline void mywrite_cr0(unsigned long cr0) { 13 | asm volatile("mov %0,%%cr0" : "+r"(cr0), "+m"(__force_order)); 14 | } 15 | 16 | void enable_write_protection(void) { 17 | unsigned long cr0 = read_cr0(); 18 | set_bit(16, &cr0); 19 | mywrite_cr0(cr0); 20 | } 21 | 22 | void disable_write_protection(void) { 23 | unsigned long cr0 = read_cr0(); 24 | clear_bit(16, &cr0); 25 | mywrite_cr0(cr0); 26 | } 27 | 28 | static int __init cr0_init(void) 29 | { 30 | printk(KERN_INFO "cr0: loaded\n"); 31 | char **text = kallsyms_lookup_name_("_text"); 32 | disable_write_protection(); 33 | text[0] = 0x90; 34 | enable_write_protection(); 35 | return 0; 36 | } 37 | 38 | static void __exit cr0_exit(void) 39 | { 40 | printk(KERN_INFO "cr0: unloaded\n"); 41 | } 42 | 43 | module_init(cr0_init); 44 | module_exit(cr0_exit); 45 | -------------------------------------------------------------------------------- /icebreaker/cr0_write_test/include/direct_syscall_hook.h: 
-------------------------------------------------------------------------------- 1 | #ifndef _RESOLV_DIRECT_HOOK_H_ 2 | #define _RESOLV_DIRECT_HOOK_H_ 3 | 4 | #include 5 | #include "resolve_kallsyms.h" 6 | #include "set_page_flags.h" 7 | 8 | struct direct_syscall_hook { 9 | int number; 10 | void *new; 11 | void *orig; 12 | }; 13 | 14 | static void **sys_call_table_addr = NULL; 15 | 16 | static int resolve_syscall_table(void) { 17 | sys_call_table_addr = kallsyms_lookup_name_("sys_call_table"); 18 | 19 | pte_t *sys_call_table_ptep = page_from_virt(sys_call_table_addr); 20 | if (!sys_call_table_ptep) { 21 | pr_info("debug: page_from_virt of %pK failed\n", sys_call_table_addr); 22 | return -ENOENT; 23 | } 24 | pr_info("debug: page_from_virt of %pK success, pte @ %pK\n", sys_call_table_addr, sys_call_table_ptep); 25 | 26 | // TODO: unset write bit after hook is finished 27 | pr_info("debug: ptep @ %pK, pte_write flag (%i)\n", sys_call_table_ptep, pte_write(*sys_call_table_ptep)); 28 | pr_info("debug: flipping write protect flag...\n"); 29 | pte_flip_write_protect(sys_call_table_ptep); 30 | pr_info("debug: ptep @ %pK, pte_write flag (%i)\n", sys_call_table_ptep, pte_write(*sys_call_table_ptep)); 31 | return 0; 32 | } 33 | 34 | void hook_syscall(struct direct_syscall_hook *hook) { 35 | if (!sys_call_table_addr) { 36 | resolve_syscall_table(); 37 | } 38 | hook->orig = sys_call_table_addr[hook->number]; 39 | // pte_flip_write_protect(page_from_virt(&sys_call_table_addr[hook->number])); 40 | sys_call_table_addr[hook->number] = hook->new; 41 | pr_info("debug: hook_syscall of #%i, orig @ %pK, new @%pK, success\n", hook->number, hook->orig, hook->new); 42 | } 43 | 44 | void unhook_syscall(struct direct_syscall_hook *hook) { 45 | if (!sys_call_table_addr) { 46 | resolve_syscall_table(); 47 | } 48 | sys_call_table_addr[hook->number] = hook->orig; 49 | pr_info("debug: unhook_syscall of #%i, orig restored @ %pK, new @%pK, success\n", hook->number, hook->orig, hook->new); 50 | } 51 
| 52 | #endif 53 | -------------------------------------------------------------------------------- /icebreaker/cr0_write_test/include/resolve_kallsyms.h: -------------------------------------------------------------------------------- 1 | #ifndef _RESOLV_KALLSYMS_H_ 2 | #define _RESOLV_KALLSYMS_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | typedef uintptr_t (*kallsyms_lookup_name_t)(const char *symbol_name); 9 | static kallsyms_lookup_name_t kallsyms_lookup_name__ = NULL; 10 | 11 | uintptr_t kprobe_get_func_addr(const char *func_name) { 12 | static struct kprobe kp; 13 | kp.symbol_name = func_name; 14 | if (register_kprobe(&kp) < 0) { 15 | pr_info("debug: kprobe_get_func_addr of %s failed\n", func_name); 16 | return -ENOENT; 17 | } 18 | uintptr_t tmp = kp.addr; 19 | unregister_kprobe(&kp); 20 | pr_info("debug: kprobe_get_func_addr %s @ %pK\n", func_name, tmp); 21 | return tmp; 22 | } 23 | 24 | uintptr_t kallsyms_lookup_name_(const char *symbol_name) { 25 | if (!kallsyms_lookup_name__) { 26 | kallsyms_lookup_name__ = kprobe_get_func_addr("kallsyms_lookup_name"); 27 | } 28 | uintptr_t tmp = kallsyms_lookup_name__(symbol_name); 29 | pr_info("debug: kallsyms_lookup_name_ %s @ %pK\n", symbol_name, tmp); 30 | return tmp; 31 | } 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /icebreaker/cr0_write_test/include/set_page_flags.h: -------------------------------------------------------------------------------- 1 | #ifndef _SET_PAGE_FLAGS_H_ 2 | #define _SET_PAGE_FLAGS_H_ 3 | 4 | #include 5 | #include "resolve_kallsyms.h" 6 | 7 | pte_t *page_from_virt(uintptr_t addr) { 8 | pgd_t *pgd; 9 | pud_t *pud; 10 | pmd_t *pmd; 11 | pte_t *ptep; 12 | 13 | struct mm_struct *init_mm_ptr = kallsyms_lookup_name_("init_mm"); 14 | pgd = pgd_offset(init_mm_ptr, addr); 15 | if (pgd_none(*pgd) || pgd_bad(*pgd)) { 16 | return NULL; 17 | } 18 | 19 | pud = pud_offset(pgd, addr); 20 | if (pud_none(*pud) || pud_bad(*pud)) { 21 | return 
NULL; 22 | } 23 | 24 | pmd = pmd_offset(pud, addr); 25 | if (pmd_none(*pmd) || pmd_bad(*pmd)) { 26 | return NULL; 27 | } 28 | 29 | ptep = pte_offset_map(pmd, addr); 30 | if (!ptep) { 31 | return NULL; 32 | } 33 | 34 | return ptep; 35 | } 36 | 37 | void pte_flip_write_protect(pte_t *ptep) { 38 | if (!pte_write(*ptep)) { 39 | *ptep = pte_mkwrite(pte_mkdirty(*ptep)); 40 | *ptep = clear_pte_bit(*ptep, __pgprot((_AT(pteval_t, 1) << 7))); 41 | return; 42 | } 43 | pte_wrprotect(*ptep); 44 | } 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /libvmi.conf: -------------------------------------------------------------------------------- 1 | ubuntu-hvm 2 | { 3 | sysmap = "/home/null/Desktop/projects/ICE/System.map-5.15.0-43-generic"; 4 | ostype = "Linux"; 5 | linux_tasks = 0xb28; 6 | linux_name = 0x848; 7 | linux_mm = 0x898; 8 | linux_pid = 0x950; 9 | linux_pgd = 0x50; 10 | } 11 | -------------------------------------------------------------------------------- /ubuntu-hvm.cfg: -------------------------------------------------------------------------------- 1 | builder="hvm" 2 | name="ubuntu-hvm" 3 | vif=['bridge=virbr0'] 4 | memory="4096" 5 | vcpus=4 6 | disk=['phy:/dev/vg0/ubuntu-hvm,hda,w'] 7 | sdl=0 8 | vnc=1 9 | vncpasswd='' 10 | stdvga=0 11 | serial='pty' 12 | tsc_mode="default" 13 | hap=1 14 | on_crash="destroy" 15 | --------------------------------------------------------------------------------