├── src
│   ├── utility
│   │   ├── win
│   │   │   └── win.cpp
│   │   ├── physmem
│   │   │   ├── physmem.hpp
│   │   │   ├── physmem_structs.hpp
│   │   │   ├── page_table_helpers.hpp
│   │   │   └── physmem.cpp
│   │   ├── general_asm.asm
│   │   └── safety_net
│   │       ├── safety_net_asm.asm
│   │       └── safety_net.cpp
│   ├── main.cpp
│   ├── includes
│   │   ├── includes.h
│   │   ├── structs.hpp
│   │   └── func_defs.hpp
│   ├── hv_detect.vcxproj.filters
│   ├── hv_detect.vcxproj
│   └── detections
│       └── idt.cpp
├── hv detect.sln
├── README.md
├── .gitattributes
└── .gitignore

--------------------------------------------------------------------------------
/src/utility/win/win.cpp:
--------------------------------------------------------------------------------
#include "../includes/includes.h"
#include "../includes/func_defs.hpp"

namespace win {
    uint64_t win_get_virtual_address(uint64_t physical_address) {
        PHYSICAL_ADDRESS phys_addr = { 0 };
        phys_addr.QuadPart = physical_address;

        return (uint64_t)(MmGetVirtualForPhysical(phys_addr));
    }

    uint64_t win_get_physical_address(void* virtual_address) {
        return MmGetPhysicalAddress(virtual_address).QuadPart;
    }
};

--------------------------------------------------------------------------------
/src/main.cpp:
--------------------------------------------------------------------------------
#include "includes/includes.h"
#include "includes/func_defs.hpp"
#include "utility/physmem/physmem.hpp"

void execute_detections(uint64_t driver_base, uint64_t driver_size) {
    if (!physmem::init_physmem()) {
        log_error("Failed to init physmem");
        return;
    }

    log_new_line();
    log_info("Physmem initialized");

    if (!safety_net::init_safety_net(driver_base, driver_size)) {
        log_error("Failed to init safety net");
        return;
    }

    log_info("Safety net initialized\n");

    // Set up the driver for supervisor access (yes, some detections switch into cpl = 3...)
    safety_net_t storage;
    if (!safety_net::start_safety_net(storage)) {
        log_error("Failed to start safety net");
        return;
    }

    if (!physmem::paging_manipulation::prepare_driver_for_supervisor_access((void*)driver_base, driver_size, __readcr3())) {
        safety_net::stop_safety_net(storage);
        log_error("Failed to set up driver for supervisor access");
        return;
    }

    safety_net::stop_safety_net(storage);

    log_info("IDT:");
    idt::execute_idt_detections();
    log_new_line();
}

NTSTATUS driver_entry(uint64_t driver_base, uint64_t driver_size) {
    log_info("Driver loaded at 0x%llx with size 0x%llx", driver_base, driver_size);

    execute_detections(driver_base, driver_size);

    return STATUS_SUCCESS;
}

--------------------------------------------------------------------------------
/src/includes/includes.h:
--------------------------------------------------------------------------------
#pragma once
#include
#include
#include "ia32.hpp"

#define log_info(fmt, ...)    DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "[*] " fmt "\n", ##__VA_ARGS__)
#define log_success(fmt, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "[+] " fmt "\n", ##__VA_ARGS__)
#define log_error(fmt, ...)   DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "[-!-] " fmt "\n", ##__VA_ARGS__)


#define INDENT_SPACES(count) ((count) == 1 ? " " : \
                              (count) == 2 ? " " : \
                              (count) == 3 ? " " : \
                              (count) == 4 ? " " : \
                              (count) == 5 ? " " : \
                              (count) == 6 ? " " : \
                              (count) == 7 ? " " : \
                              (count) == 8 ? " " : \
                              (count) == 9 ? " " : " ")
" " : " ") 20 | #define log_info_indent(indent, fmt, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "%s[*] " fmt "\n", INDENT_SPACES(indent), ##__VA_ARGS__) 21 | #define log_success_indent(indent, fmt, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "%s[+] " fmt "\n", INDENT_SPACES(indent), ##__VA_ARGS__) 22 | #define log_error_indent(indent, fmt, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL, "%s[-!-] " fmt "\n", INDENT_SPACES(indent), ##__VA_ARGS__) 23 | 24 | #define log_new_line() DbgPrintEx(DPFLTR_IHVDRIVER_ID, DPFLTR_ERROR_LEVEL,"\n") 25 | -------------------------------------------------------------------------------- /src/utility/physmem/physmem.hpp: -------------------------------------------------------------------------------- 1 | #include "../includes/structs.hpp" 2 | #include "../includes/includes.h" 3 | #include "../includes/func_defs.hpp" 4 | 5 | #include "physmem_structs.hpp" 6 | #include "page_table_helpers.hpp" 7 | 8 | namespace physmem { 9 | // Initialization functions 10 | bool init_physmem(void); 11 | bool is_initialized(void); 12 | 13 | namespace util { 14 | cr3 get_constructed_cr3(void); 15 | cr3 get_system_cr3(void); 16 | }; 17 | 18 | namespace runtime { 19 | bool translate_to_physical_address(uint64_t outside_target_cr3, void* virtual_address, uint64_t& physical_address, uint64_t* remaining_bytes = 0); 20 | 21 | void copy_physical_memory(uint64_t dst_physical, uint64_t src_physical, uint64_t size); 22 | bool copy_virtual_memory(void* dst, void* src, uint64_t size, uint64_t dst_cr3, uint64_t src_cr3); 23 | bool copy_memory_to_constructed_cr3(void* dst, void* src, uint64_t size, uint64_t src_cr3); 24 | bool copy_memory_from_constructed_cr3(void* dst, void* src, uint64_t size, uint64_t dst_cr3); 25 | }; 26 | 27 | namespace remapping { 28 | bool ensure_memory_mapping_for_range(void* target_address, uint64_t size, uint64_t mem_cr3_u64); 29 | bool overwrite_virtual_address_mapping(void* target_address, void* new_memory, uint64_t target_address_cr3_u64, uint64_t new_mem_cr3_u64); 30 | }; 31 | 32 | namespace paging_manipulation { 33 | bool win_destroy_memory_page_mapping(void* memory, uint64_t& stored_flags); 34 | bool win_restore_memory_page_mapping(void* memory, uint64_t stored_flags); 35 | bool win_set_memory_range_supervisor(void* memory, uint64_t size, uint64_t mem_cr3, bool supervisor); 36 | bool is_memory_page_mapped(void* memory); 37 | bool prepare_driver_for_supervisor_access(void* driver_base, uint64_t driver_size, uint64_t mem_cr3); 38 | }; 39 | 40 | }; -------------------------------------------------------------------------------- /hv detect.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio Version 17 4 | VisualStudioVersion = 17.11.35208.52 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "hv_detect", "src\hv_detect.vcxproj", "{8502FB29-F1AF-4F96-9ED6-CE90F79C575E}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|ARM64 = Debug|ARM64 11 | Debug|x64 = Debug|x64 12 | Release|ARM64 = Release|ARM64 13 | Release|x64 = Release|x64 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {8502FB29-F1AF-4F96-9ED6-CE90F79C575E}.Debug|ARM64.ActiveCfg = Debug|ARM64 17 | {8502FB29-F1AF-4F96-9ED6-CE90F79C575E}.Debug|ARM64.Build.0 = Debug|ARM64 18 | 
## SIDT

List of checks (mirroring the comment in detections/idt.cpp):

detection_1 -> #GP(0) due to LOCK prefix
detection_2 -> #PF due to invalid memory operand
detection_3 -> SIDT with operand not mapped in cr3 but present in the TLB
detection_4 -> Timing check (500 TSC ticks acceptable)
detection_5 -> Compatibility mode idtr storing
detection_6 -> Non-canonical address passed as memory operand
detection_7 -> Non-canonical address passed as memory operand in the SS segment -> #SS
detection_8 -> Executing sidt with cpl = 3 and cr4.umip = 0 -> should go through
detection_9 -> Executing sidt with cpl = 3 and cr4.umip = 1 -> #GP(0) should be raised

## LIDT

List of checks:

detection_1 -> #GP(0) due to LOCK prefix
detection_2 -> #PF due to invalid memory operand
detection_3 -> LIDT with operand not mapped in cr3 but present in the TLB
detection_4 -> Timing check (500 TSC ticks acceptable)
detection_5 -> Compatibility mode idtr loading (TO DO!)
detection_6 -> Non-canonical address passed as memory operand
detection_7 -> Non-canonical address passed as memory operand in the SS segment -> #SS
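The timing check (detection_4 in both lists) boils down to serializing `rdtsc` around the instruction and comparing against `MAX_ACCEPTABLE_TSC` (500, defined in structs.hpp). A rough sketch with MSVC intrinsics (again a simplified illustration, not the repo's exact code; the real check runs inside the safety net with interrupts disabled):

```cpp
#include <intrin.h>

// On bare metal, sidt retires without a vmexit and stays well under the
// 500-tick budget. A hypervisor that exits on descriptor-table accesses
// pays the vmexit roundtrip on top, which usually blows past it.
bool sidt_timing_suspicious(void) {
    unsigned char idtr[10] = {};

    _mm_lfence();                         // keep rdtsc from reordering around sidt
    unsigned __int64 start = __rdtsc();
    __sidt(idtr);
    _mm_lfence();
    unsigned __int64 elapsed = __rdtsc() - start;

    return elapsed > 500;                 // MAX_ACCEPTABLE_TSC
}
```

A real probe would take the minimum over many runs to filter out SMIs and cache effects.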
## Notes

If you have any other hv detections that you think would fit into this repo, make a pull request or sth.
No timing checks please, as I want sorta new ideas to be collected here <3

-> "Failed detection" means the behavior did not match bare-metal expectations

--------------------------------------------------------------------------------
/src/utility/physmem/physmem_structs.hpp:
--------------------------------------------------------------------------------
#pragma once
#include "../includes/structs.hpp"
#include "../includes/includes.h"
#include "../includes/func_defs.hpp"

typedef struct {
    uint32_t eax;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
} cpuidsplit_t;


typedef struct {
    void* table;
    bool large_page;
} slot_t;

typedef struct {
    va_64_t remapped_va;

    // PML4 slot not needed as we only have one anyway
    slot_t pdpt_table;
    slot_t pd_table;
    void* pt_table;

    bool used;
} remapped_entry_t;

typedef enum {
    pdpt_table_valid, // The pml4 at the correct index already points to a remapped pdpt table
    pde_table_valid,  // The pdpt at the correct index already points to a remapped pde table
    pte_table_valid,  // The pde at the correct index already points to a remapped pte table
    non_valid,        // The pml4 indexes didn't match
} usable_until_t;

#define PAGE_TABLE_ENTRY_COUNT 512
typedef struct {
    alignas(0x1000) pml4e_64 pml4_table[PAGE_TABLE_ENTRY_COUNT]; // Essentially a copy of the Windows PML4; we replace one entry and point it at our own paging structures
    alignas(0x1000) pdpte_64 pdpt_table[PAGE_TABLE_ENTRY_COUNT];
    alignas(0x1000) pde_2mb_64 pd_2mb_table[PAGE_TABLE_ENTRY_COUNT][PAGE_TABLE_ENTRY_COUNT];
} page_tables_t;

#define REMAPPING_TABLE_COUNT 100
#define MAX_REMAPPINGS 250
typedef struct {
    union {
        pdpte_64* pdpt_table[REMAPPING_TABLE_COUNT];
        pdpte_1gb_64* pdpt_1gb_table[REMAPPING_TABLE_COUNT];
    };
    union {
        pde_64* pd_table[REMAPPING_TABLE_COUNT];
        pde_2mb_64* pd_2mb_table[REMAPPING_TABLE_COUNT];
    };

    pte_64* pt_table[REMAPPING_TABLE_COUNT];

    bool is_pdpt_table_occupied[REMAPPING_TABLE_COUNT];
    bool is_pd_table_occupied[REMAPPING_TABLE_COUNT];
    bool is_pt_table_occupied[REMAPPING_TABLE_COUNT];

    remapped_entry_t remapping_list[MAX_REMAPPINGS];
} remapping_tables_t;

typedef struct {
    // These page tables make up our cr3
    page_tables_t* page_tables;

    // These page tables are the entries we use to
    // remap addresses in our cr3
    remapping_tables_t remapping_tables;

    cr3 kernel_cr3;

    cr3 constructed_cr3;
    uint64_t mapped_physical_mem_base; // Base address at which the first 512 GB of physical memory are mapped

    bool initialized;
} physmem_t;

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
###############################################################################
# Set default behavior to automatically normalize line endings.
###############################################################################
* text=auto

###############################################################################
# Set default behavior for command prompt diff.
8 | # 9 | # This is need for earlier builds of msysgit that does not have it on by 10 | # default for csharp files. 11 | # Note: This is only used by command line 12 | ############################################################################### 13 | #*.cs diff=csharp 14 | 15 | ############################################################################### 16 | # Set the merge driver for project and solution files 17 | # 18 | # Merging from the command prompt will add diff markers to the files if there 19 | # are conflicts (Merging from VS is not affected by the settings below, in VS 20 | # the diff markers are never inserted). Diff markers may cause the following 21 | # file extensions to fail to load in VS. An alternative would be to treat 22 | # these files as binary and thus will always conflict and require user 23 | # intervention with every merge. To do so, just uncomment the entries below 24 | ############################################################################### 25 | #*.sln merge=binary 26 | #*.csproj merge=binary 27 | #*.vbproj merge=binary 28 | #*.vcxproj merge=binary 29 | #*.vcproj merge=binary 30 | #*.dbproj merge=binary 31 | #*.fsproj merge=binary 32 | #*.lsproj merge=binary 33 | #*.wixproj merge=binary 34 | #*.modelproj merge=binary 35 | #*.sqlproj merge=binary 36 | #*.wwaproj merge=binary 37 | 38 | ############################################################################### 39 | # behavior for image files 40 | # 41 | # image files are treated as binary by default. 42 | ############################################################################### 43 | #*.jpg binary 44 | #*.png binary 45 | #*.gif binary 46 | 47 | ############################################################################### 48 | # diff behavior for common document formats 49 | # 50 | # Convert binary document formats to text before diffing them. This feature 51 | # is only available from the command line. Turn it on by uncommenting the 52 | # entries below. 
###############################################################################
#*.doc diff=astextplain
#*.DOC diff=astextplain
#*.docx diff=astextplain
#*.DOCX diff=astextplain
#*.dot diff=astextplain
#*.DOT diff=astextplain
#*.pdf diff=astextplain
#*.PDF diff=astextplain
#*.rtf diff=astextplain
#*.RTF diff=astextplain

--------------------------------------------------------------------------------
/src/includes/structs.hpp:
--------------------------------------------------------------------------------
#pragma once
#include
#include
#include "ia32.hpp"

#define MAX_RECORDABLE_INTERRUPTS 10

#define MAX_ACCEPTABLE_TSC 500

typedef union {

    struct {
        uint64_t offset_1gb : 30;
        uint64_t pdpte_idx : 9;
        uint64_t pml4e_idx : 9;
        uint64_t reserved : 16;
    };

    struct {
        uint64_t offset_2mb : 21;
        uint64_t pde_idx : 9;
        uint64_t pdpte_idx : 9;
        uint64_t pml4e_idx : 9;
        uint64_t reserved : 16;
    };

    struct {
        uint64_t offset_4kb : 12;
        uint64_t pte_idx : 9;
        uint64_t pde_idx : 9;
        uint64_t pdpte_idx : 9;
        uint64_t pml4e_idx : 9;
        uint64_t reserved : 16;
    };

    uint64_t flags;
} va_64_t;

#define UNW_FLAG_EHANDLER 1

typedef struct
{
    UINT32 BeginAddress;
    UINT32 EndAddress;
    UINT32 HandlerAddress;
    UINT32 JumpTarget;
} SCOPE_RECORD;

typedef struct
{
    UINT32 Count;
    SCOPE_RECORD ScopeRecords[1];
} SCOPE_TABLE;

typedef struct
{
    UINT32 BeginAddress;
    UINT32 EndAddress;
    UINT32 UnwindData;
} RUNTIME_FUNCTION;

#pragma warning(push)
#pragma warning(disable : 4200)
#pragma warning(disable : 4201)
#pragma warning(disable : 4214)
typedef union {
    UINT8 CodeOffset;
    UINT8 UnwindOp : 4;
    UINT8 OpInfo : 4;
    UINT16 FrameOffset;
} UNWIND_CODE;

typedef struct {
    UINT8 Version : 3;
    UINT8 Flags : 5;
    UINT8 SizeOfProlog;
    UINT8 CountOfCodes;
    UINT8 FrameRegister : 4;
    UINT8 FrameOffset : 4;
    UNWIND_CODE UnwindCode[1];

    union {
        UINT32 ExceptionHandler;
        UINT32 FunctionEntry;
    };

    UINT32 ExceptionData[];
} UNWIND_INFO;
#pragma warning(pop)

#pragma pack(push)
typedef struct {
    uint64_t r15;
    uint64_t r14;
    uint64_t r13;
    uint64_t r12;
    uint64_t r11;
    uint64_t r10;
    uint64_t r9;
    uint64_t r8;
    uint64_t rbp;
    uint64_t rdi;
    uint64_t rsi;
    uint64_t rdx;
    uint64_t rcx;
    uint64_t rbx;
    uint64_t rax;

    uint64_t exception_vector;
    uint64_t error_code;

    uint64_t rip;
    uint64_t cs_selector;
    rflags rflags;
    uint64_t rsp;
    uint64_t ss_selector;
} idt_regs_ecode_t;
#pragma pack(pop)

typedef struct {
    segment_descriptor_register_64 safed_idtr;
    segment_descriptor_register_64 safed_gdtr;

    uint16_t safed_ss;
    uint16_t safed_cs;
    uint16_t safed_tr;

    uint64_t safed_cr3;
    uint64_t safed_cr4; // Saved and exchanged to disable SMEP and SMAP

    KPCR* safed_kpcr;
} safety_net_t;

--------------------------------------------------------------------------------
/src/hv_detect.vcxproj.filters:
--------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5
| {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {dbad0024-c056-4ee5-9b12-c5019c972685} 10 | 11 | 12 | {fc2c1a04-fa3f-4b5c-9d52-3c4b4100cd2f} 13 | 14 | 15 | {5c1b2101-9c48-4a6e-afd3-74bf28a537fa} 16 | 17 | 18 | {08e02b5a-1178-494f-8cbe-8c306169c3d7} 19 | 20 | 21 | {3a8a4140-528b-457f-a7e1-fc68462d3f12} 22 | 23 | 24 | {6ad3315e-0c5d-458e-b6af-a5ece93a9878} 25 | 26 | 27 | 28 | 29 | Source Files 30 | 31 | 32 | Source Files\utility\physmem 33 | 34 | 35 | Source Files\utility\safety_net 36 | 37 | 38 | Source Files\utility\win 39 | 40 | 41 | Source Files\detections 42 | 43 | 44 | 45 | 46 | Source Files\utility\physmem 47 | 48 | 49 | Source Files\utility\physmem 50 | 51 | 52 | Source Files\utility\physmem 53 | 54 | 55 | Source Files\includes 56 | 57 | 58 | Source Files\includes 59 | 60 | 61 | Source Files\includes 62 | 63 | 64 | Source Files\includes 65 | 66 | 67 | 68 | 69 | Source Files\utility\safety_net 70 | 71 | 72 | Source Files\utility 73 | 74 | 75 | -------------------------------------------------------------------------------- /src/utility/physmem/page_table_helpers.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "physmem_structs.hpp" 3 | 4 | namespace pt_helpers { 5 | inline bool is_index_valid(uint64_t index) { 6 | return index < 512; 7 | } 8 | 9 | inline uint32_t find_free_pml4e_index(pml4e_64* pml4e_table) { 10 | for (uint32_t i = 0; i < 512; i++) { 11 | if (!pml4e_table[i].present) { 12 | return i; 13 | } 14 | } 15 | 16 | return MAXULONG32; 17 | } 18 | 19 | inline uint32_t find_free_pdpt_index(pdpte_64* pdpte_table) { 20 | for (uint32_t i = 0; i < 512; i++) { 21 | if (!pdpte_table[i].present) { 22 | return i; 23 | } 24 | } 25 | 26 | return MAXULONG32; 27 | } 28 | 29 | inline uint32_t find_free_pd_index(pde_64* pde_table) { 30 | for (uint32_t i = 0; i < 512; i++) { 31 | if (!pde_table[i].present) { 32 | return i; 33 | } 34 | } 35 | 36 | return MAXULONG32; 37 | } 38 | 39 | inline uint32_t find_free_pt_index(pte_64* pte_table) { 40 | for (uint32_t i = 0; i < 512; i++) { 41 | if (!pte_table[i].present) { 42 | return i; 43 | } 44 | } 45 | 46 | return MAXULONG32; 47 | } 48 | }; 49 | 50 | namespace pt_manager { 51 | // Allocation helpers 52 | inline pdpte_64* get_free_pdpt_table(remapping_tables_t* table) { 53 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 54 | if (!table->is_pdpt_table_occupied[i]) { 55 | table->is_pdpt_table_occupied[i] = true; 56 | return table->pdpt_table[i]; 57 | } 58 | } 59 | 60 | return 0; 61 | } 62 | 63 | inline pde_64* get_free_pd_table(remapping_tables_t* table) { 64 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 65 | if (!table->is_pd_table_occupied[i]) { 66 | table->is_pd_table_occupied[i] = true; 67 | return table->pd_table[i]; 68 | } 69 | } 70 | 71 | return 0; 72 | } 73 | 74 | inline pte_64* get_free_pt_table(remapping_tables_t* table) { 75 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 76 | if (!table->is_pt_table_occupied[i]) { 77 | table->is_pt_table_occupied[i] = true; 78 | return table->pt_table[i]; 79 | } 80 | } 81 | 82 | return 0; 83 | } 84 | 85 | // Freeing helpers 86 | inline void free_pdpt_table(remapping_tables_t* table, pdpte_64* pdpt_table) { 87 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 88 | if (table->pdpt_table[i] == pdpt_table) { 89 | table->is_pdpt_table_occupied[i] = false; 90 | memset(pdpt_table, 0, 512 * sizeof(pdpte_64)); 91 | return; 92 | } 93 | } 94 | } 95 | 96 | inline void 
free_pd_table(remapping_tables_t* table, pde_64* pd_table) { 97 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 98 | if (table->pd_table[i] == pd_table) { 99 | table->is_pd_table_occupied[i] = false; 100 | memset(pd_table, 0, 512 * sizeof(pde_64)); 101 | return; 102 | } 103 | } 104 | } 105 | 106 | inline void free_pt_table(remapping_tables_t* table, pte_64* pt_table) { 107 | for (uint32_t i = 0; i < REMAPPING_TABLE_COUNT; i++) { 108 | if (table->pt_table[i] == pt_table) { 109 | table->is_pt_table_occupied[i] = false; 110 | memset(pt_table, 0, 512 * sizeof(pte_64)); 111 | return; 112 | } 113 | } 114 | } 115 | }; -------------------------------------------------------------------------------- /src/utility/general_asm.asm: -------------------------------------------------------------------------------- 1 | .code 2 | 3 | ; ------------------------------------------------------------ 4 | ; General Purpose Register Reading/Manipulation 5 | ; ------------------------------------------------------------ 6 | 7 | __read_rsp proc 8 | mov rax, rsp 9 | add rax, 8 10 | ret 11 | __read_rsp endp 12 | 13 | __read_r15 proc 14 | mov rax, r15 15 | ret 16 | __read_r15 endp 17 | 18 | ; ------------------------------------------------------------ 19 | ; Segment Register Reading/Manipulation 20 | ; ------------------------------------------------------------ 21 | 22 | ; Read Task Register (TR) 23 | __read_tr proc 24 | str rax 25 | ret 26 | __read_tr endp 27 | 28 | ; Write Task Register (TR) 29 | __write_tr proc 30 | ltr cx 31 | ret 32 | __write_tr endp 33 | 34 | ; Read Code Segment (CS) 35 | __read_cs proc 36 | mov ax, cs 37 | movzx rax, ax 38 | ret 39 | __read_cs endp 40 | 41 | ; Write Code Segment (CS) 42 | __write_cs proc 43 | push rbx 44 | 45 | ; Save SS 46 | mov rbx, ss 47 | push rbx 48 | 49 | ; Save RSP 50 | mov rbx, rsp 51 | add rbx, 8 52 | push rbx 53 | 54 | ; Save RFLAGS 55 | pushfq 56 | 57 | ; Save CS 58 | movzx rcx, cx 59 | push rcx 60 | 61 | ; Save RIP 62 | lea rbx, [continue] 63 | push rbx 64 | 65 | iretq 66 | 67 | continue: 68 | pop rbx 69 | ret 70 | __write_cs endp 71 | 72 | ; Read Data Segment (DS) 73 | __read_ds proc 74 | mov ax, ds 75 | movzx rax, ax 76 | ret 77 | __read_ds endp 78 | 79 | ; Write Data Segment (DS) 80 | __write_ds proc 81 | mov ds, cx 82 | ret 83 | __write_ds endp 84 | 85 | ; Read Extra Segment (ES) 86 | __read_es proc 87 | mov ax, es 88 | movzx rax, ax 89 | ret 90 | __read_es endp 91 | 92 | ; Write Extra Segment (ES) 93 | __write_es proc 94 | mov es, cx 95 | ret 96 | __write_es endp 97 | 98 | ; Read Stack Segment (SS) 99 | __read_ss proc 100 | mov ax, ss 101 | movzx rax, ax 102 | ret 103 | __read_ss endp 104 | 105 | ; Write Stack Segment (SS) 106 | __write_ss proc 107 | mov ss, cx 108 | ret 109 | __write_ss endp 110 | 111 | ; Read FS Segment 112 | __read_fs proc 113 | mov ax, fs 114 | movzx rax, ax 115 | ret 116 | __read_fs endp 117 | 118 | ; Write FS Segment 119 | __write_fs proc 120 | mov fs, cx 121 | ret 122 | __write_fs endp 123 | 124 | ; Read GS Segment 125 | __read_gs proc 126 | mov ax, gs 127 | movzx rax, ax 128 | ret 129 | __read_gs endp 130 | 131 | ; Write GS Segment 132 | __write_gs proc 133 | mov gs, cx 134 | ret 135 | __write_gs endp 136 | 137 | ; ------------------------------------------------------------ 138 | ; CLI/STI Operations 139 | ; ------------------------------------------------------------ 140 | 141 | _cli proc 142 | cli 143 | ret 144 | _cli endp 145 | 146 | _sti proc 147 | sti 148 | ret 149 | _sti endp 150 | 151 | ; 
------------------------------------------------------------ 152 | ; SIDT Operations (Fault Handling and Locking) 153 | ; ------------------------------------------------------------ 154 | 155 | __lock_sidt proc 156 | db 0F0h ; lock prefix 157 | sidt qword ptr [rcx] 158 | ret 159 | __lock_sidt endp 160 | 161 | __ss_fault_sidt proc 162 | mov rax, rsp ; Save RSP into RAX 163 | mov rsp, 4AAAAAAAA555A555h ; Non-canonical value to trigger #SS 164 | sidt qword ptr [rsp] ; SIDT will cause #SS 165 | mov rsp, rax ; Restore RSP 166 | ret 167 | __ss_fault_sidt endp 168 | 169 | __gp_fault_sidt proc 170 | mov rax, 4AAAAAAAA555A555h ; Non-canonical value to trigger #GP 171 | sidt qword ptr [rax] ; SIDT will cause #GP 172 | ret 173 | __gp_fault_sidt endp 174 | 175 | ; ------------------------------------------------------------ 176 | ; LIDT Operations (Fault Handling and Locking) 177 | ; ------------------------------------------------------------ 178 | 179 | __lock_lidt proc 180 | db 0F0h ; lock prefix 181 | lidt fword ptr [rcx] 182 | ret 183 | __lock_lidt endp 184 | 185 | __ss_fault_lidt proc 186 | mov rax, rsp ; Save RSP into RAX 187 | mov rsp, 4AAAAAAAA555A555h ; Non-canonical value to trigger #SS 188 | lidt fword ptr [rsp] ; LIDT will cause #SS 189 | mov rsp, rax ; Restore RSP 190 | ret 191 | __ss_fault_lidt endp 192 | 193 | __gp_fault_lidt proc 194 | mov rax, 4AAAAAAAA555A555h ; Non-canonical value to trigger #GP 195 | lidt fword ptr [rax] ; LIDT will cause #GP 196 | ret 197 | __gp_fault_lidt endp 198 | 199 | ; ------------------------------------------------------------ 200 | ; Special Fault Operations 201 | ; ------------------------------------------------------------ 202 | 203 | __cause_ss proc 204 | mov rax, rsp ; Save current RSP into RAX 205 | mov rsp, 4AAAAAAAA555A555h ; Set RSP to non-canonical value 206 | mov qword ptr [rsp], rax ; Should trigger #SS 207 | mov rsp, rax ; Restore RSP (won't be reached if #SS occurs) 208 | ret 209 | __cause_ss endp 210 | 211 | ; ------------------------------------------------------------ 212 | ; Utility Functions 213 | ; ------------------------------------------------------------ 214 | 215 | get_proc_number proc 216 | push rbx 217 | push rcx 218 | push rdx 219 | 220 | xor eax, eax ; Clear EAX 221 | mov eax, 0Bh ; CPUID leaf 0x0B (Extended Topology Enumeration) 222 | xor ecx, ecx ; Sub-leaf 0 223 | cpuid 224 | 225 | mov eax, edx ; Save APIC ID in EAX 226 | 227 | pop rdx 228 | pop rcx 229 | pop rbx 230 | 231 | ret 232 | get_proc_number endp 233 | 234 | end 235 | -------------------------------------------------------------------------------- /src/includes/func_defs.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "includes.h" 3 | #include "structs.hpp" 4 | 5 | /* 6 | Assembly 7 | */ 8 | 9 | // ------------------------------------------------------------ 10 | // General Purpose Register Manipulation 11 | // ------------------------------------------------------------ 12 | extern "C" uint64_t __read_rsp(void); 13 | extern "C" uint64_t __read_r15(void); 14 | 15 | // ------------------------------------------------------------ 16 | // Segment Register Reading/Manipulation 17 | // ------------------------------------------------------------ 18 | extern "C" segment_selector __read_tr(void); 19 | extern "C" segment_selector __read_cs(void); 20 | extern "C" segment_selector __read_ss(void); 21 | extern "C" segment_selector __read_ds(void); 22 | extern "C" segment_selector __read_es(void); 23 | 
extern "C" segment_selector __read_fs(void); 24 | extern "C" segment_selector __read_gs(void); 25 | 26 | extern "C" void __write_tr(uint16_t selector); 27 | extern "C" void __write_cs(uint16_t selector); 28 | extern "C" void __write_ss(uint16_t selector); 29 | extern "C" void __write_ds(uint16_t selector); 30 | extern "C" void __write_es(uint16_t selector); 31 | extern "C" void __write_fs(uint16_t selector); 32 | extern "C" void __write_gs(uint16_t selector); 33 | 34 | // ------------------------------------------------------------ 35 | // CLI/STI Operations 36 | // ------------------------------------------------------------ 37 | extern "C" void _cli(void); 38 | extern "C" void _sti(void); 39 | 40 | // ------------------------------------------------------------ 41 | // GDT Operations (SGDT, LGDT) 42 | // ------------------------------------------------------------ 43 | extern "C" void _sgdt(void* gdtr); 44 | extern "C" void _lgdt(void* gdtr); 45 | 46 | // ------------------------------------------------------------ 47 | // Special Fault Operations 48 | // ------------------------------------------------------------ 49 | extern "C" void __cause_ss(void); 50 | 51 | // ------------------------------------------------------------ 52 | // Utility Functions 53 | // ------------------------------------------------------------ 54 | extern "C" uint32_t get_proc_number(void); 55 | extern "C" void asm_switch_cpl(uint64_t new_cpl); 56 | 57 | // ------------------------------------------------------------ 58 | // SIDT Operations (Fault Handling and Locking) 59 | // ------------------------------------------------------------ 60 | extern "C" void __lock_sidt(void* idtr_storage); 61 | extern "C" void __ss_fault_sidt(void); 62 | extern "C" void __gp_fault_sidt(void); 63 | 64 | // ------------------------------------------------------------ 65 | // LIDT Operations (Fault Handling and Locking) 66 | // ------------------------------------------------------------ 67 | extern "C" void __lock_lidt(void* idtr_storage); 68 | extern "C" void __ss_fault_lidt(void); 69 | extern "C" void __gp_fault_lidt(void); 70 | 71 | 72 | // ------------------------------------------------------------ 73 | // IDT HANDLERS 74 | // ------------------------------------------------------------ 75 | extern "C" void asm_de_handler(); 76 | extern "C" void asm_db_handler(); 77 | extern "C" void asm_nmi_handler(); 78 | extern "C" void asm_bp_handler(); 79 | extern "C" void asm_of_handler(); 80 | extern "C" void asm_br_handler(); 81 | extern "C" void asm_ud_handler(); 82 | extern "C" void asm_nm_handler(); 83 | extern "C" void asm_df_handler(); 84 | extern "C" void asm_ts_handler(); 85 | extern "C" void asm_np_handler(); 86 | extern "C" void asm_ss_handler(); 87 | extern "C" void asm_gp_handler(); 88 | extern "C" void asm_pf_handler(); 89 | extern "C" void asm_mf_handler(); 90 | extern "C" void asm_ac_handler(); 91 | extern "C" void asm_mc_handler(); 92 | extern "C" void asm_xm_handler(); 93 | extern "C" void asm_ve_handler(); 94 | extern "C" void asm_cp_handler(); 95 | extern "C" void seh_handler_ecode(idt_regs_ecode_t* regs); 96 | 97 | // ------------------------------------------------------------ 98 | // CPL CHANGING / EXECUTION MODE CHANGING 99 | // ------------------------------------------------------------ 100 | extern "C" void asm_syscall_handler(void); 101 | extern "C" void asm_switch_segments(uint16_t cs, uint16_t ss); 102 | extern "C" void asm_switch_to_cpl_0(void); 103 | extern "C" void 
asm_execute_compatibility_mode_code(void); 104 | 105 | /* 106 | High level detections 107 | */ 108 | 109 | namespace win { 110 | uint64_t win_get_virtual_address(uint64_t physical_address); 111 | 112 | uint64_t win_get_physical_address(void* virtual_address); 113 | }; 114 | 115 | namespace idt { 116 | void execute_idt_detections(void); 117 | }; 118 | 119 | namespace gdt { 120 | void execute_gdt_detections(void); 121 | }; 122 | 123 | namespace tr { 124 | void execute_tr_detections(void); 125 | }; 126 | 127 | namespace safety_net { 128 | bool init_safety_net(uint64_t image_base, uint64_t image_size); 129 | void free_safety_net(void); 130 | 131 | /* 132 | Note: Have to be called from cpl = 0 133 | */ 134 | void set_safety_net_kpcr(KPCR* kpcr); 135 | bool is_safety_net_active(); 136 | bool start_safety_net(safety_net_t& info_storage); 137 | void stop_safety_net(safety_net_t& info_storage); 138 | 139 | namespace gdt { 140 | void log_constructed_gdt_descriptors(void); 141 | }; 142 | 143 | namespace idt { 144 | idt_regs_ecode_t* get_core_last_interrupt_record(void); 145 | idt_regs_ecode_t* get_interrupt_record(uint32_t interrupt_idx); 146 | uint64_t get_interrupt_count(void); 147 | void log_all_interrupts(); 148 | void reset_interrupt_count(void); 149 | 150 | segment_descriptor_register_64 get_constructed_idtr(void); 151 | }; 152 | 153 | namespace cpl { 154 | /* 155 | Done via sysret; 156 | In here we need to ensure that we write to all necessary MSR's 157 | so that we can later restore shit 158 | */ 159 | bool switch_to_cpl_3(void); 160 | 161 | /* 162 | Done via syscall; 163 | In here we need to ensure that we restore all polluted MSR's 164 | */ 165 | bool switch_to_cpl_0(void); 166 | }; 167 | 168 | namespace execution_mode { 169 | bool handle_mode_switch(idt_regs_ecode_t* record); 170 | 171 | uint32_t get_compatibility_data_page_address(void); 172 | void* get_compatibility_data_page(void); 173 | 174 | bool execute_32_bit_shellcode(void* shellcode, uint64_t shellcode_size); 175 | }; 176 | }; -------------------------------------------------------------------------------- /src/utility/safety_net/safety_net_asm.asm: -------------------------------------------------------------------------------- 1 | .data 2 | 3 | ; Error codes 4 | dummy_error_code dq 0h 5 | 6 | ; Exception vectors 7 | divide_error_vector dq 00h 8 | debug_vector dq 01h 9 | nmi_vector dq 02h 10 | breakpoint_vector dq 03h 11 | overflow_vector dq 04h 12 | bound_range_exceeded_vector dq 05h 13 | invalid_opcode_vector dq 06h 14 | device_not_available_vector dq 07h 15 | double_fault_vector dq 08h 16 | invalid_tss_vector dq 0Ah 17 | segment_not_present_vector dq 0Bh 18 | stack_segment_fault_vector dq 0Ch 19 | general_protection_vector dq 0Dh 20 | page_fault_vector dq 0Eh 21 | x87_floating_point_error_vector dq 10h 22 | alignment_check_vector dq 11h 23 | machine_check_vector dq 12h 24 | simd_floating_point_error_vector dq 13h 25 | virtualization_exception_vector dq 14h 26 | control_protection_vector dq 15h 27 | 28 | 29 | extern exception_handler:proc 30 | 31 | .code 32 | 33 | save_general_regs macro 34 | push rax 35 | push rbx 36 | push rcx 37 | push rdx 38 | push rsi 39 | push rdi 40 | push rbp 41 | push r8 42 | push r9 43 | push r10 44 | push r11 45 | push r12 46 | push r13 47 | push r14 48 | push r15 49 | endm 50 | 51 | restore_general_regs macro 52 | pop r15 53 | pop r14 54 | pop r13 55 | pop r12 56 | pop r11 57 | pop r10 58 | pop r9 59 | pop r8 60 | pop rbp 61 | pop rdi 62 | pop rsi 63 | pop rdx 64 | pop rcx 65 | pop rbx 66 | pop rax 
67 | endm 68 | 69 | ; Core handler where all handlers will jump to 70 | asm_core_handler proc 71 | save_general_regs 72 | 73 | mov rcx, rsp 74 | sub rsp, 20h 75 | call exception_handler 76 | add rsp, 20h 77 | 78 | restore_general_regs 79 | add rsp, 8 ; remove exception vector 80 | add rsp, 8 ; remove error code 81 | 82 | iretq 83 | asm_core_handler endp 84 | 85 | 86 | ; Exception Handlers (each one pushes its vector and error code, and then jumps to asm_core_handler). 87 | 88 | ; #DE - Divide Error (no error code). 89 | asm_de_handler proc 90 | push qword ptr [dummy_error_code] 91 | push qword ptr [divide_error_vector] 92 | jmp asm_core_handler 93 | asm_de_handler endp 94 | 95 | ; #DB - Debug Exception (no error code). 96 | asm_db_handler proc 97 | push qword ptr [dummy_error_code] 98 | push qword ptr [debug_vector] 99 | jmp asm_core_handler 100 | asm_db_handler endp 101 | 102 | ; #NMI - Non-Maskable Interrupt (no error code). 103 | asm_nmi_handler proc 104 | push qword ptr [dummy_error_code] 105 | push qword ptr [nmi_vector] 106 | jmp asm_core_handler 107 | asm_nmi_handler endp 108 | 109 | ; #BP - Breakpoint (no error code). 110 | asm_bp_handler proc 111 | push qword ptr [dummy_error_code] 112 | push qword ptr [breakpoint_vector] 113 | jmp asm_core_handler 114 | asm_bp_handler endp 115 | 116 | ; #OF - Overflow (no error code). 117 | asm_of_handler proc 118 | push qword ptr [dummy_error_code] 119 | push qword ptr [overflow_vector] 120 | jmp asm_core_handler 121 | asm_of_handler endp 122 | 123 | ; #BR - Bound Range Exceeded (no error code). 124 | asm_br_handler proc 125 | push qword ptr [dummy_error_code] 126 | push qword ptr [bound_range_exceeded_vector] 127 | jmp asm_core_handler 128 | asm_br_handler endp 129 | 130 | ; #UD - Invalid Opcode (no error code). 131 | asm_ud_handler proc 132 | push qword ptr [dummy_error_code] 133 | push qword ptr [invalid_opcode_vector] 134 | jmp asm_core_handler 135 | asm_ud_handler endp 136 | 137 | ; #NM - Device Not Available (no error code). 138 | asm_nm_handler proc 139 | push qword ptr [dummy_error_code] 140 | push qword ptr [device_not_available_vector] 141 | jmp asm_core_handler 142 | asm_nm_handler endp 143 | 144 | ; #DF - Double Fault (has an error code 0). 145 | asm_df_handler proc 146 | push qword ptr [double_fault_vector] 147 | jmp asm_core_handler 148 | asm_df_handler endp 149 | 150 | ; #TS - Invalid TSS (with error code). 151 | asm_ts_handler proc 152 | push qword ptr [invalid_tss_vector] 153 | jmp asm_core_handler 154 | asm_ts_handler endp 155 | 156 | ; #NP - Segment Not Present (with error code). 157 | asm_np_handler proc 158 | push qword ptr [segment_not_present_vector] 159 | jmp asm_core_handler 160 | asm_np_handler endp 161 | 162 | ; #SS - Stack Segment Fault (with error code). 163 | asm_ss_handler proc 164 | push qword ptr [stack_segment_fault_vector] 165 | jmp asm_core_handler 166 | asm_ss_handler endp 167 | 168 | ; #GP - General Protection Fault (with error code). 169 | asm_gp_handler proc 170 | push qword ptr [general_protection_vector] 171 | jmp asm_core_handler 172 | asm_gp_handler endp 173 | 174 | ; #PF - Page Fault (with error code). 175 | asm_pf_handler proc 176 | push qword ptr [page_fault_vector] 177 | jmp asm_core_handler 178 | asm_pf_handler endp 179 | 180 | ; #MF - x87 Floating-Point Error (no error code). 
181 | asm_mf_handler proc 182 | push qword ptr [dummy_error_code] 183 | push qword ptr [x87_floating_point_error_vector] 184 | jmp asm_core_handler 185 | asm_mf_handler endp 186 | 187 | ; #AC - Alignment Check (with error code). 188 | asm_ac_handler proc 189 | push qword ptr [alignment_check_vector] 190 | jmp asm_core_handler 191 | asm_ac_handler endp 192 | 193 | ; #MC - Machine Check (no error code). 194 | asm_mc_handler proc 195 | push qword ptr [dummy_error_code] 196 | push qword ptr [machine_check_vector] 197 | jmp asm_core_handler 198 | asm_mc_handler endp 199 | 200 | ; #XM - SIMD Floating-Point Error (no error code). 201 | asm_xm_handler proc 202 | push qword ptr [dummy_error_code] 203 | push qword ptr [simd_floating_point_error_vector] 204 | jmp asm_core_handler 205 | asm_xm_handler endp 206 | 207 | ; #VE - Virtualization Exception (no error code). 208 | asm_ve_handler proc 209 | push qword ptr [dummy_error_code] 210 | push qword ptr [virtualization_exception_vector] 211 | jmp asm_core_handler 212 | asm_ve_handler endp 213 | 214 | ; #CP - Control Protection Exception (with error code). 215 | asm_cp_handler proc 216 | push qword ptr [control_protection_vector] 217 | jmp asm_core_handler 218 | asm_cp_handler endp 219 | 220 | 221 | ; Cpl switching 222 | asm_syscall_handler proc 223 | jmp rcx ; Basically just ignores the syscall and jumps to the next instruction 224 | asm_syscall_handler endp 225 | 226 | asm_switch_segments proc 227 | push rbx 228 | 229 | ; SS 230 | movzx rbx, dx ; 16 byte argument passed 231 | push rbx 232 | 233 | ; Rsp 234 | mov rbx, rsp 235 | add rbx, 8 236 | push rbx 237 | 238 | ; Rflags 239 | pushfq 240 | 241 | ; CS 242 | movzx rbx, cx ; 16 byte argument passed 243 | push rbx 244 | 245 | ; Rip 246 | lea rbx, [continue] 247 | push rbx 248 | 249 | iretq 250 | 251 | continue: 252 | 253 | pop rbx 254 | ret 255 | asm_switch_segments endp 256 | 257 | asm_switch_to_cpl_0 proc 258 | push rcx 259 | push r11 260 | 261 | syscall ; Will be a jump to the next instruction basically 262 | 263 | pop r11 264 | pop rcx 265 | 266 | ret 267 | asm_switch_to_cpl_0 endp 268 | 269 | ; Mode switching 270 | asm_execute_compatibility_mode_code proc 271 | 272 | ; Switches into compatibility mode 273 | mov rax, 01337h 274 | int 3 275 | 276 | ; The shellcode is responsible for executing the second int 3 277 | 278 | ret 279 | asm_execute_compatibility_mode_code endp 280 | 281 | end -------------------------------------------------------------------------------- /src/hv_detect.vcxproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | x64 7 | 8 | 9 | Release 10 | x64 11 | 12 | 13 | Debug 14 | ARM64 15 | 16 | 17 | Release 18 | ARM64 19 | 20 | 21 | 22 | {8502fb29-f1af-4f96-9ed6-ce90f79c575e} 23 | {dd38f7fc-d7bd-488b-9242-7d8754cde80d} 24 | v4.5 25 | 12.0 26 | Debug 27 | x64 28 | hv_detect 29 | $(LatestTargetPlatformVersion) 30 | 31 | 32 | 33 | Windows10 34 | true 35 | WindowsKernelModeDriver10.0 36 | Driver 37 | WDM 38 | Spectre 39 | 40 | 41 | Windows10 42 | false 43 | WindowsKernelModeDriver10.0 44 | Driver 45 | WDM 46 | Spectre 47 | 48 | 49 | Windows10 50 | true 51 | WindowsKernelModeDriver10.0 52 | Driver 53 | WDM 54 | Spectre 55 | 56 | 57 | Windows10 58 | false 59 | WindowsKernelModeDriver10.0 60 | Driver 61 | WDM 62 | Spectre 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | DbgengKernelDebugger 74 | .\output directory\ 75 | .\intermediate directory\ 76 | 77 | 78 | DbgengKernelDebugger 79 | .\output directory\ 80 | 
.\intermediate directory\ 81 | 82 | 83 | DbgengKernelDebugger 84 | 85 | 86 | DbgengKernelDebugger 87 | 88 | 89 | 90 | sha256 91 | 92 | 93 | stdcpp20 94 | 95 | 96 | stdc17 97 | 4201;4996 98 | Async 99 | false 100 | 101 | 102 | driver_entry 103 | 104 | 105 | 106 | 107 | sha256 108 | 109 | 110 | driver_entry 111 | 112 | 113 | stdcpp20 114 | 115 | 116 | stdc17 117 | 4201;4996 118 | Async 119 | 120 | 121 | 122 | 123 | StdCall 124 | 4201;4996 125 | Async 126 | false 127 | 128 | 129 | 130 | 131 | StdCall 132 | 4201;4996 133 | Async 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # User-specific files 2 | *.suo 3 | *.user 4 | *.sln.docstates 5 | 6 | # Build results 7 | [Dd]ebug/ 8 | [Dd]ebugPublic/ 9 | [Rr]elease/ 10 | [Rr]eleases/ 11 | x64/ 12 | x86/ 13 | .vs/ 14 | build/ 15 | bld/ 16 | [Bb]in/ 17 | [Oo]bj/ 18 | 19 | # Roslyn cache directories 20 | *.ide/ 21 | 22 | # MSTest test Results 23 | [Tt]est[Rr]esult*/ 24 | [Bb]uild[Ll]og.* 25 | 26 | #NUNIT 27 | *.VisualState.xml 28 | TestResult.xml 29 | 30 | # Build Results of an ATL Project 31 | [Dd]ebugPS/ 32 | [Rr]eleasePS/ 33 | dlldata.c 34 | 35 | *_i.c 36 | *_p.c 37 | *_i.h 38 | *.ilk 39 | *.meta 40 | *.obj 41 | *.pch 42 | *.pdb 43 | *.pgc 44 | *.pgd 45 | *.rsp 46 | *.sbr 47 | *.tlb 48 | *.tli 49 | *.tlh 50 | *.tmp 51 | *.tmp_proj 52 | *.log 53 | *.vspscc 54 | *.vssscc 55 | .builds 56 | *.pidb 57 | *.svclog 58 | *.scc 59 | 60 | # Chutzpah Test files 61 | _Chutzpah* 62 | 63 | # Visual C++ cache files 64 | ipch/ 65 | *.aps 66 | *.ncb 67 | *.opensdf 68 | *.sdf 69 | *.cachefile 70 | 71 | # Visual Studio profiler 72 | *.psess 73 | *.vsp 74 | *.vspx 75 | 76 | # TFS 2012 Local Workspace 77 | $tf/ 78 | 79 | # Guidance Automation Toolkit 80 | *.gpState 81 | 82 | # ReSharper is a .NET coding add-in 83 | _ReSharper*/ 84 | *.[Rr]e[Ss]harper 85 | *.DotSettings.user 86 | 87 | # JustCode is a .NET coding addin-in 88 | .JustCode 89 | 90 | # TeamCity is a build add-in 91 | _TeamCity* 92 | 93 | # DotCover is a Code Coverage Tool 94 | *.dotCover 95 | 96 | # NCrunch 97 | _NCrunch_* 98 | .*crunch*.local.xml 99 | 100 | # MightyMoose 101 | *.mm.* 102 | AutoTest.Net/ 103 | 104 | # Web workbench (sass) 105 | .sass-cache/ 106 | 107 | # Installshield output folder 108 | [Ee]xpress/ 109 | 110 | # DocProject is a documentation generator add-in 111 | DocProject/buildhelp/ 112 | DocProject/Help/*.HxT 113 | DocProject/Help/*.HxC 114 | DocProject/Help/*.hhc 115 | DocProject/Help/*.hhk 116 | DocProject/Help/*.hhp 117 | DocProject/Help/Html2 118 | DocProject/Help/html 119 | 120 | # Click-Once directory 121 | publish/ 122 | 123 | # Publish Web Output 124 | *.[Pp]ublish.xml 125 | *.azurePubxml 126 | # TODO: Comment the next line if you want to checkin your web deploy settings 127 | # but database connection strings (with potential passwords) will be unencrypted 128 | *.pubxml 129 | *.publishproj 130 | 131 | # NuGet Packages 132 | *.nupkg 133 | # The packages folder can be ignored because of Package Restore 134 | **/packages/* 135 | # except build/, which is used as an MSBuild target. 
136 | !**/packages/build/ 137 | # If using the old MSBuild-Integrated Package Restore, uncomment this: 138 | #!**/packages/repositories.config 139 | 140 | # Windows Azure Build Output 141 | csx/ 142 | *.build.csdef 143 | 144 | # Windows Store app package directory 145 | AppPackages/ 146 | 147 | # Others 148 | sql/ 149 | *.Cache 150 | ClientBin/ 151 | [Ss]tyle[Cc]op.* 152 | ~$* 153 | *~ 154 | *.dbmdl 155 | *.dbproj.schemaview 156 | *.pfx 157 | *.publishsettings 158 | node_modules/ 159 | 160 | # RIA/Silverlight projects 161 | Generated_Code/ 162 | 163 | # Backup & report files from converting an old project file 164 | # to a newer Visual Studio version. Backup files are not needed, 165 | # because we have git ;-) 166 | _UpgradeReport_Files/ 167 | Backup*/ 168 | UpgradeLog*.XML 169 | UpgradeLog*.htm 170 | 171 | # SQL Server files 172 | *.mdf 173 | *.ldf 174 | 175 | # Business Intelligence projects 176 | *.rdl.data 177 | *.bim.layout 178 | *.bim_*.settings 179 | 180 | # Microsoft Fakes 181 | FakesAssemblies/ 182 | 183 | 184 | ### Windows ### 185 | # Windows image file caches 186 | Thumbs.db 187 | ehthumbs.db 188 | 189 | # Folder config file 190 | Desktop.ini 191 | 192 | # Recycle Bin used on file shares 193 | $RECYCLE.BIN/ 194 | 195 | # Windows Installer files 196 | *.cab 197 | *.msi 198 | *.msm 199 | *.msp 200 | *.exe 201 | *.i64 202 | *.dll 203 | 204 | 205 | # Logs 206 | logs 207 | *.log 208 | npm-debug.log* 209 | yarn-debug.log* 210 | yarn-error.log* 211 | lerna-debug.log* 212 | .pnpm-debug.log* 213 | 214 | # Diagnostic reports (https://nodejs.org/api/report.html) 215 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 216 | 217 | # Runtime data 218 | pids 219 | *.pid 220 | *.seed 221 | *.pid.lock 222 | 223 | # Directory for instrumented libs generated by jscoverage/JSCover 224 | lib-cov 225 | 226 | # Coverage directory used by tools like istanbul 227 | coverage 228 | *.lcov 229 | 230 | # nyc test coverage 231 | .nyc_output 232 | 233 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 234 | .grunt 235 | 236 | # Bower dependency directory (https://bower.io/) 237 | bower_components 238 | 239 | # node-waf configuration 240 | .lock-wscript 241 | 242 | # Compiled binary addons (https://nodejs.org/api/addons.html) 243 | build/Release 244 | 245 | # Dependency directories 246 | node_modules/ 247 | jspm_packages/ 248 | 249 | # Snowpack dependency directory (https://snowpack.dev/) 250 | web_modules/ 251 | 252 | # TypeScript cache 253 | *.tsbuildinfo 254 | 255 | # Optional npm cache directory 256 | .npm 257 | 258 | # Optional eslint cache 259 | .eslintcache 260 | 261 | # Optional stylelint cache 262 | .stylelintcache 263 | 264 | # Microbundle cache 265 | .rpt2_cache/ 266 | .rts2_cache_cjs/ 267 | .rts2_cache_es/ 268 | .rts2_cache_umd/ 269 | 270 | # Optional REPL history 271 | .node_repl_history 272 | 273 | # Output of 'npm pack' 274 | *.tgz 275 | 276 | # Yarn Integrity file 277 | .yarn-integrity 278 | 279 | # dotenv environment variable files 280 | .env 281 | .env.development.local 282 | .env.test.local 283 | .env.production.local 284 | .env.local 285 | 286 | # parcel-bundler cache (https://parceljs.org/) 287 | .cache 288 | .parcel-cache 289 | 290 | # Next.js build output 291 | .next 292 | out 293 | 294 | # Nuxt.js build / generate output 295 | .nuxt 296 | dist 297 | 298 | # Gatsby files 299 | .cache/ 300 | # Comment in the public line in if your project uses Gatsby and not Next.js 301 | # https://nextjs.org/blog/next-9-1#public-directory-support 302 | # public 303 | 304 | # 
vuepress build output 305 | .vuepress/dist 306 | 307 | # vuepress v2.x temp and cache directory 308 | .temp 309 | .cache 310 | 311 | # Docusaurus cache and generated files 312 | .docusaurus 313 | 314 | # Serverless directories 315 | .serverless/ 316 | 317 | # FuseBox cache 318 | .fusebox/ 319 | 320 | # DynamoDB Local files 321 | .dynamodb/ 322 | 323 | # TernJS port file 324 | .tern-port 325 | 326 | # Stores VSCode versions used for testing VSCode extensions 327 | .vscode-test 328 | 329 | # yarn v2 330 | .yarn/cache 331 | .yarn/unplugged 332 | .yarn/build-state.yml 333 | .yarn/install-state.gz 334 | .pnp.* 335 | 336 | *jar 337 | 338 | ##### Windows 339 | # Windows thumbnail cache files 340 | Thumbs.db 341 | Thumbs.db:encryptable 342 | ehthumbs.db 343 | ehthumbs_vista.db 344 | 345 | # Dump file 346 | *.stackdump 347 | 348 | # Folder config file 349 | [Dd]esktop.ini 350 | 351 | # Recycle Bin used on file shares 352 | $RECYCLE.BIN/ 353 | 354 | # Windows Installer files 355 | *.cab 356 | *.msi 357 | *.msix 358 | *.msm 359 | *.msp 360 | 361 | # Windows shortcuts 362 | *.lnk 363 | 364 | ##### Linux 365 | *~ 366 | 367 | # temporary files which can be created if a process still has a handle open of a deleted file 368 | .fuse_hidden* 369 | 370 | # KDE directory preferences 371 | .directory 372 | 373 | # Linux trash folder which might appear on any partition or disk 374 | .Trash-* 375 | 376 | # .nfs files are created when an open file is removed but is still being accessed 377 | .nfs* 378 | 379 | ##### MacOS 380 | # General 381 | .DS_Store 382 | .AppleDouble 383 | .LSOverride 384 | 385 | # Icon must end with two \r 386 | Icon 387 | 388 | # Thumbnails 389 | ._* 390 | 391 | # Files that might appear in the root of a volume 392 | .DocumentRevisions-V100 393 | .fseventsd 394 | .Spotlight-V100 395 | .TemporaryItems 396 | .Trashes 397 | .VolumeIcon.icns 398 | .com.apple.timemachine.donotpresent 399 | 400 | # Directories potentially created on remote AFP share 401 | .AppleDB 402 | .AppleDesktop 403 | Network Trash Folder 404 | Temporary Items 405 | .apdisk 406 | 407 | ##### Backup 408 | *.bak 409 | *.gho 410 | *.ori 411 | *.orig 412 | *.tmp 413 | 414 | ##### GPG 415 | secring.* 416 | 417 | ##### Dropbox 418 | # Dropbox settings and caches 419 | .dropbox 420 | .dropbox.attr 421 | .dropbox.cache 422 | 423 | ##### SynopsysVCS 424 | # Waveform formats 425 | *.vcd 426 | *.vpd 427 | *.evcd 428 | *.fsdb 429 | 430 | # Default name of the simulation executable. A different name can be 431 | # specified with this switch (the associated daidir database name is 432 | # also taken from here): -o / 433 | simv 434 | 435 | # Generated for Verilog and VHDL top configs 436 | simv.daidir/ 437 | simv.db.dir/ 438 | 439 | # Infrastructure necessary to co-simulate SystemC models with 440 | # Verilog/VHDL models. An alternate directory may be specified with this 441 | # switch: -Mdir= 442 | csrc/ 443 | 444 | # Log file - the following switch allows to specify the file that will be 445 | # used to write all messages from simulation: -l 446 | *.log 447 | 448 | # Coverage results (generated with urg) and database location. The 449 | # following switch can also be used: urg -dir .vdb 450 | simv.vdb/ 451 | urgReport/ 452 | 453 | # DVE and UCLI related files. 454 | DVEfiles/ 455 | ucli.key 456 | 457 | # When the design is elaborated for DirectC, the following file is created 458 | # with declarations for C/C++ functions. 
459 | vc_hdrs.h 460 | 461 | ##### SVN 462 | .svn/ 463 | 464 | ##### Mercurial 465 | .hg/ 466 | .hgignore 467 | .hgsigs 468 | .hgsub 469 | .hgsubstate 470 | .hgtags 471 | 472 | ##### Bazaar 473 | .bzr/ 474 | .bzrignore 475 | 476 | ##### CVS 477 | /CVS/* 478 | **/CVS/* 479 | .cvsignore 480 | */.cvsignore 481 | 482 | ##### TortoiseGit 483 | # Project-level settings 484 | /.tgitconfig 485 | 486 | ##### PuTTY 487 | # Private key 488 | *.ppk 489 | 490 | ##### Vim 491 | # Swap 492 | [._]*.s[a-v][a-z] 493 | !*.svg # comment out if you don't need vector files 494 | [._]*.sw[a-p] 495 | [._]s[a-rt-v][a-z] 496 | [._]ss[a-gi-z] 497 | [._]sw[a-p] 498 | 499 | # Session 500 | Session.vim 501 | Sessionx.vim 502 | 503 | # Temporary 504 | .netrwhist 505 | *~ 506 | # Auto-generated tag files 507 | tags 508 | # Persistent undo 509 | [._]*.un~ 510 | 511 | ##### Emacs 512 | # -*- mode: gitignore; -*- 513 | *~ 514 | \#*\# 515 | /.emacs.desktop 516 | /.emacs.desktop.lock 517 | *.elc 518 | auto-save-list 519 | tramp 520 | .\#* 521 | 522 | # Org-mode 523 | .org-id-locations 524 | *_archive 525 | 526 | # flymake-mode 527 | *_flymake.* 528 | 529 | # eshell files 530 | /eshell/history 531 | /eshell/lastdir 532 | 533 | # elpa packages 534 | /elpa/ 535 | 536 | # reftex files 537 | *.rel 538 | 539 | # AUCTeX auto folder 540 | /auto/ 541 | 542 | # cask packages 543 | .cask/ 544 | dist/ 545 | 546 | # Flycheck 547 | flycheck_*.el 548 | 549 | # server auth directory 550 | /server/ 551 | 552 | # projectiles files 553 | .projectile 554 | 555 | # directory configuration 556 | .dir-locals.el 557 | 558 | # network security 559 | /network-security.data 560 | 561 | ##### SublimeText 562 | # Cache files for Sublime Text 563 | *.tmlanguage.cache 564 | *.tmPreferences.cache 565 | *.stTheme.cache 566 | 567 | # Workspace files are user-specific 568 | *.sublime-workspace 569 | 570 | # Project files should be checked into the repository, unless a significant 571 | # proportion of contributors will probably not be using Sublime Text 572 | # *.sublime-project 573 | 574 | # SFTP configuration file 575 | sftp-config.json 576 | sftp-config-alt*.json 577 | 578 | # Package control specific files 579 | Package Control.last-run 580 | Package Control.ca-list 581 | Package Control.ca-bundle 582 | Package Control.system-ca-bundle 583 | Package Control.cache/ 584 | Package Control.ca-certs/ 585 | Package Control.merged-ca-bundle 586 | Package Control.user-ca-bundle 587 | oscrypto-ca-bundle.crt 588 | bh_unicode_properties.cache 589 | 590 | # Sublime-github package stores a github token in this file 591 | # https://packagecontrol.io/packages/sublime-github 592 | GitHub.sublime-settings 593 | 594 | ##### Notepad++ 595 | # Notepad++ backups # 596 | *.bak 597 | 598 | ##### TextMate 599 | *.tmproj 600 | *.tmproject 601 | tmtags 602 | 603 | ##### VisualStudioCode 604 | .vscode/* 605 | !.vscode/settings.json 606 | !.vscode/tasks.json 607 | !.vscode/launch.json 608 | !.vscode/extensions.json 609 | *.code-workspace 610 | 611 | # Local History for Visual Studio Code 612 | .history/ 613 | 614 | ##### NetBeans 615 | **/nbproject/private/ 616 | **/nbproject/Makefile-*.mk 617 | **/nbproject/Package-*.bash 618 | build/ 619 | nbbuild/ 620 | dist/ 621 | nbdist/ 622 | .nb-gradle/ 623 | 624 | ##### JetBrains 625 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider 626 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 627 | 628 | # User-specific stuff 629 | 
.idea/**/workspace.xml 630 | .idea/**/tasks.xml 631 | .idea/**/usage.statistics.xml 632 | .idea/**/dictionaries 633 | .idea/**/shelf 634 | 635 | # Generated files 636 | .idea/**/contentModel.xml 637 | 638 | # Sensitive or high-churn files 639 | .idea/**/dataSources/ 640 | .idea/**/dataSources.ids 641 | .idea/**/dataSources.local.xml 642 | .idea/**/sqlDataSources.xml 643 | .idea/**/dynamic.xml 644 | .idea/**/uiDesigner.xml 645 | .idea/**/dbnavigator.xml 646 | 647 | # Gradle 648 | .idea/**/gradle.xml 649 | .idea/**/libraries 650 | 651 | # Gradle and Maven with auto-import 652 | # When using Gradle or Maven with auto-import, you should exclude module files, 653 | # since they will be recreated, and may cause churn. Uncomment if using 654 | # auto-import. 655 | # .idea/artifacts 656 | # .idea/compiler.xml 657 | # .idea/jarRepositories.xml 658 | # .idea/modules.xml 659 | # .idea/*.iml 660 | # .idea/modules 661 | # *.iml 662 | # *.ipr 663 | 664 | # CMake 665 | cmake-build-*/ 666 | 667 | # Mongo Explorer plugin 668 | .idea/**/mongoSettings.xml 669 | 670 | # File-based project format 671 | *.iws 672 | 673 | # IntelliJ 674 | out/ 675 | 676 | # mpeltonen/sbt-idea plugin 677 | .idea_modules/ 678 | 679 | # JIRA plugin 680 | atlassian-ide-plugin.xml 681 | 682 | # Cursive Clojure plugin 683 | .idea/replstate.xml 684 | 685 | # Crashlytics plugin (for Android Studio and IntelliJ) 686 | com_crashlytics_export_strings.xml 687 | crashlytics.properties 688 | crashlytics-build.properties 689 | fabric.properties 690 | 691 | # Editor-based Rest Client 692 | .idea/httpRequests 693 | 694 | # Android studio 3.1+ serialized cache file 695 | .idea/caches/build_file_checksums.ser 696 | 697 | ##### Eclipse 698 | .metadata 699 | bin/ 700 | tmp/ 701 | *.tmp 702 | *.bak 703 | *.swp 704 | *~.nib 705 | local.properties 706 | .settings/ 707 | .loadpath 708 | .recommenders 709 | 710 | # External tool builders 711 | .externalToolBuilders/ 712 | 713 | # Locally stored "Eclipse launch configurations" 714 | *.launch 715 | 716 | # PyDev specific (Python IDE for Eclipse) 717 | *.pydevproject 718 | 719 | # CDT-specific (C/C++ Development Tooling) 720 | .cproject 721 | 722 | # CDT- autotools 723 | .autotools 724 | 725 | # Java annotation processor (APT) 726 | .factorypath 727 | 728 | # PDT-specific (PHP Development Tools) 729 | .buildpath 730 | 731 | # sbteclipse plugin 732 | .target 733 | 734 | # Tern plugin 735 | .tern-project 736 | 737 | # TeXlipse plugin 738 | .texlipse 739 | 740 | # STS (Spring Tool Suite) 741 | .springBeans 742 | 743 | # Code Recommenders 744 | .recommenders/ 745 | 746 | # Annotation Processing 747 | .apt_generated/ 748 | .apt_generated_test/ 749 | 750 | # Scala IDE specific (Scala & Java development for Eclipse) 751 | .cache-main 752 | .scala_dependencies 753 | .worksheet 754 | 755 | # Uncomment this line if you wish to ignore the project description file. 
756 | # Typically, this file would be tracked if it contains build/dependency configurations: 757 | #.project 758 | 759 | ##### Dreamweaver 760 | # DW Dreamweaver added files 761 | _notes 762 | _compareTemp 763 | configs/ 764 | dwsync.xml 765 | dw_php_codehinting.config 766 | *.mno 767 | 768 | ##### CodeKit 769 | # General CodeKit files to ignore 770 | config.codekit 771 | config.codekit3 772 | /min 773 | 774 | ##### Gradle 775 | .gradle 776 | **/build/ 777 | !src/**/build/ 778 | 779 | # Ignore Gradle GUI config 780 | gradle-app.setting 781 | 782 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 783 | !gradle-wrapper.jar 784 | 785 | # Cache of project 786 | .gradletasknamecache 787 | 788 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898 789 | # gradle/wrapper/gradle-wrapper.properties 790 | 791 | ##### Composer 792 | composer.phar 793 | /vendor/ 794 | 795 | # Commit your application's lock file https://getcomposer.org/doc/01-basic-usage.md#commit-your-composer-lock-file-to-version-control 796 | # You may choose to ignore a library lock file http://getcomposer.org/doc/02-libraries.md#lock-file 797 | composer.lock 798 | 799 | ##### PHP CodeSniffer 800 | # gitignore for the PHP Codesniffer framework 801 | # website: https://github.com/squizlabs/PHP_CodeSniffer 802 | # 803 | # Recommended template: PHP.gitignore 804 | 805 | /wpcs/* 806 | 807 | ##### SASS 808 | .sass-cache/ 809 | *.css.map 810 | *.sass.map 811 | *.scss.map 812 | 813 | # serverless files 814 | .serverless 815 | *.vsidx 816 | *.woff 817 | *.woff2 818 | *.eot 819 | *.svg 820 | *.ttf 821 | 822 | 823 | DashboardTemplates/ 824 | .idea/ 825 | 826 | # Generated by Cargo 827 | # will have compiled files and executables 828 | debug/ 829 | target/ 830 | 831 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 832 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 833 | Cargo.lock 834 | 835 | # These are backup files generated by rustfmt 836 | **/*.rs.bk 837 | 838 | # MSVC Windows builds of rustc generate these, which store debugging information 839 | *.pdb 840 | 841 | intermediate directory/ 842 | output directory/ 843 | goliath_*/intermediate\ directory/ 844 | goliath_*/output\ directory/ 845 | goliath_*/vcpkg_installed/ -------------------------------------------------------------------------------- /src/detections/idt.cpp: -------------------------------------------------------------------------------- 1 | #include "../includes/includes.h" 2 | #include "../includes/func_defs.hpp" 3 | #include "../utility/physmem/physmem.hpp" 4 | namespace idt { 5 | char allocated_memory_page[0x1000]; 6 | segment_descriptor_register_32 comp_idt; 7 | segment_descriptor_register_64 actual_idt; 8 | 9 | 10 | /* 11 | List of checks: 12 | 13 | detection_1 -> #UD due to LOCK prefix 14 | detection_2 -> #PF due to invalid memory operand 15 | detection_3 -> SIDT with operand not mapped in cr3 but in TLB 16 | detection_4 -> Timing check (500 tsc ticks acceptable) 17 | detection_5 -> Compatibility mode idtr storing 18 | detection_6 -> Non-canonical address passed as memory operand 19 | detection_7 -> Non-canonical address passed as memory operand in SS segment -> #SS 20 | detection_8 -> Executing sidt with cpl = 3 but with cr4.umip = 0 -> Should go through 21 | detection_9 -> Executing sidt with cpl = 3 but with cr4.umip = 1 -> #GP(0) should be caused 22 | */ 23 | namespace storing { 24 | bool detection_1(void) { 25 | bool hypervisor_detected =
false; 26 | 27 | // Lock prefix should cause an exception 28 | segment_descriptor_register_64 idtr; 29 | __try { 30 | __lock_sidt(&idtr); 31 | hypervisor_detected = true; 32 | } 33 | __except (EXCEPTION_EXECUTE_HANDLER) { 34 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != invalid_opcode) 35 | hypervisor_detected = true; 36 | } 37 | 38 | return hypervisor_detected; 39 | } 40 | 41 | bool detection_2(void) { 42 | bool hypervisor_detected = false; 43 | 44 | // Invalid operand should cause an exception 45 | __try { 46 | // This will cause #PF and not #GP as 0xdead is canonical (; 47 | __sidt((void*)0xdead); // If there actually is a va 0xdead then you honestly deserve that dub 48 | hypervisor_detected = true; 49 | } 50 | __except (EXCEPTION_EXECUTE_HANDLER) { 51 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != page_fault) 52 | hypervisor_detected = true; 53 | } 54 | 55 | return hypervisor_detected; 56 | } 57 | 58 | bool detection_3(void) { 59 | 60 | volatile segment_descriptor_register_64* idtr_in_tlb = (volatile segment_descriptor_register_64*)allocated_memory_page; 61 | // Put the part of the memory page we will use into the tlb 62 | // so that the cpu will be able to access it when executing sidt 63 | for (uint32_t i = 0; i < sizeof(segment_descriptor_register_64); i++) { 64 | volatile uint8_t dummy = *(uint8_t*)((uint64_t)allocated_memory_page + i); 65 | UNREFERENCED_PARAMETER(dummy); 66 | } 67 | 68 | uint64_t stored_flags; 69 | if (!physmem::paging_manipulation::win_destroy_memory_page_mapping(allocated_memory_page, stored_flags)) { 70 | return false; 71 | } 72 | 73 | if (physmem::paging_manipulation::is_memory_page_mapped(allocated_memory_page)) { 74 | return false; 75 | } 76 | 77 | // The instruction should go through as the idtr page is still in the tlb (but not mapped in cr3!) 
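// Added note (typical behaviour, not guaranteed for every hypervisor): an hv that intercepts sidt
// via descriptor-table exiting has to emulate the store, which means translating the operand
// address with a software walk of the guest page tables. That walk fails here because the mapping
// was just destroyed and only the stale TLB entry remains, so the emulated access raises a fault
// that bare metal never would.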
78 | bool hypervisor_detected = false; 79 | __try { 80 | __sidt((void*)idtr_in_tlb); 81 | } 82 | 83 | __except (EXCEPTION_EXECUTE_HANDLER) { 84 | hypervisor_detected = true; // Should not happen on bare metal 85 | } 86 | 87 | physmem::paging_manipulation::win_restore_memory_page_mapping(allocated_memory_page, stored_flags); 88 | 89 | return hypervisor_detected; 90 | } 91 | 92 | bool detection_4(void) { 93 | uint64_t lowest_tsc = MAXULONG64; 94 | segment_descriptor_register_64 idtr; 95 | 96 | for (int i = 0; i < 10; i++) { 97 | 98 | _mm_lfence(); 99 | uint64_t start = __rdtsc(); 100 | _mm_lfence(); 101 | 102 | __sidt(&idtr); 103 | 104 | _mm_lfence(); 105 | uint64_t end = __rdtsc(); 106 | _mm_lfence(); 107 | 108 | uint64_t delta = (end - start); 109 | if (delta < lowest_tsc) 110 | lowest_tsc = delta; 111 | 112 | // Account for hypervisors over adjusting the tsc 113 | if (delta & (1ull << 63)) { 114 | return true; 115 | } 116 | } 117 | 118 | return lowest_tsc > MAX_ACCEPTABLE_TSC; 119 | } 120 | 121 | bool detection_5(void) { 122 | static uint8_t compatibility_shellcode[] = { 123 | 0xB8, 0x00, 0x00, 0x00, 0x00, // mov eax, 0x00000000 124 | 0x0F, 0x01, 0x08, // sidt [eax] 125 | 0xC3 // ret 126 | }; 127 | 128 | *(uint32_t*)(compatibility_shellcode + 1) = safety_net::execution_mode::get_compatibility_data_page_address(); 129 | 130 | void* data_page = safety_net::execution_mode::get_compatibility_data_page(); // This is where the idtr will be stored 131 | memset(data_page, 0, 0x1000); 132 | 133 | bool result = false; 134 | __try { 135 | result = safety_net::execution_mode::execute_32_bit_shellcode(compatibility_shellcode, sizeof(compatibility_shellcode)); 136 | } 137 | __except (EXCEPTION_EXECUTE_HANDLER) { 138 | result = true; // Something went wrong in mode switching emulation of the hypervisor 139 | } 140 | 141 | if (!result) 142 | return true; 143 | 144 | segment_descriptor_register_64 curr_idt; 145 | __sidt(&curr_idt); 146 | 147 | comp_idt = *(segment_descriptor_register_32*)data_page; 148 | actual_idt = curr_idt; 149 | 150 | 151 | // If these two match it means that a shitty hypervisor is present 152 | // that stored 10 bytes instead of 6; 153 | if (memcmp((void*)&curr_idt, data_page, sizeof(segment_descriptor_register_64)) == 0) { 154 | return true; 155 | } 156 | 157 | return false; 158 | } 159 | 160 | bool detection_6(void) { 161 | bool hypervisor_detected = false; 162 | 163 | // Invalid operand should cause an exception 164 | __try { 165 | // This will cause #GP as we pass a non canonical address as an operand 166 | __gp_fault_sidt(); 167 | hypervisor_detected = true; 168 | } 169 | __except (EXCEPTION_EXECUTE_HANDLER) { 170 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != general_protection) 171 | hypervisor_detected = true; 172 | } 173 | 174 | return hypervisor_detected; 175 | } 176 | 177 | bool detection_7(void) { 178 | bool hypervisor_detected = false; 179 | 180 | // Invalid operand should cause an exception 181 | __try { 182 | // This will cause #SS as we pass a non canonical address and we do so in rsp (-> stack segment) 183 | __ss_fault_sidt(); 184 | hypervisor_detected = true; 185 | } 186 | __except (EXCEPTION_EXECUTE_HANDLER) { 187 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != stack_segment_fault) 188 | hypervisor_detected = true; 189 | } 190 | 191 | return hypervisor_detected; 192 | } 193 | 194 | bool detection_8(void) { 195 | cr4 curr_cr4; 196 | cr4 new_cr4; 197 | 198 | curr_cr4.flags = __readcr4(); 199 | new_cr4.flags = 
curr_cr4.flags; 200 | 201 | new_cr4.usermode_instruction_prevention = 0; 202 | new_cr4.smap_enable = 0; 203 | new_cr4.smep_enable = 0; 204 | __writecr4(new_cr4.flags); 205 | 206 | rflags curr_flags; 207 | rflags new_flags; 208 | curr_flags.flags = __readeflags(); 209 | new_flags.flags = curr_flags.flags; 210 | 211 | new_flags.alignment_check_flag = 0; 212 | __writeeflags(new_flags.flags); 213 | 214 | if (!safety_net::cpl::switch_to_cpl_3()) { 215 | __writecr4(curr_cr4.flags); 216 | __writeeflags(curr_flags.flags); 217 | return false; 218 | } 219 | 220 | bool hypervisor_detected = false; 221 | segment_descriptor_register_64 idtr; 222 | 223 | __try { 224 | // This should not cause an exception since we disable cr4.usermode_instruction_prevention 225 | __sidt(&idtr); 226 | } 227 | __except (EXCEPTION_EXECUTE_HANDLER) { 228 | hypervisor_detected = true; 229 | } 230 | 231 | if (!safety_net::cpl::switch_to_cpl_0()) { 232 | __writecr4(curr_cr4.flags); 233 | __writeeflags(curr_flags.flags); 234 | return false; 235 | } 236 | 237 | __writecr4(curr_cr4.flags); 238 | __writeeflags(curr_flags.flags); 239 | 240 | return hypervisor_detected; 241 | } 242 | 243 | bool detection_9(void) { 244 | cr4 curr_cr4; 245 | cr4 new_cr4; 246 | 247 | curr_cr4.flags = __readcr4(); 248 | new_cr4.flags = curr_cr4.flags; 249 | 250 | new_cr4.usermode_instruction_prevention = 1; 251 | new_cr4.smap_enable = 0; 252 | new_cr4.smep_enable = 0; 253 | __writecr4(new_cr4.flags); 254 | 255 | rflags curr_flags; 256 | rflags new_flags; 257 | curr_flags.flags = __readeflags(); 258 | new_flags.flags = curr_flags.flags; 259 | 260 | new_flags.alignment_check_flag = 0; 261 | __writeeflags(new_flags.flags); 262 | 263 | if (!safety_net::cpl::switch_to_cpl_3()) { 264 | __writecr4(curr_cr4.flags); 265 | __writeeflags(curr_flags.flags); 266 | return false; 267 | } 268 | 269 | bool hypervisor_detected = false; 270 | segment_descriptor_register_64 idtr; 271 | 272 | __try { 273 | // This should cause an exception since we set cr4.usermode_instruction_prevention 274 | __sidt(&idtr); 275 | hypervisor_detected = true; 276 | } 277 | __except (EXCEPTION_EXECUTE_HANDLER) { 278 | idt_regs_ecode_t* record = safety_net::idt::get_core_last_interrupt_record(); 279 | if (record->exception_vector != general_protection || record->error_code != 0) 280 | hypervisor_detected = true; 281 | } 282 | 283 | if (!safety_net::cpl::switch_to_cpl_0()) { 284 | __writecr4(curr_cr4.flags); 285 | __writeeflags(curr_flags.flags); 286 | return false; 287 | } 288 | 289 | __writecr4(curr_cr4.flags); 290 | __writeeflags(curr_flags.flags); 291 | 292 | return hypervisor_detected; 293 | } 294 | 295 | void execute_detections(void) { 296 | safety_net_t storage; 297 | if (!safety_net::start_safety_net(storage)) 298 | return; 299 | 300 | const int num_detections = 9; 301 | bool detection_results[num_detections]; 302 | bool (*detections[])(void) = { detection_1, detection_2, detection_3, detection_4, detection_5, detection_6, detection_7, detection_8, detection_9 }; 303 | 304 | for (int i = 0; i < num_detections; ++i) { 305 | detection_results[i] = detections[i](); 306 | } 307 | 308 | safety_net::stop_safety_net(storage); 309 | 310 | for (int i = 0; i < num_detections; ++i) { 311 | if (detection_results[i]) { 312 | log_error_indent(2, "Failed detection %d", i + 1); 313 | } 314 | else { 315 | log_success_indent(2, "Passed detection %d", i + 1); 316 | } 317 | } 318 | } 319 | }; 320 | 321 | /* 322 | List of checks: 323 | 324 | detection_1 -> #UD due to LOCK prefix 325 | detection_2 -> #PF
due to invalid memory operand 326 | detection_3 -> LIDT with operand not mapped in cr3 but in TLB 327 | detection_4 -> Timing check (500 tsc ticks acceptable) 328 | detection_5 -> Non-canonical address passed as memory operand 329 | detection_6 -> Non-canonical address passed as memory operand in SS segment -> #SS 330 | detection_7 -> Executing lidt with cpl = 3 and cr4.umip = 0 -> Should still cause #GP(0), as lidt at cpl = 3 is not affected by the state of cr4.umip 331 | */ 332 | namespace loading { 333 | 334 | bool detection_1(void) { 335 | bool hypervisor_detected = false; 336 | 337 | // Lock prefix should cause an exception 338 | segment_descriptor_register_64 idtr; 339 | __sidt(&idtr); // Just in case the hv actually loads the idtr 340 | 341 | __try { 342 | __lock_lidt(&idtr); 343 | hypervisor_detected = true; 344 | } 345 | __except (EXCEPTION_EXECUTE_HANDLER) { 346 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != invalid_opcode) 347 | hypervisor_detected = true; 348 | } 349 | 350 | return hypervisor_detected; 351 | } 352 | 353 | bool detection_2(void) { 354 | bool hypervisor_detected = false; 355 | 356 | // Invalid operand should cause a page fault 357 | __try { 358 | // This should cause #PF since 0xdead is canonical but not mapped 359 | __lidt((void*)0xdead); 360 | hypervisor_detected = true; 361 | } 362 | __except (EXCEPTION_EXECUTE_HANDLER) { 363 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != page_fault) 364 | hypervisor_detected = true; 365 | } 366 | 367 | return hypervisor_detected; 368 | } 369 | 370 | bool detection_3(void) { 371 | memset(allocated_memory_page, 0, 0x1000); 372 | 373 | segment_descriptor_register_64 idtr; 374 | __sidt(&idtr); 375 | 376 | // This memcpy also pulls the va's translation into the tlb 377 | memcpy(allocated_memory_page, &idtr, sizeof(idtr)); 378 | 379 | uint64_t stored_flags; 380 | if (!physmem::paging_manipulation::win_destroy_memory_page_mapping(allocated_memory_page, stored_flags)) 381 | return false; 382 | 383 | if (physmem::paging_manipulation::is_memory_page_mapped(allocated_memory_page)) 384 | return false; 385 | 386 | // The instruction should go through as the idtr page is still in the tlb (but not mapped in cr3!)
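// Added note: the page still holds a byte-for-byte copy of the live idtr (memcpy'd above), so if
// the lidt goes through it simply reloads the values that are already active. As with the sidt
// variant, an intercepting hypervisor typically re-walks the guest page tables in software and
// injects a fault, since only the TLB entry survives.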
387 | bool hypervisor_detected = false; 388 | __try { 389 | __lidt(allocated_memory_page); 390 | } 391 | 392 | __except (EXCEPTION_EXECUTE_HANDLER) { 393 | hypervisor_detected = true; // Should not happen on bare metal 394 | } 395 | 396 | physmem::paging_manipulation::win_restore_memory_page_mapping(allocated_memory_page, stored_flags); 397 | return hypervisor_detected; 398 | } 399 | 400 | bool detection_4(void) { 401 | uint64_t lowest_tsc = MAXULONG64; 402 | segment_descriptor_register_64 idtr; 403 | 404 | // Store the current idtr into our local buffer so that lidt loads valid values during timing 405 | __sidt(&idtr); 406 | 407 | for (int i = 0; i < 10; i++) { 408 | 409 | _mm_lfence(); 410 | uint64_t start = __rdtsc(); 411 | _mm_lfence(); 412 | 413 | __lidt(&idtr); 414 | 415 | _mm_lfence(); 416 | uint64_t end = __rdtsc(); 417 | _mm_lfence(); 418 | 419 | uint64_t delta = (end - start); 420 | if (delta < lowest_tsc) 421 | lowest_tsc = delta; 422 | 423 | if (delta & (1ull << 63)) { 424 | return true; 425 | } 426 | } 427 | 428 | return lowest_tsc > MAX_ACCEPTABLE_TSC; 429 | } 430 | 431 | bool detection_5(void) { 432 | bool hypervisor_detected = false; 433 | 434 | // Non-canonical address should cause a general protection fault 435 | __try { 436 | __gp_fault_lidt(); 437 | hypervisor_detected = true; 438 | } 439 | __except (EXCEPTION_EXECUTE_HANDLER) { 440 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != general_protection) 441 | hypervisor_detected = true; 442 | } 443 | 444 | return hypervisor_detected; 445 | } 446 | 447 | bool detection_6(void) { 448 | bool hypervisor_detected = false; 449 | 450 | // Non-canonical address in SS segment should cause a stack segment fault 451 | __try { 452 | __ss_fault_lidt(); 453 | hypervisor_detected = true; 454 | } 455 | __except (EXCEPTION_EXECUTE_HANDLER) { 456 | if (safety_net::idt::get_core_last_interrupt_record()->exception_vector != stack_segment_fault) 457 | hypervisor_detected = true; 458 | } 459 | 460 | return hypervisor_detected; 461 | } 462 | 463 | bool detection_7(void) { 464 | segment_descriptor_register_64 idtr; 465 | __sidt(&idtr); 466 | 467 | cr4 curr_cr4; 468 | cr4 new_cr4; 469 | 470 | curr_cr4.flags = __readcr4(); 471 | new_cr4.flags = curr_cr4.flags; 472 | 473 | new_cr4.usermode_instruction_prevention = 0; 474 | new_cr4.smap_enable = 0; 475 | new_cr4.smep_enable = 0; 476 | __writecr4(new_cr4.flags); 477 | 478 | if (!safety_net::cpl::switch_to_cpl_3()) { 479 | __writecr4(curr_cr4.flags); 480 | return false; 481 | } 482 | 483 | bool hypervisor_detected = false; 484 | __try { 485 | // This should cause an exception even with cr4.umip = 0, as lidt is privileged and not affected by cr4.usermode_instruction_prevention 486 | __lidt(&idtr); 487 | hypervisor_detected = true; 488 | } 489 | __except (EXCEPTION_EXECUTE_HANDLER) { 490 | idt_regs_ecode_t* record = safety_net::idt::get_core_last_interrupt_record(); 491 | if (record->exception_vector != general_protection || record->error_code != 0) 492 | hypervisor_detected = true; 493 | } 494 | 495 | if (!safety_net::cpl::switch_to_cpl_0()) { 496 | __writecr4(curr_cr4.flags); 497 | return false; 498 | } 499 | 500 | __writecr4(curr_cr4.flags); 501 | return hypervisor_detected; 502 | } 503 | 504 | void execute_detections(void) { 505 | safety_net_t storage; 506 | if (!safety_net::start_safety_net(storage)) 507 | return; 508 | 509 | const int num_detections = 7; 510 | bool detection_results[num_detections]; 511 | bool (*detections[])(void) = { detection_1, detection_2, detection_3, detection_4, detection_5, detection_6,
detection_7 }; 512 | 513 | for (int i = 0; i < num_detections; ++i) { 514 | detection_results[i] = detections[i](); 515 | } 516 | 517 | safety_net::stop_safety_net(storage); 518 | 519 | for (int i = 0; i < num_detections; ++i) { 520 | if (detection_results[i]) { 521 | log_error_indent(2, "Failed detection %d", i + 1); 522 | } 523 | else { 524 | log_success_indent(2, "Passed detection %d", i + 1); 525 | } 526 | } 527 | } 528 | }; 529 | 530 | void execute_idt_detections(void) { 531 | memset(allocated_memory_page, 0, 0x1000); 532 | 533 | log_new_line(); 534 | log_info_indent(1, "SIDT"); 535 | storing::execute_detections(); 536 | log_new_line(); 537 | 538 | log_info_indent(1, "LIDT"); 539 | loading::execute_detections(); 540 | log_new_line(); 541 | } 542 | }; -------------------------------------------------------------------------------- /src/utility/safety_net/safety_net.cpp: -------------------------------------------------------------------------------- 1 | #include "../includes/includes.h" 2 | #include "../includes/func_defs.hpp" 3 | #include "../includes/structs.hpp" 4 | #include "../utility/physmem/physmem.hpp" 5 | 6 | #include 7 | 8 | namespace safety_net { 9 | bool inited = false; 10 | 11 | uint64_t g_image_base; 12 | uint64_t g_image_size; 13 | 14 | 15 | namespace gdt { 16 | // Compile time variables 17 | extern "C" constexpr segment_selector zero_descriptor_selector = {0, 0, 0}; 18 | 19 | extern "C" constexpr segment_selector constructed_cpl0_cs = { 0, 0, 1 }; 20 | extern "C" constexpr segment_selector constructed_cpl0_ss = { 0, 0, 2 }; 21 | 22 | extern "C" constexpr segment_selector constructed_cpl3_ss = { 3, 0, 3 }; 23 | extern "C" constexpr segment_selector constructed_cpl3_cs = { 3, 0, 4 }; 24 | 25 | extern "C" constexpr segment_selector constructed_tr = { 0, 0, 5 }; // Takes up 2 slots 26 | 27 | extern "C" constexpr segment_selector compatibility_constructed_cpl0_cs = { 0, 0, 7 }; 28 | extern "C" constexpr segment_selector compatibility_constructed_cpl0_ss = { 0, 0, 8 }; 29 | 30 | extern "C" constexpr segment_selector constructed_ds_es_fs_gs = { 0, 0, 9 }; 31 | 32 | constexpr uint16_t constructed_gdt_size = 10; 33 | 34 | // Runtime data 35 | bool gdt_inited = false; 36 | 37 | void* interrupt_stack = 0; 38 | task_state_segment_64* my_tss = 0; 39 | segment_descriptor_32* my_gdt = 0; 40 | segment_descriptor_register_64 my_gdtr = { 0 }; 41 | 42 | /* 43 | Utility / Exposed API's 44 | */ 45 | 46 | segment_descriptor_register_64 get_constructed_gdtr(void) { 47 | return my_gdtr; 48 | } 49 | 50 | void log_segment_descriptor_64(segment_descriptor_64* descriptor, const char* segment_name) { 51 | // Calculate the full base address 52 | uint64_t base_address = ((uint64_t)descriptor->base_address_upper << 32) | 53 | (descriptor->base_address_high << 24) | 54 | (descriptor->base_address_middle << 16) | 55 | descriptor->base_address_low; 56 | 57 | // Calculate the full segment limit 58 | uint32_t segment_limit = (descriptor->segment_limit_high << 16) | 59 | descriptor->segment_limit_low; 60 | 61 | // Check granularity flag to determine the effective segment limit 62 | if (descriptor->granularity) { 63 | segment_limit = (segment_limit << 12) | 0xFFF; // Granularity set, multiply by 4 KB 64 | } 65 | 66 | // Log information about the segment descriptor 67 | log_info("Segment Descriptor (%s):", segment_name); 68 | log_info(" Base Address: 0x%016llX", base_address); 69 | log_info(" Segment Limit: 0x%X", segment_limit); 70 | log_info(" Type: 0x%X", descriptor->type); 71 | log_info(" Descriptor Type 
(S flag): 0x%X", descriptor->descriptor_type); 72 | log_info(" DPL (Descriptor Privilege Level): 0x%X", descriptor->descriptor_privilege_level); 73 | log_info(" Present (P flag): 0x%X", descriptor->present); 74 | log_info(" Granularity (G flag): 0x%X", descriptor->granularity); 75 | log_info(" Default/Big (D flag): 0x%X", descriptor->default_big); 76 | log_info(" Long Mode (L flag): 0x%X", descriptor->long_mode); 77 | log_info(" System: 0x%X", descriptor->system); 78 | log_info(" Is System Descriptor: 0x%X", descriptor->descriptor_type == 0 ? 1 : 0); // The system flag is the inverse of the S flag 79 | } 80 | 81 | void log_segment_descriptor_32(segment_descriptor_32* descriptor, const char* segment_name) { 82 | // Calculate full base address 83 | uint32_t base_address = (descriptor->base_address_high << 24) | 84 | (descriptor->base_address_middle << 16) | 85 | descriptor->base_address_low; 86 | 87 | // Calculate full segment limit 88 | uint32_t segment_limit = (descriptor->segment_limit_high << 16) | 89 | descriptor->segment_limit_low; 90 | 91 | // Check granularity flag to determine the effective segment limit 92 | if (descriptor->granularity) { 93 | segment_limit = (segment_limit << 12) | 0xFFF; // Granularity set, multiply by 4 KB 94 | } 95 | 96 | log_info("Segment Descriptor (%s):", segment_name); 97 | log_info(" Base Address: 0x%08X", base_address); 98 | log_info(" Segment Limit: 0x%X", segment_limit); 99 | log_info(" Type: 0x%X", descriptor->type); 100 | log_info(" Descriptor Type (S flag): 0x%X", descriptor->descriptor_type); 101 | log_info(" DPL (Descriptor Privilege Level): 0x%X", descriptor->descriptor_privilege_level); 102 | log_info(" Present (P flag): 0x%X", descriptor->present); 103 | log_info(" Granularity (G flag): 0x%X", descriptor->granularity); 104 | log_info(" Default/Big (D/B flag): 0x%X", descriptor->default_big); 105 | log_info(" Long Mode (L flag): 0x%X", descriptor->long_mode); 106 | log_info(" System: 0x%X\n", descriptor->system); 107 | } 108 | 109 | void log_segment_selector(segment_selector* selector, const char* selector_name) { 110 | // Extract values from the segment selector fields 111 | uint16_t rpl = selector->request_privilege_level; // Requested Privilege Level (RPL) 112 | uint16_t table = selector->table; // Table Indicator (0 = GDT, 1 = LDT) 113 | uint16_t index = selector->index; // Descriptor index 114 | 115 | // Determine if the selector points to the GDT or LDT 116 | const char* table_name = (table == 0) ? "GDT" : "LDT"; 117 | 118 | // Print out the segment selector details 119 | log_info("[%s] Segment Selector Details:", selector_name); 120 | log_info(" Request Privilege Level (RPL): %u", rpl); 121 | log_info(" Table Indicator (TI): %s (%u)", table_name, table); 122 | log_info(" Index: %u", index); 123 | log_info(" Raw Flags: 0x%04X", selector->flags); // Optional: print the raw flags for debugging 124 | } 125 | 126 | void log_constructed_gdt_descriptors(void) { 127 | // Retrieve the GDTR register 128 | segment_descriptor_register_64 win_gdtr; 129 | _sgdt(&win_gdtr); 130 | 131 | // Get the base address of the GDT 132 | segment_descriptor_32* win_gdt = (segment_descriptor_32*)win_gdtr.base_address; 133 | 134 | // Define segment selectors 135 | segment_selector cs, ds, ss, es, fs, gs, tr; 136 | 137 | // Read the segment selectors 138 | cs = __read_cs(); 139 | ds = __read_ds(); 140 | ss = __read_ss(); 141 | es = __read_es(); 142 | fs = __read_fs(); 143 | gs = __read_gs(); 144 | tr = __read_tr(); 145 | 146 | // Log segment selector values and their corresponding GDT entries 147 | log_info("CS: 0x%04x", *(uint16_t*)&cs); 148 | log_segment_descriptor_32(&win_gdt[cs.index], "CS"); 149 | 150 | log_info("DS: 0x%04x", *(uint16_t*)&ds); 151 | log_segment_descriptor_32(&win_gdt[ds.index], "DS"); 152 | 153 | log_info("SS: 0x%04x", *(uint16_t*)&ss); 154 | log_segment_descriptor_32(&win_gdt[ss.index], "SS"); 155 | 156 | log_info("ES: 0x%04x", *(uint16_t*)&es); 157 | log_segment_descriptor_32(&win_gdt[es.index], "ES"); 158 | 159 | log_info("FS: 0x%04x", *(uint16_t*)&fs); 160 | log_segment_descriptor_32(&win_gdt[fs.index], "FS"); 161 | 162 | log_info("GS: 0x%04x", *(uint16_t*)&gs); 163 | log_segment_descriptor_32(&win_gdt[gs.index], "GS"); 164 | 165 | log_info("TR: 0x%04x", *(uint16_t*)&tr); 166 | log_segment_descriptor_64((segment_descriptor_64*)(&win_gdt[tr.index]), "TSS"); 167 | } 168 | 169 | /* 170 | Note: We only need 1 gdt, as we lock execution 171 | to one core by disabling interrupts; no other core executes whilst the safety net is active 172 | */ 173 | bool init_gdt(void) { 174 | PHYSICAL_ADDRESS max_addr = { 0 }; 175 | max_addr.QuadPart = MAXULONG64; 176 | 177 | my_gdt = (segment_descriptor_32*)MmAllocateContiguousMemory(max(sizeof(segment_descriptor_32) * constructed_gdt_size, 0x1000), max_addr); 178 | if (!my_gdt) 179 | return false; 180 | memset(my_gdt, 0, max(sizeof(segment_descriptor_32) * constructed_gdt_size, 0x1000)); 181 | 182 | my_tss = (task_state_segment_64*)MmAllocateContiguousMemory(0x1000, max_addr); 183 | if (!my_tss) 184 | return false; 185 | memset(my_tss, 0, 0x1000); 186 | 187 | interrupt_stack = MmAllocateContiguousMemory(KERNEL_STACK_SIZE, max_addr); 188 | if (!interrupt_stack) 189 | return false; 190 | memset(interrupt_stack, 0, KERNEL_STACK_SIZE); 191 | 192 | my_tss->ist1 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 193 | my_tss->ist2 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 194 | my_tss->ist3 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 195 | my_tss->ist4 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 196 | my_tss->ist5 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 197 | my_tss->ist6 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 198 | my_tss->ist7 = (uint64_t)interrupt_stack + KERNEL_STACK_SIZE; 199 | 200 | uint64_t tss_base = reinterpret_cast<uint64_t>(my_tss); 201 | 202 | // Null descriptor 203 | segment_descriptor_32* zero_descriptor = &my_gdt[zero_descriptor_selector.index]; 204 | memset(zero_descriptor, 0, sizeof(segment_descriptor_32)); 205 | 206 | // Kernel Code Segment
(64-bit) 207 | segment_descriptor_32* cpl_0_cs_descriptor = &my_gdt[constructed_cpl0_cs.index]; 208 | memset(cpl_0_cs_descriptor, 0, sizeof(segment_descriptor_32)); 209 | cpl_0_cs_descriptor->present = 1; 210 | cpl_0_cs_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_ACCESSED; 211 | cpl_0_cs_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 212 | cpl_0_cs_descriptor->descriptor_privilege_level = 0; 213 | cpl_0_cs_descriptor->long_mode = 1; 214 | 215 | 216 | // Kernel Data Segment (64-bit) 217 | segment_descriptor_32* cpl_0_ss_descriptor = &my_gdt[constructed_cpl0_ss.index]; 218 | memset(cpl_0_ss_descriptor, 0, sizeof(segment_descriptor_32)); 219 | cpl_0_ss_descriptor->present = 1; 220 | cpl_0_ss_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_ACCESSED; 221 | cpl_0_ss_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 222 | cpl_0_ss_descriptor->descriptor_privilege_level = 0; 223 | cpl_0_ss_descriptor->default_big = 1; 224 | 225 | 226 | // User Code Segment (64-bit) 227 | segment_descriptor_32* cpl_3_cs_descriptor = &my_gdt[constructed_cpl3_cs.index]; 228 | memset(cpl_3_cs_descriptor, 0, sizeof(segment_descriptor_32)); 229 | cpl_3_cs_descriptor->present = 1; 230 | cpl_3_cs_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_ACCESSED; 231 | cpl_3_cs_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 232 | cpl_3_cs_descriptor->descriptor_privilege_level = 3; 233 | cpl_3_cs_descriptor->long_mode = 1; 234 | 235 | // User Data Segment (64-bit) 236 | segment_descriptor_32* cpl_3_ss_descriptor = &my_gdt[constructed_cpl3_ss.index]; 237 | memset(cpl_3_ss_descriptor, 0, sizeof(segment_descriptor_32)); 238 | cpl_3_ss_descriptor->present = 1; 239 | cpl_3_ss_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_ACCESSED; 240 | cpl_3_ss_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 241 | cpl_3_ss_descriptor->descriptor_privilege_level = 3; 242 | cpl_3_ss_descriptor->granularity = 1; 243 | cpl_3_ss_descriptor->default_big = 1; 244 | cpl_3_ss_descriptor->segment_limit_low = 0xFFFF; // Lower 16 bits of the segment limit. 245 | cpl_3_ss_descriptor->segment_limit_high = 0xF; // Upper 4 bits of the segment limit. 246 | 247 | // Compatibility Mode Kernel Code Segment (32-bit) 248 | segment_descriptor_32* comp_cpl_0_cs_descriptor = &my_gdt[compatibility_constructed_cpl0_cs.index]; 249 | memset(comp_cpl_0_cs_descriptor, 0, sizeof(segment_descriptor_32)); 250 | comp_cpl_0_cs_descriptor->present = 1; 251 | comp_cpl_0_cs_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_ACCESSED; 252 | comp_cpl_0_cs_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 253 | comp_cpl_0_cs_descriptor->descriptor_privilege_level = 0; 254 | comp_cpl_0_cs_descriptor->default_big = 1; // 32-bit operation size 255 | comp_cpl_0_cs_descriptor->granularity = 1; // Get the max limits 256 | comp_cpl_0_cs_descriptor->segment_limit_low = 0xFFFF; // Lower 16 bits of the segment limit. 257 | comp_cpl_0_cs_descriptor->segment_limit_high = 0xF; // Upper 4 bits of the segment limit. 
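// Added note: together with default_big = 1 above, clearing the L bit below is what makes this
// descriptor a 32-bit compatibility-mode code segment; L = 1 combined with D/B = 1 is a reserved
// encoding in IA-32e mode.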
258 | comp_cpl_0_cs_descriptor->long_mode = 0; 259 | 260 | // Compatibility Mode Kernel Data Segment (32-bit) 261 | segment_descriptor_32* comp_cpl_0_ss_descriptor = &my_gdt[compatibility_constructed_cpl0_ss.index]; 262 | memset(comp_cpl_0_ss_descriptor, 0, sizeof(segment_descriptor_32)); 263 | comp_cpl_0_ss_descriptor->present = 1; 264 | comp_cpl_0_ss_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_ACCESSED; 265 | comp_cpl_0_ss_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 266 | comp_cpl_0_ss_descriptor->descriptor_privilege_level = 0; 267 | comp_cpl_0_ss_descriptor->default_big = 1; // 32-bit operation size 268 | comp_cpl_0_ss_descriptor->granularity = 1; // Get the max limits 269 | comp_cpl_0_ss_descriptor->segment_limit_low = 0xFFFF; // Lower 16 bits of the segment limit. 270 | comp_cpl_0_ss_descriptor->segment_limit_high = 0xF; // Upper 4 bits of the segment limit. 271 | 272 | // Task State Segment 273 | segment_descriptor_64* tss_descriptor = reinterpret_cast<segment_descriptor_64*>(&my_gdt[constructed_tr.index]); 274 | memset(tss_descriptor, 0, sizeof(segment_descriptor_64)); 275 | 276 | tss_descriptor->present = 1; 277 | tss_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_TSS_BUSY; 278 | tss_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_SYSTEM; 279 | tss_descriptor->descriptor_privilege_level = 0; 280 | tss_descriptor->segment_limit_low = sizeof(task_state_segment_64) - 1; 281 | tss_descriptor->base_address_low = (tss_base >> 00) & 0xFFFF; 282 | tss_descriptor->base_address_middle = (tss_base >> 16) & 0xFF; 283 | tss_descriptor->base_address_high = (tss_base >> 24) & 0xFF; 284 | tss_descriptor->base_address_upper = (tss_base >> 32) & 0xFFFFFFFF; 285 | 286 | // DS / ES / FS / GS 287 | segment_descriptor_32* ds_es_fs_gs_descriptor = &my_gdt[constructed_ds_es_fs_gs.index]; 288 | memset(ds_es_fs_gs_descriptor, 0, sizeof(segment_descriptor_32)); 289 | ds_es_fs_gs_descriptor->present = 1; 290 | ds_es_fs_gs_descriptor->type = SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_ACCESSED; 291 | ds_es_fs_gs_descriptor->descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA; 292 | ds_es_fs_gs_descriptor->descriptor_privilege_level = 0; 293 | ds_es_fs_gs_descriptor->default_big = 1; 294 | 295 | my_gdtr.base_address = (uint64_t)my_gdt; 296 | my_gdtr.limit = (constructed_gdt_size * sizeof(segment_descriptor_32)); 297 | 298 | gdt_inited = true; 299 | 300 | return true; 301 | } 302 | }; 303 | 304 | namespace idt { 305 | bool idt_inited = false; 306 | segment_descriptor_interrupt_gate_64* my_idt = 0; 307 | segment_descriptor_register_64 my_idtr = { 0 }; 308 | 309 | uint64_t total_interrupts = 1; 310 | idt_regs_ecode_t* context_storage = 0; 311 | 312 | /* 313 | Utility / Exposed API's 314 | */ 315 | segment_descriptor_interrupt_gate_64 create_interrupt_gate(void* assembly_handler) { 316 | segment_descriptor_interrupt_gate_64 gate = { 0 }; 317 | 318 | gate.interrupt_stack_table = 4; // Doesn't really matter which IST we pick as they all point to the same stack; just has to be non-zero 319 | gate.segment_selector = gdt::constructed_cpl0_cs.flags; 320 | gate.must_be_zero_0 = 0; 321 | gate.type = SEGMENT_DESCRIPTOR_TYPE_INTERRUPT_GATE; 322 | gate.must_be_zero_1 = 0; 323 | gate.descriptor_privilege_level = 3; // The least privileged cpl that may invoke the gate via a software interrupt 324 | gate.present = 1; 325 | gate.reserved = 0; 326 | 327 | uint64_t offset = (uint64_t)assembly_handler; 328 | gate.offset_low = (offset >> 0) & 0xFFFF; 329 | gate.offset_middle = (offset >> 16) & 0xFFFF; 330 | gate.offset_high = (offset >> 32) & 0xFFFFFFFF; 331
| 332 | return gate; 333 | } 334 | 335 | segment_descriptor_register_64 get_constructed_idtr(void) { 336 | return my_idtr; 337 | } 338 | 339 | void increase_interrupt_counter(void) { 340 | total_interrupts++; 341 | } 342 | 343 | uint64_t get_interrupt_count(void) { 344 | return idt::total_interrupts; 345 | } 346 | 347 | idt_regs_ecode_t* get_interrupt_record(uint32_t interrupt_idx) { 348 | if (!idt_inited || interrupt_idx >= MAX_RECORDABLE_INTERRUPTS) 349 | return 0; 350 | 351 | return &context_storage[interrupt_idx]; 352 | } 353 | 354 | void reset_interrupt_count(void) { 355 | total_interrupts = 0; 356 | } 357 | 358 | idt_regs_ecode_t* get_core_last_interrupt_record(void) { 359 | if (!idt_inited || total_interrupts >= MAX_RECORDABLE_INTERRUPTS || !total_interrupts) 360 | return 0; 361 | 362 | idt_regs_ecode_t* record = &context_storage[total_interrupts - 1]; 363 | 364 | // Done to avoid overflows 365 | reset_interrupt_count(); 366 | 367 | return record; 368 | } 369 | 370 | void safe_interrupt_record(idt_regs_ecode_t* record) { 371 | if (!idt_inited || total_interrupts >= MAX_RECORDABLE_INTERRUPTS) 372 | return; 373 | 374 | memcpy(&context_storage[total_interrupts], record, sizeof(idt_regs_ecode_t)); 375 | } 376 | 377 | void log_all_interrupts() { 378 | uint64_t interrupt_count = get_interrupt_count(); 379 | if (interrupt_count == 0) { 380 | log_info("No interrupts have occurred."); 381 | return; 382 | } 383 | 384 | log_info("Interrupt count: %p", interrupt_count); 385 | 386 | for (uint32_t i = 0; i < interrupt_count; ++i) { 387 | idt_regs_ecode_t* record = get_interrupt_record(i); 388 | if (!record) { 389 | log_error("Interrupt #%d has no valid record.", i); 390 | continue; 391 | } 392 | 393 | log_new_line(); 394 | 395 | log_info("Interrupt #%d:", i); 396 | log_info_indent(1, "RAX: 0x%llx", record->rax); 397 | log_info_indent(1, "RBX: 0x%llx", record->rbx); 398 | log_info_indent(1, "RCX: 0x%llx", record->rcx); 399 | log_info_indent(1, "RDX: 0x%llx", record->rdx); 400 | log_info_indent(1, "RSI: 0x%llx", record->rsi); 401 | log_info_indent(1, "RDI: 0x%llx", record->rdi); 402 | log_info_indent(1, "RBP: 0x%llx", record->rbp); 403 | log_info_indent(1, "R8: 0x%llx", record->r8); 404 | log_info_indent(1, "R9: 0x%llx", record->r9); 405 | log_info_indent(1, "R10: 0x%llx", record->r10); 406 | log_info_indent(1, "R11: 0x%llx", record->r11); 407 | log_info_indent(1, "R12: 0x%llx", record->r12); 408 | log_info_indent(1, "R13: 0x%llx", record->r13); 409 | log_info_indent(1, "R14: 0x%llx", record->r14); 410 | log_info_indent(1, "R15: 0x%llx", record->r15); 411 | log_info_indent(1, "RIP: 0x%llx", record->rip); 412 | log_info_indent(1, "CS: 0x%llx", record->cs_selector); 413 | log_info_indent(1, "RFLAGS: 0x%llx", record->rflags.flags); 414 | log_info_indent(1, "RSP: 0x%llx", record->rsp); 415 | log_info_indent(1, "SS: 0x%llx", record->ss_selector); 416 | log_info_indent(1, "Exception Vector: 0x%llx", record->exception_vector); 417 | log_info_indent(1, "Error Code: 0x%llx", record->error_code); 418 | } 419 | } 420 | 421 | /* 422 | Core 423 | */ 424 | 425 | // Core exception handler 426 | extern "C" void exception_handler(idt_regs_ecode_t* record) { 427 | 428 | // Safe data about the interrupt for various purposes 429 | safe_interrupt_record(record); 430 | increase_interrupt_counter(); 431 | 432 | // We use this as a mode switch from long to compatibility mode 433 | if (execution_mode::handle_mode_switch(record)) 434 | return; 435 | 436 | // stack_segment_fault faults require the real rsp in rax (; 437 | // 
Look into detect_asm.asm:__ss_fault_sidt for more details 438 | if (record->exception_vector == stack_segment_fault) { 439 | record->rsp = record->rax; 440 | } 441 | 442 | // Just mock nmis 443 | if (record->exception_vector == nmi) 444 | return; 445 | 446 | IMAGE_DOS_HEADER* dos_header = (IMAGE_DOS_HEADER*)g_image_base; 447 | IMAGE_NT_HEADERS64* nt_headers = (IMAGE_NT_HEADERS64*)(g_image_base + dos_header->e_lfanew); 448 | IMAGE_DATA_DIRECTORY* exception = &nt_headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXCEPTION]; 449 | RUNTIME_FUNCTION* rt_functions = (RUNTIME_FUNCTION*)(g_image_base + exception->VirtualAddress); 450 | 451 | uint64_t rip_rva = record->rip - g_image_base; 452 | 453 | // Try to resolve the exception directly with rip 454 | for (ULONG idx = 0; idx < exception->Size / sizeof(RUNTIME_FUNCTION); ++idx) { 455 | RUNTIME_FUNCTION* function = &rt_functions[idx]; 456 | if (!(rip_rva >= function->BeginAddress && rip_rva < function->EndAddress)) 457 | continue; 458 | 459 | UNWIND_INFO* unwind_info = (UNWIND_INFO*)(g_image_base + function->UnwindData); 460 | if (!(unwind_info->Flags & UNW_FLAG_EHANDLER)) 461 | continue; 462 | 463 | SCOPE_TABLE* scope_table = (SCOPE_TABLE*)((uint64_t)(&unwind_info->UnwindCode[(unwind_info->CountOfCodes + 1) & ~1]) + sizeof(uint32_t)); 464 | for (uint32_t entry = 0; entry < scope_table->Count; ++entry) { 465 | SCOPE_RECORD* scope_record = &scope_table->ScopeRecords[entry]; 466 | if (rip_rva >= scope_record->BeginAddress && rip_rva < scope_record->EndAddress) { 467 | 468 | record->rip = g_image_base + scope_record->JumpTarget; 469 | 470 | return; 471 | } 472 | } 473 | } 474 | 475 | // If we reached here this means that the exception couldn't get 476 | // resolved with rip, so we have to stack trace to find the __except 477 | // block (Just walk rsp chain by 8 bytes at a time) 478 | uint64_t* stack_ptr = (uint64_t*)record->rsp; 479 | while (stack_ptr) { 480 | uint64_t potential_caller_rip = *stack_ptr; 481 | uint64_t potential_caller_rva = potential_caller_rip - g_image_base; 482 | 483 | // Check whether the current stack address can even be a function in our driver 484 | if (potential_caller_rva > g_image_size) { 485 | stack_ptr++; 486 | continue; 487 | } 488 | 489 | // Check whether the potential_caller_rva corresponds to an __except block 490 | for (ULONG idx = 0; idx < exception->Size / sizeof(RUNTIME_FUNCTION); ++idx) { 491 | RUNTIME_FUNCTION* function = &rt_functions[idx]; 492 | if (!(potential_caller_rva >= function->BeginAddress && potential_caller_rva < function->EndAddress)) 493 | continue; 494 | 495 | UNWIND_INFO* unwind_info = (UNWIND_INFO*)(g_image_base + function->UnwindData); 496 | if (!(unwind_info->Flags & UNW_FLAG_EHANDLER)) 497 | continue; 498 | 499 | SCOPE_TABLE* scope_table = (SCOPE_TABLE*)((uint64_t)(&unwind_info->UnwindCode[(unwind_info->CountOfCodes + 1) & ~1]) + sizeof(uint32_t)); 500 | for (uint32_t entry = 0; entry < scope_table->Count; ++entry) { 501 | SCOPE_RECORD* scope_record = &scope_table->ScopeRecords[entry]; 502 | if (potential_caller_rva >= scope_record->BeginAddress && potential_caller_rva < scope_record->EndAddress) { 503 | 504 | record->rip = g_image_base + scope_record->JumpTarget; 505 | record->rsp = (uint64_t)(stack_ptr + 1); // Point rsp to below the return address (*mostly* is the state of the stack of the caller function) 506 | 507 | return; 508 | } 509 | } 510 | } 511 | 512 | stack_ptr++; 513 | } 514 | } 515 | 516 | /* 517 | Initialization 518 | */ 519 | 520 | void 
create_idt(segment_descriptor_interrupt_gate_64* idt) { 521 | // Set IDT entries manually for each exception vector. 522 | idt[divide_error] = idt::create_interrupt_gate(asm_de_handler); 523 | idt[debug] = idt::create_interrupt_gate(asm_db_handler); 524 | idt[nmi] = idt::create_interrupt_gate(asm_nmi_handler); 525 | idt[breakpoint] = idt::create_interrupt_gate(asm_bp_handler); 526 | idt[overflow] = idt::create_interrupt_gate(asm_of_handler); 527 | idt[bound_range_exceeded] = idt::create_interrupt_gate(asm_br_handler); 528 | idt[invalid_opcode] = idt::create_interrupt_gate(asm_ud_handler); 529 | idt[device_not_available] = idt::create_interrupt_gate(asm_nm_handler); 530 | idt[double_fault] = idt::create_interrupt_gate(asm_df_handler); 531 | idt[invalid_tss] = idt::create_interrupt_gate(asm_ts_handler); 532 | idt[segment_not_present] = idt::create_interrupt_gate(asm_np_handler); 533 | idt[stack_segment_fault] = idt::create_interrupt_gate(asm_ss_handler); 534 | idt[general_protection] = idt::create_interrupt_gate(asm_gp_handler); 535 | idt[page_fault] = idt::create_interrupt_gate(asm_pf_handler); 536 | idt[x87_floating_point_error] = idt::create_interrupt_gate(asm_mf_handler); 537 | idt[alignment_check] = idt::create_interrupt_gate(asm_ac_handler); 538 | idt[machine_check] = idt::create_interrupt_gate(asm_mc_handler); 539 | idt[simd_floating_point_error] = idt::create_interrupt_gate(asm_xm_handler); 540 | idt[virtualization_exception] = idt::create_interrupt_gate(asm_ve_handler); 541 | idt[control_protection] = idt::create_interrupt_gate(asm_cp_handler); 542 | 543 | my_idtr.base_address = (uint64_t)idt; 544 | my_idtr.limit = MAXUINT16; // Since we allocate up to that size 545 | } 546 | 547 | bool init_idt(void) { 548 | PHYSICAL_ADDRESS max_addr = { 0 }; 549 | max_addr.QuadPart = MAXULONG64; 550 | my_idt = (segment_descriptor_interrupt_gate_64*)MmAllocateContiguousMemory(MAXUINT16, max_addr); 551 | if (!my_idt) 552 | return false; 553 | memset(my_idt, 0, MAXUINT16); 554 | 555 | create_idt(my_idt); 556 | 557 | context_storage = (idt_regs_ecode_t*)MmAllocateContiguousMemory(MAX_RECORDABLE_INTERRUPTS * sizeof(idt_regs_ecode_t), max_addr); 558 | if (!context_storage) 559 | return false; 560 | memset(context_storage, 0, MAX_RECORDABLE_INTERRUPTS * sizeof(idt_regs_ecode_t)); 561 | 562 | idt_inited = true; 563 | 564 | return true; 565 | } 566 | }; 567 | 568 | namespace cpl { 569 | bool cpl_switching_inited = false; 570 | bool currently_in_cpl_3 = false; 571 | 572 | // Runtime data 573 | // IA32_STAR: Contains info about cs and ss for um and km 574 | // IA32_LSTAR: Contains where rip will be set to after syscall 575 | // IA32_FMASK: Every bit set in this will be unset in rflags after a syscall 576 | uint64_t original_star = 0; 577 | uint64_t original_lstar = 0; 578 | uint64_t original_fmask = 0; 579 | 580 | uint64_t constructed_star = 0; 581 | uint64_t constructed_lstar = 0; 582 | uint64_t constructed_fmask = 0; 583 | 584 | /* 585 | Done via sysret; 586 | In here we need to ensure that we write to all necessary MSR's 587 | so that we can later restore shit 588 | */ 589 | bool switch_to_cpl_3(void) { 590 | if (!is_safety_net_active()) 591 | return false; 592 | 593 | cr4 curr_cr4; 594 | curr_cr4.flags = __readcr4(); 595 | if (curr_cr4.smap_enable || curr_cr4.smep_enable) 596 | return false; 597 | 598 | __writemsr(IA32_STAR, constructed_star); 599 | __writemsr(IA32_LSTAR, constructed_lstar); 600 | __writemsr(IA32_FMASK, constructed_fmask); 601 | 602 | __try { 603 | 
asm_switch_segments(gdt::constructed_cpl3_cs.flags, gdt::constructed_cpl3_ss.flags); // Note: From now on you can no longer execute privileged instructions (e.g. wrmsr) 604 | } 605 | __except (EXCEPTION_EXECUTE_HANDLER) { 606 | // All that is left to do here is pray 607 | __writemsr(IA32_STAR, original_star); 608 | __writemsr(IA32_LSTAR, original_lstar); 609 | __writemsr(IA32_FMASK, original_fmask); 610 | return false; 611 | } 612 | 613 | currently_in_cpl_3 = true; 614 | 615 | return true; 616 | } 617 | 618 | /* 619 | Done via syscall; 620 | In here we need to ensure that we restore all polluted MSR's 621 | */ 622 | bool switch_to_cpl_0(void) { 623 | 624 | /* 625 | We can't do shit here as we do not have access to privileged instructions (e.g. wrmsr) 626 | */ 627 | 628 | if (!currently_in_cpl_3) { // Here for testing 629 | __writemsr(IA32_STAR, constructed_star); 630 | __writemsr(IA32_LSTAR, constructed_lstar); 631 | __writemsr(IA32_FMASK, constructed_fmask); 632 | } 633 | 634 | __try { 635 | asm_switch_to_cpl_0(); // Note: From now on you can execute all privileged instructions (e.g. wrmsr) 636 | } 637 | __except (EXCEPTION_EXECUTE_HANDLER) { 638 | // All that is left to do here is pray 639 | currently_in_cpl_3 = false; 640 | __writemsr(IA32_STAR, original_star); 641 | __writemsr(IA32_LSTAR, original_lstar); 642 | __writemsr(IA32_FMASK, original_fmask); 643 | return false; 644 | } 645 | 646 | __writemsr(IA32_STAR, original_star); 647 | __writemsr(IA32_LSTAR, original_lstar); 648 | __writemsr(IA32_FMASK, original_fmask); 649 | 650 | currently_in_cpl_3 = false; 651 | 652 | return true; 653 | } 654 | 655 | bool init_cpl_switcher(void) { 656 | 657 | ia32_efer_register efer; 658 | efer.flags = __readmsr(IA32_EFER); 659 | if (!efer.syscall_enable || !efer.ia32e_mode_enable) 660 | return false; 661 | 662 | // Backup orig values 663 | original_star = __readmsr(IA32_STAR); 664 | original_lstar = __readmsr(IA32_LSTAR); 665 | original_fmask = __readmsr(IA32_FMASK); 666 | 667 | 668 | ia32_star_register star; 669 | star.flags = 0; 670 | star.kernel_cs_selector = gdt::constructed_cpl0_cs.flags; 671 | star.user_cs_selector = gdt::constructed_cpl3_cs.flags - 16; // Honestly fuck you to whoever wrote that +16 for sysret behaviour 672 | constructed_star = star.flags; 673 | 674 | constructed_lstar = (uint64_t)asm_syscall_handler; 675 | 676 | constructed_fmask = 0; 677 | 678 | cpl_switching_inited = true; 679 | 680 | return true; 681 | } 682 | }; 683 | 684 | namespace execution_mode { 685 | bool execution_mode_changing_allocated = false; 686 | bool execution_mode_changing_remapped = false; 687 | 688 | void* compatibility_stack = 0; 689 | void* compatibility_data_page = 0; 690 | void* compatibility_execution_page = 0; 691 | 692 | uint64_t backed_rsp; 693 | uint64_t backed_rip; 694 | 695 | #define EXECUTION_PAGE_32_BIT_ADDRESS 0x00001000 696 | #define EXECUTION_PAGE_32_BIT_USER_SHELLCODE_START (EXECUTION_PAGE_32_BIT_ADDRESS + 0x100) 697 | #define DATA_PAGE_32_BIT_ADDRESS 0x00002000 698 | #define COMPATIBILITY_STACK_32_BIT_ADDRESS 0x00003000 699 | 700 | /* 701 | If the shellcode does not int 3 to go back it's your own fault (; 702 | Note: The user is responsible for setting up the data page 703 | */ 704 | bool execute_32_bit_shellcode(void* shellcode, uint64_t shellcode_size) { 705 | if (execution_mode_changing_allocated && !execution_mode_changing_remapped) { 706 | if (!physmem::remapping::overwrite_virtual_address_mapping((void*)EXECUTION_PAGE_32_BIT_ADDRESS, compatibility_execution_page,
physmem::util::get_constructed_cr3().flags, physmem::util::get_system_cr3().flags)) 707 | return false; 708 | 709 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)EXECUTION_PAGE_32_BIT_ADDRESS, 0x1000, physmem::util::get_constructed_cr3().flags, 1)) 710 | return false; 711 | 712 | if (!physmem::remapping::overwrite_virtual_address_mapping((void*)DATA_PAGE_32_BIT_ADDRESS, compatibility_data_page, physmem::util::get_constructed_cr3().flags, physmem::util::get_system_cr3().flags)) 713 | return false; 714 | 715 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)DATA_PAGE_32_BIT_ADDRESS, 0x1000, physmem::util::get_constructed_cr3().flags, 1)) 716 | return false; 717 | 718 | for (uint64_t i = 0; i <= KERNEL_STACK_SIZE; i += PAGE_SIZE) { // <= maps one page past the stack top, presumably so the initial rsp (stack base + size) lands on mapped memory 719 | if (!physmem::remapping::overwrite_virtual_address_mapping((void*)(COMPATIBILITY_STACK_32_BIT_ADDRESS + i), (void*)((uint64_t)compatibility_stack + i), physmem::util::get_constructed_cr3().flags, physmem::util::get_system_cr3().flags)) 720 | return false; 721 | } 722 | 723 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)COMPATIBILITY_STACK_32_BIT_ADDRESS, KERNEL_STACK_SIZE, physmem::util::get_constructed_cr3().flags, 1)) 724 | return false; 725 | 726 | execution_mode_changing_remapped = true; 727 | } 728 | 729 | // Clear everything out before usage 730 | memset(compatibility_stack, 0, KERNEL_STACK_SIZE); 731 | memset(compatibility_execution_page, 0, 0x1000); 732 | 733 | /* 734 | Prologue is there to switch seg regs and call the actual function / shellcode 735 | */ 736 | static uint8_t compatibility_prologue[] = { 737 | 0x8C, 0xD0, // mov ax, ss 738 | 0x8E, 0xD8, // mov ds, ax 739 | 0x8E, 0xC0, // mov es, ax 740 | 0x8E, 0xE0, // mov fs, ax 741 | 0x8E, 0xE8, // mov gs, ax 742 | 743 | 0xB8, 0x00, 0x00, 0x00, 0x00, // mov eax, 0x00000000 744 | 0xFF, 0xD0, // call eax 745 | }; 746 | *(uint32_t*)(compatibility_prologue + 11) = (uint32_t)EXECUTION_PAGE_32_BIT_USER_SHELLCODE_START; 747 | 748 | /* 749 | The epilogue just switches back to long mode 750 | */ 751 | static uint8_t compatibility_epilogue[] = { 752 | 0xB8, 0x31, 0x73, 0x00, 0x00, // mov eax, 0x00007331 753 | 0xCC // int 3 (Will switch back to long mode via idt handler) 754 | }; 755 | 756 | memcpy((void*)(EXECUTION_PAGE_32_BIT_USER_SHELLCODE_START), shellcode, shellcode_size); 757 | memcpy((void*)compatibility_execution_page, compatibility_prologue, sizeof(compatibility_prologue)); 758 | memcpy((void*)((uint64_t)compatibility_execution_page + sizeof(compatibility_prologue)), compatibility_epilogue, sizeof(compatibility_epilogue)); 759 | 760 | // Switches into compatibility mode and executes code in the 32-bit addressable range 761 | asm_execute_compatibility_mode_code(); 762 | 763 | return true; 764 | } 765 | 766 | void* allocate_32bit_accessible_page(uint64_t size) { 767 | PHYSICAL_ADDRESS max_addr; 768 | 769 | max_addr.QuadPart = 0xFFFFFFFF; // 32-bit addressable range (limit to first 4GB) 770 | void* mem = MmAllocateContiguousMemory(size, max_addr); 771 | if (!mem) 772 | return 0; 773 | 774 | // Zero out the allocated memory for safety 775 | memset(mem, 0, size); 776 | return mem; 777 | } 778 | 779 | /* 780 | Relies on atomicity 781 | */ 782 | bool handle_mode_switch(idt_regs_ecode_t* record) { 783 | if (!record) 784 | return false; 785 | 786 | uint64_t eax = record->rax & 0xFFFFFFFF; 787 | if (eax == 0x1337 && record->exception_vector == breakpoint) { 788 | 789 | backed_rsp = record->rsp; 790 | backed_rip = record->rip; 791 |
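// Added note (relies on the unseen assembly stub returning via iretq): rewriting the saved trap
// frame is what performs the actual mode switch, as iretq reloads rip/rsp/cs/ss from this record
// and execution resumes on the 32-bit addressable pages with the compatibility-mode selectors.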
792 | record->rsp = (uint64_t)COMPATIBILITY_STACK_32_BIT_ADDRESS + KERNEL_STACK_SIZE; // Do not forget that the stack grows downwards 793 | record->rip = (uint64_t)EXECUTION_PAGE_32_BIT_ADDRESS; 794 | 795 | record->cs_selector = gdt::compatibility_constructed_cpl0_cs.flags; 796 | record->ss_selector = gdt::compatibility_constructed_cpl0_ss.flags; 797 | 798 | return true; 799 | } 800 | else if (eax == 0x7331 && record->exception_vector == breakpoint) { 801 | record->rsp = backed_rsp; 802 | record->rip = backed_rip; 803 | 804 | record->cs_selector = gdt::constructed_cpl0_cs.flags; 805 | record->ss_selector = gdt::constructed_cpl0_ss.flags; 806 | 807 | return true; 808 | } 809 | 810 | // Primitive exception handling: we just return to long mode and restore everything if we hit an exception here 811 | if (record->cs_selector == gdt::compatibility_constructed_cpl0_cs.flags && 812 | record->ss_selector == gdt::compatibility_constructed_cpl0_ss.flags) { 813 | record->rsp = backed_rsp; 814 | record->rip = backed_rip; 815 | 816 | record->cs_selector = gdt::constructed_cpl0_cs.flags; 817 | record->ss_selector = gdt::constructed_cpl0_ss.flags; 818 | 819 | return true; 820 | } 821 | 822 | return false; 823 | } 824 | 825 | uint32_t get_compatibility_data_page_address(void) { 826 | return (uint32_t)DATA_PAGE_32_BIT_ADDRESS; 827 | } 828 | 829 | void* get_compatibility_data_page(void) { 830 | return compatibility_data_page; 831 | } 832 | 833 | bool init_execution_mode_changer(void) { 834 | 835 | compatibility_stack = allocate_32bit_accessible_page(KERNEL_STACK_SIZE); 836 | if (!compatibility_stack) 837 | return false; 838 | 839 | compatibility_execution_page = allocate_32bit_accessible_page(0x1000); 840 | if (!compatibility_execution_page) 841 | return false; 842 | 843 | compatibility_data_page = allocate_32bit_accessible_page(0x1000); 844 | if (!compatibility_data_page) 845 | return false; 846 | 847 | execution_mode_changing_allocated = true; 848 | return true; 849 | } 850 | }; 851 | 852 | /* 853 | Exposed API's 854 | */ 855 | 856 | KPCR* safety_net_kpcr = 0; 857 | 858 | bool is_safety_net_active(void) { 859 | if (!inited) 860 | return false; 861 | 862 | // Check GDTR 863 | segment_descriptor_register_64 current_gdtr; 864 | _sgdt(&current_gdtr); 865 | if (current_gdtr.base_address != gdt::my_gdtr.base_address || 866 | current_gdtr.limit != gdt::my_gdtr.limit) { 867 | return false; 868 | } 869 | 870 | // Check IDTR 871 | segment_descriptor_register_64 current_idtr; 872 | __sidt(&current_idtr); 873 | if (current_idtr.base_address != idt::my_idtr.base_address || 874 | current_idtr.limit != idt::my_idtr.limit) { 875 | return false; 876 | } 877 | 878 | // Check segment selectors 879 | uint16_t current_ss = __read_ss().flags; 880 | uint16_t current_cs = __read_cs().flags; 881 | uint16_t current_tr = __read_tr().flags; 882 | 883 | if (current_ss != gdt::constructed_cpl0_ss.flags || 884 | current_cs != gdt::constructed_cpl0_cs.flags || 885 | current_tr != gdt::constructed_tr.flags) { 886 | return false; 887 | } 888 | 889 | // Check RFLAGS 890 | rflags flags; 891 | flags.flags = __readeflags(); 892 | if (flags.interrupt_enable_flag) 893 | return false; 894 | 895 | return true; 896 | } 897 | 898 | void set_safety_net_kpcr(KPCR* kpcr) { 899 | safety_net_kpcr = kpcr; 900 | } 901 | 902 | bool init_safety_net(uint64_t image_base, uint64_t image_size) { 903 | if (!image_base || !image_size) 904 | return false; 905 | 906 | g_image_base = image_base; 907 | g_image_size = image_size; 908 | 909 | if (!gdt::init_gdt()) 910 |
return false; 911 | 912 | if (!idt::init_idt()) 913 | return false; 914 | 915 | if (!cpl::init_cpl_switcher()) 916 | return false; 917 | 918 | if (!execution_mode::init_execution_mode_changer()) 919 | return false; 920 | 921 | inited = true; 922 | 923 | return true; 924 | } 925 | 926 | void free_safety_net(void) { 927 | // Gdt 928 | MmFreeContiguousMemory(gdt::my_gdt); 929 | MmFreeContiguousMemory(gdt::my_tss); 930 | MmFreeContiguousMemory(gdt::interrupt_stack); 931 | 932 | // Idt 933 | MmFreeContiguousMemory(idt::my_idt); 934 | MmFreeContiguousMemory(idt::context_storage); 935 | 936 | // Compatibility mode execution 937 | MmFreeContiguousMemory(execution_mode::compatibility_stack); 938 | MmFreeContiguousMemory(execution_mode::compatibility_execution_page); 939 | MmFreeContiguousMemory(execution_mode::compatibility_data_page); 940 | } 941 | 942 | /* 943 | Note: Has to be called from cpl = 0 944 | */ 945 | bool start_safety_net(safety_net_t& info_storage) { 946 | if (!inited) 947 | return false; 948 | 949 | _cli(); 950 | 951 | info_storage.safed_kpcr = (KPCR*)__readmsr(IA32_GS_BASE); 952 | if (safety_net_kpcr) 953 | __writemsr(IA32_GS_BASE, (uint64_t)safety_net_kpcr); 954 | 955 | // Store the old gdtr 956 | _sgdt(&info_storage.safed_gdtr); 957 | 958 | // Load the new gdtr 959 | _lgdt(&gdt::my_gdtr); 960 | 961 | // Store the old selectors 962 | info_storage.safed_ss = __read_ss().flags; 963 | info_storage.safed_cs = __read_cs().flags; 964 | info_storage.safed_tr = __read_tr().flags; 965 | 966 | // Load all associated selectors 967 | __write_ss(gdt::constructed_cpl0_ss.flags); 968 | __write_cs(gdt::constructed_cpl0_cs.flags); 969 | 970 | // Mark tss as available and switch tr 971 | gdt::my_gdt[gdt::constructed_tr.index].type = SEGMENT_DESCRIPTOR_TYPE_TSS_AVAILABLE; 972 | __write_tr(gdt::constructed_tr.flags); 973 | 974 | // Store the old idtr 975 | __sidt(&info_storage.safed_idtr); 976 | 977 | // Load the new idtr 978 | __lidt(&idt::my_idtr); 979 | 980 | // Store the old cr3 981 | info_storage.safed_cr3 = __readcr3(); 982 | 983 | // Load the new cr3 984 | __writecr3(physmem::util::get_constructed_cr3().flags); 985 | 986 | cr4 curr_cr4; 987 | curr_cr4.flags = __readcr4(); 988 | info_storage.safed_cr4 = curr_cr4.flags; 989 | 990 | curr_cr4.smap_enable = 0; 991 | curr_cr4.smep_enable = 0; 992 | __writecr4(curr_cr4.flags); 993 | 994 | return true; 995 | } 996 | 997 | void stop_safety_net(safety_net_t& info_storage) { 998 | _lgdt(&info_storage.safed_gdtr); 999 | 1000 | __write_ss(info_storage.safed_ss); 1001 | __write_cs(info_storage.safed_cs); 1002 | 1003 | // Mark tss as available and switch tr 1004 | segment_descriptor_32* gdt = (segment_descriptor_32*)info_storage.safed_gdtr.base_address; 1005 | segment_selector tr_selec; 1006 | tr_selec.flags = info_storage.safed_tr; 1007 | gdt[tr_selec.index].type = SEGMENT_DESCRIPTOR_TYPE_TSS_AVAILABLE; 1008 | __write_tr(info_storage.safed_tr); 1009 | 1010 | __lidt(&info_storage.safed_idtr); 1011 | 1012 | __writecr3(info_storage.safed_cr3); 1013 | 1014 | __writecr4(info_storage.safed_cr4); 1015 | 1016 | __writemsr(IA32_GS_BASE, (uint64_t)info_storage.safed_kpcr); 1017 | 1018 | _sti(); 1019 | } 1020 | } -------------------------------------------------------------------------------- /src/utility/physmem/physmem.cpp: -------------------------------------------------------------------------------- 1 | #include "physmem.hpp" 2 | 3 | namespace physmem { 4 | /* 5 | Global variables 6 | */ 7 | physmem_t physmem; 8 | 9 | namespace support { 10 | bool 
is_physmem_supported(void) {
11 | // Support checks that determine whether the system
12 | // supports everything we need
13 | 
14 | // Only AMD or INTEL processors are supported
15 | char vendor[13] = { 0 };
16 | cpuidsplit_t vendor_cpuid_data;
17 | __cpuid((int*)&vendor_cpuid_data, 0);
18 | ((int*)vendor)[0] = vendor_cpuid_data.ebx;
19 | ((int*)vendor)[1] = vendor_cpuid_data.edx;
20 | ((int*)vendor)[2] = vendor_cpuid_data.ecx;
21 | if ((strncmp(vendor, "GenuineIntel", 12) != 0) &&
22 | (strncmp(vendor, "AuthenticAMD", 12) != 0)) {
23 | return false;
24 | }
25 | 
26 | // Abort on 5 level paging
27 | cr4 curr_cr4;
28 | curr_cr4.flags = __readcr4();
29 | if (curr_cr4.linear_addresses_57_bit) {
30 | return false;
31 | }
32 | 
33 | // Since we map 512 gb of physical memory with 2MB pages, those have to be supported -.-
34 | cpuid_eax_01 cpuid_1;
35 | __cpuid((int*)(&cpuid_1), 1);
36 | if (!cpuid_1.cpuid_feature_information_edx.physical_address_extension) {
37 | return false;
38 | }
39 | 
40 | // SSE2 support has to be enabled as we use mfence etc.
41 | if (!cpuid_1.cpuid_feature_information_edx.sse2_support) {
42 | return false;
43 | }
44 | 
45 | // We need an apic on chip as we read the apic id and use it as a cpu index
46 | if (!cpuid_1.cpuid_feature_information_edx.apic_on_chip) {
47 | return false;
48 | }
49 | 
50 | return true;
51 | }
52 | };
53 | 
54 | namespace page_table_initialization {
55 | void* allocate_zero_table(PHYSICAL_ADDRESS max_addr) {
56 | void* table = (void*)MmAllocateContiguousMemory(PAGE_SIZE, max_addr);
57 | 
58 | if (table)
59 | memset(table, 0, PAGE_SIZE);
60 | 
61 | return table;
62 | }
63 | 
64 | bool allocate_page_tables(void) {
65 | PHYSICAL_ADDRESS max_addr = { 0 };
66 | max_addr.QuadPart = MAXULONG64;
67 | 
68 | physmem.page_tables = (page_tables_t*)MmAllocateContiguousMemory(sizeof(page_tables_t), max_addr);
69 | if (!physmem.page_tables) {
70 | return false;
71 | }
72 | 
73 | 
74 | memset(physmem.page_tables, 0, sizeof(page_tables_t));
75 | 
76 | for (uint64_t i = 0; i < REMAPPING_TABLE_COUNT; i++) {
77 | physmem.remapping_tables.pdpt_table[i] = (pdpte_64*)allocate_zero_table(max_addr);
78 | physmem.remapping_tables.pd_table[i] = (pde_64*)allocate_zero_table(max_addr);
79 | physmem.remapping_tables.pt_table[i] = (pte_64*)allocate_zero_table(max_addr);
80 | 
81 | if (!physmem.remapping_tables.pdpt_table[i] || !physmem.remapping_tables.pd_table[i] || !physmem.remapping_tables.pt_table[i]) {
82 | return false;
83 | }
84 | 
85 | }
86 | 
87 | return true;
88 | }
89 | 
90 | uint64_t get_cr3(uint64_t target_pid) {
91 | PEPROCESS sys_process = PsInitialSystemProcess;
92 | PEPROCESS curr_entry = sys_process;
93 | 
94 | do {
95 | uint64_t curr_pid;
96 | 
97 | memcpy(&curr_pid, (void*)((uintptr_t)curr_entry + 0x440), sizeof(curr_pid)); // EPROCESS::UniqueProcessId (hardcoded, version specific offset)
98 | 
99 | // Check whether we found our process
100 | if (target_pid == curr_pid) {
101 | 
102 | uint32_t active_threads;
103 | 
104 | memcpy((void*)&active_threads, (void*)((uintptr_t)curr_entry + 0x5f0), sizeof(active_threads)); // EPROCESS::ActiveThreads (hardcoded, version specific offset)
105 | 
106 | if (active_threads || target_pid == 4) {
107 | uint64_t cr3;
108 | 
109 | memcpy(&cr3, (void*)((uintptr_t)curr_entry + 0x28), sizeof(cr3)); // KPROCESS::DirectoryTableBase (hardcoded, version specific offset)
110 | 
111 | return cr3;
112 | }
113 | }
114 | 
115 | PLIST_ENTRY list = (PLIST_ENTRY)((uintptr_t)(curr_entry) + 0x448); // EPROCESS::ActiveProcessLinks (hardcoded, version specific offset)
116 | curr_entry = (PEPROCESS)((uintptr_t)list->Flink - 0x448);
117 | } while (curr_entry != sys_process);
118 | 
119 | return 0;
120 | }
121 | 
122 | bool copy_kernel_page_tables(void) {
123 | pml4e_64* kernel_pml4_page_table = 0;
124 | 
125 | physmem.kernel_cr3.flags = 
get_cr3(4);
126 | if (!physmem.kernel_cr3.flags)
127 | return false;
128 | 
129 | kernel_pml4_page_table = (pml4e_64*)win::win_get_virtual_address(physmem.kernel_cr3.address_of_page_directory << 12);
130 | if (!kernel_pml4_page_table)
131 | return false;
132 | 
133 | memcpy(physmem.page_tables->pml4_table, kernel_pml4_page_table, sizeof(pml4e_64) * 512);
134 | 
135 | physmem.constructed_cr3.flags = physmem.kernel_cr3.flags;
136 | physmem.constructed_cr3.address_of_page_directory = win::win_get_physical_address(physmem.page_tables->pml4_table) >> 12;
137 | if (!physmem.constructed_cr3.address_of_page_directory)
138 | return false;
139 | 
140 | return true;
141 | }
142 | 
143 | uint64_t calculate_physical_memory_base(uint64_t pml4e_idx) {
144 | // Shift the pml4 index left by 39 bits (9 + 9 + 9 + 12) to get the virtual address of the first byte of the 512 gb we mapped
145 | return (pml4e_idx << (9 + 9 + 9 + 12));
146 | }
147 | 
148 | bool map_full_system_physical_memory(uint32_t free_pml4_idx) {
149 | page_tables_t* page_tables = physmem.page_tables;
150 | 
151 | // TO DO:
152 | // Dynamically determine the range of physical memory this pc has
153 | 
154 | // Map the first 512 gb of physical memory; systems with more than 512 gb of ram are not covered yet (see the TO DO above)
155 | page_tables->pml4_table[free_pml4_idx].present = 1;
156 | page_tables->pml4_table[free_pml4_idx].write = 1;
157 | page_tables->pml4_table[free_pml4_idx].page_frame_number = win::win_get_physical_address(&page_tables->pdpt_table) >> 12;
158 | if (!page_tables->pml4_table[free_pml4_idx].page_frame_number)
159 | return false;
160 | 
161 | for (uint64_t i = 0; i < PAGE_TABLE_ENTRY_COUNT; i++) {
162 | page_tables->pdpt_table[i].present = 1;
163 | page_tables->pdpt_table[i].write = 1;
164 | page_tables->pdpt_table[i].page_frame_number = win::win_get_physical_address(&page_tables->pd_2mb_table[i]) >> 12;
165 | if (!page_tables->pdpt_table[i].page_frame_number)
166 | return false;
167 | 
168 | for (uint64_t j = 0; j < PAGE_TABLE_ENTRY_COUNT; j++) {
169 | page_tables->pd_2mb_table[i][j].present = 1;
170 | page_tables->pd_2mb_table[i][j].write = 1;
171 | page_tables->pd_2mb_table[i][j].large_page = 1;
172 | page_tables->pd_2mb_table[i][j].page_frame_number = (i << 9) + j; // pfn in 2mb units: i selects the gb, j the 2mb page within it
173 | }
174 | }
175 | 
176 | return true;
177 | }
178 | 
179 | bool construct_my_page_tables(void) {
180 | page_tables_t* page_tables = physmem.page_tables;
181 | 
182 | uint32_t free_pml4_idx = pt_helpers::find_free_pml4e_index(page_tables->pml4_table);
183 | if (!pt_helpers::is_index_valid(free_pml4_idx))
184 | return false;
185 | 
186 | bool status = map_full_system_physical_memory(free_pml4_idx);
187 | if (status != true)
188 | return status;
189 | 
190 | physmem.mapped_physical_mem_base = calculate_physical_memory_base(free_pml4_idx);
191 | if (!physmem.mapped_physical_mem_base)
192 | return false; // Can't realistically happen: that would require pml4e index 0 to be the free one
193 | 
194 | return true;
195 | }
196 | 
197 | bool initialize_page_tables(void) {
198 | bool status = page_table_initialization::allocate_page_tables();
199 | if (status != true)
200 | return status;
201 | 
202 | status = page_table_initialization::copy_kernel_page_tables();
203 | if (status != true)
204 | return status;
205 | 
206 | status = page_table_initialization::construct_my_page_tables();
207 | if (status != true)
208 | return status;
209 | 
210 | return status;
211 | }
212 | }
213 | 
214 | namespace util {
215 | cr3 get_constructed_cr3(void) {
216 | return physmem.constructed_cr3;
217 | }
218 | 
219 | cr3 get_system_cr3(void) {
220 | return physmem.kernel_cr3;
221 | }
222 
| }; 223 | 224 | /* 225 | Exposed core runtime API's 226 | */ 227 | namespace runtime { 228 | bool translate_to_physical_address(uint64_t outside_target_cr3, void* virtual_address, uint64_t& physical_address, uint64_t* remaining_bytes) { 229 | rflags flags; 230 | flags.flags = __readeflags(); 231 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 232 | return false; 233 | 234 | cr3 target_cr3 = { 0 }; 235 | va_64_t va = { 0 }; 236 | 237 | target_cr3.flags = outside_target_cr3; 238 | va.flags = (uint64_t)virtual_address; 239 | 240 | bool status = true; 241 | pml4e_64* mapped_pml4_table = 0; 242 | pml4e_64* mapped_pml4_entry = 0; 243 | 244 | pdpte_64* mapped_pdpt_table = 0; 245 | pdpte_64* mapped_pdpt_entry = 0; 246 | 247 | pde_64* mapped_pde_table = 0; 248 | pde_64* mapped_pde_entry = 0; 249 | 250 | pte_64* mapped_pte_table = 0; 251 | pte_64* mapped_pte_entry = 0; 252 | 253 | mapped_pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (target_cr3.address_of_page_directory << 12)); 254 | mapped_pml4_entry = &mapped_pml4_table[va.pml4e_idx]; 255 | if (!mapped_pml4_entry->present) { 256 | status = false; 257 | return status; 258 | } 259 | 260 | mapped_pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (mapped_pml4_entry->page_frame_number << 12)); 261 | mapped_pdpt_entry = &mapped_pdpt_table[va.pdpte_idx]; 262 | if (!mapped_pdpt_entry->present) { 263 | status = false; 264 | return status; 265 | } 266 | 267 | if (mapped_pdpt_entry->large_page) { 268 | pdpte_1gb_64 mapped_pdpte_1gb_entry; 269 | mapped_pdpte_1gb_entry.flags = mapped_pdpt_entry->flags; 270 | 271 | physical_address = (mapped_pdpte_1gb_entry.page_frame_number << 30) + va.offset_1gb; 272 | if(remaining_bytes) 273 | *remaining_bytes = 0x40000000 - va.offset_1gb; 274 | 275 | return status; 276 | } 277 | 278 | 279 | mapped_pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (mapped_pdpt_entry->page_frame_number << 12)); 280 | mapped_pde_entry = &mapped_pde_table[va.pde_idx]; 281 | if (!mapped_pde_entry->present) { 282 | status = false; 283 | return status; 284 | } 285 | 286 | if (mapped_pde_entry->large_page) { 287 | pde_2mb_64 mapped_pde_2mb_entry; 288 | mapped_pde_2mb_entry.flags = mapped_pde_entry->flags; 289 | 290 | physical_address = (mapped_pde_2mb_entry.page_frame_number << 21) + va.offset_2mb; 291 | if (remaining_bytes) 292 | *remaining_bytes = 0x200000 - va.offset_2mb; 293 | 294 | return status; 295 | } 296 | 297 | mapped_pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (mapped_pde_entry->page_frame_number << 12)); 298 | mapped_pte_entry = &mapped_pte_table[va.pte_idx]; 299 | if (!mapped_pte_entry->present) { 300 | status = false; 301 | return status; 302 | } 303 | 304 | physical_address = (mapped_pte_entry->page_frame_number << 12) + va.offset_4kb; 305 | if (remaining_bytes) 306 | *remaining_bytes = 0x1000 - va.offset_4kb; 307 | 308 | return status; 309 | } 310 | 311 | void copy_physical_memory(uint64_t dst_physical, uint64_t src_physical, uint64_t size) { 312 | rflags flags; 313 | flags.flags = __readeflags(); 314 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 315 | return; 316 | 317 | void* virtual_src = 0; 318 | void* virtual_dst = 0; 319 | 320 | virtual_src = (void*)(src_physical + physmem.mapped_physical_mem_base); 321 | virtual_dst = (void*)(dst_physical + physmem.mapped_physical_mem_base); 322 | 323 | memcpy(virtual_dst, virtual_src, size); 324 | } 325 | 326 | bool copy_virtual_memory(void* dst, void* src, uint64_t size, 
uint64_t dst_cr3, uint64_t src_cr3) { 327 | rflags flags; 328 | flags.flags = __readeflags(); 329 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 330 | return false; 331 | 332 | bool status = true; 333 | 334 | void* current_virtual_src = 0; 335 | void* current_virtual_dst = 0; 336 | uint64_t current_physical_src = 0; 337 | uint64_t current_physical_dst = 0; 338 | uint64_t src_remaining = 0; 339 | uint64_t dst_remaining = 0; 340 | uint64_t copyable_size = 0; 341 | uint64_t copied_bytes = 0; 342 | 343 | while (copied_bytes < size) { 344 | // Translate both the src and dst into physical addresses 345 | status = translate_to_physical_address(src_cr3, (void*)((uint64_t)src + copied_bytes), current_physical_src, &src_remaining); 346 | if (status != true) 347 | break; 348 | status = translate_to_physical_address(dst_cr3, (void*)((uint64_t)dst + copied_bytes), current_physical_dst, &dst_remaining); 349 | if (status != true) 350 | break; 351 | 352 | current_virtual_src = (void*)(current_physical_src + physmem.mapped_physical_mem_base); 353 | current_virtual_dst = (void*)(current_physical_dst + physmem.mapped_physical_mem_base); 354 | 355 | copyable_size = min(PAGE_SIZE, size - copied_bytes); 356 | copyable_size = min(copyable_size, src_remaining); 357 | copyable_size = min(copyable_size, dst_remaining); 358 | 359 | // Then copy the mem 360 | memcpy(current_virtual_dst, current_virtual_src, copyable_size); 361 | 362 | copied_bytes += copyable_size; 363 | } 364 | 365 | return status; 366 | } 367 | 368 | bool copy_memory_to_constructed_cr3(void* dst, void* src, uint64_t size, uint64_t src_cr3) { 369 | rflags flags; 370 | flags.flags = __readeflags(); 371 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 372 | return false; 373 | 374 | bool status = true; 375 | 376 | void* current_virtual_src = 0; 377 | void* current_virtual_dst = 0; 378 | uint64_t current_physical_src = 0; 379 | uint64_t src_remaining = 0; 380 | uint64_t copyable_size = 0; 381 | uint64_t copied_bytes = 0; 382 | 383 | while (copied_bytes < size) { 384 | // Translate the src into a physical address 385 | status = translate_to_physical_address(src_cr3, (void*)((uint64_t)src + copied_bytes), current_physical_src, &src_remaining); 386 | if (status != true) 387 | break; 388 | 389 | current_virtual_src = (void*)(current_physical_src + physmem.mapped_physical_mem_base); 390 | current_virtual_dst = (void*)((uint64_t)dst + copied_bytes); 391 | 392 | copyable_size = min(PAGE_SIZE, size - copied_bytes); 393 | copyable_size = min(copyable_size, src_remaining); 394 | 395 | // Then copy the mem 396 | memcpy(current_virtual_dst, current_virtual_src, copyable_size); 397 | 398 | copied_bytes += copyable_size; 399 | } 400 | 401 | return status; 402 | } 403 | 404 | bool copy_memory_from_constructed_cr3(void* dst, void* src, uint64_t size, uint64_t dst_cr3) { 405 | rflags flags; 406 | flags.flags = __readeflags(); 407 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 408 | return false; 409 | 410 | bool status = true; 411 | 412 | void* current_virtual_src = 0; 413 | void* current_virtual_dst = 0; 414 | uint64_t current_physical_dst = 0; 415 | uint64_t dst_remaining = 0; 416 | uint64_t copyable_size = 0; 417 | uint64_t copied_bytes = 0; 418 | 419 | while (copied_bytes < size) { 420 | // Translate the dst into a physical address 421 | status = translate_to_physical_address(dst_cr3, (void*)((uint64_t)dst + copied_bytes), current_physical_dst, &dst_remaining); 
422 | if (status != true)
423 | break;
424 | 
425 | current_virtual_src = (void*)((uint64_t)src + copied_bytes);
426 | current_virtual_dst = (void*)(current_physical_dst + physmem.mapped_physical_mem_base);
427 | 
428 | copyable_size = min(PAGE_SIZE, size - copied_bytes);
429 | copyable_size = min(copyable_size, dst_remaining);
430 | 
431 | // Then copy the mem
432 | memcpy(current_virtual_dst, current_virtual_src, copyable_size);
433 | 
434 | copied_bytes += copyable_size;
435 | }
436 | 
437 | return status;
438 | }
439 | };
440 | 
441 | /*
442 | The exposed API's in here are designed for initialization
443 | */
444 | namespace remapping {
445 | bool get_remapping_entry(void* mem, remapped_entry_t*& remapping_entry) {
446 | va_64_t target_va = { 0 };
447 | remapped_entry_t dummy = { 0 };
448 | remapped_entry_t* curr_closest_entry = &dummy;
449 | 
450 | target_va.flags = (uint64_t)mem;
451 | 
452 | for (uint32_t i = 0; i < MAX_REMAPPINGS; i++) {
453 | remapped_entry_t* curr_entry = &physmem.remapping_tables.remapping_list[i];
454 | 
455 | // Sort out all the irrelevant ones
456 | if (!curr_entry->used)
457 | continue;
458 | 
459 | // Check whether the pml4 index overlaps
460 | if (curr_entry->remapped_va.pml4e_idx != target_va.pml4e_idx)
461 | continue;
462 | 
463 | // Check whether the pdpt index overlaps
464 | if (curr_entry->remapped_va.pdpte_idx != target_va.pdpte_idx) {
465 | 
466 | // The curr closest entry is already as good as the entry at the current index
467 | if (curr_closest_entry->remapped_va.pml4e_idx == target_va.pml4e_idx)
468 | continue;
469 | 
470 | // Set the curr entry as closest entry
471 | curr_closest_entry = curr_entry;
472 | continue;
473 | }
474 | 
475 | // If it points to an entry marked as large page
476 | // we can return it immediately as there won't be
477 | // a more fitting entry than this one (the paging hierarchy
478 | // for that va range ends there)
479 | if (curr_entry->pdpt_table.large_page) {
480 | curr_closest_entry = curr_entry;
481 | goto cleanup;
482 | }
483 | 
484 | // Check whether the pde index overlaps
485 | if (curr_entry->remapped_va.pde_idx != target_va.pde_idx) {
486 | 
487 | // The curr closest entry is already as good as the entry at the current index
488 | if (curr_closest_entry->remapped_va.pml4e_idx == target_va.pml4e_idx &&
489 | curr_closest_entry->remapped_va.pdpte_idx == target_va.pdpte_idx)
490 | continue;
491 | 
492 | // Set the curr entry as closest entry
493 | curr_closest_entry = curr_entry;
494 | continue;
495 | }
496 | 
497 | if (curr_entry->pd_table.large_page) {
498 | curr_closest_entry = curr_entry;
499 | goto cleanup;
500 | }
501 | 
502 | // Check whether the pte index overlaps
503 | if (curr_entry->remapped_va.pte_idx != target_va.pte_idx) {
504 | 
505 | // The curr closest entry is already as good as the entry at the current index
506 | if (curr_closest_entry->remapped_va.pml4e_idx == target_va.pml4e_idx &&
507 | curr_closest_entry->remapped_va.pdpte_idx == target_va.pdpte_idx &&
508 | curr_closest_entry->remapped_va.pde_idx == target_va.pde_idx)
509 | continue;
510 | 
511 | // Set the curr entry as closest entry
512 | curr_closest_entry = curr_entry;
513 | continue;
514 | }
515 | 
516 | // Everything overlapped, the address resides in the same pte table
517 | // as another one we mapped, we can reuse everything
518 | curr_closest_entry = curr_entry;
519 | goto cleanup;
520 | }
521 | 
522 | cleanup:
523 | 
524 | if (curr_closest_entry == &dummy) {
525 | return false;
526 | }
527 | else {
528 | remapping_entry = curr_closest_entry;
529 | }
530 | 
531 | return true; 532 | } 533 | 534 | bool add_remapping_entry(remapped_entry_t new_entry) { 535 | 536 | for (uint32_t i = 0; i < MAX_REMAPPINGS; i++) { 537 | remapped_entry_t* curr_entry = &physmem.remapping_tables.remapping_list[i]; 538 | 539 | // Check whether the current entry is present/occupied 540 | if (curr_entry->used) 541 | continue; 542 | 543 | memcpy(curr_entry, &new_entry, sizeof(remapped_entry_t)); 544 | curr_entry->used = true; 545 | 546 | return true; 547 | } 548 | 549 | return false; 550 | } 551 | 552 | bool get_max_remapping_level(remapped_entry_t* remapping_entry, uint64_t target_address, usable_until_t& usable_level) { 553 | va_64_t target_va; 554 | target_va.flags = target_address; 555 | 556 | if (!remapping_entry || !target_address) { 557 | usable_level = non_valid; 558 | return false; 559 | } 560 | 561 | // Check whether the pml4 index overlaps 562 | if (remapping_entry->remapped_va.pml4e_idx != target_va.pml4e_idx) { 563 | usable_level = non_valid; 564 | return false; 565 | } 566 | 567 | // Check whether the pdpt index overlaps 568 | if (remapping_entry->remapped_va.pdpte_idx != target_va.pdpte_idx) { 569 | usable_level = pdpt_table_valid; 570 | return true; 571 | } 572 | 573 | if (remapping_entry->pdpt_table.large_page) { 574 | usable_level = pdpt_table_valid; 575 | return true; 576 | } 577 | 578 | // Check whether the pde index overlaps 579 | if (remapping_entry->remapped_va.pde_idx != target_va.pde_idx) { 580 | usable_level = pde_table_valid; 581 | return true; 582 | } 583 | 584 | if (remapping_entry->pd_table.large_page) { 585 | usable_level = pde_table_valid; 586 | return true; 587 | } 588 | 589 | usable_level = pte_table_valid; 590 | return true; 591 | } 592 | 593 | 594 | bool ensure_memory_mapping_without_previous_mapping(void* mem, uint64_t mem_cr3_u64, uint64_t* ensured_size) { 595 | if (!ensured_size || !mem || !mem_cr3_u64) 596 | return false; 597 | 598 | va_64_t mem_va = { 0 }; 599 | cr3 mem_cr3 = { 0 }; 600 | 601 | mem_va.flags = (uint64_t)mem; 602 | mem_cr3.flags = mem_cr3_u64; 603 | bool status = true; 604 | 605 | // Pointers to mapped system tables 606 | pml4e_64* mapped_pml4_table = 0; 607 | pdpte_64* mapped_pdpt_table = 0; 608 | pde_64* mapped_pde_table = 0; 609 | pte_64* mapped_pte_table = 0; 610 | 611 | // Pointers to my tables 612 | pml4e_64* my_pml4_table = 0; 613 | pdpte_64* my_pdpt_table = 0; 614 | pde_64* my_pde_table = 0; 615 | pte_64* my_pte_table = 0; 616 | 617 | // Physical addresses of my page tables 618 | uint64_t pdpt_phys = 0; 619 | uint64_t pd_phys = 0; 620 | uint64_t pt_phys = 0; 621 | 622 | // A new entry for remapping 623 | remapped_entry_t new_entry = { 0 }; 624 | 625 | my_pml4_table = physmem.page_tables->pml4_table; 626 | 627 | mapped_pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (mem_cr3.address_of_page_directory << 12)); 628 | mapped_pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (mapped_pml4_table[mem_va.pml4e_idx].page_frame_number << 12)); 629 | 630 | if (mapped_pdpt_table[mem_va.pdpte_idx].large_page) { 631 | my_pdpt_table = pt_manager::get_free_pdpt_table(&physmem.remapping_tables); 632 | if (!my_pdpt_table) { 633 | status = false; 634 | goto cleanup; 635 | } 636 | 637 | pdpte_1gb_64* my_1gb_pdpt_table = (pdpte_1gb_64*)my_pdpt_table; 638 | 639 | if (runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_1gb_pdpt_table, pdpt_phys) != true) 640 | goto cleanup; 641 | 642 | memcpy(my_1gb_pdpt_table, mapped_pdpt_table, sizeof(pdpte_1gb_64) * 512); 643 | 
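// The copied pdpt is fully populated before the pml4e below is repointed at it,
// so the hierarchy never becomes reachable in a half-built state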
memcpy(&my_pml4_table[mem_va.pml4e_idx], &mapped_pml4_table[mem_va.pml4e_idx], sizeof(pml4e_64)); 644 | 645 | my_pml4_table[mem_va.pml4e_idx].page_frame_number = pdpt_phys >> 12; 646 | 647 | // Create a new remapping entry 648 | new_entry.used = true; 649 | new_entry.remapped_va = mem_va; 650 | 651 | new_entry.pdpt_table.large_page = true; 652 | new_entry.pdpt_table.table = my_pdpt_table; 653 | 654 | status = add_remapping_entry(new_entry); 655 | 656 | *ensured_size = 0x40000000 - mem_va.offset_1gb; 657 | 658 | goto cleanup; 659 | } 660 | 661 | mapped_pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (mapped_pdpt_table[mem_va.pdpte_idx].page_frame_number << 12)); 662 | 663 | if (mapped_pde_table[mem_va.pde_idx].large_page) { 664 | my_pdpt_table = pt_manager::get_free_pdpt_table(&physmem.remapping_tables); 665 | if (!my_pdpt_table) { 666 | status = false; 667 | goto cleanup; 668 | } 669 | 670 | my_pde_table = pt_manager::get_free_pd_table(&physmem.remapping_tables); 671 | if (!my_pde_table) { 672 | status = false; 673 | goto cleanup; 674 | } 675 | 676 | pde_2mb_64* my_2mb_pd_table = (pde_2mb_64*)my_pde_table; 677 | 678 | if (runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pdpt_table, pdpt_phys) != true) 679 | goto cleanup; 680 | 681 | if (runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pde_table, pd_phys) != true) 682 | goto cleanup; 683 | 684 | 685 | memcpy(my_2mb_pd_table, mapped_pde_table, sizeof(pde_2mb_64) * 512); 686 | memcpy(my_pdpt_table, mapped_pdpt_table, sizeof(pdpte_64) * 512); 687 | memcpy(&my_pml4_table[mem_va.pml4e_idx], &mapped_pml4_table[mem_va.pml4e_idx], sizeof(pml4e_64)); 688 | 689 | my_pdpt_table[mem_va.pdpte_idx].page_frame_number = pd_phys >> 12; 690 | my_pml4_table[mem_va.pml4e_idx].page_frame_number = pdpt_phys >> 12; 691 | 692 | // Create a new remapping entry 693 | new_entry.used = true; 694 | new_entry.remapped_va = mem_va; 695 | 696 | new_entry.pdpt_table.large_page = false; 697 | new_entry.pdpt_table.table = my_pdpt_table; 698 | 699 | new_entry.pd_table.large_page = true; 700 | new_entry.pd_table.table = my_pde_table; 701 | 702 | status = add_remapping_entry(new_entry); 703 | 704 | *ensured_size = 0x200000 - mem_va.offset_2mb; 705 | 706 | goto cleanup; 707 | } 708 | 709 | mapped_pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (mapped_pde_table[mem_va.pde_idx].page_frame_number << 12)); 710 | 711 | my_pdpt_table = pt_manager::get_free_pdpt_table(&physmem.remapping_tables); 712 | if (!my_pdpt_table) { 713 | status = false; 714 | goto cleanup; 715 | } 716 | 717 | my_pde_table = pt_manager::get_free_pd_table(&physmem.remapping_tables); 718 | if (!my_pde_table) { 719 | status = false; 720 | goto cleanup; 721 | } 722 | 723 | my_pte_table = pt_manager::get_free_pt_table(&physmem.remapping_tables); 724 | if (!my_pte_table) { 725 | status = false; 726 | goto cleanup; 727 | } 728 | 729 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pdpt_table, pdpt_phys); 730 | if (status != true) 731 | goto cleanup; 732 | 733 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pde_table, pd_phys); 734 | if (status != true) 735 | goto cleanup; 736 | 737 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pte_table, pt_phys); 738 | if (status != true) 739 | goto cleanup; 740 | 741 | memcpy(my_pte_table, mapped_pte_table, sizeof(pte_64) * 512); 742 | memcpy(my_pde_table, mapped_pde_table, sizeof(pde_64) * 512); 743 | 
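// pt and pd were copied above; the pdpt and the pml4e follow, deepest table first,
// matching the (pt, pd, pdpt, pml4) ordering rule noted further down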
memcpy(my_pdpt_table, mapped_pdpt_table, sizeof(pdpte_64) * 512); 744 | memcpy(&my_pml4_table[mem_va.pml4e_idx], &mapped_pml4_table[mem_va.pml4e_idx], sizeof(pml4e_64)); 745 | 746 | my_pde_table[mem_va.pde_idx].present = 1; 747 | my_pde_table[mem_va.pde_idx].page_frame_number = pt_phys >> 12; 748 | 749 | my_pdpt_table[mem_va.pdpte_idx].present = 1; 750 | my_pdpt_table[mem_va.pdpte_idx].page_frame_number = pd_phys >> 12; 751 | 752 | my_pml4_table[mem_va.pml4e_idx].present = 1; 753 | my_pml4_table[mem_va.pml4e_idx].page_frame_number = pdpt_phys >> 12; 754 | 755 | // Create a new remapping entry 756 | new_entry.used = true; 757 | new_entry.remapped_va = mem_va; 758 | 759 | new_entry.pdpt_table.large_page = false; 760 | new_entry.pdpt_table.table = my_pdpt_table; 761 | 762 | new_entry.pd_table.large_page = false; 763 | new_entry.pd_table.table = my_pde_table; 764 | 765 | new_entry.pt_table = my_pte_table; 766 | 767 | status = add_remapping_entry(new_entry); 768 | 769 | *ensured_size = 0x1000 - mem_va.offset_4kb; 770 | 771 | cleanup: 772 | 773 | __invlpg(mem); 774 | 775 | return status; 776 | } 777 | 778 | bool ensure_memory_mapping_with_previous_mapping(void* mem, uint64_t mem_cr3_u64, remapped_entry_t* remapping_entry, uint64_t* ensured_size) { 779 | if (!ensured_size || !mem || !mem_cr3_u64 || !remapping_entry) 780 | return false; 781 | 782 | bool status = true; 783 | va_64_t mem_va = { 0 }; 784 | cr3 mem_cr3 = { 0 }; 785 | 786 | mem_va.flags = (uint64_t)mem; 787 | mem_cr3.flags = mem_cr3_u64; 788 | 789 | // Pointers to mapped system tables 790 | pml4e_64* mapped_pml4_table = 0; 791 | pdpte_64* mapped_pdpt_table = 0; 792 | pde_64* mapped_pde_table = 0; 793 | pte_64* mapped_pte_table = 0; 794 | 795 | // Pointers to our tables 796 | pdpte_64* my_pdpt_table = 0; 797 | pde_64* my_pde_table = 0; 798 | pte_64* my_pte_table = 0; 799 | 800 | usable_until_t max_usable = non_valid; 801 | status = get_max_remapping_level(remapping_entry, (uint64_t)mem, max_usable); 802 | if (status != true) 803 | goto cleanup; 804 | 805 | mapped_pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (mem_cr3.address_of_page_directory << 12)); 806 | mapped_pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (mapped_pml4_table[mem_va.pml4e_idx].page_frame_number << 12)); 807 | 808 | if (mapped_pdpt_table[mem_va.pdpte_idx].large_page) { 809 | switch (max_usable) { 810 | case pdpt_table_valid: 811 | case pde_table_valid: 812 | case pte_table_valid: { 813 | my_pdpt_table = (pdpte_64*)remapping_entry->pdpt_table.table; 814 | if (mem_va.pdpte_idx == remapping_entry->remapped_va.pdpte_idx) { 815 | status = false; 816 | goto cleanup; 817 | } 818 | 819 | 820 | // Remember the order to change mappings (pt, pd, pdpt, pml4). 
If you don't do it in this order you will bsod sometimes 821 | memcpy(&my_pdpt_table[mem_va.pdpte_idx], &mapped_pdpt_table[mem_va.pdpte_idx], sizeof(pdpte_1gb_64)); 822 | 823 | remapped_entry_t new_entry; 824 | new_entry.used = true; 825 | new_entry.remapped_va = mem_va; 826 | 827 | new_entry.pdpt_table.large_page = true; 828 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 829 | 830 | status = add_remapping_entry(new_entry); 831 | 832 | *ensured_size = 0x40000000 - mem_va.offset_1gb; 833 | 834 | goto cleanup; 835 | } 836 | } 837 | } 838 | 839 | mapped_pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (mapped_pdpt_table[mem_va.pdpte_idx].page_frame_number << 12)); 840 | 841 | if (mapped_pde_table[mem_va.pde_idx].large_page) { 842 | switch (max_usable) { 843 | case pdpt_table_valid: { 844 | my_pdpt_table = (pdpte_64*)remapping_entry->pdpt_table.table; 845 | if (mem_va.pdpte_idx == remapping_entry->remapped_va.pdpte_idx) { 846 | status = false; 847 | goto cleanup; 848 | } 849 | 850 | my_pde_table = pt_manager::get_free_pd_table(&physmem.remapping_tables); 851 | if (!my_pde_table) { 852 | status = false; 853 | goto cleanup; 854 | } 855 | 856 | 857 | uint64_t pd_phys; 858 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pde_table, pd_phys); 859 | if (status != true) 860 | goto cleanup; 861 | 862 | // Remember the order to change mappings (pt, pd, pdpt, pml4). If you don't do it in this order you will bsod sometimes 863 | memcpy(my_pde_table, mapped_pde_table, sizeof(pde_2mb_64) * 512); 864 | my_pdpt_table[mem_va.pdpte_idx].page_frame_number = pd_phys >> 12; 865 | 866 | remapped_entry_t new_entry; 867 | new_entry.used = true; 868 | new_entry.remapped_va = mem_va; 869 | 870 | new_entry.pdpt_table.large_page = false; 871 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 872 | 873 | new_entry.pd_table.large_page = true; 874 | new_entry.pd_table.table = my_pde_table; 875 | 876 | status = add_remapping_entry(new_entry); 877 | 878 | *ensured_size = 0x200000 - mem_va.offset_2mb; 879 | 880 | goto cleanup; 881 | } 882 | case pde_table_valid: 883 | case pte_table_valid: { 884 | pde_2mb_64* my_2mb_pde_table = (pde_2mb_64*)remapping_entry->pd_table.table; 885 | if (mem_va.pde_idx == remapping_entry->remapped_va.pde_idx) { 886 | status = false; 887 | goto cleanup; 888 | } 889 | 890 | // Remember the order to change mappings (pt, pd, pdpt, pml4). 
If you don't do it in this order you will bsod sometimes 891 | memcpy(&my_2mb_pde_table[mem_va.pde_idx], &mapped_pde_table[mem_va.pde_idx], sizeof(pde_2mb_64)); 892 | 893 | remapped_entry_t new_entry; 894 | new_entry.used = true; 895 | new_entry.remapped_va = mem_va; 896 | 897 | new_entry.pdpt_table.large_page = false; 898 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 899 | 900 | new_entry.pd_table.large_page = true; 901 | new_entry.pd_table.table = remapping_entry->pd_table.table; 902 | 903 | status = add_remapping_entry(new_entry); 904 | 905 | *ensured_size = 0x200000 - mem_va.offset_2mb; 906 | 907 | goto cleanup; 908 | } 909 | } 910 | } 911 | 912 | mapped_pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (mapped_pde_table[mem_va.pde_idx].page_frame_number << 12)); 913 | 914 | switch (max_usable) { 915 | case pdpt_table_valid: { 916 | my_pdpt_table = (pdpte_64*)remapping_entry->pdpt_table.table; 917 | if (mem_va.pdpte_idx == remapping_entry->remapped_va.pdpte_idx) { 918 | status = false; 919 | goto cleanup; 920 | } 921 | my_pde_table = pt_manager::get_free_pd_table(&physmem.remapping_tables); 922 | if (!my_pde_table) { 923 | status = false; 924 | goto cleanup; 925 | } 926 | my_pte_table = pt_manager::get_free_pt_table(&physmem.remapping_tables); 927 | if (!my_pte_table) { 928 | status = false; 929 | goto cleanup; 930 | } 931 | 932 | uint64_t pd_phys = 0; 933 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pde_table, pd_phys); 934 | if (status != true) 935 | goto cleanup; 936 | 937 | uint64_t pt_phys = 0; 938 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pte_table, pt_phys); 939 | if (status != true) 940 | goto cleanup; 941 | 942 | 943 | // Remember the order to change mappings (pt, pd, pdpt, pml4). If you don't do it in this order you will bsod sometimes 944 | memcpy(my_pte_table, mapped_pte_table, sizeof(pte_64) * 512); 945 | memcpy(my_pde_table, mapped_pde_table, sizeof(pde_2mb_64) * 512); 946 | my_pde_table[mem_va.pde_idx].page_frame_number = pt_phys >> 12; 947 | my_pdpt_table[mem_va.pdpte_idx].page_frame_number = pd_phys >> 12; 948 | 949 | 950 | remapped_entry_t new_entry; 951 | new_entry.used = true; 952 | new_entry.remapped_va = mem_va; 953 | 954 | new_entry.pdpt_table.large_page = false; 955 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 956 | 957 | new_entry.pd_table.large_page = false; 958 | new_entry.pd_table.table = my_pde_table; 959 | 960 | new_entry.pt_table = my_pte_table; 961 | 962 | status = add_remapping_entry(new_entry); 963 | 964 | *ensured_size = 0x1000 - mem_va.offset_4kb; 965 | 966 | goto cleanup; 967 | } 968 | case pde_table_valid: { 969 | my_pde_table = (pde_64*)remapping_entry->pd_table.table; 970 | if (mem_va.pde_idx == remapping_entry->remapped_va.pde_idx) { 971 | status = false; 972 | goto cleanup; 973 | } 974 | 975 | my_pte_table = pt_manager::get_free_pt_table(&physmem.remapping_tables); 976 | if (!my_pte_table) { 977 | status = false; 978 | goto cleanup; 979 | } 980 | 981 | uint64_t pt_phys = 0; 982 | status = runtime::translate_to_physical_address(physmem.constructed_cr3.flags, my_pte_table, pt_phys); 983 | if (status != true) 984 | goto cleanup; 985 | 986 | 987 | // Remember the order to change mappings (pt, pd, pdpt, pml4). 
If you don't do it in this order you will bsod sometimes 988 | memcpy(my_pte_table, mapped_pte_table, sizeof(pte_64) * 512); 989 | my_pde_table[mem_va.pde_idx].page_frame_number = pt_phys >> 12; 990 | 991 | 992 | remapped_entry_t new_entry; 993 | new_entry.used = true; 994 | new_entry.remapped_va = mem_va; 995 | 996 | new_entry.pdpt_table.large_page = false; 997 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 998 | 999 | new_entry.pd_table.large_page = false; 1000 | new_entry.pd_table.table = remapping_entry->pd_table.table; 1001 | 1002 | new_entry.pt_table = my_pte_table; 1003 | 1004 | status = add_remapping_entry(new_entry); 1005 | 1006 | *ensured_size = 0x1000 - mem_va.offset_4kb; 1007 | 1008 | goto cleanup; 1009 | } 1010 | case pte_table_valid: { 1011 | my_pte_table = (pte_64*)remapping_entry->pt_table; 1012 | if (mem_va.pte_idx == remapping_entry->remapped_va.pte_idx) { 1013 | status = false; 1014 | goto cleanup; 1015 | } 1016 | 1017 | 1018 | memcpy(&my_pte_table[mem_va.pte_idx], &mapped_pte_table[mem_va.pte_idx], sizeof(pte_64)); 1019 | 1020 | remapped_entry_t new_entry; 1021 | new_entry.used = true; 1022 | new_entry.remapped_va = mem_va; 1023 | 1024 | new_entry.pdpt_table.large_page = false; 1025 | new_entry.pdpt_table.table = remapping_entry->pdpt_table.table; 1026 | 1027 | new_entry.pd_table.large_page = false; 1028 | new_entry.pd_table.table = remapping_entry->pd_table.table; 1029 | 1030 | new_entry.pt_table = remapping_entry->pt_table; 1031 | 1032 | status = add_remapping_entry(new_entry); 1033 | 1034 | *ensured_size = 0x1000 - mem_va.offset_4kb; 1035 | 1036 | goto cleanup; 1037 | } 1038 | } 1039 | 1040 | cleanup: 1041 | 1042 | __invlpg(mem); 1043 | return status; 1044 | } 1045 | 1046 | bool ensure_memory_mapping(void* mem, uint64_t mem_cr3_u64, uint64_t* ensured_size = 0) { 1047 | if (!mem || !mem_cr3_u64) 1048 | return false; 1049 | 1050 | bool status = true; 1051 | remapped_entry_t* remapping_entry = 0; 1052 | uint64_t dummy_size = 0; 1053 | 1054 | status = get_remapping_entry(mem, remapping_entry); 1055 | 1056 | if (!ensured_size) 1057 | ensured_size = &dummy_size; 1058 | 1059 | if (status == true) { 1060 | status = ensure_memory_mapping_with_previous_mapping(mem, mem_cr3_u64, remapping_entry, ensured_size); 1061 | } 1062 | else { 1063 | status = ensure_memory_mapping_without_previous_mapping(mem, mem_cr3_u64, ensured_size); 1064 | } 1065 | 1066 | return status; 1067 | } 1068 | 1069 | /* 1070 | Exposed API's 1071 | */ 1072 | bool ensure_memory_mapping_for_range(void* target_address, uint64_t size, uint64_t mem_cr3_u64) { 1073 | rflags flags; 1074 | flags.flags = __readeflags(); 1075 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 1076 | return false; 1077 | 1078 | bool status = true; 1079 | uint64_t copied_bytes = 0; 1080 | 1081 | while (copied_bytes < size) { 1082 | void* current_target = (void*)((uint64_t)target_address + copied_bytes); 1083 | uint64_t ensured_size = 0; 1084 | 1085 | status = ensure_memory_mapping(current_target, mem_cr3_u64, &ensured_size); 1086 | if (status != true) { 1087 | return status; 1088 | } 1089 | 1090 | copied_bytes += ensured_size; 1091 | } 1092 | 1093 | return status; 1094 | } 1095 | 1096 | bool overwrite_virtual_address_mapping(void* target_address, void* new_memory, uint64_t target_address_cr3_u64, uint64_t new_mem_cr3_u64) { 1097 | rflags flags; 1098 | flags.flags = __readeflags(); 1099 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags) 1100 | return false; 
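// overwrite_virtual_address_mapping repoints the final paging entry of target_address
// (in our constructed cr3) at the physical frame that backs new_memory (translated
// through new_mem_cr3); both addresses must be mapped with the same page size
// (4kb/2mb/1gb), otherwise we bail out below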
1101 | 
1102 | if (PAGE_ALIGN(target_address) != target_address ||
1103 | PAGE_ALIGN(new_memory) != new_memory)
1104 | return false;
1105 | 
1106 | bool status = true;
1107 | 
1108 | cr3 new_mem_cr3 = { 0 };
1109 | 
1110 | va_64_t target_va = { 0 };
1111 | va_64_t new_mem_va = { 0 };
1112 | 
1113 | target_va.flags = (uint64_t)target_address;
1114 | new_mem_va.flags = (uint64_t)new_memory;
1115 | 
1116 | new_mem_cr3.flags = (uint64_t)new_mem_cr3_u64;
1117 | 
1118 | pml4e_64* my_pml4_table = 0;
1119 | pdpte_64* my_pdpt_table = 0;
1120 | pde_64* my_pde_table = 0;
1121 | pte_64* my_pte_table = 0;
1122 | 
1123 | pml4e_64* new_mem_pml4_table = 0;
1124 | pdpte_64* new_mem_pdpt_table = 0;
1125 | pde_64* new_mem_pde_table = 0;
1126 | pte_64* new_mem_pte_table = 0;
1127 | 
1128 | 
1129 | // First ensure the mapping of the target address
1130 | // in our cr3
1131 | status = ensure_memory_mapping(target_address, target_address_cr3_u64);
1132 | if (status != true)
1133 | goto cleanup;
1134 | 
1135 | 
1136 | my_pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (physmem.constructed_cr3.address_of_page_directory << 12));
1137 | new_mem_pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (new_mem_cr3.address_of_page_directory << 12));
1138 | 
1139 | my_pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (my_pml4_table[target_va.pml4e_idx].page_frame_number << 12));
1140 | new_mem_pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (new_mem_pml4_table[new_mem_va.pml4e_idx].page_frame_number << 12));
1141 | 
1142 | if (my_pdpt_table[target_va.pdpte_idx].large_page || new_mem_pdpt_table[new_mem_va.pdpte_idx].large_page) {
1143 | if (!my_pdpt_table[target_va.pdpte_idx].large_page || !new_mem_pdpt_table[new_mem_va.pdpte_idx].large_page) {
1144 | status = false;
1145 | goto cleanup;
1146 | }
1147 | 
1148 | memcpy(&my_pdpt_table[target_va.pdpte_idx], &new_mem_pdpt_table[new_mem_va.pdpte_idx], sizeof(pdpte_1gb_64));
1149 | 
1150 | goto cleanup;
1151 | }
1152 | 
1153 | my_pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (my_pdpt_table[target_va.pdpte_idx].page_frame_number << 12));
1154 | new_mem_pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (new_mem_pdpt_table[new_mem_va.pdpte_idx].page_frame_number << 12));
1155 | 
1156 | if (my_pde_table[target_va.pde_idx].large_page || new_mem_pde_table[new_mem_va.pde_idx].large_page) {
1157 | if (!my_pde_table[target_va.pde_idx].large_page || !new_mem_pde_table[new_mem_va.pde_idx].large_page) {
1158 | status = false;
1159 | goto cleanup;
1160 | }
1161 | 
1162 | memcpy(&my_pde_table[target_va.pde_idx], &new_mem_pde_table[new_mem_va.pde_idx], sizeof(pde_2mb_64));
1163 | 
1164 | goto cleanup;
1165 | }
1166 | 
1167 | 
1168 | my_pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (my_pde_table[target_va.pde_idx].page_frame_number << 12));
1169 | new_mem_pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (new_mem_pde_table[new_mem_va.pde_idx].page_frame_number << 12));
1170 | 
1171 | memcpy(&my_pte_table[target_va.pte_idx], &new_mem_pte_table[new_mem_va.pte_idx], sizeof(pte_64));
1172 | 
1173 | cleanup:
1174 | __invlpg(target_address);
1175 | 
1176 | return status;
1177 | }
1178 | };
1179 | 
1180 | namespace paging_manipulation {
1181 | bool win_destroy_memory_page_mapping(void* memory, uint64_t& stored_flags) {
1182 | rflags flags;
1183 | flags.flags = __readeflags();
1184 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags)
1185 | return false;
1186 | 
1187 | 
1188 | va_64_t mem_va;
1189 | mem_va.flags = 
(uint64_t)memory;
1190 | 
1191 | pml4e_64* pml4_table = 0;
1192 | pdpte_64* pdpt_table = 0;
1193 | pde_64* pde_table = 0;
1194 | pte_64* pte_table = 0;
1195 | 
1196 | pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (__readcr3() & ~0xFFFull)); // mask off pcid/flag bits; only bits 51:12 of cr3 hold the pml4 base
1197 | if (!pml4_table)
1198 | return false;
1199 | 
1200 | pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (pml4_table[mem_va.pml4e_idx].page_frame_number << 12));
1201 | if (!pdpt_table) {
1202 | return false;
1203 | }
1204 | 
1205 | pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (pdpt_table[mem_va.pdpte_idx].page_frame_number << 12));
1206 | if (!pde_table) {
1207 | return false;
1208 | }
1209 | 
1210 | pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (pde_table[mem_va.pde_idx].page_frame_number << 12));
1211 | if (!pte_table)
1212 | return false;
1213 | 
1214 | stored_flags = pte_table[mem_va.pte_idx].flags;
1215 | pte_table[mem_va.pte_idx].flags = 0;
1216 | 
1217 | // DO NOT FUCKING FLUSH THE TRANSLATION
1218 | // OR THE IDTR/GDTR STORING DETECTION
1219 | // WILL NOT WORK PROPERLY
1220 | 
1221 | return true;
1222 | }
1223 | 
1224 | bool win_restore_memory_page_mapping(void* memory, uint64_t stored_flags) {
1225 | rflags flags;
1226 | flags.flags = __readeflags();
1227 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags)
1228 | return false;
1229 | 
1230 | PHYSICAL_ADDRESS max_addr = { 0 };
1231 | max_addr.QuadPart = MAXULONG64;
1232 | 
1233 | va_64_t mem_va;
1234 | mem_va.flags = (uint64_t)memory;
1235 | 
1236 | pml4e_64* pml4_table = 0;
1237 | pdpte_64* pdpt_table = 0;
1238 | pde_64* pde_table = 0;
1239 | pte_64* pte_table = 0;
1240 | 
1241 | pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (__readcr3() & ~0xFFFull)); // mask off pcid/flag bits here as well
1242 | if (!pml4_table)
1243 | return false;
1244 | 
1245 | pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (pml4_table[mem_va.pml4e_idx].page_frame_number << 12));
1246 | if (!pdpt_table) {
1247 | return false;
1248 | }
1249 | 
1250 | pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (pdpt_table[mem_va.pdpte_idx].page_frame_number << 12));
1251 | if (!pde_table) {
1252 | return false;
1253 | }
1254 | 
1255 | pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (pde_table[mem_va.pde_idx].page_frame_number << 12));
1256 | if (!pte_table)
1257 | return false;
1258 | 
1259 | pte_table[mem_va.pte_idx].flags = stored_flags;
1260 | 
1261 | return true;
1262 | }
1263 | 
1264 | bool set_single_page_supervisor(void* memory, cr3 mem_cr3, bool supervisor, uint64_t* set_size) {
1265 | va_64_t mem_va;
1266 | mem_va.flags = (uint64_t)memory;
1267 | 
1268 | pml4e_64* pml4_table = 0;
1269 | pdpte_64* pdpt_table = 0;
1270 | pde_64* pde_table = 0;
1271 | pte_64* pte_table = 0;
1272 | 
1273 | pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (mem_cr3.address_of_page_directory << 12));
1274 | if (!pml4_table[mem_va.pml4e_idx].present)
1275 | return false;
1276 | 
1277 | pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (pml4_table[mem_va.pml4e_idx].page_frame_number << 12));
1278 | if (!pdpt_table[mem_va.pdpte_idx].present)
1279 | return false;
1280 | 
1281 | if (pdpt_table[mem_va.pdpte_idx].large_page) {
1282 | pdpt_table[mem_va.pdpte_idx].supervisor = supervisor;
1283 | pml4_table[mem_va.pml4e_idx].supervisor = supervisor;
1284 | __invlpg(memory);
1285 | if (set_size)
1286 | *set_size = 0x40000000 - mem_va.offset_1gb;
1287 | 
1288 | return true;
1289 | }
1290 | 
1291 | pde_table = (pde_64*)(physmem.mapped_physical_mem_base + 
(pdpt_table[mem_va.pdpte_idx].page_frame_number << 12));
1292 | if (!pde_table[mem_va.pde_idx].present)
1293 | return false;
1294 | 
1295 | if (pde_table[mem_va.pde_idx].large_page) {
1296 | pde_table[mem_va.pde_idx].supervisor = supervisor;
1297 | pdpt_table[mem_va.pdpte_idx].supervisor = supervisor;
1298 | pml4_table[mem_va.pml4e_idx].supervisor = supervisor;
1299 | __invlpg(memory);
1300 | if (set_size)
1301 | *set_size = 0x200000 - mem_va.offset_2mb;
1302 | 
1303 | return true;
1304 | }
1305 | 
1306 | pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (pde_table[mem_va.pde_idx].page_frame_number << 12));
1307 | if (!pte_table[mem_va.pte_idx].present)
1308 | return false;
1309 | 
1310 | pte_table[mem_va.pte_idx].supervisor = supervisor;
1311 | pde_table[mem_va.pde_idx].supervisor = supervisor;
1312 | pdpt_table[mem_va.pdpte_idx].supervisor = supervisor;
1313 | pml4_table[mem_va.pml4e_idx].supervisor = supervisor;
1314 | 
1315 | if (set_size)
1316 | *set_size = 0x1000 - mem_va.offset_4kb;
1317 | 
1318 | __invlpg(memory);
1319 | 
1320 | return true;
1321 | }
1322 | 
1323 | bool win_set_memory_range_supervisor(void* memory, uint64_t size, uint64_t mem_cr3, bool supervisor) {
1324 | 
1325 | cr3 cr3_mem_cr3;
1326 | cr3_mem_cr3.flags = mem_cr3;
1327 | 
1328 | bool status = true;
1329 | uint64_t set_bytes = 0;
1330 | 
1331 | while (set_bytes < size) {
1332 | void* current_target = (void*)((uint64_t)memory + set_bytes);
1333 | uint64_t remaining_bytes = 0;
1334 | 
1335 | status = set_single_page_supervisor(current_target, cr3_mem_cr3, supervisor, &remaining_bytes);
1336 | if (status != true) {
1337 | return status;
1338 | }
1339 | 
1340 | set_bytes += remaining_bytes;
1341 | }
1342 | 
1343 | return status;
1344 | }
1345 | 
1346 | bool is_memory_page_mapped(void* memory) { // assumes a 4kb mapping for the va; large pages are not handled here
1347 | rflags flags;
1348 | flags.flags = __readeflags();
1349 | if (flags.interrupt_enable_flag || __readcr3() != physmem.constructed_cr3.flags)
1350 | return false;
1351 | 
1352 | PHYSICAL_ADDRESS max_addr = { 0 };
1353 | max_addr.QuadPart = MAXULONG64;
1354 | 
1355 | va_64_t mem_va;
1356 | mem_va.flags = (uint64_t)memory;
1357 | 
1358 | pml4e_64* pml4_table = 0;
1359 | pdpte_64* pdpt_table = 0;
1360 | pde_64* pde_table = 0;
1361 | pte_64* pte_table = 0;
1362 | 
1363 | pml4_table = (pml4e_64*)(physmem.mapped_physical_mem_base + (__readcr3() & ~0xFFFull)); // mask off pcid/flag bits to get the pml4 base
1364 | if (!pml4_table)
1365 | return false;
1366 | 
1367 | pdpt_table = (pdpte_64*)(physmem.mapped_physical_mem_base + (pml4_table[mem_va.pml4e_idx].page_frame_number << 12));
1368 | if (!pdpt_table) {
1369 | return false;
1370 | }
1371 | 
1372 | pde_table = (pde_64*)(physmem.mapped_physical_mem_base + (pdpt_table[mem_va.pdpte_idx].page_frame_number << 12));
1373 | if (!pde_table) {
1374 | return false;
1375 | }
1376 | 
1377 | pte_table = (pte_64*)(physmem.mapped_physical_mem_base + (pde_table[mem_va.pde_idx].page_frame_number << 12));
1378 | if (!pte_table)
1379 | return false;
1380 | 
1381 | return pte_table[mem_va.pte_idx].present;
1382 | }
1383 | 
1384 | bool prepare_driver_for_supervisor_access(void* driver_base, uint64_t driver_size, uint64_t mem_cr3) {
1385 | /*
1386 | First prepare the driver then the stack
1387 | */
1388 | if (!physmem::remapping::ensure_memory_mapping_for_range((void*)driver_base, driver_size, mem_cr3))
1389 | return false;
1390 | 
1391 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)driver_base, driver_size, mem_cr3, 1))
1392 | return false;
1393 | 
1394 | /*
1395 | Then prepare the stack
1396 | */
1397 | KPCR* kpcr = (KPCR*)__readmsr(IA32_GS_BASE); 
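// The offsets used below (CurrentPrcb + 0x8 = CurrentThread, KTHREAD + 0x38 = StackBase,
// KTHREAD + 0x30 = StackLimit) are hardcoded for recent x64 windows builds, just like the
// EPROCESS offsets in get_cr3; ideally they would be resolved dynamically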
1398 | void* curr_thread = *(void**)((uint64_t)kpcr->CurrentPrcb + 0x8); 1399 | uint64_t stack_base = *(uint64_t*)((uint64_t)curr_thread + 0x38); 1400 | uint64_t stack_limit = *(uint64_t*)((uint64_t)curr_thread + 0x30); 1401 | uint64_t stack_size = stack_base - stack_limit; 1402 | 1403 | /* 1404 | Since the stack grows downwards, stack_limit actually is the base of the mem 1405 | */ 1406 | if (!physmem::remapping::ensure_memory_mapping_for_range((void*)stack_limit, stack_size, mem_cr3)) 1407 | return false; 1408 | 1409 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)stack_limit, stack_size, mem_cr3, 1)) 1410 | return false; 1411 | 1412 | /* 1413 | We need to set the interrupt record pages to supervisor to be able to access them from cpl = 3 1414 | */ 1415 | if (!physmem::remapping::ensure_memory_mapping_for_range((void*)safety_net::idt::get_interrupt_record(0), MAX_RECORDABLE_INTERRUPTS * sizeof(idt_regs_ecode_t), mem_cr3)) 1416 | return false; 1417 | 1418 | if (!physmem::paging_manipulation::win_set_memory_range_supervisor((void*)safety_net::idt::get_interrupt_record(0), MAX_RECORDABLE_INTERRUPTS * sizeof(idt_regs_ecode_t), mem_cr3, 1)) 1419 | return false; 1420 | 1421 | safety_net::set_safety_net_kpcr(kpcr); 1422 | 1423 | return true; 1424 | } 1425 | }; 1426 | 1427 | bool is_initialized(void) { 1428 | return physmem.initialized; 1429 | } 1430 | 1431 | bool init_physmem(void) { 1432 | if (!support::is_physmem_supported()) 1433 | return false; 1434 | 1435 | if (!page_table_initialization::initialize_page_tables()) 1436 | return false; 1437 | 1438 | physmem.initialized = true; 1439 | 1440 | return true; 1441 | }; 1442 | }; --------------------------------------------------------------------------------
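Usage sketch (hypothetical, not part of the repository sources): how the exposed physmem and safety_net API's compose for a cross-cr3 memory read. This assumes init_physmem and init_safety_net have already succeeded and that the caller runs at cpl = 0; read_from_foreign_cr3, target_va and target_cr3 are illustrative names, not identifiers from the repository.

// A minimal sketch; the safety net provides the interrupts-off,
// constructed-cr3 environment the runtime copy routines check for
bool read_from_foreign_cr3(void* out, void* target_va, uint64_t target_cr3, uint64_t size) {
    if (!physmem::is_initialized())
        return false;

    // Loads the constructed gdt/idt/tr, disables interrupts and
    // switches into the constructed cr3
    safety_net_t storage;
    if (!safety_net::start_safety_net(storage))
        return false;

    // "out" is a normal kernel buffer; since the constructed cr3 mirrors the
    // kernel pml4, the "to constructed cr3" variant fits here
    bool status = physmem::runtime::copy_memory_to_constructed_cr3(out, target_va, size, target_cr3);

    // Restores gdtr/idtr/selectors/cr3/cr4/gs and re-enables interrupts
    safety_net::stop_safety_net(storage);
    return status;
}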