├── LoadSymbol.sln
├── LoadSymbol
│   ├── LoadSymbol.vcxproj
│   ├── LoadSymbol.vcxproj.filters
│   ├── LoadSymbol.vcxproj.user
│   ├── Symbols.h
│   └── main.cpp
├── README.md
├── ReloadDbg
│   ├── HookFunc.cpp
│   ├── HookFunc.h
│   ├── KernelDbgStruct.h
│   ├── Lde.h
│   ├── ReloadDbg.lst
│   ├── ReloadDbg.vcxproj
│   ├── ReloadDbg.vcxproj.filters
│   ├── ReloadDbg.vcxproj.user
│   ├── dbg.cpp
│   ├── dbg.h
│   ├── hv
│   │   └── hv
│   │       ├── arch.asm
│   │       ├── arch.h
│   │       ├── ept.cpp
│   │       ├── ept.h
│   │       ├── exception-routines.asm
│   │       ├── exception-routines.h
│   │       ├── exit-handlers.cpp
│   │       ├── exit-handlers.h
│   │       ├── gdt.cpp
│   │       ├── gdt.h
│   │       ├── guest-context.h
│   │       ├── hv.cpp
│   │       ├── hv.h
│   │       ├── hypercalls.cpp
│   │       ├── hypercalls.h
│   │       ├── ia32.h
│   │       ├── ia32.hpp
│   │       ├── ia32_compact.h
│   │       ├── ia32_defines_only.h
│   │       ├── idt.cpp
│   │       ├── idt.h
│   │       ├── interrupt-handlers.asm
│   │       ├── interrupt-handlers.h
│   │       ├── introspection.cpp
│   │       ├── introspection.h
│   │       ├── logger.cpp
│   │       ├── logger.h
│   │       ├── main.cpp
│   │       ├── mm.cpp
│   │       ├── mm.h
│   │       ├── mtrr.cpp
│   │       ├── mtrr.h
│   │       ├── page-tables.cpp
│   │       ├── page-tables.h
│   │       ├── segment.cpp
│   │       ├── segment.h
│   │       ├── spin-lock.h
│   │       ├── timing.cpp
│   │       ├── timing.h
│   │       ├── trap-frame.h
│   │       ├── vcpu.cpp
│   │       ├── vcpu.h
│   │       ├── vm-exit.asm
│   │       ├── vm-launch.asm
│   │       ├── vmcs.cpp
│   │       ├── vmcs.h
│   │       ├── vmx.asm
│   │       ├── vmx.h
│   │       └── vmx.inl
│   ├── main.cpp
│   └── vmintrin.h
├── dbghelp.dll
├── ntkrnlmp.pdb
│   ├── 223C6C6606ED35973A9AD057262282DB1
│   │   └── ntkrnlmp.pdb
│   ├── 3177D31000BA7590DED335936C93E3741
│   │   └── ntkrnlmp.pdb
│   ├── 3844DBB920174967BE7AA4A2C20430FA2
│   │   └── ntkrnlmp.pdb
│   ├── 47114209A62F3B9930F6B8998DFD4A991
│   │   └── ntkrnlmp.pdb
│   ├── 67CAF02E081BE9CB68937D22531F99C01
│   │   └── ntkrnlmp.pdb
│   ├── 68A17FAF3012B7846079AEECDBE0A5831
│   │   └── ntkrnlmp.pdb
│   ├── 76B0354BFFCF79294F039F39D1321C171
│   │   └── ntkrnlmp.pdb
│   ├── 992A9A48F30EC2C58B01A5934DCE2D9C1
│   │   └── ntkrnlmp.pdb
│   ├── CA8E2F01B822EDE6357898BFBF8629971
│   │   └── ntkrnlmp.pdb
│   └── F526DBB121425697CBBF4FB22502519F1
│       └── ntkrnlmp.pdb
└── symsrv.dll
/LoadSymbol.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 16
4 | VisualStudioVersion = 16.0.29102.190
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LoadSymbol", "LoadSymbol\LoadSymbol.vcxproj", "{95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}"
7 | EndProject
8 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ReloadDbg", "ReloadDbg\ReloadDbg.vcxproj", "{576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}"
9 | EndProject
10 | Global
11 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
12 | Win10WithVm|x64 = Win10WithVm|x64
13 | Win7WithVm|x64 = Win7WithVm|x64
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}.Win10WithVm|x64.ActiveCfg = Release|x64
17 | {95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}.Win10WithVm|x64.Build.0 = Release|x64
18 | {95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}.Win7WithVm|x64.ActiveCfg = Release|x64
19 | {95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}.Win7WithVm|x64.Build.0 = Release|x64
20 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win10WithVm|x64.ActiveCfg = Win10WithVm|x64
21 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win10WithVm|x64.Build.0 = Win10WithVm|x64
22 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win10WithVm|x64.Deploy.0 = Win10WithVm|x64
23 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win7WithVm|x64.ActiveCfg = Win7WithVm|x64
24 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win7WithVm|x64.Build.0 = Win7WithVm|x64
25 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}.Win7WithVm|x64.Deploy.0 = Win7WithVm|x64
26 | EndGlobalSection
27 | GlobalSection(SolutionProperties) = preSolution
28 | HideSolutionNode = FALSE
29 | EndGlobalSection
30 | GlobalSection(ExtensibilityGlobals) = postSolution
31 | SolutionGuid = {6D73D0B1-D96F-4484-A5BB-DFEE471E9FD2}
32 | EndGlobalSection
33 | EndGlobal
34 |
--------------------------------------------------------------------------------
/LoadSymbol/LoadSymbol.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Release
6 | x64
7 |
8 |
9 |
10 | 16.0
11 | {95C35A98-0F8F-4E0F-9A3C-6F8FD3707E75}
12 | LoadSymbol
13 | 10.0
14 |
15 |
16 |
17 | Application
18 | false
19 | v142
20 | true
21 | MultiByte
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 | Level3
36 | MaxSpeed
37 | true
38 | true
39 | true
40 | true
41 | _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)
42 |
43 |
44 | Console
45 | true
46 | true
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/LoadSymbol/LoadSymbol.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hh;hpp;hxx;hm;inl;inc;ipp;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | Header Files
20 |
21 |
22 |
23 |
24 | Source Files
25 |
26 |
27 |
--------------------------------------------------------------------------------
/LoadSymbol/LoadSymbol.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/LoadSymbol/Symbols.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/LoadSymbol/Symbols.h
--------------------------------------------------------------------------------
/LoadSymbol/main.cpp:
--------------------------------------------------------------------------------
1 | #include <Windows.h>
2 | #include "Symbols.h"
3 |
4 | HANDLE g_DeviceHandle = NULL;
5 | bool SetPrivilegeA(const LPCSTR lpszPrivilege, const BOOL bEnablePrivilege) {
6 | TOKEN_PRIVILEGES priv = { 0,0,0,0 };
7 | HANDLE hToken = nullptr;
8 | LUID luid = { 0,0 };
9 | if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken)) {
10 | if (hToken)
11 | CloseHandle(hToken);
12 | return false;
13 | }
14 | if (!LookupPrivilegeValueA(nullptr, lpszPrivilege, &luid)) {
15 | if (hToken)
16 | CloseHandle(hToken);
17 | return false;
18 | }
19 | priv.PrivilegeCount = 1;
20 | priv.Privileges[0].Luid = luid;
21 | priv.Privileges[0].Attributes = bEnablePrivilege ? SE_PRIVILEGE_ENABLED : SE_PRIVILEGE_REMOVED;
22 | if (!AdjustTokenPrivileges(hToken, false, &priv, 0, nullptr, nullptr)) {
23 | if (hToken)
24 | CloseHandle(hToken);
25 | return false;
26 | }
27 | if (hToken)
28 | CloseHandle(hToken);
29 | return true;
30 | }
31 | int openProcExp()
32 | {
33 | SetPrivilegeA(SE_DEBUG_NAME, TRUE);
34 | if (!g_DeviceHandle)
35 | g_DeviceHandle = CreateFile("\\\\.\\YCData", GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
36 |
37 | if (g_DeviceHandle == INVALID_HANDLE_VALUE)
38 | {
39 | g_DeviceHandle = NULL;
40 | printf("OpenFailed YCData \n");
41 | return 0;
42 | }
43 | return 1;
44 | }
45 |
46 | void closeProcExp()
47 | {
48 | CloseHandle(g_DeviceHandle);
49 | }
50 |
51 | void sendData(ULONG IoCtl, PVOID inData, ULONG inLen, PVOID outData, ULONG outLen)
52 | {
53 | DWORD ReturnLength = 0;
54 | BOOL IsOk = DeviceIoControl(
55 | g_DeviceHandle,
56 | IoCtl,
57 | inData,
58 | inLen,
59 | outData,
60 | outLen,
61 | &ReturnLength,
62 | NULL);
63 | }
64 |
65 | #define CTL_LOAD_DRIVER 0x800
66 | int main()
67 | {
68 | if (LoadSymbol())
69 | {
70 | printf("load Success!\n");
71 | printf("g_SymbolsData.NtCreateDebugObject %p \n", g_SymbolsData.NtCreateDebugObject);
72 | printf("g_SymbolsData.DbgkpProcessDebugPortMutex %p \n", g_SymbolsData.DbgkpProcessDebugPortMutex);
73 | if (openProcExp())
74 | {
75 | sendData(CTL_CODE(FILE_DEVICE_UNKNOWN, CTL_LOAD_DRIVER, METHOD_BUFFERED, FILE_ANY_ACCESS), &g_SymbolsData, sizeof(SYMBOLS_DATA), NULL, NULL);
76 | closeProcExp();
77 | }
78 | }
79 |
80 | system("pause");
81 | }
--------------------------------------------------------------------------------
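
A note on the transport above: sendData() issues CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS) against \\.\YCData with g_SymbolsData as the input buffer. The receiving end lives in ReloadDbg/dbg.cpp (elided above); a minimal sketch of what such a METHOD_BUFFERED handler looks like, with illustrative names only:

    // Sketch only -- the real handler is in ReloadDbg/dbg.cpp (elided).
    // Assumes <ntddk.h> and the SYMBOLS_DATA definition from Symbols.h.
    NTSTATUS DeviceControl(PDEVICE_OBJECT DeviceObject, PIRP Irp) {
        UNREFERENCED_PARAMETER(DeviceObject);
        PIO_STACK_LOCATION stack = IoGetCurrentIrpStackLocation(Irp);
        ULONG const code  = stack->Parameters.DeviceIoControl.IoControlCode;
        ULONG const inLen = stack->Parameters.DeviceIoControl.InputBufferLength;

        if (code == CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS) &&
            inLen >= sizeof(SYMBOLS_DATA)) {
            // METHOD_BUFFERED: the user-mode buffer arrives in the system buffer
            RtlCopyMemory(&g_SymbolsData, Irp->AssociatedIrp.SystemBuffer, sizeof(SYMBOLS_DATA));
            // ...the driver can now resolve the Dbgk* functions and install its hooks
        }

        Irp->IoStatus.Status = STATUS_SUCCESS;
        Irp->IoStatus.Information = 0;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);
        return STATUS_SUCCESS;
    }
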
/README.md:
--------------------------------------------------------------------------------
1 | # vt-ReloadDbg
2 |
3 | ## Implementation
4 |
5 | The structure offsets below need to be tweaked slightly for your own system:
6 |
7 | #define Thread_CrossThreadFlags 0x448
8 |
9 | #define Thread_RundownProtect 0x430
10 |
11 | #define Process_DebugPort 0x1f0
12 |
13 | #define Process_RundownProtect 0x178
14 |
15 | #define ProcessFlagS 0x440
16 |
17 | #define ProcessSectionObject 0x268
18 |
19 | #define ProcessSectionBaseAddress 0x270
20 |
21 | #define ThreadStartAddress 0x388
22 |
23 | Win7 (SP1) and Win10 (20H1) are the main supported targets. These process/thread structure offsets are hardcoded, so you must adjust them for your current Windows version; nothing else should need modification.
24 |
25 | ## Notes
26 |
27 | The approach is to download the symbols in user mode and send them into the kernel, so there is no need to locate some of the dbg-related functions dynamically. Load the driver first, then run LoadSymbol.exe; at run time it depends on dbghelp.dll and symsrv.dll.
28 |
29 | The debugging subsystem is not completely rewritten; only the paths that touch DebugPort are reimplemented.
30 |
31 | ## VT part
32 |
33 | Because of the various VT detections out there, the VT layer here is replaced with the one written by jono, which already handles this very thoroughly. Win7 does not support the performance controls and will BSOD, so the IA32_PERF_GLOBAL_CTRL handling is commented out on Win7. The original author did not write a hook function but did provide EPT page-table substitution, so a simple hook function is implemented here; it does not support hooks that cross page boundaries.
34 |
35 | ## References
36 |
37 | 1.https://bbs.kanxue.com/thread-260034-1.htm
38 |
39 | 2.https://github.com/Air14/HyperHide
40 |
41 | 3.https://github.com/DragonQuestHero/Kernel-Anit-Anit-Debug-Plugins
42 |
43 | 4.https://github.com/jonomango/hv
44 |
--------------------------------------------------------------------------------
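
For context, this is how a hardcoded offset like the ones defined in the README is typically consumed in the driver (illustrative sketch; the real accessors are in the elided ReloadDbg sources):

    // Hypothetical helper: read EPROCESS.DebugPort through the hardcoded
    // Process_DebugPort offset from the README (0x1f0 for the listed build).
    PVOID GetDebugPort(PEPROCESS Process) {
        return *(PVOID*)((PUCHAR)Process + Process_DebugPort);
    }
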
/ReloadDbg/HookFunc.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ReloadDbg/HookFunc.cpp
--------------------------------------------------------------------------------
/ReloadDbg/HookFunc.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "vmintrin.h"
3 |
4 | #define KGDT64_R3_CMCODE (2 * 16) // user mode 32-bit code
5 | #define DBGKP_FIELD_FROM_IMAGE_OPTIONAL_HEADER(hdrs,field) \
6 | ((hdrs)->OptionalHeader.##field)
7 | typedef struct _DebugInfomation{
8 | LIST_ENTRY List;
9 | HANDLE SourceProcessId;
10 | HANDLE TargetProcessId;
11 | //HANDLE DebugObjectHandle;
12 | //PVOID TargetEPROCESS;
13 | DEBUG_OBJECT* DebugObject;
14 | }DebugInfomation,*PDebugInfomation;
15 |
16 |
17 | typedef VOID(*__DbgkCreateThread)(PETHREAD Thread);
18 | typedef VOID(*__DbgkpWakeTarget)(PDEBUG_EVENT DebugEvent);
19 | typedef PVOID(*__PsCaptureExceptionPort)(PEPROCESS Process);
20 | typedef PETHREAD(*__PsGetNextProcessThread)(PEPROCESS Process, PETHREAD Thread);
21 | typedef NTSTATUS(*__DbgkpPostFakeThreadMessages)(PEPROCESS Process, PDEBUG_OBJECT DebugObject, PETHREAD StartThread, PETHREAD* pFirstThread, PETHREAD* pLastThread);
22 |
23 |
24 |
25 | #ifdef WIN7
26 | typedef NTSTATUS(*__DbgkpSendApiMessage)(BOOLEAN SuspendProcess, PDBGKM_APIMSG ApiMsg);
27 | #else
28 | typedef NTSTATUS(*__DbgkpSendApiMessage)(PEPROCESS Process, BOOLEAN SuspendProcess, PDBGKM_APIMSG ApiMsg);
29 | #endif
30 |
31 | typedef BOOLEAN(*__DbgkpSuppressDbgMsg)(PVOID teb);
32 | typedef VOID(*__DbgkpMarkProcessPeb)(PEPROCESS Process);
33 | typedef HANDLE(*__DbgkpSectionToFileHandle)(PVOID SectionObject);
34 | typedef NTSTATUS(*__NtTerminateProcess)(HANDLE ProcessHandle, NTSTATUS ExitStatus);
35 | typedef NTSTATUS(*__DbgkpSendApiMessageLpc)(PDBGKM_APIMSG ApiMsg, PVOID Port, BOOLEAN SuspendProcess);
36 | typedef VOID(*__DbgkSendSystemDllMessages)(PETHREAD Thread, PDEBUG_OBJECT DebugObject, PDBGKM_APIMSG ApiMsg);
37 | typedef NTSTATUS(*__DbgkpSendErrorMessage)(PEXCEPTION_RECORD ExceptionRecord, ULONG Flags, PDBGKM_APIMSG DbgApiMsg);
38 | typedef NTSTATUS(*__DbgkpPostFakeProcessCreateMessages)(PEPROCESS Process, PDEBUG_OBJECT DebugObject, PETHREAD* pLastThread);
39 | typedef VOID(*__KiDispatchException)(PEXCEPTION_RECORD ExceptionRecord, void* ExceptionFrame, void* TrapFrame, KPROCESSOR_MODE PreviousMode, BOOLEAN FirstChance);
40 | typedef NTSTATUS(*__NtCreateUserProcess)(PHANDLE ProcessHandle, PETHREAD ThreadHandle, ACCESS_MASK ProcessDesiredAccess, ACCESS_MASK ThreadDesiredAccess, PVOID ProcessObjectAttributes, PVOID ThreadObjectAttributes, ULONG ProcessFlags, ULONG ThreadFlags, PVOID ProcessParameters, void* CreateInfo, void* AttributeList);
41 |
42 | VOID DbgkCreateThread(PETHREAD Thread);
43 | VOID DbgkUnMapViewOfSection(PEPROCESS Process, PVOID BaseAddress);
44 | NTSTATUS NtTerminateProcess(HANDLE ProcessHandle, NTSTATUS ExitStatus);
45 | NTSTATUS NtDebugActiveProcess(HANDLE ProcessHandle, HANDLE DebugObjectHandle);
46 | VOID DbgkMapViewOfSection(PEPROCESS Process, PVOID SectionObject, PVOID BaseAddress);
47 | BOOLEAN DbgkForwardException(PEXCEPTION_RECORD ExceptionRecord, BOOLEAN DebugException, BOOLEAN SecondChance);
48 | NTSTATUS DbgkpSetProcessDebugObject(PEPROCESS Process, PDEBUG_OBJECT DebugObject, NTSTATUS MsgStatus, PETHREAD LastThread);
49 | NTSTATUS DbgkpQueueMessage(PEPROCESS Process, PETHREAD Thread, PDBGKM_APIMSG ApiMsg, ULONG Flags, PDEBUG_OBJECT TargetDebugObject);
50 | NTSTATUS NtCreateDebugObject(PHANDLE DebugObjectHandle, ACCESS_MASK DesiredAccess, POBJECT_ATTRIBUTES ObjectAttributes, ULONG Flags);
51 | VOID KiDispatchException(PEXCEPTION_RECORD ExceptionRecord,void* ExceptionFrame,PKTRAP_FRAME TrapFrame,KPROCESSOR_MODE PreviousMode,BOOLEAN FirstChance);
52 | NTSTATUS NtCreateUserProcess(PHANDLE ProcessHandle,PETHREAD ThreadHandle,ACCESS_MASK ProcessDesiredAccess,ACCESS_MASK ThreadDesiredAccess,PVOID ProcessObjectAttributes,PVOID ThreadObjectAttributes,ULONG ProcessFlags,ULONG ThreadFlags,PVOID ProcessParameters,void* CreateInfo, void* AttributeList);
--------------------------------------------------------------------------------
/ReloadDbg/KernelDbgStruct.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ReloadDbg/KernelDbgStruct.h
--------------------------------------------------------------------------------
/ReloadDbg/Lde.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ReloadDbg/Lde.h
--------------------------------------------------------------------------------
/ReloadDbg/ReloadDbg.lst:
--------------------------------------------------------------------------------
1 | Microsoft (R) Macro Assembler (x64) Version 14.21.27702.2 12/29/22 15:10:34
2 | vmintrin.asm Page 1 - 1
3 |
4 |
5 | 00000000 .CODE
6 | 00000000 __vm_call proc
7 | 00000000 48/ B8 mov rax,0CDAEFAEDBBAEBEEFh
8 | CDAEFAEDBBAEBEEF
9 | 0000000A 0F 01 C1 vmcall
10 | 0000000D C3 ret
11 | 0000000E __vm_call endp
12 |
13 | 0000000E __vm_call_ex proc
14 | 0000000E 48/ B8 mov rax,0CDAEFAEDBBAEBEEFh ; Our vmcall indentitifer
15 | CDAEFAEDBBAEBEEF
16 |
17 | 00000018 48/ 83 EC 30 sub rsp, 30h
18 | 0000001C 4C/ 89 14 24 mov qword ptr [rsp], r10
19 | 00000020 4C/ 89 5C 24 mov qword ptr [rsp + 8h], r11
20 | 08
21 | 00000025 4C/ 89 64 24 mov qword ptr [rsp + 10h], r12
22 | 10
23 | 0000002A 4C/ 89 6C 24 mov qword ptr [rsp + 18h], r13
24 | 18
25 | 0000002F 4C/ 89 74 24 mov qword ptr [rsp + 20h], r14
26 | 20
27 | 00000034 4C/ 89 7C 24 mov qword ptr [rsp + 28h], r15
28 | 28
29 |
30 | 00000039 4C/ 8B 54 24 mov r10, qword ptr [rsp + 58h]
31 | 58
32 | 0000003E 4C/ 8B 5C 24 mov r11, qword ptr [rsp + 60h]
33 | 60
34 | 00000043 4C/ 8B 64 24 mov r12, qword ptr [rsp + 68h]
35 | 68
36 | 00000048 4C/ 8B 6C 24 mov r13, qword ptr [rsp + 70h]
37 | 70
38 | 0000004D 4C/ 8B 74 24 mov r14, qword ptr [rsp + 78h]
39 | 78
40 | 00000052 4C/ 8B BC 24 mov r15, qword ptr [rsp + 80h]
41 | 00000080
42 |
43 | 0000005A 0F 01 C1 vmcall
44 | 0000005D 4C/ 8B 14 24 mov r10, qword ptr [rsp]
45 | 00000061 4C/ 8B 5C 24 mov r11, qword ptr [rsp + 8h]
46 | 08
47 | 00000066 4C/ 8B 64 24 mov r12, qword ptr [rsp + 10h]
48 | 10
49 | 0000006B 4C/ 8B 6C 24 mov r13, qword ptr [rsp + 18h]
50 | 18
51 | 00000070 4C/ 8B 74 24 mov r14, qword ptr [rsp + 20h]
52 | 20
53 | 00000075 4C/ 8B 7C 24 mov r15, qword ptr [rsp + 28h]
54 | 28
55 | 0000007A 48/ 83 C4 30 add rsp, 30h
56 |
57 | 0000007E C3 ret
58 | 0000007F __vm_call_ex endp
59 |
60 | END
61 | Microsoft (R) Macro Assembler (x64) Version 14.21.27702.2 12/29/22 15:10:34
62 | vmintrin.asm Symbols 2 - 1
63 |
64 |
65 |
66 |
67 | Procedures, parameters, and locals:
68 |
69 | N a m e Type Value Attr
70 |
71 | __vm_call_ex . . . . . . . . . . P 0000000E _TEXT Length= 00000071 Public
72 | __vm_call . . . . . . . . . . . P 00000000 _TEXT Length= 0000000E Public
73 |
74 | 0 Warnings
75 | 0 Errors
76 |
--------------------------------------------------------------------------------
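
From the register usage in the listing, the C-side declarations in vmintrin.h presumably look something like the following (signatures assumed, not taken from the elided header). __vm_call leaves its four register arguments in RCX/RDX/R8/R9 for the hypervisor to read; __vm_call_ex additionally forwards its six stack arguments through R10-R15 before the VMCALL:

    // Assumed prototypes matching the MASM listing above.
    extern "C" unsigned long long __vm_call(
        unsigned long long a1, unsigned long long a2,
        unsigned long long a3, unsigned long long a4);
    extern "C" unsigned long long __vm_call_ex(
        unsigned long long a1, unsigned long long a2, unsigned long long a3,
        unsigned long long a4, unsigned long long a5, unsigned long long a6,
        unsigned long long a7, unsigned long long a8, unsigned long long a9,
        unsigned long long a10);
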
/ReloadDbg/ReloadDbg.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Win10WithVm
6 | x64
7 |
8 |
9 | Win7WithVm
10 | x64
11 |
12 |
13 |
14 | {576B8DB9-01C6-4D08-9CEA-253D6F9B17C3}
15 | {dd38f7fc-d7bd-488b-9242-7d8754cde80d}
16 | v4.5
17 | 12.0
18 | Debug
19 | Win32
20 | ReloadDbg
21 | 10.0.19041.0
22 |
23 |
24 |
25 | Windows7
26 | true
27 | WindowsKernelModeDriver10.0
28 | Driver
29 | WDM
30 | false
31 |
32 |
33 | Windows7
34 | true
35 | WindowsKernelModeDriver10.0
36 | Driver
37 | WDM
38 | false
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 | DbgengKernelDebugger
49 |
50 |
51 | DbgengKernelDebugger
52 |
53 |
54 |
55 | false
56 | Level3
57 | %(PreprocessorDefinitions)
58 | stdcpp17
59 | 5040;%(DisableSpecificWarnings)
60 |
61 |
62 |
63 |
64 | false
65 | Level3
66 | WIN7;%(PreprocessorDefinitions)
67 | stdcpp17
68 | 5040;%(DisableSpecificWarnings)
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
--------------------------------------------------------------------------------
/ReloadDbg/ReloadDbg.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hpp;hxx;hm;inl;inc;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 | {8E41214B-6785-4CFE-B992-037D68949A14}
18 | inf;inv;inx;mof;mc;
19 |
20 |
21 | {891f04a0-e006-4a3c-a1e4-9f5ea3b1e654}
22 |
23 |
24 |
25 |
26 | Header Files
27 |
28 |
29 | Header Files
30 |
31 |
32 | Header Files
33 |
34 |
35 | Header Files
36 |
37 |
38 | Source Files\hv
39 |
40 |
41 | Source Files\hv
42 |
43 |
44 | Source Files\hv
45 |
46 |
47 | Source Files\hv
48 |
49 |
50 | Source Files\hv
51 |
52 |
53 | Source Files\hv
54 |
55 |
56 | Source Files\hv
57 |
58 |
59 | Source Files\hv
60 |
61 |
62 | Source Files\hv
63 |
64 |
65 | Source Files\hv
66 |
67 |
68 | Source Files\hv
69 |
70 |
71 | Source Files\hv
72 |
73 |
74 | Source Files\hv
75 |
76 |
77 | Source Files\hv
78 |
79 |
80 | Source Files\hv
81 |
82 |
83 | Source Files\hv
84 |
85 |
86 | Source Files\hv
87 |
88 |
89 | Source Files\hv
90 |
91 |
92 | Source Files\hv
93 |
94 |
95 | Source Files\hv
96 |
97 |
98 | Source Files\hv
99 |
100 |
101 | Source Files\hv
102 |
103 |
104 | Source Files\hv
105 |
106 |
107 | Source Files\hv
108 |
109 |
110 | Source Files\hv
111 |
112 |
113 | Source Files\hv
114 |
115 |
116 | Header Files
117 |
118 |
119 |
120 |
121 | Source Files
122 |
123 |
124 | Source Files
125 |
126 |
127 | Source Files
128 |
129 |
130 | Source Files\hv
131 |
132 |
133 | Source Files\hv
134 |
135 |
136 | Source Files\hv
137 |
138 |
139 | Source Files\hv
140 |
141 |
142 | Source Files\hv
143 |
144 |
145 | Source Files\hv
146 |
147 |
148 | Source Files\hv
149 |
150 |
151 | Source Files\hv
152 |
153 |
154 | Source Files\hv
155 |
156 |
157 | Source Files\hv
158 |
159 |
160 | Source Files\hv
161 |
162 |
163 | Source Files\hv
164 |
165 |
166 | Source Files\hv
167 |
168 |
169 | Source Files\hv
170 |
171 |
172 | Source Files\hv
173 |
174 |
175 |
176 |
177 | Source Files\hv
178 |
179 |
180 | Source Files\hv
181 |
182 |
183 | Source Files\hv
184 |
185 |
186 | Source Files\hv
187 |
188 |
189 | Source Files\hv
190 |
191 |
192 | Source Files\hv
193 |
194 |
195 |
196 |
197 | Source Files\hv
198 |
199 |
200 |
--------------------------------------------------------------------------------
/ReloadDbg/ReloadDbg.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Off
5 |
6 |
7 | Off
8 |
9 |
--------------------------------------------------------------------------------
/ReloadDbg/dbg.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ReloadDbg/dbg.cpp
--------------------------------------------------------------------------------
/ReloadDbg/dbg.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "vmintrin.h"
3 | #include "./hv/hv/hv.h"
4 |
5 | BOOLEAN DbgInit();
6 | BOOLEAN UnHookFuncs();
7 |
8 |
9 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/arch.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ?read_cs@hv@@YA?ATsegment_selector@@XZ proc
4 | mov ax, cs
5 | ret
6 | ?read_cs@hv@@YA?ATsegment_selector@@XZ endp
7 |
8 | ?read_ss@hv@@YA?ATsegment_selector@@XZ proc
9 | mov ax, ss
10 | ret
11 | ?read_ss@hv@@YA?ATsegment_selector@@XZ endp
12 |
13 | ?read_ds@hv@@YA?ATsegment_selector@@XZ proc
14 | mov ax, ds
15 | ret
16 | ?read_ds@hv@@YA?ATsegment_selector@@XZ endp
17 |
18 | ?read_es@hv@@YA?ATsegment_selector@@XZ proc
19 | mov ax, es
20 | ret
21 | ?read_es@hv@@YA?ATsegment_selector@@XZ endp
22 |
23 | ?read_fs@hv@@YA?ATsegment_selector@@XZ proc
24 | mov ax, fs
25 | ret
26 | ?read_fs@hv@@YA?ATsegment_selector@@XZ endp
27 |
28 | ?read_gs@hv@@YA?ATsegment_selector@@XZ proc
29 | mov ax, gs
30 | ret
31 | ?read_gs@hv@@YA?ATsegment_selector@@XZ endp
32 |
33 | ?read_tr@hv@@YA?ATsegment_selector@@XZ proc
34 | str ax
35 | ret
36 | ?read_tr@hv@@YA?ATsegment_selector@@XZ endp
37 |
38 | ?read_ldtr@hv@@YA?ATsegment_selector@@XZ proc
39 | sldt ax
40 | ret
41 | ?read_ldtr@hv@@YA?ATsegment_selector@@XZ endp
42 |
43 | ?write_ds@hv@@YAXG@Z proc
44 | mov ds, cx
45 | ret
46 | ?write_ds@hv@@YAXG@Z endp
47 |
48 | ?write_es@hv@@YAXG@Z proc
49 | mov es, cx
50 | ret
51 | ?write_es@hv@@YAXG@Z endp
52 |
53 | ?write_fs@hv@@YAXG@Z proc
54 | mov fs, cx
55 | ret
56 | ?write_fs@hv@@YAXG@Z endp
57 |
58 | ?write_gs@hv@@YAXG@Z proc
59 | mov gs, cx
60 | ret
61 | ?write_gs@hv@@YAXG@Z endp
62 |
63 | ?write_tr@hv@@YAXG@Z proc
64 | ltr cx
65 | ret
66 | ?write_tr@hv@@YAXG@Z endp
67 |
68 | ?write_ldtr@hv@@YAXG@Z proc
69 | lldt cx
70 | ret
71 | ?write_ldtr@hv@@YAXG@Z endp
72 |
73 | end
74 |
75 |
--------------------------------------------------------------------------------
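
The decorated labels in arch.asm above are MSVC-mangled C++ names, which is how the .asm file can directly define the functions declared inside namespace hv in arch.h (shown next); for example:

    ; the label
    ;   ?read_cs@hv@@YA?ATsegment_selector@@XZ
    ; demangles to
    ;   union segment_selector hv::read_cs(void)
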
/ReloadDbg/hv/hv/arch.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <cstdint>
4 | #include "ia32.hpp"
5 |
6 | extern "C" {
7 |
8 | // https://docs.microsoft.com/en-us/cpp/intrinsics/x64-amd64-intrinsics-list
9 | void _sgdt(segment_descriptor_register_64* gdtr);
10 | void _lgdt(segment_descriptor_register_64* gdtr);
11 |
12 | } // extern "C"
13 |
14 | namespace hv {
15 |
16 | // defined in arch.asm
17 |
18 | segment_selector read_cs();
19 | segment_selector read_ss();
20 | segment_selector read_ds();
21 | segment_selector read_es();
22 | segment_selector read_fs();
23 | segment_selector read_gs();
24 | segment_selector read_tr();
25 | segment_selector read_ldtr();
26 |
27 | void write_ds(uint16_t selector);
28 | void write_es(uint16_t selector);
29 | void write_fs(uint16_t selector);
30 | void write_gs(uint16_t selector);
31 | void write_tr(uint16_t selector);
32 | void write_ldtr(uint16_t selector);
33 |
34 | } // namespace hv
35 |
36 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/ept.cpp:
--------------------------------------------------------------------------------
1 | #include "ept.h"
2 | #include "arch.h"
3 | #include "vcpu.h"
4 | #include "mtrr.h"
5 | #include "mm.h"
6 |
7 | namespace hv {
8 |
9 | // identity-map the EPT paging structures
10 | void prepare_ept(vcpu_ept_data& ept) {
11 | memset(&ept, 0, sizeof(ept));
12 |
13 | ept.num_used_free_pages = 0;
14 |
15 | for (size_t i = 0; i < ept_free_page_count; ++i)
16 | ept.free_page_pfns[i] = MmGetPhysicalAddress(&ept.free_pages[i]).QuadPart >> 12;
17 |
18 | ept.hooks.active_list_head = nullptr;
19 | ept.hooks.free_list_head = &ept.hooks.buffer[0];
20 |
21 | for (size_t i = 0; i < ept.hooks.capacity - 1; ++i)
22 | ept.hooks.buffer[i].next = &ept.hooks.buffer[i + 1];
23 |
24 | // the last node points to NULL
25 | ept.hooks.buffer[ept.hooks.capacity - 1].next = nullptr;
26 |
27 | // setup the first PML4E so that it points to our PDPT
28 | auto& pml4e = ept.pml4[0];
29 | pml4e.flags = 0;
30 | pml4e.read_access = 1;
31 | pml4e.write_access = 1;
32 | pml4e.execute_access = 1;
33 | pml4e.accessed = 0;
34 | pml4e.user_mode_execute = 1;
35 | pml4e.page_frame_number = MmGetPhysicalAddress(&ept.pdpt).QuadPart >> 12;
36 |
37 | // MTRR data for setting memory types
38 | auto const mtrrs = read_mtrr_data();
39 |
40 | // TODO: allocate a PT for the fixed MTRRs region so that we can get
41 | // more accurate memory typing in that area (as opposed to just
42 | // mapping the whole PDE as UC).
43 |
44 | for (size_t i = 0; i < ept_pd_count; ++i) {
45 | // point each PDPTE to the corresponding PD
46 | auto& pdpte = ept.pdpt[i];
47 | pdpte.flags = 0;
48 | pdpte.read_access = 1;
49 | pdpte.write_access = 1;
50 | pdpte.execute_access = 1;
51 | pdpte.accessed = 0;
52 | pdpte.user_mode_execute = 1;
53 | pdpte.page_frame_number = MmGetPhysicalAddress(&ept.pds[i]).QuadPart >> 12;
54 |
55 | for (size_t j = 0; j < 512; ++j) {
56 | // identity-map every GPA to the corresponding HPA
57 | auto& pde = ept.pds_2mb[i][j];
58 | pde.flags = 0;
59 | pde.read_access = 1;
60 | pde.write_access = 1;
61 | pde.execute_access = 1;
62 | pde.ignore_pat = 0;
63 | pde.large_page = 1;
64 | pde.accessed = 0;
65 | pde.dirty = 0;
66 | pde.user_mode_execute = 1;
67 | pde.suppress_ve = 0;
68 | pde.page_frame_number = (i << 9) + j;
69 | pde.memory_type = calc_mtrr_mem_type(mtrrs,
70 | pde.page_frame_number << 21, 0x1000 << 9);
71 | }
72 | }
73 | }
74 |
75 | // update the memory types in the EPT paging structures based on the MTRRs.
76 | // this function should only be called from root-mode during vmx-operation.
77 | void update_ept_memory_type(vcpu_ept_data& ept) {
78 | // TODO: completely virtualize the guest MTRRs
79 | auto const mtrrs = read_mtrr_data();
80 |
81 | for (size_t i = 0; i < ept_pd_count; ++i) {
82 | for (size_t j = 0; j < 512; ++j) {
83 | auto& pde = ept.pds_2mb[i][j];
84 |
85 | // 2MB large page
86 | if (pde.large_page) {
87 | // update the memory type for this PDE
88 | pde.memory_type = calc_mtrr_mem_type(mtrrs,
89 | pde.page_frame_number << 21, 0x1000 << 9);
90 | }
91 | // PDE points to a PT
92 | else {
93 | auto const pt = reinterpret_cast<ept_pte*>(host_physical_memory_base
94 | + (ept.pds[i][j].page_frame_number << 12));
95 |
96 | // update the memory type for every PTE
97 | for (size_t k = 0; k < 512; ++k) {
98 | pt[k].memory_type = calc_mtrr_mem_type(mtrrs,
99 | pt[k].page_frame_number << 12, 0x1000);
100 | }
101 | }
102 | }
103 | }
104 | }
105 |
106 | // set the memory type in every EPT paging structure to the specified value
107 | void set_ept_memory_type(vcpu_ept_data& ept, uint8_t const memory_type) {
108 | for (size_t i = 0; i < ept_pd_count; ++i) {
109 | for (size_t j = 0; j < 512; ++j) {
110 | auto& pde = ept.pds_2mb[i][j];
111 |
112 | // 2MB large page
113 | if (pde.large_page)
114 | pde.memory_type = memory_type;
115 | // PDE points to a PT
116 | else {
117 | auto const pt = reinterpret_cast<ept_pte*>(host_physical_memory_base
118 | + (ept.pds[i][j].page_frame_number << 12));
119 |
120 | // update the memory type for every PTE
121 | for (size_t k = 0; k < 512; ++k)
122 | pt[k].memory_type = memory_type;
123 | }
124 | }
125 | }
126 | }
127 |
128 | // get the corresponding EPT PDPTE for a given physical address
129 | ept_pdpte* get_ept_pdpte(vcpu_ept_data& ept, uint64_t const physical_address) {
130 | pml4_virtual_address const addr = { reinterpret_cast<void*>(physical_address) };
131 |
132 | if (addr.pml4_idx != 0)
133 | return nullptr;
134 |
135 | if (addr.pdpt_idx >= ept_pd_count)
136 | return nullptr;
137 |
138 | return &ept.pdpt[addr.pdpt_idx];
139 | }
140 |
141 | // get the corresponding EPT PDE for a given physical address
142 | ept_pde* get_ept_pde(vcpu_ept_data& ept, uint64_t const physical_address) {
143 | pml4_virtual_address const addr = { reinterpret_cast<void*>(physical_address) };
144 |
145 | if (addr.pml4_idx != 0)
146 | return nullptr;
147 |
148 | if (addr.pdpt_idx >= ept_pd_count)
149 | return nullptr;
150 |
151 | return &ept.pds[addr.pdpt_idx][addr.pd_idx];
152 | }
153 |
154 | // get the corresponding EPT PTE for a given physical address
155 | ept_pte* get_ept_pte(vcpu_ept_data& ept,
156 | uint64_t const physical_address, bool const force_split) {
157 | pml4_virtual_address const addr = { reinterpret_cast<void*>(physical_address) };
158 |
159 | if (addr.pml4_idx != 0)
160 | return nullptr;
161 |
162 | if (addr.pdpt_idx >= ept_pd_count)
163 | return nullptr;
164 |
165 | auto& pde_2mb = ept.pds_2mb[addr.pdpt_idx][addr.pd_idx];
166 |
167 | if (pde_2mb.large_page) {
168 | if (!force_split)
169 | return nullptr;
170 |
171 | split_ept_pde(ept, &pde_2mb);
172 |
173 | // failed to split the PDE
174 | if (pde_2mb.large_page)
175 | return nullptr;
176 | }
177 |
178 | auto const pt = reinterpret_cast<ept_pte*>(host_physical_memory_base
179 | + (ept.pds[addr.pdpt_idx][addr.pd_idx].page_frame_number << 12));
180 |
181 | return &pt[addr.pt_idx];
182 | }
183 |
184 | // split a 2MB EPT PDE so that it points to an EPT PT
185 | void split_ept_pde(vcpu_ept_data& ept, ept_pde_2mb* const pde_2mb) {
186 | // this PDE is already split
187 | if (!pde_2mb->large_page)
188 | return;
189 |
190 | // no available free pages
191 | if (ept.num_used_free_pages >= ept_free_page_count)
192 | return;
193 |
194 | // allocate a free page for the PT
195 | auto const pt_pfn = ept.free_page_pfns[ept.num_used_free_pages];
196 | auto const pt = reinterpret_cast<ept_pte*>(
197 | &ept.free_pages[ept.num_used_free_pages]);
198 | ++ept.num_used_free_pages;
199 |
200 | for (size_t i = 0; i < 512; ++i) {
201 | auto& pte = pt[i];
202 | pte.flags = 0;
203 |
204 | // copy the parent PDE flags
205 | pte.read_access = pde_2mb->read_access;
206 | pte.write_access = pde_2mb->write_access;
207 | pte.execute_access = pde_2mb->execute_access;
208 | pte.memory_type = pde_2mb->memory_type;
209 | pte.ignore_pat = pde_2mb->ignore_pat;
210 | pte.accessed = pde_2mb->accessed;
211 | pte.dirty = pde_2mb->dirty;
212 | pte.user_mode_execute = pde_2mb->user_mode_execute;
213 | pte.verify_guest_paging = pde_2mb->verify_guest_paging;
214 | pte.paging_write_access = pde_2mb->paging_write_access;
215 | pte.supervisor_shadow_stack = pde_2mb->supervisor_shadow_stack;
216 | pte.suppress_ve = pde_2mb->suppress_ve;
217 | pte.page_frame_number = (pde_2mb->page_frame_number << 9) + i;
218 | }
219 |
220 | auto const pde = reinterpret_cast<ept_pde*>(pde_2mb);
221 | pde->flags = 0;
222 | pde->read_access = 1;
223 | pde->write_access = 1;
224 | pde->execute_access = 1;
225 | pde->user_mode_execute = 1;
226 | pde->page_frame_number = pt_pfn;
227 | }
228 |
229 | // memory read/written will use the original page while code
230 | // being executed will use the executable page instead
231 | bool install_ept_hook(vcpu_ept_data& ept,
232 | uint64_t const original_page_pfn,
233 | uint64_t const executable_page_pfn) {
234 | // we ran out of EPT hooks :(
235 | if (!ept.hooks.free_list_head)
236 | return false;
237 |
238 | // get the EPT PTE, possibly splitting an existing 2MB PDE if needed
239 | auto const pte = get_ept_pte(ept, original_page_pfn << 12, true);
240 | if (!pte)
241 | return false;
242 |
243 | // remove a hook node from the free list
244 | auto const hook_node = ept.hooks.free_list_head;
245 | ept.hooks.free_list_head = hook_node->next;
246 |
247 | // insert the hook node into the active list
248 | hook_node->next = ept.hooks.active_list_head;
249 | ept.hooks.active_list_head = hook_node;
250 |
251 | // initialize the hook node
252 | hook_node->orig_pfn = static_cast(original_page_pfn);
253 | hook_node->exec_pfn = static_cast(executable_page_pfn);
254 |
255 | // an instruction fetch to this physical address will now trigger
256 | // an ept-violation vm-exit where the real "meat" of the ept hook is
257 | pte->execute_access = 0;
258 |
259 | vmx_invept(invept_all_context, {});
260 |
261 | return true;
262 | }
263 |
264 | // remove an EPT hook that was installed with install_ept_hook()
265 | void remove_ept_hook(vcpu_ept_data& ept, uint64_t const original_page_pfn) {
266 | if (!ept.hooks.active_list_head)
267 | return;
268 |
269 | // the head is the target node
270 | if (ept.hooks.active_list_head->orig_pfn == original_page_pfn) {
271 | auto const new_head = ept.hooks.active_list_head->next;
272 |
273 | // add to the free list
274 | ept.hooks.active_list_head->next = ept.hooks.free_list_head;
275 | ept.hooks.free_list_head = ept.hooks.active_list_head;
276 |
277 | // remove from the active list
278 | ept.hooks.active_list_head = new_head;
279 | } else {
280 | auto prev = ept.hooks.active_list_head;
281 |
282 | // search for the node BEFORE the target node (what "prev" would be if this list were doubly-linked)
283 | while (prev->next) {
284 | if (prev->next->orig_pfn == original_page_pfn)
285 | break;
286 |
287 | prev = prev->next;
288 | }
289 |
290 | if (!prev->next)
291 | return;
292 |
293 | auto const new_next = prev->next->next;
294 |
295 | // add to the free list
296 | prev->next->next = ept.hooks.free_list_head;
297 | ept.hooks.free_list_head = prev->next;
298 |
299 | // remove from the active list
300 | prev->next = new_next;
301 | }
302 |
303 | auto const pte = get_ept_pte(ept, original_page_pfn << 12, false);
304 |
305 | // this should NOT fail
306 | if (!pte)
307 | return;
308 |
309 | // restore original EPT page attributes
310 | pte->read_access = 1;
311 | pte->write_access = 1;
312 | pte->execute_access = 1;
313 | pte->page_frame_number = original_page_pfn;
314 |
315 | vmx_invept(invept_all_context, {});
316 | }
317 |
318 | // find the EPT hook for the specified PFN
319 | vcpu_ept_hook_node* find_ept_hook(vcpu_ept_data& ept,
320 | uint64_t const original_page_pfn) {
321 | // TODO:
322 | // maybe use a more optimal data structure to handle a large
323 | // amount of EPT hooks?
324 |
325 | // linear search through the active hook list
326 | for (auto curr = ept.hooks.active_list_head; curr; curr = curr->next) {
327 | if (curr->orig_pfn == original_page_pfn)
328 | return curr;
329 | }
330 |
331 | return nullptr;
332 | }
333 |
334 | } // namespace hv
335 |
336 |
--------------------------------------------------------------------------------
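
A hypothetical usage sketch for the hook API above (hook_code_page is illustrative; in this project the call is reached in root mode through the install_ept_hook hypercall dispatched in exit-handlers.cpp):

    // Instruction fetches from target_page now cause an EPT violation and are
    // served from shadow_page, while reads/writes still see the original bytes.
    bool hook_code_page(hv::vcpu_ept_data& ept, void* target_page, void* shadow_page) {
      auto const orig_pfn = MmGetPhysicalAddress(target_page).QuadPart >> 12;
      auto const exec_pfn = MmGetPhysicalAddress(shadow_page).QuadPart >> 12;
      return hv::install_ept_hook(ept, orig_pfn, exec_pfn);
    }
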
/ReloadDbg/hv/hv/ept.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct vcpu;
8 |
9 | // number of PDs in the EPT paging structures
10 | inline constexpr size_t ept_pd_count = 64;
11 | inline constexpr size_t ept_free_page_count = 10;
12 |
13 | struct vcpu_ept_hook_node {
14 | vcpu_ept_hook_node* next;
15 |
16 | // these can be stored as 32-bit integers to conserve space since
17 | // nobody is going to have more than 16,000 GB of physical memory
18 | uint32_t orig_pfn;
19 | uint32_t exec_pfn;
20 | };
21 |
22 | struct vcpu_ept_hooks {
23 | // buffer of nodes (there can be unused nodes in the middle
24 | // of the buffer if a hook was removed for example)
25 | static constexpr size_t capacity = 64;
26 | vcpu_ept_hook_node buffer[capacity];
27 |
28 | // list of currently active EPT hooks
29 | vcpu_ept_hook_node* active_list_head;
30 |
31 | // list of unused nodes
32 | vcpu_ept_hook_node* free_list_head;
33 | };
34 |
35 | struct vcpu_ept_data {
36 | // EPT PML4
37 | alignas(0x1000) ept_pml4e pml4[512];
38 |
39 | // EPT PDPT - a single one covers 512GB of physical memory
40 | alignas(0x1000) ept_pdpte pdpt[512];
41 | static_assert(ept_pd_count <= 512, "Only 512 EPT PDs are supported!");
42 |
43 | // an array of EPT PDs - each PD covers 1GB
44 | union {
45 | alignas(0x1000) ept_pde pds[ept_pd_count][512];
46 | alignas(0x1000) ept_pde_2mb pds_2mb[ept_pd_count][512];
47 | };
48 |
49 | // free pages that can be used to split PDEs or for other purposes
50 | alignas(0x1000) uint8_t free_pages[ept_free_page_count][0x1000];
51 |
52 | // an array of PFNs that point to each free page in the free page array
53 | uint64_t free_page_pfns[ept_free_page_count];
54 |
55 | // # of free pages that are currently in use
56 | size_t num_used_free_pages;
57 |
58 | // EPT hooks
59 | vcpu_ept_hooks hooks;
60 | };
61 |
62 | // identity-map the EPT paging structures
63 | void prepare_ept(vcpu_ept_data& ept);
64 |
65 | // update the memory types in the EPT paging structures based on the MTRRs.
66 | // this function should only be called from root-mode during vmx-operation.
67 | void update_ept_memory_type(vcpu_ept_data& ept);
68 |
69 | // set the memory type in every EPT paging structure to the specified value
70 | void set_ept_memory_type(vcpu_ept_data& ept, uint8_t memory_type);
71 |
72 | // get the corresponding EPT PDPTE for a given physical address
73 | ept_pdpte* get_ept_pdpte(vcpu_ept_data& ept, uint64_t physical_address);
74 |
75 | // get the corresponding EPT PDE for a given physical address
76 | ept_pde* get_ept_pde(vcpu_ept_data& ept, uint64_t physical_address);
77 |
78 | // get the corresponding EPT PTE for a given physical address
79 | ept_pte* get_ept_pte(vcpu_ept_data& ept,
80 | uint64_t physical_address, bool force_split = false);
81 |
82 | // split a 2MB EPT PDE so that it points to an EPT PT
83 | void split_ept_pde(vcpu_ept_data& ept, ept_pde_2mb* pde_2mb);
84 |
85 | // memory read/written will use the original page while code
86 | // being executed will use the executable page instead
87 | bool install_ept_hook(vcpu_ept_data& ept,
88 | uint64_t original_page_pfn, uint64_t executable_page_pfn);
89 |
90 | // remove an EPT hook that was installed with install_ept_hook()
91 | void remove_ept_hook(vcpu_ept_data& ept, uint64_t original_page_pfn);
92 |
93 | // find the EPT hook for the specified PFN
94 | vcpu_ept_hook_node* find_ept_hook(vcpu_ept_data& ept, uint64_t original_page_pfn);
95 |
96 | } // namespace hv
97 |
98 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/exception-routines.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ?memcpy_safe@hv@@YAXAEAUhost_exception_info@1@PEAXPEBX_K@Z proc
4 | mov r10, ehandler
5 | mov r11, rcx
6 | mov byte ptr [rcx], 0
7 |
8 | ; store RSI and RDI
9 | push rsi
10 | push rdi
11 |
12 | mov rsi, r8
13 | mov rdi, rdx
14 | mov rcx, r9
15 |
16 | rep movsb
17 |
18 | ehandler:
19 | ; restore RDI and RSI
20 | pop rdi
21 | pop rsi
22 |
23 | ret
24 | ?memcpy_safe@hv@@YAXAEAUhost_exception_info@1@PEAXPEBX_K@Z endp
25 |
26 | ?xsetbv_safe@hv@@YAXAEAUhost_exception_info@1@I_K@Z proc
27 | mov r10, ehandler
28 | mov r11, rcx
29 | mov byte ptr [rcx], 0
30 |
31 | ; idx
32 | mov ecx, edx
33 |
34 | ; value (low part)
35 | mov eax, r8d
36 |
37 | ; value (high part)
38 | mov rdx, r8
39 | shr rdx, 32
40 |
41 | xsetbv
42 |
43 | ehandler:
44 | ret
45 | ?xsetbv_safe@hv@@YAXAEAUhost_exception_info@1@I_K@Z endp
46 |
47 | ?wrmsr_safe@hv@@YAXAEAUhost_exception_info@1@I_K@Z proc
48 | mov r10, ehandler
49 | mov r11, rcx
50 | mov byte ptr [rcx], 0
51 |
52 | ; msr
53 | mov ecx, edx
54 |
55 | ; value
56 | mov eax, r8d
57 | mov rdx, r8
58 | shr rdx, 32
59 |
60 | wrmsr
61 |
62 | ehandler:
63 | ret
64 | ?wrmsr_safe@hv@@YAXAEAUhost_exception_info@1@I_K@Z endp
65 |
66 | ?rdmsr_safe@hv@@YA_KAEAUhost_exception_info@1@I@Z proc
67 | mov r10, ehandler
68 | mov r11, rcx
69 | mov byte ptr [rcx], 0
70 |
71 | ; msr
72 | mov ecx, edx
73 |
74 | rdmsr
75 |
76 | ; return value
77 | shl rdx, 32
78 | or rax, rdx ; combine EDX:EAX into a 64-bit result
79 |
80 | ehandler:
81 | ret
82 | ?rdmsr_safe@hv@@YA_KAEAUhost_exception_info@1@I@Z endp
83 |
84 | end
85 |
86 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/exception-routines.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | // structure that gets filled out when a host exception occurs
8 | struct host_exception_info {
9 | // whether an exception occurred or not
10 | bool exception_occurred;
11 |
12 | // interrupt vector
13 | uint64_t vector;
14 |
15 | // error code
16 | uint64_t error;
17 | };
18 |
19 | // memcpy with exception handling
20 | void memcpy_safe(host_exception_info& e, void* dst, void const* src, size_t size);
21 |
22 | // xsetbv with exception handling
23 | void xsetbv_safe(host_exception_info& e, uint32_t idx, uint64_t value);
24 |
25 | // wrmsr with exception handling
26 | void wrmsr_safe(host_exception_info& e, uint32_t msr, uint64_t value);
27 |
28 | // rdmsr with exception handling
29 | uint64_t rdmsr_safe(host_exception_info& e, uint32_t msr);
30 |
31 | } // namespace hv
32 |
33 |
--------------------------------------------------------------------------------
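
These wrappers let the exit handlers touch MSRs and XCRs on behalf of the guest without risking a host crash; the typical pattern (mirroring emulate_rdmsr in exit-handlers.cpp below) is:

    hv::host_exception_info e;
    auto const value = hv::rdmsr_safe(e, cpu->ctx->ecx);
    if (e.exception_occurred) {
      // reflect the fault back into the guest rather than crashing the host
      inject_hw_exception(general_protection, 0);
    }
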
/ReloadDbg/hv/hv/exit-handlers.cpp:
--------------------------------------------------------------------------------
1 | #include "exit-handlers.h"
2 | #include "guest-context.h"
3 | #include "exception-routines.h"
4 | #include "hypercalls.h"
5 | #include "vcpu.h"
6 | #include "vmx.h"
7 | #include "logger.h"
8 |
9 | namespace hv {
10 |
11 | void emulate_cpuid(vcpu* const cpu) {
12 | auto const ctx = cpu->ctx;
13 |
14 | int regs[4];
15 | __cpuidex(regs, ctx->eax, ctx->ecx);
16 |
17 | ctx->rax = regs[0];
18 | ctx->rbx = regs[1];
19 | ctx->rcx = regs[2];
20 | ctx->rdx = regs[3];
21 |
22 | cpu->hide_vm_exit_overhead = true;
23 | skip_instruction();
24 | }
25 |
26 | void emulate_rdmsr(vcpu* const cpu) {
27 | if (cpu->ctx->ecx == IA32_FEATURE_CONTROL) {
28 | // return the fake guest FEATURE_CONTROL MSR
29 | cpu->ctx->rax = cpu->cached.guest_feature_control.flags & 0xFFFF'FFFF;
30 | cpu->ctx->rdx = cpu->cached.guest_feature_control.flags >> 32;
31 |
32 | cpu->hide_vm_exit_overhead = true;
33 | skip_instruction();
34 | return;
35 | }
36 |
37 | host_exception_info e;
38 |
39 | // the guest could be reading from MSRs that are outside of the MSR bitmap
40 | // range. refer to https://www.unknowncheats.me/forum/3425463-post15.html
41 | auto const msr_value = rdmsr_safe(e, cpu->ctx->ecx);
42 |
43 | if (e.exception_occurred) {
44 | // reflect the exception back into the guest
45 | inject_hw_exception(general_protection, 0);
46 | return;
47 | }
48 |
49 | cpu->ctx->rax = msr_value & 0xFFFF'FFFF;
50 | cpu->ctx->rdx = msr_value >> 32;
51 |
52 | cpu->hide_vm_exit_overhead = true;
53 | skip_instruction();
54 | }
55 |
56 | void emulate_wrmsr(vcpu* const cpu) {
57 | auto const msr = cpu->ctx->ecx;
58 | auto const value = (cpu->ctx->rdx << 32) | cpu->ctx->eax;
59 |
60 | // let the guest write to the MSRs
61 | host_exception_info e;
62 | wrmsr_safe(e, msr, value);
63 |
64 | if (e.exception_occurred) {
65 | inject_hw_exception(general_protection, 0);
66 | return;
67 | }
68 |
69 | // we need to make sure to update EPT memory types if the guest
70 | // modifies any of the MTRR registers
71 | if (msr == IA32_MTRR_DEF_TYPE || msr == IA32_MTRR_FIX64K_00000 ||
72 | msr == IA32_MTRR_FIX16K_80000 || msr == IA32_MTRR_FIX16K_A0000 ||
73 | (msr >= IA32_MTRR_FIX4K_C0000 && msr <= IA32_MTRR_FIX4K_F8000) ||
74 | (msr >= IA32_MTRR_PHYSBASE0 && msr <= IA32_MTRR_PHYSBASE0 + 511)) {
75 | // update EPT memory types
76 | if (!read_effective_guest_cr0().cache_disable)
77 | update_ept_memory_type(cpu->ept);
78 |
79 | vmx_invept(invept_all_context, {});
80 | }
81 |
82 | cpu->hide_vm_exit_overhead = true;
83 | skip_instruction();
84 | return;
85 | }
86 |
87 | void emulate_getsec(vcpu*) {
88 | // inject a #GP(0) since SMX is disabled in the IA32_FEATURE_CONTROL MSR
89 | inject_hw_exception(general_protection, 0);
90 | }
91 |
92 | void emulate_invd(vcpu*) {
93 | // TODO: properly implement INVD (can probably make a very small stub
94 | // that flushes specific cacheline entries prior to executing INVD)
95 | inject_hw_exception(general_protection, 0);
96 | }
97 |
98 | void emulate_xsetbv(vcpu* const cpu) {
99 | // 3.2.6
100 |
101 | // CR4.OSXSAVE must be 1
102 | if (!read_effective_guest_cr4().os_xsave) {
103 | inject_hw_exception(invalid_opcode);
104 | return;
105 | }
106 |
107 | xcr0 new_xcr0;
108 | new_xcr0.flags = (cpu->ctx->rdx << 32) | cpu->ctx->eax;
109 |
110 | // only XCR0 is supported
111 | if (cpu->ctx->ecx != 0) {
112 | inject_hw_exception(general_protection, 0);
113 | return;
114 | }
115 |
116 | // #GP(0) if trying to set an unsupported bit
117 | if (new_xcr0.flags & cpu->cached.xcr0_unsupported_mask) {
118 | inject_hw_exception(general_protection, 0);
119 | return;
120 | }
121 |
122 | // #GP(0) if clearing XCR0.X87
123 | if (!new_xcr0.x87) {
124 | inject_hw_exception(general_protection, 0);
125 | return;
126 | }
127 |
128 | // #GP(0) if XCR0.AVX is 1 while XCR0.SSE is cleared
129 | if (new_xcr0.avx && !new_xcr0.sse) {
130 | inject_hw_exception(general_protection, 0);
131 | return;
132 | }
133 |
134 | // #GP(0) if XCR0.AVX is clear and XCR0.opmask, XCR0.ZMM_Hi256, or XCR0.Hi16_ZMM is set
135 | if (!new_xcr0.avx && (new_xcr0.opmask || new_xcr0.zmm_hi256 || new_xcr0.zmm_hi16)) {
136 | inject_hw_exception(general_protection, 0);
137 | return;
138 | }
139 |
140 | // #GP(0) if setting XCR0.BNDREG or XCR0.BNDCSR while not setting the other
141 | if (new_xcr0.bndreg != new_xcr0.bndcsr) {
142 | inject_hw_exception(general_protection, 0);
143 | return;
144 | }
145 |
146 | // #GP(0) if setting XCR0.opmask, XCR0.ZMM_Hi256, or XCR0.Hi16_ZMM while not setting all of them
147 | if (new_xcr0.opmask != new_xcr0.zmm_hi256 || new_xcr0.zmm_hi256 != new_xcr0.zmm_hi16) {
148 | inject_hw_exception(general_protection, 0);
149 | return;
150 | }
151 |
152 | host_exception_info e;
153 | xsetbv_safe(e, cpu->ctx->ecx, new_xcr0.flags);
154 |
155 | if (e.exception_occurred) {
156 | // TODO: assert that it was a #GP(0) that occurred, although I really
157 | // doubt that any other exception could happen (according to manual).
158 | inject_hw_exception(general_protection, 0);
159 | return;
160 | }
161 |
162 | HV_LOG_VERBOSE("Wrote %p to XCR0.", new_xcr0.flags);
163 |
164 | cpu->hide_vm_exit_overhead = true;
165 | skip_instruction();
166 | }
167 |
168 | void emulate_vmxon(vcpu*) {
169 | // usually a #UD doesn't trigger a vm-exit, but in this case it is possible
170 | // that CR4.VMXE is 1 while guest shadow CR4.VMXE is 0.
171 | if (!read_effective_guest_cr4().vmx_enable) {
172 | inject_hw_exception(invalid_opcode);
173 | return;
174 | }
175 |
176 | // we are spoofing the value of the IA32_FEATURE_CONTROL MSR in
177 | // order to convince the guest that VMX has been disabled by BIOS.
178 | inject_hw_exception(general_protection, 0);
179 | }
180 |
181 | void emulate_vmcall(vcpu* const cpu) {
182 | auto const code = cpu->ctx->rax & 0xFF;
183 | auto const key = cpu->ctx->rax >> 8;
184 |
185 | // validate the hypercall key
186 | if (key != hypercall_key) {
187 | inject_hw_exception(invalid_opcode);
188 | return;
189 | }
190 |
191 | // handle the hypercall
192 | switch (code) {
193 | case hypercall_ping: hc::ping(cpu); return;
194 | case hypercall_test: hc::test(cpu); return;
195 | case hypercall_unload: hc::unload(cpu); return;
196 | case hypercall_read_phys_mem: hc::read_phys_mem(cpu); return;
197 | case hypercall_write_phys_mem: hc::write_phys_mem(cpu); return;
198 | case hypercall_read_virt_mem: hc::read_virt_mem(cpu); return;
199 | case hypercall_write_virt_mem: hc::write_virt_mem(cpu); return;
200 | case hypercall_query_process_cr3: hc::query_process_cr3(cpu); return;
201 | case hypercall_install_ept_hook: hc::install_ept_hook(cpu); return;
202 | case hypercall_remove_ept_hook: hc::remove_ept_hook(cpu); return;
203 | case hypercall_flush_logs: hc::flush_logs(cpu); return;
204 | case hypercall_get_physical_address: hc::get_physical_address(cpu); return;
205 | }
206 |
207 | HV_LOG_VERBOSE("Unhandled VMCALL. RIP=%p.", vmx_vmread(VMCS_GUEST_RIP));
208 |
209 | inject_hw_exception(invalid_opcode);
210 | }
211 |
212 | void handle_vmx_preemption(vcpu*) {
213 | // do nothing.
214 | }
215 |
216 | void emulate_mov_to_cr0(vcpu* const cpu, uint64_t const gpr) {
217 | // 2.4.3
218 | // 3.2.5
219 | // 3.4.10.1
220 | // 3.26.3.2.1
221 |
222 | cr0 new_cr0;
223 | new_cr0.flags = read_guest_gpr(cpu->ctx, gpr);
224 |
225 | auto const curr_cr0 = read_effective_guest_cr0();
226 | auto const curr_cr4 = read_effective_guest_cr4();
227 |
228 | // CR0[15:6] is always 0
229 | new_cr0.reserved1 = 0;
230 |
231 | // CR0[17] is always 0
232 | new_cr0.reserved2 = 0;
233 |
234 | // CR0[28:19] is always 0
235 | new_cr0.reserved3 = 0;
236 |
237 | // CR0.ET is always 1
238 | new_cr0.extension_type = 1;
239 |
240 | // #GP(0) if setting any reserved bits in CR0[63:32]
241 | if (new_cr0.reserved4) {
242 | inject_hw_exception(general_protection, 0);
243 | return;
244 | }
245 |
246 | // #GP(0) if setting CR0.PG while CR0.PE is clear
247 | if (new_cr0.paging_enable && !new_cr0.protection_enable) {
248 | inject_hw_exception(general_protection, 0);
249 | return;
250 | }
251 |
252 | // #GP(0) if invalid bit combination
253 | if (!new_cr0.cache_disable && new_cr0.not_write_through) {
254 | inject_hw_exception(general_protection, 0);
255 | return;
256 | }
257 |
258 | // #GP(0) if an attempt is made to clear CR0.PG
259 | if (!new_cr0.paging_enable) {
260 | inject_hw_exception(general_protection, 0);
261 | return;
262 | }
263 |
264 | // #GP(0) if an attempt is made to clear CR0.WP while CR4.CET is set
265 | if (!new_cr0.write_protect && curr_cr4.control_flow_enforcement_enable) {
266 | inject_hw_exception(general_protection, 0);
267 | return;
268 | }
269 |
270 | // the guest tried to modify CR0.CD or CR0.NW, which must be updated manually
271 | if (new_cr0.cache_disable != curr_cr0.cache_disable ||
272 | new_cr0.not_write_through != curr_cr0.not_write_through) {
273 | // TODO: should we care about NW?
274 | if (new_cr0.cache_disable)
275 | set_ept_memory_type(cpu->ept, MEMORY_TYPE_UNCACHEABLE);
276 | else
277 | update_ept_memory_type(cpu->ept);
278 |
279 | vmx_invept(invept_all_context, {});
280 | }
281 |
282 | HV_LOG_VERBOSE("Writing %p to CR0.", new_cr0.flags);
283 |
284 | vmx_vmwrite(VMCS_CTRL_CR0_READ_SHADOW, new_cr0.flags);
285 |
286 | // make sure to account for VMX reserved bits when setting the real CR0
287 | new_cr0.flags |= cpu->cached.vmx_cr0_fixed0;
288 | new_cr0.flags &= cpu->cached.vmx_cr0_fixed1;
289 |
290 | vmx_vmwrite(VMCS_GUEST_CR0, new_cr0.flags);
291 |
292 | cpu->hide_vm_exit_overhead = true;
293 | skip_instruction();
294 | }
295 |
296 | void emulate_mov_to_cr3(vcpu* const cpu, uint64_t const gpr) {
297 | cr3 new_cr3;
298 | new_cr3.flags = read_guest_gpr(cpu->ctx, gpr);
299 |
300 | auto const curr_cr4 = read_effective_guest_cr4();
301 |
302 | bool invalidate_tlb = true;
303 |
304 | // 3.4.10.4.1
305 | if (curr_cr4.pcid_enable && (new_cr3.flags & (1ull << 63))) {
306 | invalidate_tlb = false;
307 | new_cr3.flags &= ~(1ull << 63);
308 | }
309 |
310 | // a mask where bits [63:MAXPHYSADDR] are set to 1
311 | auto const reserved_mask = ~((1ull << cpu->cached.max_phys_addr) - 1);
312 |
313 | // 3.2.5
314 | if (new_cr3.flags & reserved_mask) {
315 | inject_hw_exception(general_protection, 0);
316 | return;
317 | }
318 |
319 | // 3.28.4.3.3
320 | if (invalidate_tlb) {
321 | invvpid_descriptor desc;
322 | desc.linear_address = 0;
323 | desc.reserved1 = 0;
324 | desc.reserved2 = 0;
325 | desc.vpid = guest_vpid;
326 | vmx_invvpid(invvpid_single_context_retaining_globals, desc);
327 | }
328 |
329 | // it is now safe to write the new guest cr3
330 | vmx_vmwrite(VMCS_GUEST_CR3, new_cr3.flags);
331 |
332 | cpu->hide_vm_exit_overhead = true;
333 | skip_instruction();
334 | }
335 |
336 | void emulate_mov_to_cr4(vcpu* const cpu, uint64_t const gpr) {
337 | // 2.4.3
338 | // 2.6.2.1
339 | // 3.2.5
340 | // 3.4.10.1
341 | // 3.4.10.4.1
342 |
343 | cr4 new_cr4;
344 | new_cr4.flags = read_guest_gpr(cpu->ctx, gpr);
345 |
346 | cr3 curr_cr3;
347 | curr_cr3.flags = vmx_vmread(VMCS_GUEST_CR3);
348 |
349 | auto const curr_cr0 = read_effective_guest_cr0();
350 | auto const curr_cr4 = read_effective_guest_cr4();
351 |
352 | // #GP(0) if an attempt is made to set CR4.SMXE when SMX is not supported
353 | if (!cpu->cached.cpuid_01.cpuid_feature_information_ecx.safer_mode_extensions
354 | && new_cr4.smx_enable) {
355 | inject_hw_exception(general_protection, 0);
356 | return;
357 | }
358 |
359 | // #GP(0) if an attempt is made to write a 1 to any reserved bits
360 | if (new_cr4.reserved1 || new_cr4.reserved2) {
361 | inject_hw_exception(general_protection, 0);
362 | return;
363 | }
364 |
365 | // #GP(0) if an attempt is made to change CR4.PCIDE from 0 to 1 while CR3[11:0] != 000H
366 | if ((new_cr4.pcid_enable && !curr_cr4.pcid_enable) && (curr_cr3.flags & 0xFFF)) {
367 | inject_hw_exception(general_protection, 0);
368 | return;
369 | }
370 |
371 | // #GP(0) if CR4.PAE is cleared
372 | if (!new_cr4.physical_address_extension) {
373 | inject_hw_exception(general_protection, 0);
374 | return;
375 | }
376 |
377 | // #GP(0) if CR4.LA57 is enabled
378 | if (new_cr4.linear_addresses_57_bit) {
379 | inject_hw_exception(general_protection, 0);
380 | return;
381 | }
382 |
383 | // #GP(0) if CR4.CET == 1 and CR0.WP == 0
384 | if (new_cr4.control_flow_enforcement_enable && !curr_cr0.write_protect) {
385 | inject_hw_exception(general_protection, 0);
386 | return;
387 | }
388 |
389 | // invalidate TLB entries if required
390 | if (new_cr4.page_global_enable != curr_cr4.page_global_enable ||
391 | (!new_cr4.pcid_enable && curr_cr4.pcid_enable) ||
392 | (new_cr4.smep_enable && !curr_cr4.smep_enable)) {
393 | invvpid_descriptor desc;
394 | desc.linear_address = 0;
395 | desc.reserved1 = 0;
396 | desc.reserved2 = 0;
397 | desc.vpid = guest_vpid;
398 | vmx_invvpid(invvpid_single_context, desc);
399 | }
400 |
401 | HV_LOG_VERBOSE("Writing %p to CR4.", new_cr4.flags);
402 |
403 | vmx_vmwrite(VMCS_CTRL_CR4_READ_SHADOW, new_cr4.flags);
404 |
405 | // make sure to account for VMX reserved bits when setting the real CR4
406 | new_cr4.flags |= cpu->cached.vmx_cr4_fixed0;
407 | new_cr4.flags &= cpu->cached.vmx_cr4_fixed1;
408 |
409 | vmx_vmwrite(VMCS_GUEST_CR4, new_cr4.flags);
410 |
411 | cpu->hide_vm_exit_overhead = true;
412 | skip_instruction();
413 | }
414 |
415 | void emulate_mov_from_cr3(vcpu* const cpu, uint64_t const gpr) {
416 | write_guest_gpr(cpu->ctx, gpr, vmx_vmread(VMCS_GUEST_CR3));
417 |
418 | cpu->hide_vm_exit_overhead = true;
419 | skip_instruction();
420 | }
421 |
422 | void emulate_clts(vcpu* const cpu) {
423 | // clear CR0.TS in the read shadow
424 | vmx_vmwrite(VMCS_CTRL_CR0_READ_SHADOW,
425 | vmx_vmread(VMCS_CTRL_CR0_READ_SHADOW) & ~CR0_TASK_SWITCHED_FLAG);
426 |
427 | // clear CR0.TS in the real CR0 register
428 | vmx_vmwrite(VMCS_GUEST_CR0,
429 | vmx_vmread(VMCS_GUEST_CR0) & ~CR0_TASK_SWITCHED_FLAG);
430 |
431 | cpu->hide_vm_exit_overhead = true;
432 | skip_instruction();
433 | }
434 |
435 | void emulate_lmsw(vcpu* const cpu, uint16_t const value) {
436 | // 3.25.1.3
437 |
438 | cr0 new_cr0;
439 | new_cr0.flags = value;
440 |
441 | // update the guest CR0 read shadow
442 | cr0 shadow_cr0;
443 | shadow_cr0.flags = vmx_vmread(VMCS_CTRL_CR0_READ_SHADOW);
444 | shadow_cr0.protection_enable = new_cr0.protection_enable;
445 | shadow_cr0.monitor_coprocessor = new_cr0.monitor_coprocessor;
446 | shadow_cr0.emulate_fpu = new_cr0.emulate_fpu;
447 | shadow_cr0.task_switched = new_cr0.task_switched;
448 | vmx_vmwrite(VMCS_CTRL_CR0_READ_SHADOW, shadow_cr0.flags);
449 |
450 | // update the real guest CR0.
451 | // we don't have to worry about VMX reserved bits since CR0.PE (the only
452 | // reserved bit) can't be cleared to 0 by the LMSW instruction while in
453 | // protected mode.
454 | cr0 real_cr0;
455 | real_cr0.flags = vmx_vmread(VMCS_GUEST_CR0);
456 | real_cr0.protection_enable = new_cr0.protection_enable;
457 | real_cr0.monitor_coprocessor = new_cr0.monitor_coprocessor;
458 | real_cr0.emulate_fpu = new_cr0.emulate_fpu;
459 | real_cr0.task_switched = new_cr0.task_switched;
460 | vmx_vmwrite(VMCS_GUEST_CR0, real_cr0.flags);
461 |
462 | cpu->hide_vm_exit_overhead = true;
463 | skip_instruction();
464 | }
465 |
466 | void handle_mov_cr(vcpu* const cpu) {
467 | vmx_exit_qualification_mov_cr qualification;
468 | qualification.flags = vmx_vmread(VMCS_EXIT_QUALIFICATION);
469 |
470 | switch (qualification.access_type) {
471 | // MOV CRn, XXX
472 | case VMX_EXIT_QUALIFICATION_ACCESS_MOV_TO_CR:
473 | switch (qualification.control_register) {
474 | case VMX_EXIT_QUALIFICATION_REGISTER_CR0:
475 | emulate_mov_to_cr0(cpu, qualification.general_purpose_register);
476 | break;
477 | case VMX_EXIT_QUALIFICATION_REGISTER_CR3:
478 | emulate_mov_to_cr3(cpu, qualification.general_purpose_register);
479 | break;
480 | case VMX_EXIT_QUALIFICATION_REGISTER_CR4:
481 | emulate_mov_to_cr4(cpu, qualification.general_purpose_register);
482 | break;
483 | }
484 | break;
485 | // MOV XXX, CRn
486 | case VMX_EXIT_QUALIFICATION_ACCESS_MOV_FROM_CR:
487 | // TODO: assert that we're accessing CR3 (and not CR8)
488 | emulate_mov_from_cr3(cpu, qualification.general_purpose_register);
489 | break;
490 | // CLTS
491 | case VMX_EXIT_QUALIFICATION_ACCESS_CLTS:
492 | emulate_clts(cpu);
493 | break;
494 | // LMSW XXX
495 | case VMX_EXIT_QUALIFICATION_ACCESS_LMSW:
496 | emulate_lmsw(cpu, qualification.lmsw_source_data);
497 | break;
498 | }
499 | }
500 |
501 | void handle_nmi_window(vcpu* const cpu) {
502 | --cpu->queued_nmis;
503 |
504 | // inject the NMI into the guest
505 | inject_nmi();
506 |
507 | HV_LOG_VERBOSE("Injecting NMI into guest.");
508 |
509 | if (cpu->queued_nmis == 0) {
510 | // disable NMI-window exiting since we have no more NMIs to inject
511 | auto ctrl = read_ctrl_proc_based();
512 | ctrl.nmi_window_exiting = 0;
513 | write_ctrl_proc_based(ctrl);
514 | }
515 |
516 | // there is the possibility that a host NMI occurred right before we
517 | // disabled NMI-window exiting. make sure to re-enable it if this is the case.
518 | if (cpu->queued_nmis > 0) {
519 | auto ctrl = read_ctrl_proc_based();
520 | ctrl.nmi_window_exiting = 1;
521 | write_ctrl_proc_based(ctrl);
522 | }
523 | }
524 |
525 | void handle_exception_or_nmi(vcpu* const cpu) {
526 | // enqueue an NMI to be injected into the guest later on
527 | ++cpu->queued_nmis;
528 |
529 | auto ctrl = read_ctrl_proc_based();
530 | ctrl.nmi_window_exiting = 1;
531 | write_ctrl_proc_based(ctrl);
532 | }
533 |
534 | void handle_vmx_instruction(vcpu*) {
535 | // inject #UD for every VMX instruction since we
536 | // don't allow the guest to ever enter VMX operation.
537 | inject_hw_exception(invalid_opcode);
538 | }
539 |
540 | void handle_ept_violation(vcpu* const cpu) {
541 | vmx_exit_qualification_ept_violation qualification;
542 | qualification.flags = vmx_vmread(VMCS_EXIT_QUALIFICATION);
543 |
544 |   // address that caused the ept-violation: the guest physical address if it
545 |   // was caused by a translation, otherwise the exit's guest linear address
545 | auto const physical_address = vmx_vmread(qualification.caused_by_translation ?
546 | VMCS_GUEST_PHYSICAL_ADDRESS : VMCS_EXIT_GUEST_LINEAR_ADDRESS);
547 |
548 | if (qualification.execute_access &&
549 | (qualification.write_access || qualification.read_access)) {
550 | HV_LOG_ERROR("Invalid EPT access combination. PhysAddr = %p.", physical_address);
551 | inject_hw_exception(machine_check);
552 | return;
553 | }
554 |
555 | auto const hook = find_ept_hook(cpu->ept, physical_address >> 12);
556 |
557 | if (!hook) {
558 | HV_LOG_ERROR("Failed to find EPT hook. PhysAddr = %p.", physical_address);
559 | inject_hw_exception(machine_check);
560 | return;
561 | }
562 |
563 | auto const pte = get_ept_pte(cpu->ept, physical_address);
564 |
565 | if (qualification.execute_access) {
566 | pte->read_access = 0;
567 | pte->write_access = 0;
568 | pte->execute_access = 1;
569 | pte->page_frame_number = hook->exec_pfn;
570 | } else {
571 | pte->read_access = 1;
572 | pte->write_access = 1;
573 | pte->execute_access = 0;
574 | pte->page_frame_number = hook->orig_pfn;
575 | }
576 | }
577 |
578 | void emulate_rdtsc(vcpu* const cpu) {
579 | auto const tsc = __rdtsc();
580 |
581 | // return current TSC
582 | cpu->ctx->rax = tsc & 0xFFFFFFFF;
583 | cpu->ctx->rdx = (tsc >> 32) & 0xFFFFFFFF;
584 |
585 | skip_instruction();
586 | }
587 |
588 | void emulate_rdtscp(vcpu* const cpu) {
589 | unsigned int aux = 0;
590 | auto const tsc = __rdtscp(&aux);
591 |
592 | // return current TSC
593 | cpu->ctx->rax = tsc & 0xFFFFFFFF;
594 | cpu->ctx->rdx = (tsc >> 32) & 0xFFFFFFFF;
595 | cpu->ctx->rcx = aux;
596 |
597 | skip_instruction();
598 | }
599 |
600 | } // namespace hv
601 |
602 |
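A note on the fixed-bit masking at the end of emulate_mov_to_cr4(): while VMX is on, certain CR4 bits are forced by the IA32_VMX_CR4_FIXED0/FIXED1 MSRs, so the real guest CR4 is adjusted while the unmodified value goes into the read shadow. A minimal sketch of how those cached masks could be derived (the MSR indices 0x488/0x489 are architectural; the constant names here are made up for the example):

    #include <cstdint>
    #include <intrin.h>

    // hypothetical names for the architectural VMX CR4 fixed-bit MSRs
    constexpr uint32_t IA32_VMX_CR4_FIXED0_MSR = 0x488; // bits that must be 1
    constexpr uint32_t IA32_VMX_CR4_FIXED1_MSR = 0x489; // bits that may be 1

    // same adjustment as the two lines before the VMCS_GUEST_CR4 write above
    inline uint64_t constrain_cr4(uint64_t flags) {
      flags |= __readmsr(IA32_VMX_CR4_FIXED0_MSR); // force must-be-one bits on
      flags &= __readmsr(IA32_VMX_CR4_FIXED1_MSR); // force must-be-zero bits off
      return flags;
    }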
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/exit-handlers.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct vcpu;
8 |
9 | void emulate_cpuid(vcpu* cpu);
10 |
11 | void emulate_rdmsr(vcpu* cpu);
12 |
13 | void emulate_wrmsr(vcpu* cpu);
14 |
15 | void emulate_getsec(vcpu* cpu);
16 |
17 | void emulate_invd(vcpu* cpu);
18 |
19 | void emulate_xsetbv(vcpu* cpu);
20 |
21 | void emulate_vmxon(vcpu* cpu);
22 |
23 | void emulate_vmcall(vcpu* cpu);
24 |
25 | void handle_vmx_preemption(vcpu* cpu);
26 |
27 | void emulate_mov_to_cr0(vcpu* cpu, uint64_t gpr);
28 |
29 | void emulate_mov_to_cr3(vcpu* cpu, uint64_t gpr);
30 |
31 | void emulate_mov_to_cr4(vcpu* cpu, uint64_t gpr);
32 |
33 | void emulate_mov_from_cr3(vcpu* cpu, uint64_t gpr);
34 |
35 | void emulate_clts(vcpu* cpu);
36 |
37 | void emulate_lmsw(vcpu* cpu, uint16_t value);
38 |
39 | void handle_mov_cr(vcpu* cpu);
40 |
41 | void handle_nmi_window(vcpu* cpu);
42 |
43 | void handle_exception_or_nmi(vcpu* cpu);
44 |
45 | void handle_vmx_instruction(vcpu* cpu);
46 |
47 | void handle_ept_violation(vcpu* cpu);
48 |
49 | void emulate_rdtsc(vcpu* cpu);
50 |
51 | void emulate_rdtscp(vcpu* cpu);
52 |
53 | } // namespace hv
54 |
55 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/gdt.cpp:
--------------------------------------------------------------------------------
1 | #include "gdt.h"
2 | #include "vcpu.h"
3 | #include "mm.h"
4 |
5 | namespace hv {
6 |
7 | // initialize the host GDT and populate every descriptor
8 | void prepare_host_gdt(
9 | segment_descriptor_32* const gdt,
10 | task_state_segment_64 const* const tss) {
11 | memset(gdt, 0, host_gdt_descriptor_count * sizeof(gdt[0]));
12 |
13 | // setup the CS segment descriptor
14 | auto& cs_desc = gdt[host_cs_selector.index];
15 | cs_desc.type = SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ;
16 | cs_desc.descriptor_type = SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA;
17 | cs_desc.descriptor_privilege_level = 0;
18 | cs_desc.present = 1;
19 | cs_desc.long_mode = 1;
20 | cs_desc.default_big = 0;
21 | cs_desc.granularity = 0;
22 |
23 | // setup the TSS segment descriptor
24 |   auto& tss_desc = *reinterpret_cast<segment_descriptor_64*>(
25 |     &gdt[host_tr_selector.index]);
26 | tss_desc.type = SEGMENT_DESCRIPTOR_TYPE_TSS_BUSY;
27 | tss_desc.descriptor_type = SEGMENT_DESCRIPTOR_TYPE_SYSTEM;
28 | tss_desc.descriptor_privilege_level = 0;
29 | tss_desc.present = 1;
30 | tss_desc.granularity = 0;
31 | tss_desc.segment_limit_low = 0x67;
32 | tss_desc.segment_limit_high = 0;
33 |
34 | // point the TSS descriptor to our TSS -_-
35 |   auto const base = reinterpret_cast<uint64_t>(tss);
36 | tss_desc.base_address_low = (base >> 00) & 0xFFFF;
37 | tss_desc.base_address_middle = (base >> 16) & 0xFF;
38 | tss_desc.base_address_high = (base >> 24) & 0xFF;
39 | tss_desc.base_address_upper = (base >> 32) & 0xFFFFFFFF;
40 | }
41 |
42 | } // namespace hv
43 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/gdt.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | // selectors for the host GDT
8 | inline constexpr segment_selector host_cs_selector = { 0, 0, 1 };
9 | inline constexpr segment_selector host_tr_selector = { 0, 0, 2 };
10 |
11 | // number of available descriptor slots in the host GDT
12 | inline constexpr size_t host_gdt_descriptor_count = 4;
13 |
14 | // initialize the host GDT and populate every descriptor
15 | void prepare_host_gdt(segment_descriptor_32* gdt, task_state_segment_64 const* tss);
16 |
17 | } // namespace hv
18 |
19 |
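Since an x86 selector is just (index << 3) | (table << 2) | rpl, the two host selectors above correspond to raw selector values 0x08 (CS) and 0x10 (TR). A self-contained check using a local stand-in for the segment_selector bitfields (the exact field names in ia32.hpp aren't shown in this dump):

    #include <cstdint>

    union selector {
      uint16_t flags;
      struct {
        uint16_t rpl   : 2;  // requested privilege level
        uint16_t table : 1;  // 0 = GDT, 1 = LDT
        uint16_t index : 13; // descriptor slot in the table
      };
    };

    int main() {
      selector cs{}; cs.rpl = 0; cs.table = 0; cs.index = 1;
      selector tr{}; tr.rpl = 0; tr.table = 0; tr.index = 2;
      return (cs.flags == 0x08 && tr.flags == 0x10) ? 0 : 1;
    }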
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/guest-context.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 | #include <ntddk.h>
5 |
6 | namespace hv {
7 |
8 | // contains state that isn't stored in guest vmcs fields
9 | struct alignas(16) guest_context {
10 | union {
11 | uint64_t gpr[16];
12 |
13 | // aliases for general-purpose registers
14 | struct {
15 | union {
16 | uint64_t rax;
17 | uint32_t eax;
18 | uint16_t ax;
19 | uint8_t al;
20 | };
21 | union {
22 | uint64_t rcx;
23 | uint32_t ecx;
24 | uint16_t cx;
25 | uint8_t cl;
26 | };
27 | union {
28 | uint64_t rdx;
29 | uint32_t edx;
30 | uint16_t dx;
31 | uint8_t dl;
32 | };
33 | union {
34 | uint64_t rbx;
35 | uint32_t ebx;
36 | uint16_t bx;
37 | uint8_t bl;
38 | };
39 |
40 | // this is where RSP would be if it wasn't saved in the vmcs
41 | uint64_t _padding;
42 |
43 | union {
44 | uint64_t rbp;
45 | uint32_t ebp;
46 | uint16_t bp;
47 | uint8_t bpl;
48 | };
49 | union {
50 | uint64_t rsi;
51 | uint32_t esi;
52 | uint16_t si;
53 | uint8_t sil;
54 | };
55 | union {
56 | uint64_t rdi;
57 | uint32_t edi;
58 | uint16_t di;
59 | uint8_t dil;
60 | };
61 | union {
62 | uint64_t r8;
63 | uint32_t r8d;
64 | uint16_t r8w;
65 | uint8_t r8b;
66 | };
67 | union {
68 | uint64_t r9;
69 | uint32_t r9d;
70 | uint16_t r9w;
71 | uint8_t r9b;
72 | };
73 | union {
74 | uint64_t r10;
75 | uint32_t r10d;
76 | uint16_t r10w;
77 | uint8_t r10b;
78 | };
79 | union {
80 | uint64_t r11;
81 | uint32_t r11d;
82 | uint16_t r11w;
83 | uint8_t r11b;
84 | };
85 | union {
86 | uint64_t r12;
87 | uint32_t r12d;
88 | uint16_t r12w;
89 | uint8_t r12b;
90 | };
91 | union {
92 | uint64_t r13;
93 | uint32_t r13d;
94 | uint16_t r13w;
95 | uint8_t r13b;
96 | };
97 | union {
98 | uint64_t r14;
99 | uint32_t r14d;
100 | uint16_t r14w;
101 | uint8_t r14b;
102 | };
103 | union {
104 | uint64_t r15;
105 | uint32_t r15d;
106 | uint16_t r15w;
107 | uint8_t r15b;
108 | };
109 | };
110 | };
111 |
112 | // control registers
113 | uint64_t cr2;
114 | uint64_t cr8;
115 |
116 | // debug registers
117 | uint64_t dr0;
118 | uint64_t dr1;
119 | uint64_t dr2;
120 | uint64_t dr3;
121 | uint64_t dr6;
122 |
123 | // SSE registers
124 | M128A xmm0;
125 | M128A xmm1;
126 | M128A xmm2;
127 | M128A xmm3;
128 | M128A xmm4;
129 | M128A xmm5;
130 | M128A xmm6;
131 | M128A xmm7;
132 | M128A xmm8;
133 | M128A xmm9;
134 | M128A xmm10;
135 | M128A xmm11;
136 | M128A xmm12;
137 | M128A xmm13;
138 | M128A xmm14;
139 | M128A xmm15;
140 | };
141 |
142 | // remember to update this value in vm-exit.asm
143 | static_assert(sizeof(guest_context) == 0x1C0);
144 |
145 | } // namespace hv
146 |
147 |
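The gpr[16] / named-alias union is what lets the MOV-CR and hypercall paths coexist: exit qualifications report operands as a register index that goes straight into gpr[] (the write_guest_gpr calls in exit-handlers.cpp), while hypercall handlers use ctx->rcx and friends. A reduced sketch of the same aliasing, relying on the usual MSVC anonymous-struct union behavior exactly as guest_context does:

    #include <cstdint>

    union mini_context {
      uint64_t gpr[3];
      struct {
        uint64_t rax; // gpr[0]
        uint64_t rcx; // gpr[1]
        uint64_t rdx; // gpr[2]
      };
    };

    int main() {
      mini_context ctx{};
      ctx.gpr[1] = 0x1234;              // write via the index form...
      return ctx.rcx == 0x1234 ? 0 : 1; // ...read back through the alias
    }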
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/hv.cpp:
--------------------------------------------------------------------------------
1 | #include "hv.h"
2 | #include "vcpu.h"
3 | #include "mm.h"
4 | #include "arch.h"
5 |
6 | namespace hv {
7 |
8 | hypervisor ghv;
9 |
10 | // function prototype doesn't really matter
11 | // since we never call this function anyways
12 | extern "C" NTKERNELAPI void PsGetCurrentThreadProcess();
13 |
14 | // dynamically find the offsets for various kernel structures
15 | static bool find_offsets() {
16 | // TODO: maybe dont hardcode this...
17 | ghv.kprocess_directory_table_base_offset = 0x28;
18 | ghv.kpcr_pcrb_offset = 0x180;
19 | ghv.kprcb_current_thread_offset = 0x8;
20 | ghv.kapc_state_process_offset = 0x20;
21 |
22 |   ghv.system_eprocess = reinterpret_cast<uint8_t*>(PsInitialSystemProcess);
23 |
24 | DbgPrint("[hv] System EPROCESS = 0x%llX.\n",
25 |     reinterpret_cast<uint64_t>(ghv.system_eprocess));
26 |
27 |   auto const ps_get_process_id = reinterpret_cast<uint8_t*>(PsGetProcessId);
28 |
29 | // mov rax, [rcx + OFFSET]
30 | // retn
31 | if (ps_get_process_id[0] != 0x48 ||
32 | ps_get_process_id[1] != 0x8B ||
33 | ps_get_process_id[2] != 0x81 ||
34 | ps_get_process_id[7] != 0xC3) {
35 | DbgPrint("[hv] Failed to get EPROCESS::UniqueProcessId offset.\n");
36 | return false;
37 | }
38 |
39 | ghv.eprocess_unique_process_id_offset =
40 |     *reinterpret_cast<uint32_t*>(ps_get_process_id + 3);
41 |
42 | DbgPrint("[hv] EPROCESS::UniqueProcessId offset = 0x%llX.\n",
43 | ghv.eprocess_unique_process_id_offset);
44 |
45 | auto const ps_get_current_thread_process =
46 |     reinterpret_cast<uint8_t*>(PsGetCurrentThreadProcess);
47 |
48 | // mov rax, gs:188h
49 | // mov rax, [rax + OFFSET]
50 | // retn
51 | if (ps_get_current_thread_process[0] != 0x65 ||
52 | ps_get_current_thread_process[1] != 0x48 ||
53 | ps_get_current_thread_process[2] != 0x8B ||
54 | ps_get_current_thread_process[3] != 0x04 ||
55 | ps_get_current_thread_process[4] != 0x25 ||
56 | ps_get_current_thread_process[9] != 0x48 ||
57 | ps_get_current_thread_process[10] != 0x8B ||
58 | ps_get_current_thread_process[11] != 0x80) {
59 | DbgPrint("[hv] Failed to get KAPC_STATE::Process offset.\n");
60 | return false;
61 | }
62 |
63 | ghv.kapc_state_process_offset =
64 |     *reinterpret_cast<uint32_t*>(ps_get_current_thread_process + 12);
65 |
66 | // store the System cr3 value (found in the System EPROCESS structure)
67 |   ghv.system_cr3 = *reinterpret_cast<cr3*>(ghv.system_eprocess +
68 | ghv.kprocess_directory_table_base_offset);
69 |
70 | DbgPrint("[hv] System CR3 = 0x%llX.\n", ghv.system_cr3.flags);
71 |
72 | return true;
73 | }
74 |
75 | // allocate the hypervisor and vcpus
76 | static bool create() {
77 | memset(&ghv, 0, sizeof(ghv));
78 |
79 | logger_init();
80 |
81 | ghv.vcpu_count = KeQueryActiveProcessorCount(nullptr);
82 |
83 | // size of the vcpu array
84 | auto const arr_size = sizeof(vcpu) * ghv.vcpu_count;
85 |
86 | // allocate an array of vcpus
87 |   ghv.vcpus = static_cast<vcpu*>(ExAllocatePoolWithTag(
88 | NonPagedPoolNx, arr_size, 'fr0g'));
89 |
90 | if (!ghv.vcpus) {
91 | DbgPrint("[hv] Failed to allocate VCPUs.\n");
92 | return false;
93 | }
94 |
95 | // zero-initialize the vcpu array
96 | memset(ghv.vcpus, 0, arr_size);
97 |
98 | DbgPrint("[hv] Allocated %u VCPUs (0x%llX bytes).\n", ghv.vcpu_count, arr_size);
99 |
100 | if (!find_offsets()) {
101 | DbgPrint("[hv] Failed to find offsets.\n");
102 | return false;
103 | }
104 |
105 | prepare_host_page_tables();
106 |
107 | DbgPrint("[hv] Mapped all of physical memory to address 0x%zX.\n",
108 |     reinterpret_cast<size_t>(host_physical_memory_base));
109 |
110 | return true;
111 | }
112 |
113 | // virtualize the current system
114 | bool start() {
115 | if (!create())
116 | return false;
117 |
118 | // we need to be running at an IRQL below DISPATCH_LEVEL so
119 | // that KeSetSystemAffinityThreadEx takes effect immediately
120 | NT_ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
121 |
122 | // virtualize every cpu
123 | for (unsigned long i = 0; i < ghv.vcpu_count; ++i) {
124 | // restrict execution to the specified cpu
125 | auto const orig_affinity = KeSetSystemAffinityThreadEx(1ull << i);
126 |
127 | if (!virtualize_cpu(&ghv.vcpus[i])) {
128 | // TODO: handle this bruh -_-
129 | KeRevertToUserAffinityThreadEx(orig_affinity);
130 | return false;
131 | }
132 |
133 | KeRevertToUserAffinityThreadEx(orig_affinity);
134 | }
135 |
136 | return true;
137 | }
138 |
139 | // devirtualize the current system
140 | void stop() {
141 | // we need to be running at an IRQL below DISPATCH_LEVEL so
142 | // that KeSetSystemAffinityThreadEx takes effect immediately
143 | NT_ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
144 |
145 |   // devirtualize every cpu
146 | for (unsigned long i = 0; i < ghv.vcpu_count; ++i) {
147 | // restrict execution to the specified cpu
148 | auto const orig_affinity = KeSetSystemAffinityThreadEx(1ull << i);
149 |
150 |     // it's possible that someone tried to call stop() when the hypervisor
151 | // wasn't even running, so we're wrapping this in a nice try-except
152 | // block. nice job.
153 | __try {
154 | hv::hypercall_input input;
155 | input.code = hv::hypercall_unload;
156 | input.key = hv::hypercall_key;
157 | vmx_vmcall(input);
158 | }
159 | __except (1) {}
160 |
161 | KeRevertToUserAffinityThreadEx(orig_affinity);
162 | }
163 |
164 | ExFreePoolWithTag(ghv.vcpus, 'fr0g');
165 | }
166 |
167 | } // namespace hv
168 |
169 |
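The byte-pattern checks in find_offsets() work because the two exported accessors have stable prologues: `48 8B 81 <disp32> C3` is `mov rax, [rcx + disp32]; ret`, so the field offset is the little-endian dword at byte 3. A standalone illustration with a canned pattern (0x440 is an arbitrary sample displacement, not a real offset):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      // mov rax, [rcx + 0x440] ; ret
      uint8_t const code[] = { 0x48, 0x8B, 0x81, 0x40, 0x04, 0x00, 0x00, 0xC3 };

      uint32_t disp = 0;
      std::memcpy(&disp, code + 3, sizeof(disp)); // same read as ps_get_process_id + 3

      std::printf("offset = 0x%X\n", disp); // prints: offset = 0x440
      return 0;
    }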
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/hv.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "page-tables.h"
4 | #include "hypercalls.h"
5 | #include "logger.h"
6 | #include "vmx.h"
7 |
8 | #include <ntddk.h>
9 |
10 | namespace hv {
11 |
12 | // signature that is returned by the ping hypercall
13 | inline constexpr uint64_t hypervisor_signature = 'fr0g';
14 |
15 | struct hypervisor {
16 | // host page tables that are shared between vcpus
17 | host_page_tables host_page_tables;
18 |
19 | // logger that can be used in root-mode
20 | logger logger;
21 |
22 | // dynamically allocated array of vcpus
23 | unsigned long vcpu_count;
24 | struct vcpu* vcpus;
25 |
26 | // pointer to the System process
27 | uint8_t* system_eprocess;
28 |
29 | // kernel CR3 value of the System process
30 | cr3 system_cr3;
31 |
32 | // windows specific offsets D:
33 | uint64_t kprocess_directory_table_base_offset;
34 | uint64_t eprocess_unique_process_id_offset;
35 | uint64_t kpcr_pcrb_offset;
36 | uint64_t kprcb_current_thread_offset;
37 | uint64_t kthread_apc_state_offset;
38 | uint64_t kapc_state_process_offset;
39 | };
40 |
41 | // global instance of the hypervisor
42 | extern hypervisor ghv;
43 |
44 | // virtualize the current system
45 | bool start();
46 |
47 | // devirtualize the current system
48 | void stop();
49 |
50 | } // namespace hv
51 |
52 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/hypercalls.cpp:
--------------------------------------------------------------------------------
1 | #include "hypercalls.h"
2 | #include "vcpu.h"
3 | #include "vmx.h"
4 | #include "mm.h"
5 | #include "hv.h"
6 | #include "exception-routines.h"
7 | #include "introspection.h"
8 |
9 | namespace hv::hc {
10 |
11 | // ping the hypervisor to make sure it is running
12 | void ping(vcpu* const cpu) {
13 | cpu->ctx->rax = hypervisor_signature;
14 |
15 | skip_instruction();
16 | }
17 |
18 | // a hypercall for quick testing
19 | void test(vcpu* const) {
20 | HV_LOG_INFO("KPCR: %p.", current_guest_kpcr());
21 | HV_LOG_INFO("EPROCESS: %p.", current_guest_eprocess());
22 | HV_LOG_INFO("ETHREAD: %p.", current_guest_ethread());
23 | HV_LOG_INFO("PID: %p.", current_guest_pid());
24 | HV_LOG_INFO("CPL: %u.", current_guest_cpl());
25 | skip_instruction();
26 | }
27 |
28 | // devirtualize the current VCPU
29 | void unload(vcpu* const cpu) {
30 | cpu->stop_virtualization = true;
31 | skip_instruction();
32 | }
33 |
34 | // read from arbitrary physical memory
35 | void read_phys_mem(vcpu* const cpu) {
36 | auto const ctx = cpu->ctx;
37 |
38 | // arguments
39 |   auto const dst = reinterpret_cast<uint8_t*>(ctx->rcx);
40 | auto const src = host_physical_memory_base + ctx->rdx;
41 | auto const size = ctx->r8;
42 |
43 | size_t bytes_read = 0;
44 |
45 | while (bytes_read < size) {
46 | size_t dst_remaining = 0;
47 |
48 | // translate the guest buffer into hypervisor space
49 | auto const curr_dst = gva2hva(dst + bytes_read, &dst_remaining);
50 |
51 | if (!curr_dst) {
52 | // guest virtual address that caused the fault
53 |       ctx->cr2 = reinterpret_cast<uint64_t>(dst + bytes_read);
54 |
55 | page_fault_exception error;
56 | error.flags = 0;
57 | error.present = 0;
58 | error.write = 1;
59 | error.user_mode_access = (current_guest_cpl() == 3);
60 |
61 | inject_hw_exception(page_fault, error.flags);
62 | return;
63 | }
64 |
65 | auto const curr_size = min(dst_remaining, size - bytes_read);
66 |
67 | host_exception_info e;
68 | memcpy_safe(e, curr_dst, src + bytes_read, curr_size);
69 |
70 | if (e.exception_occurred) {
71 | inject_hw_exception(general_protection, 0);
72 | return;
73 | }
74 |
75 | bytes_read += curr_size;
76 | }
77 |
78 | ctx->rax = bytes_read;
79 | skip_instruction();
80 | }
81 |
82 | // write to arbitrary physical memory
83 | void write_phys_mem(vcpu* const cpu) {
84 | auto const ctx = cpu->ctx;
85 |
86 | // arguments
87 | auto const dst = host_physical_memory_base + ctx->rcx;
88 |   auto const src = reinterpret_cast<uint8_t*>(ctx->rdx);
89 | auto const size = ctx->r8;
90 |
91 | size_t bytes_read = 0;
92 |
93 | while (bytes_read < size) {
94 | size_t src_remaining = 0;
95 |
96 | // translate the guest buffer into hypervisor space
97 | auto const curr_src = gva2hva(src + bytes_read, &src_remaining);
98 |
99 | if (!curr_src) {
100 | // guest virtual address that caused the fault
101 |       ctx->cr2 = reinterpret_cast<uint64_t>(src + bytes_read);
102 |
103 | page_fault_exception error;
104 | error.flags = 0;
105 | error.present = 0;
106 | error.write = 0;
107 | error.user_mode_access = (current_guest_cpl() == 3);
108 |
109 | inject_hw_exception(page_fault, error.flags);
110 | return;
111 | }
112 |
113 | auto const curr_size = min(size - bytes_read, src_remaining);
114 |
115 | host_exception_info e;
116 | memcpy_safe(e, dst + bytes_read, curr_src, curr_size);
117 |
118 | if (e.exception_occurred) {
119 | inject_hw_exception(general_protection, 0);
120 | return;
121 | }
122 |
123 | bytes_read += curr_size;
124 | }
125 |
126 | ctx->rax = bytes_read;
127 | skip_instruction();
128 | }
129 |
130 | // read from virtual memory in another process
131 | void read_virt_mem(vcpu* const cpu) {
132 | auto const ctx = cpu->ctx;
133 |
134 | // arguments
135 | cr3 guest_cr3;
136 | guest_cr3.flags = ctx->rcx;
137 |   auto const dst = reinterpret_cast<uint8_t*>(ctx->rdx);
138 |   auto const src = reinterpret_cast<uint8_t*>(ctx->r8);
139 | auto const size = ctx->r9;
140 |
141 | size_t bytes_read = 0;
142 |
143 | while (bytes_read < size) {
144 | size_t dst_remaining = 0, src_remaining = 0;
145 |
146 | // translate the guest virtual addresses into host virtual addresses.
147 | // this has to be done 1 page at a time. :(
148 | auto const curr_dst = gva2hva(dst + bytes_read, &dst_remaining);
149 | auto const curr_src = gva2hva(guest_cr3, src + bytes_read, &src_remaining);
150 |
151 | if (!curr_dst) {
152 | // guest virtual address that caused the fault
153 |       ctx->cr2 = reinterpret_cast<uint64_t>(dst + bytes_read);
154 |
155 | page_fault_exception error;
156 | error.flags = 0;
157 | error.present = 0;
158 | error.write = 1;
159 | error.user_mode_access = (current_guest_cpl() == 3);
160 |
161 | inject_hw_exception(page_fault, error.flags);
162 | return;
163 | }
164 |
165 | // this means that the target memory isn't paged in. there's nothing
166 | // we can do about that since we're not currently in that process's context.
167 | if (!curr_src)
168 | break;
169 |
170 | // the maximum allowed size that we can read at once with the translated HVAs
171 | auto const curr_size = min(size - bytes_read, min(dst_remaining, src_remaining));
172 |
173 | host_exception_info e;
174 | memcpy_safe(e, curr_dst, curr_src, curr_size);
175 |
176 | if (e.exception_occurred) {
177 | // this REALLY shouldn't happen... ever...
178 | inject_hw_exception(general_protection, 0);
179 | return;
180 | }
181 |
182 | bytes_read += curr_size;
183 | }
184 |
185 | ctx->rax = bytes_read;
186 | skip_instruction();
187 | }
188 |
189 | // write to virtual memory in another process
190 | void write_virt_mem(vcpu* const cpu) {
191 | auto const ctx = cpu->ctx;
192 |
193 | // arguments
194 | cr3 guest_cr3;
195 | guest_cr3.flags = ctx->rcx;
196 |   auto const dst = reinterpret_cast<uint8_t*>(ctx->rdx);
197 |   auto const src = reinterpret_cast<uint8_t*>(ctx->r8);
198 | auto const size = ctx->r9;
199 |
200 | size_t bytes_read = 0;
201 |
202 | while (bytes_read < size) {
203 | size_t dst_remaining = 0, src_remaining = 0;
204 |
205 | // translate the guest virtual addresses into host virtual addresses.
206 | // this has to be done 1 page at a time. :(
207 | auto const curr_dst = gva2hva(guest_cr3, dst + bytes_read, &dst_remaining);
208 | auto const curr_src = gva2hva(src + bytes_read, &src_remaining);
209 |
210 | if (!curr_src) {
211 | // guest virtual address that caused the fault
212 |       ctx->cr2 = reinterpret_cast<uint64_t>(src + bytes_read);
213 |
214 | page_fault_exception error;
215 | error.flags = 0;
216 | error.present = 0;
217 | error.write = 0;
218 | error.user_mode_access = (current_guest_cpl() == 3);
219 |
220 | inject_hw_exception(page_fault, error.flags);
221 | return;
222 | }
223 |
224 | // this means that the target memory isn't paged in. there's nothing
225 | // we can do about that since we're not currently in that process's context.
226 | if (!curr_dst)
227 | break;
228 |
229 | // the maximum allowed size that we can read at once with the translated HVAs
230 | auto const curr_size = min(size - bytes_read, min(dst_remaining, src_remaining));
231 |
232 | host_exception_info e;
233 | memcpy_safe(e, curr_dst, curr_src, curr_size);
234 |
235 | if (e.exception_occurred) {
236 | // this REALLY shouldn't happen... ever...
237 | inject_hw_exception(general_protection, 0);
238 | return;
239 | }
240 |
241 | bytes_read += curr_size;
242 | }
243 |
244 | ctx->rax = bytes_read;
245 | skip_instruction();
246 | }
247 |
248 | // get the kernel CR3 value of an arbitrary process
249 | void query_process_cr3(vcpu* const cpu) {
250 | // PID of the process to get the CR3 value of
251 | auto const target_pid = cpu->ctx->rcx;
252 |
253 | // System process
254 | if (target_pid == 4) {
255 | cpu->ctx->rax = ghv.system_cr3.flags;
256 | skip_instruction();
257 | return;
258 | }
259 |
260 | cpu->ctx->rax = 0;
261 |
262 | // ActiveProcessLinks is right after UniqueProcessId in memory
263 | auto const apl_offset = ghv.eprocess_unique_process_id_offset + 8;
264 | auto const head = ghv.system_eprocess + apl_offset;
265 | auto curr_entry = head;
266 |
267 | // iterate over every EPROCESS in the APL linked list
268 | do {
269 | // get the next entry in the linked list
270 | if (sizeof(curr_entry) != read_guest_virtual_memory(ghv.system_cr3,
271 | curr_entry + offsetof(LIST_ENTRY, Flink), &curr_entry, sizeof(curr_entry)))
272 | break;
273 |
274 | // EPROCESS
275 | auto const process = curr_entry - apl_offset;
276 |
277 | // EPROCESS::UniqueProcessId
278 | uint64_t pid = 0;
279 | if (sizeof(pid) != read_guest_virtual_memory(ghv.system_cr3,
280 | process + ghv.eprocess_unique_process_id_offset, &pid, sizeof(pid)))
281 | break;
282 |
283 | // we found the target process
284 | if (target_pid == pid) {
285 | // EPROCESS::DirectoryTableBase
286 | uint64_t cr3 = 0;
287 | if (sizeof(cr3) != read_guest_virtual_memory(ghv.system_cr3,
288 | process + ghv.kprocess_directory_table_base_offset, &cr3, sizeof(cr3)))
289 | break;
290 |
291 | cpu->ctx->rax = cr3;
292 | break;
293 | }
294 | } while (curr_entry != head);
295 |
296 | skip_instruction();
297 | }
298 |
299 | // install an EPT hook for the CURRENT logical processor ONLY
300 | void install_ept_hook(vcpu* const cpu) {
301 | // arguments
302 | auto const orig_page = cpu->ctx->rcx;
303 | auto const exec_page = cpu->ctx->rdx;
304 |
305 | cpu->ctx->rax = install_ept_hook(cpu->ept, orig_page >> 12, exec_page >> 12);
306 |
307 | skip_instruction();
308 | }
309 |
310 | // remove a previously installed EPT hook
311 | void remove_ept_hook(vcpu* const cpu) {
312 | // arguments
313 | auto const orig_page = cpu->ctx->rcx;
314 |
315 | remove_ept_hook(cpu->ept, orig_page >> 12);
316 |
317 | skip_instruction();
318 | }
319 |
320 | // flush the hypervisor logs into a specified buffer
321 | void flush_logs(vcpu* const cpu) {
322 | auto const ctx = cpu->ctx;
323 |
324 | // arguments
325 | uint32_t count = ctx->ecx;
326 |   uint8_t* buffer = reinterpret_cast<uint8_t*>(ctx->rdx);
327 |
328 | ctx->eax = 0;
329 |
330 |   if (count == 0) {
331 | skip_instruction();
332 | return;
333 | }
334 |
335 | auto& l = ghv.logger;
336 |
337 | scoped_spin_lock lock(l.lock);
338 |
339 | count = min(count, l.msg_count);
340 |
341 |   auto start = reinterpret_cast<uint8_t*>(&l.msgs[l.msg_start]);
342 | auto size = min(l.max_msg_count - l.msg_start, count) * sizeof(l.msgs[0]);
343 |
344 | // read the first chunk of logs before circling back around (if needed)
345 | for (size_t bytes_read = 0; bytes_read < size;) {
346 | size_t dst_remaining = 0;
347 |
348 | // translate the guest virtual address
349 | auto const curr_dst = gva2hva(buffer + bytes_read, &dst_remaining);
350 |
351 | if (!curr_dst) {
352 | // guest virtual address that caused the fault
353 |       ctx->cr2 = reinterpret_cast<uint64_t>(buffer + bytes_read);
354 |
355 | page_fault_exception error;
356 | error.flags = 0;
357 | error.present = 0;
358 | error.write = 1;
359 | error.user_mode_access = (current_guest_cpl() == 3);
360 |
361 | inject_hw_exception(page_fault, error.flags);
362 | return;
363 | }
364 |
365 | // the maximum allowed size that we can read at once with the translated HVAs
366 | auto const curr_size = min(size - bytes_read, dst_remaining);
367 |
368 | host_exception_info e;
369 | memcpy_safe(e, curr_dst, start + bytes_read, curr_size);
370 |
371 | if (e.exception_occurred) {
372 | // this REALLY shouldn't happen... ever...
373 | inject_hw_exception(general_protection, 0);
374 | return;
375 | }
376 |
377 | bytes_read += curr_size;
378 | }
379 |
380 | buffer += size;
381 |   start = reinterpret_cast<uint8_t*>(&l.msgs[0]);
382 | size = (count * sizeof(l.msgs[0])) - size;
383 |
384 | for (size_t bytes_read = 0; bytes_read < size;) {
385 | size_t dst_remaining = 0;
386 |
387 | // translate the guest virtual address
388 | auto const curr_dst = gva2hva(buffer + bytes_read, &dst_remaining);
389 |
390 | if (!curr_dst) {
391 | // guest virtual address that caused the fault
392 |       ctx->cr2 = reinterpret_cast<uint64_t>(buffer + bytes_read);
393 |
394 | page_fault_exception error;
395 | error.flags = 0;
396 | error.present = 0;
397 | error.write = 1;
398 | error.user_mode_access = (current_guest_cpl() == 3);
399 |
400 | inject_hw_exception(page_fault, error.flags);
401 | return;
402 | }
403 |
404 | // the maximum allowed size that we can read at once with the translated HVAs
405 | auto const curr_size = min(size - bytes_read, dst_remaining);
406 |
407 | host_exception_info e;
408 | memcpy_safe(e, curr_dst, start + bytes_read, curr_size);
409 |
410 | if (e.exception_occurred) {
411 | // this REALLY shouldn't happen... ever...
412 | inject_hw_exception(general_protection, 0);
413 | return;
414 | }
415 |
416 | bytes_read += curr_size;
417 | }
418 |
419 | l.msg_count -= count;
420 | l.msg_start = (l.msg_start + count) % l.max_msg_count;
421 |
422 | ctx->eax = count;
423 |
424 | skip_instruction();
425 | }
426 |
427 | // translate a guest virtual address to its physical address
428 | void get_physical_address(vcpu* const cpu) {
429 | cr3 guest_cr3;
430 | guest_cr3.flags = cpu->ctx->rcx;
431 |
432 |   cpu->ctx->rax = gva2gpa(guest_cr3, reinterpret_cast<void*>(cpu->ctx->rdx));
433 |
434 | skip_instruction();
435 | }
436 |
437 | } // namespace hv::hc
438 |
439 |
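A note on the hand-rolled #PF error codes that appear throughout this file: bit 0 (present) is cleared because the translation failed, bit 1 says whether the faulting access was a write, and bit 2 marks a CPL-3 access. So the common "user-mode write hit a non-present page" case injects error code 0b110 = 6:

    #include <cstdint>

    // low bits of the x86 page-fault error code
    union pf_error {
      uint32_t flags;
      struct {
        uint32_t present          : 1; // 0 = page not present
        uint32_t write            : 1; // 1 = access was a write
        uint32_t user_mode_access : 1; // 1 = CPL was 3
      };
    };

    int main() {
      pf_error e{};
      e.present = 0;
      e.write = 1;
      e.user_mode_access = 1;
      return e.flags == 0x6 ? 0 : 1; // 0b110
    }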
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/hypercalls.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct vcpu;
8 |
9 | // key used for executing hypercalls
10 | // TODO: compute this at runtime
11 | inline constexpr uint64_t hypercall_key = 9527;
12 |
13 | // hypercall indices
14 | enum hypercall_code : uint64_t {
15 | hypercall_ping = 0,
16 | hypercall_test,
17 | hypercall_unload,
18 | hypercall_read_phys_mem,
19 | hypercall_write_phys_mem,
20 | hypercall_read_virt_mem,
21 | hypercall_write_virt_mem,
22 | hypercall_query_process_cr3,
23 | hypercall_install_ept_hook,
24 | hypercall_remove_ept_hook,
25 | hypercall_flush_logs,
26 | hypercall_get_physical_address
27 | };
28 |
29 | // hypercall input
30 | struct hypercall_input {
31 | // rax
32 | struct {
33 | hypercall_code code : 8;
34 | uint64_t key : 56;
35 | };
36 |
37 | // rcx, rdx, r8, r9, r10, r11
38 | uint64_t args[6];
39 | };
40 |
41 | namespace hc {
42 |
43 | // ping the hypervisor to make sure it is running
44 | void ping(vcpu* cpu);
45 |
46 | // a hypercall for quick testing
47 | void test(vcpu* cpu);
48 |
49 | // devirtualize the current VCPU
50 | void unload(vcpu* cpu);
51 |
52 | // read from arbitrary physical memory
53 | void read_phys_mem(vcpu* cpu);
54 |
55 | // write to arbitrary physical memory
56 | void write_phys_mem(vcpu* cpu);
57 |
58 | // read from virtual memory in another process
59 | void read_virt_mem(vcpu* cpu);
60 |
61 | // write to virtual memory in another process
62 | void write_virt_mem(vcpu* cpu);
63 |
64 | // get the kernel CR3 value of an arbitrary process
65 | void query_process_cr3(vcpu* cpu);
66 |
67 | // install an EPT hook for the CURRENT logical processor ONLY
68 | void install_ept_hook(vcpu* cpu);
69 |
70 | // remove a previously installed EPT hook
71 | void remove_ept_hook(vcpu* cpu);
72 |
73 | // flush the hypervisor logs into a specified buffer
74 | void flush_logs(vcpu* cpu);
75 |
76 | // translate a guest virtual address to its physical address
77 | void get_physical_address(vcpu* cpu);
78 |
79 | } // namespace hc
80 |
81 | } // namespace hv
82 |
83 |
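Per the comments on hypercall_input, code and key travel packed in rax while args[0..5] land in rcx, rdx, r8, r9, r10 and r11, which is exactly where the handlers in hypercalls.cpp read them from cpu->ctx. A client-side sketch in the style of the ping() wrapper in main.cpp (assuming, as that wrapper does, that vmx_vmcall returns the guest rax):

    // ask the hypervisor to translate a GVA under a given CR3.
    // hv::hc::get_physical_address() reads rcx (the CR3 to translate with)
    // and rdx (the virtual address) and returns the GPA in rax.
    static uint64_t query_physical_address(uint64_t cr3, void* gva) {
      hv::hypercall_input input;
      input.code = hv::hypercall_get_physical_address;
      input.key  = hv::hypercall_key;
      input.args[0] = cr3;                             // -> rcx
      input.args[1] = reinterpret_cast<uint64_t>(gva); // -> rdx
      return hv::vmx_vmcall(input);
    }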
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/idt.cpp:
--------------------------------------------------------------------------------
1 | #include "idt.h"
2 | #include "vcpu.h"
3 | #include "interrupt-handlers.h"
4 | #include "mm.h"
5 |
6 | namespace hv {
7 |
8 | // create an interrupt gate that points to the supplied interrupt handler
9 | static segment_descriptor_interrupt_gate_64 create_interrupt_gate(void* const handler) {
10 | segment_descriptor_interrupt_gate_64 gate;
11 |
12 | gate.interrupt_stack_table = 0;
13 | gate.segment_selector = host_cs_selector.flags;
14 | gate.must_be_zero_0 = 0;
15 | gate.type = SEGMENT_DESCRIPTOR_TYPE_INTERRUPT_GATE;
16 | gate.must_be_zero_1 = 0;
17 | gate.descriptor_privilege_level = 0;
18 | gate.present = 1;
19 | gate.reserved = 0;
20 |
21 |   auto const offset = reinterpret_cast<uint64_t>(handler);
22 | gate.offset_low = (offset >> 0) & 0xFFFF;
23 | gate.offset_middle = (offset >> 16) & 0xFFFF;
24 | gate.offset_high = (offset >> 32) & 0xFFFFFFFF;
25 |
26 | return gate;
27 | }
28 |
29 | // initialize the host IDT and populate every descriptor
30 | void prepare_host_idt(segment_descriptor_interrupt_gate_64* const idt) {
31 | memset(idt, 0, host_idt_descriptor_count * sizeof(idt[0]));
32 | idt[0] = create_interrupt_gate(interrupt_handler_0);
33 | idt[1] = create_interrupt_gate(interrupt_handler_1);
34 | idt[2] = create_interrupt_gate(interrupt_handler_2);
35 | idt[3] = create_interrupt_gate(interrupt_handler_3);
36 | idt[4] = create_interrupt_gate(interrupt_handler_4);
37 | idt[5] = create_interrupt_gate(interrupt_handler_5);
38 | idt[6] = create_interrupt_gate(interrupt_handler_6);
39 | idt[7] = create_interrupt_gate(interrupt_handler_7);
40 | idt[8] = create_interrupt_gate(interrupt_handler_8);
41 | idt[10] = create_interrupt_gate(interrupt_handler_10);
42 | idt[11] = create_interrupt_gate(interrupt_handler_11);
43 | idt[12] = create_interrupt_gate(interrupt_handler_12);
44 | idt[13] = create_interrupt_gate(interrupt_handler_13);
45 | idt[14] = create_interrupt_gate(interrupt_handler_14);
46 | idt[16] = create_interrupt_gate(interrupt_handler_16);
47 | idt[17] = create_interrupt_gate(interrupt_handler_17);
48 | idt[18] = create_interrupt_gate(interrupt_handler_18);
49 | idt[19] = create_interrupt_gate(interrupt_handler_19);
50 | idt[20] = create_interrupt_gate(interrupt_handler_20);
51 | idt[30] = create_interrupt_gate(interrupt_handler_30);
52 | }
53 |
54 | } // namespace hv
55 |
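create_interrupt_gate() scatters the 64-bit handler address across offset_low/middle/high; the CPU reassembles it as low | (middle << 16) | (high << 32). A round-trip check with an arbitrary canonical address:

    #include <cstdint>

    int main() {
      uint64_t const offset = 0xFFFFF80412345678ull;

      auto const low    = (offset >>  0) & 0xFFFF;
      auto const middle = (offset >> 16) & 0xFFFF;
      auto const high   = (offset >> 32) & 0xFFFFFFFF;

      auto const rebuilt = low | (middle << 16) | (high << 32);
      return rebuilt == offset ? 0 : 1;
    }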
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/idt.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | // number of available descriptor slots in the host IDT
8 | inline constexpr size_t host_idt_descriptor_count = 256;
9 |
10 | // initialize the host IDT and populate every descriptor
11 | void prepare_host_idt(segment_descriptor_interrupt_gate_64* idt);
12 |
13 | } // namespace hv
14 |
15 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/interrupt-handlers.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ; defined in trap-frame.h
4 | trap_frame struct
5 | ; general-purpose registers
6 | $rax qword ?
7 | $rcx qword ?
8 | $rdx qword ?
9 | $rbx qword ?
10 | $rbp qword ?
11 | $rsi qword ?
12 | $rdi qword ?
13 | $r8 qword ?
14 | $r9 qword ?
15 | $r10 qword ?
16 | $r11 qword ?
17 | $r12 qword ?
18 | $r13 qword ?
19 | $r14 qword ?
20 | $r15 qword ?
21 |
22 | ; interrupt vector
23 | $vector qword ?
24 |
25 | ; _MACHINE_FRAME
26 | $error qword ?
27 | $rip qword ?
28 | $cs qword ?
29 | $rflags qword ?
30 | $rsp qword ?
31 | $ss qword ?
32 | trap_frame ends
33 |
34 | extern ?handle_host_interrupt@hv@@YAXQEAUtrap_frame@1@@Z : proc
35 |
36 | ; the generic interrupt handler that every stub will eventually jump to
37 | generic_interrupt_handler proc
38 | ; allocate space for the trap_frame structure (minus the size of the
39 | ; _MACHINE_FRAME, error code, and interrupt vector)
40 | sub rsp, 78h
41 |
42 | ; general-purpose registers
43 | mov trap_frame.$rax[rsp], rax
44 | mov trap_frame.$rcx[rsp], rcx
45 | mov trap_frame.$rdx[rsp], rdx
46 | mov trap_frame.$rbx[rsp], rbx
47 | mov trap_frame.$rbp[rsp], rbp
48 | mov trap_frame.$rsi[rsp], rsi
49 | mov trap_frame.$rdi[rsp], rdi
50 | mov trap_frame.$r8[rsp], r8
51 | mov trap_frame.$r9[rsp], r9
52 | mov trap_frame.$r10[rsp], r10
53 | mov trap_frame.$r11[rsp], r11
54 | mov trap_frame.$r12[rsp], r12
55 | mov trap_frame.$r13[rsp], r13
56 | mov trap_frame.$r14[rsp], r14
57 | mov trap_frame.$r15[rsp], r15
58 |
59 | ; first argument is the trap frame
60 | mov rcx, rsp
61 |
62 | ; call handle_host_interrupt
63 | sub rsp, 20h
64 | call ?handle_host_interrupt@hv@@YAXQEAUtrap_frame@1@@Z
65 | add rsp, 20h
66 |
67 | ; general-purpose registers
68 | mov rax, trap_frame.$rax[rsp]
69 | mov rcx, trap_frame.$rcx[rsp]
70 | mov rdx, trap_frame.$rdx[rsp]
71 | mov rbx, trap_frame.$rbx[rsp]
72 | mov rbp, trap_frame.$rbp[rsp]
73 | mov rsi, trap_frame.$rsi[rsp]
74 | mov rdi, trap_frame.$rdi[rsp]
75 | mov r8, trap_frame.$r8[rsp]
76 | mov r9, trap_frame.$r9[rsp]
77 | mov r10, trap_frame.$r10[rsp]
78 | mov r11, trap_frame.$r11[rsp]
79 | mov r12, trap_frame.$r12[rsp]
80 | mov r13, trap_frame.$r13[rsp]
81 | mov r14, trap_frame.$r14[rsp]
82 | mov r15, trap_frame.$r15[rsp]
83 |
84 | ; free the trap_frame
85 | add rsp, 78h
86 |
87 | ; pop the interrupt vector
88 | add rsp, 8
89 |
90 | ; pop the error code
91 | add rsp, 8
92 |
93 | iretq
94 | generic_interrupt_handler endp
95 |
96 | ; for vectors where the CPU pushes an error code onto the stack
97 | DEFINE_ISR macro interrupt_vector:req, proc_name:req
98 | proc_name proc
99 | ; interrupt vector is stored right before the machine frame
100 | push interrupt_vector
101 |
102 | jmp generic_interrupt_handler
103 | proc_name endp
104 | endm
105 |
106 | ; for vectors where the CPU doesn't push an error code (a dummy is pushed instead)
107 | DEFINE_ISR_NO_ERROR macro interrupt_vector:req, proc_name:req
108 | proc_name proc
109 | ; push a dummy error code onto the stack
110 | push 0
111 |
112 | ; interrupt vector is stored right before the machine frame
113 | push interrupt_vector
114 |
115 | jmp generic_interrupt_handler
116 | proc_name endp
117 | endm
118 |
119 | DEFINE_ISR_NO_ERROR 0, ?interrupt_handler_0@hv@@YAXXZ
120 | DEFINE_ISR_NO_ERROR 1, ?interrupt_handler_1@hv@@YAXXZ
121 | DEFINE_ISR_NO_ERROR 2, ?interrupt_handler_2@hv@@YAXXZ
122 | DEFINE_ISR_NO_ERROR 3, ?interrupt_handler_3@hv@@YAXXZ
123 | DEFINE_ISR_NO_ERROR 4, ?interrupt_handler_4@hv@@YAXXZ
124 | DEFINE_ISR_NO_ERROR 5, ?interrupt_handler_5@hv@@YAXXZ
125 | DEFINE_ISR_NO_ERROR 6, ?interrupt_handler_6@hv@@YAXXZ
126 | DEFINE_ISR_NO_ERROR 7, ?interrupt_handler_7@hv@@YAXXZ
127 | DEFINE_ISR 8, ?interrupt_handler_8@hv@@YAXXZ
128 | DEFINE_ISR 10, ?interrupt_handler_10@hv@@YAXXZ
129 | DEFINE_ISR 11, ?interrupt_handler_11@hv@@YAXXZ
130 | DEFINE_ISR 12, ?interrupt_handler_12@hv@@YAXXZ
131 | DEFINE_ISR 13, ?interrupt_handler_13@hv@@YAXXZ
132 | DEFINE_ISR 14, ?interrupt_handler_14@hv@@YAXXZ
133 | DEFINE_ISR_NO_ERROR 16, ?interrupt_handler_16@hv@@YAXXZ
134 | DEFINE_ISR 17, ?interrupt_handler_17@hv@@YAXXZ
135 | DEFINE_ISR_NO_ERROR 18, ?interrupt_handler_18@hv@@YAXXZ
136 | DEFINE_ISR_NO_ERROR 19, ?interrupt_handler_19@hv@@YAXXZ
137 | DEFINE_ISR_NO_ERROR 20, ?interrupt_handler_20@hv@@YAXXZ
138 | DEFINE_ISR 30, ?interrupt_handler_30@hv@@YAXXZ
139 |
140 | end
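The `sub rsp, 78h` in generic_interrupt_handler reserves exactly the fifteen saved general-purpose registers, since 15 * 8 = 0x78 (RSP itself is restored from the machine frame by iretq, so the $rsp slot needs no save/restore); the vector, error code, and machine frame portions of trap_frame are already on the stack by the time a stub jumps in. A one-line guard one might keep next to trap-frame.h (a sketch; that header isn't shown in this dump):

    #include <cstdint>

    // fifteen 64-bit GPRs saved/restored by generic_interrupt_handler
    static_assert(15 * sizeof(uint64_t) == 0x78,
      "must match the sub rsp, 78h in interrupt-handlers.asm");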
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/interrupt-handlers.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | namespace hv {
4 |
5 | // defined in interrupt-handlers.asm
6 | void interrupt_handler_0();
7 | void interrupt_handler_1();
8 | void interrupt_handler_2();
9 | void interrupt_handler_3();
10 | void interrupt_handler_4();
11 | void interrupt_handler_5();
12 | void interrupt_handler_6();
13 | void interrupt_handler_7();
14 | void interrupt_handler_8();
15 | void interrupt_handler_10();
16 | void interrupt_handler_11();
17 | void interrupt_handler_12();
18 | void interrupt_handler_13();
19 | void interrupt_handler_14();
20 | void interrupt_handler_16();
21 | void interrupt_handler_17();
22 | void interrupt_handler_18();
23 | void interrupt_handler_19();
24 | void interrupt_handler_20();
25 | void interrupt_handler_30();
26 |
27 | } // namespace hv
28 |
29 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/introspection.cpp:
--------------------------------------------------------------------------------
1 | #include "introspection.h"
2 | #include "mm.h"
3 | #include "hv.h"
4 |
5 | namespace hv {
6 |
7 | // get the KPCR of the current guest (this pointer should stay constant per-vcpu)
8 | PKPCR current_guest_kpcr() {
9 | // GS base holds the KPCR when in ring-0
10 | if (current_guest_cpl() == 0)
11 |     return reinterpret_cast<PKPCR>(vmx_vmread(VMCS_GUEST_GS_BASE));
12 |
13 | // when in ring-3, the GS_SWAP contains the KPCR
14 |   return reinterpret_cast<PKPCR>(__readmsr(IA32_KERNEL_GS_BASE));
15 | }
16 |
17 | // get the ETHREAD of the current guest
18 | PETHREAD current_guest_ethread() {
19 | // KPCR
20 | auto const kpcr = current_guest_kpcr();
21 |
22 | if (!kpcr)
23 | return nullptr;
24 |
25 | // KPCR::Prcb
26 |   auto const kprcb = reinterpret_cast<uint8_t*>(kpcr)
27 | + ghv.kpcr_pcrb_offset;
28 |
29 |   // KPRCB::CurrentThread
30 | PETHREAD current_thread = nullptr;
31 | read_guest_virtual_memory(ghv.system_cr3,
32 |     kprcb + ghv.kprcb_current_thread_offset, &current_thread, sizeof(current_thread));
33 |
34 | return current_thread;
35 | }
36 |
37 | // get the EPROCESS of the current guest
38 | PEPROCESS current_guest_eprocess() {
39 | // ETHREAD (KTHREAD is first field as well)
40 | auto const ethread = current_guest_ethread();
41 |
42 | if (!ethread)
43 | return nullptr;
44 |
45 | // KTHREAD::ApcState
46 |   auto const kapc_state = reinterpret_cast<uint8_t*>(ethread)
47 | + ghv.kthread_apc_state_offset;
48 |
49 | // KAPC_STATE::Process
50 | PEPROCESS process = nullptr;
51 | read_guest_virtual_memory(ghv.system_cr3,
52 | kapc_state + ghv.kapc_state_process_offset, &process, sizeof(process));
53 |
54 | return process;
55 | }
56 |
57 | // get the PID of the current guest
58 | uint64_t current_guest_pid() {
59 | // EPROCESS
60 |   auto const process = reinterpret_cast<uint8_t*>(current_guest_eprocess());
61 | if (!process)
62 | return 0;
63 |
64 | // EPROCESS::UniqueProcessId
65 | uint64_t pid = 0;
66 | read_guest_virtual_memory(ghv.system_cr3,
67 | process + ghv.eprocess_unique_process_id_offset, &pid, sizeof(pid));
68 |
69 | return pid;
70 | }
71 |
72 | } // namespace hv
73 |
74 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/introspection.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "vmx.h"
4 | #include "mm.h"
5 |
6 | #include <ntddk.h>
7 |
8 | namespace hv {
9 |
10 | // get the KPCR of the current guest (this pointer should stay constant per-vcpu)
11 | PKPCR current_guest_kpcr();
12 |
13 | // get the ETHREAD of the current guest
14 | PETHREAD current_guest_ethread();
15 |
16 | // get the EPROCESS of the current guest
17 | PEPROCESS current_guest_eprocess();
18 |
19 | // get the PID of the current guest
20 | uint64_t current_guest_pid();
21 |
22 | } // namespace hv
23 |
24 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/logger.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ReloadDbg/hv/hv/logger.cpp
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/logger.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | #include "spin-lock.h"
6 |
7 | // different logging levels, usually only ERRORs are useful
8 | #define HV_LOG_INFO(fmt, ...) hv::logger_write(fmt, __VA_ARGS__)
9 | #define HV_LOG_ERROR(fmt, ...) hv::logger_write(fmt, __VA_ARGS__)
10 | #define HV_LOG_VERBOSE(fmt, ...) hv::logger_write(fmt, __VA_ARGS__)
11 |
12 | namespace hv {
13 |
14 | struct logger_msg {
15 | static constexpr uint32_t max_msg_length = 128;
16 |
17 | // ID of the current message
18 | uint64_t id;
19 |
20 | // timestamp counter of the current message
21 | uint64_t tsc;
22 |
23 | // process ID of the VCPU that sent the message
24 | uint32_t aux;
25 |
26 | // null-terminated ascii string
27 | char data[max_msg_length];
28 | };
29 |
30 | struct logger {
31 | static constexpr uint32_t max_msg_count = 512;
32 |
33 | // signature to find logs in memory easier
34 | // "hvloggerhvlogger"
35 | char signature[16];
36 |
37 | spin_lock lock;
38 |
39 | uint32_t msg_start;
40 | uint32_t msg_count;
41 |
42 | // the total messages sent
43 | uint64_t total_msg_count;
44 |
45 | // an array of messages
46 | logger_msg msgs[max_msg_count];
47 | };
48 |
49 | // initialize the logger
50 | void logger_init();
51 |
52 | // flush log messages to the provided buffer
53 | void logger_flush(uint32_t& count, logger_msg* buffer);
54 |
55 | // write a printf-style string to the logger using
56 | // a limited subset of printf specifiers:
57 | // %s, %i, %d, %u, %x, %X, %p
58 | void logger_write(char const* format, ...);
59 |
60 | } // namespace hv
61 |
62 |
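logger_msg entries are drained through the flush_logs hypercall, which takes the capacity in ecx and the destination buffer in rdx and returns the number of messages copied in eax (see hv::hc::flush_logs in hypercalls.cpp). A hedged user-side sketch of emptying the ring, reusing the hypercall plumbing shown in main.cpp:

    #include <cstdint>
    #include <cstdio>

    static void drain_hv_logs() {
      hv::logger_msg msgs[32];

      for (;;) {
        hv::hypercall_input input;
        input.code = hv::hypercall_flush_logs;
        input.key  = hv::hypercall_key;
        input.args[0] = 32;                               // -> ecx: capacity
        input.args[1] = reinterpret_cast<uint64_t>(msgs); // -> rdx: buffer

        auto const count = static_cast<uint32_t>(hv::vmx_vmcall(input));
        if (count == 0)
          break;

        for (uint32_t i = 0; i < count; ++i)
          std::printf("[hv %llu] %s\n", msgs[i].id, msgs[i].data);
      }
    }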
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/main.cpp:
--------------------------------------------------------------------------------
1 | #include "hv.h"
2 |
3 | #include <ntddk.h>
4 | #include "ia32.hpp"
5 |
6 | // simple hypercall wrappers
7 | static uint64_t ping() {
8 | hv::hypercall_input input;
9 | input.code = hv::hypercall_ping;
10 | input.key = hv::hypercall_key;
11 | return hv::vmx_vmcall(input);
12 | }
13 |
14 | void driver_unload(PDRIVER_OBJECT) {
15 | hv::stop();
16 |
17 | DbgPrint("[hv] Devirtualized the system.\n");
18 | DbgPrint("[hv] Driver unloaded.\n");
19 | }
20 |
21 | NTSTATUS driver_entry(PDRIVER_OBJECT const driver, PUNICODE_STRING) {
22 | DbgPrint("[hv] Driver loaded.\n");
23 |
24 | if (driver)
25 | driver->DriverUnload = driver_unload;
26 |
27 | if (!hv::start()) {
28 | DbgPrint("[hv] Failed to virtualize system.\n");
29 | return STATUS_HV_OPERATION_FAILED;
30 | }
31 |
32 | if (ping() == hv::hypervisor_signature)
33 | DbgPrint("[client] Hypervisor signature matches.\n");
34 | else
35 | DbgPrint("[client] Failed to ping hypervisor!\n");
36 |
37 | return STATUS_SUCCESS;
38 | }
39 |
40 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/mm.cpp:
--------------------------------------------------------------------------------
1 | #include "mm.h"
2 | #include "arch.h"
3 | #include "page-tables.h"
4 | #include "vmx.h"
5 | #include "exception-routines.h"
6 | #include "logger.h"
7 |
8 | namespace hv {
9 |
10 | // translate a GVA to a GPA. offset_to_next_page is the number of bytes to
11 | // the next page (i.e. the number of bytes that can be safely accessed through
12 | // the GPA in order to modify the GVA).
13 | uint64_t gva2gpa(cr3 const guest_cr3, void* const gva, size_t* const offset_to_next_page) {
14 | if (offset_to_next_page)
15 | *offset_to_next_page = 0;
16 |
17 | pml4_virtual_address const vaddr = { gva };
18 |
19 | // guest PML4
20 |   auto const pml4 = reinterpret_cast<pml4e_64*>(host_physical_memory_base
21 | + (guest_cr3.address_of_page_directory << 12));
22 | auto const pml4e = pml4[vaddr.pml4_idx];
23 |
24 | if (!pml4e.present)
25 | return 0;
26 |
27 | // guest PDPT
28 |   auto const pdpt = reinterpret_cast<pdpte_64*>(host_physical_memory_base
29 | + (pml4e.page_frame_number << 12));
30 | auto const pdpte = pdpt[vaddr.pdpt_idx];
31 |
32 | if (!pdpte.present)
33 | return 0;
34 |
35 | if (pdpte.large_page) {
36 | pdpte_1gb_64 pdpte_1gb;
37 | pdpte_1gb.flags = pdpte.flags;
38 |
39 | auto const offset = (vaddr.pd_idx << 21) + (vaddr.pt_idx << 12) + vaddr.offset;
40 |
41 | // 1GB
42 | if (offset_to_next_page)
43 | *offset_to_next_page = 0x40000000 - offset;
44 |
45 | return (pdpte_1gb.page_frame_number << 30) + offset;
46 | }
47 |
48 | // guest PD
49 |   auto const pd = reinterpret_cast<pde_64*>(host_physical_memory_base
50 | + (pdpte.page_frame_number << 12));
51 | auto const pde = pd[vaddr.pd_idx];
52 |
53 | if (!pde.present)
54 | return 0;
55 |
56 | if (pde.large_page) {
57 | pde_2mb_64 pde_2mb;
58 | pde_2mb.flags = pde.flags;
59 |
60 | auto const offset = (vaddr.pt_idx << 12) + vaddr.offset;
61 |
62 | // 2MB page
63 | if (offset_to_next_page)
64 | *offset_to_next_page = 0x200000 - offset;
65 |
66 | return (pde_2mb.page_frame_number << 21) + offset;
67 | }
68 |
69 | // guest PT
70 |   auto const pt = reinterpret_cast<pte_64*>(host_physical_memory_base
71 | + (pde.page_frame_number << 12));
72 | auto const pte = pt[vaddr.pt_idx];
73 |
74 | if (!pte.present)
75 | return 0;
76 |
77 | // 4KB page
78 | if (offset_to_next_page)
79 | *offset_to_next_page = 0x1000 - vaddr.offset;
80 |
81 | return (pte.page_frame_number << 12) + vaddr.offset;
82 | }
83 |
84 | // translate a GVA to a GPA. offset_to_next_page is the number of bytes to
85 | // the next page (i.e. the number of bytes that can be safely accessed through
86 | // the GPA in order to modify the GVA).
87 | uint64_t gva2gpa(void* const gva, size_t* const offset_to_next_page) {
88 | cr3 guest_cr3;
89 | guest_cr3.flags = vmx_vmread(VMCS_GUEST_CR3);
90 | return gva2gpa(guest_cr3, gva, offset_to_next_page);
91 | }
92 |
93 | // translate a GVA to an HVA. offset_to_next_page is the number of bytes to
94 | // the next page (i.e. the number of bytes that can be safely accessed through
95 | // the HVA in order to modify the GVA).
96 | void* gva2hva(cr3 const guest_cr3, void* const gva, size_t* const offset_to_next_page) {
97 | auto const gpa = gva2gpa(guest_cr3, gva, offset_to_next_page);
98 | if (!gpa)
99 | return nullptr;
100 | return host_physical_memory_base + gpa;
101 | }
102 |
103 | // translate a GVA to an HVA. offset_to_next_page is the number of bytes to
104 | // the next page (i.e. the number of bytes that can be safely accessed through
105 | // the HVA in order to modify the GVA).
106 | void* gva2hva(void* const gva, size_t* const offset_to_next_page) {
107 | cr3 guest_cr3;
108 | guest_cr3.flags = vmx_vmread(VMCS_GUEST_CR3);
109 | return gva2hva(guest_cr3, gva, offset_to_next_page);
110 | }
111 |
112 | // attempt to read the memory at the specified guest virtual address from root-mode
113 | size_t read_guest_virtual_memory(cr3 const guest_cr3,
114 | void* const gva, void* const buffer, size_t const size) {
115 | // the GVA that we're reading from
116 |   auto const src = reinterpret_cast<uint8_t*>(gva);
117 |
118 | // the HVA that we're writing to
119 |   auto const dst = reinterpret_cast<uint8_t*>(buffer);
120 |
121 | size_t bytes_read = 0;
122 |
123 | // translate and read 1 page at a time
124 | while (bytes_read < size) {
125 | size_t src_remaining = 0;
126 |
127 | // translate the guest virtual address to a host virtual address
128 | auto const curr_src = gva2hva(guest_cr3, src + bytes_read, &src_remaining);
129 |
130 | // paged out
131 | if (!curr_src)
132 | return bytes_read;
133 |
134 | // the maximum allowed size that we can read at once with the translated HVA
135 | auto const curr_size = min(size - bytes_read, src_remaining);
136 |
137 | host_exception_info e;
138 | memcpy_safe(e, dst + bytes_read, curr_src, curr_size);
139 |
140 | // this shouldn't ever happen...
141 | if (e.exception_occurred) {
142 | HV_LOG_ERROR("Failed to memcpy in read_guest_virtual_memory().");
143 | return bytes_read;
144 | }
145 |
146 | bytes_read += curr_size;
147 | }
148 |
149 | return bytes_read;
150 | }
151 |
152 | // attempt to read the memory at the specified guest virtual address from root-mode
153 | size_t read_guest_virtual_memory(void* const gva, void* const buffer, size_t const size) {
154 | cr3 guest_cr3;
155 | guest_cr3.flags = vmx_vmread(VMCS_GUEST_CR3);
156 | return read_guest_virtual_memory(guest_cr3, gva, buffer, size);
157 | }
158 |
159 | // attempt to read the memory at the specified guest physical address from root-mode
160 | bool read_guest_physical_memory(uint64_t const gpa, void* const buffer, size_t const size) {
161 | host_exception_info e;
162 | memcpy_safe(e, buffer, host_physical_memory_base + gpa, size);
163 | return !e.exception_occurred;
164 | }
165 |
166 | } // namespace hv
167 |
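Every translation helper above chunks its copies using offset_to_next_page, which falls out of the pml4_virtual_address decomposition in mm.h: four 9-bit table indices above a 12-bit page offset. The same slicing done by hand:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t const va = 0x00007FF712345678ull;

      // same slices as the pml4_virtual_address bitfields in mm.h
      auto const offset   = (va >>  0) & 0xFFF; // bits 0-11
      auto const pt_idx   = (va >> 12) & 0x1FF; // bits 12-20
      auto const pd_idx   = (va >> 21) & 0x1FF; // bits 21-29
      auto const pdpt_idx = (va >> 30) & 0x1FF; // bits 30-38
      auto const pml4_idx = (va >> 39) & 0x1FF; // bits 39-47

      // for a 4KB mapping this is what *offset_to_next_page receives
      auto const to_next_page = 0x1000 - offset;

      std::printf("pml4=%llu pdpt=%llu pd=%llu pt=%llu off=0x%llX next=0x%llX\n",
        pml4_idx, pdpt_idx, pd_idx, pt_idx, offset, to_next_page);
      return 0;
    }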
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/mm.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <ntddk.h>
4 | #include "ia32.hpp"
5 |
6 | namespace hv {
7 |
8 | // represents a 4-level virtual address
9 | union pml4_virtual_address {
10 | void const* address;
11 | struct {
12 | uint64_t offset : 12;
13 | uint64_t pt_idx : 9;
14 | uint64_t pd_idx : 9;
15 | uint64_t pdpt_idx : 9;
16 | uint64_t pml4_idx : 9;
17 | };
18 | };
19 |
20 | // translate a GVA to a GPA. offset_to_next_page is the number of bytes to
21 | // the next page (i.e. the number of bytes that can be safely accessed through
22 | // the GPA in order to modify the GVA).
23 | uint64_t gva2gpa(cr3 guest_cr3, void* gva, size_t* offset_to_next_page = nullptr);
24 |
25 | // translate a GVA to a GPA. offset_to_next_page is the number of bytes to
26 | // the next page (i.e. the number of bytes that can be safely accessed through
27 | // the GPA in order to modify the GVA).
28 | uint64_t gva2gpa(void* gva, size_t* offset_to_next_page = nullptr);
29 |
30 | // translate a GVA to an HVA. offset_to_next_page is the number of bytes to
31 | // the next page (i.e. the number of bytes that can be safely accessed through
32 | // the HVA in order to modify the GVA).
33 | void* gva2hva(cr3 guest_cr3, void* gva, size_t* offset_to_next_page = nullptr);
34 |
35 | // translate a GVA to an HVA. offset_to_next_page is the number of bytes to
36 | // the next page (i.e. the number of bytes that can be safely accessed through
37 | // the HVA in order to modify the GVA).
38 | void* gva2hva(void* gva, size_t* offset_to_next_page = nullptr);
39 |
40 | // attempt to read the memory at the specified guest virtual address from root-mode
41 | size_t read_guest_virtual_memory(cr3 guest_cr3, void* gva, void* buffer, size_t size);
42 |
43 | // attempt to read the memory at the specified guest virtual address from root-mode
44 | size_t read_guest_virtual_memory(void* gva, void* buffer, size_t size);
45 |
46 | // attempt to read the memory at the specified guest physical address from root-mode
47 | bool read_guest_physical_memory(uint64_t gpa, void* buffer, size_t size);
48 |
49 | } // namespace hv
50 |
51 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/mtrr.cpp:
--------------------------------------------------------------------------------
1 | #include "mtrr.h"
2 | #include "arch.h"
3 |
4 | namespace hv {
5 |
6 | // read MTRR data into a single structure
7 | mtrr_data read_mtrr_data() {
8 | mtrr_data mtrrs;
9 |
10 | mtrrs.cap.flags = __readmsr(IA32_MTRR_CAPABILITIES);
11 | mtrrs.def_type.flags = __readmsr(IA32_MTRR_DEF_TYPE);
12 | mtrrs.var_count = 0;
13 |
14 | for (uint32_t i = 0; i < mtrrs.cap.variable_range_count; ++i) {
15 | ia32_mtrr_physmask_register mask;
16 | mask.flags = __readmsr(IA32_MTRR_PHYSMASK0 + i * 2);
17 |
18 | if (!mask.valid)
19 | continue;
20 |
21 | mtrrs.variable[mtrrs.var_count].mask = mask;
22 | mtrrs.variable[mtrrs.var_count].base.flags =
23 | __readmsr(IA32_MTRR_PHYSBASE0 + i * 2);
24 |
25 | ++mtrrs.var_count;
26 | }
27 |
28 | return mtrrs;
29 | }
30 |
31 | // calculate the MTRR memory type for a single page
32 | static uint8_t calc_mtrr_mem_type(mtrr_data const& mtrrs, uint64_t const pfn) {
33 | if (!mtrrs.def_type.mtrr_enable)
34 | return MEMORY_TYPE_UNCACHEABLE;
35 |
36 | // fixed range MTRRs
37 | if (pfn < 0x100 && mtrrs.cap.fixed_range_supported
38 | && mtrrs.def_type.fixed_range_mtrr_enable) {
39 | // TODO: implement this
40 | return MEMORY_TYPE_UNCACHEABLE;
41 | }
42 |
43 | uint8_t curr_mem_type = MEMORY_TYPE_INVALID;
44 |
45 | // variable-range MTRRs
46 | for (uint32_t i = 0; i < mtrrs.var_count; ++i) {
47 | auto const base = mtrrs.variable[i].base.page_frame_number;
48 | auto const mask = mtrrs.variable[i].mask.page_frame_number;
49 |
50 | // 3.11.11.2.3
51 | // essentially checking if the top part of the address (as specified
52 | // by the PHYSMASK) is equal to the top part of the PHYSBASE.
53 | if ((pfn & mask) == (base & mask)) {
54 | auto const type = static_cast<uint8_t>(mtrrs.variable[i].base.type);
55 |
56 | // UC takes precedence over everything
57 | if (type == MEMORY_TYPE_UNCACHEABLE)
58 | return MEMORY_TYPE_UNCACHEABLE;
59 |
60 | // this works for WT and WB, which is the only other "defined" overlap scenario
61 | if (type < curr_mem_type)
62 | curr_mem_type = type;
63 | }
64 | }
65 |
66 | // no MTRR covers the specified address
67 | if (curr_mem_type == MEMORY_TYPE_INVALID)
68 | return mtrrs.def_type.default_memory_type;
69 |
70 | return curr_mem_type;
71 | }
72 |
73 | // calculate the MTRR memory type for the given physical memory range
74 | uint8_t calc_mtrr_mem_type(mtrr_data const& mtrrs, uint64_t address, uint64_t size) {
75 | // base address must be on at least a 4KB boundary
76 | address &= ~0xFFFull;
77 |
78 | // minimum range size is 4KB
79 | size = (size + 0xFFF) & ~0xFFFull;
80 |
81 | uint8_t curr_mem_type = MEMORY_TYPE_INVALID;
82 |
83 | for (uint64_t curr = address; curr < address + size; curr += 0x1000) {
84 | auto const type = calc_mtrr_mem_type(mtrrs, curr >> 12);
85 |
86 | if (type == MEMORY_TYPE_UNCACHEABLE)
87 | return type;
88 |
89 | // use the worse memory type between the two
90 | if (type < curr_mem_type)
91 | curr_mem_type = type;
92 | }
93 |
94 | if (curr_mem_type == MEMORY_TYPE_INVALID)
95 | return MEMORY_TYPE_UNCACHEABLE;
96 |
97 | return curr_mem_type;
98 | }
99 |
100 | } // namespace hv
101 |
102 |
--------------------------------------------------------------------------------
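
The (pfn & mask) == (base & mask) test in calc_mtrr_mem_type() is easiest to see with concrete numbers. A worked example with hypothetical register values (a 1 GiB range at 0xC0000000 on a CPU with 48 physical address bits):

    // hypothetical variable-range MTRR covering 0xC0000000..0xFFFFFFFF (1 GiB):
    //   PHYSBASE pfn = 0xC0000000     >> 12 = 0xC0000
    //   PHYSMASK pfn = 0xFFFFC0000000 >> 12 = 0xFFFFC0000
    // the page at 0xD0000000 (pfn 0xD0000) is covered:
    //   (0xD0000 & 0xFFFFC0000) == (0xC0000 & 0xFFFFC0000) == 0xC0000
    // while the page at 0x80000000 (pfn 0x80000) is not:
    //   (0x80000 & 0xFFFFC0000) == 0x80000, which != 0xC0000
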
/ReloadDbg/hv/hv/mtrr.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct mtrr_data {
8 | ia32_mtrr_capabilities_register cap;
9 | ia32_mtrr_def_type_register def_type;
10 |
11 | // fixed-range MTRRs
12 | struct {
13 | // TODO: implement
14 | } fixed;
15 |
16 | // variable-range MTRRs
17 | struct {
18 | ia32_mtrr_physbase_register base;
19 | ia32_mtrr_physmask_register mask;
20 | } variable[64];
21 |
22 | // number of valid variable-range MTRRs
23 | size_t var_count;
24 | };
25 |
26 | // read MTRR data into a single structure
27 | mtrr_data read_mtrr_data();
28 |
29 | // calculate the MTRR memory type for the given physical memory range
30 | uint8_t calc_mtrr_mem_type(mtrr_data const& mtrrs, uint64_t address, uint64_t size);
31 |
32 | } // namespace hv
33 |
34 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/page-tables.cpp:
--------------------------------------------------------------------------------
1 | #include "page-tables.h"
2 | #include "vcpu.h"
3 | #include "hv.h"
4 | #include "mm.h"
5 |
6 | namespace hv {
7 |
8 | // directly map physical memory into the host page tables
9 | static void map_physical_memory(host_page_tables& pt) {
10 | auto& pml4e = pt.pml4[host_physical_memory_pml4_idx];
11 | pml4e.flags = 0;
12 | pml4e.present = 1;
13 | pml4e.write = 1;
14 | pml4e.supervisor = 0;
15 | pml4e.page_level_write_through = 0;
16 | pml4e.page_level_cache_disable = 0;
17 | pml4e.accessed = 0;
18 | pml4e.execute_disable = 0;
19 | pml4e.page_frame_number = MmGetPhysicalAddress(&pt.phys_pdpt).QuadPart >> 12;
20 |
21 | // TODO: add support for 1GB pages
22 | // TODO: check if 2MB pages are supported (pretty much always are)
23 |
24 | for (uint64_t i = 0; i < host_physical_memory_pd_count; ++i) {
25 | auto& pdpte = pt.phys_pdpt[i];
26 | pdpte.flags = 0;
27 | pdpte.present = 1;
28 | pdpte.write = 1;
29 | pdpte.supervisor = 0;
30 | pdpte.page_level_write_through = 0;
31 | pdpte.page_level_cache_disable = 0;
32 | pdpte.accessed = 0;
33 | pdpte.execute_disable = 0;
34 | pdpte.page_frame_number = MmGetPhysicalAddress(&pt.phys_pds[i]).QuadPart >> 12;
35 |
36 | for (uint64_t j = 0; j < 512; ++j) {
37 | auto& pde = pt.phys_pds[i][j];
38 | pde.flags = 0;
39 | pde.present = 1;
40 | pde.write = 1;
41 | pde.supervisor = 0;
42 | pde.page_level_write_through = 0;
43 | pde.page_level_cache_disable = 0;
44 | pde.accessed = 0;
45 | pde.dirty = 0;
46 | pde.large_page = 1;
47 | pde.global = 0;
48 | pde.pat = 0;
49 | pde.execute_disable = 0;
50 | pde.page_frame_number = (i << 9) + j;
51 | }
52 | }
53 | }
54 |
55 | // initialize the host page tables
56 | void prepare_host_page_tables() {
57 | auto& pt = ghv.host_page_tables;
58 | memset(&pt, 0, sizeof(pt));
59 |
60 | // map all of physical memory into our address space
61 | map_physical_memory(pt);
62 |
63 | PHYSICAL_ADDRESS pml4_address;
64 | pml4_address.QuadPart = ghv.system_cr3.address_of_page_directory << 12;
65 |
66 | // kernel PML4 address
67 | auto const guest_pml4 = static_cast<pml4e_64*>(MmGetVirtualForPhysical(pml4_address));
68 |
69 | // copy the top half of the System pml4 (a.k.a. the kernel address space)
70 | memcpy(&pt.pml4[256], &guest_pml4[256], sizeof(pml4e_64) * 256);
71 | }
72 |
73 | } // namespace hv
74 |
--------------------------------------------------------------------------------
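
map_physical_memory() above is an identity map built out of 2 MiB large pages: entry phys_pds[i][j] covers the physical range starting at ((i << 9) + j) * 2 MiB, so physical address X becomes reachable at host_physical_memory_base + X while in root mode. A sketch of a bounds-checked helper over that mapping (gpa_to_direct_mapped_hva is a hypothetical name; the real accessors live in mm.cpp):

    // sketch: resolve a physical address through the direct map built above
    void* gpa_to_direct_mapped_hva(uint64_t const gpa) {
      // each PD covers 512 * 2 MiB = 1 GiB of physical memory
      constexpr uint64_t mapped_size =
        hv::host_physical_memory_pd_count * (512ull * 0x200000);
      if (gpa >= mapped_size)
        return nullptr; // beyond what map_physical_memory() wired up
      return hv::host_physical_memory_base + gpa;
    }
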
/ReloadDbg/hv/hv/page-tables.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | // how much of physical memory to map into the host address-space
8 | inline constexpr size_t host_physical_memory_pd_count = 64;
9 |
10 | // physical memory is directly mapped to this pml4 entry
11 | inline constexpr uint64_t host_physical_memory_pml4_idx = 255;
12 |
13 | // directly access physical memory by using [base + offset]
14 | inline uint8_t* const host_physical_memory_base = reinterpret_cast<uint8_t*>(
15 | host_physical_memory_pml4_idx << (9 + 9 + 9 + 12));
16 |
17 | struct host_page_tables {
18 | // array of PML4 entries that point to a PDPT
19 | alignas(0x1000) pml4e_64 pml4[512];
20 |
21 | // PDPT for mapping physical memory
22 | alignas(0x1000) pdpte_64 phys_pdpt[512];
23 |
24 | // PDs for mapping physical memory
25 | alignas(0x1000) pde_2mb_64 phys_pds[host_physical_memory_pd_count][512];
26 | };
27 |
28 | // initialize the host page tables
29 | void prepare_host_page_tables();
30 |
31 | } // namespace hv
32 |
33 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/segment.cpp:
--------------------------------------------------------------------------------
1 | #include "segment.h"
2 |
3 | namespace hv {
4 |
5 | // calculate a segment's base address
6 | uint64_t segment_base(
7 | segment_descriptor_register_64 const& gdtr,
8 | segment_selector const selector) {
9 | // null selector
10 | if (selector.index == 0)
11 | return 0;
12 |
13 | // fetch the segment descriptor from the gdtr
14 | auto const descriptor = reinterpret_cast<segment_descriptor_64*>(
15 | gdtr.base_address + static_cast<uint64_t>(selector.index) * 8);
16 |
17 | // 3.3.4.5
18 | // calculate the segment base address
19 | auto base_address =
20 | (uint64_t)descriptor->base_address_low |
21 | ((uint64_t)descriptor->base_address_middle << 16) |
22 | ((uint64_t)descriptor->base_address_high << 24);
23 |
24 | // 3.3.5.2
25 | // system descriptors are expanded to 16 bytes for ia-32e
26 | if (descriptor->descriptor_type == SEGMENT_DESCRIPTOR_TYPE_SYSTEM)
27 | base_address |= (uint64_t)descriptor->base_address_upper << 32;
28 |
29 | return base_address;
30 | }
31 |
32 | uint64_t segment_base(
33 | segment_descriptor_register_64 const& gdtr,
34 | uint16_t const selector) {
35 | segment_selector s;
36 | s.flags = selector;
37 | return segment_base(gdtr, s);
38 | }
39 |
40 | // calculate a segment's access rights
41 | vmx_segment_access_rights segment_access(
42 | segment_descriptor_register_64 const& gdtr,
43 | segment_selector const selector) {
44 | // fetch the segment descriptor from the gdtr
45 | auto const descriptor = reinterpret_cast<segment_descriptor_32*>(
46 | gdtr.base_address + static_cast<uint64_t>(selector.index) * 8);
47 |
48 | vmx_segment_access_rights access;
49 | access.flags = 0;
50 |
51 | // 3.24.4.1
52 | access.type = descriptor->type;
53 | access.descriptor_type = descriptor->descriptor_type;
54 | access.descriptor_privilege_level = descriptor->descriptor_privilege_level;
55 | access.present = descriptor->present;
56 | access.available_bit = descriptor->system;
57 | access.long_mode = descriptor->long_mode;
58 | access.default_big = descriptor->default_big;
59 | access.granularity = descriptor->granularity;
60 | access.unusable = (selector.index == 0);
61 |
62 | return access;
63 | }
64 |
65 | vmx_segment_access_rights segment_access(
66 | segment_descriptor_register_64 const& gdtr,
67 | uint16_t const selector) {
68 | segment_selector s;
69 | s.flags = selector;
70 | return segment_access(gdtr, s);
71 | }
72 |
73 | } // namespace hv
74 |
--------------------------------------------------------------------------------
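
The bit-stitching in segment_base() is clearer with a made-up descriptor. For a legacy descriptor whose base is 0x12345678:

    //   base_address_low    = 0x5678  ->  bits [15:0]
    //   base_address_middle = 0x34    ->  bits [23:16]
    //   base_address_high   = 0x12    ->  bits [31:24]
    //   0x5678 | (0x34 << 16) | (0x12 << 24) == 0x12345678
    // for a system descriptor (e.g. a TSS) in ia-32e mode, the 16-byte
    // expanded form additionally supplies bits [63:32] via base_address_upper
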
/ReloadDbg/hv/hv/segment.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | // calculate a segment's base address
8 | uint64_t segment_base(
9 | segment_descriptor_register_64 const& gdtr,
10 | segment_selector selector);
11 |
12 | uint64_t segment_base(
13 | segment_descriptor_register_64 const& gdtr,
14 | uint16_t selector);
15 |
16 | // calculate a segment's access rights
17 | vmx_segment_access_rights segment_access(
18 | segment_descriptor_register_64 const& gdtr,
19 | segment_selector selector);
20 |
21 | vmx_segment_access_rights segment_access(
22 | segment_descriptor_register_64 const& gdtr,
23 | uint16_t selector);
24 |
25 | } // namespace hv
26 |
27 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/spin-lock.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <intrin.h>
4 |
5 | namespace hv {
6 |
7 | // minimalistic spin lock class
8 | struct spin_lock {
9 | void initialize() {
10 | lock = 0;
11 | }
12 |
13 | void acquire() {
14 | while (1 == _InterlockedCompareExchange(&lock, 1, 0))
15 | _mm_pause();
16 | }
17 |
18 | void release() {
19 | lock = 0;
20 | }
21 |
22 | volatile long lock;
23 | };
24 |
25 | class scoped_spin_lock {
26 | public:
27 | scoped_spin_lock(spin_lock& lock)
28 | : lock_(lock) {
29 | lock.acquire();
30 | }
31 |
32 | ~scoped_spin_lock() {
33 | lock_.release();
34 | }
35 |
36 | // no copying
37 | scoped_spin_lock(scoped_spin_lock const&) = delete;
38 | scoped_spin_lock& operator=(scoped_spin_lock const&) = delete;
39 |
40 | private:
41 | spin_lock& lock_;
42 | };
43 |
44 | } // namespace hv
45 |
46 |
--------------------------------------------------------------------------------
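
A usage sketch for the RAII wrapper above (logger_lock and shared_counter are hypothetical):

    hv::spin_lock logger_lock; // initialize() once before first use
    int shared_counter = 0;

    void increment_shared_counter() {
      // acquire() runs in the constructor...
      hv::scoped_spin_lock guard{ logger_lock };
      ++shared_counter;
    } // ...and release() runs in the destructor, even on an early return
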
/ReloadDbg/hv/hv/timing.cpp:
--------------------------------------------------------------------------------
1 | #include "timing.h"
2 | #include "vcpu.h"
3 | #include "vmx.h"
4 | #include "logger.h"
5 |
6 | #include <intrin.h>
7 |
8 | namespace hv {
9 |
10 | // try to hide the vm-exit overhead from being detected through timings
11 | void hide_vm_exit_overhead(vcpu* const cpu) {
12 | //
13 | // Guest APERF/MPERF values are stored/restored on vm-entry and vm-exit,
14 | // however, there appears to be a small, yet constant, overhead that occurs
15 | // when the CPU is performing these stores and loads. This is the case for
16 | // every MSR, so naturally PERF_GLOBAL_CTRL is affected as well. If it wasn't
17 | // for this, hiding vm-exit overhead would be sooooo much easier and cleaner,
18 | // but whatever.
19 | //
20 |
21 | ia32_perf_global_ctrl_register perf_global_ctrl;
22 | perf_global_ctrl.flags = cpu->msr_exit_store.perf_global_ctrl.msr_data;
23 |
24 | // make sure the CPU loads the previously stored guest state on vm-entry
25 | cpu->msr_entry_load.aperf.msr_data = cpu->msr_exit_store.aperf.msr_data;
26 | cpu->msr_entry_load.mperf.msr_data = cpu->msr_exit_store.mperf.msr_data;
27 | vmx_vmwrite(VMCS_GUEST_PERF_GLOBAL_CTRL, perf_global_ctrl.flags);
28 |
29 | // account for the constant overhead associated with loading/storing MSRs
30 | cpu->msr_entry_load.aperf.msr_data -= cpu->vm_exit_mperf_overhead;
31 | cpu->msr_entry_load.mperf.msr_data -= cpu->vm_exit_mperf_overhead;
32 |
33 | // account for the vm-exit overhead in CPU_CLK_UNHALTED.REF_TSC (fixed counter #2)
34 | if (perf_global_ctrl.en_fixed_ctrn & (1ull << 2)) {
35 | auto const cpl = current_guest_cpl();
36 |
37 | ia32_fixed_ctr_ctrl_register fixed_ctr_ctrl;
38 | fixed_ctr_ctrl.flags = __readmsr(IA32_FIXED_CTR_CTRL);
39 |
40 | // this also needs to be done for many other PMCs, but whatever
41 | if ((cpl == 0 && fixed_ctr_ctrl.en2_os) || (cpl == 3 && fixed_ctr_ctrl.en2_usr))
42 | __writemsr(IA32_FIXED_CTR2, __readmsr(IA32_FIXED_CTR2) - cpu->vm_exit_ref_tsc_overhead);
43 | }
44 | }
45 |
46 | // measure the overhead of a vm-exit (RDTSC)
47 | uint64_t measure_vm_exit_tsc_overhead() {
48 | _disable();
49 |
50 | hypercall_input hv_input;
51 | hv_input.code = hypercall_ping;
52 | hv_input.key = hypercall_key;
53 |
54 | uint64_t lowest = ~0ull;
55 | uint64_t lowest_vm_exit_overhead = ~0ull;
56 | uint64_t lowest_timing_overhead = ~0ull;
57 |
58 | // perform the measurement 10 times and use the smallest time
59 | for (int i = 0; i < 10; ++i) {
60 | _mm_lfence();
61 | auto start = __rdtsc();
62 | _mm_lfence();
63 |
64 | _mm_lfence();
65 | auto end = __rdtsc();
66 | _mm_lfence();
67 |
68 | auto const timing_overhead = (end - start);
69 |
70 | _mm_lfence();
71 | start = __rdtsc();
72 | _mm_lfence();
73 |
74 | vmx_vmcall(hv_input);
75 |
76 | _mm_lfence();
77 | end = __rdtsc();
78 | _mm_lfence();
79 |
80 | auto const vm_exit_overhead = (end - start);
81 |
82 | if (vm_exit_overhead < lowest_vm_exit_overhead) {
83 | lowest_vm_exit_overhead = vm_exit_overhead;
84 | }
85 | if (timing_overhead < lowest_timing_overhead) {
86 | lowest_timing_overhead = timing_overhead;
87 | }
88 |
89 | }
90 | lowest = (lowest_vm_exit_overhead - lowest_timing_overhead);
91 | _enable();
92 | return lowest;
93 | }
94 |
95 | // measure the overhead of a vm-exit (CPU_CLK_UNHALTED.REF_TSC)
96 | uint64_t measure_vm_exit_ref_tsc_overhead() {
97 | _disable();
98 |
99 | hypercall_input hv_input;
100 | hv_input.code = hypercall_ping;
101 | hv_input.key = hypercall_key;
102 |
103 | ia32_fixed_ctr_ctrl_register curr_fixed_ctr_ctrl;
104 | curr_fixed_ctr_ctrl.flags = __readmsr(IA32_FIXED_CTR_CTRL);
105 |
106 | ia32_perf_global_ctrl_register curr_perf_global_ctrl;
107 | curr_perf_global_ctrl.flags = __readmsr(IA32_PERF_GLOBAL_CTRL);
108 |
109 | // enable fixed counter #2
110 | auto new_fixed_ctr_ctrl = curr_fixed_ctr_ctrl;
111 | new_fixed_ctr_ctrl.en2_os = 1;
112 | new_fixed_ctr_ctrl.en2_usr = 0;
113 | new_fixed_ctr_ctrl.en2_pmi = 0;
114 | new_fixed_ctr_ctrl.any_thread2 = 0;
115 | __writemsr(IA32_FIXED_CTR_CTRL, new_fixed_ctr_ctrl.flags);
116 |
117 | // enable fixed counter #2 in IA32_PERF_GLOBAL_CTRL
118 | auto new_perf_global_ctrl = curr_perf_global_ctrl;
119 | new_perf_global_ctrl.en_fixed_ctrn |= (1ull << 2);
120 | __writemsr(IA32_PERF_GLOBAL_CTRL, new_perf_global_ctrl.flags);
121 |
122 | uint64_t lowest = ~0ull;
123 | uint64_t lowest_vm_exit_overhead = ~0ull;
124 | uint64_t lowest_timing_overhead = ~0ull;
125 |
126 | // perform the measurement 10 times and use the smallest time
127 | for (int i = 0; i < 10; ++i) {
128 | _mm_lfence();
129 | auto start = __readmsr(IA32_FIXED_CTR2);
130 | _mm_lfence();
131 |
132 | _mm_lfence();
133 | auto end = __readmsr(IA32_FIXED_CTR2);
134 | _mm_lfence();
135 |
136 | auto const timing_overhead = (end - start);
137 |
138 | _mm_lfence();
139 | start = __readmsr(IA32_FIXED_CTR2);
140 | _mm_lfence();
141 |
142 | vmx_vmcall(hv_input);
143 |
144 | _mm_lfence();
145 | end = __readmsr(IA32_FIXED_CTR2);
146 | _mm_lfence();
147 |
148 | auto const vm_exit_overhead = (end - start);
149 |
150 | if (vm_exit_overhead < lowest_vm_exit_overhead) {
151 | lowest_vm_exit_overhead = vm_exit_overhead;
152 | }
153 | if (timing_overhead < lowest_timing_overhead) {
154 | lowest_timing_overhead = timing_overhead;
155 | }
156 |
157 | }
158 | lowest = (lowest_vm_exit_overhead - lowest_timing_overhead);
159 | // restore MSRs
160 | __writemsr(IA32_PERF_GLOBAL_CTRL, curr_perf_global_ctrl.flags);
161 | __writemsr(IA32_FIXED_CTR_CTRL, curr_fixed_ctr_ctrl.flags);
162 |
163 | _enable();
164 | return lowest;
165 | }
166 |
167 | // measure the overhead of a vm-exit (IA32_MPERF)
168 | uint64_t measure_vm_exit_mperf_overhead() {
169 | _disable();
170 |
171 | hypercall_input hv_input;
172 | hv_input.code = hypercall_ping;
173 | hv_input.key = hypercall_key;
174 |
175 | uint64_t lowest = ~0ull;
176 | uint64_t lowest_vm_exit_overhead = ~0ull;
177 | uint64_t lowest_timing_overhead = ~0ull;
178 |
179 | // perform the measurement 10 times and use the smallest time
180 | for (int i = 0; i < 10; ++i) {
181 | _mm_lfence();
182 | auto start = __readmsr(IA32_MPERF);
183 | _mm_lfence();
184 |
185 | _mm_lfence();
186 | auto end = __readmsr(IA32_MPERF);
187 | _mm_lfence();
188 |
189 | auto const timing_overhead = (end - start);
190 |
191 | _mm_lfence();
192 | start = __readmsr(IA32_MPERF);
193 | _mm_lfence();
194 |
195 | vmx_vmcall(hv_input);
196 |
197 | _mm_lfence();
198 | end = __readmsr(IA32_MPERF);
199 | _mm_lfence();
200 |
201 | auto const vm_exit_overhead = (end - start);
202 |
203 | if (vm_exit_overhead < lowest_vm_exit_overhead) {
204 | lowest_vm_exit_overhead = vm_exit_overhead;
205 | }
206 | if (timing_overhead < lowest_timing_overhead) {
207 | lowest_timing_overhead = timing_overhead;
208 | }
209 |
210 | }
211 | lowest = (lowest_vm_exit_overhead - lowest_timing_overhead);
212 | _enable();
213 | return lowest;
214 | }
215 |
216 | } // namespace hv
217 |
218 |
--------------------------------------------------------------------------------
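
All three measurement routines above share one pattern: bracket an empty region to learn the cost of reading the counter itself, bracket the hypercall, keep the minimum of each across 10 iterations, and subtract. A condensed sketch of that pattern (read_counter and measured_op are hypothetical stand-ins for __rdtsc/__readmsr and vmx_vmcall; the _mm_lfence serialization is omitted for brevity):

    template <typename Counter, typename Op>
    uint64_t measure_min_overhead(Counter read_counter, Op measured_op) {
      uint64_t lowest_op = ~0ull, lowest_empty = ~0ull;
      for (int i = 0; i < 10; ++i) {
        // cost of the measurement itself (back-to-back counter reads)
        auto start = read_counter();
        auto end   = read_counter();
        if (end - start < lowest_empty)
          lowest_empty = end - start;

        // cost of the operation under test
        start = read_counter();
        measured_op();
        end = read_counter();
        if (end - start < lowest_op)
          lowest_op = end - start;
      }
      // the minimum filters out interrupt and SMM noise; subtracting the
      // empty bracket isolates the operation's own overhead
      return lowest_op - lowest_empty;
    }
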
/ReloadDbg/hv/hv/timing.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct vcpu;
8 |
9 | // try to hide the vm-exit overhead from being detected through timings
10 | void hide_vm_exit_overhead(vcpu* cpu);
11 |
12 | // measure the overhead of a vm-exit (RDTSC)
13 | uint64_t measure_vm_exit_tsc_overhead();
14 |
15 | // measure the overhead of a vm-exit (CPU_CLK_UNHALTED.REF_TSC)
16 | uint64_t measure_vm_exit_ref_tsc_overhead();
17 |
18 | // measure the overhead of a vm-exit (IA32_MPERF)
19 | uint64_t measure_vm_exit_mperf_overhead();
20 |
21 | } // namespace hv
22 |
23 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/trap-frame.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "ia32.hpp"
4 |
5 | namespace hv {
6 |
7 | struct trap_frame {
8 | // TODO: SSE registers...
9 |
10 | // general-purpose registers
11 | union {
12 | uint64_t rax;
13 | uint32_t eax;
14 | uint16_t ax;
15 | uint8_t al;
16 | };
17 | union {
18 | uint64_t rcx;
19 | uint32_t ecx;
20 | uint16_t cx;
21 | uint8_t cl;
22 | };
23 | union {
24 | uint64_t rdx;
25 | uint32_t edx;
26 | uint16_t dx;
27 | uint8_t dl;
28 | };
29 | union {
30 | uint64_t rbx;
31 | uint32_t ebx;
32 | uint16_t bx;
33 | uint8_t bl;
34 | };
35 | union {
36 | uint64_t rbp;
37 | uint32_t ebp;
38 | uint16_t bp;
39 | uint8_t bpl;
40 | };
41 | union {
42 | uint64_t rsi;
43 | uint32_t esi;
44 | uint16_t si;
45 | uint8_t sil;
46 | };
47 | union {
48 | uint64_t rdi;
49 | uint32_t edi;
50 | uint16_t di;
51 | uint8_t dil;
52 | };
53 | union {
54 | uint64_t r8;
55 | uint32_t r8d;
56 | uint16_t r8w;
57 | uint8_t r8b;
58 | };
59 | union {
60 | uint64_t r9;
61 | uint32_t r9d;
62 | uint16_t r9w;
63 | uint8_t r9b;
64 | };
65 | union {
66 | uint64_t r10;
67 | uint32_t r10d;
68 | uint16_t r10w;
69 | uint8_t r10b;
70 | };
71 | union {
72 | uint64_t r11;
73 | uint32_t r11d;
74 | uint16_t r11w;
75 | uint8_t r11b;
76 | };
77 | union {
78 | uint64_t r12;
79 | uint32_t r12d;
80 | uint16_t r12w;
81 | uint8_t r12b;
82 | };
83 | union {
84 | uint64_t r13;
85 | uint32_t r13d;
86 | uint16_t r13w;
87 | uint8_t r13b;
88 | };
89 | union {
90 | uint64_t r14;
91 | uint32_t r14d;
92 | uint16_t r14w;
93 | uint8_t r14b;
94 | };
95 | union {
96 | uint64_t r15;
97 | uint32_t r15d;
98 | uint16_t r15w;
99 | uint8_t r15b;
100 | };
101 |
102 | // interrupt vector
103 | uint8_t vector;
104 |
105 | // _MACHINE_FRAME
106 | uint64_t error;
107 | uint64_t rip;
108 | uint64_t cs;
109 | uint64_t rflags;
110 | uint64_t rsp;
111 | uint64_t ss;
112 | };
113 |
114 | // remember to update this value in interrupt-handlers.asm
115 | static_assert(sizeof(trap_frame) == (0x78 + 0x38));
116 |
117 | } // namespace hv
118 |
119 |
--------------------------------------------------------------------------------
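
For reference, the asserted size decomposes as 15 general-purpose register unions at 8 bytes each (0x78), followed by the vector byte padded out to 8 bytes for the alignment of the following uint64_t, plus the six machine-frame qwords from error through ss, i.e. 7 * 8 = 0x38.
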
/ReloadDbg/hv/hv/vcpu.cpp:
--------------------------------------------------------------------------------
1 | #include "vcpu.h"
2 | #include "hv.h"
3 | #include "gdt.h"
4 | #include "idt.h"
5 | #include "vmx.h"
6 | #include "vmcs.h"
7 | #include "timing.h"
8 | #include "trap-frame.h"
9 | #include "exit-handlers.h"
10 | #include "exception-routines.h"
11 | #include "introspection.h"
12 |
13 | namespace hv {
14 |
15 | // defined in vm-launch.asm
16 | bool vm_launch();
17 |
18 | // cache certain fixed values (CPUID results, MSRs, etc) that are used
19 | // frequently during VMX operation (to speed up vm-exit handling).
20 | static void cache_cpu_data(vcpu_cached_data& cached) {
21 | __cpuid(reinterpret_cast<int*>(&cached.cpuid_01), 0x01);
22 |
23 | // VMX needs to be enabled to read from certain VMX_* MSRS
24 | if (!cached.cpuid_01.cpuid_feature_information_ecx.virtual_machine_extensions)
25 | return;
26 |
27 | cpuid_eax_80000008 cpuid_80000008;
28 | __cpuid(reinterpret_cast<int*>(&cpuid_80000008), 0x80000008);
29 |
30 | cached.max_phys_addr = cpuid_80000008.eax.number_of_physical_address_bits;
31 |
32 | cached.vmx_cr0_fixed0 = __readmsr(IA32_VMX_CR0_FIXED0);
33 | cached.vmx_cr0_fixed1 = __readmsr(IA32_VMX_CR0_FIXED1);
34 | cached.vmx_cr4_fixed0 = __readmsr(IA32_VMX_CR4_FIXED0);
35 | cached.vmx_cr4_fixed1 = __readmsr(IA32_VMX_CR4_FIXED1);
36 |
37 | cpuid_eax_0d_ecx_00 cpuid_0d;
38 | __cpuidex(reinterpret_cast<int*>(&cpuid_0d), 0x0D, 0x00);
39 |
40 | // features in XCR0 that are supported
41 | cached.xcr0_unsupported_mask = ~((static_cast<uint64_t>(
42 | cpuid_0d.edx.flags) << 32) | cpuid_0d.eax.flags);
43 |
44 | cached.feature_control.flags = __readmsr(IA32_FEATURE_CONTROL);
45 | cached.vmx_misc.flags = __readmsr(IA32_VMX_MISC);
46 |
47 | // create a fake guest FEATURE_CONTROL MSR that has VMX and SMX disabled
48 | cached.guest_feature_control = cached.feature_control;
49 | cached.guest_feature_control.lock_bit = 1;
50 | cached.guest_feature_control.enable_vmx_inside_smx = 0;
51 | cached.guest_feature_control.enable_vmx_outside_smx = 0;
52 | cached.guest_feature_control.senter_local_function_enables = 0;
53 | cached.guest_feature_control.senter_global_enable = 0;
54 | }
55 |
56 | // enable VMX operation prior to execution of the VMXON instruction
57 | static bool enable_vmx_operation(vcpu const* const cpu) {
58 | // 3.23.6
59 | if (!cpu->cached.cpuid_01.cpuid_feature_information_ecx.virtual_machine_extensions) {
60 | DbgPrint("[hv] VMX not supported by CPUID.\n");
61 | return false;
62 | }
63 |
64 | // 3.23.7
65 | if (!cpu->cached.feature_control.lock_bit ||
66 | !cpu->cached.feature_control.enable_vmx_outside_smx) {
67 | DbgPrint("[hv] VMX not enabled outside SMX.\n");
68 | return false;
69 | }
70 |
71 | _disable();
72 |
73 | auto cr0 = __readcr0();
74 | auto cr4 = __readcr4();
75 |
76 | // 3.23.7
77 | cr4 |= CR4_VMX_ENABLE_FLAG;
78 |
79 | // 3.23.8
80 | cr0 |= cpu->cached.vmx_cr0_fixed0;
81 | cr0 &= cpu->cached.vmx_cr0_fixed1;
82 | cr4 |= cpu->cached.vmx_cr4_fixed0;
83 | cr4 &= cpu->cached.vmx_cr4_fixed1;
84 |
85 | __writecr0(cr0);
86 | __writecr4(cr4);
87 |
88 | _enable();
89 |
90 | return true;
91 | }
92 |
93 | // enter VMX operation by executing VMXON
94 | static bool enter_vmx_operation(vmxon& vmxon_region) {
95 | ia32_vmx_basic_register vmx_basic;
96 | vmx_basic.flags = __readmsr(IA32_VMX_BASIC);
97 |
98 | // 3.24.11.5
99 | vmxon_region.revision_id = vmx_basic.vmcs_revision_id;
100 | vmxon_region.must_be_zero = 0;
101 |
102 | auto vmxon_phys = MmGetPhysicalAddress(&vmxon_region).QuadPart;
103 | NT_ASSERT(vmxon_phys % 0x1000 == 0);
104 |
105 | // enter vmx operation
106 | if (!vmx_vmxon(vmxon_phys)) {
107 | DbgPrint("[hv] VMXON failed.\n");
108 | return false;
109 | }
110 |
111 | // 3.28.3.3.4
112 | vmx_invept(invept_all_context, {});
113 |
114 | return true;
115 | }
116 |
117 | // load the VMCS pointer by executing VMPTRLD
118 | static bool load_vmcs_pointer(vmcs& vmcs_region) {
119 | ia32_vmx_basic_register vmx_basic;
120 | vmx_basic.flags = __readmsr(IA32_VMX_BASIC);
121 |
122 | // 3.24.2
123 | vmcs_region.revision_id = vmx_basic.vmcs_revision_id;
124 | vmcs_region.shadow_vmcs_indicator = 0;
125 |
126 | auto vmcs_phys = MmGetPhysicalAddress(&vmcs_region).QuadPart;
127 | NT_ASSERT(vmcs_phys % 0x1000 == 0);
128 |
129 | if (!vmx_vmclear(vmcs_phys)) {
130 | DbgPrint("[hv] VMCLEAR failed.\n");
131 | return false;
132 | }
133 |
134 | if (!vmx_vmptrld(vmcs_phys)) {
135 | DbgPrint("[hv] VMPTRLD failed.\n");
136 | return false;
137 | }
138 |
139 | return true;
140 | }
141 |
142 | // enable vm-exits for MTRR MSR writes
143 | static void enable_mtrr_exiting(vcpu* const cpu) {
144 | ia32_mtrr_capabilities_register mtrr_cap;
145 | mtrr_cap.flags = __readmsr(IA32_MTRR_CAPABILITIES);
146 |
147 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_DEF_TYPE, true);
148 |
149 | // enable exiting for fixed-range MTRRs
150 | if (mtrr_cap.fixed_range_supported) {
151 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_FIX64K_00000, true);
152 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_FIX16K_80000, true);
153 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_FIX16K_A0000, true);
154 |
155 | for (uint32_t i = 0; i < 8; ++i)
156 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_FIX4K_C0000 + i, true);
157 | }
158 |
159 | // enable exiting for variable-range MTRRs
160 | for (uint32_t i = 0; i < mtrr_cap.variable_range_count; ++i) {
161 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_PHYSBASE0 + i * 2, true);
162 | enable_exit_for_msr_write(cpu->msr_bitmap, IA32_MTRR_PHYSMASK0 + i * 2, true);
163 | }
164 | }
165 |
166 | // initialize external structures that are not included in the VMCS
167 | static void prepare_external_structures(vcpu* const cpu) {
168 | memset(&cpu->msr_bitmap, 0, sizeof(cpu->msr_bitmap));
169 | enable_exit_for_msr_read(cpu->msr_bitmap, IA32_FEATURE_CONTROL, true);
170 |
171 | enable_mtrr_exiting(cpu);
172 |
173 | // we don't care about anything that's in the TSS
174 | memset(&cpu->host_tss, 0, sizeof(cpu->host_tss));
175 |
176 | prepare_host_idt(cpu->host_idt);
177 | prepare_host_gdt(cpu->host_gdt, &cpu->host_tss);
178 |
179 | prepare_ept(cpu->ept);
180 | }
181 |
182 | // call the appropriate exit-handler for this vm-exit
183 | static void dispatch_vm_exit(vcpu* const cpu, vmx_vmexit_reason const reason) {
184 | switch (reason.basic_exit_reason) {
185 | case VMX_EXIT_REASON_EXCEPTION_OR_NMI: handle_exception_or_nmi(cpu); break;
186 | case VMX_EXIT_REASON_EXECUTE_GETSEC: emulate_getsec(cpu); break;
187 | case VMX_EXIT_REASON_EXECUTE_INVD: emulate_invd(cpu); break;
188 | case VMX_EXIT_REASON_NMI_WINDOW: handle_nmi_window(cpu); break;
189 | case VMX_EXIT_REASON_EXECUTE_CPUID: emulate_cpuid(cpu); break;
190 | case VMX_EXIT_REASON_MOV_CR: handle_mov_cr(cpu); break;
191 | case VMX_EXIT_REASON_EXECUTE_RDMSR: emulate_rdmsr(cpu); break;
192 | case VMX_EXIT_REASON_EXECUTE_WRMSR: emulate_wrmsr(cpu); break;
193 | case VMX_EXIT_REASON_EXECUTE_XSETBV: emulate_xsetbv(cpu); break;
194 | case VMX_EXIT_REASON_EXECUTE_VMXON: emulate_vmxon(cpu); break;
195 | case VMX_EXIT_REASON_EXECUTE_VMCALL: emulate_vmcall(cpu); break;
196 | case VMX_EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED: handle_vmx_preemption(cpu); break;
197 | case VMX_EXIT_REASON_EPT_VIOLATION: handle_ept_violation(cpu); break;
198 | case VMX_EXIT_REASON_EXECUTE_RDTSC: emulate_rdtsc(cpu); break;
199 | case VMX_EXIT_REASON_EXECUTE_RDTSCP: emulate_rdtscp(cpu); break;
200 | // VMX instructions (except for VMXON and VMCALL)
201 | case VMX_EXIT_REASON_EXECUTE_INVEPT:
202 | case VMX_EXIT_REASON_EXECUTE_INVVPID:
203 | case VMX_EXIT_REASON_EXECUTE_VMCLEAR:
204 | case VMX_EXIT_REASON_EXECUTE_VMLAUNCH:
205 | case VMX_EXIT_REASON_EXECUTE_VMPTRLD:
206 | case VMX_EXIT_REASON_EXECUTE_VMPTRST:
207 | case VMX_EXIT_REASON_EXECUTE_VMREAD:
208 | case VMX_EXIT_REASON_EXECUTE_VMRESUME:
209 | case VMX_EXIT_REASON_EXECUTE_VMWRITE:
210 | case VMX_EXIT_REASON_EXECUTE_VMXOFF:
211 | case VMX_EXIT_REASON_EXECUTE_VMFUNC: handle_vmx_instruction(cpu); break;
212 |
213 | // unhandled VM-exit
214 | default:
215 | cpu->stop_virtualization = true;
216 | HV_LOG_ERROR("Unhandled VM-exit. Exit Reason: %u. RIP: %p.",
217 | reason.basic_exit_reason, vmx_vmread(VMCS_GUEST_RIP));
218 | break;
219 | }
220 | }
221 |
222 | // called for every vm-exit
223 | bool handle_vm_exit(guest_context* const ctx) {
224 | // get the current vcpu
225 | auto const cpu = reinterpret_cast<vcpu*>(_readfsbase_u64());
226 | cpu->ctx = ctx;
227 |
228 | vmx_vmexit_reason reason;
229 | reason.flags = static_cast<uint32_t>(vmx_vmread(VMCS_EXIT_REASON));
230 |
231 | // don't hide TSC overhead by default
232 | cpu->hide_vm_exit_overhead = false;
233 | cpu->stop_virtualization = false;
234 |
235 | dispatch_vm_exit(cpu, reason);
236 |
237 | vmentry_interrupt_information interrupt_info;
238 | interrupt_info.flags = static_cast<uint32_t>(
239 | vmx_vmread(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD));
240 |
241 | if (interrupt_info.valid) {
242 | HV_LOG_VERBOSE("Injecting interrupt into guest. Vector=%i. Error=%i.",
243 | interrupt_info.vector, vmx_vmread(VMCS_CTRL_VMENTRY_EXCEPTION_ERROR_CODE));
244 | }
245 |
246 | // restore guest state. the assembly code is responsible for restoring
247 | // RIP, CS, RFLAGS, RSP, SS, CR0, CR4, as well as the usual fields in
248 | // the guest_context structure. the C++ code is responsible for the rest.
249 | if (cpu->stop_virtualization) {
250 | // TODO: assert that CPL is 0
251 |
252 | // ensure that the control register shadows reflect the guest values
253 | vmx_vmwrite(VMCS_CTRL_CR0_READ_SHADOW, read_effective_guest_cr0().flags);
254 | vmx_vmwrite(VMCS_CTRL_CR4_READ_SHADOW, read_effective_guest_cr4().flags);
255 |
256 | // DR7
257 | __writedr(7, vmx_vmread(VMCS_GUEST_DR7));
258 |
259 | // MSRs
260 | __writemsr(IA32_SYSENTER_CS, vmx_vmread(VMCS_GUEST_SYSENTER_CS));
261 | __writemsr(IA32_SYSENTER_ESP, vmx_vmread(VMCS_GUEST_SYSENTER_ESP));
262 | __writemsr(IA32_SYSENTER_EIP, vmx_vmread(VMCS_GUEST_SYSENTER_EIP));
263 | __writemsr(IA32_PAT, vmx_vmread(VMCS_GUEST_PAT));
264 | __writemsr(IA32_DEBUGCTL, vmx_vmread(VMCS_GUEST_DEBUGCTL));
265 | #ifdef WIN7
266 | #else
267 | __writemsr(IA32_PERF_GLOBAL_CTRL, cpu->msr_exit_store.perf_global_ctrl.msr_data);
268 | #endif
269 |
270 | // CR3
271 | __writecr3(vmx_vmread(VMCS_GUEST_CR3));
272 |
273 | // GDT
274 | segment_descriptor_register_64 gdtr;
275 | gdtr.base_address = vmx_vmread(VMCS_GUEST_GDTR_BASE);
276 | gdtr.limit = static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_GDTR_LIMIT));
277 | _lgdt(&gdtr);
278 |
279 | // IDT
280 | segment_descriptor_register_64 idtr;
281 | idtr.base_address = vmx_vmread(VMCS_GUEST_IDTR_BASE);
282 | idtr.limit = static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_IDTR_LIMIT));
283 | __lidt(&idtr);
284 |
285 | segment_selector guest_tr;
286 | guest_tr.flags = static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_TR_SELECTOR));
287 |
288 | // TSS
289 | (reinterpret_cast<segment_descriptor_32*>(gdtr.base_address)
290 | + guest_tr.index)->type = SEGMENT_DESCRIPTOR_TYPE_TSS_AVAILABLE;
291 | write_tr(guest_tr.flags);
292 |
293 | // segment selectors
294 | write_ds(static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_DS_SELECTOR)));
295 | write_es(static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_ES_SELECTOR)));
296 | write_fs(static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_FS_SELECTOR)));
297 | write_gs(static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_GS_SELECTOR)));
298 | write_ldtr(static_cast<uint16_t>(vmx_vmread(VMCS_GUEST_LDTR_SELECTOR)));
299 |
300 | // FS and GS base address
301 | _writefsbase_u64(vmx_vmread(VMCS_GUEST_FS_BASE));
302 | _writegsbase_u64(vmx_vmread(VMCS_GUEST_GS_BASE));
303 |
304 | return true;
305 | }
306 |
307 | hide_vm_exit_overhead(cpu);
308 |
309 | // sync the vmcs state with the vcpu state
310 | vmx_vmwrite(VMCS_CTRL_TSC_OFFSET, cpu->tsc_offset);
311 | vmx_vmwrite(VMCS_GUEST_VMX_PREEMPTION_TIMER_VALUE, cpu->preemption_timer);
312 |
313 | cpu->ctx = nullptr;
314 |
315 | return false;
316 | }
317 |
318 | // called for every host interrupt
319 | void handle_host_interrupt(trap_frame* const frame) {
320 | switch (frame->vector) {
321 | // host NMIs
322 | case nmi: {
323 | auto ctrl = read_ctrl_proc_based();
324 | ctrl.nmi_window_exiting = 1;
325 | write_ctrl_proc_based(ctrl);
326 |
327 | auto const cpu = reinterpret_cast<vcpu*>(_readfsbase_u64());
328 | ++cpu->queued_nmis;
329 |
330 | break;
331 | }
332 | // host exceptions
333 | default: {
334 | // no registered exception handler
335 | if (!frame->r10 || !frame->r11) {
336 | HV_LOG_ERROR("Unhandled exception. RIP=%p. Vector=%u.",
337 | frame->rip, frame->vector);
338 | break;
339 | }
340 |
341 | HV_LOG_VERBOSE("Handling host exception. RIP=%p. Vector=%u",
342 | frame->rip, frame->vector);
343 |
344 | // jump to the exception handler
345 | frame->rip = frame->r10;
346 |
347 | auto const e = reinterpret_cast<host_exception_info*>(frame->r11);
348 |
349 | e->exception_occurred = true;
350 | e->vector = frame->vector;
351 | e->error = frame->error;
352 |
353 | // slightly helps prevent infinite exceptions
354 | frame->r10 = 0;
355 | frame->r11 = 0;
356 | }
357 | }
358 | }
359 |
360 | // virtualize the specified cpu. this assumes that execution is already
361 | // restricted to the desired logical processor.
362 | bool virtualize_cpu(vcpu* const cpu) {
363 | memset(cpu, 0, sizeof(*cpu));
364 |
365 | cache_cpu_data(cpu->cached);
366 |
367 | DbgPrint("[hv] Cached VCPU data.\n");
368 |
369 | if (!enable_vmx_operation(cpu)) {
370 | DbgPrint("[hv] Failed to enable VMX operation.\n");
371 | return false;
372 | }
373 |
374 | DbgPrint("[hv] Enabled VMX operation.\n");
375 |
376 | if (!enter_vmx_operation(cpu->vmxon)) {
377 | DbgPrint("[hv] Failed to enter VMX operation.\n");
378 | return false;
379 | }
380 |
381 | DbgPrint("[hv] Entered VMX operation.\n");
382 |
383 | if (!load_vmcs_pointer(cpu->vmcs)) {
384 | DbgPrint("[hv] Failed to load VMCS pointer.\n");
385 | vmx_vmxoff();
386 | return false;
387 | }
388 |
389 | DbgPrint("[hv] Loaded VMCS pointer.\n");
390 |
391 | prepare_external_structures(cpu);
392 |
393 | DbgPrint("[hv] Initialized external structures.\n");
394 |
395 | write_vmcs_ctrl_fields(cpu);
396 | write_vmcs_host_fields(cpu);
397 | write_vmcs_guest_fields();
398 |
399 | DbgPrint("[hv] Wrote VMCS fields.\n");
400 |
401 | // TODO: should these fields really be set here? lol
402 | cpu->ctx = nullptr;
403 | cpu->queued_nmis = 0;
404 | cpu->tsc_offset = 0;
405 | cpu->preemption_timer = 0;
406 | cpu->vm_exit_tsc_overhead = 0;
407 | cpu->vm_exit_mperf_overhead = 0;
408 | cpu->vm_exit_ref_tsc_overhead = 0;
409 |
410 | DbgPrint("Launching VM on VCPU#%i...\n", KeGetCurrentProcessorIndex() + 1);
411 |
412 | if (!vm_launch()) {
413 | DbgPrint("[hv] VMLAUNCH failed. Instruction error = %lli.\n",
414 | vmx_vmread(VMCS_VM_INSTRUCTION_ERROR));
415 |
416 | vmx_vmxoff();
417 | return false;
418 | }
419 |
420 | DbgPrint("[hv] Launched VM on VCPU#%i.\n", KeGetCurrentProcessorIndex() + 1);
421 |
422 | hypercall_input input;
423 | input.code = hypercall_ping;
424 | input.key = hypercall_key;
425 |
426 | if (vmx_vmcall(input) == hypervisor_signature)
427 | DbgPrint("[hv] Successfully pinged the hypervisor.\n");
428 |
429 | cpu->vm_exit_tsc_overhead = measure_vm_exit_tsc_overhead();
430 | cpu->vm_exit_mperf_overhead = measure_vm_exit_mperf_overhead();
431 | #ifdef WIN7
432 | #else
433 | cpu->vm_exit_ref_tsc_overhead = measure_vm_exit_ref_tsc_overhead();
434 | DbgPrint("[hv] Measured VM-exit overhead (TSC = %llX).\n",
435 | cpu->vm_exit_tsc_overhead);
436 | #endif
437 | DbgPrint("[hv] Measured VM-exit overhead (MPERF = %llX).\n",
438 | cpu->vm_exit_mperf_overhead);
439 | DbgPrint("[hv] Measured VM-exit overhead (CPU_CLK_UNHALTED.REF_TSC = %llX).\n",
440 | cpu->vm_exit_ref_tsc_overhead);
441 |
442 | return true;
443 | }
444 |
445 | } // namespace hv
446 |
447 |
--------------------------------------------------------------------------------
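
The default branch of handle_host_interrupt() above consumes a convention established by the exception-safe memory routines (see exception-routines.h/.asm): before touching possibly-invalid memory, the routine places a recovery RIP in R10 and a host_exception_info pointer in R11, so a fault taken in root mode resumes at the recovery address instead of taking down the host. The consuming side, as already used by read_guest_physical_memory() in mm.cpp (dst, src and size are placeholders):

    hv::host_exception_info e;
    hv::memcpy_safe(e, dst, src, size);
    if (e.exception_occurred) {
      // e.vector and e.error were filled in by handle_host_interrupt()
    }
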
/ReloadDbg/hv/hv/vcpu.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "guest-context.h"
4 | #include "page-tables.h"
5 | #include "gdt.h"
6 | #include "idt.h"
7 | #include "ept.h"
8 | #include "vmx.h"
9 | #include "timing.h"
10 |
11 | namespace hv {
12 |
13 | // size of the host stack for handling vm-exits
14 | inline constexpr size_t host_stack_size = 0x6000;
15 |
16 | // guest virtual-processor identifier
17 | inline constexpr uint16_t guest_vpid = 1;
18 |
19 | struct vcpu_cached_data {
20 | // maximum number of bits in a physical address (MAXPHYSADDR)
21 | uint64_t max_phys_addr;
22 |
23 | // reserved bits in CR0/CR4
24 | uint64_t vmx_cr0_fixed0;
25 | uint64_t vmx_cr0_fixed1;
26 | uint64_t vmx_cr4_fixed0;
27 | uint64_t vmx_cr4_fixed1;
28 |
29 | // mask of unsupported processor state components for XCR0
30 | uint64_t xcr0_unsupported_mask;
31 |
32 | // IA32_FEATURE_CONTROL
33 | ia32_feature_control_register feature_control;
34 | ia32_feature_control_register guest_feature_control;
35 |
36 | // IA32_VMX_MISC
37 | ia32_vmx_misc_register vmx_misc;
38 |
39 | // CPUID 0x01
40 | cpuid_eax_01 cpuid_01;
41 | };
42 |
43 | struct vcpu {
44 | // 4 KiB vmxon region
45 | alignas(0x1000) vmxon vmxon;
46 |
47 | // 4 KiB vmcs region
48 | alignas(0x1000) vmcs vmcs;
49 |
50 | // 4 KiB msr bitmap
51 | alignas(0x1000) vmx_msr_bitmap msr_bitmap;
52 |
53 | // host stack used for handling vm-exits
54 | alignas(0x1000) uint8_t host_stack[host_stack_size];
55 |
56 | // host interrupt descriptor table
57 | alignas(0x1000) segment_descriptor_interrupt_gate_64 host_idt[host_idt_descriptor_count];
58 |
59 | // host global descriptor table
60 | alignas(0x1000) segment_descriptor_32 host_gdt[host_gdt_descriptor_count];
61 |
62 | // host task state segment
63 | alignas(0x1000) task_state_segment_64 host_tss;
64 |
65 | // EPT paging structures
66 | alignas(0x1000) vcpu_ept_data ept;
67 |
68 | // vm-exit MSR store area
69 | struct alignas(0x10) {
70 | vmx_msr_entry tsc;
71 | vmx_msr_entry perf_global_ctrl;
72 | vmx_msr_entry aperf;
73 | vmx_msr_entry mperf;
74 | } msr_exit_store;
75 |
76 | // vm-entry MSR load area
77 | struct alignas(0x10) {
78 | vmx_msr_entry aperf;
79 | vmx_msr_entry mperf;
80 | } msr_entry_load;
81 |
82 | // cached values that are assumed to NEVER change
83 | vcpu_cached_data cached;
84 |
85 | // pointer to the current guest context, set in exit-handler
86 | guest_context* ctx;
87 |
88 | // the number of NMIs that need to be delivered
89 | uint32_t volatile queued_nmis;
90 |
91 | // current TSC offset
92 | uint64_t tsc_offset;
93 |
94 | // current preemption timer
95 | uint64_t preemption_timer;
96 |
97 | // the overhead caused by world-transitions
98 | uint64_t vm_exit_tsc_overhead;
99 | uint64_t vm_exit_mperf_overhead;
100 | uint64_t vm_exit_ref_tsc_overhead;
101 |
102 | // whether to use TSC offsetting for the current vm-exit--false by default
103 | bool hide_vm_exit_overhead;
104 |
105 | // whether to devirtualize the current VCPU
106 | bool stop_virtualization;
107 | };
108 |
109 | // virtualize the specified cpu. this assumes that execution is already
110 | // restricted to the desired logical processor.
111 | bool virtualize_cpu(vcpu* cpu);
112 |
113 | } // namespace hv
114 |
115 |
--------------------------------------------------------------------------------
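
One detail worth calling out: there is no "current vcpu" global. The host FS base doubles as the per-processor vcpu pointer, wired up in vmcs.cpp and consumed at the top of every vm-exit in vcpu.cpp:

    // vmcs.cpp: point the host FS base at this vcpu...
    vmx_vmwrite(VMCS_HOST_FS_BASE, reinterpret_cast<uint64_t>(cpu));

    // vcpu.cpp: ...so any exit handler can recover it in one instruction
    auto const cpu = reinterpret_cast<vcpu*>(_readfsbase_u64());
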
/ReloadDbg/hv/hv/vm-exit.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ; defined in guest-context.h
4 | guest_context struct
5 | ; general-purpose registers
6 | $rax qword ?
7 | $rcx qword ?
8 | $rdx qword ?
9 | $rbx qword ?
10 | qword ? ; padding
11 | $rbp qword ?
12 | $rsi qword ?
13 | $rdi qword ?
14 | $r8 qword ?
15 | $r9 qword ?
16 | $r10 qword ?
17 | $r11 qword ?
18 | $r12 qword ?
19 | $r13 qword ?
20 | $r14 qword ?
21 | $r15 qword ?
22 |
23 | ; control registers
24 | $cr2 qword ?
25 | $cr8 qword ?
26 |
27 | ; debug registers
28 | $dr0 qword ?
29 | $dr1 qword ?
30 | $dr2 qword ?
31 | $dr3 qword ?
32 | $dr6 qword ?
33 |
34 | ; SSE registers
35 | $xmm0 oword ?
36 | $xmm1 oword ?
37 | $xmm2 oword ?
38 | $xmm3 oword ?
39 | $xmm4 oword ?
40 | $xmm5 oword ?
41 | $xmm6 oword ?
42 | $xmm7 oword ?
43 | $xmm8 oword ?
44 | $xmm9 oword ?
45 | $xmm10 oword ?
46 | $xmm11 oword ?
47 | $xmm12 oword ?
48 | $xmm13 oword ?
49 | $xmm14 oword ?
50 | $xmm15 oword ?
51 | guest_context ends
52 |
53 | extern ?handle_vm_exit@hv@@YA_NQEAUguest_context@1@@Z : proc
54 |
55 | ; execution starts here after a vm-exit
56 | ?vm_exit@hv@@YAXXZ proc
57 | ; allocate space on the stack to store the guest context
58 | sub rsp, 1C0h
59 |
60 | ; general-purpose registers
61 | mov guest_context.$rax[rsp], rax
62 | mov guest_context.$rcx[rsp], rcx
63 | mov guest_context.$rdx[rsp], rdx
64 | mov guest_context.$rbx[rsp], rbx
65 | mov guest_context.$rbp[rsp], rbp
66 | mov guest_context.$rsi[rsp], rsi
67 | mov guest_context.$rdi[rsp], rdi
68 | mov guest_context.$r8[rsp], r8
69 | mov guest_context.$r9[rsp], r9
70 | mov guest_context.$r10[rsp], r10
71 | mov guest_context.$r11[rsp], r11
72 | mov guest_context.$r12[rsp], r12
73 | mov guest_context.$r13[rsp], r13
74 | mov guest_context.$r14[rsp], r14
75 | mov guest_context.$r15[rsp], r15
76 |
77 | ; control registers
78 | mov rax, cr2
79 | mov guest_context.$cr2[rsp], rax
80 | mov rax, cr8
81 | mov guest_context.$cr8[rsp], rax
82 |
83 | ; debug registers
84 | mov rax, dr0
85 | mov guest_context.$dr0[rsp], rax
86 | mov rax, dr1
87 | mov guest_context.$dr1[rsp], rax
88 | mov rax, dr2
89 | mov guest_context.$dr2[rsp], rax
90 | mov rax, dr3
91 | mov guest_context.$dr3[rsp], rax
92 | mov rax, dr6
93 | mov guest_context.$dr6[rsp], rax
94 |
95 | ; SSE registers
96 | movaps guest_context.$xmm0[rsp], xmm0
97 | movaps guest_context.$xmm1[rsp], xmm1
98 | movaps guest_context.$xmm2[rsp], xmm2
99 | movaps guest_context.$xmm3[rsp], xmm3
100 | movaps guest_context.$xmm4[rsp], xmm4
101 | movaps guest_context.$xmm5[rsp], xmm5
102 | movaps guest_context.$xmm6[rsp], xmm6
103 | movaps guest_context.$xmm7[rsp], xmm7
104 | movaps guest_context.$xmm8[rsp], xmm8
105 | movaps guest_context.$xmm9[rsp], xmm9
106 | movaps guest_context.$xmm10[rsp], xmm10
107 | movaps guest_context.$xmm11[rsp], xmm11
108 | movaps guest_context.$xmm12[rsp], xmm12
109 | movaps guest_context.$xmm13[rsp], xmm13
110 | movaps guest_context.$xmm14[rsp], xmm14
111 | movaps guest_context.$xmm15[rsp], xmm15
112 |
113 | ; first argument is the guest context
114 | mov rcx, rsp
115 |
116 | ; call handle_vm_exit
117 | sub rsp, 28h
118 | call ?handle_vm_exit@hv@@YA_NQEAUguest_context@1@@Z
119 | add rsp, 28h
120 |
121 | ; SSE registers
122 | movaps xmm0, guest_context.$xmm0[rsp]
123 | movaps xmm1, guest_context.$xmm1[rsp]
124 | movaps xmm2, guest_context.$xmm2[rsp]
125 | movaps xmm3, guest_context.$xmm3[rsp]
126 | movaps xmm4, guest_context.$xmm4[rsp]
127 | movaps xmm5, guest_context.$xmm5[rsp]
128 | movaps xmm6, guest_context.$xmm6[rsp]
129 | movaps xmm7, guest_context.$xmm7[rsp]
130 | movaps xmm8, guest_context.$xmm8[rsp]
131 | movaps xmm9, guest_context.$xmm9[rsp]
132 | movaps xmm10, guest_context.$xmm10[rsp]
133 | movaps xmm11, guest_context.$xmm11[rsp]
134 | movaps xmm12, guest_context.$xmm12[rsp]
135 | movaps xmm13, guest_context.$xmm13[rsp]
136 | movaps xmm14, guest_context.$xmm14[rsp]
137 | movaps xmm15, guest_context.$xmm15[rsp]
138 |
139 | ; handle_vm_exit returns true if we should stop virtualization
140 | mov r15, rax
141 |
142 | ; debug registers
143 | mov rax, guest_context.$dr0[rsp]
144 | mov dr0, rax
145 | mov rax, guest_context.$dr1[rsp]
146 | mov dr1, rax
147 | mov rax, guest_context.$dr2[rsp]
148 | mov dr2, rax
149 | mov rax, guest_context.$dr3[rsp]
150 | mov dr3, rax
151 | mov rax, guest_context.$dr6[rsp]
152 | mov dr6, rax
153 |
154 | ; control registers
155 | mov rax, guest_context.$cr2[rsp]
156 | mov cr2, rax
157 | mov rax, guest_context.$cr8[rsp]
158 | mov cr8, rax
159 |
160 | ; general-purpose registers
161 | mov rax, guest_context.$rax[rsp]
162 | mov rcx, guest_context.$rcx[rsp]
163 | mov rdx, guest_context.$rdx[rsp]
164 | mov rbx, guest_context.$rbx[rsp]
165 | mov rbp, guest_context.$rbp[rsp]
166 | mov rsi, guest_context.$rsi[rsp]
167 | mov rdi, guest_context.$rdi[rsp]
168 | mov r8, guest_context.$r8[rsp]
169 | mov r9, guest_context.$r9[rsp]
170 | mov r10, guest_context.$r10[rsp]
171 | mov r11, guest_context.$r11[rsp]
172 | mov r12, guest_context.$r12[rsp]
173 | mov r13, guest_context.$r13[rsp]
174 | mov r14, guest_context.$r14[rsp]
175 |
176 | ; check the return value of handle_vm_exit() to see if we should terminate
177 | ; the virtual machine
178 | test r15b, r15b
179 | mov r15, guest_context.$r15[rsp]
180 | jnz stop_virtualization
181 |
182 | ; if handle_exit returned false, perform a vm-enter as usual
183 | vmresume
184 |
185 | stop_virtualization:
186 | ; we'll be dirtying these registers in order to setup the
187 | ; stack so we need to store and restore them before we can use them.
188 | ; also note that we're not allocating any stack space for the trap
189 | ; frame since we can just reuse the space allocated for the guest
190 | ; context.
191 | push rax
192 | push rdx
193 | push rbp
194 | lea rbp, [rsp + 38h]
195 |
196 | ; push SS
197 | mov rdx, 0804h; VMCS_GUEST_SS_SELECTOR
198 | vmread rax, rdx
199 | mov [rbp - 00h], rax
200 |
201 | ; push RSP
202 | mov rdx, 681Ch; VMCS_GUEST_RSP
203 | vmread rax, rdx
204 | mov [rbp - 08h], rax
205 |
206 | ; push RFLAGS
207 | mov rdx, 6820h; VMCS_GUEST_RFLAGS
208 | vmread rax, rdx
209 | mov [rbp - 10h], rax
210 |
211 | ; push CS
212 | mov rdx, 0802h; VMCS_GUEST_CS_SELECTOR
213 | vmread rax, rdx
214 | mov [rbp - 18h], rax
215 |
216 | ; push RIP
217 | mov rdx, 681Eh; VMCS_GUEST_RIP
218 | vmread rax, rdx
219 | mov [rbp - 20h], rax
220 |
221 | ; the C++ exit-handler needs to ensure that the control register shadows
222 | ; contain the current guest control register values (even the guest-owned
223 | ; bits!) before returning.
224 |
225 | ; store cr0 in rax
226 | mov rax, 6004h ; VMCS_CTRL_CR0_READ_SHADOW
227 | vmread rax, rax
228 |
229 | ; store cr4 in rdx
230 | mov rdx, 6006h ; VMCS_CTRL_CR4_READ_SHADOW
231 | vmread rdx, rdx
232 |
233 | ; execute vmxoff before we restore cr0 and cr4
234 | vmxoff
235 |
236 | ; restore cr0 and cr4
237 | mov cr0, rax
238 | mov cr4, rdx
239 |
240 | ; restore the dirty registers
241 | pop rbp
242 | pop rdx
243 | pop rax
244 |
245 | ; we use iretq in order to do the following all in one instruction:
246 | ;
247 | ; pop RIP
248 | ; pop CS
249 | ; pop RFLAGS
250 | ; pop RSP
251 | ; pop SS
252 | ;
253 | iretq
254 |
255 | ?vm_exit@hv@@YAXXZ endp
256 |
257 | end
258 |
259 |
--------------------------------------------------------------------------------
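
The stop_virtualization path above hand-builds the five-qword frame that iretq expects, reusing the stack space that was allocated for the guest context. After the three pops, RSP lands on the RIP slot, and the slots sit in ascending order exactly as iretq consumes them:

    ; [rbp - 20h]  RIP      <- RSP after the pops
    ; [rbp - 18h]  CS
    ; [rbp - 10h]  RFLAGS
    ; [rbp - 08h]  RSP
    ; [rbp - 00h]  SS
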
/ReloadDbg/hv/hv/vm-launch.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ; bool vm_launch();
4 | ?vm_launch@hv@@YA_NXZ proc
5 | ; set VMCS_GUEST_RSP to the current value of RSP
6 | mov rax, 681Ch
7 | vmwrite rax, rsp
8 |
9 | ; set VMCS_GUEST_RIP to the address of successful_launch
10 | mov rax, 681Eh
11 | mov rdx, successful_launch
12 | vmwrite rax, rdx
13 |
14 | vmlaunch
15 |
16 | ; if we reached here, then we failed to launch
17 | xor al, al
18 | ret
19 |
20 | successful_launch:
21 | mov al, 1
22 | ret
23 | ?vm_launch@hv@@YA_NXZ endp
24 |
25 | end
26 |
27 |
--------------------------------------------------------------------------------
/ReloadDbg/hv/hv/vmcs.cpp:
--------------------------------------------------------------------------------
1 | #include "vmcs.h"
2 | #include "hv.h"
3 | #include "vmx.h"
4 | #include "vcpu.h"
5 | #include "segment.h"
6 | #include "timing.h"
7 |
8 | namespace hv {
9 |
10 | // defined in vm-exit.asm
11 | void vm_exit();
12 |
13 | // setup the VMCS control fields
14 | void write_vmcs_ctrl_fields(vcpu* const cpu) {
15 | // 3.26.2
16 |
17 | // 3.24.6.1
18 | ia32_vmx_pinbased_ctls_register pin_based_ctrl;
19 | pin_based_ctrl.flags = 0;
20 | pin_based_ctrl.virtual_nmi = 1;
21 | pin_based_ctrl.nmi_exiting = 1;
22 | //pin_based_ctrl.activate_vmx_preemption_timer = 1;
23 | write_ctrl_pin_based_safe(pin_based_ctrl);
24 |
25 | // 3.24.6.2
26 | ia32_vmx_procbased_ctls_register proc_based_ctrl;
27 | proc_based_ctrl.flags = 0;
28 | #ifndef NDEBUG
29 | proc_based_ctrl.cr3_load_exiting = 1;
30 | proc_based_ctrl.cr3_store_exiting = 1;
31 | #endif
32 | proc_based_ctrl.use_msr_bitmaps = 1;
33 | //proc_based_ctrl.use_tsc_offsetting = 1;
34 | //proc_based_ctrl.rdtsc_exiting = 1;
35 | proc_based_ctrl.activate_secondary_controls = 1;
36 | write_ctrl_proc_based_safe(proc_based_ctrl);
37 |
38 | // 3.24.6.2
39 | ia32_vmx_procbased_ctls2_register proc_based_ctrl2;
40 | proc_based_ctrl2.flags = 0;
41 | proc_based_ctrl2.enable_ept = 1;
42 | proc_based_ctrl2.enable_rdtscp = 1;
43 | proc_based_ctrl2.enable_vpid = 1;
44 | proc_based_ctrl2.enable_invpcid = 1;
45 | proc_based_ctrl2.enable_xsaves = 1;
46 | proc_based_ctrl2.enable_user_wait_pause = 1;
47 | proc_based_ctrl2.conceal_vmx_from_pt = 1;
48 | write_ctrl_proc_based2_safe(proc_based_ctrl2);
49 |
50 | // 3.24.7
51 | ia32_vmx_exit_ctls_register exit_ctrl;
52 | exit_ctrl.flags = 0;
53 | exit_ctrl.save_debug_controls = 1;
54 | exit_ctrl.host_address_space_size = 1;
55 | exit_ctrl.save_ia32_pat = 1;
56 | exit_ctrl.load_ia32_pat = 1;
57 | exit_ctrl.load_ia32_perf_global_ctrl = 1;
58 | exit_ctrl.conceal_vmx_from_pt = 1;
59 | write_ctrl_exit_safe(exit_ctrl);
60 |
61 | // 3.24.8
62 | ia32_vmx_entry_ctls_register entry_ctrl;
63 | entry_ctrl.flags = 0;
64 | entry_ctrl.load_debug_controls = 1;
65 | entry_ctrl.ia32e_mode_guest = 1;
66 | entry_ctrl.load_ia32_pat = 1;
67 | entry_ctrl.load_ia32_perf_global_ctrl = 1;
68 | entry_ctrl.conceal_vmx_from_pt = 1;
69 | write_ctrl_entry_safe(entry_ctrl);
70 |
71 | // 3.24.6.3
72 | vmx_vmwrite(VMCS_CTRL_EXCEPTION_BITMAP, 0);
73 |
74 | // set up the mask and match in such a way so
75 | // that a vm-exit is never triggered for a pagefault
76 | vmx_vmwrite(VMCS_CTRL_PAGEFAULT_ERROR_CODE_MASK, 0);
77 | vmx_vmwrite(VMCS_CTRL_PAGEFAULT_ERROR_CODE_MATCH, 0);
78 |
79 | // 3.24.6.5
80 | vmx_vmwrite(VMCS_CTRL_TSC_OFFSET, 0);
81 |
82 | // 3.24.6.6
83 | #ifdef NDEBUG
84 | // only vm-exit when guest tries to change a reserved bit
85 | vmx_vmwrite(VMCS_CTRL_CR0_GUEST_HOST_MASK,
86 | cpu->cached.vmx_cr0_fixed0 | ~cpu->cached.vmx_cr0_fixed1 |
87 | CR0_CACHE_DISABLE_FLAG | CR0_WRITE_PROTECT_FLAG);
88 | vmx_vmwrite(VMCS_CTRL_CR4_GUEST_HOST_MASK,
89 | cpu->cached.vmx_cr4_fixed0 | ~cpu->cached.vmx_cr4_fixed1);
90 | #else
91 | // vm-exit on every CR0/CR4 modification
92 | vmx_vmwrite(VMCS_CTRL_CR0_GUEST_HOST_MASK, 0xFFFFFFFF'FFFFFFFF);
93 | vmx_vmwrite(VMCS_CTRL_CR4_GUEST_HOST_MASK, 0xFFFFFFFF'FFFFFFFF);
94 | #endif
95 | vmx_vmwrite(VMCS_CTRL_CR0_READ_SHADOW, __readcr0());
96 | vmx_vmwrite(VMCS_CTRL_CR4_READ_SHADOW, __readcr4() & ~CR4_VMX_ENABLE_FLAG);
97 |
98 | // 3.24.6.7
99 | // try to trigger the least amount of CR3 exits as possible
100 | vmx_vmwrite(VMCS_CTRL_CR3_TARGET_COUNT, 1);
101 | vmx_vmwrite(VMCS_CTRL_CR3_TARGET_VALUE_0, ghv.system_cr3.flags);
102 |
103 | // 3.24.6.9
104 | vmx_vmwrite(VMCS_CTRL_MSR_BITMAP_ADDRESS, MmGetPhysicalAddress(&cpu->msr_bitmap).QuadPart);
105 |
106 | // 3.24.6.11
107 | ept_pointer eptp;
108 | eptp.flags = 0;
109 | eptp.memory_type = MEMORY_TYPE_WRITE_BACK;
110 | eptp.page_walk_length = 3;
111 | eptp.enable_access_and_dirty_flags = 0;
112 | eptp.enable_supervisor_shadow_stack_pages = 0;
113 | eptp.page_frame_number = MmGetPhysicalAddress(&cpu->ept.pml4).QuadPart >> 12;
114 | vmx_vmwrite(VMCS_CTRL_EPT_POINTER, eptp.flags);
115 |
116 | // 3.24.6.12
117 | vmx_vmwrite(VMCS_CTRL_VIRTUAL_PROCESSOR_IDENTIFIER, guest_vpid);
118 |
119 | // 3.24.7.2
120 | cpu->msr_exit_store.tsc.msr_idx = IA32_TIME_STAMP_COUNTER;
121 | #ifdef WIN7
122 | #else
123 | cpu->msr_exit_store.perf_global_ctrl.msr_idx = IA32_PERF_GLOBAL_CTRL;
124 | #endif
125 | cpu->msr_exit_store.aperf.msr_idx = IA32_APERF;
126 | cpu->msr_exit_store.mperf.msr_idx = IA32_MPERF;
127 | vmx_vmwrite(VMCS_CTRL_VMEXIT_MSR_STORE_COUNT,
128 | sizeof(cpu->msr_exit_store) / 16);
129 | vmx_vmwrite(VMCS_CTRL_VMEXIT_MSR_STORE_ADDRESS,
130 | MmGetPhysicalAddress(&cpu->msr_exit_store).QuadPart);
131 |
132 | // 3.24.7.2
133 | vmx_vmwrite(VMCS_CTRL_VMEXIT_MSR_LOAD_COUNT, 0);
134 | vmx_vmwrite(VMCS_CTRL_VMEXIT_MSR_LOAD_ADDRESS, 0);
135 |
136 | // 3.24.8.2
137 | cpu->msr_entry_load.aperf.msr_idx = IA32_APERF;
138 | cpu->msr_entry_load.mperf.msr_idx = IA32_MPERF;
139 | cpu->msr_entry_load.aperf.msr_data = __readmsr(IA32_APERF);
140 | cpu->msr_entry_load.mperf.msr_data = __readmsr(IA32_MPERF);
141 | vmx_vmwrite(VMCS_CTRL_VMENTRY_MSR_LOAD_COUNT,
142 | sizeof(cpu->msr_entry_load) / 16);
143 | vmx_vmwrite(VMCS_CTRL_VMENTRY_MSR_LOAD_ADDRESS,
144 | MmGetPhysicalAddress(&cpu->msr_entry_load).QuadPart);
145 |
146 | // 3.24.8.3
147 | vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, 0);
148 | vmx_vmwrite(VMCS_CTRL_VMENTRY_EXCEPTION_ERROR_CODE, 0);
149 | vmx_vmwrite(VMCS_CTRL_VMENTRY_INSTRUCTION_LENGTH, 0);
150 | }
151 |
152 | // setup the VMCS host fields
153 | void write_vmcs_host_fields(vcpu const* const cpu) {
154 | // 3.24.5
155 | // 3.26.2
156 |
157 | cr3 host_cr3;
158 | host_cr3.flags = 0;
159 | host_cr3.page_level_cache_disable = 0;
160 | host_cr3.page_level_write_through = 0;
161 | host_cr3.address_of_page_directory =
162 | MmGetPhysicalAddress(&ghv.host_page_tables.pml4).QuadPart >> 12;
163 | vmx_vmwrite(VMCS_HOST_CR3, host_cr3.flags);
164 |
165 | cr4 host_cr4;
166 | host_cr4.flags = __readcr4();
167 |
168 | // these are flags that may or may not be set by Windows
169 | host_cr4.fsgsbase_enable = 1;
170 | host_cr4.os_xsave = 1;
171 | host_cr4.smap_enable = 0;
172 | host_cr4.smep_enable = 0;
173 |
174 | vmx_vmwrite(VMCS_HOST_CR0, __readcr0());
175 | vmx_vmwrite(VMCS_HOST_CR4, host_cr4.flags);
176 |
177 | // ensure that rsp is NOT aligned to 16 bytes when execution starts
178 | auto const rsp = ((reinterpret_cast<uint64_t>(cpu->host_stack)
179 | + host_stack_size) & ~0b1111ull) - 8;
180 |
181 | vmx_vmwrite(VMCS_HOST_RSP, rsp);
182 | vmx_vmwrite(VMCS_HOST_RIP, reinterpret_cast<uint64_t>(vm_exit));
183 |
184 | vmx_vmwrite(VMCS_HOST_CS_SELECTOR, host_cs_selector.flags);
185 | vmx_vmwrite(VMCS_HOST_SS_SELECTOR, 0x00);
186 | vmx_vmwrite(VMCS_HOST_DS_SELECTOR, 0x00);
187 | vmx_vmwrite(VMCS_HOST_ES_SELECTOR, 0x00);
188 | vmx_vmwrite(VMCS_HOST_FS_SELECTOR, 0x00);
189 | vmx_vmwrite(VMCS_HOST_GS_SELECTOR, 0x00);
190 | vmx_vmwrite(VMCS_HOST_TR_SELECTOR, host_tr_selector.flags);
191 |
192 | vmx_vmwrite(VMCS_HOST_FS_BASE, reinterpret_cast<uint64_t>(cpu));
193 | vmx_vmwrite(VMCS_HOST_GS_BASE, 0);
194 | vmx_vmwrite(VMCS_HOST_TR_BASE, reinterpret_cast<uint64_t>(&cpu->host_tss));
195 | vmx_vmwrite(VMCS_HOST_GDTR_BASE, reinterpret_cast<uint64_t>(&cpu->host_gdt));
196 | vmx_vmwrite(VMCS_HOST_IDTR_BASE, reinterpret_cast<uint64_t>(&cpu->host_idt));
197 |
198 | vmx_vmwrite(VMCS_HOST_SYSENTER_CS, 0);
199 | vmx_vmwrite(VMCS_HOST_SYSENTER_ESP, 0);
200 | vmx_vmwrite(VMCS_HOST_SYSENTER_EIP, 0);
201 |
202 | // 3.11.12.4
203 | // configure PAT as if it wasn't supported (i.e. default settings after a reset)
204 | ia32_pat_register host_pat;
205 | host_pat.flags = 0;
206 | host_pat.pa0 = MEMORY_TYPE_WRITE_BACK;
207 | host_pat.pa1 = MEMORY_TYPE_WRITE_THROUGH;
208 | host_pat.pa2 = MEMORY_TYPE_UNCACHEABLE_MINUS;
209 | host_pat.pa3 = MEMORY_TYPE_UNCACHEABLE;
210 | host_pat.pa4 = MEMORY_TYPE_WRITE_BACK;
211 | host_pat.pa5 = MEMORY_TYPE_WRITE_THROUGH;
212 | host_pat.pa6 = MEMORY_TYPE_UNCACHEABLE_MINUS;
213 | host_pat.pa7 = MEMORY_TYPE_UNCACHEABLE;
214 | vmx_vmwrite(VMCS_HOST_PAT, host_pat.flags);
215 |
216 | // disable every PMC
217 | vmx_vmwrite(VMCS_HOST_PERF_GLOBAL_CTRL, 0);
218 | }
219 |
220 | // setup the guest state in the VMCS so that it mirrors the currently running system
221 | void write_vmcs_guest_fields() {
222 | // 3.24.4
223 | // 3.26.3
224 |
225 | vmx_vmwrite(VMCS_GUEST_CR3, __readcr3());
226 |
227 | vmx_vmwrite(VMCS_GUEST_CR0, __readcr0());
228 | vmx_vmwrite(VMCS_GUEST_CR4, __readcr4());
229 |
230 | vmx_vmwrite(VMCS_GUEST_DR7, __readdr(7));
231 |
232 | // RIP and RSP are set in vm-launch.asm
233 | vmx_vmwrite(VMCS_GUEST_RSP, 0);
234 | vmx_vmwrite(VMCS_GUEST_RIP, 0);
235 |
236 | vmx_vmwrite(VMCS_GUEST_RFLAGS, __readeflags());
237 |
238 | vmx_vmwrite(VMCS_GUEST_CS_SELECTOR, read_cs().flags);
239 | vmx_vmwrite(VMCS_GUEST_SS_SELECTOR, read_ss().flags);
240 | vmx_vmwrite(VMCS_GUEST_DS_SELECTOR, read_ds().flags);
241 | vmx_vmwrite(VMCS_GUEST_ES_SELECTOR, read_es().flags);
242 | vmx_vmwrite(VMCS_GUEST_FS_SELECTOR, read_fs().flags);
243 | vmx_vmwrite(VMCS_GUEST_GS_SELECTOR, read_gs().flags);
244 | vmx_vmwrite(VMCS_GUEST_TR_SELECTOR, read_tr().flags);
245 | vmx_vmwrite(VMCS_GUEST_LDTR_SELECTOR, read_ldtr().flags);
246 |
247 | segment_descriptor_register_64 gdtr, idtr;
248 | _sgdt(&gdtr);
249 | __sidt(&idtr);
250 |
251 | vmx_vmwrite(VMCS_GUEST_CS_BASE, segment_base(gdtr, read_cs()));
252 | vmx_vmwrite(VMCS_GUEST_SS_BASE, segment_base(gdtr, read_ss()));
253 | vmx_vmwrite(VMCS_GUEST_DS_BASE, segment_base(gdtr, read_ds()));
254 | vmx_vmwrite(VMCS_GUEST_ES_BASE, segment_base(gdtr, read_es()));
255 | vmx_vmwrite(VMCS_GUEST_FS_BASE, __readmsr(IA32_FS_BASE));
256 | vmx_vmwrite(VMCS_GUEST_GS_BASE, __readmsr(IA32_GS_BASE));
257 | vmx_vmwrite(VMCS_GUEST_TR_BASE, segment_base(gdtr, read_tr()));
258 | vmx_vmwrite(VMCS_GUEST_LDTR_BASE, segment_base(gdtr, read_ldtr()));
259 |
260 | vmx_vmwrite(VMCS_GUEST_CS_LIMIT, __segmentlimit(read_cs().flags));
261 | vmx_vmwrite(VMCS_GUEST_SS_LIMIT, __segmentlimit(read_ss().flags));
262 | vmx_vmwrite(VMCS_GUEST_DS_LIMIT, __segmentlimit(read_ds().flags));
263 | vmx_vmwrite(VMCS_GUEST_ES_LIMIT, __segmentlimit(read_es().flags));
264 | vmx_vmwrite(VMCS_GUEST_FS_LIMIT, __segmentlimit(read_fs().flags));
265 | vmx_vmwrite(VMCS_GUEST_GS_LIMIT, __segmentlimit(read_gs().flags));
266 | vmx_vmwrite(VMCS_GUEST_TR_LIMIT, __segmentlimit(read_tr().flags));
267 | vmx_vmwrite(VMCS_GUEST_LDTR_LIMIT, __segmentlimit(read_ldtr().flags));
268 |
269 | vmx_vmwrite(VMCS_GUEST_CS_ACCESS_RIGHTS, segment_access(gdtr, read_cs()).flags);
270 | vmx_vmwrite(VMCS_GUEST_SS_ACCESS_RIGHTS, segment_access(gdtr, read_ss()).flags);
271 | vmx_vmwrite(VMCS_GUEST_DS_ACCESS_RIGHTS, segment_access(gdtr, read_ds()).flags);
272 | vmx_vmwrite(VMCS_GUEST_ES_ACCESS_RIGHTS, segment_access(gdtr, read_es()).flags);
273 | vmx_vmwrite(VMCS_GUEST_FS_ACCESS_RIGHTS, segment_access(gdtr, read_fs()).flags);
274 | vmx_vmwrite(VMCS_GUEST_GS_ACCESS_RIGHTS, segment_access(gdtr, read_gs()).flags);
275 | vmx_vmwrite(VMCS_GUEST_TR_ACCESS_RIGHTS, segment_access(gdtr, read_tr()).flags);
276 | vmx_vmwrite(VMCS_GUEST_LDTR_ACCESS_RIGHTS, segment_access(gdtr, read_ldtr()).flags);
277 |
278 | vmx_vmwrite(VMCS_GUEST_GDTR_BASE, gdtr.base_address);
279 | vmx_vmwrite(VMCS_GUEST_IDTR_BASE, idtr.base_address);
280 |
281 | vmx_vmwrite(VMCS_GUEST_GDTR_LIMIT, gdtr.limit);
282 | vmx_vmwrite(VMCS_GUEST_IDTR_LIMIT, idtr.limit);
283 |
284 | vmx_vmwrite(VMCS_GUEST_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));
285 | vmx_vmwrite(VMCS_GUEST_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
286 | vmx_vmwrite(VMCS_GUEST_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));
287 | vmx_vmwrite(VMCS_GUEST_DEBUGCTL, __readmsr(IA32_DEBUGCTL));
288 | vmx_vmwrite(VMCS_GUEST_PAT, __readmsr(IA32_PAT));
289 |
290 | // the guest IA32_PERF_GLOBAL_CTRL field is only written on Win10 builds
291 | #ifndef WIN7
292 | vmx_vmwrite(VMCS_GUEST_PERF_GLOBAL_CTRL, __readmsr(IA32_PERF_GLOBAL_CTRL));
293 | #endif
294 |
295 |
296 | vmx_vmwrite(VMCS_GUEST_ACTIVITY_STATE, vmx_active);
297 |
298 | vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
299 |
300 | vmx_vmwrite(VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
301 |
302 | vmx_vmwrite(VMCS_GUEST_VMCS_LINK_POINTER, MAXULONG64);
303 |
304 | vmx_vmwrite(VMCS_GUEST_VMX_PREEMPTION_TIMER_VALUE, MAXULONG64);
305 | }
306 |
307 | } // namespace hv
308 |
309 |
--------------------------------------------------------------------------------
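Note: the divide-by-16 used for the MSR store/load counts in vmcs.cpp above relies on the 16-byte entry layout the SDM mandates for the VM-exit/VM-entry MSR areas (the same layout vmx.h mirrors). A minimal standalone check, plain C++ with no kernel headers:

  #include <cstdint>

  // each MSR-area entry is index (4) + reserved (4) + data (8) = 16 bytes,
  // so sizeof(area) / 16 yields the entry count written to the VMCS
  struct vmx_msr_entry {
    uint32_t msr_idx;
    uint32_t _reserved;
    uint64_t msr_data;
  };

  static_assert(sizeof(vmx_msr_entry) == 16, "SDM-mandated MSR-area entry size");
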
/ReloadDbg/hv/hv/vmcs.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | namespace hv {
4 |
5 | struct vcpu;
6 |
7 | // setup the VMCS control fields
8 | void write_vmcs_ctrl_fields(vcpu* cpu);
9 |
10 | // setup the VMCS host fields
11 | void write_vmcs_host_fields(vcpu const* cpu);
12 |
13 | // setup the guest state in the VMCS so that it mirrors the currently running system
14 | void write_vmcs_guest_fields();
15 |
16 | } // namespace hv
17 |
18 |
--------------------------------------------------------------------------------
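The three helpers declared above assume the target VMCS is already current. A minimal sketch of the intended call order during vcpu initialization (the real sequence lives in vcpu.cpp; setup_vmcs_sketch is a hypothetical name):

  #include "vmcs.h"
  #include "vmx.h"

  // hypothetical wrapper: clear the VMCS, make it current, then fill the
  // control, host, and guest field groups in that order
  inline bool setup_vmcs_sketch(hv::vcpu* const cpu, uint64_t const vmcs_phys) {
    if (!hv::vmx_vmclear(vmcs_phys))  // put the VMCS into the clear state
      return false;
    if (!hv::vmx_vmptrld(vmcs_phys))  // make it the current VMCS
      return false;
    hv::write_vmcs_ctrl_fields(cpu);
    hv::write_vmcs_host_fields(cpu);
    hv::write_vmcs_guest_fields();
    return true;
  }
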
/ReloadDbg/hv/hv/vmx.asm:
--------------------------------------------------------------------------------
1 | .code
2 |
3 | ?vmx_invept@hv@@YAXW4invept_type@@AEBUinvept_descriptor@@@Z proc
4 | invept rcx, oword ptr [rdx]
5 | ret
6 | ?vmx_invept@hv@@YAXW4invept_type@@AEBUinvept_descriptor@@@Z endp
7 |
8 | ?vmx_invvpid@hv@@YAXW4invvpid_type@@AEBUinvvpid_descriptor@@@Z proc
9 | invvpid rcx, oword ptr [rdx]
10 | ret
11 | ?vmx_invvpid@hv@@YAXW4invvpid_type@@AEBUinvvpid_descriptor@@@Z endp
12 |
13 | ?vmx_vmcall@hv@@YA_KAEAUhypercall_input@1@@Z proc
14 | ; move input into registers
15 | mov rax, [rcx] ; code
16 | mov rdx, [rcx + 10h] ; args[1]
17 | mov r8, [rcx + 18h] ; args[2]
18 | mov r9, [rcx + 20h] ; args[3]
19 | mov r10, [rcx + 28h] ; args[4]
20 | mov r11, [rcx + 30h] ; args[5]
21 | mov rcx, [rcx + 08h] ; args[0]
22 |
23 | vmcall
24 |
25 | ret
26 | ?vmx_vmcall@hv@@YA_KAEAUhypercall_input@1@@Z endp
27 |
28 | end
29 |
--------------------------------------------------------------------------------
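The hard-coded offsets in vmx_vmcall above imply a specific hypercall_input layout: one qword at +00h feeding rax, then six qword arguments at +08h..+30h feeding rcx, rdx, r8, r9, r10 and r11. A hypothetical mirror of that layout (the real definition lives in hypercalls.h and packs a key alongside the code in the first qword):

  #include <cstddef>
  #include <cstdint>

  struct hypercall_input_layout {
    uint64_t code;     // [rcx + 00h] -> rax (code/key qword)
    uint64_t args[6];  // [rcx + 08h..30h] -> rcx, rdx, r8, r9, r10, r11
  };

  static_assert(offsetof(hypercall_input_layout, args) == 0x08, "args[0] at +08h");
  static_assert(sizeof(hypercall_input_layout) == 0x38, "last arg read at +30h");
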
/ReloadDbg/hv/hv/vmx.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "arch.h"
4 | #include "guest-context.h"
5 | #include "hypercalls.h"
6 |
7 | namespace hv {
8 |
9 | // TODO: move to ia32?
10 | struct vmx_msr_entry {
11 | uint32_t msr_idx;
12 | uint32_t _reserved;
13 | uint64_t msr_data;
14 | };
15 |
16 | // INVEPT instruction
17 | void vmx_invept(invept_type type, invept_descriptor const& desc);
18 |
19 | // INVVPID instruction
20 | void vmx_invvpid(invvpid_type type, invvpid_descriptor const& desc);
21 |
22 | // VMCALL instruction
23 | uint64_t vmx_vmcall(hypercall_input& input);
24 |
25 | // VMXON instruction
26 | bool vmx_vmxon(uint64_t vmxon_phys_addr);
27 |
28 | // VMXOFF instruction
29 | void vmx_vmxoff();
30 |
31 | // VMCLEAR instruction
32 | bool vmx_vmclear(uint64_t vmcs_phys_addr);
33 |
34 | // VMPTRLD instruction
35 | bool vmx_vmptrld(uint64_t vmcs_phys_addr);
36 |
37 | // VMWRITE instruction
38 | void vmx_vmwrite(uint64_t field, uint64_t value);
39 |
40 | // VMREAD instruction
41 | uint64_t vmx_vmread(uint64_t field);
42 |
43 | // write to the guest interruptibility state
44 | void write_interruptibility_state(vmx_interruptibility_state value);
45 |
46 | // read the guest interruptibility state
47 | vmx_interruptibility_state read_interruptibility_state();
48 |
49 | // write to a guest general-purpose register
50 | void write_guest_gpr(guest_context* ctx, uint64_t gpr_idx, uint64_t value);
51 |
52 | // read a guest general-purpose register
53 | uint64_t read_guest_gpr(guest_context const* ctx, uint64_t gpr_idx);
54 |
55 | // get the value of CR0 that the guest believes is active.
56 | // this is a mixture of the guest CR0 and the CR0 read shadow.
57 | cr0 read_effective_guest_cr0();
58 |
59 | // get the value of CR4 that the guest believes is active.
60 | // this is a mixture of the guest CR4 and the CR4 read shadow.
61 | cr4 read_effective_guest_cr4();
62 |
63 | // write to the pin-based vm-execution controls
64 | void write_ctrl_pin_based_safe(ia32_vmx_pinbased_ctls_register value);
65 |
66 | // write to the processor-based vm-execution controls
67 | void write_ctrl_proc_based_safe(ia32_vmx_procbased_ctls_register value);
68 |
69 | // write to the secondary processor-based vm-execution controls
70 | void write_ctrl_proc_based2_safe(ia32_vmx_procbased_ctls2_register value);
71 |
72 | // write to the vm-exit controls
73 | void write_ctrl_exit_safe(ia32_vmx_exit_ctls_register value);
74 |
75 | // write to the vm-entry controls
76 | void write_ctrl_entry_safe(ia32_vmx_entry_ctls_register value);
77 |
78 | // write to the pin-based vm-execution controls
79 | void write_ctrl_pin_based(ia32_vmx_pinbased_ctls_register value);
80 |
81 | // write to the processor-based vm-execution controls
82 | void write_ctrl_proc_based(ia32_vmx_procbased_ctls_register value);
83 |
84 | // write to the secondary processor-based vm-execution controls
85 | void write_ctrl_proc_based2(ia32_vmx_procbased_ctls2_register value);
86 |
87 | // write to the vm-exit controls
88 | void write_ctrl_exit(ia32_vmx_exit_ctls_register value);
89 |
90 | // write to the vm-entry controls
91 | void write_ctrl_entry(ia32_vmx_entry_ctls_register value);
92 |
93 | // read the pin-based vm-execution controls
94 | ia32_vmx_pinbased_ctls_register read_ctrl_pin_based();
95 |
96 | // read the processor-based vm-execution controls
97 | ia32_vmx_procbased_ctls_register read_ctrl_proc_based();
98 |
99 | // read the secondary processor-based vm-execution controls
100 | ia32_vmx_procbased_ctls2_register read_ctrl_proc_based2();
101 |
102 | // read the vm-exit controls
103 | ia32_vmx_exit_ctls_register read_ctrl_exit();
104 |
105 | // read the vm-entry controls
106 | ia32_vmx_entry_ctls_register read_ctrl_entry();
107 |
108 | // get the CPL (current privilege level) of the current guest
109 | uint16_t current_guest_cpl();
110 |
111 | // increment the instruction pointer after emulating an instruction
112 | void skip_instruction();
113 |
114 | // inject a non-maskable interrupt into the guest
115 | void inject_nmi();
116 |
117 | // inject a vectored exception into the guest
118 | void inject_hw_exception(uint32_t vector);
119 |
120 | // inject a vectored exception into the guest (with an error code)
121 | void inject_hw_exception(uint32_t vector, uint32_t error);
122 |
123 | // enable/disable vm-exits when the guest tries to read the specified MSR
124 | void enable_exit_for_msr_read(vmx_msr_bitmap& bitmap, uint32_t msr, bool enable_exiting);
125 |
126 | // enable/disable vm-exits when the guest tries to write to the specified MSR
127 | void enable_exit_for_msr_write(vmx_msr_bitmap& bitmap, uint32_t msr, bool enable_exiting);
128 |
129 | } // namespace hv
130 |
131 | #include "vmx.inl"
132 |
133 |
--------------------------------------------------------------------------------
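A short usage sketch of the control accessors declared above, assuming it runs where the VMCS is current (e.g. inside an exit handler). enable_rdtsc_exiting is a hypothetical helper, and rdtsc_exiting is assumed to be the bitfield name per the ia32-doc naming used throughout:

  #include "vmx.h"

  // read-modify-write a processor-based control through the "safe"
  // writer so reserved bits are fixed up against the capability MSRs
  inline void enable_rdtsc_exiting() {
    auto ctrl = hv::read_ctrl_proc_based();
    ctrl.rdtsc_exiting = 1;
    hv::write_ctrl_proc_based_safe(ctrl);
  }
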
/ReloadDbg/hv/hv/vmx.inl:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | namespace hv {
4 |
5 | namespace impl {
6 |
7 | // helper function that adjusts vmcs control
8 | // fields according to their capability
9 | inline void write_vmcs_ctrl_field(size_t value,
10 | unsigned long const ctrl_field,
11 | unsigned long const cap_msr,
12 | unsigned long const true_cap_msr) {
13 | ia32_vmx_basic_register vmx_basic;
14 | vmx_basic.flags = __readmsr(IA32_VMX_BASIC);
15 |
16 | // read the "true" capability msr if it is supported
17 | auto const cap = __readmsr(vmx_basic.vmx_controls ? true_cap_msr : cap_msr);
18 |
19 | // adjust the control according to the capability msr
20 | value &= cap >> 32;
21 | value |= cap & 0xFFFFFFFF;
22 |
23 | // write to the vmcs field
24 | vmx_vmwrite(ctrl_field, value);
25 | }
26 |
27 | } // namespace impl
28 |
29 | // VMXON instruction
30 | inline bool vmx_vmxon(uint64_t vmxon_phys_addr) {
31 | return __vmx_on(&vmxon_phys_addr) == 0;
32 | }
33 |
34 | // VMXOFF instruction
35 | inline void vmx_vmxoff() {
36 | __vmx_off();
37 | }
38 |
39 | // VMCLEAR instruction
40 | inline bool vmx_vmclear(uint64_t vmcs_phys_addr) {
41 | return __vmx_vmclear(&vmcs_phys_addr) == 0;
42 | }
43 |
44 | // VMPTRLD instruction
45 | inline bool vmx_vmptrld(uint64_t vmcs_phys_addr) {
46 | return __vmx_vmptrld(&vmcs_phys_addr) == 0;
47 | }
48 |
49 | // VMWRITE instruction
50 | inline void vmx_vmwrite(uint64_t const field, uint64_t const value) {
51 | __vmx_vmwrite(field, value);
52 | }
53 |
54 | // VMREAD instruction
55 | inline uint64_t vmx_vmread(uint64_t const field) {
56 | uint64_t value;
57 | __vmx_vmread(field, &value);
58 | return value;
59 | }
60 |
61 | // write to a guest general-purpose register
62 | inline void write_guest_gpr(guest_context* const ctx,
63 | uint64_t const gpr_idx, uint64_t const value) {
64 | if (gpr_idx == VMX_EXIT_QUALIFICATION_GENREG_RSP)
65 | vmx_vmwrite(VMCS_GUEST_RSP, value);
66 | else
67 | ctx->gpr[gpr_idx] = value;
68 | }
69 |
70 | // read a guest general-purpose register
71 | inline uint64_t read_guest_gpr(guest_context const* const ctx,
72 | uint64_t const gpr_idx) {
73 | if (gpr_idx == VMX_EXIT_QUALIFICATION_GENREG_RSP)
74 | return vmx_vmread(VMCS_GUEST_RSP);
75 | return ctx->gpr[gpr_idx];
76 | }
77 |
78 | // get the value of CR0 that the guest believes is active.
79 | // this is a mixture of the guest CR0 and the CR0 read shadow.
80 | inline cr0 read_effective_guest_cr0() {
81 | // TODO: cache this value
82 | auto const mask = vmx_vmread(VMCS_CTRL_CR0_GUEST_HOST_MASK);
83 |
84 | // bits set to 1 in the mask are read from CR0, otherwise from the shadow
85 | cr0 cr0;
86 | cr0.flags = (vmx_vmread(VMCS_CTRL_CR0_READ_SHADOW) & mask)
87 | | (vmx_vmread(VMCS_GUEST_CR0) & ~mask);
88 |
89 | return cr0;
90 | }
91 |
92 | // get the value of CR4 that the guest believes is active.
93 | // this is a mixture of the guest CR4 and the CR4 read shadow.
94 | inline cr4 read_effective_guest_cr4() {
95 | // TODO: cache this value
96 | auto const mask = vmx_vmread(VMCS_CTRL_CR4_GUEST_HOST_MASK);
97 |
98 | // bits set to 1 in the mask are read from CR4, otherwise from the shadow
99 | cr4 cr4;
100 | cr4.flags = (vmx_vmread(VMCS_CTRL_CR4_READ_SHADOW) & mask)
101 | | (vmx_vmread(VMCS_GUEST_CR4) & ~mask);
102 |
103 | return cr4;
104 | }
105 |
106 | // write to the guest interruptibility state
107 | inline void write_interruptibility_state(vmx_interruptibility_state const value) {
108 | vmx_vmwrite(VMCS_GUEST_INTERRUPTIBILITY_STATE, value.flags);
109 | }
110 |
111 | // read the guest interruptibility state
112 | inline vmx_interruptibility_state read_interruptibility_state() {
113 | vmx_interruptibility_state value;
114 | value.flags = static_cast<uint32_t>(vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY_STATE));
115 | return value;
116 | }
117 |
118 | // write to the pin-based vm-execution controls
119 | inline void write_ctrl_pin_based_safe(ia32_vmx_pinbased_ctls_register const value) {
120 | impl::write_vmcs_ctrl_field(value.flags,
121 | VMCS_CTRL_PIN_BASED_VM_EXECUTION_CONTROLS,
122 | IA32_VMX_PINBASED_CTLS,
123 | IA32_VMX_TRUE_PINBASED_CTLS);
124 | }
125 |
126 | // write to the processor-based vm-execution controls
127 | inline void write_ctrl_proc_based_safe(ia32_vmx_procbased_ctls_register const value) {
128 | impl::write_vmcs_ctrl_field(value.flags,
129 | VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS,
130 | IA32_VMX_PROCBASED_CTLS,
131 | IA32_VMX_TRUE_PROCBASED_CTLS);
132 | }
133 |
134 | // write to the secondary processor-based vm-execution controls
135 | inline void write_ctrl_proc_based2_safe(ia32_vmx_procbased_ctls2_register const value) {
136 | impl::write_vmcs_ctrl_field(value.flags,
137 | VMCS_CTRL_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS,
138 | IA32_VMX_PROCBASED_CTLS2,
139 | IA32_VMX_PROCBASED_CTLS2); // no "true" capability MSR exists for the secondary controls
140 | }
141 |
142 | // write to the vm-exit controls
143 | inline void write_ctrl_exit_safe(ia32_vmx_exit_ctls_register const value) {
144 | impl::write_vmcs_ctrl_field(value.flags,
145 | VMCS_CTRL_PRIMARY_VMEXIT_CONTROLS,
146 | IA32_VMX_EXIT_CTLS,
147 | IA32_VMX_TRUE_EXIT_CTLS);
148 | }
149 |
150 | // write to the vm-entry controls
151 | inline void write_ctrl_entry_safe(ia32_vmx_entry_ctls_register const value) {
152 | impl::write_vmcs_ctrl_field(value.flags,
153 | VMCS_CTRL_VMENTRY_CONTROLS,
154 | IA32_VMX_ENTRY_CTLS,
155 | IA32_VMX_TRUE_ENTRY_CTLS);
156 | }
157 |
158 | // write to the pin-based vm-execution controls
159 | inline void write_ctrl_pin_based(ia32_vmx_pinbased_ctls_register const value) {
160 | vmx_vmwrite(VMCS_CTRL_PIN_BASED_VM_EXECUTION_CONTROLS, value.flags);
161 | }
162 |
163 | // write to the processor-based vm-execution controls
164 | inline void write_ctrl_proc_based(ia32_vmx_procbased_ctls_register const value) {
165 | vmx_vmwrite(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, value.flags);
166 | }
167 |
168 | // write to the secondary processor-based vm-execution controls
169 | inline void write_ctrl_proc_based2(ia32_vmx_procbased_ctls2_register const value) {
170 | vmx_vmwrite(VMCS_CTRL_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS, value.flags);
171 | }
172 |
173 | // write to the vm-exit controls
174 | inline void write_ctrl_exit(ia32_vmx_exit_ctls_register const value) {
175 | vmx_vmwrite(VMCS_CTRL_PRIMARY_VMEXIT_CONTROLS, value.flags);
176 | }
177 |
178 | // write to the vm-entry controls
179 | inline void write_ctrl_entry(ia32_vmx_entry_ctls_register const value) {
180 | vmx_vmwrite(VMCS_CTRL_VMENTRY_CONTROLS, value.flags);
181 | }
182 |
183 | // read the pin-based vm-execution controls
184 | inline ia32_vmx_pinbased_ctls_register read_ctrl_pin_based() {
185 | ia32_vmx_pinbased_ctls_register value;
186 | value.flags = vmx_vmread(VMCS_CTRL_PIN_BASED_VM_EXECUTION_CONTROLS);
187 | return value;
188 | }
189 |
190 | // read the processor-based vm-execution controls
191 | inline ia32_vmx_procbased_ctls_register read_ctrl_proc_based() {
192 | ia32_vmx_procbased_ctls_register value;
193 | value.flags = vmx_vmread(VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);
194 | return value;
195 | }
196 |
197 | // read the secondary processor-based vm-execution controls
198 | inline ia32_vmx_procbased_ctls2_register read_ctrl_proc_based2() {
199 | ia32_vmx_procbased_ctls2_register value;
200 | value.flags = vmx_vmread(VMCS_CTRL_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);
201 | return value;
202 | }
203 |
204 | // read the vm-exit controls
205 | inline ia32_vmx_exit_ctls_register read_ctrl_exit() {
206 | ia32_vmx_exit_ctls_register value;
207 | value.flags = vmx_vmread(VMCS_CTRL_PRIMARY_VMEXIT_CONTROLS);
208 | return value;
209 | }
210 |
211 | // read the vm-entry controls
212 | inline ia32_vmx_entry_ctls_register read_ctrl_entry() {
213 | ia32_vmx_entry_ctls_register value;
214 | value.flags = vmx_vmread(VMCS_CTRL_VMENTRY_CONTROLS);
215 | return value;
216 | }
217 |
218 | // get the CPL (current privilege level) of the current guest
219 | inline uint16_t current_guest_cpl() {
220 | vmx_segment_access_rights ss;
221 | ss.flags = static_cast<uint32_t>(vmx_vmread(VMCS_GUEST_SS_ACCESS_RIGHTS));
222 | return ss.descriptor_privilege_level;
223 | }
224 |
225 | // increment the instruction pointer after emulating an instruction
226 | inline void skip_instruction() {
227 | // increment RIP
228 | auto const old_rip = vmx_vmread(VMCS_GUEST_RIP);
229 | auto new_rip = old_rip + vmx_vmread(VMCS_VMEXIT_INSTRUCTION_LENGTH);
230 |
231 | // handle wrap-around for 32-bit addresses
232 | // https://patchwork.kernel.org/project/kvm/patch/20200427165917.31799-1-pbonzini@redhat.com/
233 | if (old_rip < (1ull << 32) && new_rip >= (1ull << 32)) {
234 | vmx_segment_access_rights cs_access_rights;
235 | cs_access_rights.flags = static_cast<uint32_t>(
236 | vmx_vmread(VMCS_GUEST_CS_ACCESS_RIGHTS));
237 |
238 | // make sure guest is in 32-bit mode
239 | if (!cs_access_rights.long_mode)
240 | new_rip &= 0xFFFF'FFFF;
241 | }
242 |
243 | vmx_vmwrite(VMCS_GUEST_RIP, new_rip);
244 |
245 | // if we're currently blocking interrupts (due to mov ss or sti)
246 | // then we should unblock them since we just emulated an instruction
247 | auto interrupt_state = read_interruptibility_state();
248 | interrupt_state.blocking_by_mov_ss = 0;
249 | interrupt_state.blocking_by_sti = 0;
250 | write_interruptibility_state(interrupt_state);
251 |
252 | ia32_debugctl_register debugctl;
253 | debugctl.flags = vmx_vmread(VMCS_GUEST_DEBUGCTL);
254 |
255 | rflags rflags;
256 | rflags.flags = vmx_vmread(VMCS_GUEST_RFLAGS);
257 |
258 | // if we're single-stepping, inject a debug exception
259 | // just like normal instruction execution would
260 | if (rflags.trap_flag && !debugctl.btf) {
261 | vmx_pending_debug_exceptions dbg_exception;
262 | dbg_exception.flags = vmx_vmread(VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
263 | dbg_exception.bs = 1;
264 | vmx_vmwrite(VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, dbg_exception.flags);
265 | }
266 | }
267 |
268 | // inject an NMI into the guest
269 | inline void inject_nmi() {
270 | vmentry_interrupt_information interrupt_info;
271 | interrupt_info.flags = 0;
272 | interrupt_info.vector = nmi;
273 | interrupt_info.interruption_type = non_maskable_interrupt;
274 | interrupt_info.deliver_error_code = 0;
275 | interrupt_info.valid = 1;
276 | vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt_info.flags);
277 | }
278 |
279 | // inject a vectored exception into the guest
280 | inline void inject_hw_exception(uint32_t const vector) {
281 | vmentry_interrupt_information interrupt_info;
282 | interrupt_info.flags = 0;
283 | interrupt_info.vector = vector;
284 | interrupt_info.interruption_type = hardware_exception;
285 | interrupt_info.deliver_error_code = 0;
286 | interrupt_info.valid = 1;
287 | vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt_info.flags);
288 | }
289 |
290 | // inject a vectored exception into the guest (with an error code)
291 | inline void inject_hw_exception(uint32_t const vector, uint32_t const error) {
292 | vmentry_interrupt_information interrupt_info;
293 | interrupt_info.flags = 0;
294 | interrupt_info.vector = vector;
295 | interrupt_info.interruption_type = hardware_exception;
296 | interrupt_info.deliver_error_code = 1;
297 | interrupt_info.valid = 1;
298 | vmx_vmwrite(VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD, interrupt_info.flags);
299 | vmx_vmwrite(VMCS_CTRL_VMENTRY_EXCEPTION_ERROR_CODE, error);
300 | }
301 |
302 | // enable/disable vm-exits when the guest tries to read the specified MSR
303 | inline void enable_exit_for_msr_read(vmx_msr_bitmap& bitmap,
304 | uint32_t const msr, bool const enable_exiting) {
305 | auto const bit = static_cast<uint8_t>(enable_exiting ? 1 : 0);
306 |
307 | // clear then set the target bit so neighboring MSRs in the same byte are preserved
308 | if (msr <= MSR_ID_LOW_MAX) {
309 | bitmap.rdmsr_low[msr / 8] &= ~(1 << (msr & 0b0111));
310 | bitmap.rdmsr_low[msr / 8] |= (bit << (msr & 0b0111));
311 | } else if (msr >= MSR_ID_HIGH_MIN && msr <= MSR_ID_HIGH_MAX) {
312 | bitmap.rdmsr_high[(msr - MSR_ID_HIGH_MIN) / 8] &= ~(1 << (msr & 0b0111));
313 | bitmap.rdmsr_high[(msr - MSR_ID_HIGH_MIN) / 8] |= (bit << (msr & 0b0111));
314 | }
315 | }
314 |
315 | // enable/disable vm-exits when the guest tries to write to the specified MSR
316 | inline void enable_exit_for_msr_write(vmx_msr_bitmap& bitmap,
317 | uint32_t const msr, bool const enable_exiting) {
318 | auto const bit = static_cast<uint8_t>(enable_exiting ? 1 : 0);
319 |
320 | // clear then set the target bit so neighboring MSRs in the same byte are preserved
321 | if (msr <= MSR_ID_LOW_MAX) {
322 | bitmap.wrmsr_low[msr / 8] &= ~(1 << (msr & 0b0111));
323 | bitmap.wrmsr_low[msr / 8] |= (bit << (msr & 0b0111));
324 | } else if (msr >= MSR_ID_HIGH_MIN && msr <= MSR_ID_HIGH_MAX) {
325 | bitmap.wrmsr_high[(msr - MSR_ID_HIGH_MIN) / 8] &= ~(1 << (msr & 0b0111));
326 | bitmap.wrmsr_high[(msr - MSR_ID_HIGH_MIN) / 8] |= (bit << (msr & 0b0111));
327 | }
328 | }
327 |
328 | } // namespace hv
329 |
330 |
--------------------------------------------------------------------------------
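The allowed-0/allowed-1 arithmetic inside impl::write_vmcs_ctrl_field above can be exercised in isolation. A standalone demo with a made-up capability value (low 32 bits = bits that must be 1, high 32 bits = bits that may be 1):

  #include <cstdint>
  #include <cstdio>

  uint32_t adjust_control(uint32_t desired, uint64_t cap) {
    desired &= static_cast<uint32_t>(cap >> 32); // clear bits that cannot be 1
    desired |= static_cast<uint32_t>(cap);       // set bits that must be 1
    return desired;
  }

  int main() {
    // made-up capability: bits 0-15 must be 1, bits 0-23 may be 1
    uint64_t const cap = (0x00FFFFFFull << 32) | 0x0000FFFFull;
    std::printf("0x%08X\n", adjust_control(0xFF000000u, cap)); // prints 0x0000FFFF
    return 0;
  }
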
/ReloadDbg/main.cpp:
--------------------------------------------------------------------------------
1 | #include"dbg.h"
2 |
3 | SYMBOLS_DATA g_SymbolsData = { 0 };
4 |
5 | NTSTATUS VmTest()
6 | {
7 | if (!hv::start()) {
8 | DbgPrint("[hv] Failed to virtualize system.\n");
9 | return STATUS_HV_OPERATION_FAILED;
10 | }
11 |
12 | //ping test
13 | hv::hypercall_input input;
14 | input.code = hv::hypercall_ping;
15 | input.key = hv::hypercall_key;
16 |
17 | if (hv::vmx_vmcall(input) == hv::hypervisor_signature)
18 | DbgPrint("[client] Hypervisor signature matches.\n");
19 | else
20 | DbgPrint("[client] Failed to ping hypervisor!\n");
21 | return STATUS_SUCCESS;
22 | }
23 |
24 | VOID DriverUnload(PDRIVER_OBJECT DriverObject)
25 | {
26 |
27 | UnHookFuncs();
28 |
29 | if (DriverObject->DeviceObject)
30 | {
31 | UNICODE_STRING DosDeviceName;
32 | RtlInitUnicodeString(&DosDeviceName, L"\\DosDevices\\YCData");
33 | IoDeleteSymbolicLink(&DosDeviceName);
34 | IoDeleteDevice(DriverObject->DeviceObject);
35 | }
36 | }
37 |
38 | NTSTATUS DrvComm(_In_ PDEVICE_OBJECT DeviceObject, _In_ PIRP Irp)
39 | {
40 | UNREFERENCED_PARAMETER(DeviceObject);
41 | Irp->IoStatus.Status = STATUS_SUCCESS;
42 | Irp->IoStatus.Information = 0;
43 | IoCompleteRequest(Irp, IO_NO_INCREMENT);
44 |
45 | return STATUS_SUCCESS;
46 | }
47 |
48 | NTSTATUS DrvIOCTLDispatcher(_In_ PDEVICE_OBJECT DeviceObject, _In_ PIRP Irp)
49 | {
50 | UNREFERENCED_PARAMETER(DeviceObject);
51 | PIO_STACK_LOCATION Stack = IoGetCurrentIrpStackLocation(Irp);
52 | NTSTATUS Status = STATUS_SUCCESS;
53 |
54 | switch (Stack->Parameters.DeviceIoControl.IoControlCode)
55 | {
56 | case CTL_CODE(FILE_DEVICE_UNKNOWN, CTL_LOAD_DRIVER, METHOD_BUFFERED, FILE_ANY_ACCESS):
57 | {
58 | __try
59 | {
60 | // reject undersized buffers before copying the user-supplied symbol table
61 | if (Stack->Parameters.DeviceIoControl.InputBufferLength >= sizeof(SYMBOLS_DATA))
62 | {
63 | memmove(&g_SymbolsData, Irp->AssociatedIrp.SystemBuffer, sizeof(SYMBOLS_DATA));
64 | DbgInit();
65 | }
66 | } __except (EXCEPTION_EXECUTE_HANDLER) {}
64 | break;
65 | }
66 |
67 | }
68 |
69 | Irp->IoStatus.Status = Status;
70 | Irp->IoStatus.Information = 0;
71 | IoCompleteRequest(Irp, IO_NO_INCREMENT);
72 | return Status;
73 | }
74 |
75 | extern "C" NTSTATUS DriverEntry(PDRIVER_OBJECT Driver, PCUNICODE_STRING Reg)
76 | {
77 | UNREFERENCED_PARAMETER(Reg);
78 | Driver->DriverUnload = DriverUnload;
79 |
80 | if (NT_SUCCESS(VmTest()))
81 | {
82 | PDEVICE_OBJECT DeviceObject;
83 | UNICODE_STRING DriverName, DosDeviceName;
84 | RtlInitUnicodeString(&DriverName, L"\\Device\\YCData");
85 | RtlInitUnicodeString(&DosDeviceName, L"\\DosDevices\\YCData");
86 |
87 | IoCreateDevice(Driver, 0, &DriverName, FILE_DEVICE_UNKNOWN, FILE_DEVICE_SECURE_OPEN, FALSE, &DeviceObject);
88 |
89 | Driver->MajorFunction[IRP_MJ_CLOSE] = DrvComm;
90 | Driver->MajorFunction[IRP_MJ_CREATE] = DrvComm;
91 | Driver->MajorFunction[IRP_MJ_DEVICE_CONTROL] = DrvIOCTLDispatcher;
92 | Driver->Flags |= DO_BUFFERED_IO;
93 |
94 | IoCreateSymbolicLink(&DosDeviceName, &DriverName);
95 | }
96 |
97 | return STATUS_SUCCESS;
98 | }
--------------------------------------------------------------------------------
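On the user-mode side, a client reaches DrvIOCTLDispatcher through the YCData symbolic link created in DriverEntry. A hedged sketch of the call (send_symbols is a hypothetical helper; the real loader is the LoadSymbol project, which resolves the addresses from ntkrnlmp.pdb first):

  #include <windows.h>
  #include <winioctl.h>

  #define CTL_LOAD_DRIVER 0x800

  // open the device and hand the resolved SYMBOLS_DATA blob to the driver
  // via the METHOD_BUFFERED ioctl handled in DrvIOCTLDispatcher
  int send_symbols(const void* symbols, DWORD size) {
    HANDLE device = CreateFileW(L"\\\\.\\YCData", GENERIC_READ | GENERIC_WRITE,
                                0, nullptr, OPEN_EXISTING, 0, nullptr);
    if (device == INVALID_HANDLE_VALUE)
      return -1;

    DWORD returned = 0;
    BOOL const ok = DeviceIoControl(device,
      CTL_CODE(FILE_DEVICE_UNKNOWN, CTL_LOAD_DRIVER, METHOD_BUFFERED, FILE_ANY_ACCESS),
      const_cast<void*>(symbols), size, nullptr, 0, &returned, nullptr);

    CloseHandle(device);
    return ok ? 0 : -1;
  }
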
/ReloadDbg/vmintrin.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <ntifs.h>
3 | #include <ntimage.h>
4 | #include <intrin.h>
5 | #include "KernelDbgStruct.h"
6 |
7 | #define CTL_LOAD_DRIVER 0x800
8 | #define IOCTL_POOL_MANAGER_ALLOCATE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x900, METHOD_BUFFERED, FILE_SPECIAL_ACCESS)
9 | typedef struct _SYMBOLS_DATA {
10 | PVOID NtCreateDebugObject;
11 | PVOID PsGetNextProcessThread;
12 | PVOID DbgkpPostFakeThreadMessages;
13 | PVOID DbgkpWakeTarget;
14 | PVOID DbgkpSetProcessDebugObject;
15 | PVOID DbgkCreateThread;
16 | PVOID DbgkpQueueMessage;
17 | PVOID PsCaptureExceptionPort;
18 | PVOID DbgkpSendApiMessage;
19 | PVOID DbgkpSendApiMessageLpc;
20 | PVOID DbgkpSendErrorMessage;
21 | PVOID DbgkForwardException;
22 | PVOID DbgkpSuppressDbgMsg;
23 | PVOID DbgkpSectionToFileHandle;
24 | PVOID DbgkUnMapViewOfSection;
25 | PVOID DbgkpPostFakeProcessCreateMessages;
26 | PVOID NtDebugActiveProcess;
27 | PVOID DbgkpMarkProcessPeb;
28 | PVOID KiDispatchException;
29 | PVOID NtCreateUserProcess;
30 | PVOID DbgkDebugObjectType;
31 | PVOID ObTypeIndexTable;
32 | PVOID NtTerminateProcess;
33 | PVOID DbgkMapViewOfSection;
34 | PVOID DbgkSendSystemDllMessages;
35 | PVOID DbgkpProcessDebugPortMutex;
36 | } SYMBOLS_DATA, *PSYMBOLS_DATA;
37 |
38 | extern "C" PVOID PsGetThreadTeb(PETHREAD Thread);
39 | extern "C" LONG NTAPI ExSystemExceptionFilter(VOID);
40 | extern "C" PVOID PsGetProcessWow64Process(PEPROCESS eprocess);
41 | extern "C" PIMAGE_NT_HEADERS NTAPI RtlImageNtHeader(PVOID Base);
42 | extern "C" NTKERNELAPI NTSTATUS ObCreateObjectType(PUNICODE_STRING TypeName, PVOID ObjectTypeInitializer, PSECURITY_DESCRIPTOR SecurityDescriptor, PVOID* ObjectType);
43 | extern "C" NTSTATUS ObCreateObject(KPROCESSOR_MODE ProbeMode,POBJECT_TYPE ObjectType,POBJECT_ATTRIBUTES ObjectAttributes,KPROCESSOR_MODE OwnershipMode,PVOID ParseContext,ULONG ObjectBodySize,ULONG PagedPoolCharge,ULONG NonPagedPoolCharge,PVOID* Object);
44 |
45 |
46 |
--------------------------------------------------------------------------------
/dbghelp.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/dbghelp.dll
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/223C6C6606ED35973A9AD057262282DB1/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/223C6C6606ED35973A9AD057262282DB1/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/3177D31000BA7590DED335936C93E3741/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/3177D31000BA7590DED335936C93E3741/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/3844DBB920174967BE7AA4A2C20430FA2/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/3844DBB920174967BE7AA4A2C20430FA2/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/47114209A62F3B9930F6B8998DFD4A991/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/47114209A62F3B9930F6B8998DFD4A991/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/67CAF02E081BE9CB68937D22531F99C01/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/67CAF02E081BE9CB68937D22531F99C01/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/68A17FAF3012B7846079AEECDBE0A5831/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/68A17FAF3012B7846079AEECDBE0A5831/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/76B0354BFFCF79294F039F39D1321C171/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/76B0354BFFCF79294F039F39D1321C171/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/992A9A48F30EC2C58B01A5934DCE2D9C1/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/992A9A48F30EC2C58B01A5934DCE2D9C1/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/CA8E2F01B822EDE6357898BFBF8629971/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/CA8E2F01B822EDE6357898BFBF8629971/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/ntkrnlmp.pdb/F526DBB121425697CBBF4FB22502519F1/ntkrnlmp.pdb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/ntkrnlmp.pdb/F526DBB121425697CBBF4FB22502519F1/ntkrnlmp.pdb
--------------------------------------------------------------------------------
/symsrv.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xyddnljydd/vt-ReloadDbg/6070fd0af714125a615d7f6346814c1b0ac36cbd/symsrv.dll
--------------------------------------------------------------------------------