├── .editorconfig ├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── SimpleSvm.sln └── SimpleSvm ├── SimpleSvm.cpp ├── SimpleSvm.hpp ├── SimpleSvm.ruleset ├── SimpleSvm.vcxproj ├── SimpleSvm.vcxproj.filters └── x64.asm /.editorconfig: -------------------------------------------------------------------------------- 1 | # To learn more about .editorconfig see https://aka.ms/editorconfigdocs 2 | 3 | root = true 4 | 5 | # All C++ files 6 | [*.{cpp,hpp,asm}] 7 | charset = utf-8 8 | indent_style = space 9 | indent_size = 4 10 | trim_trailing_whitespace = true 11 | 12 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Set default behavior to automatically normalize line endings. 3 | ############################################################################### 4 | * text=auto 5 | 6 | ############################################################################### 7 | # Set default behavior for command prompt diff. 8 | # 9 | # This is need for earlier builds of msysgit that does not have it on by 10 | # default for csharp files. 11 | # Note: This is only used by command line 12 | ############################################################################### 13 | #*.cs diff=csharp 14 | 15 | ############################################################################### 16 | # Set the merge driver for project and solution files 17 | # 18 | # Merging from the command prompt will add diff markers to the files if there 19 | # are conflicts (Merging from VS is not affected by the settings below, in VS 20 | # the diff markers are never inserted). Diff markers may cause the following 21 | # file extensions to fail to load in VS. An alternative would be to treat 22 | # these files as binary and thus will always conflict and require user 23 | # intervention with every merge. To do so, just uncomment the entries below 24 | ############################################################################### 25 | #*.sln merge=binary 26 | #*.csproj merge=binary 27 | #*.vbproj merge=binary 28 | #*.vcxproj merge=binary 29 | #*.vcproj merge=binary 30 | #*.dbproj merge=binary 31 | #*.fsproj merge=binary 32 | #*.lsproj merge=binary 33 | #*.wixproj merge=binary 34 | #*.modelproj merge=binary 35 | #*.sqlproj merge=binary 36 | #*.wwaproj merge=binary 37 | 38 | ############################################################################### 39 | # behavior for image files 40 | # 41 | # image files are treated as binary by default. 42 | ############################################################################### 43 | #*.jpg binary 44 | #*.png binary 45 | #*.gif binary 46 | 47 | ############################################################################### 48 | # diff behavior for common document formats 49 | # 50 | # Convert binary document formats to text before diffing them. This feature 51 | # is only available from the command line. Turn it on by uncommenting the 52 | # entries below. 
53 | ############################################################################### 54 | #*.doc diff=astextplain 55 | #*.DOC diff=astextplain 56 | #*.docx diff=astextplain 57 | #*.DOCX diff=astextplain 58 | #*.dot diff=astextplain 59 | #*.DOT diff=astextplain 60 | #*.pdf diff=astextplain 61 | #*.PDF diff=astextplain 62 | #*.rtf diff=astextplain 63 | #*.RTF diff=astextplain 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Ignore Visual Studio temporary files, build results, and 2 | ## files generated by popular Visual Studio add-ons. 3 | 4 | # User-specific files 5 | *.suo 6 | *.user 7 | *.userosscache 8 | *.sln.docstates 9 | 10 | # User-specific files (MonoDevelop/Xamarin Studio) 11 | *.userprefs 12 | 13 | # Build results 14 | [Dd]ebug/ 15 | [Dd]ebugPublic/ 16 | [Rr]elease/ 17 | [Rr]eleases/ 18 | [Xx]64/ 19 | [Xx]86/ 20 | [Bb]uild/ 21 | bld/ 22 | [Bb]in/ 23 | [Oo]bj/ 24 | 25 | # Visual Studio 2015 cache/options directory 26 | .vs/ 27 | # Uncomment if you have tasks that create the project's static files in wwwroot 28 | #wwwroot/ 29 | 30 | # MSTest test Results 31 | [Tt]est[Rr]esult*/ 32 | [Bb]uild[Ll]og.* 33 | 34 | # NUNIT 35 | *.VisualState.xml 36 | TestResult.xml 37 | 38 | # Build Results of an ATL Project 39 | [Dd]ebugPS/ 40 | [Rr]eleasePS/ 41 | dlldata.c 42 | 43 | # DNX 44 | project.lock.json 45 | artifacts/ 46 | 47 | *_i.c 48 | *_p.c 49 | *_i.h 50 | *.ilk 51 | *.meta 52 | *.obj 53 | *.pch 54 | *.pdb 55 | *.pgc 56 | *.pgd 57 | *.rsp 58 | *.sbr 59 | *.tlb 60 | *.tli 61 | *.tlh 62 | *.tmp 63 | *.tmp_proj 64 | *.log 65 | *.vspscc 66 | *.vssscc 67 | .builds 68 | *.pidb 69 | *.svclog 70 | *.scc 71 | 72 | # Chutzpah Test files 73 | _Chutzpah* 74 | 75 | # Visual C++ cache files 76 | ipch/ 77 | *.aps 78 | *.ncb 79 | *.opendb 80 | *.opensdf 81 | *.sdf 82 | *.cachefile 83 | *.VC.db 84 | 85 | # Visual Studio profiler 86 | *.psess 87 | *.vsp 88 | *.vspx 89 | *.sap 90 | 91 | # TFS 2012 Local Workspace 92 | $tf/ 93 | 94 | # Guidance Automation Toolkit 95 | *.gpState 96 | 97 | # ReSharper is a .NET coding add-in 98 | _ReSharper*/ 99 | *.[Rr]e[Ss]harper 100 | *.DotSettings.user 101 | 102 | # JustCode is a .NET coding add-in 103 | .JustCode 104 | 105 | # TeamCity is a build add-in 106 | _TeamCity* 107 | 108 | # DotCover is a Code Coverage Tool 109 | *.dotCover 110 | 111 | # NCrunch 112 | _NCrunch_* 113 | .*crunch*.local.xml 114 | nCrunchTemp_* 115 | 116 | # MightyMoose 117 | *.mm.* 118 | AutoTest.Net/ 119 | 120 | # Web workbench (sass) 121 | .sass-cache/ 122 | 123 | # Installshield output folder 124 | [Ee]xpress/ 125 | 126 | # DocProject is a documentation generator add-in 127 | DocProject/buildhelp/ 128 | DocProject/Help/*.HxT 129 | DocProject/Help/*.HxC 130 | DocProject/Help/*.hhc 131 | DocProject/Help/*.hhk 132 | DocProject/Help/*.hhp 133 | DocProject/Help/Html2 134 | DocProject/Help/html 135 | 136 | # Click-Once directory 137 | publish/ 138 | 139 | # Publish Web Output 140 | *.[Pp]ublish.xml 141 | *.azurePubxml 142 | 143 | # TODO: Un-comment the next line if you do not want to checkin 144 | # your web deploy settings because they may include unencrypted 145 | # passwords 146 | #*.pubxml 147 | *.publishproj 148 | 149 | # NuGet Packages 150 | *.nupkg 151 | # The packages folder can be ignored because of Package Restore 152 | **/packages/* 153 | # except build/, which is used as an MSBuild target. 
154 | !**/packages/build/ 155 | # Uncomment if necessary however generally it will be regenerated when needed 156 | #!**/packages/repositories.config 157 | # NuGet v3's project.json files produces more ignoreable files 158 | *.nuget.props 159 | *.nuget.targets 160 | 161 | # Microsoft Azure Build Output 162 | csx/ 163 | *.build.csdef 164 | 165 | # Microsoft Azure Emulator 166 | ecf/ 167 | rcf/ 168 | 169 | # Windows Store app package directory 170 | AppPackages/ 171 | BundleArtifacts/ 172 | 173 | # Visual Studio cache files 174 | # files ending in .cache can be ignored 175 | *.[Cc]ache 176 | # but keep track of directories ending in .cache 177 | !*.[Cc]ache/ 178 | 179 | # Others 180 | ClientBin/ 181 | [Ss]tyle[Cc]op.* 182 | ~$* 183 | *~ 184 | *.dbmdl 185 | *.dbproj.schemaview 186 | *.pfx 187 | *.publishsettings 188 | node_modules/ 189 | orleans.codegen.cs 190 | 191 | # RIA/Silverlight projects 192 | Generated_Code/ 193 | 194 | # Backup & report files from converting an old project file 195 | # to a newer Visual Studio version. Backup files are not needed, 196 | # because we have git ;-) 197 | _UpgradeReport_Files/ 198 | Backup*/ 199 | UpgradeLog*.XML 200 | UpgradeLog*.htm 201 | 202 | # SQL Server files 203 | *.mdf 204 | *.ldf 205 | 206 | # Business Intelligence projects 207 | *.rdl.data 208 | *.bim.layout 209 | *.bim_*.settings 210 | 211 | # Microsoft Fakes 212 | FakesAssemblies/ 213 | 214 | # GhostDoc plugin setting file 215 | *.GhostDoc.xml 216 | 217 | # Node.js Tools for Visual Studio 218 | .ntvs_analysis.dat 219 | 220 | # Visual Studio 6 build log 221 | *.plg 222 | 223 | # Visual Studio 6 workspace options file 224 | *.opt 225 | 226 | # Visual Studio LightSwitch build output 227 | **/*.HTMLClient/GeneratedArtifacts 228 | **/*.DesktopClient/GeneratedArtifacts 229 | **/*.DesktopClient/ModelManifest.xml 230 | **/*.Server/GeneratedArtifacts 231 | **/*.Server/ModelManifest.xml 232 | _Pvt_Extensions 233 | 234 | # LightSwitch generated files 235 | GeneratedArtifacts/ 236 | ModelManifest.xml 237 | 238 | # Paket dependency manager 239 | .paket/paket.exe 240 | 241 | # FAKE - F# Make 242 | .fake/ 243 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017-2018 Satoshi Tanda 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | SimpleSvm 2 | ========== 3 | 4 | Introduction 5 | ------------- 6 | 7 | SimpleSvm is a minimalistic educational hypervisor for Windows on AMD processors. 8 | It aims to provide small and explanatory code to use Secure Virtual Machine (SVM), 9 | the AMD version of Intel VT-x, with Nested Page Tables (NPT) from a windows driver. 10 | 11 | SimpleSvm is inspired by SimpleVisor, an Intel x64/EM64T VT-x specific hypervisor 12 | for Windows, written by Alex Ionescu (@aionescu). 13 | 14 | 15 | Supported Platforms 16 | ---------------------- 17 | - Windows 10 and later (x64) 18 | - AMD processors with SVM and NPT support 19 | 20 | 21 | Resources 22 | ------------------- 23 | - AMD64 Architecture Programmer’s Manual Volume 2 and 3 24 | - http://developer.amd.com/resources/developer-guides-manuals/ 25 | 26 | - SimpleVisor 27 | - http://ionescu007.github.io/SimpleVisor/ 28 | 29 | - HelloAmdHvPkg 30 | - https://github.com/tandasat/HelloAmdHvPkg 31 | -------------------------------------------------------------------------------- /SimpleSvm.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 15 4 | VisualStudioVersion = 15.0.28016.0 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SimpleSvm", "SimpleSvm\SimpleSvm.vcxproj", "{D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}" 7 | EndProject 8 | Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{C931AB09-AD9F-4B5A-8C07-66BDC6D01243}" 9 | ProjectSection(SolutionItems) = preProject 10 | .editorconfig = .editorconfig 11 | README.md = README.md 12 | EndProjectSection 13 | EndProject 14 | Global 15 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 16 | Debug|x64 = Debug|x64 17 | Release|x64 = Release|x64 18 | EndGlobalSection 19 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 20 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Debug|x64.ActiveCfg = Debug|x64 21 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Debug|x64.Build.0 = Debug|x64 22 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Debug|x64.Deploy.0 = Debug|x64 23 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Release|x64.ActiveCfg = Release|x64 24 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Release|x64.Build.0 = Release|x64 25 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C}.Release|x64.Deploy.0 = Release|x64 26 | EndGlobalSection 27 | GlobalSection(SolutionProperties) = preSolution 28 | HideSolutionNode = FALSE 29 | EndGlobalSection 30 | GlobalSection(ExtensibilityGlobals) = postSolution 31 | SolutionGuid = {35B510A3-CBDD-4AAA-8A2C-464516D383F3} 32 | EndGlobalSection 33 | EndGlobal 34 | -------------------------------------------------------------------------------- /SimpleSvm/SimpleSvm.cpp: -------------------------------------------------------------------------------- 1 | /*! 2 | @file SimpleSvm.cpp 3 | 4 | @brief All C code. 5 | 6 | @author Satoshi Tanda 7 | 8 | @copyright Copyright (c) 2017-2020, Satoshi Tanda. All rights reserved. 
9 | */ 10 | #define POOL_NX_OPTIN 1 11 | #include "SimpleSvm.hpp" 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | EXTERN_C DRIVER_INITIALIZE DriverEntry; 18 | static DRIVER_UNLOAD SvDriverUnload; 19 | static CALLBACK_FUNCTION SvPowerCallbackRoutine; 20 | 21 | EXTERN_C 22 | VOID 23 | _sgdt ( 24 | _Out_ PVOID Descriptor 25 | ); 26 | 27 | _IRQL_requires_max_(DISPATCH_LEVEL) 28 | _IRQL_requires_min_(PASSIVE_LEVEL) 29 | _IRQL_requires_same_ 30 | DECLSPEC_NORETURN 31 | EXTERN_C 32 | VOID 33 | NTAPI 34 | SvLaunchVm ( 35 | _In_ PVOID HostRsp 36 | ); 37 | 38 | // 39 | // x86-64 defined structures. 40 | // 41 | 42 | // 43 | // See "2-Mbyte PML4E-Long Mode" and "2-Mbyte PDPE-Long Mode". 44 | // 45 | typedef struct _PML4_ENTRY_2MB 46 | { 47 | union 48 | { 49 | UINT64 AsUInt64; 50 | struct 51 | { 52 | UINT64 Valid : 1; // [0] 53 | UINT64 Write : 1; // [1] 54 | UINT64 User : 1; // [2] 55 | UINT64 WriteThrough : 1; // [3] 56 | UINT64 CacheDisable : 1; // [4] 57 | UINT64 Accessed : 1; // [5] 58 | UINT64 Reserved1 : 3; // [6:8] 59 | UINT64 Avl : 3; // [9:11] 60 | UINT64 PageFrameNumber : 40; // [12:51] 61 | UINT64 Reserved2 : 11; // [52:62] 62 | UINT64 NoExecute : 1; // [63] 63 | } Fields; 64 | }; 65 | } PML4_ENTRY_2MB, *PPML4_ENTRY_2MB, 66 | PDPT_ENTRY_2MB, *PPDPT_ENTRY_2MB; 67 | static_assert(sizeof(PML4_ENTRY_2MB) == 8, 68 | "PML4_ENTRY_1GB Size Mismatch"); 69 | 70 | // 71 | // See "2-Mbyte PDE-Long Mode". 72 | // 73 | typedef struct _PD_ENTRY_2MB 74 | { 75 | union 76 | { 77 | UINT64 AsUInt64; 78 | struct 79 | { 80 | UINT64 Valid : 1; // [0] 81 | UINT64 Write : 1; // [1] 82 | UINT64 User : 1; // [2] 83 | UINT64 WriteThrough : 1; // [3] 84 | UINT64 CacheDisable : 1; // [4] 85 | UINT64 Accessed : 1; // [5] 86 | UINT64 Dirty : 1; // [6] 87 | UINT64 LargePage : 1; // [7] 88 | UINT64 Global : 1; // [8] 89 | UINT64 Avl : 3; // [9:11] 90 | UINT64 Pat : 1; // [12] 91 | UINT64 Reserved1 : 8; // [13:20] 92 | UINT64 PageFrameNumber : 31; // [21:51] 93 | UINT64 Reserved2 : 11; // [52:62] 94 | UINT64 NoExecute : 1; // [63] 95 | } Fields; 96 | }; 97 | } PD_ENTRY_2MB, *PPD_ENTRY_2MB; 98 | static_assert(sizeof(PD_ENTRY_2MB) == 8, 99 | "PDE_ENTRY_2MB Size Mismatch"); 100 | 101 | // 102 | // See "GDTR and IDTR Format-Long Mode" 103 | // 104 | #include 105 | typedef struct _DESCRIPTOR_TABLE_REGISTER 106 | { 107 | UINT16 Limit; 108 | ULONG_PTR Base; 109 | } DESCRIPTOR_TABLE_REGISTER, *PDESCRIPTOR_TABLE_REGISTER; 110 | static_assert(sizeof(DESCRIPTOR_TABLE_REGISTER) == 10, 111 | "DESCRIPTOR_TABLE_REGISTER Size Mismatch"); 112 | #include 113 | 114 | // 115 | // See "Long-Mode Segment Descriptors" and some of definitions 116 | // (eg, "Code-Segment Descriptor-Long Mode") 117 | // 118 | typedef struct _SEGMENT_DESCRIPTOR 119 | { 120 | union 121 | { 122 | UINT64 AsUInt64; 123 | struct 124 | { 125 | UINT16 LimitLow; // [0:15] 126 | UINT16 BaseLow; // [16:31] 127 | UINT32 BaseMiddle : 8; // [32:39] 128 | UINT32 Type : 4; // [40:43] 129 | UINT32 System : 1; // [44] 130 | UINT32 Dpl : 2; // [45:46] 131 | UINT32 Present : 1; // [47] 132 | UINT32 LimitHigh : 4; // [48:51] 133 | UINT32 Avl : 1; // [52] 134 | UINT32 LongMode : 1; // [53] 135 | UINT32 DefaultBit : 1; // [54] 136 | UINT32 Granularity : 1; // [55] 137 | UINT32 BaseHigh : 8; // [56:63] 138 | } Fields; 139 | }; 140 | } SEGMENT_DESCRIPTOR, *PSEGMENT_DESCRIPTOR; 141 | static_assert(sizeof(SEGMENT_DESCRIPTOR) == 8, 142 | "SEGMENT_DESCRIPTOR Size Mismatch"); 143 | 144 | typedef struct _SEGMENT_ATTRIBUTE 145 | { 146 | union 147 | { 148 | UINT16 AsUInt16; 149 | struct 
150 | { 151 | UINT16 Type : 4; // [0:3] 152 | UINT16 System : 1; // [4] 153 | UINT16 Dpl : 2; // [5:6] 154 | UINT16 Present : 1; // [7] 155 | UINT16 Avl : 1; // [8] 156 | UINT16 LongMode : 1; // [9] 157 | UINT16 DefaultBit : 1; // [10] 158 | UINT16 Granularity : 1; // [11] 159 | UINT16 Reserved1 : 4; // [12:15] 160 | } Fields; 161 | }; 162 | } SEGMENT_ATTRIBUTE, *PSEGMENT_ATTRIBUTE; 163 | static_assert(sizeof(SEGMENT_ATTRIBUTE) == 2, 164 | "SEGMENT_ATTRIBUTE Size Mismatch"); 165 | 166 | // 167 | // SimpleSVM specific structures. 168 | // 169 | 170 | typedef struct _PML4E_TREE 171 | { 172 | DECLSPEC_ALIGN(PAGE_SIZE) PDPT_ENTRY_2MB PdptEntries[512]; 173 | DECLSPEC_ALIGN(PAGE_SIZE) PD_ENTRY_2MB PdEntries[512][512]; 174 | } PML4E_TREE, *PPML4E_TREE; 175 | 176 | typedef struct _SHARED_VIRTUAL_PROCESSOR_DATA 177 | { 178 | PVOID MsrPermissionsMap; 179 | DECLSPEC_ALIGN(PAGE_SIZE) PML4_ENTRY_2MB Pml4Entries[512]; 180 | DECLSPEC_ALIGN(PAGE_SIZE) PML4E_TREE Pml4eTrees[2]; // For 1TB 181 | } SHARED_VIRTUAL_PROCESSOR_DATA, *PSHARED_VIRTUAL_PROCESSOR_DATA; 182 | 183 | typedef struct _VIRTUAL_PROCESSOR_DATA 184 | { 185 | union 186 | { 187 | // 188 | // Low HostStackLimit[0] StackLimit 189 | // ^ ... 190 | // ^ HostStackLimit[KERNEL_STACK_SIZE - 2] StackBase 191 | // High HostStackLimit[KERNEL_STACK_SIZE - 1] StackBase 192 | // 193 | DECLSPEC_ALIGN(PAGE_SIZE) UINT8 HostStackLimit[KERNEL_STACK_SIZE]; 194 | struct 195 | { 196 | UINT8 StackContents[KERNEL_STACK_SIZE - (sizeof(PVOID) * 6) - sizeof(KTRAP_FRAME)]; 197 | KTRAP_FRAME TrapFrame; 198 | UINT64 GuestVmcbPa; // HostRsp 199 | UINT64 HostVmcbPa; 200 | struct _VIRTUAL_PROCESSOR_DATA* Self; 201 | PSHARED_VIRTUAL_PROCESSOR_DATA SharedVpData; 202 | UINT64 Padding1; // To keep HostRsp 16 bytes aligned 203 | UINT64 Reserved1; 204 | } HostStackLayout; 205 | }; 206 | 207 | DECLSPEC_ALIGN(PAGE_SIZE) VMCB GuestVmcb; 208 | DECLSPEC_ALIGN(PAGE_SIZE) VMCB HostVmcb; 209 | DECLSPEC_ALIGN(PAGE_SIZE) UINT8 HostStateArea[PAGE_SIZE]; 210 | } VIRTUAL_PROCESSOR_DATA, *PVIRTUAL_PROCESSOR_DATA; 211 | static_assert(sizeof(VIRTUAL_PROCESSOR_DATA) == KERNEL_STACK_SIZE + PAGE_SIZE * 3, 212 | "VIRTUAL_PROCESSOR_DATA Size Mismatch"); 213 | 214 | typedef struct _GUEST_REGISTERS 215 | { 216 | UINT64 R15; 217 | UINT64 R14; 218 | UINT64 R13; 219 | UINT64 R12; 220 | UINT64 R11; 221 | UINT64 R10; 222 | UINT64 R9; 223 | UINT64 R8; 224 | UINT64 Rdi; 225 | UINT64 Rsi; 226 | UINT64 Rbp; 227 | UINT64 Rsp; 228 | UINT64 Rbx; 229 | UINT64 Rdx; 230 | UINT64 Rcx; 231 | UINT64 Rax; 232 | } GUEST_REGISTERS, *PGUEST_REGISTERS; 233 | 234 | typedef struct _GUEST_CONTEXT 235 | { 236 | PGUEST_REGISTERS VpRegs; 237 | BOOLEAN ExitVm; 238 | } GUEST_CONTEXT, *PGUEST_CONTEXT; 239 | 240 | 241 | // 242 | // x86-64 defined constants. 243 | // 244 | #define IA32_MSR_PAT 0x00000277 245 | #define IA32_MSR_EFER 0xc0000080 246 | 247 | #define EFER_SVME (1UL << 12) 248 | 249 | #define RPL_MASK 3 250 | #define DPL_SYSTEM 0 251 | 252 | #define CPUID_FN8000_0001_ECX_SVM (1UL << 2) 253 | #define CPUID_FN0000_0001_ECX_HYPERVISOR_PRESENT (1UL << 31) 254 | #define CPUID_FN8000_000A_EDX_NP (1UL << 0) 255 | 256 | #define CPUID_MAX_STANDARD_FN_NUMBER_AND_VENDOR_STRING 0x00000000 257 | #define CPUID_PROCESSOR_AND_PROCESSOR_FEATURE_IDENTIFIERS 0x00000001 258 | #define CPUID_PROCESSOR_AND_PROCESSOR_FEATURE_IDENTIFIERS_EX 0x80000001 259 | #define CPUID_SVM_FEATURES 0x8000000a 260 | // 261 | // The Microsoft Hypervisor interface defined constants. 
262 | // 263 | #define CPUID_HV_VENDOR_AND_MAX_FUNCTIONS 0x40000000 264 | #define CPUID_HV_INTERFACE 0x40000001 265 | 266 | // 267 | // SimpleSVM specific constants. 268 | // 269 | #define CPUID_UNLOAD_SIMPLE_SVM 0x41414141 270 | #define CPUID_HV_MAX CPUID_HV_INTERFACE 271 | 272 | /*! 273 | @brief Breaks into a kernel debugger when it is present. 274 | 275 | @details This macro is emits software breakpoint that only hits when a 276 | kernel debugger is present. This macro is useful because it does 277 | not change the current frame unlike the DbgBreakPoint function, 278 | and breakpoint by this macro can be overwritten with NOP without 279 | impacting other breakpoints. 280 | */ 281 | #define SV_DEBUG_BREAK() \ 282 | if (KD_DEBUGGER_NOT_PRESENT) \ 283 | { \ 284 | NOTHING; \ 285 | } \ 286 | else \ 287 | { \ 288 | __debugbreak(); \ 289 | } \ 290 | reinterpret_cast(0) 291 | 292 | // 293 | // A power state callback handle. 294 | // 295 | static PVOID g_PowerCallbackRegistration; 296 | 297 | /*! 298 | @brief Sends a message to the kernel debugger. 299 | 300 | @param[in] Format - The format string to print. 301 | */ 302 | #pragma prefast(push) 303 | #pragma prefast(disable : 26826, "C-style variable arguments needed for DbgPrint.") 304 | _IRQL_requires_max_(DISPATCH_LEVEL) 305 | _IRQL_requires_same_ 306 | static 307 | VOID 308 | SvDebugPrint ( 309 | _In_z_ _Printf_format_string_ PCSTR Format, 310 | ... 311 | ) 312 | { 313 | va_list argList; 314 | 315 | va_start(argList, Format); 316 | vDbgPrintExWithPrefix("[SimpleSvm] ", 317 | DPFLTR_IHVDRIVER_ID, 318 | DPFLTR_ERROR_LEVEL, 319 | Format, 320 | argList); 321 | va_end(argList); 322 | } 323 | #pragma prefast(pop) 324 | 325 | /*! 326 | @brief Allocates page aligned, zero filled physical memory. 327 | 328 | @details This function allocates page aligned nonpaged pool. The 329 | allocated memory is zero filled and must be freed with 330 | SvFreePageAlingedPhysicalMemory. On Windows 8 and later versions 331 | of Windows, the allocated memory is non executable. 332 | 333 | @param[in] NumberOfBytes - A size of memory to allocate in byte. This must 334 | be equal or greater than PAGE_SIZE. 335 | 336 | @result A pointer to the allocated memory filled with zero; or NULL when 337 | there is insufficient memory to allocate requested size. 338 | */ 339 | __drv_allocatesMem(Mem) 340 | _Post_writable_byte_size_(NumberOfBytes) 341 | _Post_maybenull_ 342 | _IRQL_requires_max_(DISPATCH_LEVEL) 343 | _IRQL_requires_same_ 344 | _Must_inspect_result_ 345 | static 346 | PVOID 347 | SvAllocatePageAlingedPhysicalMemory ( 348 | _In_ SIZE_T NumberOfBytes 349 | ) 350 | { 351 | PVOID memory; 352 | 353 | // 354 | // The size must be equal or greater than PAGE_SIZE in order to allocate 355 | // page aligned memory. 356 | // 357 | NT_ASSERT(NumberOfBytes >= PAGE_SIZE); 358 | 359 | memory = ExAllocatePool2(POOL_FLAG_NON_PAGED, NumberOfBytes, 'MVSS'); 360 | if (memory != nullptr) 361 | { 362 | NT_ASSERT(PAGE_ALIGN(memory) == memory); 363 | RtlZeroMemory(memory, NumberOfBytes); 364 | } 365 | return memory; 366 | } 367 | 368 | /*! 369 | @brief Frees memory allocated by SvAllocatePageAlingedPhysicalMemory. 370 | 371 | @param[in] BaseAddress - The address returned by 372 | SvAllocatePageAlingedPhysicalMemory. 
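
               A minimal usage sketch of this allocate/free pair (illustrative
               only; "data" is a hypothetical local, and real callers in this
               file also check for the NULL return):
               ----
               PVOID data = SvAllocatePageAlingedPhysicalMemory(PAGE_SIZE);
               if (data != nullptr)
               {
                   // The buffer is zero filled, page aligned nonpaged pool.
                   SvFreePageAlingedPhysicalMemory(data);
               }
               ----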
373 | */ 374 | _IRQL_requires_max_(DISPATCH_LEVEL) 375 | _IRQL_requires_same_ 376 | static 377 | VOID 378 | SvFreePageAlingedPhysicalMemory ( 379 | _Pre_notnull_ __drv_freesMem(Mem) PVOID BaseAddress 380 | ) 381 | { 382 | ExFreePoolWithTag(BaseAddress, 'MVSS'); 383 | } 384 | 385 | /*! 386 | @brief Allocates page aligned, zero filled contiguous physical memory. 387 | 388 | @details This function allocates page aligned nonpaged pool where backed 389 | by contiguous physical pages. The allocated memory is zero 390 | filled and must be freed with SvFreeContiguousMemory. The 391 | allocated memory is executable. 392 | 393 | @param[in] NumberOfBytes - A size of memory to allocate in byte. 394 | 395 | @result A pointer to the allocated memory filled with zero; or NULL when 396 | there is insufficient memory to allocate requested size. 397 | */ 398 | _Post_writable_byte_size_(NumberOfBytes) 399 | _Post_maybenull_ 400 | _IRQL_requires_max_(DISPATCH_LEVEL) 401 | _IRQL_requires_same_ 402 | _Must_inspect_result_ 403 | static 404 | PVOID 405 | SvAllocateContiguousMemory ( 406 | _In_ SIZE_T NumberOfBytes 407 | ) 408 | { 409 | PVOID memory; 410 | PHYSICAL_ADDRESS boundary, lowest, highest; 411 | 412 | boundary.QuadPart = lowest.QuadPart = 0; 413 | highest.QuadPart = -1; 414 | 415 | memory = MmAllocateContiguousNodeMemory(NumberOfBytes, 416 | lowest, 417 | highest, 418 | boundary, 419 | PAGE_READWRITE, 420 | MM_ANY_NODE_OK); 421 | if (memory != nullptr) 422 | { 423 | RtlZeroMemory(memory, NumberOfBytes); 424 | } 425 | return memory; 426 | } 427 | 428 | /*! 429 | @brief Frees memory allocated by SvAllocateContiguousMemory. 430 | 431 | @param[in] BaseAddress - The address returned by SvAllocateContiguousMemory. 432 | */ 433 | _IRQL_requires_max_(DISPATCH_LEVEL) 434 | _IRQL_requires_same_ 435 | static 436 | VOID 437 | SvFreeContiguousMemory ( 438 | _In_ PVOID BaseAddress 439 | ) 440 | { 441 | MmFreeContiguousMemory(BaseAddress); 442 | } 443 | 444 | /*! 445 | @brief Injects #GP with 0 of error code. 446 | 447 | @param[in,out] VpData - Per processor data. 448 | */ 449 | _IRQL_requires_same_ 450 | static 451 | VOID 452 | SvInjectGeneralProtectionException ( 453 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData 454 | ) 455 | { 456 | EVENTINJ event; 457 | 458 | // 459 | // Inject #GP(vector = 13, type = 3 = exception) with a valid error code. 460 | // An error code are always zero. See "#GP-General-Protection Exception 461 | // (Vector 13)" for details about the error code. 462 | // 463 | event.AsUInt64 = 0; 464 | event.Fields.Vector = 13; 465 | event.Fields.Type = 3; 466 | event.Fields.ErrorCodeValid = 1; 467 | event.Fields.Valid = 1; 468 | VpData->GuestVmcb.ControlArea.EventInj = event.AsUInt64; 469 | } 470 | 471 | /*! 472 | @brief Handles #VMEXIT due to execution of the CPUID instructions. 473 | 474 | @details This function returns unmodified results of the CPUID 475 | instruction, except for few cases to indicate presence of 476 | the hypervisor, and to process an unload request. 477 | 478 | CPUID leaf 0x40000000 and 0x40000001 return modified values 479 | to conform to the hypervisor interface to some extent. See 480 | "Requirements for implementing the Microsoft Hypervisor interface" 481 | https://msdn.microsoft.com/en-us/library/windows/hardware/Dn613994(v=vs.85).aspx 482 | for details of the interface. 483 | 484 | @param[in,out] VpData - Per processor data. 485 | @param[in,out] GuestContext - Guest's GPRs. 
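
               As a rough usage sketch of the unload back-door handled here
               (kernel mode only; this mirrors what SvDevirtualizeProcessor
               does later in this file):
               ----
               int regs[4];   // EAX, EBX, ECX, and EDX
               __cpuidex(regs, CPUID_UNLOAD_SIMPLE_SVM, CPUID_UNLOAD_SIMPLE_SVM);
               // When the hypervisor honors the request, ECX is set to 'SSVM'
               // and EDX:EAX holds the address of the per processor data that
               // the caller must free.
               ----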
486 | */ 487 | _IRQL_requires_same_ 488 | static 489 | VOID 490 | SvHandleCpuid ( 491 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData, 492 | _Inout_ PGUEST_CONTEXT GuestContext 493 | ) 494 | { 495 | int registers[4]; // EAX, EBX, ECX, and EDX 496 | int leaf, subLeaf; 497 | SEGMENT_ATTRIBUTE attribute; 498 | 499 | // 500 | // Execute CPUID as requested. 501 | // 502 | leaf = static_cast(GuestContext->VpRegs->Rax); 503 | subLeaf = static_cast(GuestContext->VpRegs->Rcx); 504 | __cpuidex(registers, leaf, subLeaf); 505 | 506 | switch (leaf) 507 | { 508 | case CPUID_PROCESSOR_AND_PROCESSOR_FEATURE_IDENTIFIERS: 509 | // 510 | // Indicate presence of a hypervisor by setting the bit that are 511 | // reserved for use by hypervisor to indicate guest status. See "CPUID 512 | // Fn0000_0001_ECX Feature Identifiers". 513 | // 514 | registers[2] |= CPUID_FN0000_0001_ECX_HYPERVISOR_PRESENT; 515 | break; 516 | case CPUID_HV_VENDOR_AND_MAX_FUNCTIONS: 517 | // 518 | // Return a maximum supported hypervisor CPUID leaf range and a vendor 519 | // ID signature as required by the spec. 520 | // 521 | registers[0] = CPUID_HV_MAX; 522 | registers[1] = 'pmiS'; // "SimpleSvm " 523 | registers[2] = 'vSel'; 524 | registers[3] = ' m'; 525 | break; 526 | case CPUID_HV_INTERFACE: 527 | // 528 | // Return non Hv#1 value. This indicate that the SimpleSvm does NOT 529 | // conform to the Microsoft hypervisor interface. 530 | // 531 | registers[0] = '0#vH'; // Hv#0 532 | registers[1] = registers[2] = registers[3] = 0; 533 | break; 534 | case CPUID_UNLOAD_SIMPLE_SVM: 535 | if (subLeaf == CPUID_UNLOAD_SIMPLE_SVM) 536 | { 537 | // 538 | // Unload itself if the request is from the kernel mode. 539 | // 540 | attribute.AsUInt16 = VpData->GuestVmcb.StateSaveArea.SsAttrib; 541 | if (attribute.Fields.Dpl == DPL_SYSTEM) 542 | { 543 | GuestContext->ExitVm = TRUE; 544 | } 545 | } 546 | break; 547 | default: 548 | break; 549 | } 550 | 551 | // 552 | // Update guest's GPRs with results. 553 | // 554 | GuestContext->VpRegs->Rax = registers[0]; 555 | GuestContext->VpRegs->Rbx = registers[1]; 556 | GuestContext->VpRegs->Rcx = registers[2]; 557 | GuestContext->VpRegs->Rdx = registers[3]; 558 | 559 | // 560 | // Debug prints results. Very important to note that any use of API from 561 | // the host context is unsafe and absolutely avoided, unless the API is 562 | // documented to be accessible on IRQL IPI_LEVEL+. This is because 563 | // interrupts are disabled when host code is running, and IPI is not going 564 | // to be delivered when it is issued. 565 | // 566 | // This code is not exception and violating this rule. The reasons for this 567 | // code are to demonstrate a bad example, and simply show that the SimpleSvm 568 | // is functioning for a test purpose. 569 | // 570 | if (KeGetCurrentIrql() <= DISPATCH_LEVEL) 571 | { 572 | SvDebugPrint("CPUID: %08x-%08x : %08x %08x %08x %08x\n", 573 | leaf, 574 | subLeaf, 575 | registers[0], 576 | registers[1], 577 | registers[2], 578 | registers[3]); 579 | } 580 | 581 | // 582 | // Then, advance RIP to "complete" the instruction. 583 | // 584 | VpData->GuestVmcb.StateSaveArea.Rip = VpData->GuestVmcb.ControlArea.NRip; 585 | } 586 | 587 | /*! 588 | @brief Handles #VMEXIT due to execution of the WRMSR and RDMSR 589 | instructions. 590 | 591 | @details This protects EFER.SVME from being cleared by the guest by 592 | injecting #GP when it is about to be cleared. For other MSR 593 | access, it passes-through. 594 | 595 | @param[in,out] VpData - Per processor data. 
596 | @param[in,out] GuestContext - Guest's GPRs. 597 | */ 598 | _IRQL_requires_same_ 599 | static 600 | VOID 601 | SvHandleMsrAccess ( 602 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData, 603 | _Inout_ PGUEST_CONTEXT GuestContext 604 | ) 605 | { 606 | ULARGE_INTEGER value; 607 | UINT32 msr; 608 | BOOLEAN writeAccess; 609 | 610 | msr = GuestContext->VpRegs->Rcx & MAXUINT32; 611 | writeAccess = (VpData->GuestVmcb.ControlArea.ExitInfo1 != 0); 612 | 613 | // 614 | // If IA32_MSR_EFER is accessed for write, we must protect the EFER_SVME bit 615 | // from being cleared. 616 | // 617 | if (msr == IA32_MSR_EFER) 618 | { 619 | // 620 | // #VMEXIT on IA32_MSR_EFER access should only occur on write access. 621 | // 622 | NT_ASSERT(writeAccess != FALSE); 623 | 624 | value.LowPart = GuestContext->VpRegs->Rax & MAXUINT32; 625 | value.HighPart = GuestContext->VpRegs->Rdx & MAXUINT32; 626 | if ((value.QuadPart & EFER_SVME) == 0) 627 | { 628 | // 629 | // Inject #GP if the guest attempts to clear the SVME bit. Protection of 630 | // this bit is required because clearing the bit while guest is running 631 | // leads to undefined behavior. 632 | // 633 | SvInjectGeneralProtectionException(VpData); 634 | return; 635 | } 636 | 637 | // 638 | // Otherwise, update the MSR as requested. Important to note that the value 639 | // should be checked not to allow any illegal values, and inject #GP as 640 | // needed. Otherwise, the hypervisor attempts to resume the guest with an 641 | // illegal EFER and immediately receives #VMEXIT due to VMEXIT_INVALID, 642 | // which in our case, results in a bug check. See "Extended Feature Enable 643 | // Register (EFER)" for what values are allowed. 644 | // 645 | // This code does not implement the check intentionally, for simplicity. 646 | // 647 | VpData->GuestVmcb.StateSaveArea.Efer = value.QuadPart; 648 | } 649 | else 650 | { 651 | // 652 | // If the MSR being accessed is not IA32_MSR_EFER, assert that #VMEXIT 653 | // can only occur on access to MSR outside the ranges controlled with 654 | // the MSR permissions map. This is true because the map is configured 655 | // not to intercept any MSR access but IA32_MSR_EFER. See 656 | // "MSR Ranges Covered by MSRPM" in "MSR Intercepts" for the MSR ranges 657 | // controlled by the map. 658 | // 659 | // Note that VMware Workstation has a bug that access to unimplemented 660 | // MSRs unconditionally causes #VMEXIT ignoring bits in the MSR 661 | // permissions map. This can be tested by reading MSR zero, for example. 662 | // 663 | NT_ASSERT(((msr > 0x00001fff) && (msr < 0xc0000000)) || 664 | ((msr > 0xc0001fff) && (msr < 0xc0010000)) || 665 | (msr > 0xc0011fff)); 666 | 667 | // 668 | // Execute WRMSR or RDMSR on behalf of the guest. Important that this 669 | // can cause bug check when the guest tries to access unimplemented MSR 670 | // *even within the SEH block* because the below WRMSR or RDMSR raises 671 | // #GP and are not protected by the SEH block (or cannot be protected 672 | // either as this code run outside the thread stack region Windows 673 | // requires to proceed SEH). Hypervisors typically handle this by noop-ing 674 | // WRMSR and returning zero for RDMSR with non-architecturally defined 675 | // MSRs. Alternatively, one can probe which MSRs should cause #GP prior 676 | // to installation of a hypervisor and the hypervisor can emulate the 677 | // results. 
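        //
        // As a rough sketch only (not what the code below does), such a
        // fallback could look like the following, where SvIsMsrKnownSafe is a
        // hypothetical helper backed by a list of MSRs probed beforehand:
        //
        //     if (SvIsMsrKnownSafe(msr))
        //     {
        //         value.QuadPart = __readmsr(msr);    // known to not raise #GP
        //     }
        //     else
        //     {
        //         value.QuadPart = 0;                 // emulate RDMSR as zero
        //     }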
678 | // 679 | if (writeAccess != FALSE) 680 | { 681 | value.LowPart = GuestContext->VpRegs->Rax & MAXUINT32; 682 | value.HighPart = GuestContext->VpRegs->Rdx & MAXUINT32; 683 | __writemsr(msr, value.QuadPart); 684 | } 685 | else 686 | { 687 | value.QuadPart = __readmsr(msr); 688 | GuestContext->VpRegs->Rax = value.LowPart; 689 | GuestContext->VpRegs->Rdx = value.HighPart; 690 | } 691 | } 692 | 693 | // 694 | // Then, advance RIP to "complete" the instruction. 695 | // 696 | VpData->GuestVmcb.StateSaveArea.Rip = VpData->GuestVmcb.ControlArea.NRip; 697 | } 698 | 699 | /*! 700 | @brief Handles #VMEXIT due to execution of the VMRUN instruction. 701 | 702 | @details This function always injects #GP to the guest. 703 | 704 | @param[in,out] VpData - Per processor data. 705 | @param[in,out] GuestContext - Guest's GPRs. 706 | */ 707 | _IRQL_requires_same_ 708 | static 709 | VOID 710 | SvHandleVmrun ( 711 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData, 712 | _Inout_ PGUEST_CONTEXT GuestContext 713 | ) 714 | { 715 | UNREFERENCED_PARAMETER(GuestContext); 716 | 717 | SvInjectGeneralProtectionException(VpData); 718 | } 719 | 720 | /*! 721 | @brief C-level entry point of the host code called from SvLaunchVm. 722 | 723 | @details This function loads save host state first, and then, handles 724 | #VMEXIT which may or may not change guest's state via VpData 725 | or GuestRegisters. 726 | 727 | Interrupts are disabled when this function is called due to 728 | the cleared GIF. Not all host state are loaded yet, so do it 729 | with the VMLOAD instruction. 730 | 731 | If the #VMEXIT handler detects a request to unload the 732 | hypervisor, this function loads guest state, disables SVM 733 | and returns to execution flow where the #VMEXIT triggered. 734 | 735 | @param[in,out] VpData - Per processor data. 736 | @param[in,out] GuestRegisters - Guest's GPRs. 737 | 738 | @result TRUE when virtualization is terminated; otherwise FALSE. 739 | */ 740 | _IRQL_requires_same_ 741 | EXTERN_C 742 | BOOLEAN 743 | NTAPI 744 | SvHandleVmExit ( 745 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData, 746 | _Inout_ PGUEST_REGISTERS GuestRegisters 747 | ) 748 | { 749 | GUEST_CONTEXT guestContext; 750 | KIRQL oldIrql; 751 | 752 | guestContext.VpRegs = GuestRegisters; 753 | guestContext.ExitVm = FALSE; 754 | 755 | // 756 | // Load some host state that are not loaded on #VMEXIT. 757 | // 758 | __svm_vmload(VpData->HostStackLayout.HostVmcbPa); 759 | 760 | NT_ASSERT(VpData->HostStackLayout.Reserved1 == MAXUINT64); 761 | 762 | // 763 | // Raise the IRQL to the DISPATCH_LEVEL level. This has no actual effect since 764 | // interrupts are disabled at #VMEXI but warrants bug check when some of 765 | // kernel API that are not usable on this context is called with Driver 766 | // Verifier. This protects developers from accidentally writing such #VMEXIT 767 | // handling code. This should actually raise IRQL to HIGH_LEVEL to represent 768 | // this running context better, but our Logger code is not designed to run at 769 | // that level unfortunately. Finally, note that this API is a thin wrapper 770 | // of mov-to-CR8 on x64 and safe to call on this context. 771 | // 772 | oldIrql = KeGetCurrentIrql(); 773 | if (oldIrql < DISPATCH_LEVEL) 774 | { 775 | KeRaiseIrqlToDpcLevel(); 776 | } 777 | 778 | // 779 | // Guest's RAX is overwritten by the host's value on #VMEXIT and saved in 780 | // the VMCB instead. Reflect the guest RAX to the context. 
781 | // 782 | GuestRegisters->Rax = VpData->GuestVmcb.StateSaveArea.Rax; 783 | 784 | // 785 | // Update the _KTRAP_FRAME structure values in hypervisor stack, so that 786 | // Windbg can reconstruct call stack of the guest during debug session. 787 | // This is optional but very useful thing to do for debugging. 788 | // 789 | VpData->HostStackLayout.TrapFrame.Rsp = VpData->GuestVmcb.StateSaveArea.Rsp; 790 | VpData->HostStackLayout.TrapFrame.Rip = VpData->GuestVmcb.ControlArea.NRip; 791 | 792 | // 793 | // Handle #VMEXIT according with its reason. 794 | // 795 | switch (VpData->GuestVmcb.ControlArea.ExitCode) 796 | { 797 | case VMEXIT_CPUID: 798 | SvHandleCpuid(VpData, &guestContext); 799 | break; 800 | case VMEXIT_MSR: 801 | SvHandleMsrAccess(VpData, &guestContext); 802 | break; 803 | case VMEXIT_VMRUN: 804 | SvHandleVmrun(VpData, &guestContext); 805 | break; 806 | default: 807 | SV_DEBUG_BREAK(); 808 | #pragma prefast(suppress : __WARNING_USE_OTHER_FUNCTION, "Unrecoverable path.") 809 | KeBugCheck(MANUALLY_INITIATED_CRASH); 810 | } 811 | 812 | // 813 | // Again, no effect to change IRQL but restoring it here since a #VMEXIT 814 | // handler where the developers most likely call the kernel API inadvertently 815 | // is already executed. 816 | // 817 | if (oldIrql < DISPATCH_LEVEL) 818 | { 819 | KeLowerIrql(oldIrql); 820 | } 821 | 822 | // 823 | // Terminate the SimpleSvm hypervisor if requested. 824 | // 825 | if (guestContext.ExitVm != FALSE) 826 | { 827 | NT_ASSERT(VpData->GuestVmcb.ControlArea.ExitCode == VMEXIT_CPUID); 828 | 829 | // 830 | // Set return values of CPUID instruction as follows: 831 | // RBX = An address to return 832 | // RCX = A stack pointer to restore 833 | // EDX:EAX = An address of per processor data to be freed by the caller 834 | // 835 | guestContext.VpRegs->Rax = reinterpret_cast(VpData) & MAXUINT32; 836 | guestContext.VpRegs->Rbx = VpData->GuestVmcb.ControlArea.NRip; 837 | guestContext.VpRegs->Rcx = VpData->GuestVmcb.StateSaveArea.Rsp; 838 | guestContext.VpRegs->Rdx = reinterpret_cast(VpData) >> 32; 839 | 840 | // 841 | // Load guest state (currently host state is loaded). 842 | // 843 | __svm_vmload(MmGetPhysicalAddress(&VpData->GuestVmcb).QuadPart); 844 | 845 | // 846 | // Set the global interrupt flag (GIF) but still disable interrupts by 847 | // clearing IF. GIF must be set to return to the normal execution, but 848 | // interruptions are not desirable until SVM is disabled as it would 849 | // execute random kernel-code in the host context. 850 | // 851 | _disable(); 852 | __svm_stgi(); 853 | 854 | // 855 | // Disable SVM, and restore the guest RFLAGS. This may enable interrupts. 856 | // Some of arithmetic flags are destroyed by the subsequent code. 857 | // 858 | __writemsr(IA32_MSR_EFER, __readmsr(IA32_MSR_EFER) & ~EFER_SVME); 859 | __writeeflags(VpData->GuestVmcb.StateSaveArea.Rflags); 860 | goto Exit; 861 | } 862 | 863 | // 864 | // Reflect potentially updated guest's RAX to VMCB. Again, unlike other GPRs, 865 | // RAX is loaded from VMCB on VMRUN. 866 | // 867 | VpData->GuestVmcb.StateSaveArea.Rax = guestContext.VpRegs->Rax; 868 | 869 | Exit: 870 | NT_ASSERT(VpData->HostStackLayout.Reserved1 == MAXUINT64); 871 | return guestContext.ExitVm; 872 | } 873 | 874 | /*! 875 | @brief Returns attributes of a segment specified by the segment selector. 876 | 877 | @details This function locates a segment descriptor from the segment 878 | selector and the GDT base, extracts attributes of the segment, 879 | and returns it. 
The returned value is the same as what the "dg" 880 | command of Windbg shows as "Flags". Here is an example output 881 | with 0x18 of the selector: 882 | ---- 883 | 0: kd> dg 18 884 | P Si Gr Pr Lo 885 | Sel Base Limit Type l ze an es ng Flags 886 | ---- ----------------- ----------------- ---------- - -- -- -- -- -------- 887 | 0018 00000000`00000000 00000000`00000000 Data RW Ac 0 Bg By P Nl 00000493 888 | ---- 889 | 890 | @param[in] SegmentSelector - A segment selector to get attributes of a 891 | corresponding descriptor. 892 | @param[in] GdtBase - A base address of GDT. 893 | 894 | @result Attributes of the segment. 895 | */ 896 | _IRQL_requires_same_ 897 | _Check_return_ 898 | static 899 | UINT16 900 | SvGetSegmentAccessRight ( 901 | _In_ UINT16 SegmentSelector, 902 | _In_ ULONG_PTR GdtBase 903 | ) 904 | { 905 | PSEGMENT_DESCRIPTOR descriptor; 906 | SEGMENT_ATTRIBUTE attribute; 907 | 908 | // 909 | // Get a segment descriptor corresponds to the specified segment selector. 910 | // 911 | descriptor = reinterpret_cast( 912 | GdtBase + (SegmentSelector & ~RPL_MASK)); 913 | 914 | // 915 | // Extract all attribute fields in the segment descriptor to a structure 916 | // that describes only attributes (as opposed to the segment descriptor 917 | // consists of multiple other fields). 918 | // 919 | attribute.Fields.Type = descriptor->Fields.Type; 920 | attribute.Fields.System = descriptor->Fields.System; 921 | attribute.Fields.Dpl = descriptor->Fields.Dpl; 922 | attribute.Fields.Present = descriptor->Fields.Present; 923 | attribute.Fields.Avl = descriptor->Fields.Avl; 924 | attribute.Fields.LongMode = descriptor->Fields.LongMode; 925 | attribute.Fields.DefaultBit = descriptor->Fields.DefaultBit; 926 | attribute.Fields.Granularity = descriptor->Fields.Granularity; 927 | attribute.Fields.Reserved1 = 0; 928 | 929 | return attribute.AsUInt16; 930 | } 931 | 932 | /*! 933 | @brief Tests whether the SimpleSvm hypervisor is installed. 934 | 935 | @details This function checks a result of CPUID leaf 40000000h, which 936 | should return a vendor name of the hypervisor if any of those 937 | who implement the Microsoft Hypervisor interface is installed. 938 | If the SimpleSvm hypervisor is installed, this should return 939 | "SimpleSvm", and if no hypervisor is installed, it the result of 940 | CPUID is undefined. For more details of the interface, see 941 | "Requirements for implementing the Microsoft Hypervisor interface" 942 | https://msdn.microsoft.com/en-us/library/windows/hardware/Dn613994(v=vs.85).aspx 943 | 944 | @result TRUE when the SimpleSvm is installed; otherwise, FALSE. 945 | */ 946 | _IRQL_requires_max_(DISPATCH_LEVEL) 947 | _IRQL_requires_min_(PASSIVE_LEVEL) 948 | _IRQL_requires_same_ 949 | _Check_return_ 950 | static 951 | BOOLEAN 952 | SvIsSimpleSvmHypervisorInstalled ( 953 | VOID 954 | ) 955 | { 956 | int registers[4]; // EAX, EBX, ECX, and EDX 957 | char vendorId[13]; 958 | 959 | // 960 | // When the SimpleSvm hypervisor is installed, CPUID leaf 40000000h will 961 | // return "SimpleSvm " as the vendor name. 962 | // 963 | __cpuid(registers, CPUID_HV_VENDOR_AND_MAX_FUNCTIONS); 964 | RtlCopyMemory(vendorId + 0, ®isters[1], sizeof(registers[1])); 965 | RtlCopyMemory(vendorId + 4, ®isters[2], sizeof(registers[2])); 966 | RtlCopyMemory(vendorId + 8, ®isters[3], sizeof(registers[3])); 967 | vendorId[12] = ANSI_NULL; 968 | 969 | return (strcmp(vendorId, "SimpleSvm ") == 0); 970 | } 971 | 972 | /*! 973 | @brief Virtualizes the current processor. 
974 | 975 | @details This function enables SVM, initialize VMCB with the current 976 | processor state, and enters the guest mode on the current 977 | processor. 978 | 979 | @param[in,out] VpData - The address of per processor data. 980 | @param[in] SharedVpData - The address of share data. 981 | @param[in] ContextRecord - The address of CONETEXT to use as an initial 982 | context of the processor after it is virtualized. 983 | */ 984 | _IRQL_requires_max_(DISPATCH_LEVEL) 985 | _IRQL_requires_min_(PASSIVE_LEVEL) 986 | _IRQL_requires_same_ 987 | static 988 | VOID 989 | SvPrepareForVirtualization ( 990 | _Inout_ PVIRTUAL_PROCESSOR_DATA VpData, 991 | _In_ PSHARED_VIRTUAL_PROCESSOR_DATA SharedVpData, 992 | _In_ const CONTEXT* ContextRecord 993 | ) 994 | { 995 | DESCRIPTOR_TABLE_REGISTER gdtr, idtr; 996 | PHYSICAL_ADDRESS guestVmcbPa, hostVmcbPa, hostStateAreaPa, pml4BasePa, msrpmPa; 997 | 998 | // 999 | // Capture the current GDTR and IDTR to use as initial values of the guest 1000 | // mode. 1001 | // 1002 | _sgdt(&gdtr); 1003 | __sidt(&idtr); 1004 | 1005 | guestVmcbPa = MmGetPhysicalAddress(&VpData->GuestVmcb); 1006 | hostVmcbPa = MmGetPhysicalAddress(&VpData->HostVmcb); 1007 | hostStateAreaPa = MmGetPhysicalAddress(&VpData->HostStateArea); 1008 | pml4BasePa = MmGetPhysicalAddress(&SharedVpData->Pml4Entries); 1009 | msrpmPa = MmGetPhysicalAddress(SharedVpData->MsrPermissionsMap); 1010 | 1011 | // 1012 | // Configure to trigger #VMEXIT with CPUID and VMRUN instructions. CPUID is 1013 | // intercepted to present existence of the SimpleSvm hypervisor and provide 1014 | // an interface to ask it to unload itself. 1015 | // 1016 | // VMRUN is intercepted because it is required by the processor to enter the 1017 | // guest mode; otherwise, #VMEXIT occurs due to VMEXIT_INVALID when a 1018 | // processor attempts to enter the guest mode. See "Canonicalization and 1019 | // Consistency Checks" on "VMRUN Instruction". 1020 | // 1021 | VpData->GuestVmcb.ControlArea.InterceptMisc1 |= SVM_INTERCEPT_MISC1_CPUID; 1022 | VpData->GuestVmcb.ControlArea.InterceptMisc2 |= SVM_INTERCEPT_MISC2_VMRUN; 1023 | 1024 | // 1025 | // Also, configure to trigger #VMEXIT on MSR access as configured by the 1026 | // MSRPM. In our case, write to IA32_MSR_EFER is intercepted. 1027 | // 1028 | VpData->GuestVmcb.ControlArea.InterceptMisc1 |= SVM_INTERCEPT_MISC1_MSR_PROT; 1029 | VpData->GuestVmcb.ControlArea.MsrpmBasePa = msrpmPa.QuadPart; 1030 | 1031 | // 1032 | // Specify guest's address space ID (ASID). TLB is maintained by the ID for 1033 | // guests. Use the same value for all processors since all of them run a 1034 | // single guest in our case. Use 1 as the most likely supported ASID by the 1035 | // processor. The actual the supported number of ASID can be obtained with 1036 | // CPUID. See "CPUID Fn8000_000A_EBX SVM Revision and Feature 1037 | // Identification". Zero of ASID is reserved and illegal. 1038 | // 1039 | VpData->GuestVmcb.ControlArea.GuestAsid = 1; 1040 | 1041 | // 1042 | // Enable Nested Page Tables. By enabling this, the processor performs the 1043 | // nested page walk, that involves with an additional page walk to translate 1044 | // a guest physical address to a system physical address. An address of 1045 | // nested page tables is specified by the NCr3 field of VMCB. 1046 | // 1047 | // We have already build the nested page tables with SvBuildNestedPageTables. 
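    //
    // As a rough illustration only (see SvBuildNestedPageTables for the code
    // that actually builds the tables), an identity-mapping 2MB PDE encodes
    // its own guest physical address as the page frame number. Here "guestPa"
    // is a hypothetical 2MB-aligned guest physical address:
    //
    //     PD_ENTRY_2MB pde;
    //     pde.AsUInt64 = 0;
    //     pde.Fields.Valid = 1;                        // present
    //     pde.Fields.Write = 1;                        // writable
    //     pde.Fields.User = 1;                         // accessible
    //     pde.Fields.LargePage = 1;                    // 2MB translation
    //     pde.Fields.PageFrameNumber = guestPa >> 21;  // identity mapping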
1048 | // 1049 | // Note that our hypervisor does not trigger any additional #VMEXIT due to 1050 | // the use of Nested Page Tables since all physical addresses from 0-512 GB 1051 | // are configured to be accessible from the guest. 1052 | // 1053 | VpData->GuestVmcb.ControlArea.NpEnable |= SVM_NP_ENABLE_NP_ENABLE; 1054 | VpData->GuestVmcb.ControlArea.NCr3 = pml4BasePa.QuadPart; 1055 | 1056 | // 1057 | // Set up the initial guest state based on the current system state. Those 1058 | // values are loaded into the processor as guest state when the VMRUN 1059 | // instruction is executed. 1060 | // 1061 | VpData->GuestVmcb.StateSaveArea.GdtrBase = gdtr.Base; 1062 | VpData->GuestVmcb.StateSaveArea.GdtrLimit = gdtr.Limit; 1063 | VpData->GuestVmcb.StateSaveArea.IdtrBase = idtr.Base; 1064 | VpData->GuestVmcb.StateSaveArea.IdtrLimit = idtr.Limit; 1065 | 1066 | VpData->GuestVmcb.StateSaveArea.CsLimit = GetSegmentLimit(ContextRecord->SegCs); 1067 | VpData->GuestVmcb.StateSaveArea.DsLimit = GetSegmentLimit(ContextRecord->SegDs); 1068 | VpData->GuestVmcb.StateSaveArea.EsLimit = GetSegmentLimit(ContextRecord->SegEs); 1069 | VpData->GuestVmcb.StateSaveArea.SsLimit = GetSegmentLimit(ContextRecord->SegSs); 1070 | VpData->GuestVmcb.StateSaveArea.CsSelector = ContextRecord->SegCs; 1071 | VpData->GuestVmcb.StateSaveArea.DsSelector = ContextRecord->SegDs; 1072 | VpData->GuestVmcb.StateSaveArea.EsSelector = ContextRecord->SegEs; 1073 | VpData->GuestVmcb.StateSaveArea.SsSelector = ContextRecord->SegSs; 1074 | VpData->GuestVmcb.StateSaveArea.CsAttrib = SvGetSegmentAccessRight(ContextRecord->SegCs, gdtr.Base); 1075 | VpData->GuestVmcb.StateSaveArea.DsAttrib = SvGetSegmentAccessRight(ContextRecord->SegDs, gdtr.Base); 1076 | VpData->GuestVmcb.StateSaveArea.EsAttrib = SvGetSegmentAccessRight(ContextRecord->SegEs, gdtr.Base); 1077 | VpData->GuestVmcb.StateSaveArea.SsAttrib = SvGetSegmentAccessRight(ContextRecord->SegSs, gdtr.Base); 1078 | 1079 | VpData->GuestVmcb.StateSaveArea.Efer = __readmsr(IA32_MSR_EFER); 1080 | VpData->GuestVmcb.StateSaveArea.Cr0 = __readcr0(); 1081 | VpData->GuestVmcb.StateSaveArea.Cr2 = __readcr2(); 1082 | VpData->GuestVmcb.StateSaveArea.Cr3 = __readcr3(); 1083 | VpData->GuestVmcb.StateSaveArea.Cr4 = __readcr4(); 1084 | VpData->GuestVmcb.StateSaveArea.Rflags = ContextRecord->EFlags; 1085 | VpData->GuestVmcb.StateSaveArea.Rsp = ContextRecord->Rsp; 1086 | VpData->GuestVmcb.StateSaveArea.Rip = ContextRecord->Rip; 1087 | VpData->GuestVmcb.StateSaveArea.GPat = __readmsr(IA32_MSR_PAT); 1088 | 1089 | // 1090 | // Save some of the current state on VMCB. Some of those states are: 1091 | // - FS, GS, TR, LDTR (including all hidden state) 1092 | // - KernelGsBase 1093 | // - STAR, LSTAR, CSTAR, SFMASK 1094 | // - SYSENTER_CS, SYSENTER_ESP, SYSENTER_EIP 1095 | // See "VMSAVE and VMLOAD Instructions" for mode details. 1096 | // 1097 | // Those are restored to the processor right before #VMEXIT with the VMLOAD 1098 | // instruction so that the guest can start its execution with saved state, 1099 | // and also, re-saved to the VMCS with right after #VMEXIT with the VMSAVE 1100 | // instruction so that the host (hypervisor) do not destroy guest's state. 1101 | // 1102 | __svm_vmsave(guestVmcbPa.QuadPart); 1103 | 1104 | // 1105 | // Store data to stack so that the host (hypervisor) can use those values. 
1106 | // 1107 | VpData->HostStackLayout.Reserved1 = MAXUINT64; 1108 | VpData->HostStackLayout.SharedVpData = SharedVpData; 1109 | VpData->HostStackLayout.Self = VpData; 1110 | VpData->HostStackLayout.HostVmcbPa = hostVmcbPa.QuadPart; 1111 | VpData->HostStackLayout.GuestVmcbPa = guestVmcbPa.QuadPart; 1112 | 1113 | // 1114 | // Set an address of the host state area to VM_HSAVE_PA MSR. The processor 1115 | // saves some of the current state on VMRUN and loads them on #VMEXIT. See 1116 | // "VM_HSAVE_PA MSR (C001_0117h)". 1117 | // 1118 | __writemsr(SVM_MSR_VM_HSAVE_PA, hostStateAreaPa.QuadPart); 1119 | 1120 | // 1121 | // Also, save some of the current state to VMCB for the host. This is loaded 1122 | // after #VMEXIT to reproduce the current state for the host (hypervisor). 1123 | // 1124 | __svm_vmsave(hostVmcbPa.QuadPart); 1125 | } 1126 | 1127 | /*! 1128 | @brief Virtualize the current processor. 1129 | 1130 | @details This function enables SVM, initialize VMCB with the current 1131 | processor state, and enters the guest mode on the current 1132 | processor. 1133 | 1134 | @param[in] Context - A pointer of share data. 1135 | 1136 | @result STATUS_SUCCESS on success; otherwise, an appropriate error code. 1137 | */ 1138 | _IRQL_requires_max_(DISPATCH_LEVEL) 1139 | _IRQL_requires_min_(PASSIVE_LEVEL) 1140 | _IRQL_requires_same_ 1141 | _Check_return_ 1142 | static 1143 | NTSTATUS 1144 | SvVirtualizeProcessor ( 1145 | _In_opt_ PVOID Context 1146 | ) 1147 | { 1148 | NTSTATUS status; 1149 | PSHARED_VIRTUAL_PROCESSOR_DATA sharedVpData; 1150 | PVIRTUAL_PROCESSOR_DATA vpData; 1151 | PCONTEXT contextRecord; 1152 | 1153 | SV_DEBUG_BREAK(); 1154 | 1155 | vpData = nullptr; 1156 | 1157 | NT_ASSERT(ARGUMENT_PRESENT(Context)); 1158 | _Analysis_assume_(ARGUMENT_PRESENT(Context)); 1159 | 1160 | contextRecord = static_cast(ExAllocatePool2( 1161 | POOL_FLAG_NON_PAGED, 1162 | sizeof(*contextRecord), 1163 | 'MVSS')); 1164 | if (contextRecord == nullptr) 1165 | { 1166 | SvDebugPrint("Insufficient memory.\n"); 1167 | status = STATUS_INSUFFICIENT_RESOURCES; 1168 | goto Exit; 1169 | } 1170 | 1171 | // 1172 | // Allocate per processor data. 1173 | // 1174 | #pragma prefast(suppress : __WARNING_MEMORY_LEAK, "Ownership is taken on success.") 1175 | vpData = static_cast( 1176 | SvAllocatePageAlingedPhysicalMemory(sizeof(VIRTUAL_PROCESSOR_DATA))); 1177 | if (vpData == nullptr) 1178 | { 1179 | SvDebugPrint("Insufficient memory.\n"); 1180 | status = STATUS_INSUFFICIENT_RESOURCES; 1181 | goto Exit; 1182 | } 1183 | 1184 | // 1185 | // Capture the current RIP, RSP, RFLAGS, and segment selectors. This 1186 | // captured state is used as an initial state of the guest mode; therefore 1187 | // when virtualization starts by the later call of SvLaunchVm, a processor 1188 | // resume its execution at this location and state. 1189 | // 1190 | RtlCaptureContext(contextRecord); 1191 | 1192 | // 1193 | // First time of this execution, the SimpleSvm hypervisor is not installed 1194 | // yet. Therefore, the branch is taken, and virtualization is attempted. 1195 | // 1196 | // At the second execution of here, after SvLaunchVm virtualized the 1197 | // processor, SvIsSimpleSvmHypervisorInstalled returns TRUE, and this 1198 | // function exits with STATUS_SUCCESS. 1199 | // 1200 | if (SvIsSimpleSvmHypervisorInstalled() == FALSE) 1201 | { 1202 | SvDebugPrint("Attempting to virtualize the processor.\n"); 1203 | sharedVpData = static_cast(Context); 1204 | 1205 | // 1206 | // Enable SVM by setting EFER.SVME. 
It has already been verified that this 1207 | // bit was writable with SvIsSvmSupported. 1208 | // 1209 | __writemsr(IA32_MSR_EFER, __readmsr(IA32_MSR_EFER) | EFER_SVME); 1210 | 1211 | // 1212 | // Set up VMCB, the structure describes the guest state and what events 1213 | // within the guest should be intercepted, ie, triggers #VMEXIT. 1214 | // 1215 | SvPrepareForVirtualization(vpData, sharedVpData, contextRecord); 1216 | 1217 | // 1218 | // Switch to the host RSP to run as the host (hypervisor), and then 1219 | // enters loop that executes code as a guest until #VMEXIT happens and 1220 | // handles #VMEXIT as the host. 1221 | // 1222 | // This function should never return to here. 1223 | // 1224 | SvLaunchVm(&vpData->HostStackLayout.GuestVmcbPa); 1225 | SV_DEBUG_BREAK(); 1226 | #pragma prefast(suppress : __WARNING_USE_OTHER_FUNCTION, "Unrecoverble path.") 1227 | KeBugCheck(MANUALLY_INITIATED_CRASH); 1228 | } 1229 | 1230 | SvDebugPrint("The processor has been virtualized.\n"); 1231 | status = STATUS_SUCCESS; 1232 | 1233 | Exit: 1234 | if (contextRecord != nullptr) 1235 | { 1236 | ExFreePoolWithTag(contextRecord, 'MVSS'); 1237 | } 1238 | if ((!NT_SUCCESS(status)) && (vpData != nullptr)) 1239 | { 1240 | // 1241 | // Frees per processor data if allocated and this function is 1242 | // unsuccessful. 1243 | // 1244 | SvFreePageAlingedPhysicalMemory(vpData); 1245 | } 1246 | return status; 1247 | } 1248 | 1249 | /*! 1250 | @brief Execute a callback on all processors one-by-one. 1251 | 1252 | @details This function execute Callback with Context as a parameter for 1253 | each processor on the current IRQL. If the callback returned 1254 | non-STATUS_SUCCESS value or any error occurred, this function 1255 | stops execution of the callback and returns the error code. 1256 | 1257 | When NumOfProcessorCompleted is not NULL, this function always 1258 | set a number of processors that successfully executed the 1259 | callback. 1260 | 1261 | @param[in] Callback - A function to execute on all processors. 1262 | @param[in] Context - A parameter to pass to the callback. 1263 | @param[out] NumOfProcessorCompleted - A pointer to receive a number of 1264 | processors executed the callback successfully. 1265 | 1266 | @result STATUS_SUCCESS when Callback executed and returned STATUS_SUCCESS 1267 | on all processors; otherwise, an appropriate error code. 1268 | */ 1269 | _IRQL_requires_max_(APC_LEVEL) 1270 | _IRQL_requires_min_(PASSIVE_LEVEL) 1271 | _IRQL_requires_same_ 1272 | _Check_return_ 1273 | static 1274 | NTSTATUS 1275 | SvExecuteOnEachProcessor ( 1276 | _In_ NTSTATUS (*Callback)(PVOID), 1277 | _In_opt_ PVOID Context, 1278 | _Out_opt_ PULONG NumOfProcessorCompleted 1279 | ) 1280 | { 1281 | NTSTATUS status; 1282 | ULONG i, numOfProcessors; 1283 | PROCESSOR_NUMBER processorNumber; 1284 | GROUP_AFFINITY affinity, oldAffinity; 1285 | 1286 | status = STATUS_SUCCESS; 1287 | 1288 | // 1289 | // Get a number of processors on this system. 1290 | // 1291 | numOfProcessors = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS); 1292 | 1293 | for (i = 0; i < numOfProcessors; i++) 1294 | { 1295 | // 1296 | // Convert from an index to a processor number. 1297 | // 1298 | status = KeGetProcessorNumberFromIndex(i, &processorNumber); 1299 | if (!NT_SUCCESS(status)) 1300 | { 1301 | goto Exit; 1302 | } 1303 | 1304 | // 1305 | // Switch execution of this code to a processor #i. 
1306 | // 1307 | affinity.Group = processorNumber.Group; 1308 | affinity.Mask = 1ULL << processorNumber.Number; 1309 | affinity.Reserved[0] = affinity.Reserved[1] = affinity.Reserved[2] = 0; 1310 | KeSetSystemGroupAffinityThread(&affinity, &oldAffinity); 1311 | 1312 | // 1313 | // Execute the callback. 1314 | // 1315 | status = Callback(Context); 1316 | 1317 | // 1318 | // Revert the previously executed processor. 1319 | // 1320 | KeRevertToUserGroupAffinityThread(&oldAffinity); 1321 | 1322 | // 1323 | // Exit if the callback returned error. 1324 | // 1325 | if (!NT_SUCCESS(status)) 1326 | { 1327 | goto Exit; 1328 | } 1329 | } 1330 | 1331 | Exit: 1332 | // 1333 | // i must be the same as the number of processors on the system when this 1334 | // function returns STATUS_SUCCESS; 1335 | // 1336 | NT_ASSERT(!NT_SUCCESS(status) || (i == numOfProcessors)); 1337 | 1338 | // 1339 | // Set a number of processors that successfully executed callback if the 1340 | // out parameter is present. 1341 | // 1342 | if (ARGUMENT_PRESENT(NumOfProcessorCompleted)) 1343 | { 1344 | *NumOfProcessorCompleted = i; 1345 | } 1346 | return status; 1347 | } 1348 | 1349 | /*! 1350 | @brief De-virtualize the current processor if virtualized. 1351 | 1352 | @details This function asks SimpleSVM hypervisor to deactivate itself 1353 | through CPUID with a back-door function id and frees per 1354 | processor data if it is returned. If the SimpleSvm is not 1355 | installed, this function does nothing. 1356 | 1357 | @param[in] Context - An out pointer to receive an address of shared data. 1358 | 1359 | @result Always STATUS_SUCCESS. 1360 | */ 1361 | _IRQL_requires_max_(DISPATCH_LEVEL) 1362 | _IRQL_requires_min_(PASSIVE_LEVEL) 1363 | _IRQL_requires_same_ 1364 | _Check_return_ 1365 | static 1366 | NTSTATUS 1367 | SvDevirtualizeProcessor ( 1368 | _In_opt_ PVOID Context 1369 | ) 1370 | { 1371 | int registers[4]; // EAX, EBX, ECX, and EDX 1372 | UINT64 high, low; 1373 | PVIRTUAL_PROCESSOR_DATA vpData; 1374 | PSHARED_VIRTUAL_PROCESSOR_DATA* sharedVpDataPtr; 1375 | 1376 | if (!ARGUMENT_PRESENT(Context)) 1377 | { 1378 | goto Exit; 1379 | } 1380 | 1381 | // 1382 | // Ask SimpleSVM hypervisor to deactivate itself. If the hypervisor is 1383 | // installed, this ECX is set to 'SSVM', and EDX:EAX indicates an address 1384 | // of per processor data to be freed. 1385 | // 1386 | __cpuidex(registers, CPUID_UNLOAD_SIMPLE_SVM, CPUID_UNLOAD_SIMPLE_SVM); 1387 | if (registers[2] != 'SSVM') 1388 | { 1389 | goto Exit; 1390 | } 1391 | 1392 | SvDebugPrint("The processor has been de-virtualized.\n"); 1393 | 1394 | // 1395 | // Get an address of per processor data indicated by EDX:EAX. 1396 | // 1397 | high = registers[3]; 1398 | low = registers[0] & MAXUINT32; 1399 | vpData = reinterpret_cast(high << 32 | low); 1400 | NT_ASSERT(vpData->HostStackLayout.Reserved1 == MAXUINT64); 1401 | 1402 | // 1403 | // Save an address of shared data, then free per processor data. 1404 | // 1405 | sharedVpDataPtr = static_cast(Context); 1406 | *sharedVpDataPtr = vpData->HostStackLayout.SharedVpData; 1407 | SvFreePageAlingedPhysicalMemory(vpData); 1408 | 1409 | Exit: 1410 | return STATUS_SUCCESS; 1411 | } 1412 | 1413 | /*! 1414 | @brief De-virtualize all virtualized processors. 1415 | 1416 | @details This function execute a callback to de-virtualize a processor on 1417 | all processors, and frees shared data when the callback returned 1418 | its pointer from a hypervisor. 
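
             Note that every processor stores a pointer to the same
             SHARED_VIRTUAL_PROCESSOR_DATA, so each invocation of
             SvDevirtualizeProcessor overwrites Context with the same value,
             and freeing that shared data once afterwards is sufficient.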
1419 | */ 1420 | _IRQL_requires_max_(APC_LEVEL) 1421 | _IRQL_requires_min_(PASSIVE_LEVEL) 1422 | _IRQL_requires_same_ 1423 | static 1424 | VOID 1425 | SvDevirtualizeAllProcessors ( 1426 | VOID 1427 | ) 1428 | { 1429 | PSHARED_VIRTUAL_PROCESSOR_DATA sharedVpData; 1430 | 1431 | sharedVpData = nullptr; 1432 | 1433 | // 1434 | // De-virtualize all processors and free shared data when returned. 1435 | // 1436 | NT_VERIFY(NT_SUCCESS(SvExecuteOnEachProcessor(SvDevirtualizeProcessor, 1437 | &sharedVpData, 1438 | nullptr))); 1439 | if (sharedVpData != nullptr) 1440 | { 1441 | SvFreeContiguousMemory(sharedVpData->MsrPermissionsMap); 1442 | SvFreePageAlingedPhysicalMemory(sharedVpData); 1443 | } 1444 | } 1445 | 1446 | /*! 1447 | @brief Build the MSR permissions map (MSRPM). 1448 | 1449 | @details This function sets up MSRPM to intercept to IA32_MSR_EFER, 1450 | as suggested in "Extended Feature Enable Register (EFER)" 1451 | ---- 1452 | Secure Virtual Machine Enable (SVME) Bit 1453 | Bit 12, read/write. Enables the SVM extensions. (...) The 1454 | effect of turning off EFER.SVME while a guest is running is 1455 | undefined; therefore, the VMM should always prevent guests 1456 | from writing EFER. 1457 | ---- 1458 | 1459 | Each MSR is controlled by two bits in the MSRPM. The LSB of 1460 | the two bits controls read access to the MSR and the MSB 1461 | controls write access. A value of 1 indicates that the 1462 | operation is intercepted. This function locates an offset for 1463 | IA32_MSR_EFER and sets the MSB bit. For details of logic, see 1464 | "MSR Intercepts". 1465 | 1466 | @param[in,out] MsrPermissionsMap - The MSRPM to set up. 1467 | */ 1468 | _IRQL_requires_same_ 1469 | static 1470 | VOID 1471 | SvBuildMsrPermissionsMap ( 1472 | _Inout_ PVOID MsrPermissionsMap 1473 | ) 1474 | { 1475 | constexpr UINT32 BITS_PER_MSR = 2; 1476 | constexpr UINT32 SECOND_MSR_RANGE_BASE = 0xc0000000; 1477 | constexpr UINT32 SECOND_MSRPM_OFFSET = 0x800 * CHAR_BIT; 1478 | RTL_BITMAP bitmapHeader; 1479 | ULONG offsetFrom2ndBase, offset; 1480 | 1481 | // 1482 | // Setup and clear all bits, indicating no MSR access should be intercepted. 1483 | // 1484 | RtlInitializeBitMap(&bitmapHeader, 1485 | static_cast(MsrPermissionsMap), 1486 | SVM_MSR_PERMISSIONS_MAP_SIZE * CHAR_BIT 1487 | ); 1488 | RtlClearAllBits(&bitmapHeader); 1489 | 1490 | // 1491 | // Compute an offset from the second MSR permissions map offset (0x800) for 1492 | // IA32_MSR_EFER in bits. Then, add an offset until the second MSR 1493 | // permissions map. 1494 | // 1495 | offsetFrom2ndBase = (IA32_MSR_EFER - SECOND_MSR_RANGE_BASE) * BITS_PER_MSR; 1496 | offset = SECOND_MSRPM_OFFSET + offsetFrom2ndBase; 1497 | 1498 | // 1499 | // Set the MSB bit indicating write accesses to the MSR should be intercepted. 1500 | // 1501 | RtlSetBits(&bitmapHeader, offset + 1, 1); 1502 | } 1503 | 1504 | /*! 1505 | @brief Build pass-through style page tables used in nested paging. 1506 | 1507 | @details This function build page tables used in Nested Page Tables. The 1508 | page tables are used to translate from a guest physical address 1509 | to a system physical address and pointed by the NCr3 field of 1510 | VMCB, like the traditional page tables are pointed by CR3. 1511 | 1512 | The nested page tables built in this function are set to 1513 | translate a guest physical address to the same system physical 1514 | address. For example, guest physical address 0x1000 is 1515 | translated into system physical address 0x1000. 
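             The same holds for any other address in the mapped range; for
             instance, guest physical address 0x40400000 is translated into
             system physical address 0x40400000 as well (ie, the nested page
             tables implement an identity, or 1:1, mapping).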
1516 | 1517 | In order to save memory to build nested page tables, 2MB large 1518 | pages are used (as opposed to the standard pages that describe 1519 | translation only for 4K granularity. Also, only up to 1 TB of 1520 | translation is built. 1GB huge pages are not used due to VMware 1521 | not supporting this feature. 1522 | 1523 | @param[out] SharedVpData - Out buffer to build nested page tables. 1524 | */ 1525 | _IRQL_requires_same_ 1526 | static 1527 | VOID 1528 | SvBuildNestedPageTables ( 1529 | _Out_ PSHARED_VIRTUAL_PROCESSOR_DATA SharedVpData 1530 | ) 1531 | { 1532 | ULONG64 pdptBasePa, pdBasePa, translationPa; 1533 | 1534 | // 1535 | // Build only two PML4 entries. Those entries have subtables that control up to 1536 | // 1 TB physical memory. PFN points to a base physical address of the page 1537 | // directory pointer table. 1538 | // 1539 | for (ULONG64 pml4Index = 0; pml4Index < 2; pml4Index++) { 1540 | PPML4_ENTRY_2MB pml4e = &SharedVpData->Pml4Entries[pml4Index]; 1541 | PPML4E_TREE pml4eTree = &SharedVpData->Pml4eTrees[pml4Index]; 1542 | 1543 | // 1544 | // Set the US (User) bit of all nested page table entries to be translated 1545 | // without #VMEXIT, as all guest accesses are treated as user accesses at 1546 | // the nested level. Also, the RW (Write) bit of nested page table entries 1547 | // that corresponds to guest page tables must be 1 since all guest page 1548 | // table accesses are threated as write access. See "Nested versus Guest 1549 | // Page Faults, Fault Ordering" for more details. 1550 | // 1551 | // Those settings do not lower security since permission checks are done 1552 | // twice independently: based on guest page tables, and nested page tables. 1553 | // See "Nested versus Guest Page Faults, Fault Ordering" for more details. 1554 | // 1555 | pdptBasePa = MmGetPhysicalAddress(&pml4eTree->PdptEntries).QuadPart; 1556 | pml4e->Fields.PageFrameNumber = pdptBasePa >> PAGE_SHIFT; 1557 | pml4e->Fields.Valid = 1; 1558 | pml4e->Fields.Write = 1; 1559 | pml4e->Fields.User = 1; 1560 | 1561 | // 1562 | // One PML4 entry controls 512 page directory pointer entires. 1563 | // 1564 | for (ULONG64 pdptIndex = 0; pdptIndex < 512; pdptIndex++) 1565 | { 1566 | // 1567 | // PFN points to a base physical address of the page directory table. 1568 | // 1569 | pdBasePa = MmGetPhysicalAddress(&pml4eTree->PdEntries[pdptIndex][0]).QuadPart; 1570 | pml4eTree->PdptEntries[pdptIndex].Fields.PageFrameNumber = pdBasePa >> PAGE_SHIFT; 1571 | pml4eTree->PdptEntries[pdptIndex].Fields.Valid = 1; 1572 | pml4eTree->PdptEntries[pdptIndex].Fields.Write = 1; 1573 | pml4eTree->PdptEntries[pdptIndex].Fields.User = 1; 1574 | 1575 | // 1576 | // One page directory entry controls 512 page directory entries. 1577 | // 1578 | // We do not explicitly configure PAT in the NPT entry. The consequences 1579 | // of this are: 1) pages whose PAT (Page Attribute Table) type is the 1580 | // Write-Combining (WC) memory type could be treated as the 1581 | // Write-Combining Plus (WC+) while it should be WC when the MTRR type is 1582 | // either Write Protect (WP), Writethrough (WT) or Writeback (WB), and 1583 | // 2) pages whose PAT type is Uncacheable Minus (UC-) could be treated 1584 | // as Cache Disabled (CD) while it should be WC, when MTRR type is WC. 
1585 | // 1586 | // While those are not desirable, this is acceptable given that 1) only 1587 | // introduces additional cache snooping and associated performance 1588 | // penalty, which would not be significant since WC+ still lets 1589 | // processors combine multiple writes into one and avoid large 1590 | // performance penalty due to frequent writes to memory without caching. 1591 | // 2) might be worse but I have not seen MTRR ranges configured as WC 1592 | // on testing, hence the unintentional UC- will just results in the same 1593 | // effective memory type as what would be with UC. 1594 | // 1595 | // See "Memory Types" (7.4), for details of memory types, 1596 | // "PAT-Register PA-Field Indexing", "Combining Guest and Host PAT Types", 1597 | // and "Combining PAT and MTRR Types" for how the effective memory type 1598 | // is determined based on Guest PAT type, Host PAT type, and the MTRR 1599 | // type. 1600 | // 1601 | // The correct approach may be to look up the guest PTE and copy the 1602 | // caching related bits (PAT, PCD, and PWT) when constructing NTP 1603 | // entries for non RAM regions, so the combined PAT will always be the 1604 | // same as the guest PAT type. This may be done when any issue manifests 1605 | // with the current implementation. 1606 | // 1607 | for (ULONG64 pdIndex = 0; pdIndex < 512; pdIndex++) 1608 | { 1609 | // 1610 | // PFN points to a base physical address of system physical address 1611 | // to be translated from a guest physical address. Set the PS 1612 | // (LargePage) bit to indicate that this is a large page and no 1613 | // subtable exists. 1614 | // 1615 | translationPa = (pml4Index * 512 * 512) + (pdptIndex * 512) + pdIndex; 1616 | pml4eTree->PdEntries[pdptIndex][pdIndex].Fields.PageFrameNumber = translationPa; 1617 | pml4eTree->PdEntries[pdptIndex][pdIndex].Fields.Valid = 1; 1618 | pml4eTree->PdEntries[pdptIndex][pdIndex].Fields.Write = 1; 1619 | pml4eTree->PdEntries[pdptIndex][pdIndex].Fields.User = 1; 1620 | pml4eTree->PdEntries[pdptIndex][pdIndex].Fields.LargePage = 1; 1621 | } 1622 | } 1623 | } 1624 | } 1625 | 1626 | /*! 1627 | @brief Test whether the current processor support the SVM feature. 1628 | 1629 | @details This function tests whether the current processor has enough 1630 | features to run SimpleSvm, especially about SVM features. 1631 | 1632 | @result TRUE if the processor supports the SVM feature; otherwise, FALSE. 1633 | */ 1634 | _IRQL_requires_same_ 1635 | _Check_return_ 1636 | static 1637 | BOOLEAN 1638 | SvIsSvmSupported ( 1639 | VOID 1640 | ) 1641 | { 1642 | BOOLEAN svmSupported; 1643 | int registers[4]; // EAX, EBX, ECX, and EDX 1644 | ULONG64 vmcr; 1645 | 1646 | svmSupported = FALSE; 1647 | 1648 | // 1649 | // Test if the current processor is AMD one. An AMD processor should return 1650 | // "AuthenticAMD" from CPUID function 0. See "Function 0h-Maximum Standard 1651 | // Function Number and Vendor String". 1652 | // 1653 | __cpuid(registers, CPUID_MAX_STANDARD_FN_NUMBER_AND_VENDOR_STRING); 1654 | if ((registers[1] != 'htuA') || 1655 | (registers[3] != 'itne') || 1656 | (registers[2] != 'DMAc')) 1657 | { 1658 | goto Exit; 1659 | } 1660 | 1661 | // 1662 | // Test if the SVM feature is supported by the current processor. See 1663 | // "Enabling SVM" and "CPUID Fn8000_0001_ECX Feature Identifiers". 
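    //
    // (For reference, the SVM flag is bit 2 of ECX for CPUID function
    // 8000_0001h; CPUID_FN8000_0001_ECX_SVM is assumed to be defined
    // accordingly elsewhere in this project.)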
1664 | // 1665 | __cpuid(registers, CPUID_PROCESSOR_AND_PROCESSOR_FEATURE_IDENTIFIERS_EX); 1666 | if ((registers[2] & CPUID_FN8000_0001_ECX_SVM) == 0) 1667 | { 1668 | goto Exit; 1669 | } 1670 | 1671 | // 1672 | // Test if the Nested Page Tables feature is supported by the current 1673 | // processor. See "Enabling Nested Paging" and "CPUID Fn8000_000A_EDX SVM 1674 | // Feature Identification". 1675 | // 1676 | __cpuid(registers, CPUID_SVM_FEATURES); 1677 | if ((registers[3] & CPUID_FN8000_000A_EDX_NP) == 0) 1678 | { 1679 | goto Exit; 1680 | } 1681 | 1682 | // 1683 | // Test if the SVM feature can be enabled. When VM_CR.SVMDIS is set, 1684 | // EFER.SVME cannot be 1; therefore, SVM cannot be enabled. When 1685 | // VM_CR.SVMDIS is clear, EFER.SVME can be written normally and SVM can be 1686 | // enabled. See "Enabling SVM". 1687 | // 1688 | vmcr = __readmsr(SVM_MSR_VM_CR); 1689 | if ((vmcr & SVM_VM_CR_SVMDIS) != 0) 1690 | { 1691 | goto Exit; 1692 | } 1693 | 1694 | svmSupported = TRUE; 1695 | 1696 | Exit: 1697 | return svmSupported; 1698 | } 1699 | 1700 | /*! 1701 | @brief Virtualizes all processors on the system. 1702 | 1703 | @details This function attempts to virtualize all processors on the 1704 | system, and returns STATUS_SUCCESS if all processors are 1705 | successfully virtualized. If any processor is not virtualized, 1706 | this function de-virtualizes all processors and returns an error 1707 | code. 1708 | 1709 | @result STATUS_SUCCESS on success; otherwise, an appropriate error code. 1710 | */ 1711 | _IRQL_requires_max_(APC_LEVEL) 1712 | _IRQL_requires_min_(PASSIVE_LEVEL) 1713 | _IRQL_requires_same_ 1714 | _Check_return_ 1715 | static 1716 | NTSTATUS 1717 | SvVirtualizeAllProcessors ( 1718 | VOID 1719 | ) 1720 | { 1721 | NTSTATUS status; 1722 | PSHARED_VIRTUAL_PROCESSOR_DATA sharedVpData; 1723 | ULONG numOfProcessorsCompleted; 1724 | 1725 | sharedVpData = nullptr; 1726 | numOfProcessorsCompleted = 0; 1727 | 1728 | // 1729 | // Test whether the current processor supports all required SVM features. If 1730 | // not, exit as error. 1731 | // 1732 | if (SvIsSvmSupported() == FALSE) 1733 | { 1734 | SvDebugPrint("SVM is not fully supported on this processor.\n"); 1735 | status = STATUS_HV_FEATURE_UNAVAILABLE; 1736 | goto Exit; 1737 | } 1738 | 1739 | // 1740 | // Allocate a data structure shared across all processors. This data is 1741 | // page tables used for Nested Page Tables. 1742 | // 1743 | #pragma prefast(suppress : __WARNING_MEMORY_LEAK, "Ownership is taken on success.") 1744 | sharedVpData = static_cast( 1745 | SvAllocatePageAlingedPhysicalMemory(sizeof(SHARED_VIRTUAL_PROCESSOR_DATA))); 1746 | if (sharedVpData == nullptr) 1747 | { 1748 | SvDebugPrint("Insufficient memory.\n"); 1749 | status = STATUS_INSUFFICIENT_RESOURCES; 1750 | goto Exit; 1751 | } 1752 | 1753 | // 1754 | // Allocate MSR permissions map (MSRPM) onto contiguous physical memory. 1755 | // 1756 | sharedVpData->MsrPermissionsMap = SvAllocateContiguousMemory( 1757 | SVM_MSR_PERMISSIONS_MAP_SIZE); 1758 | if (sharedVpData->MsrPermissionsMap == nullptr) 1759 | { 1760 | SvDebugPrint("Insufficient memory.\n"); 1761 | status = STATUS_INSUFFICIENT_RESOURCES; 1762 | goto Exit; 1763 | } 1764 | 1765 | // 1766 | // Build nested page table and MSRPM. 1767 | // 1768 | SvBuildNestedPageTables(sharedVpData); 1769 | SvBuildMsrPermissionsMap(sharedVpData->MsrPermissionsMap); 1770 | 1771 | // 1772 | // Execute SvVirtualizeProcessor on and virtualize each processor one-by-one. 
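    // (SvExecuteOnEachProcessor runs the callback on one processor at a time
    // by temporarily binding the current thread to that processor with
    // KeSetSystemGroupAffinityThread.)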
1773 | // How many processors were successfully virtualized is stored in the third 1774 | // parameter. 1775 | // 1776 | // STATUS_SUCCESS is returned if all processor are successfully virtualized. 1777 | // When any error occurs while virtualizing processors, this function does 1778 | // not attempt to virtualize the rest of processor. Therefore, only part of 1779 | // processors on the system may have been virtualized on error. In this case, 1780 | // it is a caller's responsibility to clean-up (de-virtualize) such 1781 | // processors. 1782 | // 1783 | status = SvExecuteOnEachProcessor(SvVirtualizeProcessor, 1784 | sharedVpData, 1785 | &numOfProcessorsCompleted); 1786 | 1787 | Exit: 1788 | if (!NT_SUCCESS(status)) 1789 | { 1790 | // 1791 | // On failure, after successful allocation of shared data. 1792 | // 1793 | if (numOfProcessorsCompleted != 0) 1794 | { 1795 | // 1796 | // If one or more processors have already been virtualized, 1797 | // de-virtualize any of those processors, and free shared data. 1798 | // 1799 | NT_ASSERT(sharedVpData != nullptr); 1800 | SvDevirtualizeAllProcessors(); 1801 | } 1802 | else 1803 | { 1804 | // 1805 | // If none of processors has not been virtualized, simply free 1806 | // shared data. 1807 | // 1808 | if (sharedVpData != nullptr) 1809 | { 1810 | if (sharedVpData->MsrPermissionsMap != nullptr) 1811 | { 1812 | SvFreeContiguousMemory(sharedVpData->MsrPermissionsMap); 1813 | } 1814 | SvFreePageAlingedPhysicalMemory(sharedVpData); 1815 | } 1816 | } 1817 | } 1818 | return status; 1819 | } 1820 | 1821 | /*! 1822 | @brief An entry point of this driver. 1823 | 1824 | @param[in] DriverObject - A driver object. 1825 | @param[in] RegistryPath - Unused. 1826 | 1827 | @result STATUS_SUCCESS on success; otherwise, an appropriate error code. 1828 | */ 1829 | _Use_decl_annotations_ 1830 | EXTERN_C 1831 | NTSTATUS 1832 | DriverEntry ( 1833 | PDRIVER_OBJECT DriverObject, 1834 | PUNICODE_STRING RegistryPath 1835 | ) 1836 | { 1837 | NTSTATUS status; 1838 | UNICODE_STRING objectName; 1839 | OBJECT_ATTRIBUTES objectAttributes; 1840 | PCALLBACK_OBJECT callbackObject; 1841 | PVOID callbackRegistration; 1842 | 1843 | UNREFERENCED_PARAMETER(RegistryPath); 1844 | 1845 | SV_DEBUG_BREAK(); 1846 | 1847 | callbackRegistration = nullptr; 1848 | DriverObject->DriverUnload = SvDriverUnload; 1849 | 1850 | // 1851 | // Opts-in no-execute (NX) nonpaged pool when available for security. By 1852 | // defining POOL_NX_OPTIN as 1 and calling this function, nonpaged pool 1853 | // allocation by the ExAllocatePool family with the NonPagedPool flag 1854 | // automatically allocates NX nonpaged pool on Windows 8 and later versions 1855 | // of Windows, while on Windows 7 where NX nonpaged pool is unsupported, 1856 | // executable nonpaged pool is returned as usual. 1857 | // 1858 | ExInitializeDriverRuntime(DrvRtPoolNxOptIn); 1859 | 1860 | // 1861 | // Registers a power state callback (SvPowerCallbackRoutine) to handle 1862 | // system sleep and resume to manage virtualization state. 1863 | // 1864 | // First, opens the \Callback\PowerState callback object provides 1865 | // notification regarding power state changes. This is a system defined 1866 | // callback object that was already created by Windows. To open a system 1867 | // defined callback object, the Create parameter of ExCreateCallback must be 1868 | // FALSE (and AllowMultipleCallbacks is ignore when the Create parameter is 1869 | // FALSE). 
1870 | // 1871 | objectName = RTL_CONSTANT_STRING(L"\\Callback\\PowerState"); 1872 | objectAttributes = RTL_CONSTANT_OBJECT_ATTRIBUTES(&objectName, 1873 | OBJ_CASE_INSENSITIVE); 1874 | status = ExCreateCallback(&callbackObject, &objectAttributes, FALSE, TRUE); 1875 | if (!NT_SUCCESS(status)) 1876 | { 1877 | SvDebugPrint("Failed to open the power state callback object.\n"); 1878 | goto Exit; 1879 | } 1880 | 1881 | // 1882 | // Then, registers our callback. The open callback object must be 1883 | // dereferenced. 1884 | // 1885 | callbackRegistration = ExRegisterCallback(callbackObject, 1886 | SvPowerCallbackRoutine, 1887 | nullptr); 1888 | ObDereferenceObject(callbackObject); 1889 | if (callbackRegistration == nullptr) 1890 | { 1891 | SvDebugPrint("Failed to register a power state callback.\n"); 1892 | status = STATUS_UNSUCCESSFUL; 1893 | goto Exit; 1894 | } 1895 | 1896 | // 1897 | // Virtualize all processors on the system. 1898 | // 1899 | status = SvVirtualizeAllProcessors(); 1900 | 1901 | Exit: 1902 | if (NT_SUCCESS(status)) 1903 | { 1904 | // 1905 | // On success, save the registration handle for un-registration. 1906 | // 1907 | NT_ASSERT(callbackRegistration); 1908 | g_PowerCallbackRegistration = callbackRegistration; 1909 | } 1910 | else 1911 | { 1912 | // 1913 | // On any failure, clean up stuff as needed. 1914 | // 1915 | if (callbackRegistration != nullptr) 1916 | { 1917 | ExUnregisterCallback(callbackRegistration); 1918 | } 1919 | } 1920 | return status; 1921 | } 1922 | 1923 | /*! 1924 | @brief Driver unload callback. 1925 | 1926 | @details This function de-virtualize all processors on the system. 1927 | 1928 | @param[in] DriverObject - Unused. 1929 | */ 1930 | _Use_decl_annotations_ 1931 | static 1932 | VOID 1933 | SvDriverUnload ( 1934 | PDRIVER_OBJECT DriverObject 1935 | ) 1936 | { 1937 | UNREFERENCED_PARAMETER(DriverObject); 1938 | 1939 | SV_DEBUG_BREAK(); 1940 | 1941 | // 1942 | // Unregister the power state callback. 1943 | // 1944 | NT_ASSERT(g_PowerCallbackRegistration); 1945 | ExUnregisterCallback(g_PowerCallbackRegistration); 1946 | 1947 | // 1948 | // De-virtualize all processors on the system. 1949 | // 1950 | SvDevirtualizeAllProcessors(); 1951 | } 1952 | 1953 | /*! 1954 | @brief PowerState callback routine. 1955 | 1956 | @details This function de-virtualize all processors when the system is 1957 | exiting system power state S0 (ie, the system is about to sleep 1958 | etc), and virtualize all processors when the system has just 1959 | reentered S0 (ie, the system has resume from sleep etc). 1960 | 1961 | Those operations are required because virtualization is cleared 1962 | during sleep. 1963 | 1964 | For the meanings of parameters, see ExRegisterCallback in MSDN. 1965 | 1966 | @param[in] CallbackContext - Unused. 1967 | @param[in] Argument1 - A PO_CB_XXX constant value. 1968 | @param[in] Argument2 - A value of TRUE or FALSE. 1969 | */ 1970 | _Use_decl_annotations_ 1971 | static 1972 | VOID 1973 | SvPowerCallbackRoutine ( 1974 | PVOID CallbackContext, 1975 | PVOID Argument1, 1976 | PVOID Argument2 1977 | ) 1978 | { 1979 | UNREFERENCED_PARAMETER(CallbackContext); 1980 | 1981 | // 1982 | // PO_CB_SYSTEM_STATE_LOCK of Argument1 indicates that a system power state 1983 | // change is imminent. 1984 | // 1985 | if (Argument1 != reinterpret_cast(PO_CB_SYSTEM_STATE_LOCK)) 1986 | { 1987 | goto Exit; 1988 | } 1989 | 1990 | if (Argument2 != FALSE) 1991 | { 1992 | // 1993 | // The system has just reentered S0. Re-virtualize all processors. 
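        // NT_VERIFY still evaluates its expression in free builds (unlike
        // NT_ASSERT) and raises an assertion failure only on checked builds,
        // so the call is always made, and a failure to re-virtualize on
        // resume is reported via an assert rather than handled as an error
        // here.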
1994 | // 1995 | NT_VERIFY(NT_SUCCESS(SvVirtualizeAllProcessors())); 1996 | } 1997 | else 1998 | { 1999 | // 2000 | // The system is about to exit system power state S0. De-virtualize all 2001 | // processors. 2002 | // 2003 | SvDevirtualizeAllProcessors(); 2004 | } 2005 | 2006 | Exit: 2007 | return; 2008 | } 2009 | -------------------------------------------------------------------------------- /SimpleSvm/SimpleSvm.hpp: -------------------------------------------------------------------------------- 1 | /*! 2 | @file SimpleSvm.hpp 3 | 4 | @brief SVM specific definitions. 5 | 6 | @author Satoshi Tanda 7 | 8 | @copyright Copyright (c) 2017-2019, Satoshi Tanda. All rights reserved. 9 | */ 10 | #pragma once 11 | 12 | #include 13 | 14 | // 15 | // A size of two the MSR permissions map. 16 | // 17 | #define SVM_MSR_PERMISSIONS_MAP_SIZE (PAGE_SIZE * 2) 18 | 19 | // 20 | // See "SVM Related MSRs" 21 | // 22 | #define SVM_MSR_VM_CR 0xc0010114 23 | #define SVM_MSR_VM_HSAVE_PA 0xc0010117 24 | 25 | #define SVM_VM_CR_SVMDIS (1UL << 4) 26 | 27 | // 28 | // See "VMCB Layout, Control Area" 29 | // 30 | #define SVM_INTERCEPT_MISC1_CPUID (1UL << 18) 31 | #define SVM_INTERCEPT_MISC1_MSR_PROT (1UL << 28) 32 | #define SVM_INTERCEPT_MISC2_VMRUN (1UL << 0) 33 | #define SVM_NP_ENABLE_NP_ENABLE (1UL << 0) 34 | 35 | typedef struct _VMCB_CONTROL_AREA 36 | { 37 | UINT16 InterceptCrRead; // +0x000 38 | UINT16 InterceptCrWrite; // +0x002 39 | UINT16 InterceptDrRead; // +0x004 40 | UINT16 InterceptDrWrite; // +0x006 41 | UINT32 InterceptException; // +0x008 42 | UINT32 InterceptMisc1; // +0x00c 43 | UINT32 InterceptMisc2; // +0x010 44 | UINT8 Reserved1[0x03c - 0x014]; // +0x014 45 | UINT16 PauseFilterThreshold; // +0x03c 46 | UINT16 PauseFilterCount; // +0x03e 47 | UINT64 IopmBasePa; // +0x040 48 | UINT64 MsrpmBasePa; // +0x048 49 | UINT64 TscOffset; // +0x050 50 | UINT32 GuestAsid; // +0x058 51 | UINT32 TlbControl; // +0x05c 52 | UINT64 VIntr; // +0x060 53 | UINT64 InterruptShadow; // +0x068 54 | UINT64 ExitCode; // +0x070 55 | UINT64 ExitInfo1; // +0x078 56 | UINT64 ExitInfo2; // +0x080 57 | UINT64 ExitIntInfo; // +0x088 58 | UINT64 NpEnable; // +0x090 59 | UINT64 AvicApicBar; // +0x098 60 | UINT64 GuestPaOfGhcb; // +0x0a0 61 | UINT64 EventInj; // +0x0a8 62 | UINT64 NCr3; // +0x0b0 63 | UINT64 LbrVirtualizationEnable; // +0x0b8 64 | UINT64 VmcbClean; // +0x0c0 65 | UINT64 NRip; // +0x0c8 66 | UINT8 NumOfBytesFetched; // +0x0d0 67 | UINT8 GuestInstructionBytes[15]; // +0x0d1 68 | UINT64 AvicApicBackingPagePointer; // +0x0e0 69 | UINT64 Reserved2; // +0x0e8 70 | UINT64 AvicLogicalTablePointer; // +0x0f0 71 | UINT64 AvicPhysicalTablePointer; // +0x0f8 72 | UINT64 Reserved3; // +0x100 73 | UINT64 VmcbSaveStatePointer; // +0x108 74 | UINT8 Reserved4[0x400 - 0x110]; // +0x110 75 | } VMCB_CONTROL_AREA, *PVMCB_CONTROL_AREA; 76 | static_assert(sizeof(VMCB_CONTROL_AREA) == 0x400, 77 | "VMCB_CONTROL_AREA Size Mismatch"); 78 | 79 | // 80 | // See "VMCB Layout, State Save Area" 81 | // 82 | typedef struct _VMCB_STATE_SAVE_AREA 83 | { 84 | UINT16 EsSelector; // +0x000 85 | UINT16 EsAttrib; // +0x002 86 | UINT32 EsLimit; // +0x004 87 | UINT64 EsBase; // +0x008 88 | UINT16 CsSelector; // +0x010 89 | UINT16 CsAttrib; // +0x012 90 | UINT32 CsLimit; // +0x014 91 | UINT64 CsBase; // +0x018 92 | UINT16 SsSelector; // +0x020 93 | UINT16 SsAttrib; // +0x022 94 | UINT32 SsLimit; // +0x024 95 | UINT64 SsBase; // +0x028 96 | UINT16 DsSelector; // +0x030 97 | UINT16 DsAttrib; // +0x032 98 | UINT32 DsLimit; // +0x034 99 | UINT64 DsBase; // 
+0x038 100 | UINT16 FsSelector; // +0x040 101 | UINT16 FsAttrib; // +0x042 102 | UINT32 FsLimit; // +0x044 103 | UINT64 FsBase; // +0x048 104 | UINT16 GsSelector; // +0x050 105 | UINT16 GsAttrib; // +0x052 106 | UINT32 GsLimit; // +0x054 107 | UINT64 GsBase; // +0x058 108 | UINT16 GdtrSelector; // +0x060 109 | UINT16 GdtrAttrib; // +0x062 110 | UINT32 GdtrLimit; // +0x064 111 | UINT64 GdtrBase; // +0x068 112 | UINT16 LdtrSelector; // +0x070 113 | UINT16 LdtrAttrib; // +0x072 114 | UINT32 LdtrLimit; // +0x074 115 | UINT64 LdtrBase; // +0x078 116 | UINT16 IdtrSelector; // +0x080 117 | UINT16 IdtrAttrib; // +0x082 118 | UINT32 IdtrLimit; // +0x084 119 | UINT64 IdtrBase; // +0x088 120 | UINT16 TrSelector; // +0x090 121 | UINT16 TrAttrib; // +0x092 122 | UINT32 TrLimit; // +0x094 123 | UINT64 TrBase; // +0x098 124 | UINT8 Reserved1[0x0cb - 0x0a0]; // +0x0a0 125 | UINT8 Cpl; // +0x0cb 126 | UINT32 Reserved2; // +0x0cc 127 | UINT64 Efer; // +0x0d0 128 | UINT8 Reserved3[0x148 - 0x0d8]; // +0x0d8 129 | UINT64 Cr4; // +0x148 130 | UINT64 Cr3; // +0x150 131 | UINT64 Cr0; // +0x158 132 | UINT64 Dr7; // +0x160 133 | UINT64 Dr6; // +0x168 134 | UINT64 Rflags; // +0x170 135 | UINT64 Rip; // +0x178 136 | UINT8 Reserved4[0x1d8 - 0x180]; // +0x180 137 | UINT64 Rsp; // +0x1d8 138 | UINT8 Reserved5[0x1f8 - 0x1e0]; // +0x1e0 139 | UINT64 Rax; // +0x1f8 140 | UINT64 Star; // +0x200 141 | UINT64 LStar; // +0x208 142 | UINT64 CStar; // +0x210 143 | UINT64 SfMask; // +0x218 144 | UINT64 KernelGsBase; // +0x220 145 | UINT64 SysenterCs; // +0x228 146 | UINT64 SysenterEsp; // +0x230 147 | UINT64 SysenterEip; // +0x238 148 | UINT64 Cr2; // +0x240 149 | UINT8 Reserved6[0x268 - 0x248]; // +0x248 150 | UINT64 GPat; // +0x268 151 | UINT64 DbgCtl; // +0x270 152 | UINT64 BrFrom; // +0x278 153 | UINT64 BrTo; // +0x280 154 | UINT64 LastExcepFrom; // +0x288 155 | UINT64 LastExcepTo; // +0x290 156 | } VMCB_STATE_SAVE_AREA, *PVMCB_STATE_SAVE_AREA; 157 | static_assert(sizeof(VMCB_STATE_SAVE_AREA) == 0x298, 158 | "VMCB_STATE_SAVE_AREA Size Mismatch"); 159 | 160 | // 161 | // An entire VMCB (Virtual machine control block) layout. 
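//
// The whole VMCB occupies a single 4KB page: the control area fills the first
// 0x400 bytes and the state save area immediately follows it (per the
// static_asserts in this header). The SVM instructions VMRUN, VMSAVE and
// VMLOAD reference a VMCB by its 4KB-aligned physical address, roughly like
// this sketch (vmcbVa here stands for some page-aligned virtual address of a
// VMCB, not an identifier used by this project):
//
//      PHYSICAL_ADDRESS vmcbPa = MmGetPhysicalAddress(vmcbVa);
//      __svm_vmsave(vmcbPa.QuadPart);   // e.g., save part of the current state into it
//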
162 | // 163 | typedef struct _VMCB 164 | { 165 | VMCB_CONTROL_AREA ControlArea; 166 | VMCB_STATE_SAVE_AREA StateSaveArea; 167 | UINT8 Reserved1[0x1000 - sizeof(VMCB_CONTROL_AREA) - sizeof(VMCB_STATE_SAVE_AREA)]; 168 | } VMCB, *PVMCB; 169 | static_assert(sizeof(VMCB) == 0x1000, 170 | "VMCB Size Mismatch"); 171 | 172 | // 173 | // See "Event Injection" 174 | // 175 | typedef struct _EVENTINJ 176 | { 177 | union 178 | { 179 | UINT64 AsUInt64; 180 | struct 181 | { 182 | UINT64 Vector : 8; // [0:7] 183 | UINT64 Type : 3; // [8:10] 184 | UINT64 ErrorCodeValid : 1; // [11] 185 | UINT64 Reserved1 : 19; // [12:30] 186 | UINT64 Valid : 1; // [31] 187 | UINT64 ErrorCode : 32; // [32:63] 188 | } Fields; 189 | }; 190 | } EVENTINJ, *PEVENTINJ; 191 | static_assert(sizeof(EVENTINJ) == 8, 192 | "EVENTINJ Size Mismatch"); 193 | 194 | // 195 | // See "SVM Intercept Codes" 196 | // 197 | #define VMEXIT_CR0_READ 0x0000 198 | #define VMEXIT_CR1_READ 0x0001 199 | #define VMEXIT_CR2_READ 0x0002 200 | #define VMEXIT_CR3_READ 0x0003 201 | #define VMEXIT_CR4_READ 0x0004 202 | #define VMEXIT_CR5_READ 0x0005 203 | #define VMEXIT_CR6_READ 0x0006 204 | #define VMEXIT_CR7_READ 0x0007 205 | #define VMEXIT_CR8_READ 0x0008 206 | #define VMEXIT_CR9_READ 0x0009 207 | #define VMEXIT_CR10_READ 0x000a 208 | #define VMEXIT_CR11_READ 0x000b 209 | #define VMEXIT_CR12_READ 0x000c 210 | #define VMEXIT_CR13_READ 0x000d 211 | #define VMEXIT_CR14_READ 0x000e 212 | #define VMEXIT_CR15_READ 0x000f 213 | #define VMEXIT_CR0_WRITE 0x0010 214 | #define VMEXIT_CR1_WRITE 0x0011 215 | #define VMEXIT_CR2_WRITE 0x0012 216 | #define VMEXIT_CR3_WRITE 0x0013 217 | #define VMEXIT_CR4_WRITE 0x0014 218 | #define VMEXIT_CR5_WRITE 0x0015 219 | #define VMEXIT_CR6_WRITE 0x0016 220 | #define VMEXIT_CR7_WRITE 0x0017 221 | #define VMEXIT_CR8_WRITE 0x0018 222 | #define VMEXIT_CR9_WRITE 0x0019 223 | #define VMEXIT_CR10_WRITE 0x001a 224 | #define VMEXIT_CR11_WRITE 0x001b 225 | #define VMEXIT_CR12_WRITE 0x001c 226 | #define VMEXIT_CR13_WRITE 0x001d 227 | #define VMEXIT_CR14_WRITE 0x001e 228 | #define VMEXIT_CR15_WRITE 0x001f 229 | #define VMEXIT_DR0_READ 0x0020 230 | #define VMEXIT_DR1_READ 0x0021 231 | #define VMEXIT_DR2_READ 0x0022 232 | #define VMEXIT_DR3_READ 0x0023 233 | #define VMEXIT_DR4_READ 0x0024 234 | #define VMEXIT_DR5_READ 0x0025 235 | #define VMEXIT_DR6_READ 0x0026 236 | #define VMEXIT_DR7_READ 0x0027 237 | #define VMEXIT_DR8_READ 0x0028 238 | #define VMEXIT_DR9_READ 0x0029 239 | #define VMEXIT_DR10_READ 0x002a 240 | #define VMEXIT_DR11_READ 0x002b 241 | #define VMEXIT_DR12_READ 0x002c 242 | #define VMEXIT_DR13_READ 0x002d 243 | #define VMEXIT_DR14_READ 0x002e 244 | #define VMEXIT_DR15_READ 0x002f 245 | #define VMEXIT_DR0_WRITE 0x0030 246 | #define VMEXIT_DR1_WRITE 0x0031 247 | #define VMEXIT_DR2_WRITE 0x0032 248 | #define VMEXIT_DR3_WRITE 0x0033 249 | #define VMEXIT_DR4_WRITE 0x0034 250 | #define VMEXIT_DR5_WRITE 0x0035 251 | #define VMEXIT_DR6_WRITE 0x0036 252 | #define VMEXIT_DR7_WRITE 0x0037 253 | #define VMEXIT_DR8_WRITE 0x0038 254 | #define VMEXIT_DR9_WRITE 0x0039 255 | #define VMEXIT_DR10_WRITE 0x003a 256 | #define VMEXIT_DR11_WRITE 0x003b 257 | #define VMEXIT_DR12_WRITE 0x003c 258 | #define VMEXIT_DR13_WRITE 0x003d 259 | #define VMEXIT_DR14_WRITE 0x003e 260 | #define VMEXIT_DR15_WRITE 0x003f 261 | #define VMEXIT_EXCEPTION_DE 0x0040 262 | #define VMEXIT_EXCEPTION_DB 0x0041 263 | #define VMEXIT_EXCEPTION_NMI 0x0042 264 | #define VMEXIT_EXCEPTION_BP 0x0043 265 | #define VMEXIT_EXCEPTION_OF 0x0044 266 | #define VMEXIT_EXCEPTION_BR 0x0045 267 | 
#define VMEXIT_EXCEPTION_UD 0x0046 268 | #define VMEXIT_EXCEPTION_NM 0x0047 269 | #define VMEXIT_EXCEPTION_DF 0x0048 270 | #define VMEXIT_EXCEPTION_09 0x0049 271 | #define VMEXIT_EXCEPTION_TS 0x004a 272 | #define VMEXIT_EXCEPTION_NP 0x004b 273 | #define VMEXIT_EXCEPTION_SS 0x004c 274 | #define VMEXIT_EXCEPTION_GP 0x004d 275 | #define VMEXIT_EXCEPTION_PF 0x004e 276 | #define VMEXIT_EXCEPTION_15 0x004f 277 | #define VMEXIT_EXCEPTION_MF 0x0050 278 | #define VMEXIT_EXCEPTION_AC 0x0051 279 | #define VMEXIT_EXCEPTION_MC 0x0052 280 | #define VMEXIT_EXCEPTION_XF 0x0053 281 | #define VMEXIT_EXCEPTION_20 0x0054 282 | #define VMEXIT_EXCEPTION_21 0x0055 283 | #define VMEXIT_EXCEPTION_22 0x0056 284 | #define VMEXIT_EXCEPTION_23 0x0057 285 | #define VMEXIT_EXCEPTION_24 0x0058 286 | #define VMEXIT_EXCEPTION_25 0x0059 287 | #define VMEXIT_EXCEPTION_26 0x005a 288 | #define VMEXIT_EXCEPTION_27 0x005b 289 | #define VMEXIT_EXCEPTION_28 0x005c 290 | #define VMEXIT_EXCEPTION_VC 0x005d 291 | #define VMEXIT_EXCEPTION_SX 0x005e 292 | #define VMEXIT_EXCEPTION_31 0x005f 293 | #define VMEXIT_INTR 0x0060 294 | #define VMEXIT_NMI 0x0061 295 | #define VMEXIT_SMI 0x0062 296 | #define VMEXIT_INIT 0x0063 297 | #define VMEXIT_VINTR 0x0064 298 | #define VMEXIT_CR0_SEL_WRITE 0x0065 299 | #define VMEXIT_IDTR_READ 0x0066 300 | #define VMEXIT_GDTR_READ 0x0067 301 | #define VMEXIT_LDTR_READ 0x0068 302 | #define VMEXIT_TR_READ 0x0069 303 | #define VMEXIT_IDTR_WRITE 0x006a 304 | #define VMEXIT_GDTR_WRITE 0x006b 305 | #define VMEXIT_LDTR_WRITE 0x006c 306 | #define VMEXIT_TR_WRITE 0x006d 307 | #define VMEXIT_RDTSC 0x006e 308 | #define VMEXIT_RDPMC 0x006f 309 | #define VMEXIT_PUSHF 0x0070 310 | #define VMEXIT_POPF 0x0071 311 | #define VMEXIT_CPUID 0x0072 312 | #define VMEXIT_RSM 0x0073 313 | #define VMEXIT_IRET 0x0074 314 | #define VMEXIT_SWINT 0x0075 315 | #define VMEXIT_INVD 0x0076 316 | #define VMEXIT_PAUSE 0x0077 317 | #define VMEXIT_HLT 0x0078 318 | #define VMEXIT_INVLPG 0x0079 319 | #define VMEXIT_INVLPGA 0x007a 320 | #define VMEXIT_IOIO 0x007b 321 | #define VMEXIT_MSR 0x007c 322 | #define VMEXIT_TASK_SWITCH 0x007d 323 | #define VMEXIT_FERR_FREEZE 0x007e 324 | #define VMEXIT_SHUTDOWN 0x007f 325 | #define VMEXIT_VMRUN 0x0080 326 | #define VMEXIT_VMMCALL 0x0081 327 | #define VMEXIT_VMLOAD 0x0082 328 | #define VMEXIT_VMSAVE 0x0083 329 | #define VMEXIT_STGI 0x0084 330 | #define VMEXIT_CLGI 0x0085 331 | #define VMEXIT_SKINIT 0x0086 332 | #define VMEXIT_RDTSCP 0x0087 333 | #define VMEXIT_ICEBP 0x0088 334 | #define VMEXIT_WBINVD 0x0089 335 | #define VMEXIT_MONITOR 0x008a 336 | #define VMEXIT_MWAIT 0x008b 337 | #define VMEXIT_MWAIT_CONDITIONAL 0x008c 338 | #define VMEXIT_XSETBV 0x008d 339 | #define VMEXIT_EFER_WRITE_TRAP 0x008f 340 | #define VMEXIT_CR0_WRITE_TRAP 0x0090 341 | #define VMEXIT_CR1_WRITE_TRAP 0x0091 342 | #define VMEXIT_CR2_WRITE_TRAP 0x0092 343 | #define VMEXIT_CR3_WRITE_TRAP 0x0093 344 | #define VMEXIT_CR4_WRITE_TRAP 0x0094 345 | #define VMEXIT_CR5_WRITE_TRAP 0x0095 346 | #define VMEXIT_CR6_WRITE_TRAP 0x0096 347 | #define VMEXIT_CR7_WRITE_TRAP 0x0097 348 | #define VMEXIT_CR8_WRITE_TRAP 0x0098 349 | #define VMEXIT_CR9_WRITE_TRAP 0x0099 350 | #define VMEXIT_CR10_WRITE_TRAP 0x009a 351 | #define VMEXIT_CR11_WRITE_TRAP 0x009b 352 | #define VMEXIT_CR12_WRITE_TRAP 0x009c 353 | #define VMEXIT_CR13_WRITE_TRAP 0x009d 354 | #define VMEXIT_CR14_WRITE_TRAP 0x009e 355 | #define VMEXIT_CR15_WRITE_TRAP 0x009f 356 | #define VMEXIT_NPF 0x0400 357 | #define AVIC_INCOMPLETE_IPI 0x0401 358 | #define AVIC_NOACCEL 0x0402 359 | #define 
VMEXIT_VMGEXIT 0x0403 360 | #define VMEXIT_INVALID -1 361 | -------------------------------------------------------------------------------- /SimpleSvm/SimpleSvm.ruleset: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /SimpleSvm/SimpleSvm.vcxproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | x64 7 | 8 | 9 | Release 10 | x64 11 | 12 | 13 | 14 | {D8F3DC11-65E5-42C4-84A5-B23FC17F6A3C} 15 | {dd38f7fc-d7bd-488b-9242-7d8754cde80d} 16 | v4.5 17 | 12.0 18 | Debug 19 | Win32 20 | SimpleSvm 21 | 22 | 23 | 24 | Windows10 25 | false 26 | true 27 | WindowsKernelModeDriver10.0 28 | Driver 29 | WDM 30 | Desktop 31 | 32 | 33 | 34 | 35 | 36 | 37 | DbgengKernelDebugger 38 | SimpleSvm.ruleset 39 | 40 | 41 | 42 | true 43 | stdcpp17 44 | 5040;%(DisableSpecificWarnings) 45 | 46 | 47 | SHA256 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /SimpleSvm/SimpleSvm.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hpp;hxx;hm;inl;inc;xsd 11 | 12 | 13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} 14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms 15 | 16 | 17 | {8E41214B-6785-4CFE-B992-037D68949A14} 18 | inf;inv;inx;mof;mc; 19 | 20 | 21 | 22 | 23 | Source Files 24 | 25 | 26 | 27 | 28 | Header Files 29 | 30 | 31 | 32 | 33 | Source Files 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /SimpleSvm/x64.asm: -------------------------------------------------------------------------------- 1 | ; 2 | ; @file x64.asm 3 | ; 4 | ; @brief All assembly code. 5 | ; 6 | ; @author Satoshi Tanda 7 | ; 8 | ; @copyright Copyright (c) 2017-2019, Satoshi Tanda. All rights reserved. 9 | ; 10 | .const 11 | 12 | KTRAP_FRAME_SIZE equ 190h 13 | MACHINE_FRAME_SIZE equ 28h 14 | 15 | .code 16 | 17 | extern SvHandleVmExit : proc 18 | 19 | ; 20 | ; @brief Saves all general purpose registers to the stack. 21 | ; 22 | ; @details This macro does not alter the flag register. 23 | ; 24 | PUSHAQ macro 25 | push rax 26 | push rcx 27 | push rdx 28 | push rbx 29 | push -1 ; Dummy for rsp. 30 | push rbp 31 | push rsi 32 | push rdi 33 | push r8 34 | push r9 35 | push r10 36 | push r11 37 | push r12 38 | push r13 39 | push r14 40 | push r15 41 | endm 42 | 43 | ; 44 | ; @brief Loads all general purpose registers from the stack. 45 | ; 46 | ; @details This macro does not alter the flag register. 47 | ; 48 | POPAQ macro 49 | pop r15 50 | pop r14 51 | pop r13 52 | pop r12 53 | pop r11 54 | pop r10 55 | pop r9 56 | pop r8 57 | pop rdi 58 | pop rsi 59 | pop rbp 60 | pop rbx ; Dummy for rsp (this value is destroyed by the next pop). 
61 | pop rbx 62 | pop rdx 63 | pop rcx 64 | pop rax 65 | endm 66 | 67 | ; 68 | ; @brief Enters the loop that executes the guest and handles #VMEXIT. 69 | ; 70 | ; @details This function switches to the host stack pointer, runs the guest 71 | ; and handles #VMEXIT until SvHandleVmExit returns non-zero value. 72 | ; When SvHandleVmExit returned non-zero value, this function 73 | ; returns execution flow to the next instruction of the 74 | ; instruction triggered #VMEXIT after terminating virtualization. 75 | ; 76 | ; @param[in] HostRsp - A stack pointer for the hypervisor. 77 | ; 78 | SvLaunchVm proc frame 79 | ; 80 | ; Update the current stack pointer with the host RSP. This protects 81 | ; values stored on stack for the hypervisor from being overwritten by 82 | ; the guest due to a use of the same stack memory. 83 | ; 84 | mov rsp, rcx ; Rsp <= HostRsp 85 | 86 | SvLV10: ; 87 | ; Run the loop to executed the guest and handle #VMEXIT. Below is the 88 | ; current stack leyout. 89 | ; ---- 90 | ; Rsp => 0x...fd0 GuestVmcbPa ; HostStackLayout 91 | ; 0x...fd8 HostVmcbPa ; 92 | ; 0x...fe0 Self ; 93 | ; 0x...fe8 SharedVpData ; 94 | ; 0x...ff0 Padding1 ; 95 | ; 0x...ff8 Reserved1 ; 96 | ; ---- 97 | ; 98 | mov rax, [rsp] ; RAX <= VpData->HostStackLayout.GuestVmcbPa 99 | vmload rax ; load previously saved guest state from VMCB 100 | 101 | ; 102 | ; Start the guest. The VMRUN instruction resumes execution of the guest 103 | ; with state described in VMCB (specified by RAX by its physical address) 104 | ; until #VMEXI is triggered. On #VMEXIT, the VMRUN instruction completes 105 | ; and resumes the next instruction (ie, vmsave in our case). 106 | ; 107 | ; The VMRUN instruction does the following things in this order: 108 | ; - saves some current state (ie. host state) into the host state-save 109 | ; area specified in IA32_MSR_VM_HSAVE_PA 110 | ; - loads guest state from the VMCB state-save area 111 | ; - enables interrupts by setting the the global interrupt flag (GIF) 112 | ; - resumes execution of the guest until #VMEXIT occurs 113 | ; See "Basic Operation" for more details. 114 | ; 115 | ; On #VMEXIT: 116 | ; - disables interrupts by clearing the the global interrupt flag (GIF) 117 | ; - saves current guest state into and update VMCB to provide information 118 | ; to handle #VMEXIT 119 | ; - loads the host state previously saved by the VMRUN instruction 120 | ; See "#VMEXIT" in the volume 2 and "VMRUN" in the volume 3 for more 121 | ; details. 122 | ; 123 | vmrun rax ; Switch to the guest until #VMEXIT 124 | 125 | ; 126 | ; #VMEXIT occurred. Now, some of guest state has been saved to VMCB, but 127 | ; not all of it. Save some of unsaved state with the VMSAVE instruction. 128 | ; 129 | ; RAX (and some other state like RSP) has been restored from the host 130 | ; state-save, so it has the same value as before and not guest's one. 131 | ; 132 | vmsave rax ; Save current guest state to VMCB 133 | 134 | ; 135 | ; Optionally, allocate the trap frame so that Windbg can display stack 136 | ; trace of the guest while SvHandleVmExit is being executed. The trap 137 | ; frame fields necessary for this are initialized in SvHandleVmExit. 138 | ; 139 | .pushframe 140 | sub rsp, KTRAP_FRAME_SIZE 141 | .allocstack KTRAP_FRAME_SIZE - MACHINE_FRAME_SIZE + 100h 142 | 143 | ; 144 | ; Also save guest's GPRs since those are not saved anywhere by the 145 | ; processor on #VMEXIT and will be destroyed by subsequent host code. 
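        ;
        ; (For reference: on #VMEXIT the processor does save the guest's RAX,
        ; RSP, RIP and RFLAGS, among other state, into the VMCB state-save
        ; area; it is the remaining general purpose registers that PUSHAQ
        ; below has to preserve for SvHandleVmExit.)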
146 | ; 147 | PUSHAQ ; Stack pointer decreased 8 * 16 148 | 149 | ; 150 | ; Set parameters for SvHandleVmExit. Below is the current stack layout. 151 | ; ---- 152 | ; Rsp => 0x...dc0 R15 ; GUEST_REGISTERS 153 | ; 0x...dc8 R14 ; 154 | ; ... ; 155 | ; 0x...e38 RAX ; 156 | ; Rsp + 8 * 16 => 0x...e40 TrapFrame ; HostStackLayout 157 | ; ... ; 158 | ; Rsp + 8 * 16 + KTRAP_FRAME_SIZE => 0x...fd0 GuestVmcbPa ; 159 | ; 0x...fd8 HostVmcbPa ; 160 | ; Rsp + 8 * 18 + KTRAP_FRAME_SIZE => 0x...fe0 Self ; 161 | ; 0x...fe8 SharedVpData ; 162 | ; 0x...ff0 Padding1 ; 163 | ; 0x...ff8 Reserved1 ; 164 | ; ---- 165 | ; 166 | mov rdx, rsp ; Rdx <= GuestRegisters 167 | mov rcx, [rsp + 8 * 18 + KTRAP_FRAME_SIZE] ; Rcx <= VpData 168 | 169 | ; 170 | ; Allocate stack for homing space (0x20) and volatile XMM registers 171 | ; (0x60). Save those registers because subsequent host code may destroy 172 | ; any of those registers. XMM6-15 are not saved because those should be 173 | ; preserved (those are non volatile registers). Finally, indicates the 174 | ; end of the function prolog as stack pointer changes are all done. This 175 | ; is for Windbg to reconstruct stack trace. 176 | ; 177 | sub rsp, 80h 178 | movaps xmmword ptr [rsp + 20h], xmm0 179 | movaps xmmword ptr [rsp + 30h], xmm1 180 | movaps xmmword ptr [rsp + 40h], xmm2 181 | movaps xmmword ptr [rsp + 50h], xmm3 182 | movaps xmmword ptr [rsp + 60h], xmm4 183 | movaps xmmword ptr [rsp + 70h], xmm5 184 | .endprolog 185 | 186 | ; 187 | ; Handle #VMEXIT. 188 | ; 189 | call SvHandleVmExit 190 | 191 | ; 192 | ; Restore XMM registers and roll back stack pointer. 193 | ; 194 | movaps xmm5, xmmword ptr [rsp + 70h] 195 | movaps xmm4, xmmword ptr [rsp + 60h] 196 | movaps xmm3, xmmword ptr [rsp + 50h] 197 | movaps xmm2, xmmword ptr [rsp + 40h] 198 | movaps xmm1, xmmword ptr [rsp + 30h] 199 | movaps xmm0, xmmword ptr [rsp + 20h] 200 | add rsp, 80h 201 | 202 | ; 203 | ; Test a return value of SvHandleVmExit (RAX), then POPAQ to restore the 204 | ; original guest's GPRs. 205 | ; 206 | test al, al 207 | POPAQ 208 | 209 | ; 210 | ; If non zero value is returned from SvHandleVmExit, this function exits 211 | ; the loop. Otherwise, continue the loop and resume the guest. 212 | ; 213 | jnz SvLV20 ; if (ExitVm != 0) jmp SvLV20 214 | add rsp, KTRAP_FRAME_SIZE ; else, restore RSP and 215 | jmp SvLV10 ; jmp SvLV10 216 | 217 | SvLV20: ; 218 | ; Virtualization has been terminated. Restore an original (guest's, 219 | ; although it is no longer the "guest") stack pointer and return to the 220 | ; next instruction of CPUID triggered this #VMEXIT. 221 | ; 222 | ; Here is contents of certain registers: 223 | ; RBX = An address to return 224 | ; RCX = An original stack pointer to restore 225 | ; EDX:EAX = An address of per processor data for this processor 226 | ; 227 | mov rsp, rcx 228 | 229 | ; 230 | ; Update RCX with the magic value indicating that the SimpleSvm 231 | ; hypervisor has been unloaded. 232 | ; 233 | mov ecx, 'SSVM' 234 | 235 | ; 236 | ; Return to the next instruction of CPUID triggered this #VMEXIT. The 237 | ; registry values to be returned are: 238 | ; EBX = Undefined 239 | ; ECX = 'SSVM' 240 | ; EDX:EAX = An address of per processor data for this processor 241 | ; 242 | jmp rbx 243 | SvLaunchVm endp 244 | 245 | end 246 | --------------------------------------------------------------------------------