├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── examples
│   ├── hello.c
│   └── image.c
└── src
    ├── bytevec.rs
    ├── linux
    │   ├── coverage.rs
    │   ├── elf_loader.rs
    │   ├── errcodes.rs
    │   ├── errconv.rs
    │   ├── lxfile.rs
    │   ├── lxrealfile.rs
    │   ├── lxstate.rs
    │   ├── lxstd.rs
    │   ├── lxsyscall.rs
    │   ├── mod.rs
    │   ├── syscall_stats.rs
    │   └── usermem.rs
    ├── main.rs
    ├── mm
    │   ├── membank.rs
    │   ├── mod.rs
    │   ├── paging.rs
    │   └── phys_allocator.rs
    └── vm
        ├── exception.rs
        ├── memory.rs
        ├── misc.rs
        ├── mod.rs
        ├── rawmem.rs
        ├── regbits.rs
        ├── regs.rs
        ├── regstate.rs
        ├── regstate_sync.rs
        ├── vmexit.rs
        └── whvp_bindings.rs

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/target
linuxfs/
coverage.txt

--------------------------------------------------------------------------------
/Cargo.lock:
--------------------------------------------------------------------------------
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "linux-vm"
version = "0.1.0"

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "linux-vm"
version = "0.1.0"
authors = ["addrianyy "]
edition = "2018"

[dependencies]

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 Adrian

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Linux emulation on Windows
This project emulates Linux syscalls, allowing Linux applications to run on Windows.
Currently only a handful of syscalls are handled, so only the most basic applications will work.
Dynamic libraries are not supported, so I suggest compiling Linux code against the musl standard library.
The emulator requires the Windows Hypervisor Platform, so you need to enable it beforehand.

# Supported syscalls
Some of the syscalls below are only partially implemented.

* read
* write
* open
* close
* mmap
* munmap
* brk
* ioctl
* readv
* writev
* madvise
* nanosleep
* exit
* creat
* arch_prctl
* set_tid_address
* clock_gettime
* exit_group

# Additional features
Syscall frequencies can be counted while running an application. Generating a code
coverage file that can be consumed by Lighthouse or other similar tools is also supported.

# Examples
Two simple programs written in C are in the examples/ directory. They need to be compiled
against the musl standard library, and they run successfully on the emulator.
They can be compiled with the following command:

musl-gcc -static app.c -o compiled-app.o
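To run a compiled example, point the emulator at it: main.rs currently hardcodes the guest
executable path as "compiled-app", so either edit main.rs or name the output accordingly.
A sketch (file names here are illustrative):

musl-gcc -static examples/hello.c -o compiled-app
cargo run --release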
--------------------------------------------------------------------------------
/examples/hello.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(int argc, const char* argv[]) {
    for (int i = 0; i < argc; ++i) {
        printf("%d: %s\n", i, argv[i]);
    }

    printf("\n");

    for (int i = 0; i < 10; ++i) {
        time_t unix_time;

        time(&unix_time);
        struct tm* time_info = localtime(&unix_time);

        printf("Current time and date: %s", asctime(time_info));
        printf("Timezone is probably wrong though.\n");

        usleep(500 * 1000);
    }

    return 0;
}

--------------------------------------------------------------------------------
/examples/image.c:
--------------------------------------------------------------------------------
// Requires stb_image_write
// https://github.com/nothings/stb/blob/master/stb_image_write.h

#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

#include <stdint.h>
#include <stdlib.h>

int main() {
    int width = 1920;
    int height = 1080;
    int channels = 4;

    uint8_t* data_begin = malloc(width * height * channels);
    uint8_t* data = data_begin;

    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            float xv = (float)x / (float)(width - 1);
            float yv = (float)y / (float)(height - 1);

            data[0] = (uint8_t)(xv * 255.f);
            data[1] = (uint8_t)(yv * 255.f);
            data[2] = 0;
            data[3] = 255;

            data += channels;
        }
    }

    stbi_write_png("output.png", width, height, channels, data_begin, width * channels);

    free(data_begin);

    return 0;
}

--------------------------------------------------------------------------------
/src/bytevec.rs:
--------------------------------------------------------------------------------
use std::ops::Deref;

pub struct ByteVec {
    vec: Vec<u8>,
}

impl ByteVec {
    pub fn new() -> Self {
        Self {
            vec: Vec::new(),
        }
    }

    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            vec: Vec::with_capacity(capacity),
        }
    }

    pub fn push_bytes(&mut self, bytes: &[u8]) -> u64 {
        let offset = self.vec.len() as u64;

        self.vec.extend_from_slice(bytes);

        offset
    }

    pub fn push_u16(&mut self, value: u16) -> u64 {
        self.push_bytes(&value.to_le_bytes())
    }

    pub fn push_u32(&mut self, value: u32) -> u64 {
        self.push_bytes(&value.to_le_bytes())
    }

    pub fn push_u64(&mut self, value: u64) -> u64 {
        self.push_bytes(&value.to_le_bytes())
    }

    pub fn as_slice(&self) -> &[u8] {
        self.vec.as_slice()
    }
}

impl Deref for ByteVec {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        self.as_slice()
    }
}
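A quick illustration of how ByteVec is used throughout the emulator (this snippet is an
example only, not part of the crate): values are appended little-endian, and the offsets
returned by the push_* methods later become guest-visible pointers.

    let mut data = ByteVec::new();
    let off = data.push_bytes(b"hi\0");     // string starts at offset 0
    data.push_u64(0x1122_3344_5566_7788);   // appended at offset 3, little-endian
    assert_eq!(off, 0);
    assert_eq!(&data[3..11], &0x1122_3344_5566_7788u64.to_le_bytes());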
--------------------------------------------------------------------------------
/src/linux/coverage.rs:
--------------------------------------------------------------------------------
use std::collections::BTreeSet;
use std::fs::File;
use std::io::Write;

pub struct Coverage {
    rips: BTreeSet<u64>,
    file: File,
}

impl Coverage {
    pub fn new(path: &str) -> Self {
        let file = File::create(path).expect("Failed to create coverage file.");

        Self {
            rips: BTreeSet::new(),
            file,
        }
    }

    pub fn report(&mut self, rip: u64) {
        self.rips.insert(rip);
    }

    pub fn entries(&self) -> usize {
        self.rips.len()
    }
}

impl Drop for Coverage {
    fn drop(&mut self) {
        for rip in self.rips.iter() {
            self.file.write_all(format!("{:X}\n", rip).as_bytes())
                .expect("Failed to write coverage info.");
        }
    }
}

--------------------------------------------------------------------------------
/src/linux/elf_loader.rs:
--------------------------------------------------------------------------------
trait Parse<T> {
    fn parse(&self, off: u32) -> T;
}

impl<T> Parse<T> for [u8] {
    fn parse(&self, off: u32) -> T {
        assert!(
            off as usize + std::mem::size_of::<T>() <= self.len(),
            "Tried to read out of bounds memory."
        );

        unsafe {
            let ptr = self.as_ptr().offset(off as isize);
            (ptr as *const T).read()
        }
    }
}

pub struct Segment {
    pub start: u64,
    pub size: u64,
    pub writeable: bool,
    pub executable: bool,
}

pub struct MappedElf {
    pub mapped: Vec<u8>,
    pub entrypoint: u64,
    pub base: u64,
    pub segments: Vec<Segment>,
}

pub fn map_elf64(buffer: &[u8]) -> MappedElf {
    let entrypoint: u64 = buffer.parse(0x18);

    let segment_table: u64 = buffer.parse(0x20);
    let segment_count: u16 = buffer.parse(0x38);
    let segment_header_size: u16 = buffer.parse(0x36);

    assert_eq!(segment_header_size, 0x38, "Unexpected program header size.");

    let mut segments = Vec::with_capacity(segment_count as usize);
    let mut mapped = Vec::with_capacity(buffer.len());
    let mut base = None;

    for i in 0..(segment_count as u64) {
        let segment = (segment_table + i * segment_header_size as u64) as u32;

        let segment_type: u32 = buffer.parse(segment);
        if segment_type != 1 {
            continue;
        }

        let file_off: u64 = buffer.parse(segment + 0x08);
        let vaddr: u64 = buffer.parse(segment + 0x10);
        let file_size: u64 = buffer.parse(segment + 0x20);
        let virt_size: u64 = buffer.parse(segment + 0x28);

        let flags: u64 = buffer.parse(segment + 0x4);

        let executable = flags & 1 != 0;
        let writeable = flags & 2 != 0;

        segments.push(Segment {
            start: vaddr,
            size: virt_size,
            writeable,
            executable,
        });

        if base.is_none() {
            base = Some(vaddr);
        }

        let virt_offset = vaddr - base.unwrap();

        let pad = virt_offset as usize - mapped.len();
        mapped.extend(vec![0u8; pad]);

        let raw = file_off as usize;
        let len = std::cmp::min(file_size, virt_size);
        mapped.extend_from_slice(&buffer[raw..raw + len as usize]);

        let pad = virt_size - file_size;
        mapped.extend(vec![0u8; pad as usize]);
    }

    let base = base.expect("ELF has no loadable sections.");

    let pad = ((mapped.len() + 0xFFF) & !0xFFF) - mapped.len();
    mapped.extend(vec![0u8; pad as usize]);

    MappedElf {
        mapped,
        entrypoint,
        base,
        segments,
    }
}
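The Parse helper above reads a raw little-endian value at a fixed byte offset, which is how
the loader pulls fields out of the ELF64 header (entrypoint at 0x18, program header table at
0x20, and so on). An illustrative one-liner that is not part of the loader itself:

    // e_machine lives at offset 0x12 of the ELF64 header; 0x3E means x86-64,
    // so this assertion holds for any executable this emulator can run.
    let machine: u16 = buffer.parse(0x12);
    assert_eq!(machine, 0x3E);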
--------------------------------------------------------------------------------
/src/linux/errcodes.rs:
--------------------------------------------------------------------------------
pub const EPERM: i64 = 1;
pub const ENOENT: i64 = 2;
pub const ESRCH: i64 = 3;
pub const EINTR: i64 = 4;
pub const EIO: i64 = 5;
pub const ENXIO: i64 = 6;
pub const E2BIG: i64 = 7;
pub const ENOEXEC: i64 = 8;
pub const EBADF: i64 = 9;
pub const ECHILD: i64 = 10;
pub const EAGAIN: i64 = 11;
pub const ENOMEM: i64 = 12;
pub const EACCES: i64 = 13;
pub const EFAULT: i64 = 14;
pub const ENOTBLK: i64 = 15;
pub const EBUSY: i64 = 16;
pub const EEXIST: i64 = 17;
pub const EXDEV: i64 = 18;
pub const ENODEV: i64 = 19;
pub const ENOTDIR: i64 = 20;
pub const EISDIR: i64 = 21;
pub const EINVAL: i64 = 22;
pub const ENFILE: i64 = 23;
pub const EMFILE: i64 = 24;
pub const ENOTTY: i64 = 25;
pub const ETXTBSY: i64 = 26;
pub const EFBIG: i64 = 27;
pub const ENOSPC: i64 = 28;
pub const ESPIPE: i64 = 29;
pub const EROFS: i64 = 30;
pub const EMLINK: i64 = 31;
pub const EPIPE: i64 = 32;
pub const EDOM: i64 = 33;
pub const ERANGE: i64 = 34;
pub const EDEADLK: i64 = 35;
pub const ENAMETOOLONG: i64 = 36;
pub const ENOLCK: i64 = 37;
pub const ENOSYS: i64 = 38;
pub const ENOTEMPTY: i64 = 39;
pub const ELOOP: i64 = 40;
pub const EWOULDBLOCK: i64 = EAGAIN;
pub const ENOMSG: i64 = 42;
pub const EIDRM: i64 = 43;
pub const ECHRNG: i64 = 44;
pub const EL2NSYNC: i64 = 45;
pub const EL3HLT: i64 = 46;
pub const EL3RST: i64 = 47;
pub const ELNRNG: i64 = 48;
pub const EUNATCH: i64 = 49;
pub const ENOCSI: i64 = 50;
pub const EL2HLT: i64 = 51;
pub const EBADE: i64 = 52;
pub const EBADR: i64 = 53;
pub const EXFULL: i64 = 54;
pub const ENOANO: i64 = 55;
pub const EBADRQC: i64 = 56;
pub const EBADSLT: i64 = 57;
pub const EDEADLOCK: i64 = EDEADLK;
pub const EBFONT: i64 = 59;
pub const ENOSTR: i64 = 60;
pub const ENODATA: i64 = 61;
pub const ETIME: i64 = 62;
pub const ENOSR: i64 = 63;
pub const ENONET: i64 = 64;
pub const ENOPKG: i64 = 65;
pub const EREMOTE: i64 = 66;
pub const ENOLINK: i64 = 67;
pub const EADV: i64 = 68;
pub const ESRMNT: i64 = 69;
pub const ECOMM: i64 = 70;
pub const EPROTO: i64 = 71;
pub const EMULTIHOP: i64 = 72;
pub const EDOTDOT: i64 = 73;
pub const EBADMSG: i64 = 74;
pub const EOVERFLOW: i64 = 75;
pub const ENOTUNIQ: i64 = 76;
pub const EBADFD: i64 = 77;
pub const EREMCHG: i64 = 78;
pub const ELIBACC: i64 = 79;
pub const ELIBBAD: i64 = 80;
pub const ELIBSCN: i64 = 81;
pub const ELIBMAX: i64 = 82;
pub const ELIBEXEC: i64 = 83;
pub const EILSEQ: i64 = 84;
pub const ERESTART: i64 = 85;
pub const ESTRPIPE: i64 = 86;
pub const EUSERS: i64 = 87;
pub const ENOTSOCK: i64 = 88;
pub const EDESTADDRREQ: i64 = 89;
pub const EMSGSIZE: i64 = 90;
pub const EPROTOTYPE: i64 = 91;
pub const ENOPROTOOPT: i64 = 92;
pub const EPROTONOSUPPORT: i64 = 93;
pub const ESOCKTNOSUPPORT: i64 = 94;
pub const EOPNOTSUPP: i64 = 95;
pub const EPFNOSUPPORT: i64 = 96;
pub const EAFNOSUPPORT: i64 = 97;
pub const EADDRINUSE: i64 = 98;
pub const EADDRNOTAVAIL: i64 = 99;
pub const ENETDOWN: i64 = 100;
pub const ENETUNREACH: i64 = 101;
pub const ENETRESET: i64 = 102;
pub const ECONNABORTED: i64 = 103;
pub const ECONNRESET: i64 = 104;
pub const ENOBUFS: i64 = 105;
pub const EISCONN: i64 = 106;
pub const ENOTCONN: i64 = 107;
pub const ESHUTDOWN: i64 = 108;
pub const ETOOMANYREFS: i64 = 109;
pub const ETIMEDOUT: i64 = 110;
pub const ECONNREFUSED: i64 = 111;
pub const EHOSTDOWN: i64 = 112;
pub const EHOSTUNREACH: i64 = 113;
pub const EALREADY: i64 = 114;
pub const EINPROGRESS: i64 = 115;
pub const ESTALE: i64 = 116;
pub const EUCLEAN: i64 = 117;
pub const ENOTNAM: i64 = 118;
pub const ENAVAIL: i64 = 119;
pub const EISNAM: i64 = 120;
pub const EREMOTEIO: i64 = 121;
pub const EDQUOT: i64 = 122;
pub const ENOMEDIUM: i64 = 123;
pub const EMEDIUMTYPE: i64 = 124;
--------------------------------------------------------------------------------
/src/linux/errconv.rs:
--------------------------------------------------------------------------------
use super::errcodes as ec;
use std::io::ErrorKind;

pub fn ekind_to_linux_error(kind: ErrorKind) -> i64 {
    match kind {
        ErrorKind::NotFound => -ec::ENOENT,
        ErrorKind::PermissionDenied => -ec::EACCES,
        ErrorKind::BrokenPipe => -ec::EPIPE,
        ErrorKind::AlreadyExists => -ec::EEXIST,
        ErrorKind::WouldBlock => -ec::EWOULDBLOCK,
        ErrorKind::Interrupted => -ec::EINTR,
        ErrorKind::InvalidInput => -ec::EINVAL,
        ErrorKind::Other => -ec::ENOENT,
        _ => panic!("Unhandled IO error kind {:?}.", kind),
    }
}

--------------------------------------------------------------------------------
/src/linux/lxfile.rs:
--------------------------------------------------------------------------------
use crate::vm::*;
use super::VmPaging;

pub type DynLinuxFile = dyn LinuxFile + 'static;

pub trait LinuxFile {
    fn read(&mut self, buffer: &mut [u8]) -> i64;
    fn write(&mut self, buffer: &[u8]) -> i64;
    fn ioctl(&mut self, cmd: u32, arg: u64, vm: &mut Vm, paging: &mut VmPaging) -> i64;
}

--------------------------------------------------------------------------------
/src/linux/lxrealfile.rs:
--------------------------------------------------------------------------------
use crate::vm::*;
use super::VmPaging;
use super::errcodes as ec;
use super::lxfile::LinuxFile;
use super::errconv::ekind_to_linux_error;

use std::fs::File;
use std::io::{Read, Write};

pub struct LinuxRealFile {
    file: File,
}

impl LinuxRealFile {
    pub fn new(file: File) -> Self {
        Self {
            file,
        }
    }
}

impl LinuxFile for LinuxRealFile {
    fn read(&mut self, buffer: &mut [u8]) -> i64 {
        match self.file.read(buffer) {
            Ok(bytes) => bytes as i64,
            Err(error) => ekind_to_linux_error(error.kind()),
        }
    }

    fn write(&mut self, buffer: &[u8]) -> i64 {
        match self.file.write(buffer) {
            Ok(bytes) => bytes as i64,
            Err(error) => ekind_to_linux_error(error.kind()),
        }
    }

    fn ioctl(&mut self, cmd: u32, _arg: u64, _vm: &mut Vm, _paging: &mut VmPaging) -> i64 {
        match cmd {
            0x5413 => -ec::ENOTTY,
            _ => panic!("Unsupported IOCTL {:X} to file.", cmd),
        }
    }
}
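Together, errconv.rs and lxrealfile.rs let a host I/O failure surface in the guest as a
negative errno, which is the convention every syscall handler in this emulator follows.
A sketch of the pattern (the path is hypothetical; `state` is the LinuxState defined in
lxstate.rs below):

    let fd_or_error = match std::fs::File::open("linuxfs/data.txt") {
        Ok(file) => state.create_file(LinuxRealFile::new(file)) as i64, // fresh fd
        Err(error) => ekind_to_linux_error(error.kind()),               // e.g. -ENOENT
    };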
--------------------------------------------------------------------------------
/src/linux/lxstate.rs:
--------------------------------------------------------------------------------
use std::collections::BTreeMap;
use std::io::ErrorKind;
use crate::mm::membank::MemBank;
use super::lxfile::{LinuxFile, DynLinuxFile};

pub type Fd = u32;

pub struct LinuxState {
    tid: u32,
    pid: u32,
    exited: bool,
    fds: BTreeMap<Fd, Box<DynLinuxFile>>,
    next_fd: Fd,
    pub heap: MemBank,
}

impl LinuxState {
    pub fn new(pid: u32, tid: u32, heap_start: u64, heap_end: u64) -> Self {
        match std::fs::create_dir("linuxfs") {
            Ok(_) => (),
            Err(e) if e.kind() == ErrorKind::AlreadyExists => (),
            _ => panic!("Failed to create linuxfs."),
        };

        Self {
            pid,
            tid,
            exited: false,
            fds: BTreeMap::new(),
            heap: MemBank::new(heap_start, Some(heap_end)),
            next_fd: 10,
        }
    }

    pub fn create_file_at_fd(&mut self, fd: Fd, file: impl LinuxFile + 'static) {
        assert!(self.fds.insert(fd, Box::new(file)).is_none(), "FD {} was already used.", fd);
    }

    pub fn create_file(&mut self, file: impl LinuxFile + 'static) -> Fd {
        let fd = self.next_fd;

        self.next_fd += 1;

        self.create_file_at_fd(fd, file);

        fd
    }

    pub fn close_file(&mut self, fd: Fd) -> bool {
        self.fds.remove(&fd).is_some()
    }

    pub fn exit(&mut self) {
        self.exited = true;
    }

    pub fn exited(&self) -> bool {
        self.exited
    }

    pub fn tid(&self) -> u32 {
        self.tid
    }

    pub fn pid(&self) -> u32 {
        self.pid
    }

    pub fn file_from_fd(&mut self, fd: Fd) -> Option<&mut DynLinuxFile> {
        self.fds.get_mut(&fd).map(|file| &mut **file)
    }
}

--------------------------------------------------------------------------------
/src/linux/lxstd.rs:
--------------------------------------------------------------------------------
use crate::vm::*;
use super::VmPaging;
use super::errcodes as ec;
use super::lxfile::LinuxFile;
use super::usermem::USER_RW;
use crate::bytevec::ByteVec;

pub struct LinuxStdout {
    is_stderr: bool,
}

impl LinuxStdout {
    pub fn new(is_stderr: bool) -> Self {
        Self {
            is_stderr,
        }
    }

    fn name(&self) -> &str {
        match self.is_stderr {
            true => "stderr",
            false => "stdout",
        }
    }
}

impl LinuxFile for LinuxStdout {
    fn read(&mut self, _buffer: &mut [u8]) -> i64 {
        panic!("Reads not implemented for {}.", self.name());
    }

    fn write(&mut self, buffer: &[u8]) -> i64 {
        print!("{}", String::from_utf8_lossy(&buffer));

        buffer.len() as i64
    }

    fn ioctl(&mut self, cmd: u32, arg: u64, vm: &mut Vm, paging: &mut VmPaging) -> i64 {
        match cmd {
            0x5413 => { // TIOCGWINSZ
                let mut winsize = ByteVec::with_capacity(2 * 4);

                winsize.push_u16(25); // rows
                winsize.push_u16(80); // cols
                winsize.push_u16(0);  // xpixel
                winsize.push_u16(0);  // ypixel

                if paging.write_virt_checked(vm, arg, &winsize, USER_RW).is_err() {
                    return -ec::EFAULT;
                }

                0
            },
            _ => panic!("Unsupported IOCTL {:X} to {}.", cmd, self.name()),
        }
    }
}


pub struct LinuxStdin {
    _priv: bool,
}

impl LinuxStdin {
    pub fn new() -> Self {
        Self {
            _priv: true,
        }
    }
}

impl LinuxFile for LinuxStdin {
    fn read(&mut self, _buffer: &mut [u8]) -> i64 {
        panic!("Reads not implemented for stdin.");
    }

    fn write(&mut self, _buffer: &[u8]) -> i64 {
        panic!("Writes not implemented for stdin.");
    }

    fn ioctl(&mut self, _cmd: u32, _arg: u64, _vm: &mut Vm, _paging: &mut VmPaging) -> i64 {
        panic!("IOCTLs not implemented for stdin.");
    }
}
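Any guest-visible file is just an implementation of the LinuxFile trait from lxfile.rs, so new
device-like files are easy to add. A minimal sketch of a /dev/null-style sink (LinuxNull is
hypothetical and not part of this repository; it only assumes the trait above):

    struct LinuxNull;

    impl LinuxFile for LinuxNull {
        fn read(&mut self, _buffer: &mut [u8]) -> i64 {
            0 // always at EOF
        }

        fn write(&mut self, buffer: &[u8]) -> i64 {
            buffer.len() as i64 // discard the data, report success
        }

        fn ioctl(&mut self, _cmd: u32, _arg: u64, _vm: &mut Vm, _paging: &mut VmPaging) -> i64 {
            -ec::ENOTTY // not a terminal
        }
    }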
--------------------------------------------------------------------------------
/src/linux/lxsyscall.rs:
--------------------------------------------------------------------------------
use crate::vm::*;
use crate::mm::phys_allocator::{PhysAllocator, ContinousPhysAllocator};
use crate::mm::paging::MemProt;
use super::usermem::{USER_R, USER_RW};
use super::lxstate::LinuxState;
use super::lxfile::DynLinuxFile;
use super::lxrealfile::LinuxRealFile;
use crate::bytevec::ByteVec;
use super::VmPaging;
use super::errcodes as ec;
use super::errconv::ekind_to_linux_error;

use std::time::{SystemTime, Duration};
use std::convert::TryInto;
use std::fs::OpenOptions;

const O_WRONLY: u32 = 00000001;
const O_RDWR: u32 = 00000002;
const O_CREAT: u32 = 01000;
const O_TRUNC: u32 = 02000;
const O_EXCL: u32 = 04000;
const O_APPEND: u32 = 00010;
const O_DIRECTORY: u32 = 0100000;

pub struct LinuxSyscall<'a> {
    vm: &'a mut Vm,
    paging: &'a mut VmPaging,
    phys_allocator: &'a mut ContinousPhysAllocator,
    state: &'a mut LinuxState,
}

impl<'a> LinuxSyscall<'a> {
    pub fn handle(
        vm: &'a mut Vm,
        paging: &'a mut VmPaging,
        phys_allocator: &'a mut ContinousPhysAllocator,
        state: &'a mut LinuxState,
    ) -> i64 {
        let mut syscall = Self {
            vm,
            paging,
            phys_allocator,
            state,
        };

        syscall.service_syscall()
    }

    fn service_syscall(&mut self) -> i64 {
        let (syscall_id, params) = {
            let regs = self.vm.regs();
            let syscall_id = regs.rax as u32;

            let params = [
                regs.rdi,
                regs.rsi,
                regs.rdx,
                regs.r10,
                regs.r8,
                regs.r9,
            ];

            (syscall_id, params)
        };

        match syscall_id {
            0 => self.sys_read(params[0] as u32, params[1], params[2]),
            1 => self.sys_write(params[0] as u32, params[1], params[2]),
            2 => self.sys_open(params[0], params[1] as u32, params[2] as u32),
            3 => self.sys_close(params[0] as u32),
            9 => self.sys_mmap(params[0], params[1], params[2] as u32, params[3] as u32,
                               params[4] as u32, params[5] as u32),
            11 => self.sys_munmap(params[0], params[1]),
            12 => self.sys_brk(params[0]),
            16 => self.sys_ioctl(params[0] as u32, params[1] as u32, params[2]),
            19 => self.sys_readv(params[0] as u32, params[1], params[2] as u32),
            20 => self.sys_writev(params[0] as u32, params[1], params[2] as u32),
            28 => self.sys_madvise(params[0], params[1], params[2] as u32),
            35 => self.sys_nanosleep(params[0], params[1]),
            60 => self.sys_exit(params[0] as u32),
            85 => self.sys_creat(params[0], params[1] as u32),
            158 => self.sys_arch_prctl(params[0] as u32, params[1]),
            218 => self.sys_set_tid_address(params[0]),
            228 => self.sys_clock_gettime(params[0] as u32, params[1]),
            231 => self.sys_exit_group(params[0] as u32),
            _ => panic!("Unknown syscall {} at RIP {:X}.", syscall_id, self.vm.regs().rip),
        }
    }

    fn allocate(&mut self, length: u64, prot: MemProt) -> u64 {
        assert!(length & 0xFFF == 0, "Size {:X} not page aligned.", length);

        let phys_addr = self.phys_allocator.alloc_phys(&mut self.vm, length, None);
        let virt_addr = self.state.heap.reserve_region(length);

        self.paging.map_virt_region(&mut self.vm, virt_addr, phys_addr, length, prot);

        virt_addr
    }

    fn sys_mmap(&mut self, addr: u64, length: u64, prot: u32, flags: u32, _fd: u32, _off: u32)
        -> i64
    {
        const PROT_READ: u32 = 1;
        const PROT_WRITE: u32 = 2;
        const PROT_EXEC: u32 = 4;

        const MAP_FILE: u32 = 0x00;
        const MAP_SHARED: u32 = 0x01;
        const MAP_PRIVATE: u32 = 0x02;
        const MAP_ANONYMOUS: u32 = 0x20;

        // TODO

        assert!(flags & MAP_SHARED == 0, "Shared mmap is not supported.");
        assert!(flags & MAP_PRIVATE != 0, "Non-private mmap is not supported.");
        assert!(flags & MAP_ANONYMOUS != 0, "Non-anonymous mmap is not supported.");

        if addr != 0 {
            println!("WARNING: Ignoring mmap base address {:X}.", addr);
        }

        let length = (length + 0xFFF) & !0xFFF;

        self.allocate(length, MemProt {
            user: true,
            write: prot & PROT_WRITE != 0,
            execute: prot & PROT_EXEC != 0,
        }) as i64
    }
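    // Worked example of the rounding in sys_mmap above (illustrative): a request
    // of length 0x1234 becomes (0x1234 + 0xFFF) & !0xFFF == 0x2000, i.e. two
    // 4 KiB pages, which `allocate` then backs with fresh physical memory.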
    fn sys_munmap(&mut self, addr: u64, _size: u64) -> i64 {
        // TODO

        println!("WARNING: Ignoring munmap of address {:X}.", addr);

        0
    }

    fn sys_madvise(&mut self, _addr: u64, _length: u64, _advice: u32) -> i64 {
        0
    }

    fn sys_nanosleep(&mut self, rqtp: u64, _rmtp: u64) -> i64 {
        let mut timespec = [0u8; 16];

        if self.paging.read_virt_checked(&mut self.vm, rqtp, &mut timespec, USER_R).is_err() {
            return -ec::EFAULT;
        }

        let seconds = u64::from_le_bytes(timespec[0..8].try_into().unwrap());
        let nanos = u64::from_le_bytes(timespec[8..16].try_into().unwrap());

        let duration = Duration::from_secs(seconds) + Duration::from_nanos(nanos);

        std::thread::sleep(duration);

        0
    }

    fn sys_clock_gettime(&mut self, _clock_id: u32, tp: u64) -> i64 {
        let unix_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();

        let seconds = unix_time.as_secs();
        let nanos = (unix_time.as_nanos() - Duration::from_secs(seconds).as_nanos()) as u64;

        let mut timespec = ByteVec::with_capacity(16);

        timespec.push_u64(seconds);
        timespec.push_u64(nanos);

        if self.paging.write_virt_checked(&mut self.vm, tp, &timespec, USER_RW).is_err() {
            return -ec::EFAULT;
        }

        0
    }

    fn read_string(&mut self, mut addr: u64) -> Option<String> {
        let mut bytes = Vec::with_capacity(1024);

        while bytes.len() < 1024 {
            let mut buf = [0u8; 16];
            let result = self.paging.read_virt_checked(&mut self.vm, addr, &mut buf, USER_R);

            let read_bytes = match result {
                Ok(_) => buf.len(),
                Err(read) => read as usize,
            };

            if read_bytes == 0 {
                return None;
            }

            let null_terminator = buf.iter().position(|x| *x == 0);
            if let Some(pos) = null_terminator {
                if pos != 0 {
                    bytes.extend_from_slice(&buf[0..pos]);
                }

                break;
            } else {
                bytes.extend_from_slice(&buf[0..read_bytes]);
            }

            if read_bytes < buf.len() {
                break;
            }

            addr += read_bytes as u64;
        }

        Some(String::from_utf8_lossy(&bytes).to_string())
    }


    fn sys_creat(&mut self, path: u64, mode: u32) -> i64 {
        self.sys_open(path, O_CREAT | O_WRONLY | O_TRUNC, mode)
    }

    fn sys_open(&mut self, path: u64, mut flags: u32, _mode: u32) -> i64 {
        let path = if let Some(path) = self.read_string(path) {
            if path.contains("../") || path.contains("..\\") {
                return -ec::EACCES;
            }

            let mut final_path = String::new();
            final_path.push_str("linuxfs/");

            if path.starts_with("/") {
                final_path.push_str(&path[1..]);
            } else {
                final_path.push_str(&path);
            }

            final_path
        } else {
            return -ec::EFAULT;
        };

        if flags & O_CREAT != 0 {
            flags &= !O_DIRECTORY;
        }

        if flags & O_DIRECTORY != 0 {
            println!("WARNING: Opening of directories ({}) is not supported.", path);
            return -ec::ENOTDIR;
        }

        if flags & O_RDWR != 0 && flags & O_WRONLY != 0 {
            return -ec::EINVAL;
        }

        let read = flags & O_RDWR != 0 || flags & O_WRONLY == 0;
        let write = flags & O_RDWR != 0 || flags & O_WRONLY == 1;

        let file = OpenOptions::new()
            .read(read)
            .write(write)
            .append(flags & O_APPEND != 0)
            .truncate(flags & O_TRUNC != 0)
            .create(flags & O_CREAT != 0)
            .create_new(flags & O_EXCL != 0)
            .open(path);

        match file {
            Ok(file) => self.state.create_file(LinuxRealFile::new(file)) as i64,
            Err(error) => ekind_to_linux_error(error.kind()),
        }
    }

    fn sys_close(&mut self, fd: u32) -> i64 {
        match self.state.close_file(fd) {
            true => 0,
            false => -ec::EBADF,
        }
    }

    fn sys_brk(&mut self, _brk: u64) -> i64 {
        -ec::ENOMEM
    }

    fn is_um_addr(&self, addr: u64) -> bool {
        if let Some((_, prot)) = self.paging.query_virt_addr(&self.vm, addr) {
            return prot.user;
        }

        false
    }

    fn sys_arch_prctl(&mut self, code: u32, addr: u64) -> i64 {
        match code {
            0x1001 => { // ARCH_SET_GS
                if !self.is_um_addr(addr) {
                    return -ec::EPERM;
                }

                self.vm.regs_mut().gs.base = addr;
            },
            0x1002 => { // ARCH_SET_FS
                if !self.is_um_addr(addr) {
                    return -ec::EPERM;
                }

                self.vm.regs_mut().fs.base = addr;
            },
            0x1003 => { // ARCH_GET_FS
                let base = self.vm.regs().fs.base;
                if self.paging.write_virt_u64_checked(&mut self.vm, addr, base, USER_RW).is_err() {
                    return -ec::EFAULT;
                }
            },
            0x1004 => { // ARCH_GET_GS
                let base = self.vm.regs().gs.base;
                if self.paging.write_virt_u64_checked(&mut self.vm, addr, base, USER_RW).is_err() {
                    return -ec::EFAULT;
                }
            },
            0x1011 | 0x1012 => { // ARCH_GET_CPUID or ARCH_SET_CPUID
                return -ec::ENODEV;
            }
            _ => {
                return -ec::EINVAL;
            }
        };

        0
    }

    fn sys_set_tid_address(&mut self, _tidptr: u64) -> i64 {
        self.state.tid() as i64
    }

    fn sys_exit(&mut self, status: u32) -> i64 {
        println!("\nExecutable exited with status {:X}.", status);

        self.state.exit();

        0
    }

    fn sys_exit_group(&mut self, status: u32) -> i64 {
        self.sys_exit(status)
    }

    fn sys_ioctl(&mut self, fd: u32, cmd: u32, arg: u64) -> i64 {
        match self.state.file_from_fd(fd) {
            Some(file) => file.ioctl(cmd, arg, &mut self.vm, &mut self.paging),
            _ => -ec::EBADF,
        }
    }

    fn read(
        vm: &mut Vm,
        paging: &mut VmPaging,
        file: &mut DynLinuxFile,
        buf: u64,
        count: u64,
    ) -> i64 {
        let mut buffer = vec![0u8; count as usize];

        let result = file.read(&mut buffer);

        if result > 0 {
            let buffer = &buffer[..result as usize];

            if paging.write_virt_checked(vm, buf, buffer, USER_RW).is_err() {
                return -ec::EFAULT;
            }
        }

        result
    }

    fn write(
        vm: &mut Vm,
        paging: &mut VmPaging,
        file: &mut DynLinuxFile,
        buf: u64,
        count: u64,
    ) -> i64 {
        let mut buffer = vec![0u8; count as usize];

        if paging.read_virt_checked(vm, buf, &mut buffer, USER_R).is_err() {
            return -ec::EFAULT;
        }

        file.write(&buffer)
    }

    fn iovec_rw(
        &mut self,
        fd: u32,
        iov: u64,
        iovcnt: u32,
        func: impl Fn(&mut Vm, &mut VmPaging, &mut DynLinuxFile, u64, u64) -> i64,
    ) -> i64 {
        if let Some(file) = self.state.file_from_fd(fd) {
            let mut total = 0;

            for i in 0..iovcnt {
                let iovec = iov + i as u64 * 16;

                let res = (self.paging.read_virt_u64_checked(&mut self.vm, iovec, USER_R),
                           self.paging.read_virt_u64_checked(&mut self.vm, iovec + 8, USER_R));

                if let (Ok(base), Ok(size)) = res {
                    let result = func(&mut self.vm, &mut self.paging, file, base, size);

                    if result < 0 {
                        return result;
                    }

                    total += result;
                } else {
                    return -ec::EFAULT;
                }
            }

            total
        } else {
            -ec::EBADF
        }
    }

    fn sys_read(&mut self, fd: u32, buf: u64, count: u64) -> i64 {
        match self.state.file_from_fd(fd) {
            Some(file) => Self::read(&mut self.vm, &mut self.paging, file, buf, count),
            _ => -ec::EBADF,
        }
    }

    fn sys_write(&mut self, fd: u32, buf: u64, count: u64) -> i64 {
        match self.state.file_from_fd(fd) {
            Some(file) => Self::write(&mut self.vm, &mut self.paging, file, buf, count),
            _ => -ec::EBADF,
        }
    }

    fn sys_readv(&mut self, fd: u32, iov: u64, iovcnt: u32) -> i64 {
        self.iovec_rw(fd, iov, iovcnt, Self::read)
    }

    fn sys_writev(&mut self, fd: u32, iov: u64, iovcnt: u32) -> i64 {
        self.iovec_rw(fd, iov, iovcnt, Self::write)
    }
}
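readv and writev above walk the guest's iovec array straight out of guest memory: each entry
is 16 bytes, a base pointer followed by a length, both little-endian u64s. A sketch of the
guest-side bytes the handler expects (the addresses are made up for illustration):

    let mut iovs = ByteVec::with_capacity(2 * 16);
    iovs.push_u64(0x7FFF_0000_1000); // entry 0: iov_base
    iovs.push_u64(64);               // entry 0: iov_len
    iovs.push_u64(0x7FFF_0000_2000); // entry 1: iov_base
    iovs.push_u64(16);               // entry 1: iov_len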
--------------------------------------------------------------------------------
/src/linux/mod.rs:
--------------------------------------------------------------------------------
mod elf_loader;
mod errcodes;
mod errconv;
mod lxstate;
mod lxfile;
mod lxsyscall;
mod lxrealfile;
mod lxstd;
mod usermem;
mod coverage;
mod syscall_stats;

use crate::vm::*;
use crate::mm::phys_allocator::{PhysAllocator, ContinousPhysAllocator};
use crate::mm::paging::{PagingManager, MemProt, MemAccess};
use crate::bytevec::ByteVec;
use lxstate::LinuxState;
use lxsyscall::LinuxSyscall;
use coverage::Coverage;
use syscall_stats::SyscallStats;

const EXCEPTIONS_TO_INTERCEPT: &[Exception] = &[
    Exception::DivideErrorFault,
    Exception::DebugTrapOrFault,
    Exception::BreakpointTrap,
    Exception::OverflowTrap,
    Exception::BoundRangeFault,
    Exception::InvalidOpcodeFault,
    Exception::DeviceNotAvailableFault,
    Exception::DoubleFaultAbort,
    Exception::InvalidTaskStateSegmentFault,
    Exception::SegmentNotPresentFault,
    Exception::StackFault,
    Exception::GeneralProtectionFault,
    Exception::PageFault,
    Exception::FloatingPointErrorFault,
    Exception::AlignmentCheckFault,
    Exception::MachineCheckAbort,
    Exception::SimdFloatingPointFault,
];

const PROCESS_ID: u32 = 4;
const THREAD_ID: u32 = 4;

const TARGET_CPL: u8 = 3;
const GDT_VIRT: u64 = 0xFFFF_8000_0000_0000;
const STACK_END_VIRT: u64 = 0x7FFF_FFFF_F000;
const STACK_SIZE: u64 = 1024 * 1024 * 16;

type VmPaging = PagingManager<ContinousPhysAllocator>;

pub struct LinuxVm {
    vm: Vm,
    paging: VmPaging,
    phys_allocator: ContinousPhysAllocator,
    coverage: Option<Coverage>,
    syscall_stats: Option<SyscallStats>,
    state: LinuxState,
}

impl LinuxVm {
    /// Prepare the guest to run 64-bit code in usermode.
    fn initialize_longmode(
        vm: &mut Vm,
        paging: &mut VmPaging,
        phys_allocator: &mut ContinousPhysAllocator,
    ) {
        // Create code and data segment attributes. DPL will be equal to `TARGET_CPL`.
        // The accessed bit must be set because we are manually setting descriptor caches.
        // The granularity bit is not set because the limit is ignored anyway.

        // Read, execute, accessed. Long mode is enabled so `default` needs to be disabled.
        let code_attribs = SegAttribs {
            seg_type: 0b1011,
            non_system: true,
            dpl: TARGET_CPL,
            present: true,
            default: false,
            granularity: false,
            long: true,
        }.build();

        // Read, write, accessed.
        let data_attribs = SegAttribs {
            seg_type: 0b0011,
            non_system: true,
            dpl: TARGET_CPL,
            present: true,
            default: false,
            granularity: false,
            long: false,
        }.build();

        let mut gdt = ByteVec::new();

        // Create a 64-bit GDT with 3 entries: the null segment required by x86 and the
        // usermode code and data segments. Only attributes are set because base and
        // limit are ignored in long mode.
        let _null_sel = gdt.push_u64(0);
        let code_sel = gdt.push_u64((code_attribs as u64) << 40);
        let data_sel = gdt.push_u64((data_attribs as u64) << 40);

        assert!(gdt.len() == 8 * 3, "GDT does not have 3 entries.");

        // Allocate the GDT and map it at the `GDT_VIRT` virtual address as read-only.
        let gdt_aligned_size = (gdt.len() as u64 + 0xFFF) & !0xFFF;
        let gdt_phys = phys_allocator.alloc_phys(vm, gdt_aligned_size, Some(&gdt));

        paging.map_virt_region(vm, GDT_VIRT, gdt_phys,
                               gdt_aligned_size, MemProt::r(MemAccess::Kernelmode));

        // Load the newly created GDT.
        vm.regs_mut().gdtr = TableReg {
            base: GDT_VIRT,
            limit: gdt.len() as u16 - 1,
        };

        // Load an empty IDT. All exceptions will be caught and handled by the hypervisor.
        vm.regs_mut().idtr = TableReg {
            base: 0,
            limit: 0,
        };

        // Create code and data segments with the appropriate attributes. Base and limit
        // are 0 because they are ignored in long mode and we are using paging anyway.
        // RPL == DPL == TARGET_CPL.

        let code_seg = SegReg {
            sel: code_sel as u16 | TARGET_CPL as u16,
            base: 0,
            limit: 0,
            attribs: code_attribs,
        };

        let data_seg = SegReg {
            sel: data_sel as u16 | TARGET_CPL as u16,
            base: 0,
            limit: 0,
            attribs: data_attribs,
        };

        let regs = vm.regs_mut();

        // Load all newly created segments.
        regs.cs = code_seg;
        regs.es = data_seg;
        regs.ss = data_seg;
        regs.ds = data_seg;
        regs.fs = data_seg;
        regs.gs = data_seg;

        // Enable protected mode and paging (both required by long mode). Enable write
        // protection to ensure that read-only memory cannot be written.
        regs.cr0 = cr0::PE | cr0::WP | cr0::PG | cr0::MP;

        // Enable Physical Address Extension (required by long mode). Also enable some
        // things to make SSE work properly.
        regs.cr4 = cr4::PAE | cr4::OSFXSR | cr4::OSXMMEXCPT | cr4::OSXSAVE;

        // Enable long mode and activate it. Enable the non-execute bit to ensure that
        // data cannot be executed.
        // Syscall enable is off because we need to manually handle syscalls by catching #UD.
        regs.efer = efer::LMA | efer::LME | efer::NXE;

        // Enable SSE and the x87 FPU.
        regs.xcr0 = xcr0::X87 | xcr0::SSE;

        // Load the physical address of the PML4 allocated by the paging manager.
        regs.cr3 = paging.cr3();
    }

    /// Load and map an ELF executable into the guest and prepare it for running.
    fn load_executable(
        executable_path: &str,
        vm: &mut Vm,
        paging: &mut VmPaging,
        phys_allocator: &mut ContinousPhysAllocator,
    ) -> (u64, u64) {
        // Load and parse the 64-bit ELF executable.
        let bytes = std::fs::read(executable_path).expect("Failed to read executable ELF.");
        let elf = elf_loader::map_elf64(&bytes);

        // Ensure that both the base address and the mapped image size are page aligned.
        assert!(elf.base & 0xFFF == 0 && elf.mapped.len() & 0xFFF == 0,
                "ELF is not page aligned.");

        let elf_virt = elf.base;
        let elf_size = elf.mapped.len() as u64;

        // Allocate a physical region containing the ELF image.
        let elf_phys = phys_allocator.alloc_phys(vm, elf_size, Some(&elf.mapped));

        // Go through every page in the ELF image to map it in virtual memory.
        for offset in (0..elf_size).step_by(0x1000) {
            let virt = elf_virt + offset;
            let phys = elf_phys + offset;

            // If a page is not covered by any segment, we default it to read-only.
            let mut protection = MemProt::r(MemAccess::Usermode);

            // Find the segment protection for the given page.
            for segment in &elf.segments {
                if virt >= segment.start && virt < segment.start + segment.size {
                    // Code will be executing in usermode so the user bit always
                    // needs to be set.
                    protection = MemProt {
                        write: segment.writeable,
                        execute: segment.executable,
                        user: true,
                    };

                    break;
                }
            }

            // Map this page of the ELF executable with the appropriate protection.
            paging.map_virt_region(vm, virt, phys, 0x1000, protection);
        }

        // Make the guest execute code from the entrypoint of the ELF.
        vm.regs_mut().rip = elf.entrypoint;

        (elf_virt, elf_size)
    }
    /// Create and initialize the guest stack containing the command line arguments `args`
    /// and environment variables `env` used by libc.
    fn initialize_stack<S1: AsRef<str>, S2: AsRef<str>>(
        args: &[S1],
        env: &[S2],
        vm: &mut Vm,
        paging: &mut VmPaging,
        phys_allocator: &mut ContinousPhysAllocator,
    ) -> (u64, u64, u64, u64) {
        // Allocate data containing all strings and set up the stack layout to contain
        // pointers to them.
        // +  0 argc
        // +  8 ptr to arg 1
        // + 16 ptr to arg 2
        // + 24 null ptr
        // + 32 ptr to env 1
        // + 40 ptr to env 2
        // + 48 null ptr

        // Buffer to hold all strings from `args` and `env`.
        let mut data = ByteVec::new();

        // Offsets from `data` to all entries in `args` and `env`. Required to set up
        // the stack containing pointers to all these C strings.
        let mut args_offsets = Vec::with_capacity(args.len());
        let mut env_offsets = Vec::with_capacity(env.len());

        // Allocate all command line arguments in `data`.
        for arg in args.iter() {
            let bytes = arg.as_ref().as_bytes();
            let offset = data.push_bytes(bytes);

            // Push a null terminator.
            data.push_bytes(&[0]);

            args_offsets.push(offset);
        }

        // Allocate all environment variables in `data`.
        for var in env.iter() {
            let bytes = var.as_ref().as_bytes();
            let offset = data.push_bytes(bytes);

            // Push a null terminator.
            data.push_bytes(&[0]);

            env_offsets.push(offset);
        }

        // Calculate the additional stack size required to hold argc, argv and envp:
        // one pointer for every environment variable and command line argument, plus
        // argc itself, the null separator between argv and envp, and the null
        // terminator at the end of envp.
        let add_size = (3 + env.len() + args.len()) as u64 * 8;
        let add_aligned_size = (add_size + 0xFFF) & !0xFFF;

        let usable_stack_size = STACK_SIZE;
        let real_stack_size = usable_stack_size + add_aligned_size;

        let stack_phys = phys_allocator.alloc_phys(vm, real_stack_size, None);
        let stack_virt = STACK_END_VIRT - real_stack_size;

        paging.map_virt_region(vm, stack_virt, stack_phys,
                               real_stack_size, MemProt::rw(MemAccess::Usermode));

        let data_aligned_size = (data.len() as u64 + 0xFFF) & !0xFFF;
        let data_phys = phys_allocator.alloc_phys(vm, data_aligned_size, Some(&data));
        let data_virt = stack_virt - data_aligned_size - 0x1000;

        paging.map_virt_region(vm, data_virt, data_phys,
                               data_aligned_size, MemProt::r(MemAccess::Usermode));

        let mut ptr_data = ByteVec::with_capacity(add_size as usize);

        ptr_data.push_u64(args.len() as u64);

        for offset in args_offsets.iter() {
            ptr_data.push_u64(offset + data_virt);
        }

        ptr_data.push_u64(0);

        for offset in env_offsets.iter() {
            ptr_data.push_u64(offset + data_virt);
        }

        ptr_data.push_u64(0);

        assert!(ptr_data.len() == add_size as usize, "Predicted size was wrong.");

        let rsp = stack_virt + usable_stack_size - 0x100;

        paging.write_virt(vm, rsp, &ptr_data).unwrap();

        vm.regs_mut().rsp = rsp;

        (stack_virt, real_stack_size, data_virt, data_aligned_size)
    }
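    // Resulting guest stack at `rsp` produced by initialize_stack above for
    // args ["app", "1"] and env ["A=B"] (an illustrative layout):
    //   rsp + 0x00: 2              (argc)
    //   rsp + 0x08: ptr to "app"
    //   rsp + 0x10: ptr to "1"
    //   rsp + 0x18: 0              (end of argv)
    //   rsp + 0x20: ptr to "A=B"
    //   rsp + 0x28: 0              (end of envp)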
    pub fn new<S1: AsRef<str>, S2: AsRef<str>>(
        executable_path: &str,
        args: &[S1],
        env: &[S2],
        coverage_path: Option<&str>,
    ) -> Self {
        assert!(args.len() > 0, "Need to provide at least one cmd line argument.");
        assert!(env.len() > 0, "Need to provide at least one env variable.");

        let mut vm = Vm::new(EXCEPTIONS_TO_INTERCEPT);

        let phys_pt_start = 1 << 30;

        let mut phys_allocator = ContinousPhysAllocator::new(0, Some(phys_pt_start));
        let mut paging = PagingManager::new(&mut vm,
            ContinousPhysAllocator::new(phys_pt_start, None));

        Self::initialize_longmode(&mut vm, &mut paging, &mut phys_allocator);

        let (elf_base, elf_size) = Self::load_executable(executable_path, &mut vm,
            &mut paging, &mut phys_allocator);

        let (stack_base, _stack_size, args_base, _args_size) = Self::initialize_stack(args, env,
            &mut vm, &mut paging, &mut phys_allocator);

        let coverage = coverage_path.map(|path| Coverage::new(path));

        if coverage.is_some() {
            const TRAP_FLAG: u64 = 0x100;

            vm.regs_mut().rflags |= TRAP_FLAG;

            println!("Enabled code coverage.\n");
        }

        let (heap_base, heap_size) = {
            let elf_end = elf_base + elf_size;

            assert!(elf_base > 0, "ELF on null page.");
            assert!(elf_end < stack_base, "ELF on higher address than stack.");
            assert!(elf_end < args_base, "ELF on higher address than args.");

            let gb = 1024 * 1024 * 1024;

            let gap_start = elf_end;
            let gap_end = std::cmp::min(stack_base, args_base);

            let padding = 10 * gb;
            let heap_start = gap_start + padding;
            let heap_size = gap_end - gap_start - padding * 2;

            assert!(heap_size >= 10 * gb, "Heap size is less than 10GB.");

            (heap_start, heap_size)
        };

        let mut lx_state = LinuxState::new(PROCESS_ID, THREAD_ID, heap_base, heap_size);

        const STDIN_FD: u32 = 0;
        const STDOUT_FD: u32 = 1;
        const STDERR_FD: u32 = 2;

        lx_state.create_file_at_fd(STDIN_FD, lxstd::LinuxStdin::new());
        lx_state.create_file_at_fd(STDOUT_FD, lxstd::LinuxStdout::new(false));
        lx_state.create_file_at_fd(STDERR_FD, lxstd::LinuxStdout::new(true));

        Self {
            vm,
            paging,
            phys_allocator,
            coverage,
            state: lx_state,
            syscall_stats: Some(SyscallStats::new()),
        }
    }
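    // Typical use of the constructor above (illustrative; mirrors main.rs —
    // the argument and environment strings are placeholders):
    //
    //     let mut linux_vm = LinuxVm::new("compiled-app", &["compiled-app"],
    //                                     &["TERM=linux"], Some("coverage.txt"));
    //     linux_vm.run();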
458 | if vector == Exception::DebugTrapOrFault { 459 | handled = self.report_coverage(self.vm.regs().rip); 460 | } 461 | }, 462 | VmExit::Preemption => handled = true, 463 | _ => (), 464 | } 465 | 466 | if !handled { 467 | println!("{:#X?}", self.vm.regs()); 468 | println!("{:#X?}", vmexit); 469 | panic!("Unhandled VM exit."); 470 | } 471 | } 472 | 473 | if let Some(coverage) = self.coverage.as_ref() { 474 | println!("Gathered {} unique coverage entries.", coverage.entries()); 475 | } 476 | 477 | if let Some(syscall_stats) = self.syscall_stats.as_ref() { 478 | println!(); 479 | 480 | syscall_stats.show(); 481 | } 482 | } 483 | } 484 | -------------------------------------------------------------------------------- /src/linux/syscall_stats.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::borrow::Cow; 3 | 4 | pub struct SyscallStats { 5 | syscalls: BTreeMap, 6 | } 7 | 8 | impl SyscallStats { 9 | pub fn new() -> Self { 10 | Self { 11 | syscalls: BTreeMap::new(), 12 | } 13 | } 14 | 15 | pub fn report(&mut self, syscall_id: u32) { 16 | *self.syscalls.entry(syscall_id).or_insert(0) += 1; 17 | } 18 | 19 | pub fn show(&self) { 20 | let mut syscalls: Vec<_> = self.syscalls.iter().map(|(k, v)| (*k, *v)).collect(); 21 | syscalls.sort_unstable_by_key(|syscall| std::cmp::Reverse(syscall.1)); 22 | 23 | for (syscall_id, count) in syscalls.into_iter() { 24 | let syscall_name = match syscall_name_from_id(syscall_id) { 25 | Some(name) => Cow::from(name), 26 | None => Cow::from(format!("Unknown {}", syscall_id)), 27 | }; 28 | 29 | println!("{:16} | {:>3}", syscall_name, count); 30 | } 31 | } 32 | } 33 | 34 | fn syscall_name_from_id(id: u32) -> Option<&'static str> { 35 | match id { 36 | 0 => Some("read"), 37 | 1 => Some("write"), 38 | 2 => Some("open"), 39 | 3 => Some("close"), 40 | 4 => Some("stat"), 41 | 5 => Some("fstat"), 42 | 6 => Some("lstat"), 43 | 7 => Some("poll"), 44 | 8 => Some("lseek"), 45 | 9 => Some("mmap"), 46 | 10 => Some("mprotect"), 47 | 11 => Some("munmap"), 48 | 12 => Some("brk"), 49 | 13 => Some("rt_sigaction"), 50 | 14 => Some("rt_sigprocmask"), 51 | 15 => Some("rt_sigreturn"), 52 | 16 => Some("ioctl"), 53 | 17 => Some("pread64"), 54 | 18 => Some("pwrite64"), 55 | 19 => Some("readv"), 56 | 20 => Some("writev"), 57 | 21 => Some("access"), 58 | 22 => Some("pipe"), 59 | 23 => Some("select"), 60 | 24 => Some("sched_yield"), 61 | 25 => Some("mremap"), 62 | 26 => Some("msync"), 63 | 27 => Some("mincore"), 64 | 28 => Some("madvise"), 65 | 29 => Some("shmget"), 66 | 30 => Some("shmat"), 67 | 31 => Some("shmctl"), 68 | 32 => Some("dup"), 69 | 33 => Some("dup2"), 70 | 34 => Some("pause"), 71 | 35 => Some("nanosleep"), 72 | 36 => Some("getitimer"), 73 | 37 => Some("alarm"), 74 | 38 => Some("setitimer"), 75 | 39 => Some("getpid"), 76 | 40 => Some("sendfile"), 77 | 41 => Some("socket"), 78 | 42 => Some("connect"), 79 | 43 => Some("accept"), 80 | 44 => Some("sendto"), 81 | 45 => Some("recvfrom"), 82 | 46 => Some("sendmsg"), 83 | 47 => Some("recvmsg"), 84 | 48 => Some("shutdown"), 85 | 49 => Some("bind"), 86 | 50 => Some("listen"), 87 | 51 => Some("getsockname"), 88 | 52 => Some("getpeername"), 89 | 53 => Some("socketpair"), 90 | 54 => Some("setsockopt"), 91 | 55 => Some("getsockopt"), 92 | 56 => Some("clone"), 93 | 57 => Some("fork"), 94 | 58 => Some("vfork"), 95 | 59 => Some("execve"), 96 | 60 => Some("exit"), 97 | 61 => Some("wait4"), 98 | 62 => Some("kill"), 99 | 63 => Some("uname"), 100 | 64 => Some("semget"), 
101 | 65 => Some("semop"), 102 | 66 => Some("semctl"), 103 | 67 => Some("shmdt"), 104 | 68 => Some("msgget"), 105 | 69 => Some("msgsnd"), 106 | 70 => Some("msgrcv"), 107 | 71 => Some("msgctl"), 108 | 72 => Some("fcntl"), 109 | 73 => Some("flock"), 110 | 74 => Some("fsync"), 111 | 75 => Some("fdatasync"), 112 | 76 => Some("truncate"), 113 | 77 => Some("ftruncate"), 114 | 78 => Some("getdents"), 115 | 79 => Some("getcwd"), 116 | 80 => Some("chdir"), 117 | 81 => Some("fchdir"), 118 | 82 => Some("rename"), 119 | 83 => Some("mkdir"), 120 | 84 => Some("rmdir"), 121 | 85 => Some("creat"), 122 | 86 => Some("link"), 123 | 87 => Some("unlink"), 124 | 88 => Some("symlink"), 125 | 89 => Some("readlink"), 126 | 90 => Some("chmod"), 127 | 91 => Some("fchmod"), 128 | 92 => Some("chown"), 129 | 93 => Some("fchown"), 130 | 94 => Some("lchown"), 131 | 95 => Some("umask"), 132 | 96 => Some("gettimeofday"), 133 | 97 => Some("getrlimit"), 134 | 98 => Some("getrusage"), 135 | 99 => Some("sysinfo"), 136 | 100 => Some("times"), 137 | 101 => Some("ptrace"), 138 | 102 => Some("getuid"), 139 | 103 => Some("syslog"), 140 | 104 => Some("getgid"), 141 | 105 => Some("setuid"), 142 | 106 => Some("setgid"), 143 | 107 => Some("geteuid"), 144 | 108 => Some("getegid"), 145 | 109 => Some("setpgid"), 146 | 110 => Some("getppid"), 147 | 111 => Some("getpgrp"), 148 | 112 => Some("setsid"), 149 | 113 => Some("setreuid"), 150 | 114 => Some("setregid"), 151 | 115 => Some("getgroups"), 152 | 116 => Some("setgroups"), 153 | 117 => Some("setresuid"), 154 | 118 => Some("getresuid"), 155 | 119 => Some("setresgid"), 156 | 120 => Some("getresgid"), 157 | 121 => Some("getpgid"), 158 | 122 => Some("setfsuid"), 159 | 123 => Some("setfsgid"), 160 | 124 => Some("getsid"), 161 | 125 => Some("capget"), 162 | 126 => Some("capset"), 163 | 127 => Some("rt_sigpending"), 164 | 128 => Some("rt_sigtimedwait"), 165 | 129 => Some("rt_sigqueueinfo"), 166 | 130 => Some("rt_sigsuspend"), 167 | 131 => Some("sigaltstack"), 168 | 132 => Some("utime"), 169 | 133 => Some("mknod"), 170 | 134 => Some("useli"), 171 | 135 => Some("personality"), 172 | 136 => Some("ustat"), 173 | 137 => Some("statfs"), 174 | 138 => Some("fstatfs"), 175 | 139 => Some("sysfs"), 176 | 140 => Some("getpriority"), 177 | 141 => Some("setpriority"), 178 | 142 => Some("sched_setparam"), 179 | 143 => Some("sched_getparam"), 180 | 144 => Some("sched_setscheduler"), 181 | 145 => Some("sched_getscheduler"), 182 | 146 => Some("sched_get_priority_max"), 183 | 147 => Some("sched_get_priority_min"), 184 | 148 => Some("sched_rr_get_interval"), 185 | 149 => Some("mlock"), 186 | 150 => Some("munlock"), 187 | 151 => Some("mlockall"), 188 | 152 => Some("munlockall"), 189 | 153 => Some("vhangup"), 190 | 154 => Some("modify_ldt"), 191 | 155 => Some("pivot_root"), 192 | 156 => Some("_sysctl"), 193 | 157 => Some("prctl"), 194 | 158 => Some("arch_prctl"), 195 | 159 => Some("adjtimex"), 196 | 160 => Some("setrlimit"), 197 | 161 => Some("chroot"), 198 | 162 => Some("sync"), 199 | 163 => Some("acct"), 200 | 164 => Some("settimeofday"), 201 | 165 => Some("mount"), 202 | 166 => Some("umount2"), 203 | 167 => Some("swapon"), 204 | 168 => Some("swapoff"), 205 | 169 => Some("reboot"), 206 | 170 => Some("sethostname"), 207 | 171 => Some("setdomainname"), 208 | 172 => Some("iopl"), 209 | 173 => Some("ioperm"), 210 | 174 => Some("create_modul"), 211 | 175 => Some("init_module"), 212 | 176 => Some("delete_module"), 213 | 177 => Some("get_kernel_sym"), 214 | 178 => Some("query_modul"), 215 | 179 => Some("quotactl"), 216 | 
180 => Some("nfsservct"), 217 | 181 => Some("getpms"), 218 | 182 => Some("putpms"), 219 | 183 => Some("afs_syscal"), 220 | 184 => Some("tuxcal"), 221 | 185 => Some("securit"), 222 | 186 => Some("gettid"), 223 | 187 => Some("readahead"), 224 | 188 => Some("setxattr"), 225 | 189 => Some("lsetxattr"), 226 | 190 => Some("fsetxattr"), 227 | 191 => Some("getxattr"), 228 | 192 => Some("lgetxattr"), 229 | 193 => Some("fgetxattr"), 230 | 194 => Some("listxattr"), 231 | 195 => Some("llistxattr"), 232 | 196 => Some("flistxattr"), 233 | 197 => Some("removexattr"), 234 | 198 => Some("lremovexattr"), 235 | 199 => Some("fremovexattr"), 236 | 200 => Some("tkill"), 237 | 201 => Some("time"), 238 | 202 => Some("futex"), 239 | 203 => Some("sched_setaffinity"), 240 | 204 => Some("sched_getaffinity"), 241 | 205 => Some("set_thread_are"), 242 | 206 => Some("io_setup"), 243 | 207 => Some("io_destroy"), 244 | 208 => Some("io_getevents"), 245 | 209 => Some("io_submit"), 246 | 210 => Some("io_cancel"), 247 | 211 => Some("get_thread_are"), 248 | 212 => Some("lookup_dcookie"), 249 | 213 => Some("epoll_create"), 250 | 214 => Some("epoll_ctl_ol"), 251 | 215 => Some("epoll_wait_ol"), 252 | 216 => Some("remap_file_pages"), 253 | 217 => Some("getdents64"), 254 | 218 => Some("set_tid_address"), 255 | 219 => Some("restart_syscall"), 256 | 220 => Some("semtimedop"), 257 | 221 => Some("fadvise64"), 258 | 222 => Some("timer_create"), 259 | 223 => Some("timer_settime"), 260 | 224 => Some("timer_gettime"), 261 | 225 => Some("timer_getoverrun"), 262 | 226 => Some("timer_delete"), 263 | 227 => Some("clock_settime"), 264 | 228 => Some("clock_gettime"), 265 | 229 => Some("clock_getres"), 266 | 230 => Some("clock_nanosleep"), 267 | 231 => Some("exit_group"), 268 | 232 => Some("epoll_wait"), 269 | 233 => Some("epoll_ctl"), 270 | 234 => Some("tgkill"), 271 | 235 => Some("utimes"), 272 | 236 => Some("vserve"), 273 | 237 => Some("mbind"), 274 | 238 => Some("set_mempolicy"), 275 | 239 => Some("get_mempolicy"), 276 | 240 => Some("mq_open"), 277 | 241 => Some("mq_unlink"), 278 | 242 => Some("mq_timedsend"), 279 | 243 => Some("mq_timedreceive"), 280 | 244 => Some("mq_notify"), 281 | 245 => Some("mq_getsetattr"), 282 | 246 => Some("kexec_load"), 283 | 247 => Some("waitid"), 284 | 248 => Some("add_key"), 285 | 249 => Some("request_key"), 286 | 250 => Some("keyctl"), 287 | 251 => Some("ioprio_set"), 288 | 252 => Some("ioprio_get"), 289 | 253 => Some("inotify_init"), 290 | 254 => Some("inotify_add_watch"), 291 | 255 => Some("inotify_rm_watch"), 292 | 256 => Some("migrate_pages"), 293 | 257 => Some("openat"), 294 | 258 => Some("mkdirat"), 295 | 259 => Some("mknodat"), 296 | 260 => Some("fchownat"), 297 | 261 => Some("futimesat"), 298 | 262 => Some("newfstatat"), 299 | 263 => Some("unlinkat"), 300 | 264 => Some("renameat"), 301 | 265 => Some("linkat"), 302 | 266 => Some("symlinkat"), 303 | 267 => Some("readlinkat"), 304 | 268 => Some("fchmodat"), 305 | 269 => Some("faccessat"), 306 | 270 => Some("pselect6"), 307 | 271 => Some("ppoll"), 308 | 272 => Some("unshare"), 309 | 273 => Some("set_robust_list"), 310 | 274 => Some("get_robust_list"), 311 | 275 => Some("splice"), 312 | 276 => Some("tee"), 313 | 277 => Some("sync_file_range"), 314 | 278 => Some("vmsplice"), 315 | 279 => Some("move_pages"), 316 | 280 => Some("utimensat"), 317 | 281 => Some("epoll_pwait"), 318 | 282 => Some("signalfd"), 319 | 283 => Some("timerfd_create"), 320 | 284 => Some("eventfd"), 321 | 285 => Some("fallocate"), 322 | 286 => Some("timerfd_settime"), 323 | 287 => 
Some("timerfd_gettime"), 324 | 288 => Some("accept4"), 325 | 289 => Some("signalfd4"), 326 | 290 => Some("eventfd2"), 327 | 291 => Some("epoll_create1"), 328 | 292 => Some("dup3"), 329 | 293 => Some("pipe2"), 330 | 294 => Some("inotify_init1"), 331 | 295 => Some("preadv"), 332 | 296 => Some("pwritev"), 333 | 297 => Some("rt_tgsigqueueinfo"), 334 | 298 => Some("perf_event_open"), 335 | 299 => Some("recvmmsg"), 336 | 300 => Some("fanotify_init"), 337 | 301 => Some("fanotify_mark"), 338 | 302 => Some("prlimit64"), 339 | 303 => Some("name_to_handle_at"), 340 | 304 => Some("open_by_handle_at"), 341 | 305 => Some("clock_adjtime"), 342 | 306 => Some("syncfs"), 343 | 307 => Some("sendmmsg"), 344 | 308 => Some("setns"), 345 | 309 => Some("getcpu"), 346 | 310 => Some("process_vm_readv"), 347 | 311 => Some("process_vm_writev"), 348 | 312 => Some("kcmp"), 349 | 313 => Some("finit_module"), 350 | 314 => Some("sched_setattr"), 351 | 315 => Some("sched_getattr"), 352 | 316 => Some("renameat2"), 353 | 317 => Some("seccomp"), 354 | 318 => Some("getrandom"), 355 | 319 => Some("memfd_create"), 356 | 320 => Some("kexec_file_load"), 357 | 321 => Some("bpf"), 358 | 322 => Some("execveat"), 359 | 323 => Some("userfaultfd"), 360 | 324 => Some("membarrier"), 361 | 325 => Some("mlock2"), 362 | 326 => Some("copy_file_range"), 363 | 327 => Some("preadv2"), 364 | 328 => Some("pwritev2"), 365 | 329 => Some("pkey_mprotect"), 366 | 330 => Some("pkey_alloc"), 367 | 331 => Some("pkey_free"), 368 | 332 => Some("statx"), 369 | 333 => Some("io_pgetevents"), 370 | 334 => Some("rseq"), 371 | 424 => Some("pidfd_send_signal"), 372 | 425 => Some("io_uring_setup"), 373 | 426 => Some("io_uring_enter"), 374 | 427 => Some("io_uring_register"), 375 | 428 => Some("open_tree"), 376 | 429 => Some("move_mount"), 377 | 430 => Some("fsopen"), 378 | 431 => Some("fsconfig"), 379 | 432 => Some("fsmount"), 380 | 433 => Some("fspick"), 381 | 434 => Some("pidfd_open"), 382 | 435 => Some("clone3"), 383 | 437 => Some("openat2"), 384 | 438 => Some("pidfd_getfd"), 385 | _ => None, 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /src/linux/usermem.rs: -------------------------------------------------------------------------------- 1 | use crate::mm::paging::MemProt; 2 | 3 | pub const USER_R: MemProt = MemProt { execute: false, write: false, user: true }; 4 | pub const USER_RW: MemProt = MemProt { execute: false, write: true, user: true }; 5 | pub const USER_RX: MemProt = MemProt { execute: true, write: false, user: true }; 6 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | mod vm; 4 | mod mm; 5 | mod linux; 6 | mod bytevec; 7 | 8 | use linux::LinuxVm; 9 | 10 | fn main() { 11 | let cmdline_args = [ 12 | "test_emulator", 13 | "123", 14 | "test00000", 15 | ]; 16 | 17 | let env = [ 18 | "env var 1", 19 | "env var 2", 20 | "something", 21 | ]; 22 | 23 | let mut linux_vm = LinuxVm::new("compiled-app", &cmdline_args, &env, None); 24 | 25 | linux_vm.run(); 26 | } 27 | -------------------------------------------------------------------------------- /src/mm/membank.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | const PRECISE_TRACKING: bool = true; 4 | 5 | pub struct MemBank { 6 | start_address: u64, 7 | end_address: u64, 8 | next_free: u64, 9 | freed: Vec<(u64, u64)>, 
10 | allocations: BTreeMap<u64, u64>, 11 | } 12 | 13 | impl MemBank { 14 | fn reuse_address(&mut self, requested_size: u64) -> Option<u64> { 15 | for (i, &(addr, region_size)) in self.freed.iter().enumerate() { 16 | if region_size < requested_size { 17 | continue; 18 | } 19 | 20 | let size_left = region_size - requested_size; 21 | 22 | if size_left == 0 { 23 | self.freed.swap_remove(i); 24 | } else { 25 | self.freed[i] = (addr + requested_size, size_left); 26 | } 27 | 28 | return Some(addr); 29 | } 30 | 31 | None 32 | } 33 | 34 | pub fn new(start_address: u64, end_address: Option<u64>) -> Self { 35 | Self { 36 | start_address, 37 | end_address: end_address.unwrap_or(!0), 38 | next_free: start_address, 39 | freed: Vec::new(), 40 | allocations: BTreeMap::new(), 41 | } 42 | } 43 | 44 | pub fn reserve_region(&mut self, size: u64) -> u64 { 45 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 46 | 47 | let addr = self.reuse_address(size).unwrap_or_else(|| { 48 | assert!(self.next_free.checked_add(size).unwrap() <= self.end_address, 49 | "Memory bank is out of space."); 50 | 51 | let addr = self.next_free; 52 | 53 | self.next_free += size; 54 | 55 | addr 56 | }); 57 | 58 | if PRECISE_TRACKING { 59 | assert!(self.allocations.insert(addr, size).is_none(), 60 | "Region was already reserved."); 61 | } 62 | 63 | addr 64 | } 65 | 66 | pub fn return_region(&mut self, addr: u64, size: u64) { 67 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 68 | 69 | let start = addr; 70 | let end = addr.checked_add(size).unwrap(); 71 | 72 | assert!(start >= self.start_address && end <= self.end_address, 73 | "Tried to return region to foreign memory bank."); 74 | 75 | if PRECISE_TRACKING { 76 | let original_size = self.allocations.remove(&addr) 77 | .expect("Requested to free unknown allocation."); 78 | 79 | assert!(original_size == size, "Freed allocation had originally different size."); 80 | } 81 | 82 | self.freed.push((addr, size)); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/mm/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod phys_allocator; 2 | pub mod membank; 3 | pub mod paging; 4 | -------------------------------------------------------------------------------- /src/mm/paging.rs: -------------------------------------------------------------------------------- 1 | use crate::vm::*; 2 | use super::phys_allocator::PhysAllocator; 3 | 4 | const PAGE_MASK: u64 = 0x000F_FFFF_FFFF_F000; 5 | const PAGE_PRESENT: u64 = 1; 6 | const PAGE_WRITE: u64 = 1 << 1; 7 | const PAGE_USER: u64 = 1 << 2; 8 | const PAGE_SIZE: u64 = 1 << 7; 9 | const PAGE_XD: u64 = 1 << 63; 10 | const HIERARCHY_DEPTH: u64 = 4; 11 | 12 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 13 | pub enum MemAccess { 14 | Kernelmode, 15 | Usermode, 16 | } 17 | 18 | #[derive(Copy, Clone, Debug)] 19 | pub struct MemProt { 20 | pub execute: bool, 21 | pub user: bool, 22 | pub write: bool, 23 | } 24 | 25 | impl MemProt { 26 | pub fn r(acc: MemAccess) -> Self { 27 | Self { 28 | execute: false, 29 | write: false, 30 | user: acc == MemAccess::Usermode, 31 | } 32 | } 33 | 34 | pub fn rw(acc: MemAccess) -> Self { 35 | Self { 36 | execute: false, 37 | write: true, 38 | user: acc == MemAccess::Usermode, 39 | } 40 | } 41 | 42 | pub fn rx(acc: MemAccess) -> Self { 43 | Self { 44 | execute: true, 45 | write: false, 46 | user: acc == MemAccess::Usermode, 47 | } 48 | } 49 | 50 | pub fn rwx(acc: MemAccess) -> Self { 51 | Self { 52 | execute: true,
53 | write: true, 54 | user: acc == MemAccess::Usermode, 55 | } 56 | } 57 | 58 | fn is_subset_of(&self, other: &MemProt) -> bool { 59 | if other.execute && !self.execute { 60 | return false; 61 | } 62 | 63 | if other.write && !self.write { 64 | return false; 65 | } 66 | 67 | if other.user && !self.user { 68 | return false; 69 | } 70 | 71 | return true; 72 | } 73 | } 74 | 75 | pub struct PagingManager<A: PhysAllocator> { 76 | allocator: A, 77 | pml4: u64, 78 | } 79 | 80 | impl<A: PhysAllocator> PagingManager<A> { 81 | pub fn new(vm: &mut Vm, mut allocator: A) -> Self { 82 | let pml4 = allocator.alloc_phys(vm, 0x1000, None); 83 | 84 | Self { 85 | allocator, 86 | pml4, 87 | } 88 | } 89 | 90 | fn assert_phys_addr(phys_addr: u64) { 91 | assert!(phys_addr & 0xFFF == 0, "Physical address {:X} is not page aligned.", phys_addr); 92 | } 93 | 94 | fn assert_virt_addr(virt_addr: u64) { 95 | assert!(virt_addr & 0xFFF == 0, "Virtual address {:X} is not page aligned.", virt_addr); 96 | 97 | let mut addr = virt_addr as i64; 98 | addr <<= 16; 99 | addr >>= 16; 100 | 101 | assert!(addr as u64 == virt_addr, "Virtual address {:X} is not canonical.", virt_addr); 102 | } 103 | 104 | fn entry_index(virt_addr: u64, depth: u64) -> u64 { 105 | assert!(depth < HIERARCHY_DEPTH); 106 | 107 | (virt_addr >> (12 + 9 * (3 - depth))) & 0x1FF 108 | } 109 | 110 | fn map_virt_page( 111 | &mut self, 112 | vm: &mut Vm, 113 | virt_addr: u64, 114 | phys_addr: u64, 115 | prot: MemProt, 116 | ) { 117 | Self::assert_virt_addr(virt_addr); 118 | Self::assert_phys_addr(phys_addr); 119 | 120 | let page_flags = { 121 | let mut flags = PAGE_PRESENT; 122 | 123 | if prot.user { flags |= PAGE_USER; } 124 | if prot.write { flags |= PAGE_WRITE; } 125 | if !prot.execute { flags |= PAGE_XD; } 126 | 127 | flags 128 | }; 129 | 130 | let mut current = self.pml4; 131 | 132 | for depth in 0..HIERARCHY_DEPTH { 133 | let last = depth == HIERARCHY_DEPTH - 1; 134 | 135 | let entry_index = Self::entry_index(virt_addr, depth); 136 | let entry_addr = current + entry_index * 8; 137 | 138 | let mut entry_value = vm.mem().read_phys_u64(entry_addr) 139 | .expect("Failed to read page table entry."); 140 | 141 | if entry_value & PAGE_PRESENT != 0 { 142 | assert!(!last && entry_value & PAGE_SIZE == 0, 143 | "Requested page was already mapped."); 144 | } else { 145 | entry_value = match last { 146 | true => phys_addr, 147 | false => self.allocator.alloc_phys(vm, 0x1000, None), 148 | }; 149 | 150 | let flags = match last { 151 | true => page_flags, 152 | false => PAGE_PRESENT | PAGE_WRITE | PAGE_USER, 153 | }; 154 | 155 | entry_value |= flags; 156 | 157 | vm.mem_mut().write_phys_u64(entry_addr, entry_value) 158 | .expect("Failed to write page table entry."); 159 | } 160 | 161 | current = entry_value & PAGE_MASK; 162 | } 163 | } 164 | 165 | fn unmap_virt_page( 166 | &mut self, 167 | vm: &mut Vm, 168 | virt_addr: u64, 169 | ) -> u64 { 170 | Self::assert_virt_addr(virt_addr); 171 | 172 | let mut backing_phys = 0; 173 | 174 | let mut walked_entries = [0; 3]; 175 | let mut current = self.pml4; 176 | 177 | for depth in 0..HIERARCHY_DEPTH { 178 | let last = depth == HIERARCHY_DEPTH - 1; 179 | 180 | let entry_index = Self::entry_index(virt_addr, depth); 181 | let entry_addr = current + entry_index * 8; 182 | 183 | if !last { 184 | walked_entries[depth as usize] = entry_addr; 185 | } 186 | 187 | let entry_value = vm.mem().read_phys_u64(entry_addr) 188 | .expect("Failed to read page table entry."); 189 | 190 | assert!(entry_value & PAGE_PRESENT != 0, "Page to free was not mapped."); 191 | assert!(entry_value &
PAGE_SIZE == 0, "Large pages are not supported."); 192 | 193 | if last { 194 | vm.mem_mut().write_phys_u64(entry_addr, 0) 195 | .expect("Failed to zero page table entry."); 196 | 197 | backing_phys = entry_value & PAGE_MASK; 198 | } 199 | 200 | current = entry_value & PAGE_MASK; 201 | } 202 | 203 | for &entry_addr in walked_entries.iter().rev() { 204 | let children = vm.mem().read_phys_u64(entry_addr) 205 | .expect("Failed to read page table entry."); 206 | let children = children & PAGE_MASK; 207 | 208 | let mut used = false; 209 | 210 | for i in 0..512 { 211 | let child_addr = children + i * 8; 212 | let child_value = vm.mem().read_phys_u64(child_addr) 213 | .expect("Failed to read page table entry."); 214 | 215 | if child_value & PAGE_PRESENT != 0 { 216 | used = true; 217 | break; 218 | } 219 | } 220 | 221 | if used { 222 | break; 223 | } 224 | 225 | vm.mem_mut().write_phys_u64(entry_addr, 0) 226 | .expect("Failed to zero page table entry."); 227 | 228 | self.allocator.free_phys(vm, children, 0x1000); 229 | } 230 | 231 | backing_phys 232 | } 233 | 234 | pub fn map_virt_region( 235 | &mut self, 236 | vm: &mut Vm, 237 | virt_addr: u64, 238 | phys_addr: u64, 239 | size: u64, 240 | prot: MemProt, 241 | ) { 242 | Self::assert_virt_addr(virt_addr); 243 | Self::assert_phys_addr(phys_addr); 244 | 245 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 246 | 247 | for offset in (0..size).step_by(0x1000) { 248 | let virt_addr = virt_addr + offset; 249 | let phys_addr = phys_addr + offset; 250 | 251 | self.map_virt_page(vm, virt_addr, phys_addr, prot); 252 | } 253 | } 254 | 255 | pub fn unmap_virt_region( 256 | &mut self, 257 | vm: &mut Vm, 258 | virt_addr: u64, 259 | size: u64, 260 | ) -> Vec<u64> { 261 | Self::assert_virt_addr(virt_addr); 262 | 263 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 264 | 265 | let mut backings = Vec::with_capacity(size as usize / 0x1000); 266 | 267 | for offset in (0..size).step_by(0x1000) { 268 | let virt_addr = virt_addr + offset; 269 | let backing = self.unmap_virt_page(vm, virt_addr); 270 | 271 | backings.push(backing); 272 | } 273 | 274 | backings 275 | } 276 | 277 | pub fn query_virt_addr(&self, vm: &Vm, virt_addr: u64) -> Option<(u64, MemProt)> { 278 | let offset = virt_addr & 0xFFF; 279 | let virt_addr = virt_addr & !0xFFF; 280 | 281 | Self::assert_virt_addr(virt_addr); 282 | 283 | let mut current = self.pml4; 284 | 285 | for depth in 0..HIERARCHY_DEPTH { 286 | let last = depth == HIERARCHY_DEPTH - 1; 287 | 288 | let entry_index = Self::entry_index(virt_addr, depth); 289 | let entry_addr = current + entry_index * 8; 290 | 291 | let entry_value = vm.mem().read_phys_u64(entry_addr) 292 | .expect("Failed to read page table entry."); 293 | 294 | if entry_value & PAGE_PRESENT == 0 || entry_value & PAGE_SIZE != 0 { 295 | break; 296 | } 297 | 298 | if last { 299 | let user = entry_value & PAGE_USER != 0; 300 | let write = entry_value & PAGE_WRITE != 0; 301 | let execute = entry_value & PAGE_XD == 0; 302 | 303 | let phys = (entry_value & PAGE_MASK) + offset; 304 | let prot = MemProt { 305 | user, 306 | write, 307 | execute, 308 | }; 309 | 310 | return Some((phys, prot)); 311 | } 312 | 313 | current = entry_value & PAGE_MASK; 314 | } 315 | 316 | None 317 | } 318 | 319 | pub fn read_virt_checked(&self, vm: &Vm, mut virt_addr: u64, buffer: &mut [u8], prot: MemProt) 320 | -> Result<(), u64> 321 | { 322 | let mut already_read = 0; 323 | let mut left_to_read = buffer.len() as u64; 324 | 325 | while left_to_read > 0 { 326 | if let
Some((backing, region_prot)) = self.query_virt_addr(vm, virt_addr) { 327 | if !region_prot.is_subset_of(&prot) { 328 | return Err(already_read); 329 | } 330 | 331 | let offset_in_page = virt_addr & 0xFFF; 332 | 333 | let to_page_end = 0x1000 - offset_in_page; 334 | let read_len = std::cmp::min(left_to_read, to_page_end); 335 | 336 | let buffer = &mut buffer[already_read as usize..]; 337 | let buffer = &mut buffer[..read_len as usize]; 338 | 339 | vm.mem().read_phys(backing, buffer)?; 340 | 341 | virt_addr += read_len; 342 | left_to_read -= read_len; 343 | already_read += read_len; 344 | } else { 345 | return Err(already_read); 346 | } 347 | } 348 | 349 | Ok(()) 350 | } 351 | 352 | pub fn write_virt_checked(&self, vm: &mut Vm, mut virt_addr: u64, buffer: &[u8], prot: MemProt) 353 | -> Result<(), u64> 354 | { 355 | let mut already_written = 0; 356 | let mut left_to_write = buffer.len() as u64; 357 | 358 | while left_to_write > 0 { 359 | if let Some((backing, region_prot)) = self.query_virt_addr(vm, virt_addr) { 360 | if !region_prot.is_subset_of(&prot) { 361 | return Err(already_written); 362 | } 363 | 364 | let offset_in_page = virt_addr & 0xFFF; 365 | 366 | let to_page_end = 0x1000 - offset_in_page; 367 | let write_len = std::cmp::min(left_to_write, to_page_end); 368 | 369 | let buffer = &buffer[already_written as usize..]; 370 | let buffer = &buffer[..write_len as usize]; 371 | 372 | vm.mem_mut().write_phys(backing, buffer)?; 373 | 374 | virt_addr += write_len; 375 | left_to_write -= write_len; 376 | already_written += write_len; 377 | } else { 378 | return Err(already_written); 379 | } 380 | } 381 | 382 | Ok(()) 383 | } 384 | 385 | pub fn read_virt(&self, vm: &Vm, virt_addr: u64, buffer: &mut [u8]) -> Result<(), u64> { 386 | self.read_virt_checked(vm, virt_addr, buffer, MemProt::r(MemAccess::Kernelmode)) 387 | } 388 | 389 | pub fn write_virt(&self, vm: &mut Vm, virt_addr: u64, buffer: &[u8]) -> Result<(), u64> { 390 | self.write_virt_checked(vm, virt_addr, buffer, MemProt::r(MemAccess::Kernelmode)) 391 | } 392 | 393 | pub fn read_virt_u64_checked(&self, vm: &Vm, virt_addr: u64, prot: MemProt) 394 | -> Result<u64, u64> 395 | { 396 | let mut buffer = [0u8; 8]; 397 | 398 | self.read_virt_checked(vm, virt_addr, &mut buffer, prot) 399 | .map(|_| u64::from_le_bytes(buffer)) 400 | } 401 | 402 | pub fn write_virt_u64_checked(&self, vm: &mut Vm, virt_addr: u64, value: u64, prot: MemProt) 403 | -> Result<(), u64> 404 | { 405 | self.write_virt_checked(vm, virt_addr, &value.to_le_bytes(), prot) 406 | } 407 | 408 | pub fn read_virt_u64(&self, vm: &Vm, virt_addr: u64) -> Result<u64, u64> { 409 | self.read_virt_u64_checked(vm, virt_addr, MemProt::r(MemAccess::Kernelmode)) 410 | } 411 | 412 | pub fn write_virt_u64(&self, vm: &mut Vm, virt_addr: u64, value: u64) -> Result<(), u64> { 413 | self.write_virt_u64_checked(vm, virt_addr, value, MemProt::r(MemAccess::Kernelmode)) 414 | } 415 | 416 | pub fn cr3(&self) -> u64 { 417 | self.pml4 418 | } 419 | } 420 | -------------------------------------------------------------------------------- /src/mm/phys_allocator.rs: -------------------------------------------------------------------------------- 1 | use crate::vm::*; 2 | use super::membank::MemBank; 3 | 4 | pub trait PhysAllocator { 5 | fn alloc_phys(&mut self, vm: &mut Vm, size: u64, contents: Option<&[u8]>) -> u64; 6 | fn free_phys(&mut self, vm: &mut Vm, addr: u64, size: u64); 7 | } 8 | 9 | pub struct ContinousPhysAllocator { 10 | bank: MemBank, 11 | } 12 | 13 | impl ContinousPhysAllocator { 14 | pub fn new(start_address:
u64, end_address: Option<u64>) -> Self { 15 | Self { 16 | bank: MemBank::new(start_address, end_address), 17 | } 18 | } 19 | } 20 | 21 | impl PhysAllocator for ContinousPhysAllocator { 22 | fn alloc_phys(&mut self, vm: &mut Vm, size: u64, contents: Option<&[u8]>) -> u64 { 23 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 24 | 25 | let addr = self.bank.reserve_region(size); 26 | 27 | vm.mem_mut().map_phys_region(addr, size, contents); 28 | 29 | addr 30 | } 31 | 32 | fn free_phys(&mut self, vm: &mut Vm, addr: u64, size: u64) { 33 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 34 | 35 | self.bank.return_region(addr, size); 36 | 37 | vm.mem_mut().unmap_phys_region(addr, size); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/vm/exception.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 2 | pub enum Exception { 3 | DivideErrorFault, 4 | DebugTrapOrFault, 5 | BreakpointTrap, 6 | OverflowTrap, 7 | BoundRangeFault, 8 | InvalidOpcodeFault, 9 | DeviceNotAvailableFault, 10 | DoubleFaultAbort, 11 | InvalidTaskStateSegmentFault, 12 | SegmentNotPresentFault, 13 | StackFault, 14 | GeneralProtectionFault, 15 | PageFault, 16 | FloatingPointErrorFault, 17 | AlignmentCheckFault, 18 | MachineCheckAbort, 19 | SimdFloatingPointFault, 20 | } 21 | 22 | impl Exception { 23 | pub(super) fn from_id(exception: u8) -> Option<Self> { 24 | match exception { 25 | 0x00 => Some(Exception::DivideErrorFault), 26 | 0x01 => Some(Exception::DebugTrapOrFault), 27 | 0x03 => Some(Exception::BreakpointTrap), 28 | 0x04 => Some(Exception::OverflowTrap), 29 | 0x05 => Some(Exception::BoundRangeFault), 30 | 0x06 => Some(Exception::InvalidOpcodeFault), 31 | 0x07 => Some(Exception::DeviceNotAvailableFault), 32 | 0x08 => Some(Exception::DoubleFaultAbort), 33 | 0x0A => Some(Exception::InvalidTaskStateSegmentFault), 34 | 0x0B => Some(Exception::SegmentNotPresentFault), 35 | 0x0C => Some(Exception::StackFault), 36 | 0x0D => Some(Exception::GeneralProtectionFault), 37 | 0x0E => Some(Exception::PageFault), 38 | 0x10 => Some(Exception::FloatingPointErrorFault), 39 | 0x11 => Some(Exception::AlignmentCheckFault), 40 | 0x12 => Some(Exception::MachineCheckAbort), 41 | 0x13 => Some(Exception::SimdFloatingPointFault), 42 | _ => None, 43 | } 44 | } 45 | 46 | pub(super) fn to_id(&self) -> u8 { 47 | match self { 48 | Exception::DivideErrorFault => 0x00, 49 | Exception::DebugTrapOrFault => 0x01, 50 | Exception::BreakpointTrap => 0x03, 51 | Exception::OverflowTrap => 0x04, 52 | Exception::BoundRangeFault => 0x05, 53 | Exception::InvalidOpcodeFault => 0x06, 54 | Exception::DeviceNotAvailableFault => 0x07, 55 | Exception::DoubleFaultAbort => 0x08, 56 | Exception::InvalidTaskStateSegmentFault => 0x0A, 57 | Exception::SegmentNotPresentFault => 0x0B, 58 | Exception::StackFault => 0x0C, 59 | Exception::GeneralProtectionFault => 0x0D, 60 | Exception::PageFault => 0x0E, 61 | Exception::FloatingPointErrorFault => 0x10, 62 | Exception::AlignmentCheckFault => 0x11, 63 | Exception::MachineCheckAbort => 0x12, 64 | Exception::SimdFloatingPointFault => 0x13, 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/vm/memory.rs: -------------------------------------------------------------------------------- 1 | use super::whvp_bindings as whv; 2 | use super::rawmem; 3 | use std::collections::BTreeMap; 4 | 5 | const RWX_PERMS: i32 = 1 | 2 | 4; 6
| 7 | #[derive(Copy, Clone)] 8 | struct Region { 9 | pub size: u64, 10 | pub backing: *mut u8, 11 | } 12 | 13 | impl Region { 14 | fn offseted_slice(&self, offset: u64) -> &[u8] { 15 | let size_left = self.size.checked_sub(offset) 16 | .expect("Offset is out of bounds."); 17 | 18 | unsafe { 19 | std::slice::from_raw_parts(self.backing.add(offset as usize), size_left as usize) 20 | } 21 | } 22 | 23 | fn offseted_slice_mut(&mut self, offset: u64) -> &mut [u8] { 24 | let size_left = self.size.checked_sub(offset) 25 | .expect("Offset is out of bounds."); 26 | 27 | unsafe { 28 | std::slice::from_raw_parts_mut(self.backing.add(offset as usize), size_left as usize) 29 | } 30 | } 31 | } 32 | 33 | pub struct Memory { 34 | partition: whv::WHV_PARTITION_HANDLE, 35 | regions: BTreeMap<u64, Region>, 36 | } 37 | 38 | impl Memory { 39 | pub(super) fn new(partition: whv::WHV_PARTITION_HANDLE) -> Self { 40 | Self { 41 | partition, 42 | regions: BTreeMap::new(), 43 | } 44 | } 45 | 46 | pub(super) fn destroy_all_mappings(&mut self) { 47 | let regions = std::mem::take(&mut self.regions); 48 | 49 | for (base, region) in regions.into_iter() { 50 | let result = unsafe { 51 | let result = whv::WHvUnmapGpaRange(self.partition, base, region.size); 52 | 53 | rawmem::raw_free(region.backing); 54 | 55 | result 56 | }; 57 | 58 | assert!(result >= 0, "Unmapping GPA range on destruction failed with result {:X}.", 59 | result); 60 | } 61 | } 62 | 63 | fn assert_addr_size(addr: u64, size: u64) { 64 | assert!(size > 0, "Zero sized allocations are not supported."); 65 | assert!(addr & 0xFFF == 0, "Physical address {:X} is not page aligned.", addr); 66 | assert!(size & 0xFFF == 0, "Size {:X} is not page aligned.", size); 67 | 68 | addr.checked_add(size).expect("Region end address overflows."); 69 | } 70 | 71 | fn assert_unique(&self, addr: u64, size: u64) { 72 | assert!(self.regions.get(&addr).is_none(), 73 | "Region is already mapped at address {:X}.", addr); 74 | 75 | let alloc_start = addr; 76 | let alloc_end = addr + size; 77 | 78 | if let Some((base, region)) = self.regions.range(..alloc_end).next_back() { 79 | let region_start = *base; 80 | let region_end = *base + region.size; 81 | 82 | let no_overlap = region_end <= alloc_start || alloc_end <= region_start; 83 | 84 | assert!(no_overlap, "New region [{:X} -> {:X}] overlaps with [{:X} -> {:X}].", 85 | alloc_start, alloc_end, region_start, region_end); 86 | } 87 | } 88 | 89 | fn region(&self, addr: u64) -> Option<(Region, u64)> { 90 | if let Some((base, region)) = self.regions.range(..=addr).next_back() { 91 | let start = *base; 92 | let end = *base + region.size; 93 | 94 | if addr >= start && addr < end { 95 | let offset = addr - start; 96 | 97 | return Some((*region, offset)); 98 | } 99 | } 100 | 101 | None 102 | } 103 | 104 | pub fn map_phys_region(&mut self, addr: u64, size: u64, contents: Option<&[u8]>) { 105 | Self::assert_addr_size(addr, size); 106 | self.assert_unique(addr, size); 107 | 108 | if let Some(contents) = contents { 109 | assert!(contents.len() <= size as usize, 110 | "Contents buffer is bigger than region to map."); 111 | } 112 | 113 | let backing = unsafe { 114 | let backing = rawmem::raw_alloc(size as usize); 115 | 116 | if let Some(contents) = contents { 117 | std::ptr::copy_nonoverlapping(contents.as_ptr(), backing, contents.len()); 118 | } 119 | 120 | backing 121 | }; 122 | 123 | let result = unsafe { 124 | whv::WHvMapGpaRange(self.partition, backing as _, addr, size, RWX_PERMS) 125 | }; 126 | 127 | if result < 0 { 128 | unsafe { 129 |
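// Mapping the GPA range failed: release the freshly allocated backing first, so the panic below does not leak host memory.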
rawmem::raw_free(backing); 130 | } 131 | 132 | panic!("Mapping GPA range failed with result {:X}.", result); 133 | } 134 | 135 | let region = Region { 136 | size, 137 | backing, 138 | }; 139 | 140 | assert!(self.regions.insert(addr, region).is_none(), "Region was already mapped (??)."); 141 | } 142 | 143 | pub fn unmap_phys_region(&mut self, addr: u64, size: u64) { 144 | Self::assert_addr_size(addr, size); 145 | 146 | let region = self.regions.remove(&addr).expect("Region to unmap was not mapped."); 147 | 148 | let result = unsafe { 149 | let result = whv::WHvUnmapGpaRange(self.partition, addr, region.size); 150 | 151 | rawmem::raw_free(region.backing); 152 | 153 | result 154 | }; 155 | 156 | assert!(result >= 0, "Unmapping GPA range failed with result {:X}.", result); 157 | assert!(size == region.size, "Region to unmap has invalid size. Got: {:X}, actual {:X}.", 158 | size, region.size); 159 | } 160 | 161 | pub fn read_phys(&self, mut addr: u64, buffer: &mut [u8]) -> Result<(), u64> { 162 | let mut already_read = 0; 163 | let mut left_to_read = buffer.len(); 164 | 165 | while left_to_read > 0 { 166 | if let Some((region, offset)) = self.region(addr) { 167 | let backing = region.offseted_slice(offset); 168 | let read_len = std::cmp::min(left_to_read, backing.len()); 169 | 170 | let buffer = &mut buffer[already_read..]; 171 | let buffer = &mut buffer[..read_len]; 172 | 173 | buffer.copy_from_slice(&backing[..read_len]); 174 | 175 | addr += read_len as u64; 176 | left_to_read -= read_len; 177 | already_read += read_len; 178 | } else { 179 | return Err(already_read as u64); 180 | } 181 | } 182 | 183 | Ok(()) 184 | } 185 | 186 | pub fn write_phys(&mut self, mut addr: u64, buffer: &[u8]) -> Result<(), u64> { 187 | let mut already_written = 0; 188 | let mut left_to_write = buffer.len(); 189 | 190 | while left_to_write > 0 { 191 | if let Some((mut region, offset)) = self.region(addr) { 192 | let backing = region.offseted_slice_mut(offset); 193 | let write_len = std::cmp::min(left_to_write, backing.len()); 194 | 195 | let buffer = &buffer[already_written..]; 196 | let buffer = &buffer[..write_len]; 197 | 198 | backing[..write_len].copy_from_slice(buffer); 199 | 200 | addr += write_len as u64; 201 | left_to_write -= write_len; 202 | already_written += write_len; 203 | } else { 204 | return Err(already_written as u64); 205 | } 206 | } 207 | 208 | Ok(()) 209 | } 210 | 211 | pub fn read_phys_u64(&self, addr: u64) -> Result<u64, u64> { 212 | let mut buffer = [0u8; 8]; 213 | self.read_phys(addr, &mut buffer).map(|_| u64::from_le_bytes(buffer)) 214 | } 215 | 216 | pub fn write_phys_u64(&mut self, addr: u64, value: u64) -> Result<(), u64> { 217 | self.write_phys(addr, &value.to_le_bytes()) 218 | } 219 | 220 | pub fn dump_physical_ranges(&self) { 221 | let dump_range = |start: u64, end: u64, count: u64| { 222 | print!("0x{:012X} -> 0x{:012X}", start, end); 223 | 224 | if count > 1 { 225 | print!(" [{}]", count); 226 | } 227 | 228 | println!(); 229 | }; 230 | 231 | let mut prev = None; 232 | 233 | for (base, region) in self.regions.iter() { 234 | let start = *base; 235 | let end = *base + region.size; 236 | 237 | let mut updated = false; 238 | 239 | if let Some((prev_start, prev_end, count)) = prev { 240 | if start == prev_end { 241 | prev = Some((prev_start, end, count + 1)); 242 | updated = true; 243 | } else { 244 | dump_range(prev_start, prev_end, count); 245 | } 246 | } 247 | 248 | if !updated { 249 | prev = Some((start, end, 1)); 250 | } 251 | } 252 | 253 | if let Some((prev_start, prev_end, count)) = prev {
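// The loop above only prints a range once the next region fails to extend it, so flush the final pending range here.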
254 | dump_range(prev_start, prev_end, count); 255 | } 256 | } 257 | } 258 | 259 | impl Drop for Memory { 260 | fn drop(&mut self) { 261 | assert!(self.regions.is_empty(), 262 | "Not all regions are free on memory manager destruction."); 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /src/vm/misc.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug, Copy, Clone)] 2 | pub enum AccessType { 3 | Read, 4 | Write, 5 | Execute, 6 | } 7 | 8 | #[derive(Debug, Copy, Clone)] 9 | pub enum PortSize { 10 | Byte, 11 | Word, 12 | Dword, 13 | } 14 | 15 | #[derive(Debug, Copy, Clone)] 16 | pub enum PendingInterruptType { 17 | Interrupt, 18 | Nmi, 19 | Exception, 20 | } 21 | 22 | #[derive(Debug, Copy, Clone)] 23 | pub enum UnsupportedFeature { 24 | Intercept, 25 | TaskSwitchTss, 26 | } 27 | 28 | #[derive(Debug, Copy, Clone)] 29 | pub struct SegAttribs { 30 | pub seg_type: u8, 31 | pub non_system: bool, 32 | pub dpl: u8, 33 | pub present: bool, 34 | pub default: bool, 35 | pub granularity: bool, 36 | pub long: bool, 37 | } 38 | 39 | impl SegAttribs { 40 | pub fn build(&self) -> u16 { 41 | assert!(self.seg_type <= 0b1111, "Invalid segment type."); 42 | assert!(self.dpl <= 0b11, "Invalid DPL."); 43 | 44 | (self.seg_type as u16) | 45 | ((self.non_system as u16) << 4) | 46 | ((self.dpl as u16) << 5) | 47 | ((self.present as u16) << 7) | 48 | ((self.long as u16) << 13) | 49 | ((self.default as u16) << 14) | 50 | ((self.granularity as u16) << 15) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/vm/mod.rs: -------------------------------------------------------------------------------- 1 | mod misc; 2 | mod regs; 3 | mod regstate; 4 | mod regstate_sync; 5 | mod exception; 6 | mod vmexit; 7 | mod memory; 8 | mod rawmem; 9 | mod regbits; 10 | mod whvp_bindings; 11 | 12 | use std::mem::MaybeUninit; 13 | use std::thread::{self, JoinHandle}; 14 | use std::sync::mpsc::{self, Receiver, Sender}; 15 | use std::time::Duration; 16 | 17 | pub use misc::{AccessType, PortSize, PendingInterruptType, SegAttribs, UnsupportedFeature}; 18 | pub use regs::{TableReg, SegReg, PendingExceptionReg, IntStateReg}; 19 | pub use regstate::RegState; 20 | pub use exception::Exception; 21 | pub use vmexit::VmExit; 22 | pub use memory::Memory; 23 | pub use regbits::{cr0, cr4, efer, xcr0}; 24 | use whvp_bindings as whv; 25 | 26 | type ExitContext = whv::WHV_RUN_VP_EXIT_CONTEXT; 27 | 28 | fn runner_thread(run_receiver: Receiver<usize>, exit_sender: Sender<ExitContext>) { 29 | while let Ok(partition) = run_receiver.recv() { 30 | let partition = partition as whv::WHV_PARTITION_HANDLE; 31 | 32 | let mut exit_context: MaybeUninit<ExitContext> = MaybeUninit::uninit(); 33 | 34 | let result = unsafe { 35 | whv::WHvRunVirtualProcessor(partition, 0, exit_context.as_mut_ptr() as _, 36 | std::mem::size_of::<ExitContext>() as u32) 37 | }; 38 | 39 | assert!(result >= 0, "Running virtual CPU failed with result {:X}.", result); 40 | 41 | let exit_context = unsafe { exit_context.assume_init() }; 42 | 43 | exit_sender.send(exit_context).unwrap(); 44 | } 45 | } 46 | 47 | pub struct Vm { 48 | partition: whv::WHV_PARTITION_HANDLE, 49 | register_values: Vec<whv::WHV_REGISTER_VALUE>, 50 | run_sender: Option<Sender<usize>>, 51 | exit_receiver: Option<Receiver<ExitContext>>, 52 | runner_thread: Option<JoinHandle<()>>, 53 | preemption_time: Option<Duration>, 54 | timeout_duration: Duration, 55 | regs: RegState, 56 | mem: Memory, 57 | reload_regs: bool, 58 | exit_instr_length: u64, 59 | } 60 | 61 | impl Vm { 62 | fn sync_to_whv(&mut self) { 63 | let
register_names = &regstate_sync::REGSTATE_WHV_NAMES; 64 | assert!(self.register_values.len() == register_names.len()); 65 | 66 | regstate_sync::sync_to_whv(&self.regs, &mut self.register_values); 67 | 68 | let result = unsafe { 69 | whv::WHvSetVirtualProcessorRegisters(self.partition, 0, register_names.as_ptr(), 70 | register_names.len() as u32, self.register_values.as_ptr()) 71 | }; 72 | 73 | assert!(result >= 0, "Syncing regstate to WHV failed with result {:X}.", result); 74 | } 75 | 76 | fn sync_from_whv(&mut self) { 77 | let register_names = &regstate_sync::REGSTATE_WHV_NAMES; 78 | assert!(self.register_values.len() == register_names.len()); 79 | 80 | let result = unsafe { 81 | whv::WHvGetVirtualProcessorRegisters(self.partition, 0, register_names.as_ptr(), 82 | register_names.len() as u32, self.register_values.as_mut_ptr()) 83 | }; 84 | 85 | assert!(result >= 0, "Syncing regstate from WHV failed with result {:X}.", result); 86 | 87 | regstate_sync::sync_from_whv(&mut self.regs, &self.register_values); 88 | } 89 | 90 | pub fn regs(&self) -> &RegState { 91 | &self.regs 92 | } 93 | 94 | pub fn regs_mut(&mut self) -> &mut RegState { 95 | self.reload_regs = true; 96 | 97 | &mut self.regs 98 | } 99 | 100 | pub fn mem(&self) -> &Memory { 101 | &self.mem 102 | } 103 | 104 | pub fn mem_mut(&mut self) -> &mut Memory { 105 | &mut self.mem 106 | } 107 | 108 | pub fn new(exit_exceptions: &[Exception]) -> Self { 109 | let mut partition = std::ptr::null_mut(); 110 | 111 | unsafe { 112 | let result = whv::WHvCreatePartition(&mut partition); 113 | assert!(result >= 0, "Creating WHV partition failed with result {:X}.", result); 114 | 115 | let mut property = MaybeUninit::<whv::WHV_PARTITION_PROPERTY>::zeroed() 116 | .assume_init(); 117 | 118 | let set_property = |p: &whv::WHV_PARTITION_PROPERTY, code: whv::WHV_CAPABILITY_CODE| { 119 | let size = std::mem::size_of::<whv::WHV_PARTITION_PROPERTY>() as u32; 120 | let prop = p as *const _ as *const std::ffi::c_void; 121 | 122 | let result = whv::WHvSetPartitionProperty(partition, code, prop, size); 123 | assert!(result >= 0, "Setting partition property {:X} failed with result {:X}.", 124 | code as u32, result); 125 | }; 126 | 127 | property.ProcessorCount = 1; 128 | set_property(&property, 129 | whv::WHV_PARTITION_PROPERTY_CODE_WHvPartitionPropertyCodeProcessorCount); 130 | 131 | property.ExtendedVmExits.__bindgen_anon_1.set_ExceptionExit(1); 132 | set_property(&property, 133 | whv::WHV_PARTITION_PROPERTY_CODE_WHvPartitionPropertyCodeExtendedVmExits); 134 | 135 | { 136 | let size = std::mem::size_of::<whv::WHV_PARTITION_PROPERTY>() as u32; 137 | let prop = &mut property as *mut _ as *mut std::ffi::c_void; 138 | let code = 139 | whv::WHV_PARTITION_PROPERTY_CODE_WHvPartitionPropertyCodeProcessorFeatures; 140 | 141 | let result = whv::WHvGetPartitionProperty(partition, code, prop, size, 142 | std::ptr::null_mut()); 143 | 144 | assert!(result >= 0, "Getting CPU default features failed with result {:X}.", 145 | result); 146 | 147 | let f = &mut property.ProcessorFeatures.__bindgen_anon_1; 148 | f.set_Sse3Support(1); 149 | f.set_Sse4_1Support(1); 150 | f.set_Sse4_2Support(1); 151 | f.set_Sse4aSupport(1); 152 | f.set_MisAlignSseSupport(1); 153 | f.set_Cmpxchg16bSupport(1); 154 | 155 | set_property(&property, code); 156 | } 157 | 158 | let mut exception_bitmap = 0; 159 | for exception in exit_exceptions.iter() { 160 | let id = exception.to_id(); 161 | exception_bitmap |= 1 << id; 162 | } 163 | 164 | property.ExceptionExitBitmap = exception_bitmap; 165 | set_property(&property, 166 |
whv::WHV_PARTITION_PROPERTY_CODE_WHvPartitionPropertyCodeExceptionExitBitmap); 167 | 168 | let result = whv::WHvSetupPartition(partition); 169 | assert!(result >= 0, "Setting up WHV partition failed with result {:X}.", result); 170 | 171 | let result = whv::WHvCreateVirtualProcessor(partition, 0, 0); 172 | assert!(result >= 0, "Creating virtual CPU failed with result {:X}.", result); 173 | } 174 | 175 | let (run_sender, run_receiver) = mpsc::channel(); 176 | let (exit_sender, exit_receiver) = mpsc::channel(); 177 | 178 | let runner_thread = thread::spawn(move || runner_thread(run_receiver, exit_sender)); 179 | 180 | let register_count = regstate_sync::REGSTATE_WHV_NAMES.len(); 181 | let mut register_values = Vec::with_capacity(register_count); 182 | 183 | for _ in 0..register_count { 184 | let zeroed_register_value = unsafe { 185 | MaybeUninit::<whv::WHV_REGISTER_VALUE>::zeroed().assume_init() 186 | }; 187 | 188 | register_values.push(zeroed_register_value); 189 | } 190 | 191 | let mut vm = Self { 192 | partition, 193 | register_values, 194 | runner_thread: Some(runner_thread), 195 | run_sender: Some(run_sender), 196 | exit_receiver: Some(exit_receiver), 197 | preemption_time: None, 198 | timeout_duration: Duration::from_millis(150), 199 | regs: Default::default(), 200 | mem: Memory::new(partition), 201 | reload_regs: false, 202 | exit_instr_length: 0, 203 | }; 204 | 205 | vm.sync_from_whv(); 206 | 207 | vm 208 | } 209 | 210 | pub fn inject_exception(&mut self, vector: Exception, error_code: Option<u32>) { 211 | self.regs_mut().pending_exception = PendingExceptionReg::Pending { 212 | error_code, 213 | vector, 214 | param: 0, 215 | }; 216 | } 217 | 218 | pub fn set_preemption_time(&mut self, preemption_time: Option<Duration>) { 219 | self.preemption_time = preemption_time; 220 | } 221 | 222 | pub fn exit_instruction_length(&self) -> u64 { 223 | self.exit_instr_length 224 | } 225 | 226 | pub fn run(&mut self) -> VmExit { 227 | if self.reload_regs { 228 | self.sync_to_whv(); 229 | 230 | self.reload_regs = false; 231 | } 232 | 233 | let exit_context = if let Some(preemption_time) = self.preemption_time { 234 | self.run_sender.as_mut().unwrap().send(self.partition as usize).unwrap(); 235 | 236 | let partition = self.partition; 237 | let timeout = self.timeout_duration; 238 | let exit_receiver = self.exit_receiver.as_mut().unwrap(); 239 | 240 | if let Ok(ctx) = exit_receiver.recv_timeout(preemption_time) { 241 | ctx 242 | } else { 243 | unsafe { 244 | let result = whv::WHvCancelRunVirtualProcessor(partition, 0, 0); 245 | 246 | assert!(result >= 0, 247 | "Canceling virtual CPU execution failed with result {:X}.", result); 248 | } 249 | 250 | exit_receiver.recv_timeout(timeout) 251 | .expect("Runner thread did not respond in time") 252 | } 253 | } else { 254 | let mut exit_context: MaybeUninit<ExitContext> = 255 | MaybeUninit::uninit(); 256 | 257 | let result = unsafe { 258 | whv::WHvRunVirtualProcessor(self.partition, 0, exit_context.as_mut_ptr() as _, 259 | std::mem::size_of::<ExitContext>() as u32) 260 | }; 261 | 262 | assert!(result >= 0, "Running virtual CPU failed with result {:X}.", result); 263 | 264 | unsafe { exit_context.assume_init() } 265 | }; 266 | 267 | self.sync_from_whv(); 268 | 269 | self.exit_instr_length = exit_context.VpContext.InstructionLength() as u64; 270 | 271 | VmExit::from_run_exit_context(&exit_context) 272 | } 273 | } 274 | 275 | impl Drop for Vm { 276 | fn drop(&mut self) { 277 | self.mem.destroy_all_mappings(); 278 | 279 | self.run_sender.take(); 280 | self.exit_receiver.take(); 281 | self.runner_thread.take().unwrap().join().unwrap();
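// Dropping the two channel endpoints above makes runner_thread's recv() return Err, so the thread exits its loop and join() completes; only then is it safe to tear down the virtual CPU and partition below.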
282 | 283 | unsafe { 284 | let result = whv::WHvDeleteVirtualProcessor(self.partition, 0); 285 | assert!(result >= 0, "Deleting virtual CPU failed with result {:X}.", result); 286 | 287 | let result = whv::WHvDeletePartition(self.partition); 288 | assert!(result >= 0, "Deleting WHV partition failed with result {:X}.", result); 289 | } 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /src/vm/rawmem.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_os = "windows")] 2 | pub unsafe fn raw_alloc(len: usize) -> *mut u8 { 3 | extern "system" { 4 | fn VirtualAlloc(addr: *mut u8, len: usize, alloc_type: u32, protect: u32) -> *mut u8; 5 | } 6 | 7 | const MEM_COMMIT_RESERVE: u32 = 0x1000 | 0x2000; 8 | const PAGE_READWRITE: u32 = 4; 9 | 10 | let result = VirtualAlloc(std::ptr::null_mut(), len, MEM_COMMIT_RESERVE, PAGE_READWRITE); 11 | 12 | assert!(!result.is_null(), "Allocating memory with size of {} bytes failed.", len); 13 | 14 | result 15 | } 16 | 17 | #[cfg(target_os = "windows")] 18 | pub unsafe fn raw_free(addr: *mut u8) { 19 | extern "system" { 20 | fn VirtualFree(addr: *mut u8, len: usize, free_type: u32) -> u32; 21 | } 22 | 23 | const MEM_RELEASE: u32 = 0x8000; 24 | 25 | let result = VirtualFree(addr, 0, MEM_RELEASE); 26 | 27 | assert!(result != 0, "Freeing memory at address {:p} failed.", addr); 28 | } 29 | -------------------------------------------------------------------------------- /src/vm/regbits.rs: -------------------------------------------------------------------------------- 1 | pub mod cr0 { 2 | pub const PE: u64 = 1; 3 | pub const MP: u64 = 1 << 1; 4 | pub const WP: u64 = 1 << 16; 5 | pub const PG: u64 = 1 << 31; 6 | } 7 | 8 | pub mod cr4 { 9 | pub const PAE: u64 = 1 << 5; 10 | pub const OSFXSR: u64 = 1 << 9; 11 | pub const OSXMMEXCPT: u64 = 1 << 10; 12 | pub const OSXSAVE: u64 = 1 << 18; 13 | } 14 | 15 | pub mod xcr0 { 16 | pub const X87: u64 = 1; 17 | pub const SSE: u64 = 1 << 1; 18 | } 19 | 20 | pub mod efer { 21 | pub const LME: u64 = 1 << 8; 22 | pub const LMA: u64 = 1 << 10; 23 | pub const NXE: u64 = 1 << 11; 24 | } 25 | -------------------------------------------------------------------------------- /src/vm/regs.rs: -------------------------------------------------------------------------------- 1 | use super::Exception; 2 | 3 | pub type Reg64 = u64; 4 | 5 | #[derive(Default, Debug, Copy, Clone)] 6 | pub struct SegReg { 7 | pub base: u64, 8 | pub limit: u32, 9 | pub sel: u16, 10 | pub attribs: u16, 11 | } 12 | 13 | #[derive(Default, Debug, Copy, Clone)] 14 | pub struct TableReg { 15 | pub base: u64, 16 | pub limit: u16, 17 | } 18 | 19 | #[derive(Default, Debug, Copy, Clone)] 20 | pub struct IntStateReg { 21 | pub int_shadow: bool, 22 | pub nmi_masked: bool, 23 | } 24 | 25 | #[derive(Debug, Copy, Clone)] 26 | pub enum PendingExceptionReg { 27 | NotPending, 28 | Pending { 29 | error_code: Option<u32>, 30 | vector: Exception, 31 | param: u64, 32 | }, 33 | } 34 | 35 | impl Default for PendingExceptionReg { 36 | fn default() -> Self { 37 | PendingExceptionReg::NotPending 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/vm/regstate.rs: -------------------------------------------------------------------------------- 1 | use super::regs::{Reg64, SegReg, TableReg, PendingExceptionReg, IntStateReg}; 2 | 3 | #[derive(Default, Debug, Clone)] 4 | pub struct RegState { 5 | pub rax: Reg64, 6 | pub rcx: Reg64, 7 | pub rdx: Reg64, 8
| pub rbx: Reg64, 9 | pub rsp: Reg64, 10 | pub rbp: Reg64, 11 | pub rsi: Reg64, 12 | pub rdi: Reg64, 13 | pub r8: Reg64, 14 | pub r9: Reg64, 15 | pub r10: Reg64, 16 | pub r11: Reg64, 17 | pub r12: Reg64, 18 | pub r13: Reg64, 19 | pub r14: Reg64, 20 | pub r15: Reg64, 21 | pub rip: Reg64, 22 | pub rflags: Reg64, 23 | 24 | pub es: SegReg, 25 | pub cs: SegReg, 26 | pub ss: SegReg, 27 | pub ds: SegReg, 28 | pub fs: SegReg, 29 | pub gs: SegReg, 30 | pub ldtr: SegReg, 31 | pub tr: SegReg, 32 | 33 | pub idtr: TableReg, 34 | pub gdtr: TableReg, 35 | 36 | pub cr0: Reg64, 37 | pub cr2: Reg64, 38 | pub cr3: Reg64, 39 | pub cr4: Reg64, 40 | pub cr8: Reg64, 41 | pub xcr0: Reg64, 42 | 43 | pub dr0: Reg64, 44 | pub dr1: Reg64, 45 | pub dr2: Reg64, 46 | pub dr3: Reg64, 47 | pub dr6: Reg64, 48 | pub dr7: Reg64, 49 | 50 | pub tsc: Reg64, 51 | pub efer: Reg64, 52 | pub kernel_gsbase: Reg64, 53 | pub apic_base: Reg64, 54 | pub pat: Reg64, 55 | pub sysenter_es: Reg64, 56 | pub sysenter_eip: Reg64, 57 | pub sysenter_esp: Reg64, 58 | pub star: Reg64, 59 | pub lstar: Reg64, 60 | pub cstar: Reg64, 61 | pub sfmask: Reg64, 62 | 63 | pub int_state: IntStateReg, 64 | pub pending_exception: PendingExceptionReg, 65 | } 66 | -------------------------------------------------------------------------------- /src/vm/regstate_sync.rs: -------------------------------------------------------------------------------- 1 | use super::whvp_bindings as whv; 2 | use super::Exception; 3 | use super::regs::{Reg64, SegReg, TableReg, PendingExceptionReg, IntStateReg}; 4 | use super::regstate::RegState; 5 | 6 | trait RegSyncWhvValue { 7 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self; 8 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE); 9 | } 10 | 11 | impl RegSyncWhvValue for SegReg { 12 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self { 13 | let value = unsafe { &value.Segment }; 14 | let attribs = unsafe { value.__bindgen_anon_1.Attributes }; 15 | 16 | Self { 17 | base: value.Base, 18 | limit: value.Limit, 19 | sel: value.Selector, 20 | attribs, 21 | } 22 | } 23 | 24 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE) { 25 | let value = unsafe { &mut value.Segment }; 26 | 27 | value.Base = self.base; 28 | value.Limit = self.limit; 29 | value.Selector = self.sel; 30 | 31 | value.__bindgen_anon_1.Attributes = self.attribs; 32 | } 33 | } 34 | 35 | impl RegSyncWhvValue for TableReg { 36 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self { 37 | let value = unsafe { &value.Table }; 38 | 39 | Self { 40 | base: value.Base, 41 | limit: value.Limit, 42 | } 43 | } 44 | 45 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE) { 46 | let value = unsafe { &mut value.Table }; 47 | 48 | value.Base = self.base; 49 | value.Limit = self.limit; 50 | } 51 | } 52 | 53 | impl RegSyncWhvValue for Reg64 { 54 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self { 55 | unsafe { value.Reg64 } 56 | } 57 | 58 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE) { 59 | value.Reg64 = *self; 60 | } 61 | } 62 | 63 | impl RegSyncWhvValue for IntStateReg { 64 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self { 65 | let value = unsafe { &value.InterruptState.__bindgen_anon_1 }; 66 | 67 | Self { 68 | int_shadow: value.InterruptShadow() != 0, 69 | nmi_masked: value.NmiMasked() != 0, 70 | } 71 | } 72 | 73 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE) { 74 | let value = unsafe { &mut value.InterruptState.__bindgen_anon_1 }; 75 | 76 | 
value.set_InterruptShadow(self.int_shadow as u64); 77 | value.set_NmiMasked(self.nmi_masked as u64); 78 | } 79 | } 80 | 81 | impl RegSyncWhvValue for PendingExceptionReg { 82 | fn create_from_whv(value: &whv::WHV_REGISTER_VALUE) -> Self { 83 | let value = unsafe { &value.ExceptionEvent.__bindgen_anon_1 }; 84 | 85 | if value.EventPending() == 0 { 86 | PendingExceptionReg::NotPending 87 | } else { 88 | assert!(value.EventType() == 89 | whv::WHV_X64_PENDING_EVENT_TYPE_WHvX64PendingEventException as u32); 90 | 91 | let error_code = match value.DeliverErrorCode() { 92 | 0 => None, 93 | _ => Some(value.ErrorCode), 94 | }; 95 | 96 | let vector = Exception::from_id(value.Vector() as u8) 97 | .expect("Invalid exception vector."); 98 | 99 | PendingExceptionReg::Pending { 100 | error_code, 101 | vector, 102 | param: value.ExceptionParameter, 103 | } 104 | } 105 | } 106 | 107 | fn save_to_whv(&self, value: &mut whv::WHV_REGISTER_VALUE) { 108 | let value = unsafe { &mut value.ExceptionEvent.__bindgen_anon_1 }; 109 | 110 | match self { 111 | PendingExceptionReg::NotPending => { 112 | value.set_EventPending(0); 113 | }, 114 | PendingExceptionReg::Pending { vector, error_code, param } => { 115 | value.set_EventPending(1); 116 | value.set_EventType( 117 | whv::WHV_X64_PENDING_EVENT_TYPE_WHvX64PendingEventException as u32); 118 | 119 | match error_code { 120 | Some(code) => { 121 | value.ErrorCode = *code; 122 | value.set_DeliverErrorCode(1); 123 | }, 124 | None => { 125 | value.set_DeliverErrorCode(0); 126 | } 127 | }; 128 | 129 | value.set_Vector(vector.to_id() as u32); 130 | value.ExceptionParameter = *param; 131 | }, 132 | } 133 | } 134 | } 135 | 136 | macro_rules! sync_regstate { 137 | () => { 138 | sync_reg!(rax); 139 | sync_reg!(rcx); 140 | sync_reg!(rdx); 141 | sync_reg!(rbx); 142 | sync_reg!(rsp); 143 | sync_reg!(rbp); 144 | sync_reg!(rsi); 145 | sync_reg!(rdi); 146 | sync_reg!(r8); 147 | sync_reg!(r9); 148 | sync_reg!(r10); 149 | sync_reg!(r11); 150 | sync_reg!(r12); 151 | sync_reg!(r13); 152 | sync_reg!(r14); 153 | sync_reg!(r15); 154 | sync_reg!(rip); 155 | sync_reg!(rflags); 156 | 157 | sync_reg!(es); 158 | sync_reg!(cs); 159 | sync_reg!(ss); 160 | sync_reg!(ds); 161 | sync_reg!(fs); 162 | sync_reg!(gs); 163 | sync_reg!(ldtr); 164 | sync_reg!(tr); 165 | 166 | sync_reg!(idtr); 167 | sync_reg!(gdtr); 168 | 169 | sync_reg!(cr0); 170 | sync_reg!(cr2); 171 | sync_reg!(cr3); 172 | sync_reg!(cr4); 173 | sync_reg!(cr8); 174 | sync_reg!(xcr0); 175 | 176 | sync_reg!(dr0); 177 | sync_reg!(dr1); 178 | sync_reg!(dr2); 179 | sync_reg!(dr3); 180 | sync_reg!(dr6); 181 | sync_reg!(dr7); 182 | 183 | sync_reg!(tsc); 184 | sync_reg!(efer); 185 | sync_reg!(kernel_gsbase); 186 | sync_reg!(apic_base); 187 | sync_reg!(pat); 188 | sync_reg!(sysenter_es); 189 | sync_reg!(sysenter_eip); 190 | sync_reg!(sysenter_esp); 191 | sync_reg!(star); 192 | sync_reg!(lstar); 193 | sync_reg!(cstar); 194 | sync_reg!(sfmask); 195 | 196 | sync_reg!(int_state); 197 | sync_reg!(pending_exception); 198 | } 199 | } 200 | 201 | pub const REGSTATE_WHV_NAMES: &[whv::WHV_REGISTER_NAME] = &[ 202 | whv::WHV_REGISTER_NAME_WHvX64RegisterRax, 203 | whv::WHV_REGISTER_NAME_WHvX64RegisterRcx, 204 | whv::WHV_REGISTER_NAME_WHvX64RegisterRdx, 205 | whv::WHV_REGISTER_NAME_WHvX64RegisterRbx, 206 | whv::WHV_REGISTER_NAME_WHvX64RegisterRsp, 207 | whv::WHV_REGISTER_NAME_WHvX64RegisterRbp, 208 | whv::WHV_REGISTER_NAME_WHvX64RegisterRsi, 209 | whv::WHV_REGISTER_NAME_WHvX64RegisterRdi, 210 | whv::WHV_REGISTER_NAME_WHvX64RegisterR8, 211 | 
whv::WHV_REGISTER_NAME_WHvX64RegisterR9, 212 | whv::WHV_REGISTER_NAME_WHvX64RegisterR10, 213 | whv::WHV_REGISTER_NAME_WHvX64RegisterR11, 214 | whv::WHV_REGISTER_NAME_WHvX64RegisterR12, 215 | whv::WHV_REGISTER_NAME_WHvX64RegisterR13, 216 | whv::WHV_REGISTER_NAME_WHvX64RegisterR14, 217 | whv::WHV_REGISTER_NAME_WHvX64RegisterR15, 218 | whv::WHV_REGISTER_NAME_WHvX64RegisterRip, 219 | whv::WHV_REGISTER_NAME_WHvX64RegisterRflags, 220 | 221 | whv::WHV_REGISTER_NAME_WHvX64RegisterEs, 222 | whv::WHV_REGISTER_NAME_WHvX64RegisterCs, 223 | whv::WHV_REGISTER_NAME_WHvX64RegisterSs, 224 | whv::WHV_REGISTER_NAME_WHvX64RegisterDs, 225 | whv::WHV_REGISTER_NAME_WHvX64RegisterFs, 226 | whv::WHV_REGISTER_NAME_WHvX64RegisterGs, 227 | whv::WHV_REGISTER_NAME_WHvX64RegisterLdtr, 228 | whv::WHV_REGISTER_NAME_WHvX64RegisterTr, 229 | 230 | whv::WHV_REGISTER_NAME_WHvX64RegisterIdtr, 231 | whv::WHV_REGISTER_NAME_WHvX64RegisterGdtr, 232 | 233 | whv::WHV_REGISTER_NAME_WHvX64RegisterCr0, 234 | whv::WHV_REGISTER_NAME_WHvX64RegisterCr2, 235 | whv::WHV_REGISTER_NAME_WHvX64RegisterCr3, 236 | whv::WHV_REGISTER_NAME_WHvX64RegisterCr4, 237 | whv::WHV_REGISTER_NAME_WHvX64RegisterCr8, 238 | whv::WHV_REGISTER_NAME_WHvX64RegisterXCr0, 239 | 240 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr0, 241 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr1, 242 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr2, 243 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr3, 244 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr6, 245 | whv::WHV_REGISTER_NAME_WHvX64RegisterDr7, 246 | 247 | whv::WHV_REGISTER_NAME_WHvX64RegisterTsc, 248 | whv::WHV_REGISTER_NAME_WHvX64RegisterEfer, 249 | whv::WHV_REGISTER_NAME_WHvX64RegisterKernelGsBase, 250 | whv::WHV_REGISTER_NAME_WHvX64RegisterApicBase, 251 | whv::WHV_REGISTER_NAME_WHvX64RegisterPat, 252 | whv::WHV_REGISTER_NAME_WHvX64RegisterSysenterCs, 253 | whv::WHV_REGISTER_NAME_WHvX64RegisterSysenterEip, 254 | whv::WHV_REGISTER_NAME_WHvX64RegisterSysenterEsp, 255 | whv::WHV_REGISTER_NAME_WHvX64RegisterStar, 256 | whv::WHV_REGISTER_NAME_WHvX64RegisterLstar, 257 | whv::WHV_REGISTER_NAME_WHvX64RegisterCstar, 258 | whv::WHV_REGISTER_NAME_WHvX64RegisterSfmask, 259 | 260 | whv::WHV_REGISTER_NAME_WHvRegisterInterruptState, 261 | whv::WHV_REGISTER_NAME_WHvRegisterPendingEvent, 262 | ]; 263 | 264 | pub fn sync_from_whv(state: &mut RegState, mut register_values: &[whv::WHV_REGISTER_VALUE]) { 265 | macro_rules! sync_reg { 266 | ($reg:tt) => { 267 | state.$reg = RegSyncWhvValue::create_from_whv(&register_values[0]); 268 | register_values = &register_values[1..]; 269 | } 270 | }; 271 | 272 | sync_regstate!(); 273 | 274 | assert!(register_values.is_empty(), "Didn't sync every value."); 275 | } 276 | 277 | pub fn sync_to_whv(state: &RegState, mut register_values: &mut [whv::WHV_REGISTER_VALUE]) { 278 | macro_rules!
sync_reg { 279 | ($reg:tt) => { 280 | state.$reg.save_to_whv(&mut register_values[0]); 281 | register_values = &mut register_values[1..]; 282 | } 283 | }; 284 | 285 | sync_regstate!(); 286 | 287 | assert!(register_values.is_empty(), "Didn't sync every value."); 288 | } 289 | -------------------------------------------------------------------------------- /src/vm/vmexit.rs: -------------------------------------------------------------------------------- 1 | use super::{AccessType, SegReg, PortSize, Exception, PendingInterruptType, UnsupportedFeature}; 2 | 3 | use super::whvp_bindings as whv; 4 | 5 | type InstructionBytes = [u8; 16]; 6 | 7 | #[derive(Debug, Clone)] 8 | pub enum VmExit { 9 | MemoryAccess { 10 | instruction: InstructionBytes, 11 | gpa: u64, 12 | gva: u64, 13 | access: AccessType, 14 | gpa_unmapped: bool, 15 | gva_valid: bool, 16 | }, 17 | PortAccess { 18 | instruction: InstructionBytes, 19 | port: u16, 20 | rax: u64, 21 | rcx: u64, 22 | rsi: u64, 23 | rdi: u64, 24 | ds: SegReg, 25 | es: SegReg, 26 | write: bool, 27 | string: bool, 28 | rep: bool, 29 | size: PortSize, 30 | }, 31 | MsrAccess { 32 | msr: u32, 33 | rax: u64, 34 | rdx: u64, 35 | write: bool, 36 | }, 37 | Cpuid { 38 | rax: u64, 39 | rcx: u64, 40 | rdx: u64, 41 | rbx: u64, 42 | def_rax: u64, 43 | def_rcx: u64, 44 | def_rdx: u64, 45 | def_rbx: u64, 46 | }, 47 | Exception { 48 | instruction: InstructionBytes, 49 | vector: Exception, 50 | error_code: Option<u32>, 51 | software: bool, 52 | param: u64, 53 | }, 54 | InterruptWindow { 55 | deliverable_type: PendingInterruptType, 56 | }, 57 | ApicEoi { 58 | vector: u32, 59 | }, 60 | UnsupportedFeature { 61 | feature: UnsupportedFeature, 62 | param: u64, 63 | }, 64 | UnrecoverableException, 65 | InvalidState, 66 | Preemption, 67 | Halt, 68 | } 69 | 70 | impl VmExit { 71 | pub(super) fn from_run_exit_context(exit_context: &whv::WHV_RUN_VP_EXIT_CONTEXT) -> VmExit { 72 | match exit_context.ExitReason { 73 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonNone => { 74 | panic!("Processor exited without any reason."); 75 | }, 76 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonMemoryAccess => { 77 | let info = unsafe { &exit_context.__bindgen_anon_1.MemoryAccess }; 78 | 79 | let instruction = info.InstructionBytes; 80 | 81 | let access_info = unsafe { info.AccessInfo.__bindgen_anon_1 }; 82 | 83 | let access = match access_info.AccessType() { 84 | 0 => AccessType::Read, 85 | 1 => AccessType::Write, 86 | 2 => AccessType::Execute, 87 | _ => panic!("Unknown memory access type {}.", access_info.AccessType()), 88 | }; 89 | 90 | VmExit::MemoryAccess { 91 | instruction, 92 | gpa: info.Gpa, 93 | gva: info.Gva, 94 | access, 95 | gpa_unmapped: access_info.GpaUnmapped() != 0, 96 | gva_valid: access_info.GvaValid() != 0, 97 | } 98 | }, 99 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64IoPortAccess => { 100 | let info = unsafe { &exit_context.__bindgen_anon_1.IoPortAccess }; 101 | 102 | let instruction = info.InstructionBytes; 103 | 104 | let access_info = unsafe { info.AccessInfo.__bindgen_anon_1 }; 105 | 106 | let port_size = match access_info.AccessSize() { 107 | 1 => PortSize::Byte, 108 | 2 => PortSize::Word, 109 | 4 => PortSize::Dword, 110 | _ => panic!("Unknown port access size {}.", access_info.AccessSize()), 111 | }; 112 | 113 | let get_segreg = |seg: &whv::WHV_X64_SEGMENT_REGISTER| { 114 | let attribs = unsafe { seg.__bindgen_anon_1.Attributes }; 115 | 116 | SegReg { 117 | base: seg.Base, 118 | limit: seg.Limit, 119 | sel: seg.Selector, 120 | attribs, 121 | } 122 | }; 123 | 124 |
VmExit::PortAccess { 125 | instruction, 126 | port: info.PortNumber, 127 | rax: info.Rax, 128 | rcx: info.Rcx, 129 | rsi: info.Rsi, 130 | rdi: info.Rdi, 131 | ds: get_segreg(&info.Ds), 132 | es: get_segreg(&info.Es), 133 | write: access_info.IsWrite() != 0, 134 | string: access_info.StringOp() != 0, 135 | rep: access_info.RepPrefix() != 0, 136 | size: port_size, 137 | } 138 | }, 139 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64MsrAccess => { 140 | let info = unsafe { &exit_context.__bindgen_anon_1.MsrAccess }; 141 | 142 | let access_info = unsafe { info.AccessInfo.__bindgen_anon_1 }; 143 | 144 | VmExit::MsrAccess { 145 | msr: info.MsrNumber, 146 | rax: info.Rax, 147 | rdx: info.Rdx, 148 | write: access_info.IsWrite() != 0, 149 | } 150 | }, 151 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64Cpuid => { 152 | let info = unsafe { &exit_context.__bindgen_anon_1.CpuidAccess }; 153 | 154 | VmExit::Cpuid { 155 | rax: info.Rax, 156 | rdx: info.Rdx, 157 | rcx: info.Rcx, 158 | rbx: info.Rbx, 159 | def_rax: info.DefaultResultRax, 160 | def_rdx: info.DefaultResultRdx, 161 | def_rcx: info.DefaultResultRcx, 162 | def_rbx: info.DefaultResultRbx, 163 | } 164 | }, 165 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonException => { 166 | let info = unsafe { &exit_context.__bindgen_anon_1.VpException }; 167 | 168 | let instruction = info.InstructionBytes; 169 | 170 | let vector = Exception::from_id(info.ExceptionType) 171 | .expect("Unknown exception type."); 172 | 173 | let exception_info = unsafe { info.ExceptionInfo.__bindgen_anon_1 }; 174 | 175 | let error_code = match exception_info.ErrorCodeValid() { 176 | 0 => None, 177 | _ => Some(info.ErrorCode), 178 | }; 179 | 180 | VmExit::Exception { 181 | instruction, 182 | vector, 183 | error_code, 184 | software: exception_info.SoftwareException() != 0, 185 | param: info.ExceptionParameter, 186 | } 187 | }, 188 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64InterruptWindow => { 189 | let info = unsafe { &exit_context.__bindgen_anon_1.InterruptWindow }; 190 | 191 | let deliverable_type = match info.DeliverableType { 192 | 0 => PendingInterruptType::Interrupt, 193 | 2 => PendingInterruptType::Nmi, 194 | 3 => PendingInterruptType::Exception, 195 | _ => panic!("Unknown pending interrupt type {}.", info.DeliverableType), 196 | }; 197 | 198 | VmExit::InterruptWindow { 199 | deliverable_type, 200 | } 201 | }, 202 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64ApicEoi => { 203 | let info = unsafe { &exit_context.__bindgen_anon_1.ApicEoi }; 204 | 205 | VmExit::ApicEoi { 206 | vector: info.InterruptVector, 207 | } 208 | }, 209 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonCanceled => { 210 | let info = unsafe { &exit_context.__bindgen_anon_1.CancelReason }; 211 | let reason = info.CancelReason; 212 | 213 | assert!(reason == 0, "Unknown execution cancel reason {}.", reason); 214 | 215 | VmExit::Preemption 216 | }, 217 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonUnsupportedFeature => { 218 | let info = unsafe { &exit_context.__bindgen_anon_1.UnsupportedFeature }; 219 | 220 | let feature = match info.FeatureCode { 221 | 1 => UnsupportedFeature::Intercept, 222 | 2 => UnsupportedFeature::TaskSwitchTss, 223 | _ => panic!("Unknown unsupported feature {}.", info.FeatureCode), 224 | }; 225 | 226 | VmExit::UnsupportedFeature { 227 | feature, 228 | param: info.FeatureParameter, 229 | } 230 | }, 231 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonUnrecoverableException => { 232 | VmExit::UnrecoverableException 233 | }, 234 |
whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonInvalidVpRegisterValue => { 235 | VmExit::InvalidState 236 | }, 237 | whv::WHV_RUN_VP_EXIT_REASON_WHvRunVpExitReasonX64Halt => { 238 | VmExit::Halt 239 | }, 240 | _ => panic!("Unknown exit reason {}.", exit_context.ExitReason) 241 | } 242 | } 243 | } 244 | --------------------------------------------------------------------------------
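Usage sketch (hypothetical, not a file from this repository): the fragment below shows one way the pieces above compose, assuming it is placed next to main in src/main.rs on a machine with Windows Hypervisor Platform enabled. The function name paging_demo, the bank ranges and the guest virtual address are made up for illustration; everything else uses only the APIs defined in the sources above.

fn paging_demo() {
    use crate::vm::Vm;
    use crate::mm::phys_allocator::{ContinousPhysAllocator, PhysAllocator};
    use crate::mm::paging::{MemAccess, MemProt, PagingManager};

    // Create a VM with no exception exits, plus two disjoint guest-physical
    // banks: a low one for page tables and a high one for data pages.
    let mut vm = Vm::new(&[]);
    let table_allocator = ContinousPhysAllocator::new(0x1000, Some(0x10_0000));
    let mut data_allocator = ContinousPhysAllocator::new(0x10_0000, None);

    let mut paging = PagingManager::new(&mut vm, table_allocator);

    // Back one 4 KiB guest-physical page and map it at a canonical,
    // page-aligned user-mode virtual address.
    let phys = data_allocator.alloc_phys(&mut vm, 0x1000, None);
    let virt = 0x7000_0000_0000;
    paging.map_virt_region(&mut vm, virt, phys, 0x1000, MemProt::rw(MemAccess::Usermode));

    // Accesses now go through the 4-level translation built by map_virt_page.
    paging.write_virt_u64(&mut vm, virt, 0x1122_3344).unwrap();
    assert_eq!(paging.read_virt_u64(&vm, virt).unwrap(), 0x1122_3344);

    // Unmapping hands back the backing pages; emptied page tables are returned
    // to the table allocator automatically.
    for backing in paging.unmap_virt_region(&mut vm, virt, 0x1000) {
        data_allocator.free_phys(&mut vm, backing, 0x1000);
    }
}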