├── rustfmt.toml
├── Cargo.toml
├── rust-toolchain.toml
├── hv-sys
│   ├── src
│   │   └── lib.rs
│   ├── wrapper.h
│   ├── Cargo.toml
│   ├── README.md
│   └── build.rs
├── .gitignore
├── .github
│   └── workflows
│       └── ci.yml
├── hv
│   ├── examples
│   │   ├── caps.rs
│   │   └── as.rs
│   ├── Cargo.toml
│   ├── README.md
│   └── src
│       ├── vcpu.rs
│       ├── lib.rs
│       ├── vm.rs
│       ├── arm64
│       │   ├── mod.rs
│       │   └── regs.rs
│       └── x86
│           ├── mod.rs
│           └── vmx.rs
├── example.entitlements
├── README.md
└── LICENSE
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | newline_style = "Unix"
2 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "hv",
4 | "hv-sys"
5 | ]
6 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "1.53"
3 | components = ["rustfmt", "clippy"]
4 |
--------------------------------------------------------------------------------
/hv-sys/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![allow(non_upper_case_globals)]
2 | #![allow(non_camel_case_types)]
3 | #![allow(improper_ctypes)]
4 | // Comes from unit tests, don't care much
5 | #![allow(deref_nullptr)]
6 | #![allow(unaligned_references)]
7 |
8 | include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
9 |
--------------------------------------------------------------------------------
/hv-sys/wrapper.h:
--------------------------------------------------------------------------------
1 | #if defined(__arm64__)
2 |
3 | // Newer version of Hypervisor.Framework (the ones with ARM support) includes more convenient `Hypervisor.h`
4 | #include "Hypervisor/Hypervisor.h"
5 |
6 | #elif defined(__x86_64__)
7 |
8 | #include "Hypervisor/hv.h"
9 | #include "Hypervisor/hv_vmx.h"
10 |
11 | #endif
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 |
5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
7 | Cargo.lock
8 |
9 | # These are backup files generated by rustfmt
10 | **/*.rs.bk
11 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on: [push, pull_request]
3 |
4 | jobs:
5 | checks:
6 | name: Checks
7 | runs-on: macos-10.15
8 | steps:
9 | - uses: actions/checkout@v2
10 | - run: cargo check --examples --tests --all-targets
11 | - run: cargo fmt --all -- --check --files-with-diff
12 | - run: cargo clippy --all-targets --all-features -- -D warnings
13 | - run: cargo test --all-features
14 |
--------------------------------------------------------------------------------
/hv/examples/caps.rs:
--------------------------------------------------------------------------------
1 | #[cfg(target_arch = "x86_64")]
2 | fn main() -> Result<(), hv::Error> {
3 | use hv::x86::{Capability, VmExt, VmOptions};
4 |
5 | let vm = hv::Vm::new(VmOptions::default())?;
6 |
7 | println!("Max vCPUs: {}", vm.capability(Capability::VcpuMax)?);
8 |
9 | println!(
10 | "Available address spaces: {}",
11 | vm.capability(Capability::AddrSpaceMax)?
12 | );
13 |
14 | Ok(())
15 | }
16 |
17 | #[cfg(target_arch = "aarch64")]
18 | fn main() {}
19 |
--------------------------------------------------------------------------------
/example.entitlements:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 |     <key>com.apple.security.hypervisor</key>
6 |     <true/>
7 | </dict>
8 | </plist>
--------------------------------------------------------------------------------
/hv-sys/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "hv-sys"
3 | version = "0.1.1"
4 | edition = "2018"
5 | description = "Unsafe bindings for Hypervisor Framework generated with bindgen"
6 | authors = ["Maksym Pavlenko ", "The Cloud Hypervisor Authors"]
7 | repository = "https://github.com/cloud-hypervisor/hypervisor-framework"
8 | license = "Apache-2.0"
9 | readme = "README.md"
10 | keywords = ["hypervisor", "virtualization", "microvm", "macos", "apple"]
11 | categories = ["os::macos-apis", "api-bindings", "hardware-support"]
12 |
13 | [build-dependencies]
14 | bindgen = "0.58"
15 |
16 | [package.metadata.docs.rs]
17 | targets = ["x86_64-apple-darwin", "aarch64-apple-darwin"]
18 |
--------------------------------------------------------------------------------
/hv/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "hv"
3 | version = "0.1.2"
4 | edition = "2018"
5 | description = "High level Rust bindings to Hypervisor Framework"
6 | authors = ["Maksym Pavlenko ", "The Cloud Hypervisor Authors"]
7 | repository = "https://github.com/cloud-hypervisor/hypervisor-framework"
8 | license = "Apache-2.0"
9 | readme = "README.md"
10 | keywords = ["hypervisor", "virtualization", "microvm", "macos", "apple"]
11 | categories = ["os::macos-apis", "api-bindings", "hardware-support"]
12 |
13 | [dependencies]
14 | bitflags = "1.2"
15 | hv-sys = { path = "../hv-sys", version = "0.1.1" }
16 |
17 | [dev-dependencies]
18 | libc = "0.2"
19 |
20 | [features]
21 | hv_10_15 = []
22 | default = ["hv_10_15"]
23 |
24 | # Query basic caps
25 | [[example]]
26 | name = "caps"
27 |
28 | # Apple Silicon
29 | [[example]]
30 | name = "as"
31 |
32 | # See https://docs.rs/about/metadata
33 | [package.metadata.docs.rs]
34 | targets = ["x86_64-apple-darwin", "aarch64-apple-darwin"]
35 |
--------------------------------------------------------------------------------
/hv-sys/README.md:
--------------------------------------------------------------------------------
1 | # hv-sys
2 |
3 | [](https://github.com/cloud-hypervisor/hypervisor-framework/actions/workflows/ci.yml)
4 | [](https://crates.io/crates/hv-sys)
5 | 
6 |
7 | Low-level `-sys` crate with raw, unsafe bindings for [Hypervisor Framework](https://developer.apple.com/documentation/hypervisor), automatically generated with `bindgen`.
8 |
9 | Please don't use this crate directly; have a look at the [hv](https://crates.io/crates/hv) crate instead.
10 | It offers a safer, high-level Rust API on top of the Hypervisor Framework.
11 |
12 | Please also see the [repository](https://github.com/cloud-hypervisor/hypervisor-framework) for ongoing work, questions, and bug reports.
13 |
14 | ## Usage
15 |
16 | Add the following to your `Cargo.toml`:
17 | ```toml
18 | [dependencies]
19 | hv-sys = "0.1"
20 | ```
21 |
--------------------------------------------------------------------------------
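As an illustration of what "raw and unsafe" means in practice, here is a minimal, hypothetical sketch of calling the generated bindings directly on an Intel-based Mac. The `HV_VM_DEFAULT as _` cast mirrors the one used in `hv/src/x86/mod.rs`, and `0` is assumed to be the success return code, as in the `call!` macro in `hv/src/lib.rs`.

```rust
// Hypothetical sketch: driving the raw bindings without the `hv` wrapper (x86_64 only).
// A non-zero hv_return_t indicates an error, mirroring the `call!` macro in hv/src/lib.rs.
#[cfg(target_arch = "x86_64")]
fn main() {
    unsafe {
        let ret = hv_sys::hv_vm_create(hv_sys::HV_VM_DEFAULT as _);
        assert_eq!(ret, 0, "hv_vm_create failed: {:#x}", ret);

        // ... map memory, create vCPUs, run the guest ...

        let ret = hv_sys::hv_vm_destroy();
        assert_eq!(ret, 0, "hv_vm_destroy failed: {:#x}", ret);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```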
/hv-sys/build.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::path::PathBuf;
3 | use std::process::Command;
4 |
5 | fn main() {
6 | let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
7 |
8 | bindgen::builder()
9 | .header("wrapper.h")
10 | .clang_arg(format!("-F{}/System/Library/Frameworks", show_sdk_path())) // -F Add framework to the search path
11 | .allowlist_function("hv_.*")
12 | .allowlist_var("HV.*")
13 | .allowlist_var("VM.*")
14 | .allowlist_var("IRQ.*")
15 | .derive_default(true)
16 | .derive_debug(true)
17 | .generate_comments(false)
18 | .generate()
19 | .expect("Failed to generate bindings")
20 | .write_to_file(out_path.join("bindings.rs"))
21 | .expect("Failed to write bindings file");
22 |
23 | println!("cargo:rustc-link-lib=framework=Hypervisor");
24 | }
25 |
26 | /// Execute `xcrun --sdk macosx --show-sdk-path` to locate MacOS SDK
27 | fn show_sdk_path() -> String {
28 | let output = Command::new("xcrun")
29 | .arg("--sdk")
30 | .arg("macosx")
31 | .arg("--show-sdk-path")
32 | .output()
33 | .expect("Failed to execute xcrun");
34 |
35 | if !output.stderr.is_empty() {
36 | panic!("ERROR: {}", String::from_utf8(output.stderr).unwrap());
37 | }
38 |
39 | let mut path = output.stdout;
40 |
41 | // Remove new line character ('\n' == 0x0A == 10)
42 | if path.ends_with(&[10]) {
43 | path.swap_remove(path.len() - 1);
44 | }
45 |
46 | String::from_utf8(path).unwrap()
47 | }
48 |
--------------------------------------------------------------------------------
/hv/README.md:
--------------------------------------------------------------------------------
1 | # hv
2 |
3 | [](https://github.com/cloud-hypervisor/hypervisor-framework/actions/workflows/ci.yml)
4 | [](https://crates.io/crates/hv)
5 | 
6 | [](https://docs.rs/hv/)
7 |
8 | `hv` provides high-level Rust bindings for the Hypervisor Framework.
9 |
10 | Build virtualization solutions on top of a lightweight hypervisor using Rust:
11 | - Full Hypervisor Framework support.
12 | - Supports Apple Silicon.
13 | - Safe Rust API.
14 |
15 | Please also see the [repository](https://github.com/cloud-hypervisor/hypervisor-framework) for the latest changes and updates.
16 |
17 | ## Requirements
18 |
19 | ### Hypervisor Framework
20 |
21 | At runtime, determine whether the Hypervisor APIs are available on a particular machine with `sysctl`:
22 |
23 | ```bash
24 | $ sysctl kern.hv_support
25 | kern.hv_support: 1
26 | ```
27 |
28 | In order to use the Hypervisor API, your app must have the `com.apple.security.hypervisor` entitlement.
29 | Refer to [example.entitlements](example.entitlements) for an example of what the entitlement file might look like.
30 |
31 | Use the following command to self-sign your binary for local development:
32 |
33 | ```bash
34 | $ codesign --sign - --force --entitlements=example.entitlements ./binary
35 | ```
36 |
37 | ### Rust
38 |
39 | Developed and tested on the latest stable Rust (1.53.0+).
40 |
41 | Be sure to have [Xcode](https://developer.apple.com/xcode/) installed and don't forget to run `xcode-select --install`,
42 | otherwise `bindgen` may fail to find the Hypervisor headers.
43 |
44 | ## Usage
45 |
46 | This crate is built on top of [hv-sys](https://crates.io/crates/hv-sys), the raw bindings generated with `bindgen`. Add `hv = "0.1"` to your `Cargo.toml` dependencies to use it.
47 |
48 | ## Example
49 |
50 | Here is a basic "Hello world" example on Apple Silicon:
51 | ```rust
52 | // Init VM
53 | let vm = Arc::new(hv::Vm::new(std::ptr::null_mut())?);
54 |
55 | // Initialize guest memory
56 | vm.map(load_addr, GUEST_ADDR, MEM_SIZE, hv::Memory::READ | hv::Memory::WRITE | hv::Memory::EXEC)?;
57 |
58 | // Create VCPU
59 | let cpu = vm.create_cpu()?;
60 |
61 | // Set regs
62 | cpu.set_reg(Reg::PC, GUEST_ADDR)?;
63 | cpu.set_reg(Reg::X1, GUEST_RESULT_ADDR)?;
64 |
65 | loop {
66 | cpu.run().expect("Failed to run CPU");
67 |
68 | let info = cpu.exit_info();
69 | println!("{:?}", info);
70 |
71 | break;
72 | }
73 | ```
74 |
--------------------------------------------------------------------------------
/hv/examples/as.rs:
--------------------------------------------------------------------------------
1 | // Apple Silicon example.
2 | // Adapted from https://github.com/zhuowei/FakeHVF/blob/main/simplevm.c
3 |
4 | #![allow(dead_code)]
5 |
6 | static CODE: &[u8] = &[
7 | // Compute ((2 + 2) - 1)
8 | 0x40, 0x00, 0x80, 0xD2, // mov x0, #2
9 | 0x00, 0x08, 0x00, 0x91, // add x0, x0, #2
10 | 0x00, 0x04, 0x00, 0xD1, // sub x0, x0, #1
11 | // Write it to memory pointed by x1
12 | 0x20, 0x00, 0x00, 0xF9, // str x0, [x1]
13 | // Reboot the computer with PSCI/SMCCC
14 | // 0x84000009 is PSCI SYSTEM_RESET using SMC32 calling convention
15 | 0x20, 0x01, 0x80, 0xd2, // mov x0, 0x0009
16 | 0x00, 0x80, 0xb0, 0xf2, // movk x0, 0x8400, lsl #16
17 | 0x02, 0x00, 0x00, 0xD4, // hvc #0
18 | // Infinite loop
19 | 0x00, 0x00, 0x00, 0x14,
20 | ];
21 |
22 | const MEM_SIZE: usize = 0x100000;
23 | const GUEST_ADDR: usize = 0x69420000;
24 |
25 | const RESULT_OFFSET: usize = 0x100;
26 | const GUEST_RESULT_ADDR: usize = GUEST_ADDR + RESULT_OFFSET;
27 |
28 | #[cfg(target_arch = "aarch64")]
29 | use hv::arm64::{Reg, VcpuExt};
30 |
31 | #[cfg(target_arch = "aarch64")]
32 | fn main() -> Result<(), hv::Error> {
33 | use std::sync::Arc;
34 |
35 | let load_addr = unsafe {
36 | libc::mmap(
37 | std::ptr::null_mut(),
38 | MEM_SIZE,
39 | libc::PROT_READ | libc::PROT_WRITE,
40 | libc::MAP_ANONYMOUS | libc::MAP_PRIVATE | libc::MAP_NORESERVE,
41 | -1,
42 | 0,
43 | ) as *mut u8
44 | };
45 |
46 | if load_addr == libc::MAP_FAILED as _ {
47 | panic!("libc::mmap returned MAP_FAILED");
48 | }
49 |
50 | unsafe {
51 | std::ptr::copy_nonoverlapping(CODE.as_ptr(), load_addr, CODE.len());
52 | }
53 |
54 | // Init VM
55 | let vm = Arc::new(hv::Vm::new(std::ptr::null_mut())?);
56 |
57 | // Initialize guest memory
58 | vm.map(
59 | load_addr,
60 | GUEST_ADDR as _,
61 | MEM_SIZE as _,
62 | hv::Memory::READ | hv::Memory::WRITE | hv::Memory::EXEC,
63 | )?;
64 |
65 | // Create VCPU
66 | let cpu = vm.create_cpu()?;
67 |
68 | // Set regs
69 | cpu.set_reg(Reg::PC, GUEST_ADDR as _)
70 | .expect("Failed to set PC reg");
71 |
72 | cpu.set_reg(Reg::X1, GUEST_RESULT_ADDR as _)
73 | .expect("Failed to set X1");
74 |
75 | loop {
76 | cpu.run().expect("Failed to run CPU");
77 |
78 | let info = cpu.exit_info();
79 | println!("{:?}", info);
80 |
81 | break;
82 | }
83 |
84 | let result_addr = unsafe { load_addr.add(RESULT_OFFSET) } as *const u64;
85 | let result = unsafe { *result_addr };
86 |
87 | println!("Result: {}", result);
88 | assert_eq!(result, 3);
89 |
90 | Ok(())
91 | }
92 |
93 | #[cfg(target_arch = "x86_64")]
94 | fn main() {}
95 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # hv
2 |
3 | [](https://github.com/cloud-hypervisor/hypervisor-framework/actions/workflows/ci.yml)
4 | [](https://github.com/cloud-hypervisor/hypervisor-framework/blob/main/LICENSE)
5 | [](https://docs.rs/hv/)
6 |
7 | `hv` provides high-level Rust bindings for the Hypervisor Framework.
8 |
9 | [Apple Documentation](https://developer.apple.com/documentation/hypervisor)
10 |
11 | Build virtualization solutions on top of a lightweight hypervisor using Rust:
12 | - Full Hypervisor Framework support.
13 | - Supports Apple Silicon.
14 | - Safe Rust API.
15 |
16 | This repository contains the following crates:
17 | | Name | Description | Links |
18 | | --- | --- | --- |
19 | | [`hv-sys`](./hv-sys) | Unsafe bindings generated with bindgen | [](https://crates.io/crates/hv-sys) |
20 | | [`hv`](./hv) | High level API to access Hypervisor Framework | [](https://crates.io/crates/hv) |
21 |
22 | ### Current list of things to do:
23 | - Make high level API safer.
24 | - Expand documentation.
25 | - Add more examples.
26 |
27 | ## Requirements
28 |
29 | ### Hypervisor Framework
30 |
31 | At runtime, determine whether the Hypervisor APIs are available on a particular machine with `sysctl`:
32 |
33 | ```bash
34 | $ sysctl kern.hv_support
35 | kern.hv_support: 1
36 | ```
37 |
38 | In order to use the Hypervisor API, your app must have the `com.apple.security.hypervisor` entitlement.
39 | Refer to [example.entitlements](example.entitlements) for an example of what the entitlement file might look like.
40 |
41 | Use the following command to self-sign your binary for local development:
42 |
43 | ```bash
44 | $ codesign --sign - --force --entitlements=example.entitlements ./binary
45 | ```
46 |
47 | ### Rust
48 |
49 | Developed and tested on the latest stable Rust (1.53.0+).
50 |
51 | Be sure to have [Xcode](https://developer.apple.com/xcode/) installed and don't forget to run `xcode-select --install`,
52 | otherwise `bindgen` may fail to find the Hypervisor headers.
53 |
54 | ## Example
55 |
56 | Here is a basic "Hello world" example on Apple Silicon:
57 | ```rust
58 | // Init VM
59 | let vm = Arc::new(hv::Vm::new(std::ptr::null_mut())?);
60 |
61 | // Initialize guest memory
62 | vm.map(load_addr, GUEST_ADDR, MEM_SIZE, hv::Memory::READ | hv::Memory::WRITE | hv::Memory::EXEC)?;
63 |
64 | // Create VCPU
65 | let cpu = vm.create_cpu()?;
66 |
67 | // Set regs
68 | cpu.set_reg(Reg::PC, GUEST_ADDR)?;
69 | cpu.set_reg(Reg::X1, GUEST_RESULT_ADDR)?;
70 |
71 | loop {
72 | cpu.run().expect("Failed to run CPU");
73 |
74 | let info = cpu.exit_info();
75 | println!("{:?}", info);
76 |
77 | break;
78 | }
79 | ```
80 |
--------------------------------------------------------------------------------
/hv/src/vcpu.rs:
--------------------------------------------------------------------------------
1 | use crate::{call, sys, Error, Vm};
2 | use std::sync::Arc;
3 |
4 | /// The type that describes a vCPU ID on Intel.
5 | #[cfg(target_arch = "x86_64")]
6 | pub type Id = sys::hv_vcpuid_t;
7 |
8 | /// The type that describes a vCPU ID on Apple Silicon.
9 | #[cfg(target_arch = "aarch64")]
10 | pub type Id = sys::hv_vcpu_t;
11 |
12 | /// Represents a single virtual CPU.
13 | ///
14 | /// A [Vcpu] object is not thread safe; all calls must be performed from
15 | /// the owning thread.
16 | pub struct Vcpu {
17 | #[allow(dead_code)] // VM instance must outlive CPU in order to deallocate things properly.
18 | vm: Arc<Vm>,
19 | pub(crate) id: Id,
20 | #[cfg(target_arch = "aarch64")]
21 | /// The pointer to the vCPU exit information.
22 | /// The function `hv_vcpu_run` updates this structure on return.
23 | /// Apple silicon only.
24 | pub(crate) exit: *const sys::hv_vcpu_exit_t,
25 | }
26 |
27 | impl Vcpu {
28 | /// Creates a vCPU instance for the current thread.
29 | pub(crate) fn new(vm: Arc<Vm>) -> Result<Vcpu, Error> {
30 | #[cfg(target_arch = "x86_64")]
31 | {
32 | let mut id = 0;
33 | call!(sys::hv_vcpu_create(&mut id, 0))?;
34 | Ok(Vcpu { vm, id })
35 | }
36 |
37 | #[cfg(target_arch = "aarch64")]
38 | {
39 | let mut id = 0;
40 | let mut exit = std::ptr::null_mut();
41 | call!(sys::hv_vcpu_create(
42 | &mut id,
43 | &mut exit,
44 | std::ptr::null_mut()
45 | ))?;
46 | Ok(Vcpu { vm, id, exit })
47 | }
48 | }
49 |
50 | /// Executes a vCPU.
51 | ///
52 | /// The call blocks until the next exit of the vCPU [1].
53 | /// The owning thread must call this function.
54 | ///
55 | /// # Intel
56 | /// On an Intel-based Mac, `hv_vcpu_run` may return due to causes external to the guest.
57 | /// To avoid the overhead of spurious exits use `hv_vcpu_run_until` with the deadline `HV_DEADLINE_FOREVER`.
58 | ///
59 | /// # Apple Silicon
60 | /// If the exit is of type `HV_EXIT_REASON_VTIMER_ACTIVATED`, the VTimer is automatically masked.
61 | /// As a result, no timer fires until the timer is unmasked with `hv_vcpu_set_vtimer_mask`.
62 | ///
63 | /// [1]: https://developer.apple.com/documentation/hypervisor/1441231-hv_vcpu_run
64 | pub fn run(&self) -> Result<(), Error> {
65 | call!(sys::hv_vcpu_run(self.id))
66 | }
67 |
68 | /// Returns the cumulative execution time of a vCPU in nanoseconds.
69 | pub fn exec_time(&self) -> Result<u64, Error> {
70 | let mut out = 0_u64;
71 | call!(sys::hv_vcpu_get_exec_time(self.id, &mut out))?;
72 | Ok(out)
73 | }
74 |
75 | /// Returns the underlying vCPU ID.
76 | #[inline]
77 | pub fn id(&self) -> Id {
78 | self.id
79 | }
80 | }
81 |
82 | /// Destroys the vCPU instance associated with the current thread.
83 | impl Drop for Vcpu {
84 | fn drop(&mut self) {
85 | call!(sys::hv_vcpu_destroy(self.id)).unwrap()
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
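Since a `Vcpu` must be created, run, and dropped on its owning thread while `Vm` is `Send`, a common pattern is one thread per vCPU. A minimal, hypothetical sketch (not part of the repository):

```rust
use std::sync::Arc;
use std::thread;

// Hypothetical helper: each vCPU lives entirely on the thread that created it,
// while the Arc<Vm> handle is moved into that thread.
fn spawn_vcpu(vm: Arc<hv::Vm>) -> thread::JoinHandle<Result<(), hv::Error>> {
    thread::spawn(move || {
        let cpu = vm.create_cpu()?; // created on the owning thread
        cpu.run()?;                 // must run on the same thread
        println!("vCPU {} ran for {} ns", cpu.id(), cpu.exec_time()?);
        Ok(())
    })
}
```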
/hv/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! `hv` is a high-level, safe Rust crate for accessing the Hypervisor Framework.
2 |
3 | use std::error;
4 | use std::fmt;
5 |
6 | /// Low level access to generated bindings.
7 | pub use hv_sys as sys;
8 | pub use vcpu::Vcpu;
9 | pub use vm::Vm;
10 |
11 | mod vcpu;
12 | pub mod vm;
13 |
14 | #[cfg(target_arch = "aarch64")]
15 | pub mod arm64;
16 | #[cfg(target_arch = "x86_64")]
17 | pub mod x86;
18 |
19 | pub type Size = u64;
20 |
21 | /// Type of a user virtual address.
22 | pub type Addr = *const u8;
23 |
24 | /// Type of a guest physical address.
25 | pub type GPAddr = u64;
26 |
27 | bitflags::bitflags! {
28 | /// Guest physical memory region permissions.
29 | pub struct Memory: u32 {
30 | const READ = sys::HV_MEMORY_READ;
31 | const WRITE = sys::HV_MEMORY_WRITE;
32 | const EXEC = sys::HV_MEMORY_EXEC;
33 | }
34 | }
35 |
36 | /// Helper macro to call unsafe Hypervisor functions and map returned error codes to [Error] type.
37 | #[macro_export]
38 | macro_rules! call {
39 | ($f:expr) => {{
40 | let code = unsafe { $f };
41 | match code {
42 | 0 => Ok(()),
43 | _ => Err(Error::from(code)),
44 | }
45 | }};
46 | }
47 |
48 | /// The return type of framework functions.
49 | /// Wraps the underlying `hv_return_t` type.
50 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
51 | pub enum Error {
52 | Unsuccessful,
53 | Busy,
54 | BadArgument,
55 | NoResources,
56 | NoDevice,
57 | Unsupported,
58 | /// Not mapped error code.
59 | Unknown(sys::hv_return_t),
60 | }
61 |
62 | impl error::Error for Error {}
63 |
64 | impl fmt::Display for Error {
65 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
66 | match self {
67 | Error::Unsuccessful => write!(f, "The operation was unsuccessful"),
68 | Error::Busy => write!(f, "The operation was unsuccessful because the owning resource was busy"),
69 | Error::BadArgument => write!(f, "The operation was unsuccessful because the function call had an invalid argument"),
70 | Error::NoResources => write!(f, "The operation was unsuccessful because the host had no resources available to complete the request"),
71 | Error::NoDevice => write!(f, "The operation was unsuccessful because no VM or vCPU was available"),
72 | Error::Unsupported => write!(f, "The operation requested isn’t supported by the hypervisor"),
73 | Error::Unknown(code) => write!(f, "Error code: {}", *code as i32),
74 | }
75 | }
76 | }
77 |
78 | impl From<sys::hv_return_t> for Error {
79 | fn from(value: sys::hv_return_t) -> Self {
80 | // Looks like bindgen gets confused sometimes and produces different code for these
81 | // constants (`sys::HV_ERROR` vs `hv_return_t_HV_ERROR`) on different machines, causing builds
82 | // to fail. It's probably easier to just hardcode them.
83 | let v = value as i64;
84 | match v {
85 | 0xfae94001 => Error::Unsuccessful,
86 | 0xfae94002 => Error::Busy,
87 | 0xfae94003 => Error::BadArgument,
88 | 0xfae94005 => Error::NoResources,
89 | 0xfae94006 => Error::NoDevice,
90 | 0xfae9400f => Error::Unsupported,
91 | _ => Error::Unknown(value),
92 | }
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
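Because `call!` is exported with `#[macro_export]`, downstream code can wrap additional raw calls the same way the crate does internally. A small, hypothetical sketch:

```rust
use hv::{call, Error};

// Hypothetical wrapper around a raw binding: a zero return code becomes Ok(()),
// anything else is converted through `Error::from`.
fn destroy_current_vm() -> Result<(), Error> {
    call!(hv::sys::hv_vm_destroy())
}
```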
/hv/src/vm.rs:
--------------------------------------------------------------------------------
1 | use std::ffi::c_void;
2 | use std::sync::Arc;
3 |
4 | use crate::{call, sys, Addr, Error, GPAddr, Memory, Size, Vcpu};
5 |
6 | #[cfg(target_arch = "x86_64")]
7 | pub type Options = crate::x86::VmOptions;
8 |
9 | #[cfg(target_arch = "aarch64")]
10 | pub type Options = sys::hv_vm_config_t;
11 |
12 | /// [Vm] is the entry point to the Hypervisor Framework.
13 | #[derive(Debug)]
14 | pub struct Vm;
15 |
16 | /// Destroys the VM instance associated with the current process.
17 | impl Drop for Vm {
18 | fn drop(&mut self) {
19 | call!(sys::hv_vm_destroy()).unwrap()
20 | }
21 | }
22 |
23 | unsafe impl Send for Vm {}
24 |
25 | impl Vm {
26 | /// Creates a VM instance for the current process.
27 | ///
28 | /// Only one VM object can exist at a time.
29 | /// All subsequent attempts will return an error from Hypervisor Framework.
30 | ///
31 | /// In order to create child objects (`Vcpu`, `Space`, etc), this object must be wrapped
32 | /// with [Arc].
33 | ///
34 | pub fn new(options: Options) -> Result<Vm, Error> {
35 | #[cfg(target_arch = "x86_64")]
36 | let options = options.bits();
37 |
38 | call!(sys::hv_vm_create(options))?;
39 | Ok(Vm)
40 | }
41 |
42 | /// Creates a vCPU instance for the current thread.
43 | ///
44 | /// `create_cpu` implements safe wrapper around `hv_vcpu_create` that holds reference to the
45 | /// [Vm] object, so they can be dropped in proper order.
46 | pub fn create_cpu(self: Arc<Self>) -> Result<Vcpu, Error> {
47 | Vcpu::new(Arc::clone(&self))
48 | }
49 |
50 | /// Maps a region in the virtual address space of the current task into the guest physical
51 | /// address space of the VM.
52 | ///
53 | /// The host memory must encompass a single VM region, typically allocated with `mmap` or
54 | /// `mach_vm_allocate` instead of `malloc`. [1]
55 | ///
56 | /// # Arguments
57 | /// * `uva` - Page aligned virtual address in the current task.
58 | /// * `gpa` - Page aligned address in the guest physical address space.
59 | /// * `size` - Size in bytes of the region to be mapped.
60 | /// * `flags` - READ, WRITE and EXECUTE permissions of the region
61 | ///
62 | /// [1]: https://developer.apple.com/documentation/hypervisor/1441187-hv_vm_map
63 | ///
64 | pub fn map(&self, uva: Addr, gpa: GPAddr, size: Size, flags: Memory) -> Result<(), Error> {
65 | call!(sys::hv_vm_map(
66 | uva as *mut c_void,
67 | gpa,
68 | size,
69 | flags.bits() as _
70 | ))
71 | }
72 |
73 | /// Unmaps a region in the guest physical address space of the VM
74 | ///
75 | /// # Arguments
76 | /// * `gpa` - Page aligned address in the guest physical address space.
77 | /// * `size` - Size in bytes of the region to be unmapped.
78 | pub fn unmap(&self, gpa: GPAddr, size: Size) -> Result<(), Error> {
79 | call!(sys::hv_vm_unmap(gpa, size))
80 | }
81 |
82 | /// Modifies the permissions of a region in the guest physical address space of the VM.
83 | ///
84 | /// # Arguments
85 | /// * `gpa` - Page aligned address in the guest physical address space.
86 | /// * `size` - Size in bytes of the region to be modified.
87 | /// * `flags` - New READ, WRITE and EXECUTE permissions of the region.
88 | pub fn protect(&self, gpa: GPAddr, size: Size, flags: Memory) -> Result<(), Error> {
89 | call!(sys::hv_vm_protect(gpa, size, flags.bits() as _))
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
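A minimal, hypothetical sketch of the map/protect/unmap cycle documented above, using `mmap`-backed host memory as the docs recommend (x86_64 path shown; the constants are made up for illustration):

```rust
#[cfg(target_arch = "x86_64")]
fn main() -> Result<(), hv::Error> {
    use std::sync::Arc;

    use hv::x86::VmOptions;

    const MEM_SIZE: usize = 0x10000; // 64 KiB, page aligned
    const GUEST_ADDR: hv::GPAddr = 0x4000_0000;

    // Allocate page-aligned host memory with mmap (not malloc), as the docs require.
    let host_mem = unsafe {
        libc::mmap(
            std::ptr::null_mut(),
            MEM_SIZE,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
            -1,
            0,
        )
    };
    assert_ne!(host_mem, libc::MAP_FAILED, "mmap failed");

    let vm = Arc::new(hv::Vm::new(VmOptions::default())?);

    // Map the host buffer read/write into the guest physical address space.
    vm.map(
        host_mem as *const u8,
        GUEST_ADDR,
        MEM_SIZE as hv::Size,
        hv::Memory::READ | hv::Memory::WRITE,
    )?;

    // Drop write permission, then remove the mapping entirely.
    vm.protect(GUEST_ADDR, MEM_SIZE as hv::Size, hv::Memory::READ)?;
    vm.unmap(GUEST_ADDR, MEM_SIZE as hv::Size)?;

    Ok(())
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```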
/hv/src/arm64/mod.rs:
--------------------------------------------------------------------------------
1 | //! Apple Silicon extensions support.
2 |
3 | use crate::{call, sys, Error, Vcpu};
4 |
5 | mod regs;
6 | pub use regs::*;
7 |
8 | /// Injected interrupt type.
9 | #[repr(u32)]
10 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
11 | pub enum InterruptType {
12 | IRQ = sys::hv_interrupt_type_t_HV_INTERRUPT_TYPE_IRQ,
13 | FIQ = sys::hv_interrupt_type_t_HV_INTERRUPT_TYPE_FIQ,
14 | }
15 |
16 | /// Events that can trigger a guest exit to the VMM.
17 | #[repr(u32)]
18 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
19 | pub enum ExitReason {
20 | /// Asynchronous exit requested explicitly by `hv_vcpus_exit` call.
21 | Canceled = sys::hv_exit_reason_t_HV_EXIT_REASON_CANCELED,
22 | /// Synchronous exception to EL2 triggered by the guest.
23 | Exception = sys::hv_exit_reason_t_HV_EXIT_REASON_EXCEPTION,
24 | /// ARM Generic VTimer became pending since the last hv_vcpu_run() call
25 | /// returned. The caller is expected to make the interrupt corresponding to
26 | /// the VTimer pending in the guest's interrupt controller.
27 | ///
28 | /// This exit automatically sets the VTimer mask.
29 | /// The VCPU will not exit with this status again until after the mask is cleared
30 | /// with hv_vcpu_set_vtimer_mask(), which should be called during a trap of
31 | /// the EOI for the guest's VTimer interrupt handler.
32 | VTimerActivated = sys::hv_exit_reason_t_HV_EXIT_REASON_VTIMER_ACTIVATED,
33 | /// Unable to determine exit reason: this should not happen under normal operation.
34 | Unknown = sys::hv_exit_reason_t_HV_EXIT_REASON_UNKNOWN,
35 | }
36 |
37 | impl Default for ExitReason {
38 | fn default() -> Self {
39 | ExitReason::Unknown
40 | }
41 | }
42 |
43 | impl From<sys::hv_exit_reason_t> for ExitReason {
44 | fn from(value: sys::hv_exit_reason_t) -> Self {
45 | match value {
46 | sys::hv_exit_reason_t_HV_EXIT_REASON_CANCELED => ExitReason::Canceled,
47 | sys::hv_exit_reason_t_HV_EXIT_REASON_EXCEPTION => ExitReason::Exception,
48 | sys::hv_exit_reason_t_HV_EXIT_REASON_VTIMER_ACTIVATED => ExitReason::VTimerActivated,
49 | sys::hv_exit_reason_t_HV_EXIT_REASON_UNKNOWN => ExitReason::Unknown,
50 | _ => ExitReason::Unknown,
51 | }
52 | }
53 | }
54 |
55 | /// Contains information about an exit from the vcpu to the host.
56 | pub type VcpuExit = sys::hv_vcpu_exit_t;
57 |
58 | pub trait VcpuExt {
59 | /// Returns the current value of a vCPU register.
60 | fn get_reg(&self, reg: regs::Reg) -> Result<u64, Error>;
61 |
62 | /// Sets the value of a vCPU register.
63 | fn set_reg(&self, reg: regs::Reg, value: u64) -> Result<(), Error>;
64 |
65 | /// Returns the current value of a vCPU SIMD & FP register.
66 | fn get_simd_fp_reg(&self, reg: regs::SimdFpReg) -> Result<regs::SimdFpUchar16, Error>;
67 |
68 | /// Sets the value of a vCPU SIMD & FP register
69 | fn set_simd_fp_reg(
70 | &self,
71 | reg: regs::SimdFpReg,
72 | value: regs::SimdFpUchar16,
73 | ) -> Result<(), Error>;
74 |
75 | /// Returns the current value of a vCPU system register.
76 | fn get_sys_reg(&self, reg: regs::SysReg) -> Result<u64, Error>;
77 |
78 | /// Sets the value of a vCPU system register.
79 | fn set_sys_reg(&self, reg: regs::SysReg, value: u64) -> Result<(), Error>;
80 |
81 | /// Gets pending interrupts for a vcpu.
82 | fn pending_interrupt(&self, ty: InterruptType) -> Result<bool, Error>;
83 |
84 | /// Sets pending interrupts for a vcpu.
85 | fn set_pending_interrupt(&self, ty: InterruptType, pending: bool) -> Result<(), Error>;
86 |
87 | /// Get whether debug exceptions in the guest are trapped to the host.
88 | fn trap_debug_exceptions(&self) -> Result<bool, Error>;
89 |
90 | /// Set whether debug exceptions in the guest are trapped to the host.
91 | fn set_trap_debug_exceptions(&self, enable: bool) -> Result<(), Error>;
92 |
93 | /// Get whether debug register accesses in the guest are trapped to the host.
94 | fn trap_debug_reg_accesses(&self) -> Result<bool, Error>;
95 |
96 | /// Set whether debug register accesses in the guest are trapped to the host.
97 | fn set_trap_debug_reg_accesses(&self, enable: bool) -> Result<(), Error>;
98 |
99 | /// Gets the VTimer mask.
100 | fn vtimer_mask(&self) -> Result<bool, Error>;
101 |
102 | /// Sets the VTimer mask.
103 | fn set_vtimer_mask(&self, vtimer_is_masked: bool) -> Result<(), Error>;
104 |
105 | /// Gets the VTimer offset.
106 | fn vtimer_offset(&self) -> Result<u64, Error>;
107 |
108 | /// Sets the VTimer offset.
109 | fn set_vtimer_offset(&self, vtimer_offset: u64) -> Result<(), Error>;
110 |
111 | /// Returns the underlying `hv_vcpu_exit_t` structure.
112 | fn exit_info(&self) -> VcpuExit;
113 | }
114 |
115 | impl VcpuExt for Vcpu {
116 | /// Returns the current value of a vCPU register.
117 | fn get_reg(&self, reg: regs::Reg) -> Result<u64, Error> {
118 | let mut out = 0_u64;
119 | call!(sys::hv_vcpu_get_reg(self.id, reg as _, &mut out))?;
120 | Ok(out)
121 | }
122 |
123 | /// Sets the value of a vCPU register.
124 | fn set_reg(&self, reg: regs::Reg, value: u64) -> Result<(), Error> {
125 | call!(sys::hv_vcpu_set_reg(self.id, reg as _, value))
126 | }
127 |
128 | /// Returns the current value of a vCPU SIMD & FP register.
129 | fn get_simd_fp_reg(&self, reg: regs::SimdFpReg) -> Result<regs::SimdFpUchar16, Error> {
130 | let mut out = 0_u128;
131 | call!(sys::hv_vcpu_get_simd_fp_reg(self.id, reg as _, &mut out))?;
132 | Ok(out)
133 | }
134 |
135 | /// Sets the value of a vCPU SIMD & FP register.
136 | fn set_simd_fp_reg(
137 | &self,
138 | reg: regs::SimdFpReg,
139 | value: regs::SimdFpUchar16,
140 | ) -> Result<(), Error> {
141 | call!(sys::hv_vcpu_set_simd_fp_reg(self.id, reg as _, value))?;
142 | Ok(())
143 | }
144 |
145 | /// Returns the current value of a vCPU system register.
146 | fn get_sys_reg(&self, reg: regs::SysReg) -> Result<u64, Error> {
147 | let mut out = 0_u64;
148 | call!(sys::hv_vcpu_get_sys_reg(self.id, reg as _, &mut out))?;
149 | Ok(out)
150 | }
151 |
152 | /// Sets the value of a vCPU system register.
153 | fn set_sys_reg(&self, reg: regs::SysReg, value: u64) -> Result<(), Error> {
154 | call!(sys::hv_vcpu_set_sys_reg(self.id, reg as _, value))
155 | }
156 |
157 | /// Gets pending interrupts for a vcpu.
158 | fn pending_interrupt(&self, ty: InterruptType) -> Result<bool, Error> {
159 | let mut out = false;
160 | call!(sys::hv_vcpu_get_pending_interrupt(
161 | self.id, ty as u32, &mut out
162 | ))?;
163 | Ok(out)
164 | }
165 |
166 | /// Sets pending interrupts for a vcpu.
167 | fn set_pending_interrupt(&self, ty: InterruptType, pending: bool) -> Result<(), Error> {
168 | call!(sys::hv_vcpu_set_pending_interrupt(
169 | self.id,
170 | ty as u32,
171 | pending
172 | ))
173 | }
174 |
175 | /// Get whether debug exceptions in the guest are trapped to the host.
176 | fn trap_debug_exceptions(&self) -> Result<bool, Error> {
177 | let mut out = false;
178 | call!(sys::hv_vcpu_get_trap_debug_exceptions(self.id, &mut out))?;
179 | Ok(out)
180 | }
181 |
182 | /// Set whether debug exceptions in the guest are trapped to the host.
183 | fn set_trap_debug_exceptions(&self, enable: bool) -> Result<(), Error> {
184 | call!(sys::hv_vcpu_set_trap_debug_exceptions(self.id, enable))
185 | }
186 |
187 | /// Get whether debug register accesses in the guest are trapped to the host.
188 | fn trap_debug_reg_accesses(&self) -> Result<bool, Error> {
189 | let mut out = false;
190 | call!(sys::hv_vcpu_get_trap_debug_reg_accesses(self.id, &mut out))?;
191 | Ok(out)
192 | }
193 |
194 | /// Set whether debug register accesses in the guest are trapped to the host.
195 | fn set_trap_debug_reg_accesses(&self, enable: bool) -> Result<(), Error> {
196 | call!(sys::hv_vcpu_set_trap_debug_reg_accesses(self.id, enable))
197 | }
198 |
199 | /// Gets the VTimer mask.
200 | fn vtimer_mask(&self) -> Result<bool, Error> {
201 | let mut out = false;
202 | call!(sys::hv_vcpu_get_vtimer_mask(self.id, &mut out))?;
203 | Ok(out)
204 | }
205 |
206 | /// Sets the VTimer mask.
207 | fn set_vtimer_mask(&self, vtimer_is_masked: bool) -> Result<(), Error> {
208 | call!(sys::hv_vcpu_set_vtimer_mask(self.id, vtimer_is_masked))
209 | }
210 |
211 | /// Gets the VTimer offset.
212 | fn vtimer_offset(&self) -> Result<u64, Error> {
213 | let mut out = 0_u64;
214 | call!(sys::hv_vcpu_get_vtimer_offset(self.id, &mut out))?;
215 | Ok(out)
216 | }
217 |
218 | /// Sets the VTimer offset.
219 | fn set_vtimer_offset(&self, vtimer_offset: u64) -> Result<(), Error> {
220 | call!(sys::hv_vcpu_set_vtimer_offset(self.id, vtimer_offset))
221 | }
222 |
223 | /// Returns the underlying `hv_vcpu_exit_t` structure.
224 | fn exit_info(&self) -> VcpuExit {
225 | if self.exit.is_null() {
226 | VcpuExit::default()
227 | } else {
228 | unsafe { *self.exit }
229 | }
230 | }
231 | }
232 |
--------------------------------------------------------------------------------
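A hypothetical run-loop sketch tying `ExitReason` and `VcpuExt` together; the `reason` field on `hv_vcpu_exit_t` comes from the system header and is an assumption here, not something defined by this crate:

```rust
#[cfg(target_arch = "aarch64")]
fn run_loop(cpu: &hv::Vcpu) -> Result<(), hv::Error> {
    use hv::arm64::{ExitReason, VcpuExt};

    loop {
        cpu.run()?;

        let exit = cpu.exit_info();
        match ExitReason::from(exit.reason) {
            ExitReason::Canceled => break,
            ExitReason::Exception => {
                // Decode the guest exception (syndrome, faulting address, ...) here.
                println!("guest exception: {:?}", exit);
                break;
            }
            ExitReason::VTimerActivated => {
                // The VTimer was masked automatically on exit; unmask it once the
                // interrupt has been delivered to the guest's interrupt controller.
                cpu.set_vtimer_mask(false)?;
            }
            ExitReason::Unknown => break,
        }
    }

    Ok(())
}
```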
/hv/src/arm64/regs.rs:
--------------------------------------------------------------------------------
1 | use crate::sys;
2 |
3 | /// Type of an ARM register.
4 | #[allow(non_camel_case_types)]
5 | #[repr(u32)]
6 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
7 | pub enum Reg {
8 | X0 = sys::hv_reg_t_HV_REG_X0,
9 | X1 = sys::hv_reg_t_HV_REG_X1,
10 | X2 = sys::hv_reg_t_HV_REG_X2,
11 | X3 = sys::hv_reg_t_HV_REG_X3,
12 | X4 = sys::hv_reg_t_HV_REG_X4,
13 | X5 = sys::hv_reg_t_HV_REG_X5,
14 | X6 = sys::hv_reg_t_HV_REG_X6,
15 | X7 = sys::hv_reg_t_HV_REG_X7,
16 | X8 = sys::hv_reg_t_HV_REG_X8,
17 | X9 = sys::hv_reg_t_HV_REG_X9,
18 | X10 = sys::hv_reg_t_HV_REG_X10,
19 | X11 = sys::hv_reg_t_HV_REG_X11,
20 | X12 = sys::hv_reg_t_HV_REG_X12,
21 | X13 = sys::hv_reg_t_HV_REG_X13,
22 | X14 = sys::hv_reg_t_HV_REG_X14,
23 | X15 = sys::hv_reg_t_HV_REG_X15,
24 | X16 = sys::hv_reg_t_HV_REG_X16,
25 | X17 = sys::hv_reg_t_HV_REG_X17,
26 | X18 = sys::hv_reg_t_HV_REG_X18,
27 | X19 = sys::hv_reg_t_HV_REG_X19,
28 | X20 = sys::hv_reg_t_HV_REG_X20,
29 | X21 = sys::hv_reg_t_HV_REG_X21,
30 | X22 = sys::hv_reg_t_HV_REG_X22,
31 | X23 = sys::hv_reg_t_HV_REG_X23,
32 | X24 = sys::hv_reg_t_HV_REG_X24,
33 | X25 = sys::hv_reg_t_HV_REG_X25,
34 | X26 = sys::hv_reg_t_HV_REG_X26,
35 | X27 = sys::hv_reg_t_HV_REG_X27,
36 | X28 = sys::hv_reg_t_HV_REG_X28,
37 | X29 = sys::hv_reg_t_HV_REG_X29,
38 | X30 = sys::hv_reg_t_HV_REG_X30,
39 | PC = sys::hv_reg_t_HV_REG_PC,
40 | FPCR = sys::hv_reg_t_HV_REG_FPCR,
41 | FPSR = sys::hv_reg_t_HV_REG_FPSR,
42 | CPSR = sys::hv_reg_t_HV_REG_CPSR,
43 | }
44 |
45 | pub const REG_FP: Reg = Reg::X29;
46 | pub const REG_LR: Reg = Reg::X30;
47 |
48 | pub type SimdFpUchar16 = sys::hv_simd_fp_uchar16_t;
49 |
50 | /// Type of an ARM SIMD & FP register.
51 | #[allow(non_camel_case_types)]
52 | #[repr(u32)]
53 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
54 | pub enum SimdFpReg {
55 | Q0 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q0,
56 | Q1 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q1,
57 | Q2 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q2,
58 | Q3 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q3,
59 | Q4 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q4,
60 | Q5 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q5,
61 | Q6 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q6,
62 | Q7 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q7,
63 | Q8 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q8,
64 | Q9 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q9,
65 | Q10 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q10,
66 | Q11 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q11,
67 | Q12 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q12,
68 | Q13 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q13,
69 | Q14 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q14,
70 | Q15 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q15,
71 | Q16 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q16,
72 | Q17 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q17,
73 | Q18 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q18,
74 | Q19 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q19,
75 | Q20 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q20,
76 | Q21 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q21,
77 | Q22 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q22,
78 | Q23 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q23,
79 | Q24 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q24,
80 | Q25 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q25,
81 | Q26 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q26,
82 | Q27 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q27,
83 | Q28 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q28,
84 | Q29 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q29,
85 | Q30 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q30,
86 | Q31 = sys::hv_simd_fp_reg_t_HV_SIMD_FP_REG_Q31,
87 | }
88 |
89 | /// Type of an ARM system register.
90 | #[allow(non_camel_case_types)]
91 | #[repr(u16)]
92 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
93 | pub enum SysReg {
94 | DBGBVR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR0_EL1,
95 | DBGBCR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR0_EL1,
96 | DBGWVR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR0_EL1,
97 | DBGWCR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR0_EL1,
98 | DBGBVR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR1_EL1,
99 | DBGBCR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR1_EL1,
100 | DBGWVR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR1_EL1,
101 | DBGWCR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR1_EL1,
102 | MDCCINT_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_MDCCINT_EL1,
103 | MDSCR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_MDSCR_EL1,
104 | DBGBVR2_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR2_EL1,
105 | DBGBCR2_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR2_EL1,
106 | DBGWVR2_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR2_EL1,
107 | DBGWCR2_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR2_EL1,
108 | DBGBVR3_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR3_EL1,
109 | DBGBCR3_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR3_EL1,
110 | DBGWVR3_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR3_EL1,
111 | DBGWCR3_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR3_EL1,
112 | DBGBVR4_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR4_EL1,
113 | DBGBCR4_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR4_EL1,
114 | DBGWVR4_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR4_EL1,
115 | DBGWCR4_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR4_EL1,
116 | DBGBVR5_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR5_EL1,
117 | DBGBCR5_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR5_EL1,
118 | DBGWVR5_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR5_EL1,
119 | DBGWCR5_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR5_EL1,
120 | DBGBVR6_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR6_EL1,
121 | DBGBCR6_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR6_EL1,
122 | DBGWVR6_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR6_EL1,
123 | DBGWCR6_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR6_EL1,
124 | DBGBVR7_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR7_EL1,
125 | DBGBCR7_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR7_EL1,
126 | DBGWVR7_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR7_EL1,
127 | DBGWCR7_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR7_EL1,
128 | DBGBVR8_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR8_EL1,
129 | DBGBCR8_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR8_EL1,
130 | DBGWVR8_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR8_EL1,
131 | DBGWCR8_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR8_EL1,
132 | DBGBVR9_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR9_EL1,
133 | DBGBCR9_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR9_EL1,
134 | DBGWVR9_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR9_EL1,
135 | DBGWCR9_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR9_EL1,
136 | DBGBVR10_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR10_EL1,
137 | DBGBCR10_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR10_EL1,
138 | DBGWVR10_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR10_EL1,
139 | DBGWCR10_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR10_EL1,
140 | DBGBVR11_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR11_EL1,
141 | DBGBCR11_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR11_EL1,
142 | DBGWVR11_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR11_EL1,
143 | DBGWCR11_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR11_EL1,
144 | DBGBVR12_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR12_EL1,
145 | DBGBCR12_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR12_EL1,
146 | DBGWVR12_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR12_EL1,
147 | DBGWCR12_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR12_EL1,
148 | DBGBVR13_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR13_EL1,
149 | DBGBCR13_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR13_EL1,
150 | DBGWVR13_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR13_EL1,
151 | DBGWCR13_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR13_EL1,
152 | DBGBVR14_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR14_EL1,
153 | DBGBCR14_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR14_EL1,
154 | DBGWVR14_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR14_EL1,
155 | DBGWCR14_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR14_EL1,
156 | DBGBVR15_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBVR15_EL1,
157 | DBGBCR15_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGBCR15_EL1,
158 | DBGWVR15_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWVR15_EL1,
159 | DBGWCR15_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_DBGWCR15_EL1,
160 | MIDR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_MIDR_EL1,
161 | MPIDR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_MPIDR_EL1,
162 | ID_AA64PFR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64PFR0_EL1,
163 | ID_AA64PFR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64PFR1_EL1,
164 | ID_AA64DFR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64DFR0_EL1,
165 | ID_AA64DFR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64DFR1_EL1,
166 | ID_AA64ISAR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64ISAR0_EL1,
167 | ID_AA64ISAR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64ISAR1_EL1,
168 | ID_AA64MMFR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64MMFR0_EL1,
169 | ID_AA64MMFR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64MMFR1_EL1,
170 | ID_AA64MMFR2_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ID_AA64MMFR2_EL1,
171 | SCTLR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_SCTLR_EL1,
172 | CPACR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_CPACR_EL1,
173 | TTBR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_TTBR0_EL1,
174 | TTBR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_TTBR1_EL1,
175 | TCR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_TCR_EL1,
176 | APIAKEYLO_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APIAKEYLO_EL1,
177 | APIAKEYHI_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APIAKEYHI_EL1,
178 | APIBKEYLO_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APIBKEYLO_EL1,
179 | APIBKEYHI_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APIBKEYHI_EL1,
180 | APDAKEYLO_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APDAKEYLO_EL1,
181 | APDAKEYHI_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APDAKEYHI_EL1,
182 | APDBKEYLO_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APDBKEYLO_EL1,
183 | APDBKEYHI_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APDBKEYHI_EL1,
184 | APGAKEYLO_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APGAKEYLO_EL1,
185 | APGAKEYHI_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_APGAKEYHI_EL1,
186 | SPSR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_SPSR_EL1,
187 | ELR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ELR_EL1,
188 | SP_EL0 = sys::hv_sys_reg_t_HV_SYS_REG_SP_EL0,
189 | AFSR0_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_AFSR0_EL1,
190 | AFSR1_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_AFSR1_EL1,
191 | ESR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_ESR_EL1,
192 | FAR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_FAR_EL1,
193 | PAR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_PAR_EL1,
194 | MAIR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_MAIR_EL1,
195 | AMAIR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_AMAIR_EL1,
196 | VBAR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_VBAR_EL1,
197 | CONTEXTIDR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_CONTEXTIDR_EL1,
198 | TPIDR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_TPIDR_EL1,
199 | CNTKCTL_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_CNTKCTL_EL1,
200 | CSSELR_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_CSSELR_EL1,
201 | TPIDR_EL0 = sys::hv_sys_reg_t_HV_SYS_REG_TPIDR_EL0,
202 | TPIDRRO_EL0 = sys::hv_sys_reg_t_HV_SYS_REG_TPIDRRO_EL0,
203 | CNTV_CTL_EL0 = sys::hv_sys_reg_t_HV_SYS_REG_CNTV_CTL_EL0,
204 | CNTV_CVAL_EL0 = sys::hv_sys_reg_t_HV_SYS_REG_CNTV_CVAL_EL0,
205 | SP_EL1 = sys::hv_sys_reg_t_HV_SYS_REG_SP_EL1,
206 | }
207 |
--------------------------------------------------------------------------------
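For completeness, a tiny hypothetical snippet showing the `SysReg` enum in use through `VcpuExt::get_sys_reg`:

```rust
#[cfg(target_arch = "aarch64")]
fn print_midr(cpu: &hv::Vcpu) -> Result<(), hv::Error> {
    use hv::arm64::{SysReg, VcpuExt};

    // Read the main ID register of the virtual CPU.
    let midr = cpu.get_sys_reg(SysReg::MIDR_EL1)?;
    println!("MIDR_EL1 = {:#x}", midr);
    Ok(())
}
```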
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/hv/src/x86/mod.rs:
--------------------------------------------------------------------------------
1 | //! x86 specific routines.
2 |
3 | use std::ffi::c_void;
4 | use std::mem;
5 | use std::sync::Arc;
6 |
7 | use crate::{call, sys, Addr, Error, GPAddr, Memory, Size, Vcpu, Vm};
8 |
9 | pub mod vmx;
10 |
11 | pub type UVAddr = Addr;
12 |
13 | /// Type of a guest address space.
14 | pub type SpaceId = sys::hv_vm_space_t;
15 |
16 | pub const VM_SPACE_DEFAULT: SpaceId = sys::HV_VM_SPACE_DEFAULT;
17 |
18 | /// The type of system capabilities.
19 | #[repr(u32)]
20 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
21 | pub enum Capability {
22 | VcpuMax = 0,
23 | AddrSpaceMax = 1,
24 | }
25 |
26 | bitflags::bitflags! {
27 | pub struct VmOptions: u64 {
28 | const DEFAULT = sys::HV_VM_DEFAULT as _;
29 | const SPECIFY_MITIGATIONS = sys::HV_VM_SPECIFY_MITIGATIONS as _;
30 | const MITIGATION_A_ENABLE = sys::HV_VM_MITIGATION_A_ENABLE as _;
31 | const MITIGATION_B_ENABLE = sys::HV_VM_MITIGATION_B_ENABLE as _;
32 | const MITIGATION_C_ENABLE = sys::HV_VM_MITIGATION_C_ENABLE as _;
33 | const MITIGATION_D_ENABLE = sys::HV_VM_MITIGATION_D_ENABLE as _;
34 | const MITIGATION_E_ENABLE = sys::HV_VM_MITIGATION_E_ENABLE as _;
35 | }
36 | }
37 |
38 | impl Default for VmOptions {
39 | fn default() -> Self {
40 | VmOptions::DEFAULT
41 | }
42 | }
43 |
44 | /// Represents an additional guest address space.
45 | #[cfg(feature = "hv_10_15")]
46 | #[derive(Debug)]
47 | pub struct Space {
48 | #[allow(dead_code)] // Keep handle alive as long as `Space` exists.
49 | vm: Arc<Vm>,
50 | id: SpaceId,
51 | }
52 |
53 | #[cfg(feature = "hv_10_15")]
54 | impl Space {
55 | fn new(vm: Arc<Vm>) -> Result<Space, Error> {
56 | let mut id: SpaceId = 0;
57 | call!(sys::hv_vm_space_create(&mut id))?;
58 | Ok(Space { vm, id })
59 | }
60 |
61 | /// Returns the underlying space id.
62 | #[inline]
63 | pub fn id(&self) -> SpaceId {
64 | self.id
65 | }
66 |
67 | /// Maps a region in the virtual address space of the current task
68 | /// into a guest physical address space of the VM.
69 | ///
70 | /// # Arguments
71 | /// * `uva` - Page aligned virtual address in the current task.
72 | /// * `gpa` - Page aligned address in the guest physical address space.
73 | /// * `size` - Size in bytes of the region to be mapped.
74 | /// * `flags` - READ, WRITE and EXECUTE permissions of the region.
75 | pub fn map(&self, uva: UVAddr, gpa: GPAddr, size: u64, flags: Memory) -> Result<(), Error> {
76 | call!(sys::hv_vm_map_space(
77 | self.id,
78 | uva as *const c_void,
79 | gpa,
80 | size,
81 | flags.bits() as _
82 | ))
83 | }
84 |
85 | /// Unmaps a region in a guest physical address space of the VM.
86 | ///
87 | /// # Arguments
88 | /// * `gpa` - Page aligned address in the guest physical address space.
89 | /// * `size` - Size in bytes of the region to be unmapped.
90 | pub fn unmap(&self, gpa: GPAddr, size: Size) -> Result<(), Error> {
91 | call!(sys::hv_vm_unmap_space(self.id, gpa, size))
92 | }
93 |
94 | /// Modifies the permissions of a region in a guest physical address space of the VM.
95 | ///
96 | /// # Arguments
97 | /// * `gpa` - Page aligned address in the guest physical address space.
98 | /// * `size` - Size in bytes of the region to be modified.
99 | /// * `flags` - New READ, WRITE and EXECUTE permissions of the region.
100 | pub fn protect(&self, gpa: GPAddr, size: Size, flags: Memory) -> Result<(), Error> {
101 | call!(sys::hv_vm_protect_space(
102 | self.id,
103 | gpa,
104 | size,
105 | flags.bits() as _
106 | ))
107 | }
108 | }
109 |
110 | #[cfg(feature = "hv_10_15")]
111 | impl Drop for Space {
112 | fn drop(&mut self) {
113 | call!(sys::hv_vm_space_destroy(self.id)).unwrap()
114 | }
115 | }
116 |
117 | pub trait VmExt {
118 | /// Gets the value of capabilities of the system.
119 | fn capability(&self, cap: Capability) -> Result<u64, Error>;
120 |
121 | /// Creates an additional guest address space for the current task.
122 | #[cfg(feature = "hv_10_15")]
123 | fn create_space(self: Arc<Self>) -> Result<Space, Error>;
124 |
125 | /// Synchronizes guest TSC across all vCPUs.
126 | fn sync_tsc(tsc: u64) -> Result<(), Error>;
127 | }
128 |
129 | /// x86 specific routines for vCPU.
130 | pub trait VcpuExt {
131 | /// Executes a vCPU until the given deadline.
132 | #[cfg(feature = "hv_10_15")]
133 | fn run_until(&self, deadline: u64) -> Result<(), Error>;
134 |
135 | /// Forces flushing of cached vCPU state.
136 | fn flush(&self) -> Result<(), Error>;
137 |
138 | /// Invalidates the TLB of a vCPU.
139 | fn invalidate_tlb(&self) -> Result<(), Error>;
140 |
141 | /// Associates the vCPU instance with an allocated address space.
142 | #[cfg(feature = "hv_10_15")]
143 | fn set_space(&self, space: &Space) -> Result<(), Error>;
144 |
145 | /// Forces an immediate VMEXIT of the vCPU.
146 | fn interrupt(&self) -> Result<(), Error>;
147 |
148 | /// Enables an MSR to be used natively by the VM.
149 | fn enable_native_msr(&self, msr: u32, enable: bool) -> Result<(), Error>;
150 |
151 | /// Returns the current value of an MSR of a vCPU.
152 | fn read_msr(&self, msr: u32) -> Result<u64, Error>;
153 |
154 | /// Set the value of an MSR of a vCPU.
155 | fn write_msr(&self, msr: u32, value: u64) -> Result<(), Error>;
156 |
157 | /// Returns the current value of an architectural x86 register of a vCPU.
158 | fn read_register(&self, reg: Reg) -> Result<u64, Error>;
159 |
160 | /// Set the value of an architectural x86 register of a vCPU.
161 | fn write_register(&self, reg: Reg, value: u64) -> Result<(), Error>;
162 |
163 | /// Returns the current architectural x86 floating point and SIMD state of a vCPU.
164 | /// Structure and size are defined by the XSAVE feature set of the host processor.
165 | fn read_fpstate(&self, buffer: &mut [u8]) -> Result<(), Error>;
166 |
167 | /// Sets the architectural x86 floating point and SIMD state of a vCPU.
168 | fn write_fpstate(&self, buffer: &[u8]) -> Result<(), Error>;
169 | }
170 |
171 | impl VmExt for Vm {
172 | /// Gets the value of capabilities of the system.
173 | fn capability(&self, cap: Capability) -> Result<u64, Error> {
174 | let mut out = 0_u64;
175 | call!(sys::hv_capability(cap as u64, &mut out))?;
176 | Ok(out)
177 | }
178 |
179 | /// Creates an additional guest address space for the current task.
180 | #[cfg(feature = "hv_10_15")]
181 | fn create_space(self: Arc<Self>) -> Result<Space, Error> {
182 | Space::new(Arc::clone(&self))
183 | }
184 |
185 | /// Synchronizes guest TSC across all vCPUs.
186 | fn sync_tsc(tsc: u64) -> Result<(), Error> {
187 | call!(sys::hv_vm_sync_tsc(tsc))
188 | }
189 | }
190 |
191 | impl VcpuExt for Vcpu {
192 | /// Executes a vCPU until the given deadline.
193 | #[cfg(feature = "hv_10_15")]
194 | fn run_until(&self, deadline: u64) -> Result<(), Error> {
195 | call!(sys::hv_vcpu_run_until(self.id, deadline))
196 | }
197 |
198 | /// Forces flushing of cached vCPU state.
199 | fn flush(&self) -> Result<(), Error> {
200 | call!(sys::hv_vcpu_flush(self.id))
201 | }
202 |
203 | /// Invalidates the TLB of a vCPU.
204 | fn invalidate_tlb(&self) -> Result<(), Error> {
205 | call!(sys::hv_vcpu_invalidate_tlb(self.id))
206 | }
207 |
208 | /// Associates the vCPU instance with an allocated address space.
209 | #[cfg(feature = "hv_10_15")]
210 | fn set_space(&self, space: &Space) -> Result<(), Error> {
211 | call!(sys::hv_vcpu_set_space(self.id, space.id()))
212 | }
213 |
214 | /// Forces an immediate VMEXIT of the vCPU.
215 | fn interrupt(&self) -> Result<(), Error> {
216 | call!(sys::hv_vcpu_interrupt(mem::transmute(&self.id), 1))
217 | }
218 |
219 | /// Enables an MSR to be used natively by the VM.
220 | fn enable_native_msr(&self, msr: u32, enable: bool) -> Result<(), Error> {
221 | call!(sys::hv_vcpu_enable_native_msr(self.id, msr, enable))
222 | }
223 |
224 | /// Returns the current value of an MSR of a vCPU.
225 | fn read_msr(&self, msr: u32) -> Result<u64, Error> {
226 | let mut value = 0_u64;
227 | call!(sys::hv_vcpu_read_msr(self.id, msr, &mut value))?;
228 | Ok(value)
229 | }
230 |
231 | /// Set the value of an MSR of a vCPU.
232 | fn write_msr(&self, msr: u32, value: u64) -> Result<(), Error> {
233 | call!(sys::hv_vcpu_write_msr(self.id, msr, value))
234 | }
235 |
236 | /// Returns the current value of an architectural x86 register of a vCPU.
237 | fn read_register(&self, reg: Reg) -> Result<u64, Error> {
238 | let mut value = 0_u64;
239 | call!(sys::hv_vcpu_read_register(
240 | self.id,
241 | reg as sys::hv_x86_reg_t,
242 | &mut value
243 | ))?;
244 | Ok(value)
245 | }
246 |
247 | /// Set the value of an architectural x86 register of a vCPU.
248 | fn write_register(&self, reg: Reg, value: u64) -> Result<(), Error> {
249 | call!(sys::hv_vcpu_write_register(
250 | self.id,
251 | reg as sys::hv_x86_reg_t,
252 | value
253 | ))
254 | }
255 |
256 | /// Returns the current architectural x86 floating point and SIMD state of a vCPU.
257 | /// Structure and size are defined by the XSAVE feature set of the host processor.
258 | fn read_fpstate(&self, buffer: &mut [u8]) -> Result<(), Error> {
259 | call!(sys::hv_vcpu_read_fpstate(
260 | self.id,
261 | buffer.as_mut_ptr() as *mut c_void,
262 | buffer.len() as u64
263 | ))
264 | }
265 |
266 | /// Sets the architectural x86 floating point and SIMD state of a vCPU.
267 | fn write_fpstate(&self, buffer: &[u8]) -> Result<(), Error> {
268 | call!(sys::hv_vcpu_write_fpstate(
269 | self.id,
270 | buffer.as_ptr() as *mut c_void,
271 | buffer.len() as u64
272 | ))
273 | }
274 | }
275 |
276 | /// x86 architecture register IDs.
277 | #[allow(non_camel_case_types)]
278 | #[non_exhaustive]
279 | #[repr(u32)]
280 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
281 | pub enum Reg {
282 | RIP = sys::hv_x86_reg_t_HV_X86_RIP,
283 | RFLAGS = sys::hv_x86_reg_t_HV_X86_RFLAGS,
284 | RAX = sys::hv_x86_reg_t_HV_X86_RAX,
285 | RCX = sys::hv_x86_reg_t_HV_X86_RCX,
286 | RDX = sys::hv_x86_reg_t_HV_X86_RDX,
287 | RBX = sys::hv_x86_reg_t_HV_X86_RBX,
288 | RSI = sys::hv_x86_reg_t_HV_X86_RSI,
289 | RDI = sys::hv_x86_reg_t_HV_X86_RDI,
290 | RSP = sys::hv_x86_reg_t_HV_X86_RSP,
291 | RBP = sys::hv_x86_reg_t_HV_X86_RBP,
292 | R8 = sys::hv_x86_reg_t_HV_X86_R8,
293 | R9 = sys::hv_x86_reg_t_HV_X86_R9,
294 | R10 = sys::hv_x86_reg_t_HV_X86_R10,
295 | R11 = sys::hv_x86_reg_t_HV_X86_R11,
296 | R12 = sys::hv_x86_reg_t_HV_X86_R12,
297 | R13 = sys::hv_x86_reg_t_HV_X86_R13,
298 | R14 = sys::hv_x86_reg_t_HV_X86_R14,
299 | R15 = sys::hv_x86_reg_t_HV_X86_R15,
300 | CS = sys::hv_x86_reg_t_HV_X86_CS,
301 | SS = sys::hv_x86_reg_t_HV_X86_SS,
302 | DS = sys::hv_x86_reg_t_HV_X86_DS,
303 | ES = sys::hv_x86_reg_t_HV_X86_ES,
304 | FS = sys::hv_x86_reg_t_HV_X86_FS,
305 | GS = sys::hv_x86_reg_t_HV_X86_GS,
306 | IDT_BASE = sys::hv_x86_reg_t_HV_X86_IDT_BASE,
307 | IDT_LIMIT = sys::hv_x86_reg_t_HV_X86_IDT_LIMIT,
308 | GDT_BASE = sys::hv_x86_reg_t_HV_X86_GDT_BASE,
309 | GDT_LIMIT = sys::hv_x86_reg_t_HV_X86_GDT_LIMIT,
310 | LDTR = sys::hv_x86_reg_t_HV_X86_LDTR,
311 | LDT_BASE = sys::hv_x86_reg_t_HV_X86_LDT_BASE,
312 | LDT_LIMIT = sys::hv_x86_reg_t_HV_X86_LDT_LIMIT,
313 | LDT_AR = sys::hv_x86_reg_t_HV_X86_LDT_AR,
314 | TR = sys::hv_x86_reg_t_HV_X86_TR,
315 | TSS_BASE = sys::hv_x86_reg_t_HV_X86_TSS_BASE,
316 | TSS_LIMIT = sys::hv_x86_reg_t_HV_X86_TSS_LIMIT,
317 | TSS_AR = sys::hv_x86_reg_t_HV_X86_TSS_AR,
318 | CR0 = sys::hv_x86_reg_t_HV_X86_CR0,
319 | CR1 = sys::hv_x86_reg_t_HV_X86_CR1,
320 | CR2 = sys::hv_x86_reg_t_HV_X86_CR2,
321 | CR3 = sys::hv_x86_reg_t_HV_X86_CR3,
322 | CR4 = sys::hv_x86_reg_t_HV_X86_CR4,
323 | DR0 = sys::hv_x86_reg_t_HV_X86_DR0,
324 | DR1 = sys::hv_x86_reg_t_HV_X86_DR1,
325 | DR2 = sys::hv_x86_reg_t_HV_X86_DR2,
326 | DR3 = sys::hv_x86_reg_t_HV_X86_DR3,
327 | DR4 = sys::hv_x86_reg_t_HV_X86_DR4,
328 | DR5 = sys::hv_x86_reg_t_HV_X86_DR5,
329 | DR6 = sys::hv_x86_reg_t_HV_X86_DR6,
330 | DR7 = sys::hv_x86_reg_t_HV_X86_DR7,
331 | TPR = sys::hv_x86_reg_t_HV_X86_TPR,
332 | XCR0 = sys::hv_x86_reg_t_HV_X86_XCR0,
333 | MAX = sys::hv_x86_reg_t_HV_X86_REGISTERS_MAX,
334 | }
335 |
--------------------------------------------------------------------------------
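
A minimal usage sketch for the `VmExt` and `Space` APIs above: query a system capability, create an extra guest address space, map a page-aligned host buffer into it, then tighten and tear down the mapping. This assumes the `hv_10_15` feature is enabled, that `Memory::READ`/`Memory::WRITE` are the permission flag names hinted at by the doc comments, and that `GPAddr`, `UVAddr`, and `Size` are plain address/size aliases re-exported from the crate root; `GUEST_BASE` is an arbitrary address chosen for illustration.

```rust
use std::alloc::{alloc_zeroed, dealloc, Layout};
use std::sync::Arc;

use hv::x86::{Capability, UVAddr, VmExt, VmOptions};
use hv::{GPAddr, Memory, Vm};

const PAGE_SIZE: usize = 0x1000;
// Arbitrary guest-physical base picked for this sketch.
const GUEST_BASE: GPAddr = 0x4000_0000;

fn main() -> Result<(), hv::Error> {
    // Create the VM and ask how many extra guest address spaces the host supports.
    let vm = Arc::new(Vm::new(VmOptions::default())?);
    println!("Max address spaces: {}", vm.capability(Capability::AddrSpaceMax)?);

    // `create_space` takes `self: Arc<Self>`, so the returned `Space` keeps the VM alive
    // (requires the `hv_10_15` feature).
    let space = Arc::clone(&vm).create_space()?;

    // Hypervisor.framework works at page granularity, so the host buffer must be page aligned.
    let layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).expect("valid layout");
    let host = unsafe { alloc_zeroed(layout) };
    assert!(!host.is_null());

    // Map the buffer read/write into the extra address space, tighten it to read-only,
    // then unmap it again. The `Memory::READ`/`WRITE` names are assumed, not taken from
    // this file.
    space.map(host as UVAddr, GUEST_BASE, PAGE_SIZE as u64, Memory::READ | Memory::WRITE)?;
    space.protect(GUEST_BASE, PAGE_SIZE as _, Memory::READ)?;
    space.unmap(GUEST_BASE, PAGE_SIZE as _)?;

    unsafe { dealloc(host, layout) };
    Ok(())
}
```
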
/hv/src/x86/vmx.rs:
--------------------------------------------------------------------------------
1 | //! VMX extensions.
2 |
3 | use crate::{call, sys, Error, Vcpu};
4 |
5 | /// Enum type of VMX capability fields.
6 | #[repr(u32)]
7 | #[non_exhaustive]
8 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
9 | pub enum Capability {
10 | /// Pin-based VMX capabilities.
11 | PinBased = sys::hv_vmx_capability_t_HV_VMX_CAP_PINBASED,
12 | /// Primary proc-based VMX capabilities.
13 | ProcBased = sys::hv_vmx_capability_t_HV_VMX_CAP_PROCBASED,
14 | /// Secondary proc-based VMX capabilities.
15 | ProcBased2 = sys::hv_vmx_capability_t_HV_VMX_CAP_PROCBASED2,
16 | /// VM-entry VMX capabilities.
17 | Entry = sys::hv_vmx_capability_t_HV_VMX_CAP_ENTRY,
18 | /// VM-exit VMX capabilities.
19 | Exit = sys::hv_vmx_capability_t_HV_VMX_CAP_EXIT,
20 | /// VMX preemption timer frequency.
21 | PreemptionTimer = sys::hv_vmx_capability_t_HV_VMX_CAP_PREEMPTION_TIMER,
22 | }
23 |
24 | /// Returns the VMX capabilities of the host processor.
25 | pub fn read_capability(field: Capability) -> Result<u64, Error> {
26 | let mut out = 0_u64;
27 | call!(sys::hv_vmx_read_capability(field as u32, &mut out))?;
28 | Ok(out)
29 | }
30 |
31 | bitflags::bitflags! {
32 | #[cfg(feature = "hv_10_15")]
33 | pub struct ShadowFlags: u32 {
34 | const NONE = sys::HV_SHADOW_VMCS_NONE;
35 | const READ = sys::HV_SHADOW_VMCS_READ;
36 | const WRITE = sys::HV_SHADOW_VMCS_WRITE;
37 | }
38 | }
39 |
40 | pub trait VCpuVmxExt {
41 | /// Returns the current value of a VMCS field of a vCPU.
42 | fn read_vmcs(&self, field: Vmcs) -> Result<u64, Error>;
43 |
44 | /// Set the value of a VMCS field of a vCPU.
45 | fn write_vmcs(&self, field: Vmcs, value: u64) -> Result<(), Error>;
46 |
47 | /// Returns the current value of a shadow VMCS field of a vCPU.
48 | #[cfg(feature = "hv_10_15")]
49 | fn read_shadow_vmcs(&self, field: Vmcs) -> Result<u64, Error>;
50 |
51 | /// Set the value of a shadow VMCS field of a vCPU.
52 | #[cfg(feature = "hv_10_15")]
53 | fn write_shadow_vmcs(&self, field: Vmcs, value: u64) -> Result<(), Error>;
54 |
55 | /// Set the access permissions of a shadow VMCS field of a vCPU.
56 | #[cfg(feature = "hv_10_15")]
57 | fn set_shadow_access(&self, field: Vmcs, flags: ShadowFlags) -> Result<(), Error>;
58 | }
59 |
60 | impl VCpuVmxExt for Vcpu {
61 | /// Returns the current value of a VMCS field of a vCPU.
62 | fn read_vmcs(&self, field: Vmcs) -> Result<u64, Error> {
63 | let mut out = 0_u64;
64 | call!(sys::hv_vmx_vcpu_read_vmcs(self.id, field as u32, &mut out))?;
65 | Ok(out)
66 | }
67 |
68 | /// Set the value of a VMCS field of a vCPU.
69 | fn write_vmcs(&self, field: Vmcs, value: u64) -> Result<(), Error> {
70 | call!(sys::hv_vmx_vcpu_write_vmcs(self.id, field as u32, value))
71 | }
72 |
73 | /// Returns the current value of a shadow VMCS field of a vCPU.
74 | #[cfg(feature = "hv_10_15")]
75 | fn read_shadow_vmcs(&self, field: Vmcs) -> Result<u64, Error> {
76 | let mut out = 0_u64;
77 | call!(sys::hv_vmx_vcpu_read_shadow_vmcs(
78 | self.id,
79 | field as u32,
80 | &mut out
81 | ))?;
82 | Ok(out)
83 | }
84 |
85 | /// Set the value of a shadow VMCS field of a vCPU.
86 | #[cfg(feature = "hv_10_15")]
87 | fn write_shadow_vmcs(&self, field: Vmcs, value: u64) -> Result<(), Error> {
88 | call!(sys::hv_vmx_vcpu_write_shadow_vmcs(
89 | self.id,
90 | field as u32,
91 | value
92 | ))
93 | }
94 |
95 | /// Set the access permissions of a shadow VMCS field of a vCPU.
96 | #[cfg(feature = "hv_10_15")]
97 | fn set_shadow_access(&self, field: Vmcs, flags: ShadowFlags) -> Result<(), Error> {
98 | call!(sys::hv_vmx_vcpu_set_shadow_access(
99 | self.id,
100 | field as u32,
101 | flags.bits() as u64
102 | ))
103 | }
104 | }
105 |
106 | /// Virtual Machine Control Structure (VMCS) Field IDs.
107 | /// Identify the fields of the virtual machine control structure.
108 | #[allow(non_camel_case_types)]
109 | #[non_exhaustive]
110 | #[repr(u32)]
111 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
112 | pub enum Vmcs {
113 | VPID = sys::VMCS_VPID,
114 | CTRL_POSTED_INT_N_VECTOR = sys::VMCS_CTRL_POSTED_INT_N_VECTOR,
115 | CTRL_EPTP_INDEX = sys::VMCS_CTRL_EPTP_INDEX,
116 | GUEST_ES = sys::VMCS_GUEST_ES,
117 | GUEST_CS = sys::VMCS_GUEST_CS,
118 | GUEST_SS = sys::VMCS_GUEST_SS,
119 | GUEST_DS = sys::VMCS_GUEST_DS,
120 | GUEST_FS = sys::VMCS_GUEST_FS,
121 | GUEST_GS = sys::VMCS_GUEST_GS,
122 | GUEST_LDTR = sys::VMCS_GUEST_LDTR,
123 | GUEST_TR = sys::VMCS_GUEST_TR,
124 | GUEST_INT_STATUS = sys::VMCS_GUEST_INT_STATUS,
125 | GUESTPML_INDEX = sys::VMCS_GUESTPML_INDEX,
126 | HOST_ES = sys::VMCS_HOST_ES,
127 | HOST_CS = sys::VMCS_HOST_CS,
128 | HOST_SS = sys::VMCS_HOST_SS,
129 | HOST_DS = sys::VMCS_HOST_DS,
130 | HOST_FS = sys::VMCS_HOST_FS,
131 | HOST_GS = sys::VMCS_HOST_GS,
132 | HOST_TR = sys::VMCS_HOST_TR,
133 | CTRL_IO_BITMAP_A = sys::VMCS_CTRL_IO_BITMAP_A,
134 | CTRL_IO_BITMAP_B = sys::VMCS_CTRL_IO_BITMAP_B,
135 | CTRL_MSR_BITMAPS = sys::VMCS_CTRL_MSR_BITMAPS,
136 | CTRL_VMEXIT_MSR_STORE_ADDR = sys::VMCS_CTRL_VMEXIT_MSR_STORE_ADDR,
137 | CTRL_VMEXIT_MSR_LOAD_ADDR = sys::VMCS_CTRL_VMEXIT_MSR_LOAD_ADDR,
138 | CTRL_VMENTRY_MSR_LOAD_ADDR = sys::VMCS_CTRL_VMENTRY_MSR_LOAD_ADDR,
139 | CTRL_EXECUTIVE_VMCS_PTR = sys::VMCS_CTRL_EXECUTIVE_VMCS_PTR,
140 | CTRL_PML_ADDR = sys::VMCS_CTRL_PML_ADDR,
141 | CTRL_TSC_OFFSET = sys::VMCS_CTRL_TSC_OFFSET,
142 | CTRL_VIRTUAL_APIC = sys::VMCS_CTRL_VIRTUAL_APIC,
143 | CTRL_APIC_ACCESS = sys::VMCS_CTRL_APIC_ACCESS,
144 | CTRL_POSTED_INT_DESC_ADDR = sys::VMCS_CTRL_POSTED_INT_DESC_ADDR,
145 | CTRL_VMFUNC_CTRL = sys::VMCS_CTRL_VMFUNC_CTRL,
146 | CTRL_EPTP = sys::VMCS_CTRL_EPTP,
147 | CTRL_EOI_EXIT_BITMAP_0 = sys::VMCS_CTRL_EOI_EXIT_BITMAP_0,
148 | CTRL_EOI_EXIT_BITMAP_1 = sys::VMCS_CTRL_EOI_EXIT_BITMAP_1,
149 | CTRL_EOI_EXIT_BITMAP_2 = sys::VMCS_CTRL_EOI_EXIT_BITMAP_2,
150 | CTRL_EOI_EXIT_BITMAP_3 = sys::VMCS_CTRL_EOI_EXIT_BITMAP_3,
151 | CTRL_EPTP_LIST_ADDR = sys::VMCS_CTRL_EPTP_LIST_ADDR,
152 | CTRL_VMREAD_BITMAP_ADDR = sys::VMCS_CTRL_VMREAD_BITMAP_ADDR,
153 | CTRL_VMWRITE_BITMAP_ADDR = sys::VMCS_CTRL_VMWRITE_BITMAP_ADDR,
154 | CTRL_VIRT_EXC_INFO_ADDR = sys::VMCS_CTRL_VIRT_EXC_INFO_ADDR,
155 | CTRL_XSS_EXITING_BITMAP = sys::VMCS_CTRL_XSS_EXITING_BITMAP,
156 | CTRL_ENCLS_EXITING_BITMAP = sys::VMCS_CTRL_ENCLS_EXITING_BITMAP,
157 | CTRL_TSC_MULTIPLIER = sys::VMCS_CTRL_TSC_MULTIPLIER,
158 | GUEST_PHYSICAL_ADDRESS = sys::VMCS_GUEST_PHYSICAL_ADDRESS,
159 | GUEST_LINK_POINTER = sys::VMCS_GUEST_LINK_POINTER,
160 | GUEST_IA32_DEBUGCTL = sys::VMCS_GUEST_IA32_DEBUGCTL,
161 | GUEST_IA32_PAT = sys::VMCS_GUEST_IA32_PAT,
162 | GUEST_IA32_EFER = sys::VMCS_GUEST_IA32_EFER,
163 | GUEST_IA32_PERF_GLOBAL_CTRL = sys::VMCS_GUEST_IA32_PERF_GLOBAL_CTRL,
164 | GUEST_PDPTE0 = sys::VMCS_GUEST_PDPTE0,
165 | GUEST_PDPTE1 = sys::VMCS_GUEST_PDPTE1,
166 | GUEST_PDPTE2 = sys::VMCS_GUEST_PDPTE2,
167 | GUEST_PDPTE3 = sys::VMCS_GUEST_PDPTE3,
168 | GUEST_IA32_BNDCFGS = sys::VMCS_GUEST_IA32_BNDCFGS,
169 | HOST_IA32_PAT = sys::VMCS_HOST_IA32_PAT,
170 | HOST_IA32_EFER = sys::VMCS_HOST_IA32_EFER,
171 | HOST_IA32_PERF_GLOBAL_CTRL = sys::VMCS_HOST_IA32_PERF_GLOBAL_CTRL,
172 | CTRL_PIN_BASED = sys::VMCS_CTRL_PIN_BASED,
173 | CTRL_CPU_BASED = sys::VMCS_CTRL_CPU_BASED,
174 | CTRL_EXC_BITMAP = sys::VMCS_CTRL_EXC_BITMAP,
175 | CTRL_PF_ERROR_MASK = sys::VMCS_CTRL_PF_ERROR_MASK,
176 | CTRL_PF_ERROR_MATCH = sys::VMCS_CTRL_PF_ERROR_MATCH,
177 | CTRL_CR3_COUNT = sys::VMCS_CTRL_CR3_COUNT,
178 | CTRL_VMEXIT_CONTROLS = sys::VMCS_CTRL_VMEXIT_CONTROLS,
179 | CTRL_VMEXIT_MSR_STORE_COUNT = sys::VMCS_CTRL_VMEXIT_MSR_STORE_COUNT,
180 | CTRL_VMEXIT_MSR_LOAD_COUNT = sys::VMCS_CTRL_VMEXIT_MSR_LOAD_COUNT,
181 | CTRL_VMENTRY_CONTROLS = sys::VMCS_CTRL_VMENTRY_CONTROLS,
182 | CTRL_VMENTRY_MSR_LOAD_COUNT = sys::VMCS_CTRL_VMENTRY_MSR_LOAD_COUNT,
183 | CTRL_VMENTRY_IRQ_INFO = sys::VMCS_CTRL_VMENTRY_IRQ_INFO,
184 | CTRL_VMENTRY_EXC_ERROR = sys::VMCS_CTRL_VMENTRY_EXC_ERROR,
185 | CTRL_VMENTRY_INSTR_LEN = sys::VMCS_CTRL_VMENTRY_INSTR_LEN,
186 | CTRL_TPR_THRESHOLD = sys::VMCS_CTRL_TPR_THRESHOLD,
187 | CTRL_CPU_BASED2 = sys::VMCS_CTRL_CPU_BASED2,
188 | CTRL_PLE_GAP = sys::VMCS_CTRL_PLE_GAP,
189 | CTRL_PLE_WINDOW = sys::VMCS_CTRL_PLE_WINDOW,
190 | RO_INSTR_ERROR = sys::VMCS_RO_INSTR_ERROR,
191 | RO_EXIT_REASON = sys::VMCS_RO_EXIT_REASON,
192 | RO_VMEXIT_IRQ_INFO = sys::VMCS_RO_VMEXIT_IRQ_INFO,
193 | RO_VMEXIT_IRQ_ERROR = sys::VMCS_RO_VMEXIT_IRQ_ERROR,
194 | RO_IDT_VECTOR_INFO = sys::VMCS_RO_IDT_VECTOR_INFO,
195 | RO_IDT_VECTOR_ERROR = sys::VMCS_RO_IDT_VECTOR_ERROR,
196 | RO_VMEXIT_INSTR_LEN = sys::VMCS_RO_VMEXIT_INSTR_LEN,
197 | RO_VMX_INSTR_INFO = sys::VMCS_RO_VMX_INSTR_INFO,
198 | GUEST_ES_LIMIT = sys::VMCS_GUEST_ES_LIMIT,
199 | GUEST_CS_LIMIT = sys::VMCS_GUEST_CS_LIMIT,
200 | GUEST_SS_LIMIT = sys::VMCS_GUEST_SS_LIMIT,
201 | GUEST_DS_LIMIT = sys::VMCS_GUEST_DS_LIMIT,
202 | GUEST_FS_LIMIT = sys::VMCS_GUEST_FS_LIMIT,
203 | GUEST_GS_LIMIT = sys::VMCS_GUEST_GS_LIMIT,
204 | GUEST_LDTR_LIMIT = sys::VMCS_GUEST_LDTR_LIMIT,
205 | GUEST_TR_LIMIT = sys::VMCS_GUEST_TR_LIMIT,
206 | GUEST_GDTR_LIMIT = sys::VMCS_GUEST_GDTR_LIMIT,
207 | GUEST_IDTR_LIMIT = sys::VMCS_GUEST_IDTR_LIMIT,
208 | GUEST_ES_AR = sys::VMCS_GUEST_ES_AR,
209 | GUEST_CS_AR = sys::VMCS_GUEST_CS_AR,
210 | GUEST_SS_AR = sys::VMCS_GUEST_SS_AR,
211 | GUEST_DS_AR = sys::VMCS_GUEST_DS_AR,
212 | GUEST_FS_AR = sys::VMCS_GUEST_FS_AR,
213 | GUEST_GS_AR = sys::VMCS_GUEST_GS_AR,
214 | GUEST_LDTR_AR = sys::VMCS_GUEST_LDTR_AR,
215 | GUEST_TR_AR = sys::VMCS_GUEST_TR_AR,
216 | GUEST_IGNORE_IRQ = sys::VMCS_GUEST_IGNORE_IRQ,
217 | GUEST_ACTIVITY_STATE = sys::VMCS_GUEST_ACTIVITY_STATE,
218 | GUEST_SMBASE = sys::VMCS_GUEST_SMBASE,
219 | GUEST_IA32_SYSENTER_CS = sys::VMCS_GUEST_IA32_SYSENTER_CS,
220 | GUEST_VMX_TIMER_VALUE = sys::VMCS_GUEST_VMX_TIMER_VALUE,
221 | HOST_IA32_SYSENTER_CS = sys::VMCS_HOST_IA32_SYSENTER_CS,
222 | CTRL_CR0_MASK = sys::VMCS_CTRL_CR0_MASK,
223 | CTRL_CR4_MASK = sys::VMCS_CTRL_CR4_MASK,
224 | CTRL_CR0_SHADOW = sys::VMCS_CTRL_CR0_SHADOW,
225 | CTRL_CR4_SHADOW = sys::VMCS_CTRL_CR4_SHADOW,
226 | CTRL_CR3_VALUE0 = sys::VMCS_CTRL_CR3_VALUE0,
227 | CTRL_CR3_VALUE1 = sys::VMCS_CTRL_CR3_VALUE1,
228 | CTRL_CR3_VALUE2 = sys::VMCS_CTRL_CR3_VALUE2,
229 | CTRL_CR3_VALUE3 = sys::VMCS_CTRL_CR3_VALUE3,
230 | RO_EXIT_QUALIFIC = sys::VMCS_RO_EXIT_QUALIFIC,
231 | RO_IO_RCX = sys::VMCS_RO_IO_RCX,
232 | RO_IO_RSI = sys::VMCS_RO_IO_RSI,
233 | RO_IO_RDI = sys::VMCS_RO_IO_RDI,
234 | RO_IO_RIP = sys::VMCS_RO_IO_RIP,
235 | RO_GUEST_LIN_ADDR = sys::VMCS_RO_GUEST_LIN_ADDR,
236 | GUEST_CR0 = sys::VMCS_GUEST_CR0,
237 | GUEST_CR3 = sys::VMCS_GUEST_CR3,
238 | GUEST_CR4 = sys::VMCS_GUEST_CR4,
239 | GUEST_ES_BASE = sys::VMCS_GUEST_ES_BASE,
240 | GUEST_CS_BASE = sys::VMCS_GUEST_CS_BASE,
241 | GUEST_SS_BASE = sys::VMCS_GUEST_SS_BASE,
242 | GUEST_DS_BASE = sys::VMCS_GUEST_DS_BASE,
243 | GUEST_FS_BASE = sys::VMCS_GUEST_FS_BASE,
244 | GUEST_GS_BASE = sys::VMCS_GUEST_GS_BASE,
245 | GUEST_LDTR_BASE = sys::VMCS_GUEST_LDTR_BASE,
246 | GUEST_TR_BASE = sys::VMCS_GUEST_TR_BASE,
247 | GUEST_GDTR_BASE = sys::VMCS_GUEST_GDTR_BASE,
248 | GUEST_IDTR_BASE = sys::VMCS_GUEST_IDTR_BASE,
249 | GUEST_DR7 = sys::VMCS_GUEST_DR7,
250 | GUEST_RSP = sys::VMCS_GUEST_RSP,
251 | GUEST_RIP = sys::VMCS_GUEST_RIP,
252 | GUEST_RFLAGS = sys::VMCS_GUEST_RFLAGS,
253 | GUEST_DEBUG_EXC = sys::VMCS_GUEST_DEBUG_EXC,
254 | GUEST_SYSENTER_ESP = sys::VMCS_GUEST_SYSENTER_ESP,
255 | GUEST_SYSENTER_EIP = sys::VMCS_GUEST_SYSENTER_EIP,
256 | HOST_CR0 = sys::VMCS_HOST_CR0,
257 | HOST_CR3 = sys::VMCS_HOST_CR3,
258 | HOST_CR4 = sys::VMCS_HOST_CR4,
259 | HOST_FS_BASE = sys::VMCS_HOST_FS_BASE,
260 | HOST_GS_BASE = sys::VMCS_HOST_GS_BASE,
261 | HOST_TR_BASE = sys::VMCS_HOST_TR_BASE,
262 | HOST_GDTR_BASE = sys::VMCS_HOST_GDTR_BASE,
263 | HOST_IDTR_BASE = sys::VMCS_HOST_IDTR_BASE,
264 | HOST_IA32_SYSENTER_ESP = sys::VMCS_HOST_IA32_SYSENTER_ESP,
265 | HOST_IA32_SYSENTER_EIP = sys::VMCS_HOST_IA32_SYSENTER_EIP,
266 | HOST_RSP = sys::VMCS_HOST_RSP,
267 | HOST_RIP = sys::VMCS_HOST_RIP,
268 | MAX = sys::VMCS_MAX,
269 | }
270 |
271 | #[allow(non_camel_case_types)]
272 | #[non_exhaustive]
273 | #[repr(u32)]
274 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
275 | pub enum Reason {
276 | EXC_NMI = sys::VMX_REASON_EXC_NMI,
277 | IRQ = sys::VMX_REASON_IRQ,
278 | TRIPLE_FAULT = sys::VMX_REASON_TRIPLE_FAULT,
279 | INIT = sys::VMX_REASON_INIT,
280 | SIPI = sys::VMX_REASON_SIPI,
281 | IO_SMI = sys::VMX_REASON_IO_SMI,
282 | OTHER_SMI = sys::VMX_REASON_OTHER_SMI,
283 | IRQ_WND = sys::VMX_REASON_IRQ_WND,
284 | VIRTUAL_NMI_WND = sys::VMX_REASON_VIRTUAL_NMI_WND,
285 | TASK = sys::VMX_REASON_TASK,
286 | CPUID = sys::VMX_REASON_CPUID,
287 | GETSEC = sys::VMX_REASON_GETSEC,
288 | HLT = sys::VMX_REASON_HLT,
289 | INVD = sys::VMX_REASON_INVD,
290 | INVLPG = sys::VMX_REASON_INVLPG,
291 | RDPMC = sys::VMX_REASON_RDPMC,
292 | RDTSC = sys::VMX_REASON_RDTSC,
293 | RSM = sys::VMX_REASON_RSM,
294 | VMCALL = sys::VMX_REASON_VMCALL,
295 | VMCLEAR = sys::VMX_REASON_VMCLEAR,
296 | VMLAUNCH = sys::VMX_REASON_VMLAUNCH,
297 | VMPTRLD = sys::VMX_REASON_VMPTRLD,
298 | VMPTRST = sys::VMX_REASON_VMPTRST,
299 | VMREAD = sys::VMX_REASON_VMREAD,
300 | VMRESUME = sys::VMX_REASON_VMRESUME,
301 | VMWRITE = sys::VMX_REASON_VMWRITE,
302 | VMOFF = sys::VMX_REASON_VMOFF,
303 | VMON = sys::VMX_REASON_VMON,
304 | MOV_CR = sys::VMX_REASON_MOV_CR,
305 | MOV_DR = sys::VMX_REASON_MOV_DR,
306 | IO = sys::VMX_REASON_IO,
307 | RDMSR = sys::VMX_REASON_RDMSR,
308 | WRMSR = sys::VMX_REASON_WRMSR,
309 | VMENTRY_GUEST = sys::VMX_REASON_VMENTRY_GUEST,
310 | VMENTRY_MSR = sys::VMX_REASON_VMENTRY_MSR,
311 | MWAIT = sys::VMX_REASON_MWAIT,
312 | MTF = sys::VMX_REASON_MTF,
313 | MONITOR = sys::VMX_REASON_MONITOR,
314 | PAUSE = sys::VMX_REASON_PAUSE,
315 | VMENTRY_MC = sys::VMX_REASON_VMENTRY_MC,
316 | TPR_THRESHOLD = sys::VMX_REASON_TPR_THRESHOLD,
317 | APIC_ACCESS = sys::VMX_REASON_APIC_ACCESS,
318 | VIRTUALIZED_EOI = sys::VMX_REASON_VIRTUALIZED_EOI,
319 | GDTR_IDTR = sys::VMX_REASON_GDTR_IDTR,
320 | LDTR_TR = sys::VMX_REASON_LDTR_TR,
321 | EPT_VIOLATION = sys::VMX_REASON_EPT_VIOLATION,
322 | EPT_MISCONFIG = sys::VMX_REASON_EPT_MISCONFIG,
323 | EPT_INVEPT = sys::VMX_REASON_EPT_INVEPT,
324 | RDTSCP = sys::VMX_REASON_RDTSCP,
325 | VMX_TIMER_EXPIRED = sys::VMX_REASON_VMX_TIMER_EXPIRED,
326 | INVVPID = sys::VMX_REASON_INVVPID,
327 | WBINVD = sys::VMX_REASON_WBINVD,
328 | XSETBV = sys::VMX_REASON_XSETBV,
329 | APIC_WRITE = sys::VMX_REASON_APIC_WRITE,
330 | RDRAND = sys::VMX_REASON_RDRAND,
331 | INVPCID = sys::VMX_REASON_INVPCID,
332 | VMFUNC = sys::VMX_REASON_VMFUNC,
333 | RDSEED = sys::VMX_REASON_RDSEED,
334 | XSAVES = sys::VMX_REASON_XSAVES,
335 | XRSTORS = sys::VMX_REASON_XRSTORS,
336 | }
337 |
338 | #[allow(non_camel_case_types)]
339 | #[non_exhaustive]
340 | #[repr(u32)]
341 | #[derive(Debug, Copy, Clone, Eq, PartialEq)]
342 | pub enum IrqInfo {
343 | EXT_IRQ = sys::IRQ_INFO_EXT_IRQ,
344 | NMI = sys::IRQ_INFO_NMI,
345 | HARD_EXC = sys::IRQ_INFO_HARD_EXC,
346 | SOFT_IRQ = sys::IRQ_INFO_SOFT_IRQ,
347 | PRIV_SOFT_EXC = sys::IRQ_INFO_PRIV_SOFT_EXC,
348 | SOFT_EXC = sys::IRQ_INFO_SOFT_EXC,
349 | ERROR_VALID = sys::IRQ_INFO_ERROR_VALID,
350 | VALID = sys::IRQ_INFO_VALID,
351 | }
352 |
--------------------------------------------------------------------------------
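
A short sketch of driving the `VCpuVmxExt` trait above after a VM exit: it reads the host's pin-based VMX capability, pulls a few read-only VMCS fields, and writes the exception bitmap. The vCPU is taken by reference so no particular constructor is assumed; `Vcpu` and `Error` are assumed to be re-exported at the crate root, as the `use crate::{...}` line in this file suggests, and the shadow-VMCS calls gated behind `hv_10_15` are omitted.

```rust
use hv::x86::vmx::{read_capability, Capability, VCpuVmxExt, Vmcs};
use hv::{Error, Vcpu};

/// Print basic exit information for a vCPU that has already run at least once.
fn dump_exit_state(cpu: &Vcpu) -> Result<(), Error> {
    // Host-wide VMX capability; no vCPU is needed for this query.
    let pin_based = read_capability(Capability::PinBased)?;
    println!("pin-based controls (allowed settings): {:#018x}", pin_based);

    // Basic exit information lives in read-only VMCS fields.
    let reason = cpu.read_vmcs(Vmcs::RO_EXIT_REASON)?;
    let qualification = cpu.read_vmcs(Vmcs::RO_EXIT_QUALIFIC)?;
    let rip = cpu.read_vmcs(Vmcs::GUEST_RIP)?;
    println!(
        "exit reason {:#x} at rip {:#x} (qualification {:#x})",
        reason & 0xffff, // the basic exit reason is the low 16 bits of the field
        rip,
        qualification
    );

    // A write example: intercept every guest exception via the exception bitmap.
    cpu.write_vmcs(Vmcs::CTRL_EXC_BITMAP, 0xffff_ffff)?;
    Ok(())
}
```
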