├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── Cargo.toml
├── README.md
└── src
├── context_frame.rs
├── exception.S
├── exception.rs
├── exception_utils.rs
├── lib.rs
├── pcpu.rs
├── smc.rs
└── vcpu.rs
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | ci:
7 | runs-on: ubuntu-latest
8 | strategy:
9 | fail-fast: false
10 | matrix:
11 | rust-toolchain: [nightly-2024-12-25, nightly]
12 | targets: [aarch64-unknown-none-softfloat]
13 | steps:
14 | - uses: actions/checkout@v4
15 | - uses: dtolnay/rust-toolchain@nightly
16 | with:
17 | toolchain: ${{ matrix.rust-toolchain }}
18 | components: rust-src, clippy, rustfmt
19 | targets: ${{ matrix.targets }}
20 | - name: Check rust version
21 | run: rustc --version --verbose
22 | - name: Check code format
23 | run: cargo fmt --all -- --check
24 | - name: Clippy
25 | continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }}
26 | run: cargo clippy --target ${{ matrix.targets }} --all-features -- -A clippy::new_without_default
27 | - name: Build
28 | continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }}
29 | run: cargo build --target ${{ matrix.targets }} --all-features
30 | - name: Unit test
31 | if: ${{ matrix.targets == 'x86_64-unknown-linux-gnu' }}
32 | run: cargo test --target ${{ matrix.targets }} -- --nocapture
33 |
34 | doc:
35 | runs-on: ubuntu-latest
36 | strategy:
37 | fail-fast: false
38 | permissions:
39 | contents: write
40 | env:
41 | default-branch: ${{ format('refs/heads/{0}', github.event.repository.default_branch) }}
42 | RUSTDOCFLAGS: -D rustdoc::broken_intra_doc_links -D missing-docs
43 | steps:
44 | - uses: actions/checkout@v4
45 | - uses: dtolnay/rust-toolchain@nightly
46 | with:
47 | toolchain: nightly-2024-12-25
48 | - name: Build docs
49 | continue-on-error: ${{ github.ref != env.default-branch && github.event_name != 'pull_request' }}
50 | run: |
51 | cargo doc --no-deps --all-features
52 |           printf '<meta http-equiv="refresh" content="0;url=%s/index.html">' $(cargo tree | head -1 | cut -d' ' -f1) > target/doc/index.html
53 | - name: Deploy to Github Pages
54 | if: ${{ github.ref == env.default-branch }}
55 | uses: JamesIves/github-pages-deploy-action@v4
56 | with:
57 | single-commit: true
58 | branch: gh-pages
59 | folder: target/doc
60 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Build output and other log files from arceos
2 | /target
3 | *.asm
4 | *.img
5 | *.bin
6 | *.elf
7 | actual.out
8 | qemu.log
9 | rusty-tags.vi
10 |
11 | # Visual Studio Code settings
12 | /.vscode
13 |
14 | # macOS system files
15 | .DS_Store
16 |
17 | # We ignore Cargo.lock because `axvcpu` is just a library
18 | Cargo.lock
19 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "arm_vcpu"
3 | version = "0.1.0"
4 | edition = "2024"
5 |
6 | [dependencies]
7 | log = "0.4.21"
8 | spin = "0.9"
9 |
10 | aarch64-cpu = "9.3"
11 | tock-registers = "0.8"
12 | numeric-enum-macro = "0.2"
13 |
14 | axerrno = "0.1.0"
15 | percpu = { version = "0.2.0", features = ["arm-el2"] }
16 | aarch64_sysreg = "0.1.1"
17 |
18 | axaddrspace = { git = "https://github.com/arceos-hypervisor/axaddrspace.git" }
19 | axvcpu = { git = "https://github.com/arceos-hypervisor/axvcpu.git" }
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # arm_vcpu
2 |
3 | [![CI](https://github.com/arceos-hypervisor/arm_vcpu/actions/workflows/ci.yml/badge.svg)](https://github.com/arceos-hypervisor/arm_vcpu/actions/workflows/ci.yml)
4 |
5 | Definition of the vCPU structure and virtualization-related interface support for the AArch64 architecture.
--------------------------------------------------------------------------------
/src/context_frame.rs:
--------------------------------------------------------------------------------
1 | use core::arch::asm;
2 | use core::fmt::Formatter;
3 |
4 | use aarch64_cpu::registers::*;
5 |
/// A struct representing the AArch64 CPU context frame.
///
/// This context frame includes
/// * the general-purpose registers (GPRs) `x0`-`x30`,
/// * the stack pointer associated with EL0 (SP_EL0),
/// * the exception link register (ELR),
/// * the saved program status register (SPSR).
///
/// The layout is 34 consecutive `u64` values (31 GPRs + SP_EL0 + ELR + SPSR),
/// which must stay in sync with the `34 * 8`-byte frame that `exception.S`
/// saves with `SAVE_REGS_FROM_EL1` and restores with `RESTORE_REGS_INTO_EL1`.
///
/// The `#[repr(C)]` attribute ensures that the struct has a C-compatible
/// memory layout, which is important when interfacing with hardware or
/// other low-level components.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct Aarch64ContextFrame {
    /// An array of 31 `u64` values representing the general-purpose registers `x0`-`x30`.
    pub gpr: [u64; 31],
    /// The stack pointer associated with EL0 (SP_EL0).
    pub sp_el0: u64,
    /// The exception link register, which stores the return address after an exception.
    pub elr: u64,
    /// The saved program status register, which holds the state of the program at the time of an exception.
    pub spsr: u64,
}
29 |
30 | /// Implementations of [`fmt::Display`] for [`Aarch64ContextFrame`].
31 | impl core::fmt::Display for Aarch64ContextFrame {
32 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
33 | for i in 0..31 {
34 | write!(f, "x{:02}: {:016x} ", i, self.gpr[i])?;
35 | if (i + 1) % 2 == 0 {
36 | writeln!(f)?;
37 | }
38 | }
39 | writeln!(f, "spsr:{:016x}", self.spsr)?;
40 | write!(f, "elr: {:016x}", self.elr)?;
41 | writeln!(f, " sp_el0: {:016x}", self.sp_el0)?;
42 | Ok(())
43 | }
44 | }
45 |
46 | impl Default for Aarch64ContextFrame {
47 | /// Returns the default context frame.
48 | ///
49 | /// The default state sets the SPSR to mask all exceptions and sets the mode to EL1h.
50 | fn default() -> Self {
51 | Aarch64ContextFrame {
52 | gpr: [0; 31],
53 | spsr: (SPSR_EL1::M::EL1h
54 | + SPSR_EL1::I::Masked
55 | + SPSR_EL1::F::Masked
56 | + SPSR_EL1::A::Masked
57 | + SPSR_EL1::D::Masked)
58 | .value,
59 | elr: 0,
60 | sp_el0: 0,
61 | }
62 | }
63 | }
64 |
65 | impl Aarch64ContextFrame {
66 | /// Returns the exception program counter (ELR).
67 | pub fn exception_pc(&self) -> usize {
68 | self.elr as usize
69 | }
70 |
71 | /// Sets the exception program counter (ELR).
72 | ///
73 | /// # Arguments
74 | ///
75 | /// * `pc` - The new program counter value.
76 | pub fn set_exception_pc(&mut self, pc: usize) {
77 | self.elr = pc as u64;
78 | }
79 |
80 | /// Sets the argument in register x0.
81 | ///
82 | /// # Arguments
83 | ///
84 | /// * `arg` - The argument to be passed in register x0.
85 | pub fn set_argument(&mut self, arg: usize) {
86 | self.gpr[0] = arg as u64;
87 | }
88 |
89 | /// Sets the value of a general-purpose register (GPR).
90 | ///
91 | /// # Arguments
92 | ///
93 | /// * `index` - The index of the general-purpose register (0 to 31).
94 | /// * `val` - The value to be set in the register.
95 | ///
96 | /// # Behavior
97 | /// - If `index` is between 0 and 30, the register at the specified index is set to `val`.
98 | /// - If `index` is 31, the operation is ignored, as it corresponds to the zero register
99 | /// (`wzr` or `xzr` in AArch64), which always reads as zero and cannot be modified.
100 | ///
101 | /// # Panics
102 | /// Panics if the provided `index` is outside the range 0 to 31.
103 | pub fn set_gpr(&mut self, index: usize, val: usize) {
104 | match index {
105 | 0..=30 => self.gpr[index] = val as u64,
106 | 31 => warn!("Try to set zero register at index [{index}] as {val}"),
107 | _ => {
108 | panic!("Invalid general-purpose register index {index}")
109 | }
110 | }
111 | }
112 |
113 | /// Retrieves the value of a general-purpose register (GPR).
114 | ///
115 | /// # Arguments
116 | ///
117 | /// * `index` - The index of the general-purpose register (0 to 31).
118 | ///
119 | /// # Returns
120 | /// The value stored in the specified register.
121 | ///
122 | /// # Panics
123 | /// Panics if the provided `index` is not in the range 0 to 31.
124 | ///
125 | /// # Notes
126 | /// * For `index` 31, this method returns 0, as it corresponds to the zero register (`wzr` or `xzr` in AArch64).
127 | pub fn gpr(&self, index: usize) -> usize {
128 | match index {
129 | 0..=30 => self.gpr[index] as usize,
130 | 31 => 0,
131 | _ => {
132 | panic!("Invalid general-purpose register index {index}")
133 | }
134 | }
135 | }
136 | }
137 |
/// Represents the VM context for a guest virtual machine in a hypervisor environment.
///
/// The `GuestSystemRegisters` structure contains various registers and states needed to manage
/// and restore the context of a virtual machine (VM). This includes timer registers,
/// system control registers, exception registers, and hypervisor-specific registers.
///
/// The structure is aligned to 16 bytes to ensure proper memory alignment for efficient access.
#[repr(C)]
#[repr(align(16))]
#[derive(Debug, Clone, Copy, Default)]
pub struct GuestSystemRegisters {
    // generic timer
    /// Virtual counter offset (CNTVOFF_EL2).
    pub cntvoff_el2: u64,
    // Physical timer compare value (CNTP_CVAL_EL0).
    // NOTE(review): not read or written by `store`/`restore` below — confirm
    // whether it is handled elsewhere or intentionally unused.
    cntp_cval_el0: u64,
    // Virtual timer compare value (CNTV_CVAL_EL0).
    cntv_cval_el0: u64,
    /// Counter-timer kernel control register (CNTKCTL_EL1).
    pub cntkctl_el1: u32,
    /// Virtual counter value (CNTVCT_EL0).
    pub cntvct_el0: u64,
    // Physical timer control register (CNTP_CTL_EL0).
    cntp_ctl_el0: u32,
    // Virtual timer control register (CNTV_CTL_EL0).
    cntv_ctl_el0: u32,
    // Physical timer value (CNTP_TVAL_EL0).
    cntp_tval_el0: u32,
    // Virtual timer value (CNTV_TVAL_EL0).
    cntv_tval_el0: u32,

    // vpidr and vmpidr
    // Virtualization processor ID register (VPIDR_EL2); saving is currently
    // commented out in `store`.
    vpidr_el2: u32,
    /// Virtualization multiprocessor ID register (VMPIDR_EL2).
    pub vmpidr_el2: u64,

    // 64bit EL1/EL0 register
    /// Stack pointer for EL0 (SP_EL0).
    pub sp_el0: u64,
    sp_el1: u64,
    elr_el1: u64,
    spsr_el1: u32,
    /// System control register for EL1 (SCTLR_EL1).
    pub sctlr_el1: u32,
    actlr_el1: u64,
    cpacr_el1: u32,
    ttbr0_el1: u64,
    ttbr1_el1: u64,
    tcr_el1: u64,
    esr_el1: u32,
    far_el1: u64,
    par_el1: u64,
    mair_el1: u64,
    amair_el1: u64,
    vbar_el1: u64,
    contextidr_el1: u32,
    tpidr_el0: u64,
    tpidr_el1: u64,
    tpidrro_el0: u64,

    // hypervisor context
    /// Hypervisor configuration register (HCR_EL2).
    pub hcr_el2: u64,
    /// Stage-2 translation table base register (VTTBR_EL2).
    pub vttbr_el2: u64,
    cptr_el2: u64,
    hstr_el2: u64,
    /// Performance monitors control register (PMCR_EL0).
    pub pmcr_el0: u64,
    /// Stage-2 translation control register (VTCR_EL2).
    pub vtcr_el2: u64,

    // exception
    // NOTE(review): far_el2/hpfar_el2 are not touched by `store`/`restore`
    // below — confirm whether they are populated elsewhere.
    far_el2: u64,
    hpfar_el2: u64,
}
198 |
impl GuestSystemRegisters {
    /// Resets the VM context by setting all registers to zero.
    ///
    /// This method allows the `GuestSystemRegisters` instance to be reused by resetting
    /// its state to the default values (all zeros).
    #[allow(unused)]
    pub fn reset(&mut self) {
        *self = GuestSystemRegisters::default()
    }

    /// Stores the current values of all relevant registers into the `GuestSystemRegisters` structure.
    ///
    /// This method uses inline assembly to read the values of various system registers
    /// and stores them in the corresponding fields of the `GuestSystemRegisters` structure.
    ///
    /// # Safety
    ///
    /// Reads EL1/EL0 and `_EL2` system registers with `mrs`; the caller must
    /// ensure this executes at an exception level where these registers are
    /// accessible (EL2 in this hypervisor) and that the values belong to the
    /// guest whose context is being saved.
    pub unsafe fn store(&mut self) {
        unsafe {
            asm!("mrs {0}, CNTVOFF_EL2", out(reg) self.cntvoff_el2);
            asm!("mrs {0}, CNTV_CVAL_EL0", out(reg) self.cntv_cval_el0);
            asm!("mrs {0:x}, CNTKCTL_EL1", out(reg) self.cntkctl_el1);
            asm!("mrs {0:x}, CNTP_CTL_EL0", out(reg) self.cntp_ctl_el0);
            asm!("mrs {0:x}, CNTV_CTL_EL0", out(reg) self.cntv_ctl_el0);
            asm!("mrs {0:x}, CNTP_TVAL_EL0", out(reg) self.cntp_tval_el0);
            asm!("mrs {0:x}, CNTV_TVAL_EL0", out(reg) self.cntv_tval_el0);
            asm!("mrs {0}, CNTVCT_EL0", out(reg) self.cntvct_el0);
            // VPIDR_EL2 is currently not saved:
            // MRS!("self.vpidr_el2, VPIDR_EL2, "x");
            asm!("mrs {0}, VMPIDR_EL2", out(reg) self.vmpidr_el2);

            asm!("mrs {0}, SP_EL0", out(reg) self.sp_el0);
            asm!("mrs {0}, SP_EL1", out(reg) self.sp_el1);
            asm!("mrs {0}, ELR_EL1", out(reg) self.elr_el1);
            asm!("mrs {0:x}, SPSR_EL1", out(reg) self.spsr_el1);
            asm!("mrs {0:x}, SCTLR_EL1", out(reg) self.sctlr_el1);
            asm!("mrs {0:x}, CPACR_EL1", out(reg) self.cpacr_el1);
            asm!("mrs {0}, TTBR0_EL1", out(reg) self.ttbr0_el1);
            asm!("mrs {0}, TTBR1_EL1", out(reg) self.ttbr1_el1);
            asm!("mrs {0}, TCR_EL1", out(reg) self.tcr_el1);
            asm!("mrs {0:x}, ESR_EL1", out(reg) self.esr_el1);
            asm!("mrs {0}, FAR_EL1", out(reg) self.far_el1);
            asm!("mrs {0}, PAR_EL1", out(reg) self.par_el1);
            asm!("mrs {0}, MAIR_EL1", out(reg) self.mair_el1);
            asm!("mrs {0}, AMAIR_EL1", out(reg) self.amair_el1);
            asm!("mrs {0}, VBAR_EL1", out(reg) self.vbar_el1);
            asm!("mrs {0:x}, CONTEXTIDR_EL1", out(reg) self.contextidr_el1);
            asm!("mrs {0}, TPIDR_EL0", out(reg) self.tpidr_el0);
            asm!("mrs {0}, TPIDR_EL1", out(reg) self.tpidr_el1);
            asm!("mrs {0}, TPIDRRO_EL0", out(reg) self.tpidrro_el0);

            asm!("mrs {0}, PMCR_EL0", out(reg) self.pmcr_el0);
            asm!("mrs {0}, VTCR_EL2", out(reg) self.vtcr_el2);
            asm!("mrs {0}, VTTBR_EL2", out(reg) self.vttbr_el2);
            asm!("mrs {0}, HCR_EL2", out(reg) self.hcr_el2);
            asm!("mrs {0}, ACTLR_EL1", out(reg) self.actlr_el1);
            // println!("save sctlr {:x}", self.sctlr_el1);
        }
    }

    /// Restores the values of all relevant system registers from the `GuestSystemRegisters` structure.
    ///
    /// This method uses inline assembly to write the values stored in the `GuestSystemRegisters` structure
    /// back to the system registers. This is essential for restoring the state of a virtual machine
    /// or thread during context switching.
    ///
    /// Each system register is restored with its corresponding value from the `GuestSystemRegisters`, ensuring
    /// that the virtual machine or thread resumes execution with the correct context.
    ///
    /// # Safety
    ///
    /// Writes EL1/EL0 and `_EL2` system registers with `msr`, directly changing
    /// translation, exception and timer state. The caller must ensure this
    /// executes at EL2 and that the stored values form a consistent guest
    /// context to switch to.
    pub unsafe fn restore(&self) {
        unsafe {
            asm!("msr CNTV_CVAL_EL0, {0}", in(reg) self.cntv_cval_el0);
            asm!("msr CNTKCTL_EL1, {0:x}", in (reg) self.cntkctl_el1);
            asm!("msr CNTV_CTL_EL0, {0:x}", in (reg) self.cntv_ctl_el0);
            // The restoration of SP_EL0 is done in `exception_return_el2`,
            // which move the value from `self.ctx.sp_el0` to `SP_EL0`.
            // asm!("msr SP_EL0, {0}", in(reg) self.sp_el0);
            asm!("msr SP_EL1, {0}", in(reg) self.sp_el1);
            asm!("msr ELR_EL1, {0}", in(reg) self.elr_el1);
            asm!("msr SPSR_EL1, {0:x}", in(reg) self.spsr_el1);
            asm!("msr SCTLR_EL1, {0:x}", in(reg) self.sctlr_el1);
            asm!("msr CPACR_EL1, {0:x}", in(reg) self.cpacr_el1);
            asm!("msr TTBR0_EL1, {0}", in(reg) self.ttbr0_el1);
            asm!("msr TTBR1_EL1, {0}", in(reg) self.ttbr1_el1);
            asm!("msr TCR_EL1, {0}", in(reg) self.tcr_el1);
            asm!("msr ESR_EL1, {0:x}", in(reg) self.esr_el1);
            asm!("msr FAR_EL1, {0}", in(reg) self.far_el1);
            asm!("msr PAR_EL1, {0}", in(reg) self.par_el1);
            asm!("msr MAIR_EL1, {0}", in(reg) self.mair_el1);
            asm!("msr AMAIR_EL1, {0}", in(reg) self.amair_el1);
            asm!("msr VBAR_EL1, {0}", in(reg) self.vbar_el1);
            asm!("msr CONTEXTIDR_EL1, {0:x}", in(reg) self.contextidr_el1);
            asm!("msr TPIDR_EL0, {0}", in(reg) self.tpidr_el0);
            asm!("msr TPIDR_EL1, {0}", in(reg) self.tpidr_el1);
            asm!("msr TPIDRRO_EL0, {0}", in(reg) self.tpidrro_el0);

            asm!("msr PMCR_EL0, {0}", in(reg) self.pmcr_el0);
            asm!("msr ACTLR_EL1, {0}", in(reg) self.actlr_el1);

            asm!("msr VTCR_EL2, {0}", in(reg) self.vtcr_el2);
            asm!("msr VTTBR_EL2, {0}", in(reg) self.vttbr_el2);
            asm!("msr HCR_EL2, {0}", in(reg) self.hcr_el2);
            asm!("msr VMPIDR_EL2, {0}", in(reg) self.vmpidr_el2);
            asm!("msr CNTVOFF_EL2, {0}", in(reg) self.cntvoff_el2);
        }
    }
}
301 |
--------------------------------------------------------------------------------
/src/exception.S:
--------------------------------------------------------------------------------
# Save the full guest register state into `Aarch64VCpu.ctx` (a 34*8-byte
# `TrapFrame`): x0-x30, SP_EL0, ELR_EL2 and SPSR_EL2.
.macro SAVE_REGS_FROM_EL1
    # Currently `sp` points to the address of `Aarch64VCpu.host_stack_top`.
    sub sp, sp, 34 * 8
    # Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores guest's `TrapFrame`.

    # Save general purpose registers into `Aarch64VCpu.ctx`.
    stp x0, x1, [sp]
    stp x2, x3, [sp, 2 * 8]
    stp x4, x5, [sp, 4 * 8]
    stp x6, x7, [sp, 6 * 8]
    stp x8, x9, [sp, 8 * 8]
    stp x10, x11, [sp, 10 * 8]
    stp x12, x13, [sp, 12 * 8]
    stp x14, x15, [sp, 14 * 8]
    stp x16, x17, [sp, 16 * 8]
    stp x18, x19, [sp, 18 * 8]
    stp x20, x21, [sp, 20 * 8]
    stp x22, x23, [sp, 22 * 8]
    stp x24, x25, [sp, 24 * 8]
    stp x26, x27, [sp, 26 * 8]
    stp x28, x29, [sp, 28 * 8]

    # x9 is free now (already saved above): use it to save the guest's SP_EL0
    # next to x30 in the frame.
    mrs x9, sp_el0
    stp x30, x9, [sp, 30 * 8]

    # Save `elr_el2` and `spsr_el2` into `Aarch64VCpu.ctx`.
    mrs x10, elr_el2
    mrs x11, spsr_el2
    stp x10, x11, [sp, 32 * 8]
.endm
31 |
# Restore the full guest register state from `Aarch64VCpu.ctx`:
# ELR_EL2/SPSR_EL2 and SP_EL0 first (while x9-x11 are still usable as
# scratch), then the general-purpose registers x0-x30.
.macro RESTORE_REGS_INTO_EL1
    ldp x10, x11, [sp, 32 * 8]
    # This `ldp` restores the guest's x30 and loads its SP_EL0 into x9.
    ldp x30, x9, [sp, 30 * 8]
    msr sp_el0, x9
    msr elr_el2, x10
    msr spsr_el2, x11

    ldp x28, x29, [sp, 28 * 8]
    ldp x26, x27, [sp, 26 * 8]
    ldp x24, x25, [sp, 24 * 8]
    ldp x22, x23, [sp, 22 * 8]
    ldp x20, x21, [sp, 20 * 8]
    ldp x18, x19, [sp, 18 * 8]
    ldp x16, x17, [sp, 16 * 8]
    ldp x14, x15, [sp, 14 * 8]
    ldp x12, x13, [sp, 12 * 8]
    ldp x10, x11, [sp, 10 * 8]
    ldp x8, x9, [sp, 8 * 8]
    ldp x6, x7, [sp, 6 * 8]
    ldp x4, x5, [sp, 4 * 8]
    ldp x2, x3, [sp, 2 * 8]
    ldp x0, x1, [sp]
    # Currently `sp` points to the base address of `Aarch64VCpu.ctx`.
    add sp, sp, 34 * 8
    # Currently `sp` points to the address of `Aarch64VCpu.host_stack_top`.
.endm
59 |
60 |
# Vector entry for exception classes that should never occur: save the guest
# frame, then call `invalid_exception_el2(tf, kind, source)` (exception.rs),
# which panics with a dump of the frame.
.macro INVALID_EXCP_EL2, kind, source
.p2align 7
    SAVE_REGS_FROM_EL1
    mov x0, sp
    mov x1, \kind
    mov x2, \source
    bl invalid_exception_el2
    b .Lexception_return_el2
.endm

# Vector entry for IRQs taken from the current EL (EL2): dispatch to the
# host-registered handler via `current_el_irq_handler` (exception.rs).
.macro HANDLE_CURRENT_IRQ
.p2align 7
    SAVE_REGS_FROM_EL1
    mov x0, sp
    bl current_el_irq_handler
    b .Lexception_return_el2
.endm

# Vector entry for synchronous exceptions taken from the current EL (EL2):
# `current_el_sync_handler` (exception.rs) panics with the trap frame.
.macro HANDLE_CURRENT_SYNC
.p2align 7
    SAVE_REGS_FROM_EL1
    mov x0, sp
    bl current_el_sync_handler
    b .Lexception_return_el2
.endm

# Vector entry for IRQs from a lower EL (a running guest). `{exception_irq}`
# is substituted by `global_asm!` in exception.rs with `TrapKind::Irq`.
.macro HANDLE_LOWER_IRQ_VCPU
.p2align 7
    SAVE_REGS_FROM_EL1
    mov x0, {exception_irq}
    bl vmexit_trampoline
    # b .Lexception_return_el2 is called by `vmexit_trampoline`
.endm

# Vector entry for synchronous exceptions from a lower EL (a running guest).
# `{exception_sync}` is substituted with `TrapKind::Synchronous`.
.macro HANDLE_LOWER_SYNC_VCPU
.p2align 7
    SAVE_REGS_FROM_EL1
    mov x0, {exception_sync}
    bl vmexit_trampoline
    # b .Lexception_return_el2 is called by `vmexit_trampoline`
.endm
102 |
103 |
.section .text
# The AArch64 vector table must be 2 KiB aligned; each of its 16 entries is
# 128 bytes (hence `.p2align 7` inside the entry macros).
.p2align 11
.global exception_vector_base_vcpu
exception_vector_base_vcpu:
    // current EL, with SP_EL0
    INVALID_EXCP_EL2 0 0
    INVALID_EXCP_EL2 1 0
    INVALID_EXCP_EL2 2 0
    INVALID_EXCP_EL2 3 0

    // current EL, with SP_ELx
    HANDLE_CURRENT_SYNC
    HANDLE_CURRENT_IRQ
    INVALID_EXCP_EL2 2 1
    INVALID_EXCP_EL2 3 1

    // lower EL, aarch64
    HANDLE_LOWER_SYNC_VCPU
    HANDLE_LOWER_IRQ_VCPU
    INVALID_EXCP_EL2 2 2
    INVALID_EXCP_EL2 3 2

    // lower EL, aarch32
    INVALID_EXCP_EL2 0 3
    INVALID_EXCP_EL2 1 3
    INVALID_EXCP_EL2 2 3
    INVALID_EXCP_EL2 3 3

# Entry point for entering the guest: called with `x0` holding the address of
# `Aarch64VCpu.host_stack_top`; restores the guest frame and `eret`s into it.
.global context_vm_entry
context_vm_entry:
    # Currently `x0` points to the address of `Aarch64VCpu.host_stack_top`.
    mov sp, x0
    sub sp, sp, 34 * 8
    # Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores guest's `TrapFrame`.
.Lexception_return_el2:
    RESTORE_REGS_INTO_EL1
    eret
141 |
--------------------------------------------------------------------------------
/src/exception.rs:
--------------------------------------------------------------------------------
1 | use aarch64_cpu::registers::{ESR_EL2, HCR_EL2, Readable, SCTLR_EL1, VTCR_EL2, VTTBR_EL2};
2 |
3 | use axaddrspace::GuestPhysAddr;
4 | use axerrno::{AxError, AxResult};
5 | use axvcpu::{AccessWidth, AxVCpuExitReason};
6 |
7 | use crate::TrapFrame;
8 | use crate::exception_utils::{
9 | exception_class, exception_class_value, exception_data_abort_access_is_write,
10 | exception_data_abort_access_reg, exception_data_abort_access_reg_width,
11 | exception_data_abort_access_width, exception_data_abort_handleable,
12 | exception_data_abort_is_permission_fault, exception_data_abort_is_translate_fault,
13 | exception_esr, exception_fault_addr, exception_next_instruction_step, exception_sysreg_addr,
14 | exception_sysreg_direction_write, exception_sysreg_gpr,
15 | };
16 |
// The kind of trap taken to EL2. The numeric values match the `kind`
// argument passed by the vector-table macros in exception.S.
numeric_enum_macro::numeric_enum! {
    #[repr(u8)]
    #[derive(Debug)]
    pub enum TrapKind {
        Synchronous = 0,
        Irq = 1,
        Fiq = 2,
        SError = 3,
    }
}

/// Equals to [`TrapKind::Synchronous`], used in exception.S.
const EXCEPTION_SYNC: usize = TrapKind::Synchronous as usize;
/// Equals to [`TrapKind::Irq`], used in exception.S.
const EXCEPTION_IRQ: usize = TrapKind::Irq as usize;

// Where a trap was taken from; each value matches the `source` argument
// passed by `INVALID_EXCP_EL2` in exception.S (one per vector-table column).
#[repr(u8)]
#[derive(Debug)]
#[allow(unused)]
enum TrapSource {
    CurrentSpEl0 = 0,
    CurrentSpElx = 1,
    LowerAArch64 = 2,
    LowerAArch32 = 3,
}

// Embed the EL2 vector table and handlers, substituting the trap-kind
// constants referenced by the lower-EL handler macros.
core::arch::global_asm!(
    include_str!("exception.S"),
    exception_sync = const EXCEPTION_SYNC,
    exception_irq = const EXCEPTION_IRQ,
);
48 |
49 | /// Handles synchronous exceptions that occur during the execution of a guest VM.
50 | ///
51 | /// This function examines the exception class (EC) to determine the cause of the exception
52 | /// and then handles it accordingly.
53 | ///
54 | /// Currently we just handle exception type including data abort (`DataAbortLowerEL`) and hypervisor call (`HVC64)`.
55 | ///
56 | /// # Arguments
57 | ///
58 | /// * `ctx` - A mutable reference to the `TrapFrame`, which contains the saved state of the
59 | /// guest VM's CPU registers at the time of the exception.
60 | ///
61 | /// # Returns
62 | ///
63 | /// An `AxResult` containing an `AxVCpuExitReason` indicating the reason for the VM exit.
64 | /// This could be due to a hypervisor call (`Hypercall`) or other reasons such as data aborts.
65 | ///
66 | /// # Panics
67 | ///
68 | /// If an unhandled exception class is encountered, the function will panic, outputting
69 | /// details about the exception including the instruction pointer, faulting address, exception
70 | /// syndrome register (ESR), and system control registers.
71 | ///
72 | pub fn handle_exception_sync(ctx: &mut TrapFrame) -> AxResult {
73 | match exception_class() {
74 | Some(ESR_EL2::EC::Value::DataAbortLowerEL) => {
75 | let elr = ctx.exception_pc();
76 | let val = elr + exception_next_instruction_step();
77 | ctx.set_exception_pc(val);
78 | handle_data_abort(ctx)
79 | }
80 | Some(ESR_EL2::EC::Value::HVC64) => {
81 | // The `#imm`` argument when triggering a hvc call, currently not used.
82 | let _hvc_arg_imm16 = ESR_EL2.read(ESR_EL2::ISS);
83 |
84 | // Is this a psci call?
85 | //
86 | // By convention, a psci call can use either the `hvc` or the `smc` instruction.
87 | // NimbOS uses `hvc`, `ArceOS` use `hvc` too when running on QEMU.
88 | if let Some(result) = handle_psci_call(ctx) {
89 | return result;
90 | }
91 |
92 | // We assume that guest VM triggers HVC through a `hvc #0`` instruction.
93 | // And arm64 hcall implementation uses `x0` to specify the hcall number.
94 | // For more details on the hypervisor call (HVC) mechanism and the use of general-purpose registers,
95 | // refer to the [Linux Kernel documentation on KVM ARM hypervisor ABI](https://github.com/torvalds/linux/blob/master/Documentation/virt/kvm/arm/hyp-abi.rst).
96 | Ok(AxVCpuExitReason::Hypercall {
97 | nr: ctx.gpr[0],
98 | args: [
99 | ctx.gpr[1], ctx.gpr[2], ctx.gpr[3], ctx.gpr[4], ctx.gpr[5], ctx.gpr[6],
100 | ],
101 | })
102 | }
103 | Some(ESR_EL2::EC::Value::TrappedMsrMrs) => handle_system_register(ctx),
104 | Some(ESR_EL2::EC::Value::SMC64) => {
105 | let elr = ctx.exception_pc();
106 | let val = elr + exception_next_instruction_step();
107 | ctx.set_exception_pc(val);
108 | handle_smc64_exception(ctx)
109 | }
110 | _ => {
111 | panic!(
112 | "handler not presents for EC_{} @ipa 0x{:x}, @pc 0x{:x}, @esr 0x{:x},
113 | @sctlr_el1 0x{:x}, @vttbr_el2 0x{:x}, @vtcr_el2: {:#x} hcr: {:#x} ctx:{}",
114 | exception_class_value(),
115 | exception_fault_addr()?,
116 | (*ctx).exception_pc(),
117 | exception_esr(),
118 | SCTLR_EL1.get() as usize,
119 | VTTBR_EL2.get() as usize,
120 | VTCR_EL2.get() as usize,
121 | HCR_EL2.get() as usize,
122 | ctx
123 | );
124 | }
125 | }
126 | }
127 |
128 | fn handle_data_abort(context_frame: &mut TrapFrame) -> AxResult {
129 | let addr = exception_fault_addr()?;
130 | let access_width = exception_data_abort_access_width();
131 | let is_write = exception_data_abort_access_is_write();
132 | //let sign_ext = exception_data_abort_access_is_sign_ext();
133 | let reg = exception_data_abort_access_reg();
134 | let reg_width = exception_data_abort_access_reg_width();
135 |
136 | trace!(
137 | "Data fault @{:?}, ELR {:#x}, esr: 0x{:x}",
138 | addr,
139 | context_frame.exception_pc(),
140 | exception_esr(),
141 | );
142 |
143 | let width = match AccessWidth::try_from(access_width) {
144 | Ok(access_width) => access_width,
145 | Err(_) => return Err(AxError::InvalidInput),
146 | };
147 |
148 | let reg_width = match AccessWidth::try_from(reg_width) {
149 | Ok(reg_width) => reg_width,
150 | Err(_) => return Err(AxError::InvalidInput),
151 | };
152 |
153 | if !exception_data_abort_handleable() {
154 | panic!(
155 | "Core data abort not handleable {:#x}, esr {:#x}",
156 | addr,
157 | exception_esr()
158 | );
159 | }
160 |
161 | if !exception_data_abort_is_translate_fault() {
162 | if exception_data_abort_is_permission_fault() {
163 | return Err(AxError::Unsupported);
164 | } else {
165 | panic!("Core data abort is not translate fault {:#x}", addr,);
166 | }
167 | }
168 |
169 | if is_write {
170 | return Ok(AxVCpuExitReason::MmioWrite {
171 | addr,
172 | width,
173 | data: context_frame.gpr(reg) as u64,
174 | });
175 | }
176 | Ok(AxVCpuExitReason::MmioRead {
177 | addr,
178 | width,
179 | reg,
180 | reg_width,
181 | })
182 | }
183 |
184 | /// Handles a system register access exception.
185 | ///
186 | /// This function processes the exception by reading or writing to a system register
187 | /// based on the information in the `context_frame`.
188 | ///
189 | /// # Arguments
190 | /// * `context_frame` - A mutable reference to the trap frame containing the CPU state.
191 | ///
192 | /// # Returns
193 | /// * `AxResult` - An `AxResult` containing an `AxVCpuExitReason` indicating
194 | /// whether the operation was a read or write and the relevant details.
195 | fn handle_system_register(context_frame: &mut TrapFrame) -> AxResult {
196 | let iss = ESR_EL2.read(ESR_EL2::ISS);
197 |
198 | let addr = exception_sysreg_addr(iss.try_into().unwrap());
199 | let elr = context_frame.exception_pc();
200 | let val = elr + exception_next_instruction_step();
201 | let write = exception_sysreg_direction_write(iss);
202 | let reg = exception_sysreg_gpr(iss) as usize;
203 | context_frame.set_exception_pc(val);
204 | if write {
205 | return Ok(AxVCpuExitReason::SysRegWrite {
206 | addr,
207 | value: context_frame.gpr(reg as usize) as u64,
208 | });
209 | }
210 | Ok(AxVCpuExitReason::SysRegRead { addr, reg })
211 | }
212 |
213 | /// Handles HVC or SMC exceptions that serve as psci (Power State Coordination Interface) calls.
214 | ///
215 | /// A hvc or smc call with the function in range 0x8000_0000..=0x8000_001F (when the 32-bit
216 | /// hvc/smc calling convention is used) or 0xC000_0000..=0xC000_001F (when the 64-bit hvc/smc
217 | /// calling convention is used) is a psci call. This function handles them all.
218 | ///
219 | /// Returns `None` if the HVC is not a psci call.
220 | fn handle_psci_call(ctx: &mut TrapFrame) -> Option> {
221 | const PSCI_FN_RANGE_32: core::ops::RangeInclusive = 0x8400_0000..=0x8400_001F;
222 | const PSCI_FN_RANGE_64: core::ops::RangeInclusive = 0xC400_0000..=0xC400_001F;
223 |
224 | const PSCI_FN_VERSION: u64 = 0x0;
225 | const _PSCI_FN_CPU_SUSPEND: u64 = 0x1;
226 | const PSCI_FN_CPU_OFF: u64 = 0x2;
227 | const PSCI_FN_CPU_ON: u64 = 0x3;
228 | const _PSCI_FN_MIGRATE: u64 = 0x5;
229 | const PSCI_FN_SYSTEM_OFF: u64 = 0x8;
230 | const _PSCI_FN_SYSTEM_RESET: u64 = 0x9;
231 | const PSCI_FN_END: u64 = 0x1f;
232 |
233 | let fn_ = ctx.gpr[0];
234 | let fn_offset = if PSCI_FN_RANGE_32.contains(&fn_) {
235 | Some(fn_ - PSCI_FN_RANGE_32.start())
236 | } else if PSCI_FN_RANGE_64.contains(&fn_) {
237 | Some(fn_ - PSCI_FN_RANGE_64.start())
238 | } else {
239 | None
240 | };
241 |
242 | match fn_offset {
243 | Some(PSCI_FN_CPU_OFF) => Some(Ok(AxVCpuExitReason::CpuDown { _state: ctx.gpr[1] })),
244 | Some(PSCI_FN_CPU_ON) => Some(Ok(AxVCpuExitReason::CpuUp {
245 | target_cpu: ctx.gpr[1],
246 | entry_point: GuestPhysAddr::from(ctx.gpr[2] as usize),
247 | arg: ctx.gpr[3],
248 | })),
249 | Some(PSCI_FN_SYSTEM_OFF) => Some(Ok(AxVCpuExitReason::SystemDown)),
250 | // We just forward these request to the ATF directly.
251 | Some(PSCI_FN_VERSION..PSCI_FN_END) => None,
252 | _ => None,
253 | }
254 | }
255 |
256 | /// Handles SMC (Secure Monitor Call) exceptions.
257 | ///
258 | /// This function will judge if the SMC call is a PSCI call, if so, it will handle it as a PSCI call.
259 | /// Otherwise, it will forward the SMC call to the ATF directly.
260 | fn handle_smc64_exception(ctx: &mut TrapFrame) -> AxResult {
261 | // Is this a psci call?
262 | if let Some(result) = handle_psci_call(ctx) {
263 | result
264 | } else {
265 | // We just forward the SMC call to the ATF directly.
266 | // The args are from lower EL, so it is safe to call the ATF.
267 | (ctx.gpr[0], ctx.gpr[1], ctx.gpr[2], ctx.gpr[3]) =
268 | unsafe { crate::smc::smc_call(ctx.gpr[0], ctx.gpr[1], ctx.gpr[2], ctx.gpr[3]) };
269 | Ok(AxVCpuExitReason::Nothing)
270 | }
271 | }
272 |
/// Handles IRQ exceptions that occur from the current exception level.
///
/// Dispatches IRQs to the appropriate handler provided by the underlying host OS,
/// which is registered at [`crate::pcpu::IRQ_HANDLER`] during `Aarch64PerCpu::new()`.
///
/// # Panics
/// Panics (via `unwrap`) if no IRQ handler has been registered for this CPU.
#[unsafe(no_mangle)]
fn current_el_irq_handler(_tf: &mut TrapFrame) {
    // NOTE(review): `current_ref_raw` reads this CPU's per-CPU slot; assumed
    // sound here because this runs on the trapping CPU — confirm in pcpu.rs.
    unsafe { crate::pcpu::IRQ_HANDLER.current_ref_raw() }
        .get()
        .unwrap()()
}
282 |
283 | /// Handles synchronous exceptions that occur from the current exception level.
284 | #[unsafe(no_mangle)]
285 | fn current_el_sync_handler(tf: &mut TrapFrame) {
286 | panic!(
287 | "Unhandled synchronous exception from current EL: {:#x?}",
288 | tf
289 | );
290 | }
291 |
/// A trampoline function for sp switching during handling VM exits,
/// when **there is an active VCPU running**, which means that the host context is stored
/// into the host stack in the `run_guest` function.
///
/// # Functionality
///
/// 1. **Restore Previous Host Stack Pointer:**
///    - The guest context frame is already saved by the `SAVE_REGS_FROM_EL1` macro in exception.S.
///      This function firstly adjusts the `sp` to skip the exception frame
///      (adding `34 * 8` to the stack pointer) according to the memory layout of the `Aarch64VCpu` struct,
///      which makes the current `sp` point to the address of `host_stack_top`.
///      The host stack top value is restored by `ldr`.
///
/// 2. **Restore Host Context:**
///    - The `restore_regs_from_stack!()` macro is invoked to restore the host function context
///      from the stack. This macro handles the restoration of the host's callee-saved general-purpose
///      registers (`x19` to `x30`).
///
/// 3. **Restore Host Control Flow:**
///    - The `ret` instruction is used to return control to the host context after
///      the guest context has been saved in the `Aarch64VCpu` struct and the host context restored.
///      Finally the control flow is returned back to `Aarch64VCpu.run()` in [vcpu.rs].
///
/// # Notes
///
/// - This function is typically invoked when a VM exit occurs, requiring the
///   hypervisor to switch context from the guest to the host. The precise control
///   over stack and register management ensures that the transition is smooth and
///   that the host can correctly resume execution.
///
/// - `naked_asm!` makes the asm block diverging, matching the `-> !` return
///   type: control never returns to this function's caller; it is handed
///   back to the host via `ret`.
///
/// - This function is not typically called directly from Rust code. Instead, it is
///   invoked as part of the low-level hypervisor or VM exit handling routines.
#[naked]
#[unsafe(no_mangle)]
unsafe extern "C" fn vmexit_trampoline() -> ! {
    unsafe {
        core::arch::naked_asm!(
            // Currently `sp` points to the base address of `Aarch64VCpu.ctx`, which stores guest's `TrapFrame`.
            "add x9, sp, 34 * 8", // Skip the exception frame.
            // Currently `x9` points to `&Aarch64VCpu.host_stack_top`, see `run_guest()` in vcpu.rs.
            "ldr x10, [x9]", // Get `host_stack_top` value from `&Aarch64VCpu.host_stack_top`.
            "mov sp, x10", // Set `sp` as the host stack top.
            restore_regs_from_stack!(), // Restore host function context frame.
            "ret", // Control flow is handed back to Aarch64VCpu.run(), simulating the normal return of the `run_guest` function.
        )
    }
}
342 |
343 | /// Deal with invalid aarch64 exception.
344 | #[unsafe(no_mangle)]
345 | fn invalid_exception_el2(tf: &mut TrapFrame, kind: TrapKind, source: TrapSource) {
346 | panic!(
347 | "Invalid exception {:?} from {:?}:\n{:#x?}",
348 | kind, source, tf
349 | );
350 | }
351 |
--------------------------------------------------------------------------------
/src/exception_utils.rs:
--------------------------------------------------------------------------------
1 | use aarch64_cpu::registers::{ESR_EL2, FAR_EL2, PAR_EL1};
2 | use tock_registers::interfaces::*;
3 |
4 | use axaddrspace::GuestPhysAddr;
5 | use axerrno::{AxResult, ax_err};
6 |
7 | /// Retrieves the Exception Syndrome Register (ESR) value from EL2.
8 | ///
9 | /// # Returns
10 | /// The value of the ESR_EL2 register as a `usize`.
11 | #[inline(always)]
12 | pub fn exception_esr() -> usize {
13 | ESR_EL2.get() as usize
14 | }
15 |
/// Reads the Exception Class (EC) field from the ESR_EL2 register.
///
/// # Returns
/// An `Option` containing the enum value representing the exception class,
/// or `None` if the raw EC bits do not correspond to a known class.
#[inline(always)]
pub fn exception_class() -> Option {
    ESR_EL2.read_as_enum(ESR_EL2::EC)
}
24 |
25 | /// Reads the Exception Class (EC) field from the ESR_EL2 register and returns it as a raw value.
26 | ///
27 | /// # Returns
28 | /// The value of the EC field in the ESR_EL2 register as a `usize`.
29 | #[inline(always)]
30 | pub fn exception_class_value() -> usize {
31 | ESR_EL2.read(ESR_EL2::EC) as usize
32 | }
33 |
/// Retrieves the Hypervisor IPA Fault Address Register (HPFAR_EL2) value.
///
/// The register is read via inline assembly.
///
/// # Returns
/// The value of the HPFAR_EL2 register as a `usize`.
#[inline(always)]
fn exception_hpfar() -> usize {
    let value: u64;
    // SAFETY: a plain `mrs` read of HPFAR_EL2 has no side effects.
    unsafe {
        core::arch::asm!("mrs {}, HPFAR_EL2", out(reg) value);
    }
    value as usize
}
48 |
/// Bit position of the S1PTW bit in ESR_ELx (abort taken during a stage-1
/// translation table walk).
#[allow(non_upper_case_globals)]
const ESR_ELx_S1PTW_SHIFT: usize = 7;
/// Mask for the S1PTW bit in ESR_ELx: set when the abort occurred on a
/// stage-1 translation table walk (rather than on the access itself).
#[allow(non_upper_case_globals)]
const ESR_ELx_S1PTW: usize = 1 << ESR_ELx_S1PTW_SHIFT;
55 |
/// Macro for executing an ARM Address Translation (AT) instruction.
///
/// The macro takes two arguments:
/// - `$at_op`: The AT operation to perform (e.g., `"s1e1r"`).
/// - `$addr`: The address on which to perform the AT operation.
///
/// An `isb` is issued after the translation so that the result is observable
/// in `PAR_EL1` before it is read.
///
/// This macro is unsafe because it directly executes assembly code.
///
/// Example usage:
/// ```rust
/// arm_at!("s1e1r", address);
/// ```
macro_rules! arm_at {
    ($at_op:expr, $addr:expr) => {
        unsafe {
            core::arch::asm!(concat!("AT ", $at_op, ", {0}"), in(reg) $addr, options(nomem, nostack));
            core::arch::asm!("isb");
        }
    };
}
76 |
/// Translates a Fault Address Register (FAR) value to a Hypervisor IPA Fault Address
/// Register (HPFAR) value.
///
/// This function uses the ARM Address Translation (AT) instruction to translate
/// the provided FAR. The translation result is delivered in the Physical
/// Address Register (PAR_EL1) and is then converted to the HPFAR format by
/// `par_to_far`. The previous `PAR_EL1` contents are saved and restored around
/// the translation so the register is not clobbered for other users.
///
/// # Arguments
/// * `far` - The Fault Address Register value that needs to be translated.
///
/// # Returns
/// * `AxResult` - The translated HPFAR value, or an error if translation fails.
///
/// # Errors
/// Returns a `BadState` error if the translation is aborted (indicated by the `F` bit in `PAR_EL1`).
fn translate_far_to_hpfar(far: usize) -> AxResult {
    /*
     * We have
     * PAR[PA_Shift - 1 : 12] = PA[PA_Shift - 1 : 12]
     * HPFAR[PA_Shift - 9 : 4] = FIPA[PA_Shift - 1 : 12]
     */
    // #define PAR_TO_HPFAR(par) (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
    // Convert a PAR_EL1 value to the HPFAR_EL2 layout (assumes a 52-bit PA space).
    fn par_to_far(par: u64) -> u64 {
        let mask = ((1 << (52 - 12)) - 1) << 12;
        (par & mask) >> 8
    }

    // Save PAR_EL1 so the AT instruction below does not clobber it.
    let par = PAR_EL1.get();
    arm_at!("s1e1r", far);
    let tmp = PAR_EL1.get();
    // Restore the saved PAR_EL1 before inspecting the translation result.
    PAR_EL1.set(par);
    if (tmp & PAR_EL1::F::TranslationAborted.value) != 0 {
        ax_err!(BadState, "PAR_EL1::F::TranslationAborted value")
    } else {
        Ok(par_to_far(tmp) as usize)
    }
}
114 |
/// Retrieves the fault address that caused an exception.
///
/// This function returns the Guest Physical Address (GPA) that caused the
/// exception. The address is determined based on the `FAR_EL2` and `HPFAR_EL2`
/// registers. If the exception is a permission fault not taken on a stage-1
/// table walk, the function re-translates `FAR_EL2` instead of trusting
/// `HPFAR_EL2`.
///
/// - `far` is the Fault Address Register (FAR_EL2) value.
/// - `hpfar` is the Hypervisor IPA Fault Address Register (HPFAR_EL2) value,
///   which might be derived from `FAR_EL2` if certain conditions are met.
///
/// The final address returned is computed by combining the page offset from
/// `FAR_EL2` with the page number from `HPFAR_EL2`.
///
/// # Returns
/// * `AxResult` - The guest physical address that caused the exception, wrapped in an `AxResult`.
#[inline(always)]
pub fn exception_fault_addr() -> AxResult {
    let far = FAR_EL2.get() as usize;
    // For a permission fault that was not taken during a stage-1 table walk,
    // HPFAR_EL2 is not guaranteed to hold a valid IPA (cf. the equivalent logic
    // in Linux KVM), so the IPA is recomputed from FAR_EL2 via an AT translation.
    let hpfar =
        if (exception_esr() & ESR_ELx_S1PTW) == 0 && exception_data_abort_is_permission_fault() {
            translate_far_to_hpfar(far)?
        } else {
            exception_hpfar()
        };
    // Page offset from FAR_EL2; page frame from the HPFAR value (HPFAR holds
    // FIPA[..:12] starting at bit 4, hence the shift by 8 to realign).
    Ok(GuestPhysAddr::from((far & 0xfff) | (hpfar << 8)))
}
143 |
144 | /// Determines the instruction length based on the ESR_EL2 register.
145 | ///
146 | /// # Returns
147 | /// - `1` if the instruction is 32-bit.
148 | /// - `0` if the instruction is 16-bit.
149 | #[inline(always)]
150 | fn exception_instruction_length() -> usize {
151 | (exception_esr() >> 25) & 1
152 | }
153 |
154 | /// Calculates the step size to the next instruction after an exception.
155 | ///
156 | /// # Returns
157 | /// The step size to the next instruction:
158 | /// - `4` for a 32-bit instruction.
159 | /// - `2` for a 16-bit instruction.
160 | #[inline(always)]
161 | pub fn exception_next_instruction_step() -> usize {
162 | 2 + 2 * exception_instruction_length()
163 | }
164 |
165 | /// Retrieves the Instruction Specific Syndrome (ISS) field from the ESR_EL2 register.
166 | ///
167 | /// # Returns
168 | /// The value of the ISS field in the ESR_EL2 register as a `usize`.
169 | #[inline(always)]
170 | pub fn exception_iss() -> usize {
171 | ESR_EL2.read(ESR_EL2::ISS) as usize
172 | }
173 |
/// Returns whether a trapped system-register access was a write.
///
/// Bit 0 of the ISS encodes the direction: `0` means write (MSR), `1` means read (MRS).
#[inline(always)]
pub fn exception_sysreg_direction_write(iss: u64) -> bool {
    const DIRECTION_BIT: u64 = 0b1;
    iss & DIRECTION_BIT == 0
}
179 |
/// Extracts the general-purpose register index (Rt, ISS bits [9:5]) involved in a
/// trapped system-register access.
#[inline(always)]
pub fn exception_sysreg_gpr(iss: u64) -> u64 {
    const RT_SHIFT: u64 = 5;
    const RT_MASK: u64 = 0b1_1111;
    (iss >> RT_SHIFT) & RT_MASK
}
187 |
/// Extracts the system-register "address" bits from an ISS value.
///
/// The numbering of `SystemReg` follows the order specified in the Instruction Set
/// Specification (ISS), formatted as `000000`:
/// (Op0[21..20] + Op2[19..17] + Op1[16..14] + CRn[13..10]) + CRm[4..1]
#[inline(always)]
pub const fn exception_sysreg_addr(iss: usize) -> usize {
    // Keep Op0/Op2/Op1/CRn (bits [21:10]) and CRm (bits [4:1]); drop everything else.
    const ADDR_MASK: usize = (0xfff << 10) | (0xf << 1);
    iss & ADDR_MASK
}
196 |
197 | /// Checks if the data abort exception was caused by a permission fault.
198 | ///
199 | /// # Returns
200 | /// - `true` if the exception was caused by a permission fault.
201 | /// - `false` otherwise.
202 | #[inline(always)]
203 | pub fn exception_data_abort_is_permission_fault() -> bool {
204 | (exception_iss() & 0b111111 & (0xf << 2)) == 12
205 | }
206 |
207 | /// Determines the access width of a data abort exception.
208 | ///
209 | /// # Returns
210 | /// The access width in bytes (1, 2, 4, or 8 bytes).
211 | #[inline(always)]
212 | pub fn exception_data_abort_access_width() -> usize {
213 | 1 << ((exception_iss() >> 22) & 0b11)
214 | }
215 |
216 | /// Determines the DA can be handled
217 | #[inline(always)]
218 | pub fn exception_data_abort_handleable() -> bool {
219 | (!(exception_iss() & (1 << 10)) | (exception_iss() & (1 << 24))) != 0
220 | }
221 |
222 | #[inline(always)]
223 | pub fn exception_data_abort_is_translate_fault() -> bool {
224 | (exception_iss() & 0b111111 & (0xf << 2)) == 4
225 | }
226 |
227 | /// Checks if the data abort exception was caused by a write access.
228 | ///
229 | /// # Returns
230 | /// - `true` if the exception was caused by a write access.
231 | /// - `false` if it was caused by a read access.
232 | #[inline(always)]
233 | pub fn exception_data_abort_access_is_write() -> bool {
234 | (exception_iss() & (1 << 6)) != 0
235 | }
236 |
237 | /// Retrieves the register index involved in a data abort exception.
238 | ///
239 | /// # Returns
240 | /// The index of the register (0-31) involved in the access.
241 | #[inline(always)]
242 | pub fn exception_data_abort_access_reg() -> usize {
243 | (exception_iss() >> 16) & 0b11111
244 | }
245 |
246 | /// Determines the width of the register involved in a data abort exception.
247 | ///
248 | /// # Returns
249 | /// The width of the register in bytes (4 or 8 bytes).
250 | #[allow(unused)]
251 | #[inline(always)]
252 | pub fn exception_data_abort_access_reg_width() -> usize {
253 | 4 + 4 * ((exception_iss() >> 15) & 1)
254 | }
255 |
256 | /// Checks if the data accessed during a data abort exception is sign-extended.
257 | ///
258 | /// # Returns
259 | /// - `true` if the data is sign-extended.
260 | /// - `false` otherwise.
261 | #[allow(unused)]
262 | #[inline(always)]
263 | pub fn exception_data_abort_access_is_sign_ext() -> bool {
264 | ((exception_iss() >> 21) & 1) != 0
265 | }
266 |
/// Macro to save the host function context to the stack.
///
/// This macro saves the values of the callee-saved registers (`x19` to `x30`) to the stack.
/// The stack pointer (`sp`) is adjusted accordingly (12 slots of 8 bytes)
/// to make space for the saved registers.
///
/// ## Note
///
/// This macro should be used in conjunction with `restore_regs_from_stack!` to ensure that
/// the saved registers are properly restored when needed,
/// so the control flow can be returned to `Aarch64VCpu.run()` in `vcpu.rs`.
macro_rules! save_regs_to_stack {
    () => {
        "
        sub sp, sp, 12 * 8
        stp x29, x30, [sp, 10 * 8]
        stp x27, x28, [sp, 8 * 8]
        stp x25, x26, [sp, 6 * 8]
        stp x23, x24, [sp, 4 * 8]
        stp x21, x22, [sp, 2 * 8]
        stp x19, x20, [sp]"
    };
}
290 |
/// Macro to restore the host function context from the stack.
///
/// This macro restores the values of the callee-saved general-purpose registers (`x19` to `x30`) from the stack.
/// The stack pointer (`sp`) is adjusted back after restoring the registers.
///
/// ## Note
///
/// This macro is invoked by `vmexit_trampoline()` in exception.rs;
/// it should only be used after `save_regs_to_stack!` to correctly restore the control flow of `Aarch64VCpu.run()`.
macro_rules! restore_regs_from_stack {
    () => {
        "
        ldp x19, x20, [sp]
        ldp x21, x22, [sp, 2 * 8]
        ldp x23, x24, [sp, 4 * 8]
        ldp x25, x26, [sp, 6 * 8]
        ldp x27, x28, [sp, 8 * 8]
        ldp x29, x30, [sp, 10 * 8]
        add sp, sp, 12 * 8"
    };
}
312 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![no_std]
2 | #![feature(naked_functions)]
3 | #![feature(doc_cfg)]
4 | #![doc = include_str!("../README.md")]
5 |
6 | #[macro_use]
7 | extern crate log;
8 |
9 | mod context_frame;
10 | #[macro_use]
11 | mod exception_utils;
12 | mod exception;
13 | mod pcpu;
14 | mod smc;
15 | mod vcpu;
16 |
17 | pub use self::pcpu::Aarch64PerCpu;
18 | pub use self::vcpu::{Aarch64VCpu, Aarch64VCpuCreateConfig};
19 |
/// Context (trap) frame for aarch64: the register state saved and restored on a trap.
pub type TrapFrame = context_frame::Aarch64ContextFrame;
22 |
/// Returns whether the current platform supports the virtualization extension.
///
/// Hint:
/// On Cortex-A78,
/// [ID_AA64MMFR1_EL1](https://developer.arm.com/documentation/101430/0102/Register-descriptions/AArch64-system-registers/ID-AA64MMFR1-EL1--AArch64-Memory-Model-Feature-Register-1--EL1)
/// reports whether Virtualization Host Extensions are implemented.
///
/// Currently this just returns `true` unconditionally.
pub fn has_hardware_support() -> bool {
    true
}
33 |
--------------------------------------------------------------------------------
/src/pcpu.rs:
--------------------------------------------------------------------------------
1 | use core::{cell::OnceCell, marker::PhantomData};
2 |
3 | use aarch64_cpu::registers::*;
4 | use tock_registers::interfaces::ReadWriteable;
5 |
6 | use axerrno::AxResult;
7 | use axvcpu::{AxArchPerCpu, AxVCpuHal};
8 |
/// Per-CPU data. A pointer to this struct is loaded into TP when a CPU starts.
///
/// Page-aligned (`align(4096)`); currently holds only the CPU id plus a marker
/// for the host interface type.
#[repr(C)]
#[repr(align(4096))]
pub struct Aarch64PerCpu {
    /// per cpu id
    pub cpu_id: usize,
    // Marker for the host interface (HAL) type parameter; stores no data.
    _phantom: PhantomData,
}
17 |
/// Saved value of `VBAR_EL2` from before `hardware_enable()` replaced it with this
/// crate's vector table; restored by `hardware_disable()`.
#[percpu::def_percpu]
static ORI_EXCEPTION_VECTOR_BASE: usize = 0;
20 |
/// IRQ handler registered by the underlying host OS during per-cpu initialization,
/// for dispatching IRQs to the host OS.
///
/// `IRQ_HANDLER` is a per-cpu variable to avoid the need for an `OnceLock`.
#[percpu::def_percpu]
pub static IRQ_HANDLER: OnceCell<&(dyn Fn() + Send + Sync)> = OnceCell::new();
27 |
unsafe extern "C" {
    /// Exception vector table installed into `VBAR_EL2` by `hardware_enable`;
    /// defined in assembly (see `exception.S`).
    fn exception_vector_base_vcpu();
}
31 |
impl AxArchPerCpu for Aarch64PerCpu {
    /// Creates the per-CPU state for `cpu_id` and registers the host OS's IRQ
    /// handler into the per-CPU [`IRQ_HANDLER`] cell.
    fn new(cpu_id: usize) -> AxResult {
        // Register IRQ handler for this CPU.
        // `set` fails only if a handler was already installed; the result is
        // deliberately discarded so repeated initialization is a no-op.
        // NOTE(review): `irq_hanlder` mirrors the (misspelled) method name of the
        // `AxVCpuHal` trait — confirm against the axvcpu crate before renaming.
        let _ = unsafe { IRQ_HANDLER.current_ref_mut_raw() }
            .set(&|| H::irq_hanlder())
            .map(|_| {});

        Ok(Self {
            cpu_id,
            _phantom: PhantomData,
        })
    }

    /// Virtualization is considered enabled when `HCR_EL2.VM` is set.
    fn is_enabled(&self) -> bool {
        HCR_EL2.is_set(HCR_EL2::VM)
    }

    /// Enables virtualization on this CPU: installs this crate's exception
    /// vector table and turns on stage-2 translation plus IRQ/FIQ/SMC trapping.
    fn hardware_enable(&mut self) -> AxResult {
        // First we save the original `exception_vector_base`.
        // Safety:
        // Todo: take care of `preemption`
        unsafe { ORI_EXCEPTION_VECTOR_BASE.write_current_raw(VBAR_EL2.get() as usize) }

        // Set current `VBAR_EL2` to `exception_vector_base_vcpu`
        // defined in this crate.
        VBAR_EL2.set(exception_vector_base_vcpu as usize as _);

        HCR_EL2.modify(
            HCR_EL2::VM::Enable
                + HCR_EL2::RW::EL1IsAarch64
                + HCR_EL2::IMO::EnableVirtualIRQ
                + HCR_EL2::FMO::EnableVirtualFIQ
                + HCR_EL2::TSC::EnableTrapEl1SmcToEl2,
        );

        Ok(())
    }

    /// Disables virtualization on this CPU and restores the previous vector table.
    fn hardware_disable(&mut self) -> AxResult {
        // Reset `VBAR_EL2` to its previous value.
        // Safety:
        // Todo: take care of `preemption`
        VBAR_EL2.set(unsafe { ORI_EXCEPTION_VECTOR_BASE.read_current_raw() } as _);

        // NOTE(review): `set` rewrites the whole HCR_EL2 register, clearing every
        // bit configured in `hardware_enable` (RW/IMO/FMO/TSC), not just VM —
        // confirm this full reset is intended.
        HCR_EL2.set(HCR_EL2::VM::Disable.into());
        Ok(())
    }
}
80 |
--------------------------------------------------------------------------------
/src/smc.rs:
--------------------------------------------------------------------------------
1 | use core::arch::asm;
2 |
#[inline(never)]
/// Invokes a secure monitor call (SMC).
///
/// Passes `x0`..`x3` in the corresponding registers, executes `smc #0`, and
/// returns the values the secure monitor leaves in `x0`..`x3`.
///
/// # Safety
/// It is unsafe to call this function directly.
/// The caller must ensure that `x0` is a valid SMC function identifier as defined
/// by the SMC Calling Convention, and that the remaining arguments are valid for
/// the specified SMC function.
pub unsafe fn smc_call(x0: u64, x1: u64, x2: u64, x3: u64) -> (u64, u64, u64, u64) {
    let r0;
    let r1;
    let r2;
    let r3;
    unsafe {
        asm!(
            "smc #0",
            inout("x0") x0 => r0,
            inout("x1") x1 => r1,
            inout("x2") x2 => r2,
            inout("x3") x3 => r3,
            options(nomem, nostack)
        );
    }
    (r0, r1, r2, r3)
}
27 |
--------------------------------------------------------------------------------
/src/vcpu.rs:
--------------------------------------------------------------------------------
1 | use core::marker::PhantomData;
2 |
3 | use aarch64_cpu::registers::{CNTHCTL_EL2, HCR_EL2, SP_EL0, SPSR_EL1, VTCR_EL2};
4 | use tock_registers::interfaces::{ReadWriteable, Readable, Writeable};
5 |
6 | use axaddrspace::{GuestPhysAddr, HostPhysAddr};
7 | use axerrno::AxResult;
8 | use axvcpu::{AxVCpuExitReason, AxVCpuHal};
9 |
10 | use crate::TrapFrame;
11 | use crate::context_frame::GuestSystemRegisters;
12 | use crate::exception::{TrapKind, handle_exception_sync};
13 | use crate::exception_utils::exception_class_value;
14 |
/// Per-CPU slot holding the host's `SP_EL0` while a guest runs
/// (saved in `run()` before guest entry, restored in `vmexit_handler`).
#[percpu::def_percpu]
static HOST_SP_EL0: u64 = 0;
17 |
18 | /// Save host's `SP_EL0` to the current percpu region.
19 | unsafe fn save_host_sp_el0() {
20 | unsafe { HOST_SP_EL0.write_current_raw(SP_EL0.get()) }
21 | }
22 |
23 | /// Restore host's `SP_EL0` from the current percpu region.
24 | unsafe fn restore_host_sp_el0() {
25 | SP_EL0.set(unsafe { HOST_SP_EL0.read_current_raw() });
26 | }
27 |
/// (v)CPU register state that must be saved or restored when entering/exiting a VM or switching
/// between VMs.
#[repr(C)]
#[derive(Clone, Debug, Copy, Default)]
pub struct VmCpuRegisters {
    /// Guest trap context (general-purpose register frame).
    pub trap_context_regs: TrapFrame,
    /// Virtual machine system-register settings.
    pub vm_system_regs: GuestSystemRegisters,
}
38 |
/// A virtual CPU within a guest.
#[repr(C)]
#[derive(Debug)]
pub struct Aarch64VCpu {
    // DO NOT modify `guest_regs` and `host_stack_top` and their order unless you do know what you are doing!
    // DO NOT add anything before or between them unless you do know what you are doing!
    // This layout is relied upon by assembly: `vmexit_trampoline` in exception.rs
    // locates `host_stack_top` as `sp + 34 * 8` from the base of `ctx`.
    /// Guest trap frame, saved/restored on every VM exit/entry.
    ctx: TrapFrame,
    /// Host stack top recorded by `run_guest`; read back by `vmexit_trampoline`
    /// to restore the host stack after a VM exit.
    host_stack_top: u64,
    /// Guest system-register context, stored/restored around guest execution.
    guest_system_regs: GuestSystemRegisters,
    /// The MPIDR_EL1 value for the vCPU.
    mpidr: u64,
    _phantom: PhantomData,
}
52 |
/// Configuration for creating a new `Aarch64VCpu`.
#[derive(Clone, Debug, Default)]
pub struct Aarch64VCpuCreateConfig {
    /// The MPIDR_EL1 value for the new vCPU,
    /// which is used to identify the CPU in a multiprocessor system.
    /// Note: mind CPU cluster.
    pub mpidr_el1: u64,
    /// The address of the device tree blob,
    /// passed to the guest as its boot argument (see `Aarch64VCpu::new`).
    pub dtb_addr: usize,
}
63 |
impl axvcpu::AxArchVCpu for Aarch64VCpu {
    type CreateConfig = Aarch64VCpuCreateConfig;

    type SetupConfig = ();

    /// Creates a vCPU with a default trap frame; the DTB address from the config
    /// is installed as the guest's boot argument.
    fn new(config: Self::CreateConfig) -> AxResult {
        let mut ctx = TrapFrame::default();
        ctx.set_argument(config.dtb_addr);

        Ok(Self {
            ctx,
            host_stack_top: 0,
            guest_system_regs: GuestSystemRegisters::default(),
            mpidr: config.mpidr_el1,
            _phantom: PhantomData,
        })
    }

    /// Initializes guest PSTATE and the VM system-register context.
    fn setup(&mut self, _config: Self::SetupConfig) -> AxResult {
        self.init_hv();
        Ok(())
    }

    /// Sets the guest's entry point (the exception-return PC).
    fn set_entry(&mut self, entry: GuestPhysAddr) -> AxResult {
        debug!("set vcpu entry:{:?}", entry);
        self.set_elr(entry.as_usize());
        Ok(())
    }

    /// Stores the stage-2 page-table root into the guest system-register context
    /// (`vttbr_el2`), to be loaded when those registers are restored.
    fn set_ept_root(&mut self, ept_root: HostPhysAddr) -> AxResult {
        debug!("set vcpu ept root:{:#x}", ept_root);
        self.guest_system_regs.vttbr_el2 = ept_root.as_usize() as u64;
        Ok(())
    }

    /// Enters the guest and returns on the next VM exit, which is translated
    /// into an exit reason for the hypervisor by `vmexit_handler`.
    fn run(&mut self) -> AxResult {
        // Run guest.
        let exit_reson = unsafe {
            // Save host SP_EL0 to the percpu region because it's used as the current task ptr.
            // This has to be done before vm system regs are restored.
            save_host_sp_el0();
            self.restore_vm_system_regs();
            self.run_guest()
        };

        let trap_kind = TrapKind::try_from(exit_reson as u8).expect("Invalid TrapKind");
        self.vmexit_handler(trap_kind)
    }

    fn bind(&mut self) -> AxResult {
        Ok(())
    }

    fn unbind(&mut self) -> AxResult {
        Ok(())
    }

    /// Writes general-purpose register `idx` in the guest context.
    fn set_gpr(&mut self, idx: usize, val: usize) {
        self.ctx.set_gpr(idx, val);
    }
}
125 |
126 | // Private function
127 | impl Aarch64VCpu {
128 | fn init_hv(&mut self) {
129 | self.ctx.spsr = (SPSR_EL1::M::EL1h
130 | + SPSR_EL1::I::Masked
131 | + SPSR_EL1::F::Masked
132 | + SPSR_EL1::A::Masked
133 | + SPSR_EL1::D::Masked)
134 | .value;
135 | self.init_vm_context();
136 | }
137 |
138 | /// Init guest context. Also set some el2 register value.
139 | fn init_vm_context(&mut self) {
140 | CNTHCTL_EL2.modify(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);
141 | self.guest_system_regs.cntvoff_el2 = 0;
142 | self.guest_system_regs.cntkctl_el1 = 0;
143 |
144 | self.guest_system_regs.sctlr_el1 = 0x30C50830;
145 | self.guest_system_regs.pmcr_el0 = 0;
146 | self.guest_system_regs.vtcr_el2 = (VTCR_EL2::PS::PA_40B_1TB
147 | + VTCR_EL2::TG0::Granule4KB
148 | + VTCR_EL2::SH0::Inner
149 | + VTCR_EL2::ORGN0::NormalWBRAWA
150 | + VTCR_EL2::IRGN0::NormalWBRAWA
151 | + VTCR_EL2::SL0.val(0b01)
152 | + VTCR_EL2::T0SZ.val(64 - 39))
153 | .into();
154 | self.guest_system_regs.hcr_el2 =
155 | (HCR_EL2::VM::Enable + HCR_EL2::RW::EL1IsAarch64 + HCR_EL2::TSC::EnableTrapEl1SmcToEl2)
156 | .into();
157 | // self.system_regs.hcr_el2 |= 1<<27;
158 | // + HCR_EL2::IMO::EnableVirtualIRQ).into();
159 |
160 | // Set VMPIDR_EL2, which provides the value of the Virtualization Multiprocessor ID.
161 | // This is the value returned by Non-secure EL1 reads of MPIDR.
162 | let mut vmpidr = 1 << 31;
163 | // Note: mind CPU cluster here.
164 | vmpidr |= self.mpidr;
165 | self.guest_system_regs.vmpidr_el2 = vmpidr;
166 | }
167 |
168 | /// Set exception return pc
169 | fn set_elr(&mut self, elr: usize) {
170 | self.ctx.set_exception_pc(elr);
171 | }
172 |
173 | /// Get general purpose register
174 | #[allow(unused)]
175 | fn get_gpr(&self, idx: usize) {
176 | self.ctx.gpr(idx);
177 | }
178 | }
179 |
/// Private functions related to vcpu runtime control flow.
impl Aarch64VCpu {
    /// Save host context and run guest.
    ///
    /// When a VM-Exit happens while the guest's vCPU is running, control flow is
    /// redirected back to this function's caller via `vmexit_trampoline` in
    /// exception.rs, which restores the registers saved below and `ret`s.
    #[naked]
    unsafe extern "C" fn run_guest(&mut self) -> usize {
        // Fixes: https://github.com/arceos-hypervisor/arm_vcpu/issues/22
        //
        // The original issue seems to be caused by an unexpected compiler optimization that takes
        // the dummy return value `0` of `run_guest` as the actual return value. By replacing the
        // original `run_guest` with the current naked one, we eliminate the dummy code path of the
        // original version, and ensure that the compiler does not perform any unexpected return
        // value optimization.
        unsafe {
            core::arch::naked_asm!(
                // Save host context (callee-saved x19..x30) onto the host stack.
                save_regs_to_stack!(),
                // Save current host stack top to `self.host_stack_top`.
                //
                // 'extern "C"' here specifies the aapcs64 calling convention, according to which
                // the first and only parameter, the pointer of self, should be in x0:
                "mov x9, sp",
                "add x0, x0, {host_stack_top_offset}",
                "str x9, [x0]",
                // Go to `context_vm_entry`.
                "b context_vm_entry",
                // Panic if the control flow comes back here, which should never happen.
                "b {run_guest_panic}",
                host_stack_top_offset = const core::mem::size_of::(),
                run_guest_panic = sym Self::run_guest_panic,
            );
        }
    }

    /// This function is called if control flow ever comes back to `run_guest`,
    /// to provide an error message for debugging purposes.
    ///
    /// This function may fail as the stack may have been corrupted when this function is called.
    /// But we won't handle it here for now.
    unsafe fn run_guest_panic() -> ! {
        panic!("run_guest_panic");
    }

    /// Restores guest system control registers, then invalidates the instruction
    /// cache and EL1/EL2 TLBs so the new stage-2 mappings take effect.
    unsafe fn restore_vm_system_regs(&mut self) {
        unsafe {
            // load system regs
            core::arch::asm!(
                "
            mov x3, xzr // Trap nothing from EL1 to El2.
            msr cptr_el2, x3"
            );
            self.guest_system_regs.restore();
            core::arch::asm!(
                "
            ic iallu
            tlbi alle2
            tlbi alle1 // Flush tlb
            dsb nsh
            isb"
            );
        }
    }

    /// Handle VM-Exits.
    ///
    /// Parameters:
    /// - `exit_reason`: The reason why the VM-Exit happened in [`TrapKind`].
    ///
    /// Returns:
    /// - [`AxVCpuExitReason`]: a wrapped VM-Exit reason needing to be handled by the hypervisor.
    ///
    /// This function may panic for unhandled exceptions.
    fn vmexit_handler(&mut self, exit_reason: TrapKind) -> AxResult {
        trace!(
            "Aarch64VCpu vmexit_handler() esr:{:#x} ctx:{:#x?}",
            exception_class_value(),
            self.ctx
        );

        unsafe {
            // Store guest system regs
            self.guest_system_regs.store();

            // Store guest `SP_EL0` into the `Aarch64VCpu` struct,
            // which will be restored when the guest is resumed in `exception_return_el2`.
            self.ctx.sp_el0 = self.guest_system_regs.sp_el0;

            // Restore host `SP_EL0`.
            // This has to be done after the guest's SP_EL0 is captured by the store above.
            restore_host_sp_el0();
        }

        match exit_reason {
            TrapKind::Synchronous => handle_exception_sync(&mut self.ctx),
            TrapKind::Irq => Ok(AxVCpuExitReason::ExternalInterrupt {
                vector: H::irq_fetch() as _,
            }),
            _ => panic!("Unhandled exception {:?}", exit_reason),
        }
    }
}
284 |
--------------------------------------------------------------------------------