├── .cargo └── config.toml ├── .github └── workflows │ ├── clippy.yml │ ├── doc.yml │ ├── format.yml │ └── test.yml ├── .gitignore ├── Cargo.toml ├── README.md ├── aarch64_qemuvirt ├── .cargo │ └── config.toml ├── Cargo.toml ├── build.rs └── src │ ├── aarch64_qemuvirt.ld │ └── main.rs ├── flake.lock ├── flake.nix ├── hal_aarch64 ├── Cargo.toml └── src │ ├── cpu.rs │ ├── devices │ ├── gicv2.rs │ └── mod.rs │ ├── exceptions.S │ ├── irq.rs │ ├── lib.rs │ └── mm │ ├── mod.rs │ └── pgt48.rs ├── hal_core ├── Cargo.toml └── src │ ├── lib.rs │ └── mm.rs ├── hal_riscv64 ├── Cargo.toml └── src │ ├── cpu.rs │ ├── irq.rs │ ├── lib.rs │ ├── mm │ ├── mod.rs │ └── sv39.rs │ ├── plic.rs │ └── registers.rs ├── kernel ├── Cargo.lock ├── Cargo.toml ├── fixtures │ ├── Makefile │ └── small.S └── src │ ├── device_tree.rs │ ├── driver_manager.rs │ ├── drivers │ ├── mod.rs │ ├── ns16550.rs │ ├── null_uart.rs │ ├── pl011.rs │ └── qemuexit.rs │ ├── error.rs │ ├── executable │ ├── elf.rs │ └── mod.rs │ ├── generic_main.rs │ ├── globals.rs │ ├── kernel_console.rs │ ├── kernel_tests.rs │ ├── lib.rs │ ├── mm │ ├── binary_buddy_allocator.rs │ ├── mod.rs │ └── physical_memory_manager.rs │ ├── panic.rs │ ├── tests.rs │ └── utils │ ├── lock.rs │ └── mod.rs ├── riscv64_qemuvirt ├── .cargo │ └── config.toml ├── Cargo.toml ├── build.rs └── src │ ├── main.rs │ └── riscv64_qemuvirt.ld └── tests ├── .cargo └── config ├── Cargo.toml └── src └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [unstable] 2 | bindeps = true 3 | -------------------------------------------------------------------------------- /.github/workflows/clippy.yml: -------------------------------------------------------------------------------- 1 | name: Clippy 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | main 8 | 9 | jobs: 10 | clippy_riscv64: 11 | runs-on: ubuntu-latest 12 | env: 13 | TARGET: riscv64gc-unknown-none-elf 14 | steps: 15 | - uses: actions/checkout@v2 16 | 17 | - uses: cachix/install-nix-action@v27 18 | 19 | - name: Run clippy 20 | run: nix develop --command sh -c " 21 | cd riscv64_qemuvirt && 22 | cargo clippy --all-features --target ${{ env.TARGET }} 23 | " 24 | 25 | clippy_aarch64: 26 | runs-on: ubuntu-latest 27 | env: 28 | TARGET: aarch64-unknown-none 29 | steps: 30 | - uses: actions/checkout@v2 31 | 32 | - uses: cachix/install-nix-action@v27 33 | 34 | - name: Run clippy 35 | run: nix develop --command sh -c " 36 | cd aarch64_qemuvirt && 37 | cargo clippy --all-features --target ${{ env.TARGET }} 38 | " 39 | -------------------------------------------------------------------------------- /.github/workflows/doc.yml: -------------------------------------------------------------------------------- 1 | name: Doc 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | main 8 | 9 | jobs: 10 | doc_riscv64: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | target: 15 | - aarch64-unknown-none 16 | - riscv64gc-unknown-none-elf 17 | steps: 18 | - uses: actions/checkout@v2 19 | 20 | - uses: cachix/install-nix-action@v27 21 | 22 | - name: Build doc 23 | run: nix develop --command sh -c " 24 | cd kernel && 25 | cargo doc --target ${{ matrix.target }} --all-features 26 | " 27 | -------------------------------------------------------------------------------- /.github/workflows/format.yml: -------------------------------------------------------------------------------- 1 | name: Format 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | main 8 | 9 | jobs: 10 | 
format: 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - uses: cachix/install-nix-action@v27 17 | 18 | - name: Format 19 | run: nix develop --command sh -c " 20 | cargo fmt -- --check 21 | " 22 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | main 8 | 9 | jobs: 10 | test_riscv64: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - uses: cachix/install-nix-action@v27 16 | 17 | - name: Run tests 18 | run: nix develop --command sh -c " 19 | cd riscv64_qemuvirt && 20 | cargo run -F launch_tests 21 | " 22 | 23 | test_aarch64: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v2 27 | 28 | - uses: cachix/install-nix-action@v27 29 | 30 | - name: Enter nix 31 | run: nix develop 32 | 33 | - name: Run tests 34 | run: nix develop --command sh -c " 35 | cd aarch64_qemuvirt && 36 | cargo run -F launch_tests 37 | " 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | # Generated memory.x for ARM32 platforms 17 | memory.x 18 | 19 | # Ignore tags 20 | tags 21 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | 4 | members = [ 5 | "kernel", 6 | "hal_core", 7 | "hal_aarch64", 8 | "hal_riscv64", 9 | "aarch64_qemuvirt", 10 | "riscv64_qemuvirt", 11 | "tests", 12 | ] 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # goose 2 | 3 | A micro Kernel written in Rust. The goal is to have a small number of syscall 4 | with a strong emphasis on IPC 5 | 6 | ## Roadmap 7 | 8 | - [ ] Virtual Memory Manager (in progress) 9 | - [ ] Basic in-kernel filesystem 10 | - [ ] Device-tree handling 11 | - [x] In-kernel ELF loader 12 | - [ ] Userland process (in progress) 13 | - [ ] IPC implementation 14 | - [ ] Drivers 15 | - [ ] Driver API 16 | - [ ] Kernel API 17 | 18 | ## Project structure 19 | 20 | The project is divided in 2 components, the kernel and the drivers. Most of the 21 | drivers runs in userland, but some are required to run in kernel just to 22 | provide the basic kernel functionalities (UART, interrupt hardware, ...) 23 | :warning: **At the moment userland drivers are not implemented** 24 | 25 | ## Try it out 26 | Choose your desired board: 27 | - Qemu RISC-V (virt) --> `riscv_qemuvirt` 28 | - Qemu AArch64 (virt) --> `aarch64_qemuvirt` 29 | 30 | Then just run: 31 | ```console 32 | $ cargo run 33 | ``` 34 | 35 | For qemu target it will launch Qemu. 
For other targets the hope is to flash them instead 36 | 37 | ### Requirement 38 | - A rust nightly toolchain 39 | - Clang compiler (for tests) 40 | 41 | #### Nix 42 | If you use Nix you can run `nix develop` to get a shell with everything needed 43 | to test GoOSe 44 | 45 | ### Build 46 | #### Board project 47 | Go to a board project (ex. riscv_qemuvirt) and then: 48 | ```console 49 | $ cargo build 50 | ``` 51 | 52 | #### Kernel, hal, ... 53 | When building, you need to specify which target triplet to use with 54 | `--target `. Here is the list of triplet to use depending on the 55 | targeted architecture: 56 | - RISC-V --> `riscv64gc-unknown-none-elf` 57 | - AArch64 --> `aarch64-unknown-none` 58 | 59 | Go to the component folder and then: 60 | ```console 61 | $ cargo build --target 62 | ``` 63 | 64 | ### Tests 65 | GoOSe also comes with unit tests that run directly on hardware and output the 66 | result over serial. When using Qemu, you can also have an exit code != 0 on 67 | failure. 68 | 69 | ```console 70 | $ make -C kernel/fixtures 71 | ... 72 | $ cargo tests --bin --target 73 | ... 74 | ``` 75 | :warning: **Tests might be slow to run as GoOSe is not really optimized. You can 76 | append `--release` to the previous cargo command line to boost performance but 77 | please be aware that some test might pass in debug and not in release. Feel 78 | free to open an issue if you encounter such a case** 79 | -------------------------------------------------------------------------------- /aarch64_qemuvirt/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "aarch64-unknown-none" 3 | 4 | [target.aarch64-unknown-none] 5 | runner = 'qemu-system-aarch64 -M virt,gic-version=2 -cpu cortex-a53 -m 256M -nographic -semihosting -kernel ' 6 | -------------------------------------------------------------------------------- /aarch64_qemuvirt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aarch64_qemuvirt" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | kernel = { path = "../kernel", features = ["aarch64_pgt48oa"] } 10 | log = "0.4" 11 | 12 | [features] 13 | launch_tests = [] 14 | -------------------------------------------------------------------------------- /aarch64_qemuvirt/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("cargo:rerun-if-changed=src/aarch64_qemuvirt.ld"); 3 | println!("cargo:rustc-link-arg=-Taarch64_qemuvirt/src/aarch64_qemuvirt.ld"); 4 | } 5 | -------------------------------------------------------------------------------- /aarch64_qemuvirt/src/aarch64_qemuvirt.ld: -------------------------------------------------------------------------------- 1 | ENTRY(_start) 2 | 3 | SECTIONS 4 | { 5 | . = 0x40100000; 6 | 7 | KERNEL_START = . ; 8 | .text : { 9 | *(.text._start); # _start should allways be at the top of all sections 10 | *(.text*); 11 | } 12 | 13 | .data : { 14 | *(.data*); 15 | } 16 | 17 | .sdata : { 18 | *(.sdata*); 19 | } 20 | 21 | .rodata : { 22 | *(.rodata*); 23 | } 24 | 25 | .eh_frame : { 26 | *(.eh_frame*); 27 | } 28 | 29 | .bss : { 30 | *(.bss*); 31 | } 32 | 33 | .sbss : { 34 | *(.sbss*); 35 | } 36 | 37 | . = ALIGN(4096); 38 | STACK_END = . ; 39 | . = . + 1M; 40 | STACK_START = . ; 41 | 42 | KERNEL_END = . 
; 43 | } 44 | -------------------------------------------------------------------------------- /aarch64_qemuvirt/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(naked_functions)] 4 | 5 | #[cfg(not(target_arch = "aarch64"))] 6 | compile_error!("Must be compiled as aarch64"); 7 | 8 | use core::arch::asm; 9 | use kernel::drivers::pl011::Pl011; 10 | 11 | const DTB_ADDR: usize = 0x4000_0000; 12 | 13 | const LAUNCH_TESTS: bool = cfg!(feature = "launch_tests"); 14 | 15 | use log::info; 16 | 17 | #[no_mangle] 18 | extern "C" fn k_main(_device_tree_ptr: usize) -> ! { 19 | kernel::hal::cpu::disable_fp_trapping(); 20 | 21 | static PL011: Pl011 = Pl011::new(0x0900_0000); 22 | kernel::kernel_console::set_earlyinit_console(&PL011); 23 | 24 | kernel::kernel_console::init_logging().unwrap(); 25 | 26 | info!("hello, I am a goOSe! proud member of the gagelen !!!"); 27 | 28 | unsafe { 29 | kernel::hal::irq::init_el1_exception_handlers(); 30 | } 31 | 32 | unsafe { 33 | asm!("isb SY"); 34 | asm!("dmb SY"); 35 | } 36 | 37 | let device_tree = kernel::device_tree::DeviceTree::new(DTB_ADDR).unwrap(); 38 | 39 | kernel::generic_main::generic_main::(device_tree, &[&PL011]); 40 | } 41 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1726560853, 9 | "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1728346266, 24 | "narHash": "sha256-yaic9tiycsdKf8W855+4YR1WON4RdpJmqvj+Bzpq/FY=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "b5c4e15e539e18249d50a41ef14c97f4047bfcb7", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-24.05-small", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "nixpkgs_2": { 38 | "locked": { 39 | "lastModified": 1718428119, 40 | "narHash": "sha256-WdWDpNaq6u1IPtxtYHHWpl5BmabtpmLnMAx0RdJ/vo8=", 41 | "owner": "NixOS", 42 | "repo": "nixpkgs", 43 | "rev": "e6cea36f83499eb4e9cd184c8a8e823296b50ad5", 44 | "type": "github" 45 | }, 46 | "original": { 47 | "owner": "NixOS", 48 | "ref": "nixpkgs-unstable", 49 | "repo": "nixpkgs", 50 | "type": "github" 51 | } 52 | }, 53 | "root": { 54 | "inputs": { 55 | "flake-utils": "flake-utils", 56 | "nixpkgs": "nixpkgs", 57 | "rust-overlay": "rust-overlay" 58 | } 59 | }, 60 | "rust-overlay": { 61 | "inputs": { 62 | "nixpkgs": "nixpkgs_2" 63 | }, 64 | "locked": { 65 | "lastModified": 1728354625, 66 | "narHash": "sha256-r+Sa1NRRT7LXKzCaVaq75l1GdZcegODtF06uaxVVVbI=", 67 | "owner": "oxalica", 68 | "repo": "rust-overlay", 69 | "rev": "d216ade5a0091ce60076bf1f8bc816433a1fc5da", 70 | "type": "github" 71 | }, 72 | "original": { 73 | "owner": "oxalica", 74 | "repo": "rust-overlay", 75 | "type": "github" 76 | } 77 | }, 78 | "systems": { 79 | "locked": { 80 | "lastModified": 1681028828, 81 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 82 | "owner": "nix-systems", 83 | "repo": "default", 84 | "rev": 
"da67096a3b9bf56a91d16901293e51ba5b49a27e", 85 | "type": "github" 86 | }, 87 | "original": { 88 | "owner": "nix-systems", 89 | "repo": "default", 90 | "type": "github" 91 | } 92 | } 93 | }, 94 | "root": "root", 95 | "version": 7 96 | } 97 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "GoOSe devshell"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05-small"; 6 | rust-overlay.url = "github:oxalica/rust-overlay"; 7 | flake-utils.url = "github:numtide/flake-utils"; 8 | }; 9 | 10 | outputs = { self, nixpkgs, rust-overlay, flake-utils, ... }: 11 | flake-utils.lib.eachDefaultSystem (system: 12 | let 13 | overlays = [ (import rust-overlay) ]; 14 | pkgs = import nixpkgs { 15 | inherit system overlays; 16 | }; 17 | in 18 | with pkgs; 19 | { 20 | devShell = mkShell { 21 | buildInputs = [ 22 | (rust-bin.selectLatestNightlyWith (toolchain: toolchain.default.override { 23 | targets = ["aarch64-unknown-none" "riscv64gc-unknown-none-elf"]; 24 | })) 25 | qemu 26 | rust-analyzer 27 | ]; 28 | }; 29 | } 30 | ); 31 | } 32 | 33 | -------------------------------------------------------------------------------- /hal_aarch64/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hal_aarch64" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | hal_core = { path = "../hal_core" } 10 | tock-registers = "0.8" 11 | cortex-a = "8.1" 12 | log = "0.4" 13 | -------------------------------------------------------------------------------- /hal_aarch64/src/cpu.rs: -------------------------------------------------------------------------------- 1 | use cortex_a::{asm, registers::*}; 2 | use tock_registers::interfaces::{ReadWriteable, Writeable}; 3 | 4 | pub fn disable_fp_trapping() { 5 | // Disable trapping of FP instructions. 
6 | // CPACR_EL1.write(CPACR_EL1::FPEN::TrapNothing); 7 | CPACR_EL1.set(0b11 << 20); 8 | } 9 | 10 | pub fn set_physical_timer(delay: usize) { 11 | CNTP_TVAL_EL0.set(delay as u64); 12 | 13 | asm::barrier::isb(asm::barrier::SY); 14 | 15 | CNTP_CTL_EL0.write( 16 | CNTP_CTL_EL0::ENABLE::SET + CNTP_CTL_EL0::IMASK::CLEAR + CNTP_CTL_EL0::ISTATUS::CLEAR, 17 | ); 18 | } 19 | 20 | pub fn clear_physical_timer() { 21 | CNTP_CTL_EL0.modify(CNTP_CTL_EL0::ENABLE::CLEAR); 22 | } 23 | 24 | pub fn unmask_interrupts() { 25 | DAIF.write(DAIF::A::Unmasked + DAIF::I::Unmasked + DAIF::F::Unmasked); 26 | } 27 | -------------------------------------------------------------------------------- /hal_aarch64/src/devices/gicv2.rs: -------------------------------------------------------------------------------- 1 | use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; 2 | use tock_registers::register_bitfields; 3 | use tock_registers::registers::{ReadOnly, ReadWrite}; 4 | 5 | use hal_core::Error; 6 | 7 | pub struct GicV2 { 8 | pub distributor: &'static GicDistributor, 9 | pub cpu: &'static GicCpu, 10 | } 11 | 12 | impl GicV2 { 13 | pub fn new(distributor_base: usize, cpu_base: usize) -> Self { 14 | let distributor = unsafe { 15 | (distributor_base as *const GicDistributor) 16 | .as_ref() 17 | .unwrap() 18 | }; 19 | let cpu = unsafe { (cpu_base as *const GicCpu).as_ref().unwrap() }; 20 | let mut gic = Self { distributor, cpu }; 21 | 22 | gic.init_distributor(); 23 | 24 | gic 25 | } 26 | 27 | pub fn disable_interrupts(&mut self) { 28 | self.distributor 29 | .CTLR 30 | .modify(GICD_CTLR::EnableGrp0::Disable + GICD_CTLR::EnableGrp1::Disable); 31 | } 32 | 33 | pub fn enable_interrupts(&mut self) { 34 | self.distributor 35 | .CTLR 36 | .modify(GICD_CTLR::EnableGrp0::Enable + GICD_CTLR::EnableGrp1::Enable); 37 | } 38 | 39 | pub fn get_int(&mut self) -> Result { 40 | let intno = self.cpu.IAR.get(); 41 | 42 | Ok(intno) 43 | } 44 | 45 | pub fn clear_int(&mut self, int: u32) { 46 | // TODO: check (maybe in the TRM) if this could fail / give an error. 47 | self.cpu.EOIR.modify(GICC_EOIR::EOIINTID.val(int)); 48 | } 49 | 50 | pub fn nlines(&self) -> usize { 51 | let n = self.distributor.TYPER.read(GICD_TYPER::ITLinesNumber) as usize; 52 | 53 | 32 * (n + 1) 54 | } 55 | 56 | /// Put the Gic in a known state. 57 | fn init_distributor(&mut self) { 58 | self.disable_interrupts(); 59 | 60 | for i in 0..(self.nlines() / 32) { 61 | // Disable all interrupts. 62 | // Each bit corresponds to a line, writing 1 disables forwarding of the corresponding 63 | // interrupt line. 64 | self.distributor.ICENABLER[i].set(0xffff_ffff); 65 | 66 | // Clear pending interrupts. 67 | self.distributor.ICPENDR[i].set(0xffff_ffff); 68 | 69 | // Clear active interrupts. 70 | self.distributor.ICACTIVER[i].set(0xffff_ffff); 71 | } 72 | 73 | for i in 0..(self.nlines() / 4) { 74 | // Targets all interrupts to core 0. 75 | self.distributor.ITARGETSR[i].set(0x0101_0101); 76 | } 77 | 78 | for i in 1..(self.nlines() / 16) { 79 | // Set all interrupts to level-triggered. 80 | self.distributor.ICFGR[i].set(0); 81 | } 82 | 83 | // TODO: this should be moved somewhere else so other cores can run it. 84 | self.init_cpu(); 85 | 86 | self.enable_interrupts(); 87 | } 88 | 89 | fn init_cpu(&self) { 90 | // Accept ALL interrupts. 91 | self.cpu.PMR.set(0xff); 92 | 93 | // Set maximum amount of bits to be used for Group priority field. 
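        // (With GICv2, a binary point of 0 is the finest split available: all
        // but the lowest priority bit act as group priority, so preemption is
        // decided on essentially the whole priority value.)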
94 | self.cpu.BPR.set(0x0); 95 | 96 | self.cpu.CTLR.write( 97 | GICC_CTLR::EnableGrp0::Enable + GICC_CTLR::EnableGrp1::Enable + GICC_CTLR::FIQEn.val(0), 98 | ); 99 | } 100 | 101 | pub fn enable_line(&mut self, line: u32) -> Result<(), Error> { 102 | let line = line as usize; 103 | let enable_reg_index = line >> 5; 104 | let enable_bit: u32 = 1u32 << (line % 32); 105 | 106 | self.distributor.ISENABLER[enable_reg_index] 107 | .set(self.distributor.ISENABLER[enable_reg_index].get() | enable_bit); 108 | self.distributor.IPRIORITYR[line].set(0x80); 109 | 110 | Ok(()) 111 | } 112 | } 113 | 114 | #[repr(C)] 115 | #[allow(non_snake_case)] 116 | pub struct GicDistributor { 117 | /// Distributor Control Register 118 | pub CTLR: ReadWrite, 119 | /// Interrupt Controller Type Register 120 | pub TYPER: ReadOnly, 121 | /// Distributor Implementer Identification Register 122 | pub IIDR: ReadOnly, 123 | _reserved1: [u32; 5], 124 | _impdef1: [u32; 8], 125 | _reserved2: [u32; 16], 126 | /// Interrupt Group Registers 127 | pub IGROUPR: [ReadWrite; 32], 128 | // _reserved3: [u32; 31], 129 | /// Interrupt Set-Enable Registers 130 | pub ISENABLER: [ReadWrite; 32], 131 | /// Interrupt Clear-Enable Registers 132 | pub ICENABLER: [ReadWrite; 32], 133 | /// Interrupt Set-Pending Registers 134 | pub ISPENDR: [ReadWrite; 32], 135 | /// Interrupt Clear-Pending Registers 136 | pub ICPENDR: [ReadWrite; 32], 137 | /// Interrupt Set-Active Registers 138 | pub ISACTIVER: [ReadWrite; 32], 139 | /// Interrupt Clear-Active Registers 140 | pub ICACTIVER: [ReadWrite; 32], 141 | /// Interrupt Priority Registers 142 | pub IPRIORITYR: [ReadWrite; 1024], 143 | /// Interrupt Processor Targets Registers 144 | pub ITARGETSR: [ReadWrite; 255], 145 | _reserved5: u32, 146 | /// Interrupt Configuration Registers 147 | pub ICFGR: [ReadWrite; 64], 148 | _implementation_defined2: [u32; 64], 149 | /// Non-secure Access Control Registers, optional 150 | pub NSACR: [ReadWrite; 64], 151 | /// Software Generated Interrupt Register 152 | pub SGIR: ReadWrite, 153 | _reserved6: [u32; 3], 154 | /// SGI Clear-Pending Registers 155 | pub CPENDSGIR: [ReadWrite; 16], 156 | /// SGI Set-Pending Registers 157 | pub SPENDSGIR: [ReadWrite; 16], 158 | _reserved7: [u32; 40], 159 | _impdef3: [u32; 12], 160 | } 161 | 162 | register_bitfields! 
{u32, 163 | pub GICD_CTLR [ 164 | EnableGrp0 OFFSET(0) NUMBITS(1) [ 165 | Disable = 0, 166 | Enable = 1, 167 | ], 168 | EnableGrp1 OFFSET(1) NUMBITS(1) [ 169 | Disable = 0, 170 | Enable = 1, 171 | ] 172 | ], 173 | 174 | pub GICD_TYPER [ 175 | ITLinesNumber OFFSET(0) NUMBITS(5) [], 176 | CPUNumber OFFSET(5) NUMBITS(3) [], 177 | SecurityExtn OFFSET(10) NUMBITS(1) [ 178 | NotImplemented = 0, 179 | Implemented = 1, 180 | ], 181 | LSPI OFFSET(11) NUMBITS(5) [], 182 | ], 183 | 184 | pub GICD_IIDR [ 185 | Implementer OFFSET(0) NUMBITS(12), 186 | Revision OFFSET(12) NUMBITS(4), 187 | Variant OFFSET(16) NUMBITS(4), 188 | ProductID OFFSET(24) NUMBITS(8), 189 | ], 190 | 191 | pub GICD_SGIR [ 192 | SGIINTID OFFSET(0) NUMBITS(4) [], 193 | NSATT OFFSET(15) NUMBITS(1) [], 194 | CPUTargetList OFFSET(16) NUMBITS(8) [], 195 | TargetListFilter OFFSET(24) NUMBITS(2) [], 196 | ], 197 | 198 | pub GICD_ICPIDR2 [ 199 | ArchRev OFFSET(4) NUMBITS(4), 200 | ], 201 | } 202 | 203 | #[repr(C)] 204 | #[allow(non_snake_case)] 205 | pub struct GicCpu { 206 | /// CPU Interface Control Register 207 | pub CTLR: ReadWrite, 208 | /// Interrupt Priority Mask Register 209 | pub PMR: ReadWrite, 210 | /// Binary Point Register 211 | pub BPR: ReadWrite, 212 | /// Interrupt Acknowledge Register 213 | pub IAR: ReadWrite, 214 | /// End of Interrupt Register 215 | pub EOIR: ReadWrite, 216 | /// Running Priority Register 217 | pub RPR: ReadWrite, 218 | /// Highest Priority Pending Interrupt Register 219 | pub HPPIR: ReadWrite, 220 | /// Aliased Binary Point Register 221 | pub ABPR: ReadWrite, 222 | /// Aliased Interrupt Acknowledge Register 223 | pub AIAR: ReadWrite, 224 | /// Aliased End of Interrupt Register 225 | pub AEOIR: ReadWrite, 226 | /// Aliased Highest Priority Pending Interrupt Register 227 | pub AHPPIR: ReadWrite, 228 | } 229 | 230 | register_bitfields! {u32, 231 | pub GICC_CTLR [ 232 | EnableGrp0 OFFSET(0) NUMBITS(1) [ 233 | Disable = 0, 234 | Enable = 1, 235 | ], 236 | EnableGrp1 OFFSET(1) NUMBITS(1) [ 237 | Disable = 0, 238 | Enable = 1, 239 | ], 240 | FIQEn OFFSET(3) NUMBITS(1) [], 241 | FIQBypDisGrp1 OFFSET(5) NUMBITS(1) [], 242 | IRQBypDisGrp1 OFFSET(6) NUMBITS(1) [], 243 | EOImodeNS OFFSET(9) NUMBITS(1) [ 244 | BothPriorityDropAndDeactiveInterrupt = 0, 245 | PriorityDropOnly = 1, 246 | ], 247 | ], 248 | 249 | pub GICC_PMR [ 250 | Priority OFFSET(0) NUMBITS(8) [], 251 | ], 252 | 253 | pub GICC_IAR [ 254 | InterruptID OFFSET(0) NUMBITS(10) [], 255 | ], 256 | 257 | pub GICC_EOIR [ 258 | EOIINTID OFFSET(0) NUMBITS(10) [] 259 | ], 260 | 261 | pub GICC_AHPPIR [ 262 | PENDINTID OFFSET(0) NUMBITS(10) [], 263 | CPUID OFFSET(10) NUMBITS(3) [], 264 | ], 265 | } 266 | -------------------------------------------------------------------------------- /hal_aarch64/src/devices/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod gicv2; 2 | -------------------------------------------------------------------------------- /hal_aarch64/src/exceptions.S: -------------------------------------------------------------------------------- 1 | .section .text 2 | 3 | .macro save_regs 4 | stp x0, x1, [sp, #-16]! 5 | stp x2, x3, [sp, #-16]! 6 | stp x4, x5, [sp, #-16]! 7 | stp x6, x7, [sp, #-16]! 8 | stp x8, x9, [sp, #-16]! 9 | stp x10, x11, [sp, #-16]! 10 | stp x12, x13, [sp, #-16]! 11 | stp x14, x15, [sp, #-16]! 12 | stp x16, x17, [sp, #-16]! 13 | stp x18, x29, [sp, #-16]! 14 | stp x30, xzr, [sp, #-16]! 
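    // x0-x17 (caller-saved), the platform register x18, the frame pointer x29
    // and the link register x30 are stacked; x19-x28 are callee-saved and are
    // preserved by the compiled Rust handler the stub branches to. xzr is
    // paired with x30 only to keep sp 16-byte aligned.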
15 | .endm 16 | 17 | .macro restore_regs 18 | ldp x30, xzr, [sp], #16 19 | ldp x18, x29, [sp], #16 20 | ldp x16, x17, [sp], #16 21 | ldp x14, x15, [sp], #16 22 | ldp x12, x13, [sp], #16 23 | ldp x10, x11, [sp], #16 24 | ldp x8, x9, [sp], #16 25 | ldp x6, x7, [sp], #16 26 | ldp x4, x5, [sp], #16 27 | ldp x2, x3, [sp], #16 28 | ldp x0, x1, [sp], #16 29 | .endm 30 | 31 | .macro gen_stub func 32 | .balign 0x80 33 | asm_\func: 34 | msr spsel, xzr 35 | save_regs 36 | bl \func 37 | restore_regs 38 | eret 39 | .endm 40 | 41 | .balign 0x800 42 | el1_vector_table: 43 | 44 | // Current EL with SP0 45 | gen_stub sync_current_el_sp0 46 | gen_stub irq_current_el_sp0 47 | gen_stub fiq_current_el_sp0 48 | gen_stub serror_current_el_sp0 49 | 50 | // Current EL with SPx 51 | gen_stub sync_current_el_spx 52 | gen_stub irq_current_el_spx 53 | gen_stub fiq_current_el_spx 54 | gen_stub serror_current_el_spx 55 | 56 | // Lower EL 57 | gen_stub sync_lower_el 58 | gen_stub irq_lower_el 59 | gen_stub fiq_lower_el 60 | gen_stub serror_lower_el 61 | 62 | // Lower EL with aarch32 63 | gen_stub sync_lower_el_aarch32 64 | gen_stub irq_lower_el_aarch32 65 | gen_stub fiq_lower_el_aarch32 66 | gen_stub serror_lower_el_aarch32 67 | -------------------------------------------------------------------------------- /hal_aarch64/src/irq.rs: -------------------------------------------------------------------------------- 1 | use core::ptr; 2 | use core::sync::atomic::{AtomicPtr, Ordering}; 3 | 4 | use crate::cpu; 5 | use hal_core::{Error, TimerCallbackFn}; 6 | 7 | use crate::devices::gicv2::GicV2; 8 | 9 | use crate::mm; 10 | use hal_core::mm::{PageAlloc, PageMap, Permissions, VAddr}; 11 | 12 | use tock_registers::interfaces::Writeable; 13 | 14 | const PHYSICAL_TIMER_LINE: u32 = 30; 15 | 16 | pub unsafe fn init_el1_exception_handlers() { 17 | extern "Rust" { 18 | static el1_vector_table: core::cell::UnsafeCell<()>; 19 | } 20 | cortex_a::registers::VBAR_EL1.set(el1_vector_table.get() as u64); 21 | } 22 | 23 | static TIMER_CALLBACK: AtomicPtr = AtomicPtr::new(ptr::null_mut()); 24 | 25 | pub fn set_timer_handler(h: TimerCallbackFn) { 26 | TIMER_CALLBACK.store(h as *mut _, Ordering::Relaxed); 27 | } 28 | 29 | pub fn set_timer(ticks: usize) -> Result<(), Error> { 30 | enable_line(PHYSICAL_TIMER_LINE)?; 31 | super::cpu::set_physical_timer(ticks); 32 | super::cpu::unmask_interrupts(); 33 | 34 | Ok(()) 35 | } 36 | 37 | enum IrqChip { 38 | NoChip, 39 | GicV2(GicV2), 40 | } 41 | 42 | impl IrqChip { 43 | fn get_int(&mut self) -> Result { 44 | match self { 45 | Self::NoChip => unreachable!("does not support this"), 46 | Self::GicV2(gic) => gic.get_int(), 47 | } 48 | } 49 | 50 | fn clear_int(&mut self, int: u32) { 51 | match self { 52 | Self::NoChip => unreachable!("does not support this"), 53 | Self::GicV2(gic) => gic.clear_int(int), 54 | } 55 | } 56 | 57 | fn enable_int(&mut self, int: u32) -> Result<(), Error> { 58 | match self { 59 | Self::NoChip => unreachable!("does not support"), 60 | Self::GicV2(gic) => gic.enable_line(int), 61 | } 62 | } 63 | } 64 | 65 | static mut IRQ_CHIP: IrqChip = IrqChip::NoChip; 66 | 67 | pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> { 68 | let (gicd_base, gicc_base) = (0x800_0000, 0x801_0000); 69 | mm::current().identity_map_range( 70 | VAddr::new(gicd_base), 71 | 0x0001_0000 / mm::PAGE_SIZE, 72 | Permissions::READ | Permissions::WRITE, 73 | allocator, 74 | )?; 75 | mm::current().identity_map_range( 76 | VAddr::new(gicc_base), 77 | 0x0001_0000 / mm::PAGE_SIZE, 78 | 
Permissions::READ | Permissions::WRITE, 79 | allocator, 80 | )?; 81 | 82 | unsafe { 83 | IRQ_CHIP = IrqChip::GicV2(GicV2::new(gicd_base, gicc_base)); 84 | } 85 | Ok(()) 86 | } 87 | 88 | fn enable_line(line: u32) -> Result<(), Error> { 89 | unsafe { IRQ_CHIP.enable_int(line) } 90 | } 91 | 92 | #[no_mangle] 93 | extern "C" fn sync_current_el_sp0() { 94 | panic!("hit sync_current_el_sp0"); 95 | } 96 | 97 | #[no_mangle] 98 | extern "C" fn irq_current_el_sp0() { 99 | let int = unsafe { IRQ_CHIP.get_int() }; 100 | 101 | match int { 102 | Ok(PHYSICAL_TIMER_LINE) => { 103 | // Clear the timer in order to EOI it. 104 | cpu::clear_physical_timer(); 105 | 106 | let timer_cb = TIMER_CALLBACK.load(Ordering::Relaxed); 107 | if !timer_cb.is_null() { 108 | unsafe { 109 | // Cannot simply dereference TIMER_CALLBACK here. 110 | // We are using an AtomicPtr and TIMER_CALLBACK already holds the fn(). 111 | core::mem::transmute::<_, fn()>(timer_cb)(); 112 | } 113 | } 114 | 115 | unsafe { IRQ_CHIP.clear_int(int.unwrap()) }; 116 | } 117 | _ => panic!("got an irq but fuck knows"), 118 | } 119 | } 120 | #[no_mangle] 121 | extern "C" fn fiq_current_el_sp0() { 122 | panic!("hit fiq_current_el_sp0"); 123 | } 124 | 125 | #[no_mangle] 126 | extern "C" fn serror_current_el_sp0() { 127 | panic!("hit serror_current_el_sp0"); 128 | } 129 | 130 | #[no_mangle] 131 | extern "C" fn sync_current_el_spx() { 132 | panic!("hit sync_current_el_spx"); 133 | } 134 | 135 | #[no_mangle] 136 | extern "C" fn irq_current_el_spx() { 137 | panic!("hit irq_current_el_spx"); 138 | } 139 | 140 | #[no_mangle] 141 | extern "C" fn fiq_current_el_spx() { 142 | panic!("hit fiq_current_el_spx"); 143 | } 144 | 145 | #[no_mangle] 146 | extern "C" fn serror_current_el_spx() { 147 | panic!("hit serror_current_el_spx"); 148 | } 149 | 150 | #[no_mangle] 151 | extern "C" fn sync_lower_el() { 152 | panic!("hit sync_lower_el"); 153 | } 154 | 155 | #[no_mangle] 156 | extern "C" fn irq_lower_el() { 157 | panic!("hit irq_lower_el"); 158 | } 159 | 160 | #[no_mangle] 161 | extern "C" fn fiq_lower_el() { 162 | panic!("hit fiq_lower_el"); 163 | } 164 | 165 | #[no_mangle] 166 | extern "C" fn serror_lower_el() { 167 | panic!("hit serror_lower_el"); 168 | } 169 | 170 | #[no_mangle] 171 | extern "C" fn sync_lower_el_aarch32() { 172 | panic!("hit sync_lower_el_aarch32"); 173 | } 174 | 175 | #[no_mangle] 176 | extern "C" fn irq_lower_el_aarch32() { 177 | panic!("hit irq_lower_el_aarch32"); 178 | } 179 | 180 | #[no_mangle] 181 | extern "C" fn fiq_lower_el_aarch32() { 182 | panic!("hit fiq_lower_el_aarch32"); 183 | } 184 | 185 | #[no_mangle] 186 | extern "C" fn serror_lower_el_aarch32() { 187 | panic!("hit serror_lower_el_aarch32"); 188 | } 189 | 190 | core::arch::global_asm!(include_str!("exceptions.S")); 191 | -------------------------------------------------------------------------------- /hal_aarch64/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(naked_functions)] 3 | 4 | use cortex_a::registers::*; 5 | use tock_registers::interfaces::Readable; 6 | 7 | use core::arch::asm; 8 | 9 | pub mod cpu; 10 | pub mod irq; 11 | pub mod mm; 12 | 13 | mod devices; 14 | 15 | #[allow(dead_code)] 16 | #[derive(Debug)] 17 | pub struct PanicInfo { 18 | esr_el1: u64, 19 | elr_el1: u64, 20 | far_el1: u64, 21 | } 22 | 23 | pub fn panic_info() -> PanicInfo { 24 | PanicInfo { 25 | esr_el1: ESR_EL1.get(), 26 | elr_el1: ELR_EL1.get(), 27 | far_el1: FAR_EL1.get(), 28 | } 29 | } 30 | 31 | #[naked] 32 | #[no_mangle] 33 | 
unsafe extern "C" fn _start() -> ! { 34 | asm!( 35 | " 36 | adrp x9, STACK_START 37 | msr spsel, xzr 38 | mov sp, x9 39 | b k_main 40 | ", 41 | options(noreturn) 42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /hal_aarch64/src/mm/mod.rs: -------------------------------------------------------------------------------- 1 | use hal_core::{ 2 | mm::{self, PageAlloc, PageMap}, 3 | AddressRange, Error, 4 | }; 5 | 6 | use cortex_a::asm::barrier; 7 | use cortex_a::registers::*; 8 | use tock_registers::interfaces::{ReadWriteable, Writeable}; 9 | 10 | mod pgt48; 11 | 12 | use pgt48::PageTable; 13 | 14 | pub type EntryType = usize; 15 | 16 | pub const PAGE_SIZE: usize = PageTable::PAGE_SIZE; 17 | 18 | use core::cell::OnceCell; 19 | 20 | static mut GPT: OnceCell<&'static mut PageTable> = OnceCell::new(); 21 | 22 | pub fn is_pagetable_installed() -> bool { 23 | unsafe { GPT.get_mut().is_some() } 24 | } 25 | 26 | pub fn current() -> &'static mut PageTable { 27 | unsafe { GPT.get_mut().unwrap() } 28 | } 29 | 30 | pub fn prefill_pagetable( 31 | r: impl Iterator, 32 | rw: impl Iterator, 33 | rwx: impl Iterator, 34 | pre_allocated: impl Iterator, 35 | allocator: &impl PageAlloc, 36 | ) -> Result<(), Error> { 37 | let pt = hal_core::mm::prefill_pagetable::(r, rw, rwx, pre_allocated, allocator)?; 38 | 39 | // TODO: put into into the hal_core::Error 40 | unsafe { 41 | if GPT.set(pt).is_err() { 42 | panic!("GPT is already set ?"); 43 | } 44 | }; 45 | 46 | Ok(()) 47 | } 48 | 49 | pub fn enable_paging() { 50 | unsafe { 51 | load_pagetable(current()); 52 | }; 53 | } 54 | 55 | unsafe fn load_pagetable(pt: &'static mut PageTable) { 56 | MAIR_EL1.write( 57 | // Attribute 0 - NonCacheable normal DRAM. FIXME: enable cache? 58 | MAIR_EL1::Attr0_Normal_Outer::NonCacheable + MAIR_EL1::Attr0_Normal_Inner::NonCacheable, 59 | ); 60 | TTBR0_EL1.set_baddr((pt as *const PageTable) as u64); 61 | TCR_EL1.write( 62 | TCR_EL1::TBI0::Used 63 | + TCR_EL1::IPS::Bits_48 64 | + TCR_EL1::TG0::KiB_4 65 | // + TCR_EL1::SH0::Inner 66 | + TCR_EL1::SH0::None 67 | // + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable 68 | + TCR_EL1::ORGN0::NonCacheable 69 | // + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable 70 | + TCR_EL1::IRGN0::NonCacheable 71 | + TCR_EL1::EPD0::EnableTTBR0Walks 72 | + TCR_EL1::A1::TTBR0 73 | + TCR_EL1::T0SZ.val(16) 74 | + TCR_EL1::EPD1::DisableTTBR1Walks, 75 | ); 76 | 77 | barrier::isb(barrier::SY); 78 | 79 | SCTLR_EL1.modify(SCTLR_EL1::M::Enable); 80 | 81 | barrier::isb(barrier::SY); 82 | } 83 | 84 | pub fn align_up(addr: usize) -> usize { 85 | mm::align_up(addr, PAGE_SIZE) 86 | } 87 | 88 | pub fn align_down(addr: usize) -> usize { 89 | mm::align_down(addr, PAGE_SIZE) 90 | } 91 | -------------------------------------------------------------------------------- /hal_aarch64/src/mm/pgt48.rs: -------------------------------------------------------------------------------- 1 | use hal_core::{ 2 | mm::{self, PageAlloc, PageEntry, PageMap, Permissions}, 3 | Error, 4 | }; 5 | 6 | use tock_registers::interfaces::{ReadWriteable, Readable, Writeable}; 7 | use tock_registers::register_bitfields; 8 | use tock_registers::registers::{ReadOnly, ReadWrite}; 9 | 10 | register_bitfields! 
[u64, 11 | pub VAddrInner [ 12 | BLOCK_OFFSET OFFSET(0) NUMBITS(12) [], 13 | LEVEL3_TABLE_IDX OFFSET(12) NUMBITS(9) [], 14 | LEVEL2_TABLE_IDX OFFSET(21) NUMBITS(9) [], 15 | LEVEL1_TABLE_IDX OFFSET(30) NUMBITS(9) [], 16 | LEVEL0_TABLE_IDX OFFSET(39) NUMBITS(9) [], 17 | ], 18 | 19 | pub TableEntryInner [ 20 | TYPE OFFSET(0) NUMBITS(2) [ 21 | TABLE_ENTRY = 0b11, 22 | INVALID_ENTRY = 0b00, 23 | ], 24 | 25 | INDX OFFSET(2) NUMBITS(2) [], 26 | 27 | AP OFFSET(6) NUMBITS(2) [ 28 | U_NONE_K_RW = 0b00, 29 | U_RW_K_RW = 0b01, 30 | U_NONE_K_R = 0b10, 31 | U_R_K_R = 0b11, 32 | ], 33 | 34 | SH OFFSET(8) NUMBITS(2) [ 35 | NON_SHAREABLE = 0b00, 36 | RESERVED = 0b01, 37 | OUTER_SHAREABLE = 0b10, 38 | INNER_SHAREABLE = 0b11, 39 | ], 40 | 41 | AF OFFSET(10) NUMBITS(1) [ 42 | FALSE = 0b0, 43 | TRUE = 0b1, 44 | ], 45 | 46 | DEST OFFSET(12) NUMBITS(36) [], 47 | 48 | PXN OFFSET(53) NUMBITS(1) [], 49 | UXN OFFSET(54) NUMBITS(1) [], 50 | ], 51 | 52 | pub TableDescriptorInner [ 53 | TYPE OFFSET(0) NUMBITS(2) [ 54 | TABLE_DESCRIPTOR = 0b11, 55 | INVALID_ENTRY = 0b00, 56 | ], 57 | 58 | DEST OFFSET(12) NUMBITS(36) [], 59 | ], 60 | ]; 61 | 62 | struct VAddr(ReadOnly); 63 | 64 | impl VAddr { 65 | fn get_level_offset(&self, level: u8) -> usize { 66 | let offset = match level { 67 | 0 => self.0.read(VAddrInner::LEVEL0_TABLE_IDX), 68 | 1 => self.0.read(VAddrInner::LEVEL1_TABLE_IDX), 69 | 2 => self.0.read(VAddrInner::LEVEL2_TABLE_IDX), 70 | 3 => self.0.read(VAddrInner::LEVEL3_TABLE_IDX), 71 | _ => panic!("There are only 4 levels"), 72 | }; 73 | 74 | offset as usize 75 | } 76 | } 77 | 78 | impl From for VAddr { 79 | fn from(paddr: mm::VAddr) -> Self { 80 | assert_eq!(usize::BITS, u64::BITS); 81 | let val = paddr.val as u64; 82 | Self(unsafe { core::mem::transmute::>(val) }) 83 | } 84 | } 85 | 86 | pub struct PAddr(u64); 87 | 88 | impl From for PAddr { 89 | fn from(paddr: mm::PAddr) -> Self { 90 | assert_eq!(usize::BITS, u64::BITS); 91 | Self(paddr.val as u64) 92 | } 93 | } 94 | 95 | impl From<&PAddr> for u64 { 96 | fn from(paddr: &PAddr) -> Self { 97 | paddr.0 98 | } 99 | } 100 | 101 | struct TableDescriptor(ReadWrite); 102 | 103 | impl TableDescriptor { 104 | fn get_next_level(&mut self) -> &mut PageTable { 105 | let raw_pgt = (self.0.read(TableDescriptorInner::DEST) << 12) as *mut PageTable; 106 | 107 | // Safety: there is no conceivable way for us to know if this pointer is valid. 108 | // If the pointer in our pagetable are invalid, then we're lost... 109 | unsafe { raw_pgt.as_mut().unwrap() } 110 | } 111 | 112 | fn set_next_level(&mut self, next_level: &mut PageTable) { 113 | let next_level_addr = (next_level as *const PageTable) as u64; 114 | self.0 115 | .modify(TableDescriptorInner::DEST.val(next_level_addr >> 12)); 116 | self.0.modify(TableDescriptorInner::TYPE::TABLE_DESCRIPTOR); 117 | } 118 | 119 | fn is_invalid(&self) -> bool { 120 | self.0.read(TableDescriptorInner::TYPE) == TableDescriptorInner::TYPE::INVALID_ENTRY.into() 121 | } 122 | 123 | fn set_invalid(&mut self) { 124 | self.0.write(TableDescriptorInner::TYPE::INVALID_ENTRY); 125 | } 126 | } 127 | 128 | pub struct TableEntry(ReadWrite); 129 | 130 | impl TableEntry { 131 | fn set_target(&mut self, addr: u64) { 132 | let field = TableEntryInner::DEST.val(addr >> 12); 133 | self.0.modify(TableEntryInner::TYPE::TABLE_ENTRY); 134 | self.0.modify(field); 135 | } 136 | 137 | fn set_permissions(&mut self, perms: mm::Permissions) { 138 | // TODO: Can we improve this? 
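        // AP[2:1] (the AP field above) selects EL0/EL1 read/write access,
        // while UXN and PXN independently forbid execution at EL0 and EL1.
        // A USER mapping therefore always sets PXN and a kernel mapping
        // always sets UXN, so a page is only ever executable at its own level.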
139 | if perms.contains(mm::Permissions::USER) { 140 | if perms.contains(mm::Permissions::WRITE) { 141 | self.0.modify(TableEntryInner::AP::U_RW_K_RW); 142 | } else { 143 | self.0.modify(TableEntryInner::AP::U_R_K_R); 144 | } 145 | 146 | self.0.modify(TableEntryInner::PXN.val(1)); 147 | if perms.contains(mm::Permissions::EXECUTE) { 148 | self.0.modify(TableEntryInner::UXN.val(0)); 149 | } else { 150 | self.0.modify(TableEntryInner::UXN.val(1)); 151 | } 152 | } else { 153 | if perms.contains(mm::Permissions::WRITE) { 154 | self.0.modify(TableEntryInner::AP::U_NONE_K_RW); 155 | } else { 156 | self.0.modify(TableEntryInner::AP::U_NONE_K_R); 157 | } 158 | 159 | self.0.modify(TableEntryInner::UXN.val(1)); 160 | if perms.contains(mm::Permissions::EXECUTE) { 161 | self.0.modify(TableEntryInner::PXN.val(0)); 162 | } else { 163 | self.0.modify(TableEntryInner::PXN.val(1)); 164 | } 165 | } 166 | } 167 | 168 | fn set_mair_index(&mut self, index: usize) { 169 | // MAIR can store only 8 attributes 170 | assert!(index < 8); 171 | 172 | self.0.modify(TableEntryInner::INDX.val(index as u64)); 173 | } 174 | 175 | fn set_shareable(&mut self) { 176 | self.0.modify(TableEntryInner::SH::INNER_SHAREABLE); 177 | } 178 | 179 | fn set_access_flag(&mut self) { 180 | self.0.modify(TableEntryInner::AF::TRUE); 181 | } 182 | } 183 | 184 | impl PageEntry for TableEntry { 185 | fn set_invalid(&mut self) { 186 | self.0.write(TableEntryInner::TYPE::INVALID_ENTRY); 187 | } 188 | } 189 | 190 | /// Depending on the level of the pagetable walk, the actual data (u64) needs to be interpreted 191 | /// differently: Descriptor for levels 0 to 2 and Entry for level 3 192 | union PageTableContent { 193 | descriptor: core::mem::ManuallyDrop, 194 | entry: core::mem::ManuallyDrop, 195 | } 196 | 197 | impl PageTableContent { 198 | /// With Aarch64 Pgt48OA, if the first two bits are set to 0b00, entry/descriptor is invalid. 199 | const fn new_invalid() -> Self { 200 | unsafe { core::mem::transmute::(0b00u64) } 201 | } 202 | } 203 | 204 | #[repr(align(0x1000))] 205 | pub struct PageTable { 206 | entries: [PageTableContent; 512], 207 | } 208 | 209 | impl PageTable { 210 | pub const fn zeroed() -> Self { 211 | #[allow(clippy::uninit_assumed_init)] 212 | let mut entries: [PageTableContent; 512] = 213 | unsafe { core::mem::MaybeUninit::uninit().assume_init() }; 214 | let mut i = 0; 215 | while i < 512 { 216 | entries[i] = PageTableContent::new_invalid(); 217 | i += 1; 218 | } 219 | Self { entries } 220 | } 221 | } 222 | 223 | impl PageMap for PageTable { 224 | const PAGE_SIZE: usize = 4096; 225 | type Entry = TableEntry; 226 | 227 | fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { 228 | let page = allocator.alloc(1)?; 229 | let page_table = page as *mut PageTable; 230 | // Safety: the PMM gave us the memory, it should be a valid pointer. 
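        // Rather than relying on the allocator to hand back zeroed memory,
        // every descriptor is explicitly invalidated below before the table
        // is ever walked.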
231 | let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; 232 | 233 | page_table 234 | .entries 235 | .iter_mut() 236 | .for_each(|content| unsafe { &mut content.descriptor }.set_invalid()); 237 | 238 | Ok(page_table) 239 | } 240 | 241 | fn map( 242 | &mut self, 243 | va: mm::VAddr, 244 | pa: mm::PAddr, 245 | perms: Permissions, 246 | allocator: &impl PageAlloc, 247 | ) -> Result<&mut TableEntry, Error> { 248 | let va = VAddr::from(va); 249 | let pa = PAddr::from(pa); 250 | let mut pagetable = self; 251 | 252 | for lvl in 0..=3 { 253 | let offset = va.get_level_offset(lvl); 254 | let content = &mut pagetable.entries[offset]; 255 | 256 | if lvl == 3 { 257 | let entry = unsafe { &mut content.entry }; 258 | entry.set_target(u64::from(&pa)); 259 | entry.set_permissions(perms); 260 | entry.set_mair_index(0); 261 | entry.set_shareable(); 262 | entry.set_access_flag(); 263 | 264 | return Ok(entry); 265 | } 266 | 267 | let descriptor = unsafe { &mut content.descriptor }; 268 | if descriptor.is_invalid() { 269 | let new_page_table = PageTable::new(allocator)?; 270 | descriptor.set_next_level(new_page_table); 271 | } 272 | 273 | pagetable = descriptor.get_next_level(); 274 | } 275 | 276 | unreachable!("We should have reached lvl 3 and returned by now..."); 277 | } 278 | 279 | fn add_invalid_entry( 280 | &mut self, 281 | va: mm::VAddr, 282 | allocator: &impl PageAlloc, 283 | ) -> Result<(), Error> { 284 | let entry = self.map( 285 | va, 286 | mm::PAddr { 287 | val: 0x0A0A_0A0A_0A0A_0A0A, 288 | }, 289 | mm::Permissions::READ, 290 | allocator, 291 | )?; 292 | 293 | entry.set_invalid(); 294 | 295 | Ok(()) 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /hal_core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hal_core" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | bitflags = "2.3" 10 | log = "0.4" 11 | -------------------------------------------------------------------------------- /hal_core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(return_position_impl_trait_in_trait)] 3 | 4 | use core::convert::Into; 5 | use core::ops::Range; 6 | 7 | pub mod mm; 8 | 9 | #[derive(Debug)] 10 | pub enum Error { 11 | Alloc(mm::AllocatorError), 12 | } 13 | 14 | impl From for Error { 15 | fn from(e: mm::AllocatorError) -> Self { 16 | Self::Alloc(e) 17 | } 18 | } 19 | 20 | pub type TimerCallbackFn = fn(); 21 | 22 | /// A range similar to core::ops::Range but that is copyable. 23 | /// The range is half-open, inclusive below, exclusive above, ie. 
[start; end[ 24 | #[derive(Debug, Copy, Clone, PartialEq)] 25 | pub struct AddressRange { 26 | pub start: usize, 27 | pub end: usize, 28 | } 29 | 30 | impl AddressRange { 31 | pub fn new>(range: Range) -> Self { 32 | let (start, end) = (range.start.into(), range.end.into()); 33 | // assert!(range.start % page_size == 0); 34 | // assert_eq!(range.end, mm::align_up(range.end, page_size)); 35 | 36 | assert!(start < end); 37 | 38 | Self { start, end } 39 | } 40 | 41 | pub fn with_size(start: usize, size: usize) -> Self { 42 | Self::new(start..start + size) 43 | } 44 | 45 | pub fn round_up_to_page(self, page_size: usize) -> Self { 46 | Self { 47 | start: self.start, 48 | end: mm::align_up(self.end, page_size), 49 | } 50 | } 51 | 52 | pub fn iter_pages(self, page_size: usize) -> impl Iterator { 53 | assert_eq!(self.end, mm::align_up(self.end, page_size)); 54 | 55 | (self.start..self.end).step_by(page_size) 56 | } 57 | 58 | pub fn count_pages(&self, page_size: usize) -> usize { 59 | mm::align_up(self.size(), page_size) / page_size 60 | } 61 | 62 | pub fn contains(&self, val: usize) -> bool { 63 | self.start <= val && val < self.end 64 | } 65 | 66 | pub fn size(&self) -> usize { 67 | self.end - self.start 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /hal_core/src/mm.rs: -------------------------------------------------------------------------------- 1 | use log::trace; 2 | 3 | use super::{AddressRange, Error}; 4 | 5 | #[derive(Debug, Clone, Copy)] 6 | pub struct VAddr { 7 | pub val: usize, 8 | } 9 | 10 | impl VAddr { 11 | pub fn new(val: usize) -> Self { 12 | Self { val } 13 | } 14 | } 15 | 16 | impl core::convert::From for VAddr { 17 | fn from(val: usize) -> Self { 18 | Self { val } 19 | } 20 | } 21 | 22 | impl core::convert::From for usize { 23 | fn from(va: VAddr) -> Self { 24 | va.val 25 | } 26 | } 27 | 28 | #[derive(Debug, Clone, Copy)] 29 | pub struct PAddr { 30 | pub val: usize, 31 | } 32 | 33 | impl PAddr { 34 | pub fn new(val: usize) -> Self { 35 | Self { val } 36 | } 37 | pub fn ptr_cast(self) -> *mut T { 38 | self.val as *mut T 39 | } 40 | } 41 | 42 | bitflags::bitflags! 
{ 43 | #[derive(Clone, Copy)] 44 | pub struct Permissions: u8 { 45 | const READ = 0b00000001; 46 | const WRITE = 0b00000010; 47 | const EXECUTE = 0b00000100; 48 | const USER = 0b00001000; 49 | } 50 | } 51 | 52 | pub type PageAllocFn = fn(usize) -> PAddr; 53 | 54 | #[derive(Debug)] 55 | pub enum AllocatorError { 56 | NotEnoughMemoryForMetadata, 57 | OutOfMemory, 58 | } 59 | 60 | pub trait PageAlloc: Sync { 61 | fn alloc(&self, page_count: usize) -> Result; 62 | fn dealloc(&self, base: usize, page_count: usize) -> Result<(), AllocatorError>; 63 | fn used_pages(&self, f: F); 64 | } 65 | 66 | pub struct NullPageAllocator; 67 | 68 | impl PageAlloc for NullPageAllocator { 69 | fn alloc(&self, _page_count: usize) -> Result { 70 | panic!("the null page allocator mustn't allocate"); 71 | } 72 | 73 | fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { 74 | panic!("the null page allocator cannot deallocate"); 75 | } 76 | 77 | fn used_pages(&self, _f: F) { 78 | panic!("obviously the null allocator has no pages that are in use"); 79 | } 80 | } 81 | 82 | pub trait PageEntry { 83 | fn set_invalid(&mut self); 84 | } 85 | 86 | pub trait PageMap { 87 | const PAGE_SIZE: usize; 88 | type Entry: PageEntry; 89 | 90 | fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error>; 91 | 92 | fn map( 93 | &mut self, 94 | va: VAddr, 95 | pa: PAddr, 96 | perms: Permissions, 97 | allocator: &impl PageAlloc, 98 | ) -> Result<&mut Self::Entry, Error>; 99 | 100 | fn add_invalid_entry(&mut self, va: VAddr, allocator: &impl PageAlloc) -> Result<(), Error> { 101 | self.map( 102 | va, 103 | PAddr::new(0x0A0A_0A0A_0A0A_0A0A), 104 | Permissions::READ, 105 | allocator, 106 | )? 107 | .set_invalid(); 108 | 109 | Ok(()) 110 | } 111 | 112 | fn identity_map( 113 | &mut self, 114 | addr: VAddr, 115 | perms: Permissions, 116 | allocator: &impl PageAlloc, 117 | ) -> Result<(), Error> { 118 | self.map(addr, PAddr::new(addr.val), perms, allocator) 119 | .map(|_| ()) 120 | } 121 | 122 | fn identity_map_range( 123 | &mut self, 124 | addr: VAddr, 125 | page_count: usize, 126 | perms: Permissions, 127 | allocator: &impl PageAlloc, 128 | ) -> Result<(), Error> { 129 | let start = addr.val; 130 | for i in 0..page_count { 131 | self.identity_map(VAddr::new(start + i * Self::PAGE_SIZE), perms, allocator)?; 132 | } 133 | 134 | Ok(()) 135 | } 136 | 137 | fn add_invalid_entries( 138 | &mut self, 139 | range: AddressRange, 140 | allocator: &impl PageAlloc, 141 | ) -> Result<(), Error> { 142 | for page in range.iter_pages(Self::PAGE_SIZE) { 143 | self.add_invalid_entry(VAddr::new(page), allocator)?; 144 | } 145 | 146 | Ok(()) 147 | } 148 | 149 | fn identity_map_addressrange( 150 | &mut self, 151 | range: AddressRange, 152 | perms: Permissions, 153 | allocator: &impl PageAlloc, 154 | ) -> Result<(), Error> { 155 | for page in range.iter_pages(Self::PAGE_SIZE) { 156 | self.identity_map(VAddr::new(page), perms, allocator)?; 157 | } 158 | 159 | Ok(()) 160 | } 161 | } 162 | 163 | pub fn align_up(val: usize, page_sz: usize) -> usize { 164 | ((val + page_sz - 1) / page_sz) * page_sz 165 | } 166 | 167 | pub fn align_down(addr: usize, page_sz: usize) -> usize { 168 | // TODO: can this be more optimized ? 169 | // XXX: uh isn't this math wrong ? 
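    // Note: as written this always returns an address strictly *above* `addr`
    // (round up, then add a full page). A conventional align_down would be
    // `(addr / page_sz) * page_sz`, which is what the XXX above is hinting at.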
170 | align_up(addr, page_sz) + page_sz 171 | } 172 | 173 | pub fn prefill_pagetable( 174 | r: impl Iterator, 175 | rw: impl Iterator, 176 | rwx: impl Iterator, 177 | pre_allocated: impl Iterator, 178 | allocator: &impl PageAlloc, 179 | ) -> Result<&'static mut P, Error> { 180 | trace!("hal_core::mm::prefill_pagetable"); 181 | let pt: &'static mut P = P::new(allocator)?; 182 | 183 | for range in pre_allocated { 184 | pt.add_invalid_entries(range, allocator)?; 185 | } 186 | 187 | for range in r { 188 | trace!("mapping as RO: {:X?}", range); 189 | pt.identity_map_addressrange(range, Permissions::READ, allocator)?; 190 | } 191 | 192 | for range in rw { 193 | trace!("mapping as RW: {:X?}", range); 194 | pt.identity_map_addressrange(range, Permissions::READ | Permissions::WRITE, allocator)?; 195 | } 196 | 197 | for range in rwx { 198 | trace!("mapping as RWX: {:X?}", range); 199 | pt.identity_map_addressrange( 200 | range, 201 | Permissions::READ | Permissions::WRITE | Permissions::EXECUTE, 202 | allocator, 203 | )? 204 | } 205 | 206 | Ok(pt) 207 | } 208 | -------------------------------------------------------------------------------- /hal_riscv64/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hal_riscv64" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | hal_core = { path = "../hal_core" } 10 | modular-bitfield = "0.11" 11 | sbi = "0.2.0" 12 | riscv = "0.10" 13 | -------------------------------------------------------------------------------- /hal_riscv64/src/cpu.rs: -------------------------------------------------------------------------------- 1 | use super::registers; 2 | 3 | pub fn unmask_interrupts() { 4 | registers::set_sstatus_sie(); 5 | registers::set_sie_ssie(); 6 | registers::set_sie_seie(); 7 | registers::set_sie_stie(); 8 | } 9 | 10 | pub fn clear_physical_timer() { 11 | sbi::timer::set_timer(u64::MAX).unwrap(); 12 | } 13 | -------------------------------------------------------------------------------- /hal_riscv64/src/irq.rs: -------------------------------------------------------------------------------- 1 | use hal_core::{ 2 | mm::{PageAlloc, PageMap, Permissions, VAddr}, 3 | Error, TimerCallbackFn, 4 | }; 5 | 6 | use super::mm; 7 | use super::plic::Plic; 8 | use super::registers; 9 | 10 | use core::arch::asm; 11 | use core::ptr; 12 | use core::sync::atomic::{AtomicPtr, Ordering}; 13 | 14 | use riscv; 15 | use sbi; 16 | 17 | pub fn init_exception_handlers() { 18 | registers::set_stvec(trap_handler as usize); 19 | } 20 | 21 | static mut IRQ_CHIP: Option = None; 22 | 23 | pub fn init_irq_chip(_dt_node: (), allocator: &impl PageAlloc) -> Result<(), Error> { 24 | // TODO map the dt_node 25 | let base = 0xc000000; 26 | let max_offset = 0x3FFFFFC; 27 | 28 | mm::current().identity_map_range( 29 | VAddr::new(base), 30 | max_offset / mm::PAGE_SIZE + 1, 31 | Permissions::READ | Permissions::WRITE, 32 | allocator, 33 | )?; 34 | unsafe { 35 | IRQ_CHIP = Some(Plic::new(base)); 36 | } 37 | 38 | Ok(()) 39 | } 40 | 41 | static TIMER_CALLBACK: AtomicPtr = AtomicPtr::new(ptr::null_mut()); 42 | 43 | pub fn set_timer_handler(h: TimerCallbackFn) { 44 | TIMER_CALLBACK.store(h as *mut _, Ordering::Relaxed); 45 | } 46 | 47 | pub fn set_timer(ticks: usize) -> Result<(), Error> { 48 | let target_time = riscv::register::time::read() + ticks; 49 | sbi::timer::set_timer(target_time as u64).unwrap(); 50 | 51 | 
Ok(()) 52 | } 53 | 54 | #[derive(Debug, Copy, Clone)] 55 | enum InterruptType { 56 | Reserved, 57 | SupervisorSoftware, 58 | SupervisorTimer, 59 | SupervisorExternal, 60 | Platform(u64), 61 | } 62 | 63 | impl InterruptType { 64 | fn is_asynchronous(&self) -> bool { 65 | matches!(self, Self::SupervisorTimer) 66 | } 67 | } 68 | 69 | impl From for InterruptType { 70 | fn from(code: u64) -> Self { 71 | match code { 72 | 0 | 2..=4 | 6..=8 | 10..=15 => Self::Reserved, 73 | 1 => Self::SupervisorSoftware, 74 | 5 => Self::SupervisorTimer, 75 | 9 => Self::SupervisorExternal, 76 | _ => Self::Platform(code), 77 | } 78 | } 79 | } 80 | 81 | impl From for u64 { 82 | fn from(itype: InterruptType) -> Self { 83 | match itype { 84 | InterruptType::Reserved => { 85 | unreachable!() 86 | } 87 | InterruptType::Platform(code) => code, 88 | InterruptType::SupervisorSoftware => 1, 89 | InterruptType::SupervisorTimer => 5, 90 | InterruptType::SupervisorExternal => 9, 91 | } 92 | } 93 | } 94 | 95 | #[derive(Debug, Copy, Clone)] 96 | enum ExceptionType { 97 | Reserved, 98 | Custom(u64), 99 | InstructionAddressMisaligned, 100 | InstructionAccessFault, 101 | IllegalInstruction, 102 | Breakpoint, 103 | LoadAddressMisaligned, 104 | LoadAccessFault, 105 | StoreAMOAddressMisaligned, 106 | StoreAMOAccessFault, 107 | EnvironmentCallUMode, 108 | EnvironmentCallSMode, 109 | InstructionPageFault, 110 | LoadPageFault, 111 | StoreAMOPageFault, 112 | } 113 | 114 | impl From for ExceptionType { 115 | fn from(code: u64) -> Self { 116 | match code { 117 | 10..=11 | 14 | 16..=23 | 32..=47 => Self::Reserved, 118 | c if c >= 64 => Self::Reserved, 119 | 24..=31 | 48..=63 => Self::Custom(code), 120 | 0 => Self::InstructionAddressMisaligned, 121 | 1 => Self::InstructionAccessFault, 122 | 2 => Self::IllegalInstruction, 123 | 3 => Self::Breakpoint, 124 | 4 => Self::LoadAddressMisaligned, 125 | 5 => Self::LoadAccessFault, 126 | 6 => Self::StoreAMOAddressMisaligned, 127 | 7 => Self::StoreAMOAccessFault, 128 | 8 => Self::EnvironmentCallUMode, 129 | 9 => Self::EnvironmentCallSMode, 130 | 12 => Self::InstructionPageFault, 131 | 13 => Self::LoadPageFault, 132 | 15 => Self::StoreAMOPageFault, 133 | _ => unreachable!(), 134 | } 135 | } 136 | } 137 | 138 | #[derive(Copy, Clone)] 139 | enum TrapType { 140 | Interrupt(InterruptType), 141 | Exception(ExceptionType), 142 | } 143 | 144 | impl TrapType { 145 | fn is_interrupt(cause: u64) -> bool { 146 | (cause >> 63) == 1 147 | } 148 | } 149 | 150 | impl From for TrapType { 151 | fn from(cause: u64) -> Self { 152 | let exception_code = cause & !(1 << 63); 153 | 154 | if Self::is_interrupt(cause) { 155 | Self::Interrupt(InterruptType::from(exception_code)) 156 | } else { 157 | Self::Exception(ExceptionType::from(exception_code)) 158 | } 159 | } 160 | } 161 | 162 | static mut INTERRUPT_VECTOR: &[extern "C" fn()] = &[ 163 | undefined_handler, 164 | undefined_handler, 165 | undefined_handler, 166 | undefined_handler, 167 | undefined_handler, 168 | timer_handler, 169 | undefined_handler, 170 | undefined_handler, 171 | undefined_handler, 172 | supervisor_external_interrupt_handler, 173 | ]; 174 | 175 | /// Dispatch interrupts and exceptions 176 | /// Returns 0 if it was synchronous, 1 otherwise 177 | #[no_mangle] 178 | extern "C" fn trap_dispatch(cause: u64) -> u64 { 179 | match TrapType::from(cause) { 180 | TrapType::Interrupt(itype) => { 181 | let exception_code: u64 = itype.into(); 182 | unsafe { INTERRUPT_VECTOR[exception_code as usize]() }; 183 | 184 | if itype.is_asynchronous() { 185 | 1 186 | } else { 187 
| 0 188 | } 189 | } 190 | TrapType::Exception(etype) => { 191 | panic!("Exception '{:?}' not implemented yet", etype) 192 | } 193 | } 194 | } 195 | 196 | extern "C" fn supervisor_external_interrupt_handler() { 197 | todo!("fwd the external int to the irq_chip or smthing..."); 198 | } 199 | 200 | extern "C" fn undefined_handler() { 201 | panic!("Interruption is not handled yet"); 202 | } 203 | 204 | extern "C" fn timer_handler() { 205 | let timer_cb = TIMER_CALLBACK.load(Ordering::Relaxed); 206 | if !timer_cb.is_null() { 207 | unsafe { 208 | core::mem::transmute::<_, fn()>(timer_cb)(); 209 | } 210 | } 211 | } 212 | 213 | #[naked] 214 | #[no_mangle] 215 | #[repr(align(4))] 216 | unsafe extern "C" fn trap_handler() { 217 | asm!( 218 | " 219 | addi sp, sp, -0x100 220 | 221 | sd x31, 0x100(sp) 222 | sd x30, 0xf8(sp) 223 | sd x29, 0xf0(sp) 224 | sd x28, 0xd8(sp) 225 | sd x27, 0xd0(sp) 226 | sd x26, 0xc8(sp) 227 | sd x25, 0xc0(sp) 228 | sd x24, 0xb8(sp) 229 | sd x23, 0xb0(sp) 230 | sd x22, 0xa8(sp) 231 | sd x21, 0xa0(sp) 232 | sd x20, 0x98(sp) 233 | sd x19, 0x90(sp) 234 | sd x18, 0x88(sp) 235 | sd x17, 0x80(sp) 236 | sd x16, 0x78(sp) 237 | sd x15, 0x70(sp) 238 | sd x14, 0x68(sp) 239 | sd x13, 0x60(sp) 240 | sd x12, 0x58(sp) 241 | sd x11, 0x50(sp) 242 | sd x10, 0x48(sp) 243 | sd x9, 0x40(sp) 244 | sd x8, 0x38(sp) 245 | sd x7, 0x30(sp) 246 | sd x6, 0x28(sp) 247 | sd x5, 0x20(sp) 248 | sd x4, 0x18(sp) 249 | sd x3, 0x10(sp) 250 | sd x2, 0x8(sp) 251 | sd x1, 0x0(sp) 252 | 253 | // mv a0, sp // Pointer on stack for the register struct 254 | // csrr a1, sepc 255 | // csrr a2, stval 256 | // csrr a3, scause 257 | // csrr a5, sstatus 258 | 259 | csrr a0, scause 260 | jal trap_dispatch 261 | 262 | bne a0, x0, 1f 263 | 264 | csrr t0, sepc 265 | addi t0, t0, 4 266 | csrw sepc, t0 267 | 268 | 1: 269 | ld x1, 0x0(sp) 270 | ld x2, 0x8(sp) 271 | ld x3, 0x10(sp) 272 | ld x4, 0x18(sp) 273 | ld x5, 0x20(sp) 274 | ld x6, 0x28(sp) 275 | ld x7, 0x30(sp) 276 | ld x8, 0x38(sp) 277 | ld x9, 0x40(sp) 278 | ld x10, 0x48(sp) 279 | ld x11, 0x50(sp) 280 | ld x12, 0x58(sp) 281 | ld x13, 0x60(sp) 282 | ld x14, 0x68(sp) 283 | ld x15, 0x70(sp) 284 | ld x16, 0x78(sp) 285 | ld x17, 0x80(sp) 286 | ld x18, 0x88(sp) 287 | ld x19, 0x90(sp) 288 | ld x20, 0x98(sp) 289 | ld x21, 0xa0(sp) 290 | ld x22, 0xa8(sp) 291 | ld x23, 0xb0(sp) 292 | ld x24, 0xb8(sp) 293 | ld x25, 0xc0(sp) 294 | ld x26, 0xc8(sp) 295 | ld x27, 0xd0(sp) 296 | ld x28, 0xd8(sp) 297 | ld x29, 0xf0(sp) 298 | ld x30, 0xf8(sp) 299 | ld x31, 0x100(sp) 300 | 301 | addi sp, sp, 0x100 302 | 303 | sret", 304 | options(noreturn) 305 | ); 306 | // Obviously this isn't done, we need to jump back to the previous context before the 307 | // interrupt using mpp/spp and mepc/sepc. 
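    // Annotation on the dispatch contract above (added note, with a Rust-like sketch in
    // comment form): `trap_dispatch` returns 1 for an asynchronous trap (interrupt) and 0
    // for a synchronous one. The `bne a0, x0, 1f` / `addi t0, t0, 4` sequence therefore
    // only advances `sepc` for synchronous traps, so that `sret` resumes *after* the
    // trapping instruction (e.g. an `ecall`). The sketch assumes 4-byte, non-compressed
    // instructions:
    //
    //     let is_async = trap_dispatch(scause);
    //     if is_async == 0 {
    //         sepc += 4; // skip the instruction that trapped
    //     }
    //     // restore the saved registers, then `sret` back to sepc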
308 | } 309 | 310 | #[cfg(test)] 311 | mod test { 312 | use super::*; 313 | use crate::kernel_tests::*; 314 | 315 | #[test_case] 316 | fn arch_timer(ctx: &mut TestContext) { 317 | static mut TRAPTYPE: Option<TrapType> = None; 318 | 319 | extern "C" fn test_trap_handler(cause: u64) -> u64 { 320 | unsafe { TRAPTYPE = Some(TrapType::from(cause)) }; 321 | 322 | // "Disable" timer 323 | sbi::timer::set_timer(u64::MAX).unwrap(); 324 | 1 325 | } 326 | 327 | ctx.arch_interrupts.init_interrupts(); 328 | ctx.arch_interrupts 329 | .set_higher_trap_handler(test_trap_handler); 330 | 331 | ctx.arch_interrupts.set_timer(10000); 332 | 333 | // Wait some time for the timer interrupt to arrive 334 | for i in 0..1000000 { 335 | // This is just to avoid the whole loop being optimized out 336 | core::hint::black_box(i); 337 | 338 | if let Some(ttype) = unsafe { TRAPTYPE } { 339 | if matches!(ttype, TrapType::Interrupt(InterruptType::SupervisorTimer)) { 340 | return; 341 | } 342 | 343 | // There was an interrupt but it was not the timer 344 | assert!(false); 345 | } 346 | } 347 | 348 | // The interrupt was never triggered 349 | assert!(false) 350 | } 351 | } 352 | -------------------------------------------------------------------------------- /hal_riscv64/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(fn_align)] 3 | #![feature(naked_functions)] 4 | 5 | pub mod cpu; 6 | pub mod irq; 7 | pub mod mm; 8 | mod plic; 9 | mod registers; 10 | 11 | use core::arch::asm; 12 | 13 | pub fn panic_info() {} 14 | 15 | #[naked] 16 | #[no_mangle] 17 | unsafe extern "C" fn _start() -> ! { 18 | asm!("la sp, STACK_START", "call k_main", options(noreturn)); 19 | } 20 | -------------------------------------------------------------------------------- /hal_riscv64/src/mm/mod.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | use core::cell::OnceCell; 3 | use hal_core::{ 4 | mm::{self, PageAlloc, PageMap}, 5 | AddressRange, Error, 6 | }; 7 | 8 | mod sv39; 9 | use sv39::{PageTable, Satp, SatpMode}; 10 | 11 | pub const PAGE_SIZE: usize = PageTable::PAGE_SIZE; 12 | 13 | static mut GPT: OnceCell<&'static mut PageTable> = OnceCell::new(); 14 | 15 | pub fn current() -> &'static mut PageTable { 16 | unsafe { GPT.get_mut().unwrap() } 17 | } 18 | 19 | pub fn prefill_pagetable( 20 | r: impl Iterator<Item = AddressRange>, 21 | rw: impl Iterator<Item = AddressRange>, 22 | rwx: impl Iterator<Item = AddressRange>, 23 | pre_allocated: impl Iterator<Item = AddressRange>, 24 | allocator: &impl PageAlloc, 25 | ) -> Result<(), Error> { 26 | let pt = hal_core::mm::prefill_pagetable::<PageTable>(r, rw, rwx, pre_allocated, allocator)?; 27 | 28 | // TODO: put this into the hal_core::Error 29 | unsafe { 30 | if GPT.set(pt).is_err() { 31 | panic!("GPT is already set ?"); 32 | } 33 | }; 34 | 35 | Ok(()) 36 | } 37 | 38 | pub fn enable_paging() { 39 | unsafe { 40 | load_pagetable(current()); 41 | } 42 | } 43 | 44 | unsafe fn load_pagetable(pt: &'static mut PageTable) { 45 | let pt_addr = pt as *mut PageTable as usize; 46 | let ppn = pt_addr >> 12; 47 | 48 | let satp = Satp::with_values(ppn as u64, 0, SatpMode::Sv39); 49 | 50 | unsafe { 51 | asm!("csrw satp, {}", in(reg)u64::from(satp)); 52 | asm!("sfence.vma"); 53 | } 54 | } 55 | 56 | pub fn align_down(addr: usize) -> usize { 57 | mm::align_down(addr, PageTable::PAGE_SIZE) 58 | } 59 | 60 | pub fn align_up(addr: usize) -> usize { 61 | mm::align_up(addr, PageTable::PAGE_SIZE) 62 | } 63 | --------------------------------------------------------------------------------
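The `load_pagetable` helper above and the `sv39.rs` module that follows both rely on the Sv39 layout: a 39-bit virtual address is three 9-bit VPN indexes plus a 12-bit page offset, and `satp` packs the root table's PPN, an ASID and a mode nibble (8 for Sv39). The following standalone sketch (illustrative only, not part of the crate) shows those two bit layouts:

```rust
// Standalone illustration of the Sv39 layouts used above and in sv39.rs below.
// Not part of the crate; field positions follow the RISC-V privileged spec.

/// Split a virtual address into (vpn2, vpn1, vpn0, page offset), Sv39-style.
fn sv39_split(vaddr: u64) -> (u64, u64, u64, u64) {
    let offset = vaddr & 0xfff; // bits 11..0
    let vpn0 = (vaddr >> 12) & 0x1ff; // bits 20..12
    let vpn1 = (vaddr >> 21) & 0x1ff; // bits 29..21
    let vpn2 = (vaddr >> 30) & 0x1ff; // bits 38..30
    (vpn2, vpn1, vpn0, offset)
}

/// Pack a satp value the way `Satp::with_values(ppn, asid, SatpMode::Sv39)` does:
/// PPN in bits 43..0, ASID in bits 59..44, mode nibble (8 = Sv39) in bits 63..60.
fn sv39_satp(root_table_addr: u64, asid: u64) -> u64 {
    let ppn = root_table_addr >> 12; // the root table is page-aligned
    ppn | (asid << 44) | (8u64 << 60)
}

fn main() {
    assert_eq!(sv39_split(0x8020_1234), (0x2, 0x1, 0x1, 0x234));
    assert_eq!(sv39_satp(0x8020_0000, 0) >> 60, 8); // mode field reads back as Sv39
    println!("sv39 layout checks passed");
}
```

`VAddr::vpn(nb)` and `Satp::with_values` in the file below compute essentially the same fields, just through the `modular_bitfield` accessors.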
/hal_riscv64/src/mm/sv39.rs: -------------------------------------------------------------------------------- 1 | use modular_bitfield::{bitfield, prelude::*}; 2 | 3 | use hal_core::mm::{self, PageAlloc, PageEntry, PageMap}; 4 | use hal_core::Error; 5 | 6 | #[repr(C)] 7 | pub struct VAddr { 8 | addr: u64, 9 | } 10 | 11 | impl From for VAddr { 12 | fn from(vaddr: mm::VAddr) -> Self { 13 | assert_eq!(usize::BITS, u64::BITS); 14 | Self::from_u64(vaddr.val as u64) 15 | } 16 | } 17 | 18 | impl VAddr { 19 | pub fn from_u64(addr: u64) -> VAddr { 20 | let mut vaddr = Self { addr }; 21 | 22 | let bit38 = (vaddr.vpn(2) >> 8) != 0; 23 | 24 | vaddr.extend_38th_bit(bit38); 25 | 26 | vaddr 27 | } 28 | 29 | pub fn vpn(&self, nb: usize) -> u16 { 30 | let vpn = self.addr >> 12; 31 | 32 | ((vpn >> (nb * 9)) & 0x1ff) as u16 33 | } 34 | 35 | fn extend_38th_bit(&mut self, bit: bool) { 36 | let mask = ((!1u64) >> 38) << 38; 37 | 38 | if bit { 39 | self.addr |= mask; // set all the bits above the 38th 40 | } else { 41 | self.addr &= !mask; // clear all the bits above the 38th 42 | } 43 | } 44 | } 45 | 46 | #[repr(C)] 47 | pub struct PAddr { 48 | addr: u64, 49 | } 50 | 51 | impl From for PAddr { 52 | fn from(paddr: mm::PAddr) -> Self { 53 | assert_eq!(usize::BITS, u64::BITS); 54 | Self::from_u64(paddr.val as u64) 55 | } 56 | } 57 | 58 | impl PAddr { 59 | pub fn from_u64(addr: u64) -> Self { 60 | let mut paddr = Self { addr }; 61 | 62 | let bit55 = (paddr.ppn(2) >> 8) != 0; 63 | 64 | paddr.extend_55th_bit(bit55); 65 | 66 | paddr 67 | } 68 | 69 | fn ppn(&self, nb: usize) -> u64 { 70 | let ppn = self.addr >> 12; 71 | 72 | if nb == 2 { 73 | (ppn >> (nb * 9)) & 0x3fffff 74 | } else { 75 | (ppn >> (nb * 9)) & 0x1ff 76 | } 77 | } 78 | 79 | fn extend_55th_bit(&mut self, bit: bool) { 80 | let mask = ((!0u64) >> 55) << 55; 81 | 82 | if bit { 83 | self.addr |= mask; // set all the bits above the 55th 84 | } else { 85 | self.addr &= !mask; // clear all the bits above the 55th 86 | } 87 | } 88 | } 89 | 90 | #[repr(u64)] 91 | #[bitfield] 92 | pub struct PageTableEntry { 93 | v: B1, 94 | #[skip(getters)] 95 | r: B1, 96 | #[skip(getters)] 97 | w: B1, 98 | #[skip(getters)] 99 | x: B1, 100 | #[skip(getters)] 101 | u: B1, 102 | #[skip] 103 | g: B1, 104 | #[skip] 105 | a: B1, 106 | #[skip] 107 | d: B1, 108 | #[skip] 109 | rsw: B2, 110 | ppn0: B9, 111 | ppn1: B9, 112 | ppn2: B26, 113 | #[skip] 114 | reserved: B10, 115 | } 116 | 117 | impl PageTableEntry { 118 | fn is_valid(&self) -> bool { 119 | self.v() == 1 120 | } 121 | 122 | fn set_valid(&mut self) { 123 | self.set_v(1) 124 | } 125 | 126 | fn set_paddr(&mut self, paddr: &PAddr) { 127 | self.set_ppn2(paddr.ppn(2) as u32); 128 | self.set_ppn1(paddr.ppn(1) as u16); 129 | self.set_ppn0(paddr.ppn(0) as u16); 130 | } 131 | 132 | fn set_target(&mut self, pt: *mut PageTable) { 133 | let addr = pt as u64; 134 | self.set_paddr(&PAddr::from_u64(addr)) 135 | } 136 | 137 | fn get_target(&mut self) -> &mut PageTable { 138 | let addr = 139 | ((self.ppn2() as u64) << 18 | (self.ppn1() as u64) << 9 | self.ppn0() as u64) * 4096u64; 140 | unsafe { (addr as *mut PageTable).as_mut().unwrap() } 141 | } 142 | 143 | fn set_perms(&mut self, perms: mm::Permissions) { 144 | self.set_r(perms.contains(mm::Permissions::READ) as u8); 145 | self.set_w(perms.contains(mm::Permissions::WRITE) as u8); 146 | self.set_x(perms.contains(mm::Permissions::EXECUTE) as u8); 147 | self.set_u(perms.contains(mm::Permissions::USER) as u8); 148 | } 149 | } 150 | 151 | impl PageEntry for PageTableEntry { 152 | fn 
set_invalid(&mut self) { 153 | self.set_v(0); 154 | } 155 | } 156 | 157 | #[repr(align(0x1000))] 158 | pub struct PageTable { 159 | entries: [PageTableEntry; 512], 160 | } 161 | 162 | impl PageMap for PageTable { 163 | const PAGE_SIZE: usize = 4096; 164 | type Entry = PageTableEntry; 165 | 166 | fn new(allocator: &impl PageAlloc) -> Result<&'static mut Self, Error> { 167 | let page = allocator.alloc(1)?; 168 | let page_table = page as *mut PageTable; 169 | // Safety: the PMM gave us the memory, it should be a valid pointer. 170 | let page_table: &mut PageTable = unsafe { page_table.as_mut().unwrap() }; 171 | 172 | page_table 173 | .entries 174 | .iter_mut() 175 | .for_each(|pte| pte.set_invalid()); 176 | 177 | Ok(page_table) 178 | } 179 | 180 | fn map( 181 | &mut self, 182 | va: mm::VAddr, 183 | pa: mm::PAddr, 184 | perms: mm::Permissions, 185 | allocator: &impl PageAlloc, 186 | ) -> Result<&mut Self::Entry, Error> { 187 | let paddr: PAddr = pa.into(); 188 | let vaddr: VAddr = va.into(); 189 | let mut pagetable = self; 190 | 191 | for level in (0..=2).rev() { 192 | // Get offset for this vaddr 193 | let vpn = vaddr.vpn(level); 194 | // Get entry for this vaddr 195 | let pte = &mut pagetable.entries[vpn as usize]; 196 | 197 | // If we are a leaf, add an entry for the paddr 198 | if level == 0 { 199 | pte.set_paddr(&paddr); 200 | pte.set_perms(perms); 201 | pte.set_valid(); 202 | 203 | return Ok(pte); 204 | } 205 | 206 | // If the entry is not valid we will need to allocate a new PageTable 207 | if !pte.is_valid() { 208 | let new_page_table = PageTable::new(allocator); 209 | 210 | // Set new PageTable as target of this entry 211 | pte.set_target(new_page_table? as *mut PageTable); 212 | pte.set_valid(); 213 | } 214 | 215 | // Get the next level PageTable 216 | pagetable = pte.get_target(); 217 | } 218 | 219 | unreachable!("We should have returned by now"); 220 | } 221 | } 222 | 223 | #[repr(u8)] 224 | pub(crate) enum SatpMode { 225 | _Bare = 0, 226 | Sv39 = 8, 227 | _Sv48 = 9, 228 | _Sv57 = 10, 229 | _Sv64 = 11, 230 | } 231 | 232 | #[repr(u64)] 233 | #[bitfield] 234 | pub(crate) struct Satp { 235 | #[skip(getters)] 236 | ppn: B44, 237 | #[skip(getters)] 238 | asid: B16, 239 | #[skip(getters)] 240 | mode: B4, 241 | } 242 | 243 | impl Satp { 244 | pub fn with_values(ppn: u64, asid: u16, mode: SatpMode) -> Self { 245 | Satp::new() 246 | .with_ppn(ppn) 247 | .with_asid(asid) 248 | .with_mode(mode as u8) 249 | } 250 | } 251 | -------------------------------------------------------------------------------- /hal_riscv64/src/plic.rs: -------------------------------------------------------------------------------- 1 | use hal_core::Error; 2 | 3 | const PLIC_ENABLE_OFFSET: usize = 0x002080; 4 | const PLIC_THRESHOLD_OFFSET: usize = 0x201000; 5 | const PLIC_NUMBER_SOURCES: u16 = 1024; 6 | const PLIC_NUMBER_INTERRUPT_SOURCE_BY_REGISTER: u8 = 32; 7 | const PLIC_NUMBER_SOURCE_REGISTER: u16 = 8 | PLIC_NUMBER_SOURCES / PLIC_NUMBER_INTERRUPT_SOURCE_BY_REGISTER as u16; 9 | const PLIC_MAX_CONTEXT: u16 = 0x3e00; 10 | const PLIC_CLAIM_OFFSET: usize = 0x201004; 11 | 12 | pub struct Plic { 13 | base_register_address: usize, 14 | } 15 | 16 | impl Plic { 17 | pub fn new(base_register_address: usize) -> Plic { 18 | Self { 19 | base_register_address, 20 | } 21 | } 22 | 23 | pub fn set_threshold(&self, threshold: u8) { 24 | unsafe { 25 | let addr = (self.base_register_address + PLIC_THRESHOLD_OFFSET) as *mut u32; 26 | addr.write_volatile(threshold as u32); 27 | } 28 | } 29 | 30 | pub fn enable_interrupt(&self, id: u16, 
hart: u16) -> Result<(), &'static str> { 31 | if id >= PLIC_NUMBER_SOURCES { 32 | return Err("enable_interrupt: Id is higher than PLIC_MAX_INTERRUPT_SOURCE"); 33 | } 34 | 35 | if hart >= PLIC_MAX_CONTEXT { 36 | return Err("enable_interrupt: hart is higher than PLIC_MAX_CONTEXT"); 37 | } 38 | 39 | let source_offset = (id / PLIC_NUMBER_INTERRUPT_SOURCE_BY_REGISTER as u16 40 | + hart * PLIC_NUMBER_SOURCE_REGISTER) as usize; 41 | let id_shift = 1 << (id % PLIC_NUMBER_INTERRUPT_SOURCE_BY_REGISTER as u16); 42 | 43 | unsafe { 44 | let addr = 45 | (self.base_register_address + PLIC_ENABLE_OFFSET + source_offset) as *mut u32; 46 | let current_interrupt = core::ptr::read_volatile(addr); 47 | addr.write_volatile(current_interrupt | id_shift); 48 | } 49 | 50 | Ok(()) 51 | } 52 | 53 | pub fn set_priority(&self, id: u16, priority: u32) -> Result<(), &'static str> { 54 | if id >= PLIC_NUMBER_SOURCES { 55 | return Err("set_priority: Id is higher than PLIC_MAX_INTERRUPT_SOURCE"); 56 | } 57 | 58 | unsafe { 59 | let addr = (self.base_register_address + (id * 4) as usize) as *mut u32; 60 | addr.write_volatile(priority); 61 | } 62 | 63 | Ok(()) 64 | } 65 | 66 | pub fn claim(&self) -> u32 { 67 | unsafe { 68 | let addr = (self.base_register_address + PLIC_CLAIM_OFFSET) as *mut u32; 69 | addr.read_volatile() 70 | } 71 | } 72 | 73 | pub fn complete(&self, source: u32) { 74 | unsafe { 75 | // XXX: this isn't hart specific ? 76 | let addr = (self.base_register_address + PLIC_CLAIM_OFFSET) as *mut u32; 77 | addr.write_volatile(source); 78 | } 79 | } 80 | 81 | fn get_int(&self) -> Result { 82 | let source = self.claim(); 83 | 84 | Ok(source) 85 | } 86 | 87 | fn clear_int(&self, int: u32) { 88 | self.complete(int); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /hal_riscv64/src/registers.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | pub fn set_sstatus_sie() { 4 | unsafe { 5 | asm!("csrrs zero, sstatus, {}", in(reg)1 << 1); 6 | } 7 | } 8 | 9 | pub fn set_sie_ssie() { 10 | unsafe { 11 | asm!("csrrs zero, sie, {}", in(reg)1 << 1); 12 | } 13 | } 14 | 15 | pub fn set_sie_seie() { 16 | unsafe { 17 | asm!("csrrs zero, sie, {}", in(reg)1 << 9); 18 | } 19 | } 20 | 21 | pub fn set_sie_stie() { 22 | unsafe { 23 | asm!("csrrs zero, sie, {}", in(reg)1 << 5); 24 | } 25 | } 26 | 27 | pub fn set_stvec(addr: usize) { 28 | unsafe { 29 | asm!("csrw stvec, {}", in(reg)(addr)); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /kernel/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "autocfg" 7 | version = "1.1.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 10 | 11 | [[package]] 12 | name = "bit_field" 13 | version = "0.10.1" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" 16 | 17 | [[package]] 18 | name = "bitflags" 19 | version = "2.2.1" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" 22 | 23 | [[package]] 24 | name = "cfg-if" 25 | version = "1.0.0" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 28 | 29 | [[package]] 30 | name = "cortex-a" 31 | version = "8.1.1" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "8256fd5103e10027467cc7a97c9ff27fcc4547ea24864da0aff2e7aef6e18e28" 34 | dependencies = [ 35 | "tock-registers", 36 | ] 37 | 38 | [[package]] 39 | name = "critical-section" 40 | version = "1.1.1" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" 43 | 44 | [[package]] 45 | name = "drivers" 46 | version = "0.1.0" 47 | dependencies = [ 48 | "qemu-exit", 49 | "tock-registers", 50 | "utils", 51 | ] 52 | 53 | [[package]] 54 | name = "embedded-hal" 55 | version = "0.2.7" 56 | source = "registry+https://github.com/rust-lang/crates.io-index" 57 | checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff" 58 | dependencies = [ 59 | "nb 0.1.3", 60 | "void", 61 | ] 62 | 63 | [[package]] 64 | name = "fdt" 65 | version = "0.1.3" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | checksum = "b643857cf70949306b81d7e92cb9d47add673868edac9863c4a49c42feaf3f1e" 68 | 69 | [[package]] 70 | name = "goblin" 71 | version = "0.6.1" 72 | source = "registry+https://github.com/rust-lang/crates.io-index" 73 | checksum = "0d6b4de4a8eb6c46a8c77e1d3be942cb9a8bf073c22374578e5ba4b08ed0ff68" 74 | dependencies = [ 75 | "plain", 76 | "scroll", 77 | ] 78 | 79 | [[package]] 80 | name = "kernel" 81 | version = "0.1.0" 82 | dependencies = [ 83 | "bitflags", 84 | "cfg-if", 85 | "cortex-a", 86 | "drivers", 87 | "fdt", 88 | "goblin", 89 | "modular-bitfield", 90 | "once_cell", 91 | "riscv", 92 | "sbi", 93 | "spin", 94 | "static_assertions", 95 | "tock-registers", 96 | "utils", 97 | ] 98 | 99 | [[package]] 100 | name = "lock_api" 101 | version = "0.4.7" 102 | source = "registry+https://github.com/rust-lang/crates.io-index" 103 | checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" 104 | dependencies = [ 105 | "autocfg", 106 | "scopeguard", 107 | ] 108 | 109 | [[package]] 110 | name = "modular-bitfield" 111 | version = "0.11.2" 112 | source = "registry+https://github.com/rust-lang/crates.io-index" 113 | checksum = "a53d79ba8304ac1c4f9eb3b9d281f21f7be9d4626f72ce7df4ad8fbde4f38a74" 114 | dependencies = [ 115 | "modular-bitfield-impl", 116 | "static_assertions", 117 | ] 118 | 119 | [[package]] 120 | name = "modular-bitfield-impl" 121 | version = "0.11.2" 122 | source = "registry+https://github.com/rust-lang/crates.io-index" 123 | checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" 124 | dependencies = [ 125 | 
"proc-macro2", 126 | "quote", 127 | "syn", 128 | ] 129 | 130 | [[package]] 131 | name = "nb" 132 | version = "0.1.3" 133 | source = "registry+https://github.com/rust-lang/crates.io-index" 134 | checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f" 135 | dependencies = [ 136 | "nb 1.0.0", 137 | ] 138 | 139 | [[package]] 140 | name = "nb" 141 | version = "1.0.0" 142 | source = "registry+https://github.com/rust-lang/crates.io-index" 143 | checksum = "546c37ac5d9e56f55e73b677106873d9d9f5190605e41a856503623648488cae" 144 | 145 | [[package]] 146 | name = "once_cell" 147 | version = "1.17.1" 148 | source = "registry+https://github.com/rust-lang/crates.io-index" 149 | checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" 150 | 151 | [[package]] 152 | name = "plain" 153 | version = "0.2.3" 154 | source = "registry+https://github.com/rust-lang/crates.io-index" 155 | checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" 156 | 157 | [[package]] 158 | name = "proc-macro2" 159 | version = "1.0.39" 160 | source = "registry+https://github.com/rust-lang/crates.io-index" 161 | checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" 162 | dependencies = [ 163 | "unicode-ident", 164 | ] 165 | 166 | [[package]] 167 | name = "qemu-exit" 168 | version = "3.0.1" 169 | source = "registry+https://github.com/rust-lang/crates.io-index" 170 | checksum = "9ff023245bfcc73fb890e1f8d5383825b3131cc920020a5c487d6f113dfc428a" 171 | 172 | [[package]] 173 | name = "quote" 174 | version = "1.0.18" 175 | source = "registry+https://github.com/rust-lang/crates.io-index" 176 | checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" 177 | dependencies = [ 178 | "proc-macro2", 179 | ] 180 | 181 | [[package]] 182 | name = "riscv" 183 | version = "0.10.1" 184 | source = "registry+https://github.com/rust-lang/crates.io-index" 185 | checksum = "aa3145d2fae3778b1e31ec2e827b228bdc6abd9b74bb5705ba46dcb82069bc4f" 186 | dependencies = [ 187 | "bit_field", 188 | "critical-section", 189 | "embedded-hal", 190 | ] 191 | 192 | [[package]] 193 | name = "sbi" 194 | version = "0.2.0" 195 | source = "registry+https://github.com/rust-lang/crates.io-index" 196 | checksum = "29cb0870400aca7e4487e8ec1e93f9d4288da763cb1da2cedc5102e62b6522ad" 197 | 198 | [[package]] 199 | name = "scopeguard" 200 | version = "1.1.0" 201 | source = "registry+https://github.com/rust-lang/crates.io-index" 202 | checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" 203 | 204 | [[package]] 205 | name = "scroll" 206 | version = "0.11.0" 207 | source = "registry+https://github.com/rust-lang/crates.io-index" 208 | checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" 209 | 210 | [[package]] 211 | name = "spin" 212 | version = "0.9.8" 213 | source = "registry+https://github.com/rust-lang/crates.io-index" 214 | checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" 215 | dependencies = [ 216 | "lock_api", 217 | ] 218 | 219 | [[package]] 220 | name = "static_assertions" 221 | version = "1.1.0" 222 | source = "registry+https://github.com/rust-lang/crates.io-index" 223 | checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" 224 | 225 | [[package]] 226 | name = "syn" 227 | version = "1.0.96" 228 | source = "registry+https://github.com/rust-lang/crates.io-index" 229 | checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" 230 | dependencies = [ 231 | 
"proc-macro2", 232 | "quote", 233 | "unicode-ident", 234 | ] 235 | 236 | [[package]] 237 | name = "tock-registers" 238 | version = "0.8.1" 239 | source = "registry+https://github.com/rust-lang/crates.io-index" 240 | checksum = "696941a0aee7e276a165a978b37918fd5d22c55c3d6bda197813070ca9c0f21c" 241 | 242 | [[package]] 243 | name = "unicode-ident" 244 | version = "1.0.1" 245 | source = "registry+https://github.com/rust-lang/crates.io-index" 246 | checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" 247 | 248 | [[package]] 249 | name = "utils" 250 | version = "0.1.0" 251 | 252 | [[package]] 253 | name = "void" 254 | version = "1.0.2" 255 | source = "registry+https://github.com/rust-lang/crates.io-index" 256 | checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" 257 | -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kernel" 3 | version = "0.1.0" 4 | authors = ["cohenarthur ", "Esteban \"Skallwar\" Blanc ", "Martin \"n1tram1\" Schmidt "] 5 | edition = "2021" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | cfg-if = "1.0" 11 | static_assertions = "1.1" 12 | modular-bitfield = "0.11" 13 | bitflags = "2.1" 14 | fdt = "0.1" 15 | spin = "0.9" 16 | goblin = { version = "0.6", default-features = false, features = ["elf64"] } 17 | qemu-exit = "3.0" 18 | hal_core = { path = "../hal_core" } 19 | arrayvec = { version = "0.7", default-features = false } 20 | align-data = "0.1" 21 | log = "0.4" 22 | tests = { path = "../tests", artifact = "bin" } 23 | 24 | [dev-dependencies] 25 | 26 | [target.riscv64gc-unknown-none-elf.dependencies] 27 | sbi = "0.2" 28 | hal_riscv64 = { path = "../hal_riscv64" } 29 | 30 | [target.aarch64-unknown-none.dependencies] 31 | tock-registers = "0.8" 32 | cortex-a = "8.1" 33 | hal_aarch64 = { path = "../hal_aarch64" } 34 | 35 | [features] 36 | aarch64_pgt48oa = [] 37 | riscv64_sv39 = [] 38 | arm = [] 39 | -------------------------------------------------------------------------------- /kernel/fixtures/Makefile: -------------------------------------------------------------------------------- 1 | CC := clang 2 | 3 | ASFLAGS += -Wall -Wextra -nostdlib -nostartfiles -static -mno-relax 4 | LDFLAGS += -fuse-ld=lld 5 | 6 | EXEC = small 7 | 8 | .PHONY = all clean 9 | 10 | all: 11 | echo 'You forgot to sepcify your rust target' 12 | exit 1 13 | 14 | riscv64gc-unknown-none-elf: ASFLAGS+=--target=riscv64-unknown-none-elf-riscv64gc 15 | riscv64gc-unknown-none-elf: $(EXEC) 16 | 17 | aarch64-unknown-none: ASFLAGS+=--target=aarch64-unknown-none-elf 18 | aarch64-unknown-none: $(EXEC) 19 | 20 | clean: 21 | rm -f *.o 22 | rm -f $(EXEC) 23 | 24 | 25 | -------------------------------------------------------------------------------- /kernel/fixtures/small.S: -------------------------------------------------------------------------------- 1 | .text 2 | .global _start 3 | .type _start,@function 4 | 5 | _start: 6 | #ifdef __riscv 7 | addi a0, x0, 42 8 | ret 9 | #elif __aarch64__ 10 | mov w8, 42 11 | svc 0 12 | #else 13 | #error This example is not available for the selected target 14 | #endif 15 | -------------------------------------------------------------------------------- /kernel/src/device_tree.rs: -------------------------------------------------------------------------------- 1 | use super::Error; 2 | 3 | use 
hal_core::AddressRange; 4 | 5 | use fdt::node::FdtNode; 6 | 7 | pub struct DeviceTree { 8 | addr: usize, 9 | dtb: fdt::Fdt<'static>, 10 | total_size: usize, 11 | } 12 | 13 | impl DeviceTree { 14 | pub fn new(device_tree_ptr: usize) -> Result<Self, Error> { 15 | let dtb = unsafe { fdt::Fdt::from_ptr(device_tree_ptr as *const u8)? }; 16 | 17 | Ok(Self { 18 | addr: device_tree_ptr, 19 | dtb, 20 | total_size: dtb.total_size(), 21 | }) 22 | } 23 | 24 | pub fn memory_region(&self) -> AddressRange { 25 | AddressRange::new(self.addr..self.addr + self.total_size) 26 | } 27 | 28 | pub fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>( 29 | &self, 30 | mut f: F, 31 | ) { 32 | let memory = self.dtb.memory(); 33 | let mut regions = memory 34 | .regions() 35 | .map(|region| (region.starting_address as usize, region.size.unwrap_or(0))); 36 | 37 | f(&mut regions); 38 | } 39 | 40 | pub fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>( 41 | &self, 42 | mut f: F, 43 | ) { 44 | match self.dtb.find_node("/reserved-memory") { 45 | None => (), 46 | Some(reserved_memory) => { 47 | let mut regions = reserved_memory 48 | .children() 49 | .flat_map(|child| child.reg()) 50 | .flatten() 51 | .map(|region| (region.starting_address as usize, region.size.unwrap_or(0))); 52 | 53 | f(&mut regions); 54 | } 55 | } 56 | } 57 | 58 | pub fn console_node(&self) -> Option<FdtNode> { 59 | let chosen = self.dtb.chosen(); 60 | chosen.stdout() 61 | } 62 | 63 | pub fn interrupt_controller(&self) -> Option<FdtNode> { 64 | // This is a funny one. 65 | // There can be multiple interrupt controllers: 66 | // - on a "regular" Aarch64 board, you just have a gic 67 | // - on a RISC-V board, you have a "root" irq chip that's part of the cpu and there is a 68 | // soc level interrupt controller "plic/aplic" (similar to gic). 69 | // Handling this properly requires more code which we will do in the future, but for 70 | // now... don't do anything particular to take care of the root irqchip and use a 71 | // heuristic to find the soc level interrupt controller. 72 | let mut interrupt_controllers = self 73 | .dtb 74 | .all_nodes() 75 | .filter(|node| node.property("interrupt-controller").is_some()); 76 | 77 | // The heuristic: the root irq chip doesn't have a reg property. 78 | // Works on aarch64 and riscv64.
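        // Illustration (made-up node names/addresses, not read from a real dtb): on a
        // RISC-V virt-like machine the filter above keeps both of these nodes, and the
        // find() below picks the second one because only it carries a `reg` property:
        //
        //     cpus { cpu@0 { interrupt-controller {   // per-hart intc, no reg
        //         compatible = "riscv,cpu-intc";
        //         interrupt-controller;
        //     }; }; };
        //     soc { plic@c000000 {                    // soc-level irq chip, has reg
        //         compatible = "riscv,plic0";
        //         reg = <0x0 0xc000000 0x0 0x600000>;
        //         interrupt-controller;
        //     }; }; };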
79 | interrupt_controllers.find(|intc| intc.reg().is_some()) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /kernel/src/driver_manager.rs: -------------------------------------------------------------------------------- 1 | use alloc::{boxed::Box, collections::LinkedList, sync::Arc}; 2 | 3 | use super::device_tree::DeviceTree; 4 | use super::drivers::{self, Matcher}; 5 | use super::error::Error; 6 | use super::kernel_console; 7 | use drivers::{Console, Driver}; 8 | use fdt::node::FdtNode; 9 | 10 | use crate::hal; 11 | use crate::mm::alloc_pages_for_hal; 12 | use hal_core::mm::{PageMap, Permissions}; 13 | 14 | pub struct DriverManager { 15 | drivers: LinkedList>, 16 | } 17 | 18 | impl DriverManager { 19 | fn new() -> Self { 20 | Self { 21 | drivers: LinkedList::new(), 22 | } 23 | } 24 | 25 | pub fn with_devices(dt: &DeviceTree) -> Result { 26 | let mut mgr = Self::new(); 27 | 28 | mgr.do_console(dt)?; 29 | 30 | Ok(mgr) 31 | } 32 | 33 | fn do_console(&mut self, dt: &DeviceTree) -> Result<(), Error> { 34 | let cons_node = dt.console_node().ok_or(Error::DeviceNotFound( 35 | "dtb doesn't contain a console node...", 36 | ))?; 37 | 38 | map_dt_regions(&cons_node)?; 39 | 40 | if let Some(cons_driver) = 41 | self.find_driver::(&cons_node, drivers::CONSOLE_MATCHERS) 42 | { 43 | self.register_console(cons_driver)?; 44 | Ok(()) 45 | } else { 46 | unmap_dt_regions(&cons_node)?; 47 | Err(Error::NoMatchingDriver("console")) 48 | } 49 | } 50 | 51 | fn extract_compatibles<'a>(node: &'a FdtNode) -> impl Iterator { 52 | let compatible = node 53 | .properties() 54 | .find(|prop| prop.name == "compatible") 55 | .and_then(|some_prop| some_prop.as_str()) 56 | .unwrap_or(""); 57 | compatible.split('\0') 58 | } 59 | 60 | pub fn find_driver( 61 | &self, 62 | node: &FdtNode, 63 | matchers: &[&Matcher], 64 | ) -> Option> { 65 | for compat in Self::extract_compatibles(node) { 66 | let matching_constructor = matchers 67 | .iter() 68 | .find(|matcher| matcher.matches(compat)) 69 | .map(|matcher| matcher.constructor); 70 | if let Some(constructor) = matching_constructor { 71 | if let Ok(driver) = constructor(&mut node.reg()?) 
{ 72 | return Some(driver); 73 | } 74 | } 75 | } 76 | 77 | None 78 | } 79 | 80 | fn register_console(&mut self, cons: Box) -> Result<(), Error> { 81 | let cons: Arc = Arc::from(cons); 82 | self.register_driver(cons.clone()); 83 | kernel_console::set_console(cons.clone())?; 84 | 85 | Ok(()) 86 | } 87 | 88 | fn register_driver(&mut self, drv: Arc) { 89 | self.drivers.push_back(drv); 90 | } 91 | } 92 | 93 | fn map_dt_regions(node: &FdtNode) -> Result<(), Error> { 94 | if let Some(reg) = node.reg() { 95 | for memory_region in reg { 96 | let start = memory_region.starting_address as usize; 97 | let size = memory_region.size.ok_or(Error::InvalidFdtNode)?; 98 | 99 | assert!(size % hal::mm::PAGE_SIZE == 0); 100 | hal::mm::current().identity_map_range( 101 | start.into(), 102 | size / hal::mm::PAGE_SIZE, 103 | Permissions::READ | Permissions::WRITE, 104 | alloc_pages_for_hal, 105 | )?; 106 | } 107 | } 108 | 109 | Ok(()) 110 | } 111 | 112 | fn unmap_dt_regions(node: &FdtNode) -> Result<(), Error> { 113 | let pagesize = hal::mm::PAGE_SIZE; 114 | 115 | if let Some(reg) = node.reg() { 116 | for memory_region in reg { 117 | let start = memory_region.starting_address as usize; 118 | let size = memory_region.size.ok_or(Error::InvalidFdtNode)?; 119 | assert!(size % hal::mm::PAGE_SIZE == 0); 120 | 121 | let kernel_pt = hal::mm::current(); 122 | for page in (start..start + size).step_by(pagesize) { 123 | kernel_pt 124 | .add_invalid_entry(page.into(), |_| unreachable!()) 125 | .unwrap(); 126 | } 127 | } 128 | } 129 | 130 | Ok(()) 131 | } 132 | -------------------------------------------------------------------------------- /kernel/src/drivers/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module stores all drivers strictly necessary for the kernel. 2 | 3 | extern crate alloc; 4 | use alloc::boxed::Box; 5 | 6 | pub mod ns16550; 7 | pub mod null_uart; 8 | pub mod pl011; 9 | pub mod qemuexit; 10 | 11 | use crate::Error; 12 | use fdt::standard_nodes::MemoryRegion; 13 | 14 | pub trait Driver { 15 | fn get_address_range(&self) -> Option<(usize, usize)>; 16 | } 17 | 18 | pub trait Console: Driver { 19 | fn write(&self, data: &str); 20 | } 21 | 22 | pub struct Matcher { 23 | pub compatibles: &'static [&'static str], 24 | pub constructor: fn(&mut dyn Iterator) -> Result, Error>, 25 | } 26 | 27 | impl Matcher { 28 | pub fn matches(&self, compatible: &str) -> bool { 29 | self.compatibles.iter().any(|s| s == &compatible) 30 | } 31 | } 32 | type ConsoleMatcher = Matcher; 33 | 34 | pub const CONSOLE_MATCHERS: &[&ConsoleMatcher] = &[&pl011::MATCHER, &ns16550::MATCHER]; 35 | -------------------------------------------------------------------------------- /kernel/src/drivers/ns16550.rs: -------------------------------------------------------------------------------- 1 | //! Driver for the NS16550 UART chip. 2 | //! 
The datasheet used to write this is: 3 | 4 | use super::Console; 5 | use super::ConsoleMatcher; 6 | use super::Driver; 7 | 8 | use crate::utils::lock::Lock; 9 | 10 | pub extern crate alloc; 11 | use alloc::boxed::Box; 12 | 13 | const TRANSMITTER_HOLDING_REGISTER: usize = 0; 14 | const _INTERRUPT_ENABLE_REGISTER: usize = 1; 15 | 16 | pub struct Ns16550 { 17 | inner: Lock, 18 | } 19 | 20 | struct Ns16550Inner { 21 | base_register_address: usize, 22 | } 23 | 24 | impl Ns16550Inner { 25 | pub const fn new(base_register_address: usize) -> Self { 26 | Self { 27 | base_register_address, 28 | } 29 | } 30 | 31 | pub fn _read(&self) -> u8 { 32 | self._read_transmitter_holding_reg() 33 | } 34 | 35 | pub fn _enable_data_ready_interrupt(&self) { 36 | // Data ready is the first bit of the Interrupt Enable Register 37 | unsafe { 38 | let addr = (self.base_register_address as *mut u8).add(_INTERRUPT_ENABLE_REGISTER); 39 | addr.write_volatile(1 << 0) 40 | } 41 | } 42 | 43 | fn write_transmitter_holding_reg(&self, byte: u8) { 44 | unsafe { 45 | let addr = (self.base_register_address as *mut u8).add(TRANSMITTER_HOLDING_REGISTER); 46 | addr.write_volatile(byte); 47 | } 48 | } 49 | 50 | fn _read_transmitter_holding_reg(&self) -> u8 { 51 | unsafe { 52 | let addr = (self.base_register_address as *mut u8).add(TRANSMITTER_HOLDING_REGISTER); 53 | addr.read_volatile() 54 | } 55 | } 56 | } 57 | 58 | impl Ns16550 { 59 | pub const fn new(base: usize) -> Self { 60 | Self { 61 | inner: Lock::new(Ns16550Inner::new(base)), 62 | } 63 | } 64 | } 65 | 66 | impl Driver for Ns16550 { 67 | fn get_address_range(&self) -> Option<(usize, usize)> { 68 | // Base address + max register offset 69 | self.inner 70 | .lock(|ns16550| Some((ns16550.base_register_address, 0b111))) 71 | } 72 | } 73 | 74 | impl Console for Ns16550 { 75 | fn write(&self, data: &str) { 76 | self.inner.lock(|ns16550| { 77 | for byte in data.bytes() { 78 | ns16550.write_transmitter_holding_reg(byte); 79 | } 80 | }) 81 | } 82 | } 83 | 84 | pub(super) const MATCHER: ConsoleMatcher = ConsoleMatcher { 85 | compatibles: &["ns16550a"], 86 | constructor: |reg| { 87 | Ok(Box::new(Ns16550::new( 88 | reg.next().unwrap().starting_address as usize, 89 | ))) 90 | }, 91 | }; 92 | -------------------------------------------------------------------------------- /kernel/src/drivers/null_uart.rs: -------------------------------------------------------------------------------- 1 | use super::{Console, Driver}; 2 | 3 | #[derive(Debug)] 4 | pub struct NullUart; 5 | 6 | impl NullUart { 7 | pub const fn new() -> Self { 8 | Self 9 | } 10 | } 11 | 12 | impl Console for NullUart { 13 | fn write(&self, _data: &str) { 14 | // Does nothing, just a placeholder while a real uart is not in place. 
15 | } 16 | } 17 | 18 | impl Driver for NullUart { 19 | fn get_address_range(&self) -> Option<(usize, usize)> { 20 | None 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /kernel/src/drivers/pl011.rs: -------------------------------------------------------------------------------- 1 | use super::Console; 2 | use super::ConsoleMatcher; 3 | use super::Driver; 4 | 5 | use crate::utils::lock::Lock; 6 | 7 | pub extern crate alloc; 8 | use alloc::boxed::Box; 9 | 10 | pub struct Pl011 { 11 | inner: Lock, 12 | } 13 | 14 | struct Pl011Inner { 15 | base: usize, 16 | } 17 | 18 | impl Pl011Inner { 19 | pub const fn new(base: usize) -> Self { 20 | Self { base } 21 | } 22 | 23 | fn read_flag_register(&self) -> u32 { 24 | unsafe { ((self.base + 0x18) as *mut u32).read_volatile() } 25 | } 26 | 27 | fn tx_fifo_full(&self) -> bool { 28 | self.read_flag_register() & (1 << 5) > 0 29 | } 30 | 31 | fn write_data_register(&mut self, b: u8) { 32 | let dr = self.base as *mut u32; 33 | unsafe { dr.write_volatile(b.into()) } 34 | } 35 | 36 | pub fn putc(&mut self, b: u8) { 37 | while self.tx_fifo_full() {} 38 | 39 | self.write_data_register(b); 40 | } 41 | } 42 | 43 | impl Driver for Pl011 { 44 | fn get_address_range(&self) -> Option<(usize, usize)> { 45 | // Base address, max register offset 46 | self.inner.lock(|pl011| Some((pl011.base, 0xFFC))) 47 | } 48 | } 49 | 50 | impl Console for Pl011 { 51 | fn write(&self, data: &str) { 52 | self.inner 53 | .lock(|pl011| data.bytes().for_each(|b| pl011.putc(b))); 54 | } 55 | } 56 | 57 | impl Pl011 { 58 | pub const fn new(base: usize) -> Self { 59 | Self { 60 | inner: Lock::new(Pl011Inner::new(base)), 61 | } 62 | } 63 | } 64 | 65 | pub(super) const MATCHER: ConsoleMatcher = ConsoleMatcher { 66 | compatibles: &["arm,pl011"], 67 | constructor: |reg| { 68 | Ok(Box::new(Pl011::new( 69 | reg.next().unwrap().starting_address as usize, 70 | ))) 71 | }, 72 | }; 73 | -------------------------------------------------------------------------------- /kernel/src/drivers/qemuexit.rs: -------------------------------------------------------------------------------- 1 | use qemu_exit; 2 | use qemu_exit::QEMUExit; 3 | 4 | use super::Driver; 5 | 6 | #[cfg(target_arch = "riscv64")] 7 | const RISCV64_BASE_ADDRESS: usize = 0x100000; 8 | 9 | pub struct QemuExit { 10 | #[cfg(target_arch = "riscv64")] 11 | address: usize, 12 | #[cfg(target_arch = "riscv64")] 13 | inner: qemu_exit::RISCV64, 14 | #[cfg(target_arch = "aarch64")] 15 | inner: qemu_exit::AArch64, 16 | } 17 | 18 | impl QemuExit { 19 | pub fn new() -> Self { 20 | #[cfg(target_arch = "riscv64")] 21 | return Self { 22 | address: RISCV64_BASE_ADDRESS, 23 | inner: qemu_exit::RISCV64::new(RISCV64_BASE_ADDRESS as u64), 24 | }; 25 | 26 | #[cfg(target_arch = "aarch64")] 27 | return Self { 28 | inner: qemu_exit::AArch64::new(), 29 | }; 30 | } 31 | 32 | pub fn exit_success(&self) -> ! { 33 | self.inner.exit_success(); 34 | } 35 | 36 | pub fn exit_failure(&self) -> ! 
{ 37 | self.inner.exit_failure(); 38 | } 39 | } 40 | 41 | impl Default for QemuExit { 42 | fn default() -> Self { 43 | Self::new() 44 | } 45 | } 46 | 47 | impl Driver for QemuExit { 48 | fn get_address_range(&self) -> Option<(usize, usize)> { 49 | #[cfg(target_arch = "riscv64")] 50 | return Some((self.address, 1)); 51 | 52 | #[cfg(target_arch = "aarch64")] 53 | return None; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /kernel/src/error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | pub enum Error { 3 | DeviceNotFound(&'static str), 4 | NoMatchingDriver(&'static str), 5 | InvalidFdtNode, 6 | FdtError(fdt::FdtError), 7 | Allocator(hal_core::mm::AllocatorError), 8 | Hal(hal_core::Error), 9 | SetLoggerError(log::SetLoggerError), 10 | } 11 | 12 | impl From<fdt::FdtError> for Error { 13 | fn from(e: fdt::FdtError) -> Self { 14 | Self::FdtError(e) 15 | } 16 | } 17 | 18 | impl From<hal_core::mm::AllocatorError> for Error { 19 | fn from(e: hal_core::mm::AllocatorError) -> Self { 20 | Self::Allocator(e) 21 | } 22 | } 23 | 24 | impl From<hal_core::Error> for Error { 25 | fn from(e: hal_core::Error) -> Self { 26 | Self::Hal(e) 27 | } 28 | } 29 | 30 | impl From<log::SetLoggerError> for Error { 31 | fn from(e: log::SetLoggerError) -> Self { 32 | Self::SetLoggerError(e) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /kernel/src/executable/elf.rs: -------------------------------------------------------------------------------- 1 | use core::iter::Iterator; 2 | 3 | use crate::globals; 4 | use crate::Error; 5 | 6 | use goblin; 7 | use goblin::elf::header::header64::Header; 8 | use goblin::elf::program_header::program_header64::ProgramHeader; 9 | use goblin::elf::program_header::*; 10 | 11 | use crate::hal; 12 | use hal_core::mm::{PAddr, PageAlloc, PageMap, Permissions, VAddr}; 13 | 14 | fn align_down(addr: usize, page_size: usize) -> usize { 15 | let page_mask = !(page_size - 1); 16 | 17 | addr & page_mask 18 | } 19 | 20 | pub struct Elf<'a> { 21 | data: &'a [u8], 22 | } 23 | 24 | impl<'a> Elf<'a> { 25 | /// Create a new Elf struct from a byte slice 26 | pub fn from_bytes(data: &'a [u8]) -> Self { 27 | Self { data } 28 | } 29 | 30 | /// Get the header struct of an ELF file 31 | fn header(&self) -> &Header { 32 | let header_slice = self.data[..64].try_into().unwrap(); 33 | 34 | Header::from_bytes(header_slice) 35 | } 36 | 37 | /// Get an iterator over all the segments of an ELF file 38 | fn segments(&self) -> impl Iterator<Item = &ProgramHeader> + '_ { 39 | let header = self.header(); 40 | 41 | (0..header.e_phnum) 42 | .map(|n| { 43 | (self.data.as_ptr() as usize) 44 | + header.e_phoff as usize 45 | + (n as usize * header.e_phentsize as usize) 46 | }) 47 | .map(|addr| unsafe { &(*(addr as *const ProgramHeader)) }) 48 | } 49 | 50 | pub fn get_entry_point(&self) -> usize { 51 | self.header().e_entry as usize 52 | } 53 | 54 | fn pages_needed( 55 | segment: &goblin::elf64::program_header::ProgramHeader, 56 | page_size: usize, 57 | ) -> usize { 58 | let p_memsz = segment.p_memsz as usize; 59 | 60 | if p_memsz < page_size { 61 | 1 62 | } else { 63 | p_memsz / page_size 64 | } 65 | } 66 | 67 | pub fn load(&self) -> Result<(), Error> { 68 | let page_size = hal::mm::PAGE_SIZE; 69 | 70 | for segment in self.segments() { 71 | if segment.p_type != PT_LOAD { 72 | continue; 73 | } 74 | 75 | let p_offset = segment.p_offset as usize; 76 | let p_filesz = segment.p_filesz as usize; 77 | let p_memsz = segment.p_memsz as usize; 78 | 79 | let pages_needed =
Self::pages_needed(segment, page_size); 80 | let physical_pages = globals::PHYSICAL_MEMORY_MANAGER 81 | .alloc(pages_needed) 82 | .unwrap(); 83 | let virtual_pages = segment.p_paddr as *mut u8; 84 | let offset_in_page = 85 | (virtual_pages as usize) - align_down(virtual_pages as usize, page_size); 86 | 87 | let segment_data_src_addr = ((self.data.as_ptr() as usize) + p_offset) as *const u8; 88 | let segment_data_dst_addr = (physical_pages + offset_in_page) as *mut u8; 89 | 90 | let segment_data_src: &[u8] = 91 | unsafe { core::slice::from_raw_parts(segment_data_src_addr, p_filesz) }; 92 | let segment_data_dst: &mut [u8] = { 93 | let dst = 94 | unsafe { core::slice::from_raw_parts_mut(segment_data_dst_addr, p_memsz) }; 95 | 96 | // Zeroing uninitialized data 97 | dst[p_filesz..p_memsz].iter_mut().for_each(|e| *e = 0u8); 98 | 99 | dst 100 | }; 101 | 102 | segment_data_dst[0..p_filesz].clone_from_slice(segment_data_src); 103 | 104 | let perms = elf_to_mm_permissions(segment.p_flags); 105 | 106 | for i in 0..pages_needed { 107 | let page_offset = i * page_size; 108 | // FIXME: No unwrap 109 | hal::mm::current() 110 | .map( 111 | VAddr::new(align_down(virtual_pages as usize, page_size) + page_offset), 112 | PAddr::new(physical_pages + page_offset), 113 | perms, 114 | &globals::PHYSICAL_MEMORY_MANAGER, 115 | ) 116 | .unwrap(); 117 | } 118 | } 119 | 120 | Ok(()) 121 | } 122 | } 123 | 124 | /// Convert ELF p_flags permissions to Permissions 125 | fn elf_to_mm_permissions(elf_permsission: u32) -> Permissions { 126 | let mut perms = Permissions::empty(); 127 | 128 | if elf_permsission & PF_R != 0 { 129 | perms |= Permissions::READ; 130 | } 131 | 132 | if elf_permsission & PF_W != 0 { 133 | perms |= Permissions::WRITE; 134 | } 135 | 136 | if elf_permsission & PF_X != 0 { 137 | perms |= Permissions::EXECUTE; 138 | } 139 | 140 | perms 141 | } 142 | 143 | #[cfg(test)] 144 | mod tests { 145 | use super::*; 146 | use crate::kernel_tests::*; 147 | 148 | #[test_case] 149 | fn elf_load(ctx: &mut TestContext) { 150 | ctx.reset(); 151 | 152 | let elf_bytes = core::include_bytes!("../../fixtures/small"); 153 | let elf = Elf::from_bytes(elf_bytes); 154 | 155 | let mut user_pagetable = ctx.page_table.fork_user_page_table(&mut ctx.pmm).unwrap(); 156 | 157 | elf.load(&mut ctx.page_table, &mut user_pagetable, &mut ctx.pmm); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /kernel/src/executable/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod elf; 2 | -------------------------------------------------------------------------------- /kernel/src/generic_main.rs: -------------------------------------------------------------------------------- 1 | use super::device_tree::DeviceTree; 2 | use super::drivers::qemuexit::QemuExit; 3 | use super::drivers::Driver; 4 | use super::globals; 5 | 6 | use crate::hal; 7 | use crate::mm; 8 | 9 | use crate::tests::{self, TestResult}; 10 | 11 | use log::info; 12 | 13 | pub fn generic_main(dt: DeviceTree, hacky_devices: &[&dyn Driver]) -> ! 
{ 14 | info!("Entered generic_main"); 15 | let qemu_exit = QemuExit::new(); 16 | let qemu_exit_slice = [&qemu_exit as &dyn Driver]; 17 | 18 | let devices = hacky_devices.iter().chain(&qemu_exit_slice); 19 | 20 | // Memory init 21 | globals::PHYSICAL_MEMORY_MANAGER 22 | .init_from_device_tree(&dt) 23 | .unwrap(); 24 | mm::map_address_space(&dt, devices).expect("failed to map the addres space"); 25 | 26 | // Driver stuff 27 | // let _drvmgr = DriverManager::with_devices(&dt).unwrap(); 28 | 29 | hal::irq::init_irq_chip((), &globals::PHYSICAL_MEMORY_MANAGER) 30 | .expect("initialization of irq chip failed"); 31 | 32 | hal::cpu::unmask_interrupts(); 33 | 34 | if LAUNCH_TESTS { 35 | match tests::launch() { 36 | TestResult::Success => qemu_exit.exit_success(), 37 | TestResult::Failure => qemu_exit.exit_failure(), 38 | } 39 | } else { 40 | panic!("no scheduler to launch yet..."); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /kernel/src/globals.rs: -------------------------------------------------------------------------------- 1 | use crate::mm; 2 | 3 | pub static PHYSICAL_MEMORY_MANAGER: mm::PhysicalMemoryManager = mm::PhysicalMemoryManager::new(); 4 | 5 | pub enum KernelState { 6 | EarlyInit, 7 | MmuEnabledInit, 8 | } 9 | 10 | impl KernelState { 11 | pub fn is_earlyinit(&self) -> bool { 12 | matches!(self, Self::EarlyInit) 13 | } 14 | 15 | pub fn is_mmu_enabled(&self) -> bool { 16 | matches!(self, Self::MmuEnabledInit) 17 | } 18 | } 19 | 20 | pub static mut STATE: KernelState = KernelState::EarlyInit; 21 | -------------------------------------------------------------------------------- /kernel/src/kernel_console.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::{self, Write}; 2 | 3 | use crate::drivers::Console; 4 | use crate::Error; 5 | 6 | use alloc::sync::Arc; 7 | 8 | use log::{Level, LevelFilter, Metadata, Record}; 9 | use spin::Mutex; 10 | 11 | struct KernelConsole { 12 | earlyinit_console: Option<&'static (dyn Console + Sync)>, 13 | console: Option>, 14 | } 15 | 16 | impl KernelConsole { 17 | const fn new() -> Self { 18 | Self { 19 | earlyinit_console: None, 20 | console: None, 21 | } 22 | } 23 | } 24 | 25 | static KERNEL_CONSOLE: Mutex = Mutex::new(KernelConsole::new()); 26 | 27 | impl fmt::Write for KernelConsole { 28 | fn write_str(&mut self, data: &str) -> fmt::Result { 29 | if let Some(console) = &self.console { 30 | console.write(data); 31 | } else if let Some(earlyinit_console) = self.earlyinit_console { 32 | earlyinit_console.write(data); 33 | } 34 | // We should return an Error in the `else` case but it not like we can tell the user with a 35 | // print... 
36 | 37 | Ok(()) 38 | } 39 | } 40 | 41 | fn print_fmt(args: fmt::Arguments) { 42 | KERNEL_CONSOLE.lock().write_fmt(args).unwrap(); 43 | } 44 | 45 | struct KernelLogger; 46 | 47 | impl log::Log for KernelLogger { 48 | fn enabled(&self, metadata: &Metadata) -> bool { 49 | metadata.level() <= Level::Trace 50 | } 51 | 52 | fn log(&self, record: &Record) { 53 | if self.enabled(record.metadata()) { 54 | // kprintln will call into the KERNEL_CONSOLE 55 | crate::kprintln!("{} - {}", record.level(), record.args()); 56 | } 57 | } 58 | 59 | fn flush(&self) {} 60 | } 61 | 62 | static KERNEL_LOGGER: KernelLogger = KernelLogger; 63 | 64 | pub fn init_logging() -> Result<(), Error> { 65 | log::set_logger(&KERNEL_LOGGER)?; 66 | log::set_max_level(LevelFilter::Trace); 67 | 68 | Ok(()) 69 | } 70 | 71 | pub fn set_earlyinit_console(new_console: &'static (dyn Console + Sync)) { 72 | KERNEL_CONSOLE.lock().earlyinit_console = Some(new_console); 73 | } 74 | 75 | pub fn set_console(new_console: Arc) -> Result<(), Error> { 76 | KERNEL_CONSOLE.lock().console = Some(new_console); 77 | 78 | // TODO: return an error if the error already was some (unless we consider it is ok) 79 | Ok(()) 80 | } 81 | 82 | #[macro_export] 83 | macro_rules! kprint { 84 | ($($args:tt)*) => ($crate::kernel_console::print_fmt(format_args!($($args)*))) 85 | } 86 | 87 | #[macro_export] 88 | macro_rules! kprintln { 89 | () => ($crate::kprint!("\r\n")); 90 | ($($args:tt)*) => ($crate::kprint!("{}\r\n", format_args!($($args)*))) 91 | } 92 | 93 | #[macro_export] 94 | macro_rules! dbg { 95 | () => { 96 | $crate::kprintln!("[{}:{}]", core::file!(), core::line!()) 97 | }; 98 | ($expr:expr) => { 99 | $crate::kprintln!( 100 | "[{}:{}] {} = {:#?}", 101 | core::file!(), 102 | core::line!(), 103 | core::stringify!($expr), 104 | &$expr 105 | ) 106 | }; 107 | } 108 | -------------------------------------------------------------------------------- /kernel/src/kernel_tests.rs: -------------------------------------------------------------------------------- 1 | use cfg_if::cfg_if; 2 | 3 | use core::panic::PanicInfo; 4 | 5 | use crate::arch; 6 | use crate::arch::Architecture; 7 | use crate::arch::ArchitectureInterrupts; 8 | use crate::mm; 9 | use crate::paging::PagingImpl; 10 | use crate::{kprint, kprintln}; 11 | 12 | use drivers::qemuexit::QemuExit; 13 | 14 | static UTEST_SUCESS: &str = "\x1b[32mok\x1b[0m"; 15 | static UTEST_FAILURE: &str = "\x1b[31mFAILED\x1b[0m"; 16 | 17 | static mut TEST_CONTEXT: Option = None; 18 | 19 | pub struct TestContext { 20 | device_tree_address: usize, 21 | pub arch: crate::ArchImpl, 22 | pub arch_interrupts: crate::InterruptsImpl, 23 | pub mm: mm::MemoryManager, 24 | } 25 | 26 | impl TestContext { 27 | pub fn new(device_tree_address: usize) -> Self { 28 | let (arch, mm) = TestContext::build_context_data(device_tree_address); 29 | 30 | TestContext { 31 | device_tree_address, 32 | arch, 33 | arch_interrupts: crate::InterruptsImpl {}, 34 | mm, 35 | } 36 | } 37 | 38 | pub fn reset(&mut self) { 39 | // We will recreate a global allocator from scratch. Currently loaded page table is 40 | // allocated via the global allocator. 
Let's disable pagination to avoiding access fault 41 | self.mm.get_kernel_pagetable().unwrap().disable(); 42 | 43 | let (arch, mm) = TestContext::build_context_data(self.device_tree_address); 44 | 45 | self.arch = arch; 46 | self.mm = mm; 47 | } 48 | 49 | fn build_context_data(device_tree_address: usize) -> (crate::ArchImpl, mm::MemoryManager) { 50 | let arch = crate::ArchImpl::new(); 51 | let device_tree = crate::device_tree::DeviceTree::new(device_tree_address); 52 | let pmm = mm::PhysicalMemoryManager::from_device_tree( 53 | &device_tree, 54 | crate::PagingImpl::get_page_size(), 55 | ); 56 | 57 | let mut mm = mm::MemoryManager::new(pmm); 58 | 59 | let pagetable = mm::map_address_space( 60 | &device_tree, 61 | &mut mm, 62 | &[crate::kernel_console::get_console(), &QemuExit::new()], 63 | ); 64 | 65 | mm.set_kernel_pagetable(pagetable); 66 | 67 | (arch, mm) 68 | } 69 | } 70 | 71 | pub trait Testable { 72 | fn run(&self, ctx: &mut TestContext) -> (); 73 | } 74 | 75 | impl Testable for T 76 | where 77 | T: Fn(&mut TestContext), 78 | { 79 | fn run(&self, ctx: &mut TestContext) { 80 | kprint!("{} ... ", core::any::type_name::()); 81 | self(ctx); 82 | kprintln!("{}", UTEST_SUCESS); 83 | } 84 | } 85 | 86 | pub fn init(device_tree_address: usize) { 87 | let ctx = TestContext::new(device_tree_address); 88 | 89 | unsafe { 90 | TEST_CONTEXT = Some(ctx); 91 | } 92 | 93 | kprintln!("[OK] Test context initialization"); 94 | } 95 | 96 | #[doc(hidden)] 97 | pub fn runner(tests: &[&dyn Testable]) { 98 | kprintln!("\nRunning goOSe tests... Amount: {}\n", tests.len()); 99 | 100 | let ctx = unsafe { TEST_CONTEXT.as_mut().unwrap() }; 101 | 102 | for test in tests { 103 | test.run(ctx); 104 | } 105 | 106 | end_utests(); 107 | } 108 | 109 | fn end_utests() { 110 | let ctx = unsafe { (&mut TEST_CONTEXT).as_mut().unwrap() }; 111 | 112 | QemuExit::new().exit_success(); 113 | } 114 | 115 | #[panic_handler] 116 | fn panic(info: &PanicInfo) -> ! { 117 | kprintln!("[{}]", UTEST_FAILURE); 118 | kprintln!("{}", info); 119 | 120 | end_utests(); 121 | 122 | QemuExit::new().exit_failure(); 123 | 124 | loop {} 125 | } 126 | 127 | #[cfg(test)] 128 | mod tests { 129 | use super::*; 130 | 131 | #[test_case] 132 | fn assert_true(_ctx: &mut TestContext) { 133 | assert!(true) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /kernel/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(naked_functions)] 3 | #![feature(fn_align)] 4 | #![feature(const_mut_refs)] 5 | #![feature(slice_ptr_get)] 6 | #![feature(const_ptr_as_ref)] 7 | #![feature(const_slice_from_raw_parts_mut)] 8 | #![feature(iterator_try_collect)] 9 | #![feature(const_for)] 10 | #![feature(alloc_error_handler)] 11 | #![feature(trait_upcasting)] 12 | #![feature(return_position_impl_trait_in_trait)] 13 | 14 | pub extern crate alloc; 15 | 16 | pub mod drivers; 17 | mod utils; 18 | 19 | pub mod error; 20 | pub use error::Error; 21 | 22 | pub mod device_tree; 23 | pub mod executable; 24 | pub mod generic_main; 25 | pub mod globals; 26 | pub mod kernel_console; 27 | pub mod mm; 28 | mod panic; 29 | mod tests; 30 | 31 | // TODO: redo the unit tests with Mockall 32 | // pub mod kernel_tests; 33 | 34 | // TODO: cleanup how we handle features 35 | cfg_if::cfg_if! 
{ 36 | if #[cfg(target_arch = "aarch64")] { 37 | pub type ConsoleImpl = drivers::pl011::Pl011; 38 | pub use hal_aarch64 as hal; 39 | } else if #[cfg(target_arch = "riscv64")] { 40 | pub type ConsoleImpl = drivers::ns16550::Ns16550; 41 | pub use hal_riscv64 as hal; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /kernel/src/mm/binary_buddy_allocator.rs: -------------------------------------------------------------------------------- 1 | use crate::globals; 2 | use crate::hal::mm::PAGE_SIZE; 3 | use hal_core::mm::PageAlloc; 4 | 5 | use core::alloc::{GlobalAlloc, Layout}; 6 | 7 | use log::warn; 8 | 9 | pub struct BinaryBuddyAllocator; 10 | 11 | unsafe impl Sync for BinaryBuddyAllocator {} 12 | 13 | unsafe impl GlobalAlloc for BinaryBuddyAllocator { 14 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 15 | if unsafe { globals::STATE.is_earlyinit() } { 16 | panic!("Something tried to allocate before earlyinit is over o_O"); 17 | } 18 | 19 | assert!(layout.size() > 0); 20 | 21 | // The keen eye might notice this is just giving away pages ^^ 22 | // TODO: some much to be done 23 | // - actually implement a buddy allocator ^^ 24 | // - be thread-safe 25 | // - disable interrupts when entering, then re-enable 26 | 27 | let page_count = if layout.size() <= PAGE_SIZE { 28 | 1 29 | } else { 30 | layout.size() / PAGE_SIZE + 1 31 | }; 32 | globals::PHYSICAL_MEMORY_MANAGER 33 | .alloc(page_count) 34 | .unwrap_or(0usize) as *mut u8 35 | } 36 | 37 | unsafe fn dealloc(&self, _: *mut u8, _: Layout) { 38 | warn!("[WARNING] dealloc is not implemented yet, freeing memory isn't supported by the allocator"); 39 | } 40 | } 41 | 42 | #[global_allocator] 43 | static HEAP: BinaryBuddyAllocator = BinaryBuddyAllocator; 44 | 45 | #[alloc_error_handler] 46 | fn binary_buddy_oom(_layout: Layout) -> ! 
{ 47 | panic!("The binary buddy allocator has oomed, this system has gobbled up all the RAM\nWe are dOOMed !!!"); 48 | } 49 | -------------------------------------------------------------------------------- /kernel/src/mm/mod.rs: -------------------------------------------------------------------------------- 1 | mod physical_memory_manager; 2 | pub use physical_memory_manager::PhysicalMemoryManager; 3 | 4 | mod binary_buddy_allocator; 5 | 6 | use crate::device_tree::DeviceTree; 7 | use crate::globals; 8 | 9 | use crate::hal; 10 | use crate::Error; 11 | use hal_core::mm::{NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}; 12 | use hal_core::AddressRange; 13 | 14 | use crate::drivers; 15 | use drivers::Driver; 16 | 17 | use arrayvec::ArrayVec; 18 | use core::iter; 19 | 20 | use log::debug; 21 | 22 | extern "C" { 23 | pub static KERNEL_START: usize; 24 | pub static KERNEL_END: usize; 25 | } 26 | 27 | pub fn is_kernel_page(base: usize) -> bool { 28 | let (kernel_start, kernel_end) = unsafe { 29 | ( 30 | crate::utils::external_symbol_value(&KERNEL_START), 31 | crate::utils::external_symbol_value(&KERNEL_END), 32 | ) 33 | }; 34 | 35 | base >= kernel_start && base < kernel_end 36 | } 37 | 38 | pub fn kernel_memory_region() -> AddressRange { 39 | let (start, end) = unsafe { 40 | ( 41 | crate::utils::external_symbol_value(&KERNEL_START), 42 | crate::utils::external_symbol_value(&KERNEL_END), 43 | ) 44 | }; 45 | 46 | AddressRange::new(start..end) 47 | } 48 | 49 | pub fn is_reserved_page(base: usize, device_tree: &DeviceTree) -> bool { 50 | let mut is_res = false; 51 | 52 | device_tree.for_all_reserved_memory_regions(|regions| { 53 | is_res = regions 54 | .map(|(start, size)| (start, size)) // this is a weird hack to fix a type error. 55 | .any(|(region_start, region_size)| { 56 | base >= region_start && base <= (region_start + region_size) 57 | }) 58 | }); 59 | 60 | is_res 61 | } 62 | 63 | fn map_kernel_rwx() -> ( 64 | impl Iterator, 65 | impl Iterator, 66 | impl Iterator, 67 | ) { 68 | let page_size = hal::mm::PAGE_SIZE; 69 | let kernel_start = unsafe { crate::utils::external_symbol_value(&KERNEL_START) }; 70 | let kernel_end = unsafe { crate::utils::external_symbol_value(&KERNEL_END) }; 71 | let kernel_end_align = ((kernel_end + page_size - 1) / page_size) * page_size; 72 | 73 | let rwx_entries = iter::once(AddressRange::new(kernel_start..kernel_end_align)); 74 | 75 | (iter::empty(), iter::empty(), rwx_entries) 76 | } 77 | 78 | pub fn map_address_space<'a, I: Iterator>( 79 | device_tree: &DeviceTree, 80 | drivers: I, 81 | ) -> Result<(), Error> { 82 | let mut r_entries = ArrayVec::::new(); 83 | let mut rw_entries = ArrayVec::::new(); 84 | let mut rwx_entries = ArrayVec::::new(); 85 | let mut pre_allocated_entries = ArrayVec::::new(); 86 | 87 | // Add entries/descriptors in the pagetable for all of accessible memory regions. 88 | // That way in the future, mapping those entries won't require any memory allocations, 89 | // just settings the entry to valid and filling up the bits. 
90 | device_tree.for_all_memory_regions(|regions| { 91 | regions.for_each(|(base, size)| { 92 | pre_allocated_entries 93 | .try_push(AddressRange::with_size(base, size)) 94 | .unwrap(); 95 | }); 96 | }); 97 | debug!( 98 | "adding region containing the device tree to rw entries {:X?}", 99 | device_tree.memory_region() 100 | ); 101 | rw_entries 102 | .try_push( 103 | device_tree 104 | .memory_region() 105 | .round_up_to_page(hal::mm::PAGE_SIZE), 106 | ) 107 | .unwrap(); 108 | 109 | let (kernel_r, kernel_rw, kernel_rwx) = map_kernel_rwx(); 110 | r_entries.extend(kernel_r); 111 | rw_entries.extend(kernel_rw); 112 | rwx_entries.extend(kernel_rwx); 113 | 114 | for drv in drivers { 115 | if let Some((base, len)) = drv.get_address_range() { 116 | let len = hal::mm::align_up(len); 117 | debug!( 118 | "adding driver memory region to RW entries: [{:X}; {:X}]", 119 | base, 120 | base + len 121 | ); 122 | rw_entries 123 | .try_push(AddressRange::with_size(base, len)) 124 | .unwrap(); 125 | } 126 | } 127 | 128 | debug!("r_entries: {:X?}", r_entries); 129 | debug!("rw_entries: {:X?}", rw_entries); 130 | debug!("rwx_entries: {:X?}", rwx_entries); 131 | debug!("pre_allocated_entries: {:X?}", pre_allocated_entries); 132 | 133 | hal::mm::prefill_pagetable( 134 | r_entries.into_iter(), 135 | rw_entries.into_iter(), 136 | rwx_entries.into_iter(), 137 | pre_allocated_entries.into_iter(), 138 | &globals::PHYSICAL_MEMORY_MANAGER, 139 | )?; 140 | 141 | // All pmm pages are located in DRAM so they are already in the pagetable (they are part of 142 | // the pre_allocated_entries). 143 | // Therefore no allocations will be made, pass the NullPageAllocator. 144 | globals::PHYSICAL_MEMORY_MANAGER.used_pages(|page| { 145 | hal::mm::current() 146 | .identity_map( 147 | VAddr::new(page), 148 | Permissions::READ | Permissions::WRITE, 149 | &NullPageAllocator, 150 | ) 151 | .unwrap(); 152 | }); 153 | 154 | hal::mm::enable_paging(); 155 | 156 | unsafe { globals::STATE = globals::KernelState::MmuEnabledInit }; 157 | 158 | Ok(()) 159 | } 160 | -------------------------------------------------------------------------------- /kernel/src/mm/physical_memory_manager.rs: -------------------------------------------------------------------------------- 1 | use crate::device_tree::DeviceTree; 2 | use crate::globals; 3 | use crate::hal; 4 | use crate::mm; 5 | use core::mem; 6 | use hal_core::{ 7 | mm::{AllocatorError, NullPageAllocator, PageAlloc, PageMap, Permissions, VAddr}, 8 | AddressRange, 9 | }; 10 | 11 | use hal::mm::PAGE_SIZE; 12 | 13 | use log::debug; 14 | use spin::mutex::Mutex; 15 | 16 | #[derive(Debug, PartialEq, Eq)] 17 | pub enum PageKind { 18 | Metadata, 19 | Allocated, 20 | Free, 21 | } 22 | 23 | /// Holds data about each physical page in the system. 24 | #[derive(Debug)] 25 | pub struct PhysicalPage { 26 | kind: PageKind, 27 | base: usize, 28 | 29 | /// Page is last of a contiguous allocation of pages. 
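/// Currently only set by `alloc_pages`, on the final page of a successful allocation;
/// a future `dealloc` implementation could rely on it to find where an allocation ends.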
30 | last: bool, 31 | } 32 | 33 | impl PhysicalPage { 34 | fn is_used(&self) -> bool { 35 | self.kind != PageKind::Free 36 | } 37 | 38 | fn is_allocated(&self) -> bool { 39 | self.kind == PageKind::Allocated 40 | } 41 | 42 | fn set_allocated(&mut self) { 43 | self.kind = PageKind::Allocated; 44 | } 45 | 46 | fn _is_last(&self) -> bool { 47 | self.last 48 | } 49 | 50 | fn set_last(&mut self) { 51 | self.last = true; 52 | } 53 | } 54 | 55 | #[derive(Debug)] 56 | pub struct PhysicalMemoryManager { 57 | metadata: Mutex<&'static mut [PhysicalPage]>, 58 | } 59 | 60 | impl PhysicalMemoryManager { 61 | fn count_pages(regions: &[Option<AddressRange>]) -> usize { 62 | let total_memory_bytes: usize = regions 63 | .iter() 64 | .filter_map(|maybe_region| maybe_region.map(|region| region.size())) 65 | .sum(); 66 | 67 | total_memory_bytes / PAGE_SIZE 68 | } 69 | 70 | fn find_large_region(regions: &[Option<AddressRange>], minimum_size: usize) -> Option<usize> { 71 | regions 72 | .iter() 73 | .flatten() 74 | .find(|region| region.size() >= minimum_size) 75 | .map(|region| region.start) 76 | } 77 | 78 | fn is_metadata_page(base: usize, metadata_start: usize, metadata_end: usize) -> bool { 79 | base >= metadata_start && base < metadata_end 80 | } 81 | 82 | fn phys_addr_to_physical_page( 83 | phys_addr: usize, 84 | metadata_start: usize, 85 | metadata_end: usize, 86 | ) -> PhysicalPage { 87 | let kind = if Self::is_metadata_page(phys_addr, metadata_start, metadata_end) { 88 | PageKind::Metadata 89 | } else { 90 | PageKind::Free 91 | }; 92 | 93 | PhysicalPage { 94 | kind, 95 | base: phys_addr, 96 | last: false, 97 | } 98 | } 99 | 100 | fn exclude_range<const MAX_REGIONS: usize>( 101 | regions: &mut [Option<AddressRange>; MAX_REGIONS], 102 | excluded: AddressRange, 103 | ) { 104 | let (excl_start, excl_end) = (excluded.start, excluded.end); 105 | 106 | assert!(excl_start < excl_end); 107 | 108 | for i in 0..MAX_REGIONS { 109 | if regions[i].is_none() { 110 | continue; 111 | } 112 | let region = regions[i].unwrap(); 113 | 114 | if region.start == excl_start && region.end == excl_end { 115 | // Perfect overlap between the region to be excluded and the current region, just remove the region. 116 | regions[i] = None; 117 | } else if (region.start < excl_start && excl_start < region.end) 118 | && (region.start < excl_end && excl_end < region.end) 119 | { 120 | // Region to be excluded is in the middle of the current region. 121 | let new_region = AddressRange { 122 | start: excl_end, 123 | end: region.end, 124 | }; 125 | regions[i] = Some(AddressRange::new(region.start..excl_start)); 126 | 127 | // The exclusion in the middle causes a split of the current region, put the new region (the end part) into a slot that is still None. 128 | *regions 129 | .iter_mut() 130 | .find(|maybe_region| maybe_region.is_none()) 131 | .expect("regions array is too small, increase MAX_REGIONS") = Some(new_region); 132 | } else if region.contains(excl_end) { 133 | // Region to be removed is at the left (at the beginning) of the current region. 134 | regions[i] = Some(AddressRange::new(excl_end..region.end)); 135 | } else if region.contains(excl_start) { 136 | // Region to be removed is at the right (at the end) of the current region. 137 | regions[i] = Some(AddressRange::new(region.start..excl_start)); 138 | } 139 | } 140 | } 141 | 142 | fn available_memory_regions<const MAX_REGIONS: usize>( 143 | device_tree: &DeviceTree, 144 | ) -> [Option<AddressRange>; MAX_REGIONS] { 145 | // First put all regions in the array.
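// ...then carve out everything that must not be handed out as free pages (the kernel
// image, the region holding the device tree blob, and the reserved-memory regions),
// and finally re-align what is left to page boundaries (see the exclude_range() calls below).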
146 | let mut all_regions = [None; MAX_REGIONS]; 147 | device_tree.for_all_memory_regions(|regions| { 148 | regions.enumerate().for_each(|(i, (base, size))| { 149 | if i == MAX_REGIONS - 1 { 150 | panic!( 151 | "found more regions in the device tree than this has been compiled to fit" 152 | ); 153 | } 154 | 155 | all_regions[i] = Some(AddressRange { 156 | start: base, 157 | end: base + size, 158 | }); 159 | }); 160 | }); 161 | 162 | Self::exclude_range(&mut all_regions, mm::kernel_memory_region()); 163 | 164 | Self::exclude_range(&mut all_regions, device_tree.memory_region()); 165 | 166 | device_tree.for_all_reserved_memory_regions(|reserved_regions| { 167 | reserved_regions.for_each(|(base, size)| { 168 | Self::exclude_range(&mut all_regions, AddressRange::with_size(base, size)) 169 | }) 170 | }); 171 | 172 | // Re-align the regions, for exemple things we exclude are not always aligned to a page boundary. 173 | all_regions.iter_mut().for_each(|maybe_region| { 174 | if let Some(region) = maybe_region { 175 | region.start = hal::mm::align_down(region.start); 176 | region.end = hal::mm::align_up(region.end); 177 | 178 | *maybe_region = if region.size() > 0 { 179 | Some(*region) 180 | } else { 181 | None 182 | }; 183 | } 184 | }); 185 | 186 | all_regions 187 | } 188 | 189 | pub const fn new() -> Self { 190 | let metadata = unsafe { 191 | core::slice::from_raw_parts_mut( 192 | core::ptr::NonNull::::dangling().as_ptr(), 193 | 0, 194 | ) 195 | }; 196 | 197 | Self { 198 | metadata: Mutex::new(metadata), 199 | } 200 | } 201 | 202 | /// Initialize a [`PageAllocator`] from the device tree. 203 | pub fn init_from_device_tree(&self, device_tree: &DeviceTree) -> Result<(), AllocatorError> { 204 | let available_regions = Self::available_memory_regions::<10>(device_tree); 205 | 206 | assert!( 207 | available_regions 208 | .iter() 209 | .flatten() 210 | .all( 211 | |region| region.start == hal::mm::align_up(region.start) 212 | && region.end == hal::mm::align_up(region.end) 213 | ), 214 | "Expected region bounds to be aligned to the page size (won't be possible to allocate pages otherwise)" 215 | ); 216 | 217 | for (i, reg) in available_regions.iter().flatten().enumerate() { 218 | debug!("region {}: {:X?}", i, reg); 219 | } 220 | 221 | let page_count = Self::count_pages(&available_regions); 222 | let metadata_size = page_count * mem::size_of::(); 223 | let pages_needed = hal::mm::align_up(metadata_size) / PAGE_SIZE; 224 | 225 | let metadata_addr = Self::find_large_region(&available_regions, metadata_size) 226 | .ok_or(AllocatorError::NotEnoughMemoryForMetadata)?; 227 | 228 | let metadata: &mut [PhysicalPage] = 229 | unsafe { core::slice::from_raw_parts_mut(metadata_addr as *mut _, page_count) }; 230 | 231 | let physical_pages = available_regions 232 | .iter() 233 | .flatten() 234 | .flat_map(|region| region.iter_pages(PAGE_SIZE)) 235 | .map(|base| { 236 | Self::phys_addr_to_physical_page( 237 | base, 238 | metadata_addr, 239 | metadata_addr + pages_needed * PAGE_SIZE, 240 | ) 241 | }); 242 | 243 | let mut count = 0; 244 | for (i, page) in physical_pages.enumerate() { 245 | metadata[i] = page; 246 | count += 1; 247 | } 248 | assert!(count == page_count); 249 | 250 | *self.metadata.lock() = metadata; 251 | 252 | Ok(()) 253 | } 254 | 255 | pub fn alloc_pages(&self, page_count: usize) -> Result { 256 | let mut consecutive_pages: usize = 0; 257 | let mut first_page_index: usize = 0; 258 | let mut last_page_base: usize = 0; 259 | 260 | let mut metadata = self.metadata.lock(); 261 | 262 | for (i, page) in 
metadata.iter().enumerate() { 263 | if consecutive_pages == 0 { 264 | first_page_index = i; 265 | last_page_base = page.base; 266 | } 267 | 268 | if page.is_used() { 269 | consecutive_pages = 0; 270 | continue; 271 | } 272 | 273 | if consecutive_pages > 0 && page.base != last_page_base + PAGE_SIZE { 274 | consecutive_pages = 0; 275 | continue; 276 | } 277 | 278 | consecutive_pages += 1; 279 | last_page_base = page.base; 280 | 281 | if consecutive_pages == page_count { 282 | metadata[first_page_index..=i] 283 | .iter_mut() 284 | .for_each(|page| page.set_allocated()); 285 | metadata[i].set_last(); 286 | 287 | return Ok(metadata[first_page_index].base); 288 | } 289 | } 290 | 291 | Err(AllocatorError::OutOfMemory) 292 | } 293 | } 294 | 295 | impl PageAlloc for PhysicalMemoryManager { 296 | fn alloc(&self, page_count: usize) -> Result { 297 | // If there is a kernel pagetable, identity map the pages. 298 | let first_page = self.alloc_pages(page_count)?; 299 | 300 | if unsafe { globals::STATE.is_mmu_enabled() } { 301 | // The mmu is enabled, therefore we already mapped all DRAM into the kernel's pagetable 302 | // as invalid entries. 303 | // Pagetable must only modify existing entries and not allocate. 304 | hal::mm::current() 305 | .identity_map_range( 306 | VAddr::new(first_page), 307 | page_count, 308 | Permissions::READ | Permissions::WRITE, 309 | &NullPageAllocator, 310 | ) 311 | .unwrap(); 312 | } 313 | 314 | Ok(first_page) 315 | } 316 | 317 | fn dealloc(&self, _base: usize, _page_count: usize) -> Result<(), AllocatorError> { 318 | // TODO: 319 | // - if MMU is on, unmap the page 320 | // - set as free 321 | log::warn!("PMM dealloc not yet implemented..."); 322 | Ok(()) 323 | } 324 | 325 | fn used_pages(&self, f: F) { 326 | let metadata = self.metadata.lock(); 327 | 328 | let metadata_start = (&metadata[0] as *const PhysicalPage) as usize; 329 | let metadata_last = (&metadata[metadata.len() - 1] as *const PhysicalPage) as usize; 330 | 331 | let metadata_pages = (metadata_start..=metadata_last).step_by(PAGE_SIZE); 332 | let allocated_pages = metadata 333 | .iter() 334 | .filter(|page| page.is_allocated()) 335 | .map(|page| page.base); 336 | 337 | metadata_pages.chain(allocated_pages).for_each(f); 338 | } 339 | } 340 | 341 | #[cfg(test)] 342 | mod tests { 343 | use super::*; 344 | use crate::kernel_tests::*; 345 | 346 | #[test_case] 347 | fn exclude_range_remove_in_the_middle(_ctx: &mut TestContext) { 348 | let mut ranges = [Some(AddressRange::new(0x0..0x1000)), None]; 349 | PhysicalMemoryManager::exclude_range(&mut ranges, (0x500, 0x600)); 350 | 351 | assert_eq!(ranges[0], Some(AddressRange::new(0x0..0x500))); 352 | assert_eq!(ranges[1], Some(AddressRange::new(0x600, 0x1000))); 353 | } 354 | 355 | #[test_case] 356 | fn exclude_range_remove_beginning(_ctx: &mut TestContext) { 357 | let mut ranges = [Some(AddressRange::new(0x100..0x1000)), None]; 358 | PhysicalMemoryManager::exclude_range(&mut ranges, (0x0, 0x200)); 359 | 360 | assert_eq!(ranges[0], Some(AddressRange::new(0x200..0x1000))); 361 | assert!(ranges[1].is_none()); 362 | } 363 | 364 | #[test_case] 365 | fn exclude_range_remove_ending(_ctx: &mut TestContext) { 366 | let mut ranges = [Some(AddressRange::new(0x100..0x1000)), None]; 367 | PhysicalMemoryManager::exclude_range(&mut ranges, (0x800, 0x1000)); 368 | 369 | assert_eq!(ranges[0], Some(AddressRange::new(0x100..0x800))); 370 | assert!(ranges[1].is_none()); 371 | } 372 | 373 | #[test_case] 374 | fn exclude_range_overlaps_exactly(_ctx: &mut TestContext) { 375 | let mut ranges = 
[Some(AddressRange::new(0x400_000..0x800_000)), None]; 376 | PhysicalMemoryManager::exclude_range(&mut ranges, (0x400_000, 0x800_000)); 377 | 378 | assert!(ranges[0].is_none()); 379 | assert!(ranges[1].is_none()); 380 | } 381 | 382 | #[test_case] 383 | fn exclude_range_overlap_with_exact_beginning(_ctx: &mut TestContext) { 384 | let mut ranges = [Some(AddressRange::new(0x400_000..0x800_000)), None]; 385 | PhysicalMemoryManager::exclude_range(&mut ranges, (0x400_000, 0x401_000)); 386 | 387 | assert_eq!(ranges[0], Some(AddressRange::new(0x401_000..0x800_000))); 388 | assert!(ranges[1].is_none()); 389 | } 390 | } 391 | -------------------------------------------------------------------------------- /kernel/src/panic.rs: -------------------------------------------------------------------------------- 1 | use crate::hal; 2 | use core::arch::asm; 3 | use log::error; 4 | 5 | #[panic_handler] 6 | #[cfg(not(test))] 7 | fn panic(info: &core::panic::PanicInfo) -> ! { 8 | error!("\x1b[31mkernel panic\x1b[0m: {}", info); 9 | 10 | error!("hal panic info: {:X?}", hal::panic_info()); 11 | 12 | loop { 13 | unsafe { asm!("wfi") } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /kernel/src/tests.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, info, trace}; 2 | 3 | use core::slice; 4 | use core::sync::atomic::{AtomicUsize, Ordering}; 5 | 6 | use crate::executable::elf::Elf; 7 | use crate::globals; 8 | use crate::hal::{self, mm::PAGE_SIZE}; 9 | use hal_core::mm::{PageAlloc, PageMap, Permissions}; 10 | 11 | use align_data::include_aligned; 12 | use align_data::Align4K; 13 | 14 | pub enum TestResult { 15 | Success, 16 | Failure, 17 | } 18 | 19 | struct Test { 20 | name: &'static str, 21 | test: fn() -> TestResult, 22 | } 23 | 24 | const TESTS: &[Test] = &[ 25 | Test { 26 | name: "timer interrupts", 27 | test: test_timer_interrupt, 28 | }, 29 | Test { 30 | name: "pagetable does remap", 31 | test: test_pagetable_remap, 32 | }, 33 | Test { 34 | name: "basic elf loader", 35 | test: test_elf_loader_basic, 36 | }, 37 | ]; 38 | 39 | pub fn launch() -> TestResult { 40 | let mut res = TestResult::Success; 41 | 42 | info!("Launching tests..."); 43 | for (i, test) in TESTS.iter().enumerate() { 44 | info!("Test #{} \'{}\':", i, test.name); 45 | match (test.test)() { 46 | TestResult::Failure => { 47 | info!("#{} failed ❌", i); 48 | res = TestResult::Failure; 49 | } 50 | TestResult::Success => { 51 | info!("#{} passed ✅", i); 52 | } 53 | } 54 | } 55 | 56 | res 57 | } 58 | 59 | fn test_timer_interrupt() -> TestResult { 60 | if true { 61 | // IRQ 62 | static CNT: AtomicUsize = AtomicUsize::new(0); 63 | const NUM_INTERRUPTS: usize = 3; 64 | 65 | debug!( 66 | "Testing timer interrupts, waiting for {} interrupts", 67 | NUM_INTERRUPTS 68 | ); 69 | 70 | hal::cpu::clear_physical_timer(); 71 | 72 | hal::irq::set_timer_handler(|| { 73 | trace!("."); 74 | 75 | if CNT.fetch_add(1, Ordering::Relaxed) < NUM_INTERRUPTS { 76 | hal::irq::set_timer(50_000) 77 | .expect("failed to set timer in the timer handler of the test"); 78 | } 79 | }); 80 | 81 | hal::irq::set_timer(50_000).expect("failed to set timer for test"); 82 | 83 | while CNT.load(Ordering::Relaxed) < NUM_INTERRUPTS {} 84 | 85 | // TODO: restore the timer handler 86 | hal::cpu::clear_physical_timer(); 87 | TestResult::Success 88 | } else { 89 | // // Synchronous exception 90 | // unsafe { 91 | // asm!("svc 42"); 92 | // } 93 | TestResult::Failure 94 | } 95 | } 96 | 97 | fn 
test_pagetable_remap() -> TestResult { 98 | info!("Testing the remapping capabilities of our pagetable..."); 99 | 100 | let page_src = globals::PHYSICAL_MEMORY_MANAGER.alloc(1).unwrap(); 101 | let page_src = unsafe { slice::from_raw_parts_mut(page_src as *mut u8, PAGE_SIZE) }; 102 | let dst_addr = 0x0450_0000; 103 | let page_dst = unsafe { slice::from_raw_parts(dst_addr as *const u8, hal::mm::PAGE_SIZE) }; 104 | let deadbeef = [0xDE, 0xAD, 0xBE, 0xEF]; 105 | 106 | // Put data in source page 107 | 108 | page_src[0..deadbeef.len()].copy_from_slice(&deadbeef); 109 | 110 | // Remap source page to destination page 111 | hal::mm::current() 112 | .map( 113 | hal_core::mm::VAddr::new(dst_addr), 114 | hal_core::mm::PAddr::new(page_src.as_ptr() as usize), 115 | Permissions::READ | Permissions::WRITE, 116 | &globals::PHYSICAL_MEMORY_MANAGER, 117 | ) 118 | .unwrap(); 119 | 120 | // Readback from destination page 121 | for i in 0..deadbeef.len() { 122 | if page_dst[i] != deadbeef[i] { 123 | return TestResult::Failure; 124 | } 125 | } 126 | 127 | info!("Remapping works"); 128 | 129 | TestResult::Success 130 | } 131 | 132 | fn test_elf_loader_basic() -> TestResult { 133 | static TEST_BIN: &[u8] = include_aligned!(Align4K, env!("CARGO_BIN_FILE_TESTS")); 134 | 135 | let test_bin = Elf::from_bytes(TEST_BIN); 136 | debug!("[OK] Elf from_bytes {}", env!("CARGO_BIN_FILE_TESTS")); 137 | test_bin.load().unwrap(); 138 | debug!("[OK] Elf loaded"); 139 | let entry_point: extern "C" fn() -> u8 = 140 | unsafe { core::mem::transmute(test_bin.get_entry_point()) }; 141 | debug!("[OK] Elf loaded, entry point is {:?}", entry_point); 142 | entry_point(); 143 | debug!("[OK] Returned from Elf"); 144 | 145 | TestResult::Success 146 | } 147 | -------------------------------------------------------------------------------- /kernel/src/utils/lock.rs: -------------------------------------------------------------------------------- 1 | use core::cell::UnsafeCell; 2 | 3 | pub struct Lock<T> { 4 | data: UnsafeCell<T>, 5 | } 6 | 7 | impl<T> Lock<T> { 8 | pub const fn new(data: T) -> Self { 9 | Self { 10 | data: UnsafeCell::new(data), 11 | } 12 | } 13 | 14 | pub fn lock<'a, R>(&'a self, f: impl FnOnce(&'a mut T) -> R) -> R { 15 | // TODO: actually lock something...
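// One way this could be filled in (a sketch, not what is implemented here): reuse the
// `spin` crate that the physical memory manager already depends on, store the data in a
// `spin::Mutex<T>` instead of an `UnsafeCell<T>`, and forward to it:
//
//     pub struct Lock<T> { data: spin::Mutex<T> }
//     impl<T> Lock<T> {
//         pub fn lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
//             f(&mut *self.data.lock())
//         }
//     }
//
// The closure would then borrow T for the lifetime of the guard rather than for 'a,
// and masking interrupts around the critical section would remain a separate concern.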
16 | let data = unsafe { &mut *self.data.get() }; 17 | 18 | f(data) 19 | } 20 | } 21 | 22 | unsafe impl<T> Send for Lock<T> where T: Sized + Send {} 23 | unsafe impl<T> Sync for Lock<T> where T: Sized + Send {} 24 | -------------------------------------------------------------------------------- /kernel/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod lock; 2 | 3 | pub fn external_symbol_value<T>(sym: &T) -> usize { 4 | (sym as *const T) as usize 5 | } 6 | -------------------------------------------------------------------------------- /riscv64_qemuvirt/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "riscv64gc-unknown-none-elf" 3 | 4 | [target.riscv64gc-unknown-none-elf] 5 | runner = 'qemu-system-riscv64 -M virt -m 256M -nographic -kernel ' 6 | -------------------------------------------------------------------------------- /riscv64_qemuvirt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "riscv64_qemuvirt" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | kernel = { path = "../kernel", features = ["riscv64_sv39"] } 10 | log = "0.4" 11 | 12 | [features] 13 | launch_tests = [] 14 | -------------------------------------------------------------------------------- /riscv64_qemuvirt/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("cargo:rerun-if-changed=src/riscv64_qemuvirt.ld"); 3 | println!("cargo:rustc-link-arg=-Triscv64_qemuvirt/src/riscv64_qemuvirt.ld"); 4 | } 5 | -------------------------------------------------------------------------------- /riscv64_qemuvirt/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(naked_functions)] 4 | 5 | #[cfg(not(target_arch = "riscv64"))] 6 | compile_error!("Must be compiled as riscv64"); 7 | 8 | use kernel::drivers::ns16550::*; 9 | 10 | use log::info; 11 | 12 | pub const UART_ADDR: usize = 0x1000_0000; 13 | pub const UART_INTERRUPT_NUMBER: u16 = 10; 14 | 15 | const LAUNCH_TESTS: bool = cfg!(feature = "launch_tests"); 16 | 17 | #[no_mangle] 18 | extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> ! { 19 | static NS16550: Ns16550 = Ns16550::new(UART_ADDR); 20 | kernel::kernel_console::set_earlyinit_console(&NS16550); 21 | 22 | kernel::kernel_console::init_logging().unwrap(); 23 | 24 | info!("GoOSe is booting"); 25 | 26 | kernel::hal::irq::init_exception_handlers(); 27 | 28 | let device_tree = kernel::device_tree::DeviceTree::new(device_tree_ptr).unwrap(); 29 | kernel::generic_main::generic_main::<LAUNCH_TESTS>(device_tree, &[&NS16550]); 30 | } 31 | -------------------------------------------------------------------------------- /riscv64_qemuvirt/src/riscv64_qemuvirt.ld: -------------------------------------------------------------------------------- 1 | ENTRY(_start) 2 | 3 | SECTIONS 4 | { 5 | . = 0x80200000; 6 | 7 | KERNEL_START = .
; 8 | .text : { 9 | *(.text._start); # _start should always be at the top of all sections 10 | *(.text*); 11 | } 12 | 13 | .data : { 14 | *(.data*); 15 | } 16 | 17 | .sdata : { 18 | *(.sdata*); 19 | } 20 | 21 | .rodata : { 22 | *(.rodata*); 23 | } 24 | 25 | .eh_frame : { 26 | *(.eh_frame*); 27 | } 28 | 29 | .bss : { 30 | *(.bss*); 31 | } 32 | 33 | .sbss : { 34 | *(.sbss*); 35 | } 36 | 37 | . = ALIGN(4096); 38 | STACK_END = . ; 39 | . = . + 1M; 40 | STACK_START = . ; 41 | 42 | KERNEL_END = . ; 43 | } 44 | -------------------------------------------------------------------------------- /tests/.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | rustflags = [ 3 | # "-C", "link-args=-nostartfiles", 4 | # "-C", "link-args=-Wl,--no-dynamic-linker", 5 | "-C", "link-args=-pie", 6 | "-C", "relocation-model=pie" 7 | ] 8 | -------------------------------------------------------------------------------- /tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tests" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /tests/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_main] 2 | #![no_std] 3 | #![feature(lang_items)] 4 | #![feature(fn_align)] 5 | 6 | use core::panic::PanicInfo; 7 | 8 | #[panic_handler] 9 | fn panic(_panic: &PanicInfo<'_>) -> ! { 10 | loop {} 11 | } 12 | 13 | #[lang = "eh_personality"] 14 | extern "C" fn eh_personality() {} 15 | 16 | #[no_mangle] 17 | #[repr(align(0x1000))] 18 | pub extern "C" fn _start() -> u8 { 19 | 0u8 20 | } 21 | --------------------------------------------------------------------------------