├── .cargo └── config.toml ├── .github └── workflows │ └── check.yml ├── .gitignore ├── .vscode └── settings.json ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── build ├── linker-scripts │ └── kernel.ld ├── targets │ └── x86_64.json └── xtask │ ├── .gitignore │ ├── Cargo.toml │ └── src │ └── main.rs ├── crates └── libxernel │ ├── Cargo.toml │ └── src │ ├── boot.rs │ ├── collections │ ├── mod.rs │ └── ringbuffer.rs │ ├── crypto │ ├── csprng.rs │ ├── fortuna.rs │ ├── mod.rs │ └── primality.rs │ ├── ipl.rs │ ├── lib.rs │ ├── on_drop.rs │ ├── sync │ ├── mod.rs │ ├── once.rs │ ├── rwlock.rs │ ├── spin.rs │ └── spinirq.rs │ └── syscall.rs ├── gdb.sh ├── kernel ├── .gitignore ├── Cargo.toml ├── limine.conf ├── src │ ├── acpi │ │ ├── hpet.rs │ │ └── mod.rs │ ├── allocator │ │ ├── buddy.rs │ │ ├── mod.rs │ │ └── unit.rs │ ├── arch │ │ ├── amd64 │ │ │ ├── apic.rs │ │ │ ├── cpuid.rs │ │ │ ├── gdt.rs │ │ │ ├── interrupts │ │ │ │ ├── idt.rs │ │ │ │ └── mod.rs │ │ │ ├── ioapic.rs │ │ │ ├── lapic.rs │ │ │ ├── mod.rs │ │ │ ├── ports.rs │ │ │ ├── switch.S │ │ │ └── tsc.rs │ │ └── mod.rs │ ├── cpu.rs │ ├── dpc.rs │ ├── drivers │ │ ├── mod.rs │ │ └── ps2 │ │ │ ├── keyboard.rs │ │ │ └── mod.rs │ ├── framebuffer │ │ ├── font.rs │ │ └── mod.rs │ ├── fs │ │ ├── file.rs │ │ ├── initramfs.rs │ │ ├── mod.rs │ │ ├── mount.rs │ │ ├── pathbuf.rs │ │ ├── tmpfs.rs │ │ ├── vfs.rs │ │ ├── vfs_syscalls.rs │ │ └── vnode.rs │ ├── logger.rs │ ├── main.rs │ ├── mem │ │ ├── frame.rs │ │ ├── heap.rs │ │ ├── mmap.rs │ │ ├── mod.rs │ │ ├── paging.rs │ │ └── vm.rs │ ├── sched │ │ ├── context.rs │ │ ├── mod.rs │ │ ├── process.rs │ │ ├── scheduler.rs │ │ └── thread.rs │ ├── syscall │ │ └── mod.rs │ ├── timer │ │ ├── mod.rs │ │ ├── timer_event.rs │ │ └── timer_queue.rs │ ├── userland.rs │ ├── utils │ │ ├── backtrace.rs │ │ ├── defer.rs │ │ ├── limine_module.rs │ │ ├── mod.rs │ │ └── rtc.rs │ └── writer.rs └── uefi-edk2 │ ├── License.txt │ ├── OVMF.fd │ └── OvmfPkg.License.txt ├── logo.bmp 
├── rust-toolchain ├── rustfmt.toml ├── status_quo.png └── userland └── init ├── Cargo.toml └── src └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | kernel = "run -p xtask --" 3 | kb = "kernel build" 4 | kr = "kernel run" 5 | kl = "kernel lint" 6 | kc = "kernel clean" 7 | 8 | [build] 9 | rustflags = ["-Z", "threads=8"] 10 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | name: Xernel Lint 2 | on: [push, pull_request] 3 | jobs: 4 | xernel-lint: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v4 8 | - run: sudo apt update && sudo apt install -y mtools 9 | - run: rustup update && rustup toolchain install nightly 10 | - run: rustup component add rust-src rustfmt clippy 11 | - run: cargo kernel build 12 | - run: cargo kernel fmt --check 13 | - run: cargo kernel clippy 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | limine 13 | 14 | *.log 15 | *.iso 16 | *.hdd 17 | 18 | .env 19 | 20 | .idea 21 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.check.allTargets": false, 3 | "rust-analyzer.check.extraArgs": [ 4 | "--target", 5 | "x86_64-unknown-none", 6 | "--target-dir", 7 | 
"target/rust-analyzer" 8 | ], 9 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "build/xtask", 4 | "kernel", 5 | "crates/libxernel", 6 | "userland/init", 7 | ] 8 | resolver = "2" 9 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Raphael O. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # xernel 2 | A kernel for x86-64 (and maybe ARM/RISC-V later on) using the limine boot protocol. 
3 | Mostly made for learning purposes, no real-world application. 4 | 5 | ![xernel](status_quo.png) 6 | 7 | ## Building 8 | For building, you simply need a working Rust installation with the nightly toolchain installed. 9 | We use xtask as our workflow for building and running the kernel. 10 | Therefore, the command `cargo kernel` is used. 11 | 12 | See `cargo kernel help` for a list of subcommands and available flags. 13 | 14 | You can simply use `cargo kernel build` or `cargo kernel run`, it is going to use x86_64 as the default target. 15 | 16 | ## Contact 17 | Feel free to reach out to `lockna` or `mp1310` on Discord 18 | or join our **Official Discord Server:** https://discord.gg/e3gsmHX4w4 19 | 20 | Here you can talk to us in the `xernel-development` category. 21 | 22 | ## TODO 23 | Things that are still to do: 24 | - [ ] Add doc comments 25 | - [ ] Rewrite mem module 26 | - [x] Rewrite the page frame allocator so it uses a buddy allocator 27 | - [ ] Rewrite the virtual memory manager, just make it better 28 | - [ ] Switch from linked_list_allocator to a slab allocator for heap 29 | - [ ] Implement a virtual file system 30 | - [ ] Implement a driver system 31 | - [ ] Implement a proper syscall interface 32 | - [ ] Implement a proper userspace 33 | - [ ] Implement a proper init system 34 | 35 | 36 | ## Contributing 37 | Contributions to code and documentation are heavily appreciated, be it a bug fix or an improvement of the existing code or code documentation. 38 | New features would be gladly taken over by us, as the project was created for exactly this purpose. 39 | 40 | ## License 41 | xernel is distributed under the terms of either the Apache License (Version 2.0) or the MIT license, at the user's choice. 42 | See [LICENSE-APACHE](./LICENSE-APACHE) and [LICENSE-MIT](./LICENSE-MIT) for details. 43 | Contributions to the xernel project must be made under the terms of both licenses. 
44 | -------------------------------------------------------------------------------- /build/linker-scripts/kernel.ld: -------------------------------------------------------------------------------- 1 | /* Tell the linker that we want an x86_64 ELF64 output file */ 2 | OUTPUT_FORMAT(elf64-x86-64) 3 | OUTPUT_ARCH(i386:x86-64) 4 | 5 | /* We want the symbol `kernel_main` to be our entry point */ 6 | ENTRY(kernel_main) 7 | 8 | /* Define the program headers we want so the bootloader gives us the right */ 9 | /* MMU permissions */ 10 | PHDRS 11 | { 12 | null PT_NULL FLAGS(0) ; /* Null segment */ 13 | text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */ 14 | rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */ 15 | data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ 16 | } 17 | 18 | SECTIONS 19 | { 20 | /* We wanna be placed in the topmost 2GiB of the address space, for optimisations */ 21 | /* and because that is what the Limine spec mandates. */ 22 | /* Any address in this region will do, but often 0xffffffff80000000 is chosen as */ 23 | /* that is the beginning of the region. */ 24 | . = 0xffffffff80000000; 25 | 26 | .text : { 27 | *(.text .text.*) 28 | } :text 29 | 30 | /* Move to the next memory page for .rodata */ 31 | . += CONSTANT(MAXPAGESIZE); 32 | 33 | .rodata : { 34 | *(.rodata .rodata.*) 35 | } :rodata 36 | 37 | /* Move to the next memory page for .data */ 38 | . += CONSTANT(MAXPAGESIZE); 39 | 40 | .data : { 41 | *(.data .data.*) 42 | } :data 43 | 44 | .bss : { 45 | *(COMMON) 46 | *(.bss .bss.*) 47 | } :data 48 | 49 | .got : { 50 | *(.got) 51 | } :data 52 | 53 | . 
+= CONSTANT(MAXPAGESIZE); 54 | 55 | _kernel_end = .; 56 | 57 | } -------------------------------------------------------------------------------- /build/targets/x86_64.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", 4 | "arch": "x86_64", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "32", 8 | "os": "none", 9 | "executables": true, 10 | "linker-flavor": "ld.lld", 11 | "linker": "rust-lld", 12 | "disable-redzone": true, 13 | "panic-strategy": "abort", 14 | "features": "-mmx,-sse,+soft-float", 15 | "rustc-abi": "x86-softfloat", 16 | "code-model": "kernel", 17 | "pre-link-args": { 18 | "ld.lld": [ 19 | "--gc-sections", 20 | "--script=./build/linker-scripts/kernel.ld" 21 | ] 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /build/xtask/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk -------------------------------------------------------------------------------- /build/xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2018" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | xshell = "0.2.7" 10 | pico-args = "0.5.0" 11 | anyhow = "1" 12 | wsl = "0.1.0" 13 | dotenv = "0.15.0" 14 | fatfs = "0.3.6" 15 
| -------------------------------------------------------------------------------- /build/xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use dotenv::dotenv; 3 | use fatfs::{format_volume, FormatVolumeOptions}; 4 | use pico_args::Arguments; 5 | use std::io::{Cursor, Read, Seek, Write}; 6 | use std::path::{Path, PathBuf}; 7 | use std::{env, fs, vec}; 8 | use xshell::{cmd, Shell}; 9 | 10 | const HELP: &str = "\ 11 | xtask 12 | The build system for xernel 13 | FLAGS: 14 | -h, --help Print this message. 15 | --release Build the kernel with optimizations. 16 | --gdb Start QEMU with GDB server enabled and waiting for a connection. 17 | --check Only checks if the format is correct, without making changes (Can only be used with the fmt or lint subcommand) 18 | --cpus Set the number CPU cores (default: 2). 19 | --ram Set the amount of RAM in given size (M for Megabyte and G for Gigabyte) (default: 128M). 20 | --wsl-qemu If you use wsl but got a X server installed like GWSL you can use this flag to say you want to use the qemu you've got installed with your wsl distro and not on windows (also possible to use a env variable called qemu_in_wsl and setting it to true) 21 | --kvm Use KVM for QEMU (default: false). 22 | --monitor Enable QEMU monitor 23 | SUBCOMMANDS: 24 | build Build the kernel without running it. 25 | run Build and run the kernel using QEMU. 
26 | fmt Run cargo fmt 27 | clippy Run clippy 28 | lint Run clippy and cargo fmt 29 | clean Cleans the limine clone and runs cargo clean 30 | "; 31 | 32 | fn main() -> Result<()> { 33 | dotenv().ok(); 34 | let mut args = Arguments::from_env(); 35 | 36 | // print help message if requested 37 | if args.contains(["-h", "--help"]) { 38 | print!("{}", HELP); 39 | return Ok(()); 40 | } 41 | 42 | let release = args.contains("--release"); 43 | let gdb = args.contains("--gdb"); 44 | let check = args.contains("--check"); 45 | 46 | // cd into the root folder of this workspace 47 | let sh = Shell::new().unwrap(); 48 | 49 | sh.set_var("RUSTFLAGS", "-Cforce-frame-pointers=yes -Z macro-backtrace"); 50 | 51 | let _cwd = sh.push_dir(root()); 52 | 53 | match args.subcommand()?.as_deref() { 54 | Some("build") => { 55 | // build the kernel 56 | build(&sh, release, args)?; 57 | } 58 | Some("run") => { 59 | // first build the kernel 60 | build(&sh, release, args.clone())?; 61 | 62 | // then run the produced binray in QEMU 63 | run(&sh, gdb, args)?; 64 | } 65 | Some("lint") => { 66 | fmt(&sh, check)?; 67 | clippy(&sh)?; 68 | } 69 | Some("fmt") => { 70 | fmt(&sh, check)?; 71 | } 72 | Some("clippy") => { 73 | clippy(&sh)?; 74 | } 75 | 76 | Some("help") => { 77 | print!("{}", HELP); 78 | } 79 | 80 | Some("clean") => { 81 | cmd!(sh, "rm -rf kernel/limine").run()?; 82 | cmd!(sh, "cargo clean").run()?; 83 | } 84 | 85 | Some(cmd) => bail!("Unknown subcommand: '{}'", cmd), 86 | None => bail!("You must supply a subcommand."), 87 | } 88 | 89 | Ok(()) 90 | } 91 | 92 | fn build(sh: &Shell, rl: bool, mut args: Arguments) -> Result<()> { 93 | let target = args 94 | .opt_value_from_str::<_, String>("--target")? 
95 | .unwrap_or_else(|| "x86_64".to_string()); 96 | 97 | if !Path::new(sh.current_dir().as_path().join("kernel/limine").as_path()).exists() { 98 | sh.change_dir(sh.current_dir().as_path().join("kernel")); 99 | cmd!( 100 | sh, 101 | "git clone https://github.com/limine-bootloader/limine.git 102 | --branch=v8.x-binary 103 | --depth=1" 104 | ) 105 | .run()?; 106 | sh.change_dir(root()); 107 | } 108 | 109 | let release = if rl { &["--release"] } else { &[][..] }; 110 | 111 | cmd!( 112 | sh, 113 | "cargo build 114 | {release...} 115 | -p xernel 116 | --target ./build/targets/{target}.json 117 | -Z build-std=core,alloc,compiler_builtins 118 | -Z build-std-features=compiler-builtins-mem 119 | " 120 | ) 121 | .run()?; 122 | 123 | cmd!( 124 | sh, 125 | "cargo build 126 | {release...} 127 | -p init 128 | --target ./build/targets/{target}.json 129 | -Z build-std=core,alloc,compiler_builtins 130 | -Z build-std-features=compiler-builtins-mem" 131 | ) 132 | .run()?; 133 | 134 | let build_dir = if rl { "release" } else { "debug" }; 135 | 136 | cmd!(sh, "cp ./target/x86_64-unknown-none/{build_dir}/init ./target/").run()?; 137 | 138 | create_initramfs()?; 139 | 140 | let diskname = "xernel.hdd"; 141 | let disksize = 64 * 1024 * 1024; // 64 MB 142 | 143 | let data_vec = vec![0_u8; disksize]; 144 | let mut disk = Cursor::new(data_vec); 145 | 146 | format_volume(&mut disk, FormatVolumeOptions::new().fat_type(fatfs::FatType::Fat32))?; 147 | 148 | let fs = fatfs::FileSystem::new(&mut disk, fatfs::FsOptions::new())?; 149 | { 150 | let root_dir = fs.root_dir(); 151 | 152 | copy_to_image(&root_dir, &format!("./target/{target}/{build_dir}/xernel"), "xernel")?; 153 | 154 | copy_to_image(&root_dir, "./logo.bmp", "logo.bmp")?; 155 | copy_to_image(&root_dir, "./target/initramfs", "initramfs")?; 156 | 157 | let dir = root_dir.create_dir("EFI")?; 158 | let dir = dir.create_dir("BOOT")?; 159 | 160 | copy_to_image(&dir, "./kernel/limine/BOOTX64.EFI", "BOOTX64.EFI")?; 161 | copy_to_image(&dir, 
"./kernel/limine.conf", "limine.conf")?; 162 | } 163 | fs.unmount()?; 164 | 165 | fs::write(diskname, disk.into_inner())?; 166 | 167 | Ok(()) 168 | } 169 | 170 | fn create_initramfs() -> Result<()> { 171 | // file format of the initramfs: 172 | // 1. name of the file (16 byte) 173 | // 2. size of the file (u64) 174 | // 3. the file data 175 | // 4. ... the next files until the end of the initramfs file 176 | 177 | let mut data = Vec::new(); 178 | 179 | // (name, path) 180 | let files = vec![ 181 | ("init", "./target/init") 182 | ]; 183 | 184 | for file in files { 185 | let name = file.0; 186 | let path = file.1; 187 | 188 | let file_data = fs::read(path)?; 189 | let mut name_vec = name.as_bytes().to_vec(); 190 | name_vec.resize(16, 0); 191 | 192 | data.extend(&name_vec); 193 | data.extend(&(file_data.len() as u64).to_le_bytes()); 194 | data.extend(&file_data); 195 | } 196 | 197 | fs::write("target/initramfs", data)?; 198 | 199 | Ok(()) 200 | } 201 | 202 | fn run(sh: &Shell, gdb: bool, mut args: Arguments) -> Result<()> { 203 | let gdb_debug = if gdb { &["-S"] } else { &[][..] }; 204 | 205 | let ram = args 206 | .opt_value_from_str::<_, String>("--ram")? 207 | .unwrap_or_else(|| "128M".to_string()); 208 | let cpus = args.opt_value_from_str::<_, u32>("--cpus")?.unwrap_or(2).to_string(); 209 | 210 | let kvm = if args.contains("--kvm") { 211 | &["-enable-kvm"] 212 | } else { 213 | &[][..] 
214 | }; 215 | 216 | let qemu_monitor = if args.contains("--monitor") { 217 | &["-monitor"] 218 | } else { 219 | &["-debugcon"] 220 | }; 221 | 222 | let mut file_extension = ""; 223 | 224 | let qemu_in_wsl_arg = args.contains("--wsl-qemu"); 225 | 226 | let qemu_in_wsl_env = env::var("qemu_in_wsl").unwrap_or("false".to_string()).parse().unwrap(); 227 | 228 | let qemu_in_wsl = qemu_in_wsl_arg || qemu_in_wsl_env; 229 | 230 | if wsl::is_wsl() && !qemu_in_wsl { 231 | file_extension = ".exe"; 232 | } 233 | 234 | cmd!( 235 | sh, 236 | "qemu-system-x86_64{file_extension} 237 | -bios ./kernel/uefi-edk2/OVMF.fd 238 | -m {ram} 239 | -smp {cpus} 240 | -cdrom xernel.hdd 241 | --no-reboot 242 | --no-shutdown 243 | {qemu_monitor...} stdio 244 | -d int 245 | -D qemu.log 246 | {kvm...} 247 | -s {gdb_debug...}" 248 | ) 249 | .run()?; 250 | 251 | Ok(()) 252 | } 253 | 254 | fn clippy(sh: &Shell) -> Result<()> { 255 | let _cwd = sh.push_dir(root()); 256 | 257 | cmd!( 258 | sh, 259 | "cargo clippy 260 | -p xernel 261 | --target ./build/targets/x86_64.json 262 | -Z build-std=core,alloc,compiler_builtins 263 | -Z build-std-features=compiler-builtins-mem" 264 | ) 265 | .run()?; 266 | 267 | Ok(()) 268 | } 269 | 270 | fn fmt(sh: &Shell, check: bool) -> Result<()> { 271 | let _cwd = sh.push_dir(root()); 272 | 273 | let check_arg = if check { &["--", "--check"][..] 
} else { &[] }; 274 | cmd!( 275 | sh, 276 | "cargo fmt 277 | -p xernel 278 | {check_arg...}" 279 | ) 280 | .run()?; 281 | 282 | Ok(()) 283 | } 284 | 285 | fn root() -> PathBuf { 286 | let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); 287 | path.pop(); 288 | path.pop(); 289 | path 290 | } 291 | 292 | fn copy_to_image(dir: &fatfs::Dir, src_path: &str, dst_path: &str) -> Result<()> { 293 | let data = fs::read(src_path)?; 294 | 295 | dir.create_file(dst_path)?.write_all(&data)?; 296 | 297 | Ok(()) 298 | } 299 | -------------------------------------------------------------------------------- /crates/libxernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libxernel" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | bitflags = "2.8.0" 10 | 11 | [features] 12 | kernel = [] 13 | -------------------------------------------------------------------------------- /crates/libxernel/src/boot.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Deref, DerefMut}; 2 | 3 | /// Type for values that are only available at runtime 4 | /// 5 | /// Can be set multiple times and mostly used in a [`static`] context 6 | pub enum InitAtBoot { 7 | Initialized(T), 8 | Uninitialized, 9 | } 10 | 11 | impl Deref for InitAtBoot { 12 | type Target = T; 13 | 14 | fn deref(&self) -> &T { 15 | match self { 16 | InitAtBoot::Initialized(x) => x, 17 | InitAtBoot::Uninitialized => { 18 | #[cfg(debug_assertions)] 19 | core::panic!("tried to access boot resource that is not initialized"); 20 | #[cfg(not(debug_assertions))] 21 | unsafe { 22 | core::hint::unreachable_unchecked() 23 | } 24 | } 25 | } 26 | } 27 | } 28 | 29 | impl DerefMut for InitAtBoot { 30 | fn deref_mut(&mut self) -> &mut T { 31 | match self { 32 | InitAtBoot::Initialized(x) => x, 33 | 
InitAtBoot::Uninitialized => { 34 | #[cfg(debug_assertions)] 35 | panic!("tried to access boot resource that is not initialized"); 36 | #[cfg(not(debug_assertions))] 37 | unsafe { 38 | core::hint::unreachable_unchecked() 39 | } 40 | } 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /crates/libxernel/src/collections/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod ringbuffer; 2 | -------------------------------------------------------------------------------- /crates/libxernel/src/collections/ringbuffer.rs: -------------------------------------------------------------------------------- 1 | use core::mem::MaybeUninit; 2 | 3 | struct Ringbuffer { 4 | buffer: [MaybeUninit; N], 5 | read: usize, 6 | write: usize, 7 | size: usize, 8 | } 9 | 10 | impl Ringbuffer { 11 | pub const fn new() -> Self { 12 | Self { 13 | buffer: [const { MaybeUninit::::zeroed() }; N], 14 | read: 0, 15 | write: 0, 16 | size: 0, 17 | } 18 | } 19 | 20 | pub fn push(&mut self, value: T) { 21 | if N == 0 { 22 | return; 23 | } 24 | 25 | if self.size >= N { 26 | unsafe { 27 | self.buffer[self.write].assume_init_drop(); 28 | } 29 | 30 | self.buffer[self.write].write(value); 31 | self.wrap_inc_write(); 32 | } else { 33 | self.buffer[self.write].write(value); 34 | self.wrap_inc_write(); 35 | self.inc_size(); 36 | } 37 | } 38 | 39 | pub fn pop(&mut self) -> Option { 40 | if N == 0 { 41 | return None; 42 | } 43 | 44 | if self.is_empty() { 45 | return None; 46 | } 47 | 48 | let ret = unsafe { self.buffer[self.read].assume_init_read() }; 49 | 50 | self.wrap_inc_read(); 51 | self.dec_size(); 52 | 53 | Some(ret) 54 | } 55 | 56 | pub fn skip(&mut self) { 57 | if N == 0 { 58 | return; 59 | } 60 | 61 | if self.is_empty() { 62 | return; 63 | } 64 | 65 | unsafe { 66 | self.buffer[self.read].assume_init_drop(); 67 | } 68 | 69 | self.wrap_inc_read(); 70 | self.dec_size(); 71 | } 72 | 73 | pub fn peek(&mut 
self) -> Option<&T> { 74 | if N == 0 { 75 | return None; 76 | } 77 | 78 | if self.is_empty() { 79 | return None; 80 | } 81 | 82 | let ret = unsafe { self.buffer[self.read].assume_init_ref() }; 83 | 84 | Some(ret) 85 | } 86 | 87 | pub fn size(&self) -> usize { 88 | self.size 89 | } 90 | 91 | pub fn capacity(&self) -> usize { 92 | N 93 | } 94 | 95 | pub fn is_empty(&self) -> bool { 96 | self.size == 0 97 | } 98 | 99 | pub fn is_full(&self) -> bool { 100 | self.size == N 101 | } 102 | 103 | fn wrap_inc_read(&mut self) { 104 | self.read = (self.read + 1) % N; 105 | } 106 | 107 | fn wrap_inc_write(&mut self) { 108 | self.write = (self.write + 1) % N; 109 | } 110 | 111 | fn inc_size(&mut self) { 112 | self.size += 1; 113 | } 114 | 115 | fn dec_size(&mut self) { 116 | self.size -= 1; 117 | } 118 | } 119 | 120 | impl Drop for Ringbuffer { 121 | fn drop(&mut self) { 122 | while self.pop().is_some() {} 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /crates/libxernel/src/crypto/csprng.rs: -------------------------------------------------------------------------------- 1 | // TODO: Find good name for random number generator trait 2 | trait Csprng {} 3 | -------------------------------------------------------------------------------- /crates/libxernel/src/crypto/fortuna.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anubis-rs/xernel/abf71c1983b5819dd4471c2c9a492b11b3a594b9/crates/libxernel/src/crypto/fortuna.rs -------------------------------------------------------------------------------- /crates/libxernel/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod csprng; 2 | -------------------------------------------------------------------------------- /crates/libxernel/src/crypto/primality.rs: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anubis-rs/xernel/abf71c1983b5819dd4471c2c9a492b11b3a594b9/crates/libxernel/src/crypto/primality.rs -------------------------------------------------------------------------------- /crates/libxernel/src/ipl.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] 4 | #[repr(u8)] 5 | #[allow(clippy::upper_case_acronyms)] 6 | pub enum IPL { 7 | Passive = 0, 8 | APC = 1, 9 | DPC = 2, 10 | Device = 13, 11 | Clock = 14, 12 | High = 15, 13 | } 14 | 15 | impl From for IPL { 16 | fn from(value: usize) -> Self { 17 | match value { 18 | 0 => IPL::Passive, 19 | 1 => IPL::APC, 20 | 2 => IPL::DPC, 21 | 13 => IPL::Device, 22 | 14 => IPL::Clock, 23 | 15 => IPL::High, 24 | _ => panic!("Bad IPL"), 25 | } 26 | } 27 | } 28 | 29 | impl From for IPL { 30 | fn from(value: u64) -> Self { 31 | IPL::from(value as usize) 32 | } 33 | } 34 | 35 | impl From for IPL { 36 | fn from(value: u8) -> Self { 37 | IPL::from(value as usize) 38 | } 39 | } 40 | 41 | fn set_ipl(ipl: IPL) { 42 | unsafe { 43 | asm!("mov cr8, {}", in(reg) ipl as u64, options(nomem, nostack, preserves_flags)); 44 | } 45 | } 46 | 47 | pub fn get_ipl() -> IPL { 48 | let ipl: u64; 49 | 50 | unsafe { 51 | asm!("mov {}, cr8", out(reg) ipl, options(nomem, nostack, preserves_flags)); 52 | } 53 | 54 | IPL::from(ipl) 55 | } 56 | 57 | pub fn raise_ipl(ipl: IPL) -> IPL { 58 | let old_ipl = get_ipl(); 59 | 60 | assert!(old_ipl as u64 <= ipl as u64); 61 | 62 | set_ipl(ipl); 63 | 64 | old_ipl 65 | } 66 | 67 | pub fn splx(ipl: IPL) { 68 | assert!(ipl as u64 <= get_ipl() as u64); 69 | 70 | set_ipl(ipl); 71 | } 72 | -------------------------------------------------------------------------------- /crates/libxernel/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![allow(unused)] 3 | 4 | pub mod boot; 5 | pub mod collections; 6 | pub mod 
on_drop; 7 | pub mod sync; 8 | pub mod syscall; 9 | 10 | #[cfg(feature = "kernel")] 11 | pub mod ipl; 12 | -------------------------------------------------------------------------------- /crates/libxernel/src/on_drop.rs: -------------------------------------------------------------------------------- 1 | use core::mem::ManuallyDrop; 2 | 3 | use core::ops::{Deref, DerefMut}; 4 | 5 | use crate::sync::SpinlockGuard; 6 | 7 | pub struct OnDrop 8 | where 9 | F: FnOnce(), 10 | { 11 | value: ManuallyDrop, 12 | callback: ManuallyDrop, 13 | } 14 | 15 | impl OnDrop 16 | where 17 | F: FnOnce(), 18 | { 19 | pub fn new(value: T, callback: F) -> Self { 20 | Self { 21 | value: ManuallyDrop::new(value), 22 | callback: ManuallyDrop::new(callback), 23 | } 24 | } 25 | } 26 | 27 | impl Drop for OnDrop 28 | where 29 | F: FnOnce(), 30 | { 31 | fn drop(&mut self) { 32 | unsafe { 33 | ManuallyDrop::::drop(&mut self.value); 34 | (ManuallyDrop::::take(&mut self.callback))(); 35 | } 36 | } 37 | } 38 | 39 | impl OnDrop, F> 40 | where 41 | F: FnOnce(), 42 | { 43 | pub fn unlock(self) {} 44 | } 45 | 46 | impl Deref for OnDrop 47 | where 48 | F: FnOnce(), 49 | { 50 | type Target = T; 51 | 52 | fn deref(&self) -> &Self::Target { 53 | &self.value 54 | } 55 | } 56 | 57 | impl DerefMut for OnDrop 58 | where 59 | F: FnOnce(), 60 | { 61 | fn deref_mut(&mut self) -> &mut Self::Target { 62 | &mut self.value 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /crates/libxernel/src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::once::Once; 2 | pub use self::rwlock::{ReadGuard, RwLock, WriteGuard}; 3 | pub use self::spin::{SpinlockGuard, Spinlock}; 4 | 5 | mod once; 6 | mod rwlock; 7 | mod spin; 8 | 9 | #[cfg(feature = "kernel")] 10 | mod spinirq; 11 | #[cfg(feature = "kernel")] 12 | pub use self::spinirq::{SpinlockIRQ, SpinlockIRQGuard}; 13 | 
// ---- file: crates/libxernel/src/sync/once.rs ----
use core::{
    cell::UnsafeCell,
    mem::MaybeUninit,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicU32, AtomicU8, Ordering},
};

/// Value has not been set yet.
const UNINIT: u8 = 0;
/// A thread won the race and is currently writing the value.
const BUSY: u8 = 1;
/// The value is fully written and may be read.
const READY: u8 = 2;

/// Type which represents a value which gets set exactly once
///
/// This type will allow to be set once, then never again.
/// Made for values which are only available at runtime, used in a `static`
/// context and won't change after initialization.
pub struct Once<T> {
    /// Initialization state: UNINIT -> BUSY -> READY.
    ///
    /// BUGFIX: the previous `AtomicBool` had a check-then-act race —
    /// two threads could both observe "not set" and both write the value.
    /// Only the thread that wins the UNINIT -> BUSY compare-exchange may
    /// write now.
    state: AtomicU8,
    /// The storage itself; only valid once `state` is READY.
    data: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Once<T> {
    /// Creates a new uninitialized Once object
    pub const fn new() -> Self {
        Self {
            state: AtomicU8::new(UNINIT),
            data: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Sets the value
    ///
    /// # Panics
    /// Panics if the value is already set (or is concurrently being set by
    /// another thread).
    pub fn set_once(&self, val: T) {
        // Atomically claim the exclusive right to initialize; only one
        // thread can ever move the state from UNINIT to BUSY.
        match self
            .state
            .compare_exchange(UNINIT, BUSY, Ordering::Acquire, Ordering::Relaxed)
        {
            Ok(_) => {
                // SAFETY: we won the CAS above, so no other thread writes
                // `data`, and no reader dereferences it until `state`
                // becomes READY.
                unsafe {
                    (*self.data.get()).as_mut_ptr().write(val);
                }
                // Publish the value; pairs with the Acquire load in `deref`.
                self.state.store(READY, Ordering::Release);
            }
            Err(_) => panic!("Value already set"),
        }
    }

    /// Returns `true` if some [`set_once()`](struct.Once.html#methods.set_once) has completed successfully
    pub fn is_completed(&self) -> bool {
        self.state.load(Ordering::Acquire) == READY
    }
}

// SAFETY: Once only hands out shared references to T; sending/sharing it
// across threads is sound exactly when T itself may be sent/shared.
unsafe impl<T: Send> Send for Once<T> {}
unsafe impl<T: Send + Sync> Sync for Once<T> {}

impl<T> Deref for Once<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // Acquire pairs with the Release store in `set_once`, making the
        // written value visible before a reference is handed out.
        if self.state.load(Ordering::Acquire) == READY {
            // SAFETY: READY means the value was fully initialized.
            unsafe { &*(*self.data.get()).as_ptr() }
        } else {
            panic!("Value not set");
        }
    }
}

impl<T> Default for Once<T> {
    fn default() -> Self {
        Self::new()
    }
}

// ---- file: crates/libxernel/src/sync/rwlock.rs ----

/// Spinning reader-writer lock.
///
/// `state` encodes the lock: 0 = unlocked, `u32::MAX` = write-locked,
/// any other value = number of active readers.
pub struct RwLock<T> {
    state: AtomicU32,
    data: UnsafeCell<T>,
}

unsafe impl<T> Sync for RwLock<T> where T: Send + Sync {}

impl<T> RwLock<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(0), // Unlocked.
            data: UnsafeCell::new(value),
        }
    }

    /// Takes a shared read lock, spinning while a writer holds the lock.
    pub fn read(&self) -> ReadGuard<'_, T> {
        let mut current_state = self.state.load(Ordering::Relaxed);

        loop {
            // `u32::MAX` is reserved for the writer; anything below it means
            // another reader may join.
            if current_state < u32::MAX {
                match self.state.compare_exchange_weak(
                    current_state,
                    current_state + 1,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return ReadGuard { rwlock: self },
                    Err(e) => current_state = e,
                }
            }

            // Write-locked: wait cheaply until the writer releases.
            while current_state == u32::MAX {
                core::hint::spin_loop();
                current_state = self.state.load(Ordering::Relaxed);
            }
        }
    }

    /// Takes the exclusive write lock, spinning until no readers or writer
    /// remain.
    pub fn write(&self) -> WriteGuard<'_, T> {
        while self
            .state
            .compare_exchange(0, u32::MAX, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
        WriteGuard { rwlock: self }
    }
}

/// RAII guard for shared read access.
pub struct ReadGuard<'a, T> {
    rwlock: &'a RwLock<T>,
}

impl<T> ReadGuard<'_, T> {
    /// Explicitly releases the read lock (same as dropping the guard).
    pub fn unlock(self) {}
}

impl<T> Deref for ReadGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: a read guard guarantees no writer is active.
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<T> Drop for ReadGuard<'_, T> {
    fn drop(&mut self) {
        // One reader fewer; Release pairs with the writer's Acquire.
        self.rwlock.state.fetch_sub(1, Ordering::Release);
    }
}

/// RAII guard for exclusive write access.
pub struct WriteGuard<'a, T> {
    rwlock: &'a RwLock<T>,
}

impl<T> WriteGuard<'_, T> {
    /// Explicitly releases the write lock (same as dropping the guard).
    pub fn unlock(self) {}
}

impl<T> Deref for WriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the write guard holds the lock exclusively.
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<T> DerefMut for WriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the write guard holds the lock exclusively.
        unsafe { &mut *self.rwlock.data.get() }
    }
}

impl<T> Drop for WriteGuard<'_, T> {
    fn drop(&mut self) {
        // Back to unlocked; Release publishes all writes made under the lock.
        self.rwlock.state.store(0, Ordering::Release);
    }
}

// ---- file: crates/libxernel/src/sync/spin.rs ----
-------------------------------------------------------------------------------- 1 | use core::{ 2 | arch::asm, 3 | cell::UnsafeCell, 4 | ops::{Deref, DerefMut}, 5 | sync::atomic::{AtomicBool, Ordering}, 6 | }; 7 | 8 | use crate::{ 9 | ipl::{raise_ipl, splx, IPL}, 10 | on_drop::OnDrop, 11 | }; 12 | 13 | /// Simple data locking structure using a spin loop. 14 | /// 15 | /// This spinlock will block threads waiting for the lock to become available. 16 | /// Accessing the data is only possible through the RAII guards returned from [`Spinlock::lock`] and [`Spinlock::try_lock`], since they guarantee you are the owner of the lock. 17 | pub struct Spinlock { 18 | /// Atomic variable which is used to determine if the Spinlock is locked or not 19 | is_locked: AtomicBool, 20 | /// The data itself 21 | data: UnsafeCell, 22 | } 23 | 24 | /// Spinlock RAII wrapper type for safe release of lock 25 | /// 26 | /// When acquiring a lock through [`Spinlock::lock`] or [`Spinlock::try_lock`], a SpinlockGuard gets returned which is a wrapper over the mutex itself. 27 | /// This type is used for releasing the spinlock when the value goes out of scope, so you don't have to think of unlocking yourself. 28 | pub struct SpinlockGuard<'a, T: ?Sized + 'a> { 29 | lock: &'a Spinlock, 30 | } 31 | 32 | unsafe impl Send for Spinlock {} 33 | unsafe impl Sync for Spinlock {} 34 | 35 | impl Spinlock { 36 | /// Creates an unlocked and initialized spinlock 37 | pub const fn new(data: T) -> Self { 38 | Self { 39 | is_locked: AtomicBool::new(false), 40 | data: UnsafeCell::new(data), 41 | } 42 | } 43 | } 44 | 45 | impl Spinlock { 46 | /// Acquires a lock for this spinlock and returns a RAII guard 47 | /// 48 | /// It tries to acquire the lock, if it's already locked the thread enters a so-called spin loop 49 | /// When the value of the underlying atomic boolean changes, it tries again to acquire the lock but no guarantee given 50 | /// that it will be given the lock. 
51 | pub fn lock(&self) -> SpinlockGuard<'_, T> { 52 | loop { 53 | if !self.is_locked.swap(true, Ordering::Acquire) { 54 | return SpinlockGuard { lock: self }; 55 | } 56 | 57 | while self.is_locked.load(Ordering::Relaxed) { 58 | core::hint::spin_loop(); 59 | } 60 | } 61 | } 62 | 63 | /// Tries one time to acquire the lock 64 | /// 65 | /// Simply a try if the lock is free, if not [`None`] returned, else a [`SpinlockGuard`] wrapped in an option 66 | pub fn try_lock(&self) -> Option> { 67 | if !self.is_locked.swap(true, Ordering::AcqRel) { 68 | // is_locked was false and now we have atomically swapped it to true, 69 | // so no one else has access to this data. 70 | return Some(SpinlockGuard { lock: self }); 71 | } 72 | None 73 | } 74 | 75 | pub fn with_lock(&self, function: F) -> U 76 | where 77 | F: FnOnce(&mut T) -> U, 78 | { 79 | let mut lock = self.lock(); 80 | function(&mut *lock) 81 | } 82 | 83 | pub fn aquire(&self) -> OnDrop, impl FnOnce()> { 84 | let ipl = raise_ipl(IPL::DPC); 85 | let callback = move || splx(ipl); 86 | OnDrop::new(self.lock(), callback) 87 | } 88 | 89 | pub fn aquire_at(&self, ipl: IPL) -> OnDrop, impl FnOnce()> { 90 | let ipl = raise_ipl(ipl); 91 | let callback = move || splx(ipl); 92 | OnDrop::new(self.lock(), callback) 93 | } 94 | 95 | /// Unlocking a spinlock 96 | /// 97 | /// With the drop approach the lock only gets released when the [`SpinlockGuard`] value goes out of scope. 98 | /// It is possible to earlier drop the value with `drop(guard);` but it looks like unclean programming. 99 | /// This associated function is no different to [`drop`] but when reading the code it is much clearer what is happening. 100 | pub fn unlock(_guard: SpinlockGuard<'_, T>) {} 101 | } 102 | 103 | impl SpinlockGuard<'_, T> { 104 | // FIXME: Find a way to unlock when aquire is used. 
Since the spinlockguard can't be moved out of the OnDrop Type 105 | /// Unlocking a spinlock 106 | /// 107 | /// Sometimes it is nice to be able to unlock a lock when you want to. 108 | /// Normally a Spinlock in Rust would only unlock when the corresponding Guard would be dropped. 109 | /// In special cases, like the [`Scheduler`], we even need the lock to be released before the function end, since we would wind up in a dead lock on the next timer interrupt. 110 | /// Semantically there is no difference between this method and [`Spinlock::unlock`](struct.Spinlock.html#method.unlock) 111 | pub fn unlock(self) {} 112 | } 113 | 114 | impl Drop for SpinlockGuard<'_, T> { 115 | fn drop(&mut self) { 116 | // Releasing the lock 117 | self.lock.is_locked.store(false, Ordering::Release); 118 | } 119 | } 120 | 121 | impl Deref for SpinlockGuard<'_, T> { 122 | type Target = T; 123 | 124 | fn deref(&self) -> &Self::Target { 125 | unsafe { &*self.lock.data.get() } 126 | } 127 | } 128 | 129 | impl DerefMut for SpinlockGuard<'_, T> { 130 | fn deref_mut(&mut self) -> &mut Self::Target { 131 | unsafe { &mut *self.lock.data.get() } 132 | } 133 | } 134 | 135 | #[inline] 136 | fn write_cr8(ipl: IPL) { 137 | unsafe { 138 | asm!("mov cr8, {}", in(reg) ipl as u64, options(nomem, nostack, preserves_flags)); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /crates/libxernel/src/sync/spinirq.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::Spinlock; 2 | use core::arch::asm; 3 | use core::ops::{Deref, DerefMut}; 4 | use core::sync::atomic::{compiler_fence, Ordering}; 5 | 6 | use super::SpinlockGuard; 7 | 8 | /// A handle for interrupt state 9 | pub struct HeldIRQ(bool); 10 | 11 | /// Spinlock which disables interrupts when taking the lock 12 | pub struct SpinlockIRQ { 13 | lock: Spinlock, 14 | } 15 | 16 | impl SpinlockIRQ { 17 | pub const fn new(data: T) -> Self { 18 | Self { 19 | 
lock: Spinlock::new(data), 20 | } 21 | } 22 | 23 | /// Calls the lock of the inner [`Spinlock`] and freezes the interrupts 24 | pub fn lock(&self) -> SpinlockIRQGuard { 25 | let inner_lock = self.lock.lock(); 26 | 27 | SpinlockIRQGuard { 28 | guard: inner_lock, 29 | _held_irq: hold_interrupts(), 30 | } 31 | } 32 | 33 | /// Unlock the underlying spinlock 34 | /// 35 | /// If needed to release the lock before the Guard gets dropped you may use this function 36 | pub fn unlock(_guard: SpinlockIRQGuard<'_, T>) {} 37 | } 38 | 39 | /// Wrapper Type over SpinlockGuard and HeldIRQ 40 | pub struct SpinlockIRQGuard<'a, T: 'a> { 41 | guard: SpinlockGuard<'a, T>, 42 | _held_irq: HeldIRQ, 43 | } 44 | 45 | impl SpinlockIRQGuard<'_, T> { 46 | /// Unlock the underlying spinlock 47 | /// 48 | /// If needed to release the lock before the Guard gets dropped you may use this function 49 | pub fn unlock(self) {} 50 | } 51 | 52 | impl Deref for SpinlockIRQGuard<'_, T> { 53 | type Target = T; 54 | 55 | fn deref(&self) -> &Self::Target { 56 | &(self.guard) 57 | } 58 | } 59 | 60 | impl DerefMut for SpinlockIRQGuard<'_, T> { 61 | fn deref_mut(&mut self) -> &mut Self::Target { 62 | &mut (self.guard) 63 | } 64 | } 65 | 66 | impl Drop for HeldIRQ { 67 | fn drop(&mut self) { 68 | if self.0 { 69 | enable_interrupts(); 70 | } 71 | } 72 | } 73 | 74 | /// Gets the interrupt state 75 | /// 76 | /// Returns a bool if interrupts are currently enabled or not. 77 | /// Is used when dropping the SpinlockIRQGuard to get back to the old interrupt state. 
78 | #[inline(always)] 79 | pub fn interrupts_enabled() -> bool { 80 | if cfg!(target_arch = "x86_64") { 81 | unsafe { 82 | let flags: usize; 83 | asm!("pushfq; pop {}", out(reg) flags, options(nomem, preserves_flags)); 84 | (flags & 0x0200) != 0 85 | } 86 | } else { 87 | unimplemented!("Interrupts enabled not implemented for this architecture"); 88 | } 89 | } 90 | 91 | /// Returns a HeldIRQ object with the current interrupt state 92 | /// 93 | /// Gets the current interrupt state and creates a HeldIRQ object 94 | /// It then disables the interrupts, even if they are already disabled and returns the HeldIRQ object. 95 | pub fn hold_interrupts() -> HeldIRQ { 96 | let enabled = interrupts_enabled(); 97 | let retval = HeldIRQ(enabled); 98 | disable_interrupts(); 99 | retval 100 | } 101 | 102 | /// Disables interrupts across multiple architectures 103 | #[inline(always)] 104 | pub fn disable_interrupts() { 105 | unsafe { 106 | if cfg!(target_arch = "x86_64") { 107 | asm!("cli", options(nomem, nostack)); 108 | } else { 109 | unimplemented!("Disable interrupts not implemented for this architecture"); 110 | } 111 | } 112 | compiler_fence(Ordering::SeqCst); 113 | } 114 | 115 | /// Enables interrupts across multiple architectures 116 | #[inline(always)] 117 | pub fn enable_interrupts() { 118 | compiler_fence(Ordering::SeqCst); 119 | unsafe { 120 | if cfg!(target_arch = "x86_64") { 121 | asm!("sti", options(nomem, nostack)); 122 | } else { 123 | unimplemented!("Enable interrupts not implemented for this architecture"); 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /crates/libxernel/src/syscall.rs: -------------------------------------------------------------------------------- 1 | // constants for syscall numbers 2 | 3 | use bitflags::bitflags; 4 | 5 | pub const SYS_READ: usize = 0; 6 | pub const SYS_WRITE: usize = 1; 7 | pub const SYS_OPEN: usize = 2; 8 | pub const SYS_CLOSE: usize = 3; 9 | pub const SYS_MMAP: 
usize = 4; 10 | pub const SYS_LOG: usize = 5; 11 | 12 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 13 | #[repr(isize)] 14 | pub enum SyscallError { 15 | NoPermission = -1, 16 | VNodeNotFound = -2, 17 | NotADirectory = -3, 18 | IsADirectory = -4, 19 | NoSpace = -5, 20 | NotEmpty = -6, 21 | EntryNotFound = -7, 22 | MountPointNotFound = -8, 23 | FileSystemNotFound = -9, 24 | MalformedPath = -10, 25 | InvalidArgument = -11, 26 | } 27 | 28 | bitflags! { 29 | #[derive(Clone, Copy)] 30 | pub struct ProtectionFlags: u8 { 31 | const READ = 1 << 0; 32 | const WRITE = 1 << 1; 33 | const EXECUTE = 1 << 2; 34 | } 35 | } 36 | 37 | bitflags! { 38 | #[derive(PartialEq, Eq)] 39 | pub struct MapFlags: u8 { 40 | const SHARED = 1 << 0; 41 | const PRIVATE = 1 << 1; 42 | const ANONYMOUS = 1 << 3; 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /gdb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Call gdb and connect to qemu 4 | gdb target/x86_64/debug/xernel \ 5 | -ex "target remote :1234" \ 6 | -ex "break kernel_main" \ 7 | -ex "continue" 8 | -------------------------------------------------------------------------------- /kernel/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | limine 13 | 14 | *.log 15 | *.iso -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xernel" 3 | version = "0.1.0" 4 | authors = 
["Raphael O. = Once::new(); 17 | static HPET_PERIOD: Once = Once::new(); 18 | static HPET_CLOCK_TICK_UNIT: Once = Once::new(); 19 | static HPET_BASE_ADDRESS: Once = Once::new(); 20 | 21 | pub fn init() { 22 | let hpet_info = acpi_parsing::HpetInfo::new(&ACPI.tables).unwrap(); 23 | 24 | HPET_CLOCK_TICK_UNIT.set_once(hpet_info.clock_tick_unit); 25 | HPET_BASE_ADDRESS.set_once(hpet_info.base_address as u64 + *HIGHER_HALF_OFFSET); 26 | 27 | let mut mapper = KERNEL_PAGE_MAPPER.lock(); 28 | 29 | mapper.map::( 30 | PhysFrame::containing_address(PhysAddr::new(hpet_info.base_address as u64)), 31 | Page::containing_address(VirtAddr::new(hpet_info.base_address as u64 + *HIGHER_HALF_OFFSET)), 32 | PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE, 33 | true, 34 | ); 35 | 36 | let period = (read(0) >> 32) & u64::MAX; 37 | 38 | assert!(period != 0); 39 | 40 | // INFO: Period must be less or equal to 0x05F5E100 according to HPET spec 41 | assert!(period <= 0x05F5E100); 42 | 43 | let f = (u64::pow(10, 15) as f64 / period as f64) as u64; 44 | 45 | debug!("HPET Period: {} femtoseconds", period); 46 | debug!("HPET Period: {} nanoseconds", period / 1_000_000); 47 | debug!("HPET Frequency: {} Hz", f); 48 | debug!("HPET Frequency: {} MHz", f / 1_000_000); 49 | 50 | HPET_FREQUENCY.set_once(f); 51 | HPET_PERIOD.set_once(period); 52 | 53 | // set ENABLE_CNF bit 54 | write( 55 | HPET_CONFIGURATION_REGISTER_OFFSET, 56 | read(HPET_CONFIGURATION_REGISTER_OFFSET) | 1, 57 | ); 58 | } 59 | 60 | pub fn read_main_counter() -> u64 { 61 | read(HPET_MAIN_COUNTER_REGISTER_OFFSET) 62 | } 63 | 64 | pub fn frequency() -> u64 { 65 | *HPET_FREQUENCY 66 | } 67 | 68 | pub fn sleep(nanos: u64) { 69 | let target_counter = read_main_counter() + ((nanos * 1_000_000) / *HPET_PERIOD); 70 | 71 | while read_main_counter() < target_counter { 72 | spin_loop(); 73 | } 74 | } 75 | 76 | /// returns the number of microseconds since start of the hpet 77 | pub fn microseconds() -> u64 { 78 | 
read_main_counter() / (frequency() / 1_000_000) 79 | } 80 | 81 | /// returns the number of milliseconds since start of the hpet 82 | pub fn milliseconds() -> u64 { 83 | read_main_counter() / (frequency() / 1_000) 84 | } 85 | 86 | fn write(offset: u64, val: u64) { 87 | let hpet_ptr = *HPET_BASE_ADDRESS as *mut u64; 88 | 89 | unsafe { 90 | core::ptr::write_volatile(hpet_ptr.byte_offset(offset as isize), val); 91 | } 92 | } 93 | 94 | fn read(offset: u64) -> u64 { 95 | let hpet_ptr = *HPET_BASE_ADDRESS as *mut u64; 96 | 97 | unsafe { core::ptr::read_volatile(hpet_ptr.byte_offset(offset as isize) as *const u64) } 98 | } 99 | -------------------------------------------------------------------------------- /kernel/src/acpi/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod hpet; 2 | 3 | use core::ptr::NonNull; 4 | 5 | use crate::mem::HIGHER_HALF_OFFSET; 6 | use acpi_parsing::platform::interrupt::Apic; 7 | use acpi_parsing::{AcpiHandler, AcpiTables, InterruptModel, PhysicalMapping}; 8 | use libxernel::sync::Once; 9 | use limine::RsdpRequest; 10 | 11 | #[derive(Clone)] 12 | struct AcpiMapper; 13 | 14 | static RSDP_REQUEST: RsdpRequest = RsdpRequest::new(0); 15 | 16 | pub static ACPI: Once = Once::new(); 17 | 18 | pub fn init() { 19 | ACPI.set_once(Acpi::new()); 20 | } 21 | 22 | impl AcpiHandler for AcpiMapper { 23 | unsafe fn map_physical_region(&self, physical_address: usize, size: usize) -> PhysicalMapping { 24 | PhysicalMapping::new( 25 | physical_address, 26 | NonNull::new_unchecked((physical_address + *HIGHER_HALF_OFFSET as usize) as *mut _), 27 | size, 28 | size, 29 | self.clone(), 30 | ) 31 | } 32 | 33 | fn unmap_physical_region(_region: &PhysicalMapping) { 34 | // the region is never unmapped 35 | } 36 | } 37 | 38 | pub struct Acpi { 39 | tables: AcpiTables, 40 | } 41 | 42 | impl Acpi { 43 | pub fn new() -> Self { 44 | let address = RSDP_REQUEST.get_response().get().unwrap().address.as_ptr(); 45 | 46 | let 
acpi_tables = unsafe { 47 | AcpiTables::from_rsdp(AcpiMapper, address.unwrap() as usize - *HIGHER_HALF_OFFSET as usize) 48 | .expect("failed to get acpi tables") 49 | }; 50 | 51 | Self { tables: acpi_tables } 52 | } 53 | 54 | pub fn get_apic(&self) -> Apic { 55 | match ACPI.tables.platform_info().unwrap().interrupt_model { 56 | InterruptModel::Apic(apic) => apic, 57 | InterruptModel::Unknown => panic!("No apic found"), 58 | _ => unreachable!(), 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /kernel/src/allocator/buddy.rs: -------------------------------------------------------------------------------- 1 | // Credits to Stupremee (https://github.com/Stupremee) 2 | // https://github.com/Stupremee/novos/blob/main/crates/kernel/src/allocator/buddy.rs 3 | 4 | use super::{align_up, AllocStats, Error, Result}; 5 | use core::{cmp, ptr::NonNull}; 6 | use x86_64::VirtAddr; 7 | 8 | struct ListNode { 9 | next: Option>, 10 | } 11 | 12 | pub struct BuddyAllocator { 13 | orders: [Option>; MAX_ORDER], 14 | pub stats: AllocStats, 15 | } 16 | 17 | impl BuddyAllocator { 18 | /// Create a empty and uninitialized buddy allocator. 
19 | pub const fn new() -> Self { 20 | Self { 21 | orders: [None; MAX_ORDER], 22 | stats: AllocStats::with_name("Physical Memory"), 23 | } 24 | } 25 | 26 | pub fn size_for_order(&self, order: usize) -> usize { 27 | (1 << order) * MIN_ORDER_SIZE 28 | } 29 | 30 | pub fn order_for_size(&self, size: usize) -> usize { 31 | let size = cmp::max(size, MIN_ORDER_SIZE); 32 | let size = size.next_power_of_two() / MIN_ORDER_SIZE; 33 | size.trailing_zeros() as usize 34 | } 35 | 36 | fn buddy_of(&self, block: NonNull, order: usize) -> Result> { 37 | let buddy = block.as_ptr() as usize ^ self.size_for_order(order); 38 | NonNull::new(buddy as *mut _).ok_or(Error::NullPointer) 39 | } 40 | 41 | pub unsafe fn add_region(&mut self, start: NonNull, end: NonNull) -> Result { 42 | let start = start.as_ptr(); 43 | let mut start = align_up(start as _, MIN_ORDER_SIZE) as *mut u8; 44 | let end = end.as_ptr(); 45 | 46 | if (end as usize).saturating_sub(start as usize) < MIN_ORDER_SIZE { 47 | return Err(Error::RegionTooSmall); 48 | } 49 | 50 | if end < start { 51 | return Err(Error::InvalidRegion); 52 | } 53 | 54 | let mut total = 0; 55 | while (end as usize).saturating_sub(start as usize) >= MIN_ORDER_SIZE { 56 | let order = self.add_single_region(start, end)?; 57 | let size = self.size_for_order(order); 58 | 59 | start = start.add(size); 60 | total += size; 61 | } 62 | 63 | Ok(total) 64 | } 65 | 66 | unsafe fn add_single_region(&mut self, start: *mut u8, end: *mut u8) -> Result { 67 | let start_addr = start as usize; 68 | 69 | let mut order = 0; 70 | while order < (MAX_ORDER - 1) { 71 | let size = self.size_for_order(order + 1); 72 | 73 | let new_end = match start_addr.checked_add(size) { 74 | Some(num) if num <= end as usize => num, 75 | _ => break, 76 | }; 77 | 78 | let buddy = self 79 | .buddy_of(NonNull::new(start as *mut _).unwrap(), order + 1)? 
80 | .as_ptr(); 81 | if new_end <= end as usize && (start.cast() <= buddy && buddy <= end.cast()) { 82 | order += 1; 83 | } else { 84 | break; 85 | } 86 | } 87 | 88 | // push the block to the list for the given order 89 | self.order_push(order, NonNull::new(start).unwrap().cast()); 90 | 91 | // update statistics 92 | let size = self.size_for_order(order); 93 | self.stats.total += size; 94 | self.stats.free += size; 95 | 96 | Ok(order) 97 | } 98 | 99 | pub fn allocate(&mut self, order: usize) -> Result> { 100 | // check if we exceeded the maximum order 101 | if order >= MAX_ORDER { 102 | return Err(Error::OrderTooLarge); 103 | } 104 | 105 | if let Some(block) = self.order_pop(order) { 106 | let size = self.size_for_order(order); 107 | self.alloc_stats(size); 108 | 109 | return NonNull::new(block.as_ptr().cast()).ok_or(Error::NullPointer); 110 | } 111 | 112 | let block = self.allocate(order + 1).map_err(|_| Error::NoMemoryAvailable)?; 113 | 114 | let buddy = self.buddy_of(block.cast(), order)?; 115 | 116 | self.order_push(order, buddy.cast()); 117 | 118 | let size = self.size_for_order(order); 119 | self.alloc_stats(size); 120 | 121 | Ok(block) 122 | } 123 | 124 | pub unsafe fn deallocate(&mut self, block: NonNull, order: usize) -> Result<()> { 125 | let buddy_addr = self.buddy_of(block.cast(), order)?; 126 | 127 | if self.order_remove(order, buddy_addr.cast()) { 128 | let size = self.size_for_order(order); 129 | self.alloc_stats(size); 130 | 131 | let new_block = cmp::min(buddy_addr.cast(), block); 132 | 133 | let new_order = order + 1; 134 | if new_order >= MAX_ORDER { 135 | self.order_push(order, buddy_addr.cast()); 136 | self.order_push(order, block.cast()); 137 | 138 | self.dealloc_stats(size * 2); 139 | } else { 140 | self.deallocate(new_block, new_order)?; 141 | } 142 | } else { 143 | self.order_push(order, block.cast()); 144 | 145 | let size = self.size_for_order(order); 146 | self.dealloc_stats(size); 147 | } 148 | 149 | Ok(()) 150 | } 151 | 152 | fn 
order_push(&mut self, order: usize, ptr: NonNull) { 153 | let head = self.orders[order]; 154 | 155 | unsafe { 156 | let vptr = VirtAddr::new(ptr.as_ptr() as u64); 157 | vptr.as_mut_ptr::().write(ListNode { next: head }); 158 | } 159 | 160 | self.orders[order] = Some(ptr.cast()); 161 | } 162 | 163 | fn order_pop(&mut self, order: usize) -> Option> { 164 | let head = self.orders[order]?; 165 | let vhead = VirtAddr::new(head.as_ptr() as u64); 166 | 167 | unsafe { 168 | self.orders[order] = (*vhead.as_ptr::()).next; 169 | } 170 | 171 | Some(head) 172 | } 173 | 174 | fn order_remove(&mut self, order: usize, to_remove: NonNull) -> bool { 175 | let mut cur: *mut Option> = match self.orders.get_mut(order) { 176 | Some(cur) => cur, 177 | None => return false, 178 | }; 179 | 180 | while let Some(ptr) = unsafe { *cur } { 181 | let vptr = VirtAddr::new(ptr.as_ptr() as u64).as_mut_ptr::(); 182 | 183 | if ptr == to_remove { 184 | unsafe { 185 | *cur = (*vptr).next; 186 | } 187 | return true; 188 | } 189 | 190 | unsafe { 191 | cur = &mut (*vptr).next; 192 | } 193 | } 194 | 195 | false 196 | } 197 | 198 | fn alloc_stats(&mut self, size: usize) { 199 | self.stats.free -= size; 200 | self.stats.allocated += size; 201 | } 202 | 203 | fn dealloc_stats(&mut self, size: usize) { 204 | self.stats.free += size; 205 | self.stats.allocated -= size; 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /kernel/src/allocator/mod.rs: -------------------------------------------------------------------------------- 1 | // Credits to Stupremee (https://github.com/Stupremee) 2 | // https://github.com/Stupremee/novos/blob/main/crates/kernel/src/allocator.rs 3 | 4 | pub mod buddy; 5 | pub mod unit; 6 | 7 | use core::fmt; 8 | 9 | /// Result for every memory allocation operation. 10 | pub type Result = core::result::Result; 11 | 12 | /// Aligns the given `addr` upwards to `align`. 
13 | pub fn align_up(addr: usize, align: usize) -> usize { 14 | (addr + align - 1) & !(align - 1) 15 | } 16 | 17 | /// Any error that can happen while allocating or deallocating memory. 18 | #[derive(Debug)] 19 | pub enum Error { 20 | /// region was too small 21 | RegionTooSmall, 22 | /// the `end` pointer of a memory region was before the `start` pointer 23 | InvalidRegion, 24 | /// order exceeded the maximum order 25 | OrderTooLarge, 26 | /// no free memory left 27 | NoMemoryAvailable, 28 | /// can't allocate zero pages 29 | AllocateZeroPages, 30 | /// this is not a real error and should never be thrown somewhere 31 | NoSlabForLayout, 32 | /// `NonNull` was null 33 | /// 34 | /// Mostly just a safety mechanism to avoid UB. 35 | NullPointer, 36 | } 37 | 38 | /// Statistics for a memory allocator. 39 | #[derive(Debug, Clone)] 40 | pub struct AllocStats { 41 | /// The name of the allocator that collected these stat.s 42 | pub name: &'static str, 43 | /// The number of size that were allocated. 44 | pub allocated: usize, 45 | /// The number of bytes that are left for allocation. 46 | pub free: usize, 47 | /// The total number of bytes that this allocator has available for allocation. 48 | pub total: usize, 49 | } 50 | 51 | impl AllocStats { 52 | /// Create a new [`AllocStats`] instance for the given allocator name. 
53 | pub const fn with_name(name: &'static str) -> Self { 54 | Self { 55 | name, 56 | free: 0, 57 | allocated: 0, 58 | total: 0, 59 | } 60 | } 61 | } 62 | 63 | impl fmt::Display for AllocStats { 64 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 65 | writeln!(f, "{}", self.name)?; 66 | self.name.chars().try_for_each(|_| write!(f, "-"))?; 67 | 68 | writeln!(f, "\n{:<11} {}", "Allocated:", unit::bytes(self.allocated))?; 69 | writeln!(f, "{:<11} {}", "Free:", unit::bytes(self.free))?; 70 | writeln!(f, "{:<11} {}", "Total:", unit::bytes(self.total))?; 71 | 72 | self.name.chars().try_for_each(|_| write!(f, "-"))?; 73 | Ok(()) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /kernel/src/allocator/unit.rs: -------------------------------------------------------------------------------- 1 | // Credits to Stupremee (https://github.com/Stupremee) 2 | // https://github.com/Stupremee/novos/blob/main/crates/kernel/src/unit.rs 3 | 4 | //! Utilities for working with raw byte units. 5 | use core::fmt; 6 | 7 | /// `1 KiB` 8 | pub const KIB: usize = 1 << 10; 9 | /// `1 MiB` 10 | pub const MIB: usize = 1 << 20; 11 | /// `1 GiB` 12 | pub const GIB: usize = 1 << 30; 13 | /// `1 TiB` 14 | pub const TIB: usize = 1 << 40; 15 | 16 | /// Return a formattable type that will pretty-print the given amount of bytes. 17 | pub fn bytes + Copy>(x: I) -> impl fmt::Display { 18 | ByteUnit(x) 19 | } 20 | 21 | /// Wrapper around raw byte that pretty-prints 22 | /// them using the [`Display`](core::fmt::Display) 23 | /// implementation. 
#[derive(Debug, Clone, Copy)]
pub struct ByteUnit<I>(I);

impl<I> fmt::Display for ByteUnit<I>
where
    I: Into<usize> + Copy,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let this = Into::<usize>::into(self.0);
        let count = this as f32;

        // Pick the largest unit the value does not overflow into.
        match this {
            0..KIB => write!(f, "{this:>6} B")?,
            KIB..MIB => write!(f, "{:>6.2} KiB", count / KIB as f32)?,
            MIB..GIB => write!(f, "{:>6.2} MiB", count / MIB as f32)?,
            GIB..TIB => write!(f, "{:>6.2} GiB", count / GIB as f32)?,
            _ => write!(f, "{:>6.2} TiB", count / TIB as f32)?,
        };

        Ok(())
    }
}

// ---- file: kernel/src/arch/amd64/apic.rs ----
use alloc::vec::Vec;
use libxernel::sync::{Once, Spinlock};

use crate::arch::amd64::ioapic;
use crate::arch::amd64::ioapic::IOApic;
use crate::arch::amd64::lapic::LocalApic;
use crate::sched::context::TrapFrame;

/// All I/O APICs discovered during initialization.
pub static IOAPICS: Spinlock<Vec<IOApic>> = Spinlock::new(Vec::new());

/// The local APIC of the current processor.
pub static APIC: Once<LocalApic> = Once::new();

/// Initializes the I/O APICs and the local APIC.
pub fn init() {
    let mut io_apics = IOAPICS.lock();

    ioapic::init(&mut io_apics);

    APIC.set_once(LocalApic::new());
}

/// Handler for spurious APIC interrupts; only acknowledges them.
pub fn apic_spurious_interrupt(_stack_frame: &mut TrapFrame) {
    APIC.eoi();
}

// ---- file: kernel/src/arch/amd64/cpuid.rs ----
/// CPU feature flags (not yet populated).
pub enum CpuFeatures {}

// ---- file: kernel/src/arch/amd64/gdt.rs ----
use alloc::alloc::alloc_zeroed;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::ptr::addr_of;
use libxernel::sync::{Once, Spinlock};
use x86_64::instructions::segmentation::{Segment, CS, DS, ES, SS};
use x86_64::instructions::tables::load_tss;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable};
use x86_64::structures::tss::TaskStateSegment;
use x86_64::VirtAddr;

pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;
pub const IST_STACK_SIZE: usize = 4096 * 5;

/// Dedicated double-fault stack for the bootstrap processor.
static mut BSP_IST_STACK: [u8; IST_STACK_SIZE] = [0; IST_STACK_SIZE];

static TSS: Once<TaskStateSegment> = Once::new();
pub static GDT_BSP: Once<(GlobalDescriptorTable, Selectors)> = Once::new();

/// Per-AP GDT/TSS pairs, kept alive for the kernel's lifetime.
static GDT_AP: Spinlock<Vec<Gdt>> = Spinlock::new(Vec::new());

#[derive(Debug)]
struct Gdt {
    gdt: &'static GlobalDescriptorTable,
    selectors: Selectors,
    tss: &'static TaskStateSegment,
    ap_id: usize,
}

/// Segment selectors handed out when a GDT is built.
#[derive(Debug)]
pub struct Selectors {
    pub code_selector: SegmentSelector,
    pub data_selector: SegmentSelector,
    pub tss_selector: SegmentSelector,
    pub user_code_selector: SegmentSelector,
    pub user_data_selector: SegmentSelector,
}

/// Builds and loads the GDT and TSS of the bootstrap processor.
pub fn init() {
    let mut tss = TaskStateSegment::new();
    tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
        let stack_start = VirtAddr::from_ptr(addr_of!(BSP_IST_STACK));
        // The IST entry points at the TOP of the stack (stacks grow down).
        stack_start + IST_STACK_SIZE as u64
    };

    TSS.set_once(tss);

    let mut gdt = GlobalDescriptorTable::new();

    let code_selector = gdt.append(Descriptor::kernel_code_segment());
    let data_selector = gdt.append(Descriptor::kernel_data_segment());

    // let kernel_data_flags = DescriptorFlags::USER_SEGMENT | DescriptorFlags::PRESENT | DescriptorFlags::WRITABLE;
    // let data_selector = gdt.add_entry(Descriptor::UserSegment(kernel_data_flags.bits()));

    // System segment descriptors (which the TSS descriptor is) are 16 bytes
    // and take up 2 slots in the GDT; user code therefore lands at index 5,
    // user data at index 6.
    let tss_selector = gdt.append(Descriptor::tss_segment(&TSS));
    let user_data_selector = gdt.append(Descriptor::user_data_segment());
    let user_code_selector = gdt.append(Descriptor::user_code_segment());
    GDT_BSP.set_once((
        gdt,
        Selectors {
            code_selector,
            data_selector,
            tss_selector,
            user_code_selector,
            user_data_selector,
        },
    ));

    GDT_BSP.0.load();
    unsafe {
        CS::set_reg(GDT_BSP.1.code_selector);
        SS::set_reg(GDT_BSP.1.data_selector);
        DS::set_reg(GDT_BSP.1.data_selector);
        ES::set_reg(GDT_BSP.1.data_selector);

        load_tss(GDT_BSP.1.tss_selector);
    }
}

/// Builds and loads a GDT/TSS pair for application processor `ap_id`.
pub fn init_ap(ap_id: usize) {
    let mut gdt_ap = GDT_AP.lock();

    // Leak the GDT so it lives for 'static, as the CPU keeps referencing it.
    let gdt: &'static mut GlobalDescriptorTable = Box::leak(Box::new(GlobalDescriptorTable::new()));
    let code_selector = gdt.append(Descriptor::kernel_code_segment());
    let data_selector = gdt.append(Descriptor::kernel_data_segment());
    let user_data_selector = gdt.append(Descriptor::user_data_segment());
    let user_code_selector = gdt.append(Descriptor::user_code_segment());

    let mut boxed_tss = Box::new(TaskStateSegment::new());

    // Heap-allocated, zeroed double-fault stack for this AP.
    let ist0 = unsafe { alloc_zeroed(core::alloc::Layout::from_size_align(IST_STACK_SIZE, 4096).unwrap()) };
    boxed_tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] =
        unsafe { VirtAddr::from_ptr(ist0.add(IST_STACK_SIZE)) };

    let tss: &'static mut TaskStateSegment = Box::leak(boxed_tss);
    let tss_selector = gdt.append(Descriptor::tss_segment(tss));

    gdt_ap.push(Gdt {
        gdt,
        selectors: Selectors {
            code_selector,
            data_selector,
            tss_selector,
            user_code_selector,
            user_data_selector,
        },
        tss,
        ap_id,
    });

    gdt.load();
    unsafe {
        CS::set_reg(code_selector);
        SS::set_reg(data_selector);
| DS::set_reg(data_selector); 120 | ES::set_reg(data_selector); 121 | 122 | load_tss(tss_selector); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/interrupts/idt.rs: -------------------------------------------------------------------------------- 1 | use crate::sched::context::TrapFrame; 2 | use core::arch::{asm, naked_asm}; 3 | use core::mem::size_of; 4 | use core::ptr::addr_of; 5 | 6 | use paste::paste; 7 | use seq_macro::seq; 8 | 9 | pub const IDT_ENTRIES: usize = 256; 10 | 11 | macro_rules! has_error_code_macro { 12 | (true) => { 13 | "nop" 14 | }; 15 | (false) => { 16 | "push 0" 17 | }; 18 | } 19 | 20 | macro_rules! interrupt_handler { 21 | ($interrupt_number:expr, $has_error_code:expr) => { 22 | paste! { 23 | #[naked] 24 | extern "C" fn []() { 25 | unsafe { 26 | naked_asm!( 27 | has_error_code_macro!($has_error_code), 28 | "push r15", 29 | "push r14", 30 | "push r13", 31 | "push r12", 32 | "push r11", 33 | "push r10", 34 | "push r9", 35 | "push r8", 36 | "push rdi", 37 | "push rsi", 38 | "push rdx", 39 | "push rcx", 40 | "push rbx", 41 | "push rax", 42 | "push rbp", 43 | concat!("mov rdi, ", $interrupt_number), 44 | "mov rsi, rsp", 45 | "call generic_interrupt_handler", 46 | "pop rbp", 47 | "pop rax", 48 | "pop rbx", 49 | "pop rcx", 50 | "pop rdx", 51 | "pop rsi", 52 | "pop rdi", 53 | "pop r8", 54 | "pop r9", 55 | "pop r10", 56 | "pop r11", 57 | "pop r12", 58 | "pop r13", 59 | "pop r14", 60 | "pop r15", 61 | "add rsp, 0x8", // skip error code 62 | "iretq" 63 | ) 64 | } 65 | } 66 | } 67 | }; 68 | } 69 | 70 | seq!(N in 0..=7 { interrupt_handler!(N, false); }); 71 | 72 | interrupt_handler!(8, true); 73 | interrupt_handler!(9, false); 74 | 75 | seq!(N in 10..=14 { interrupt_handler!(N, true); }); 76 | 77 | interrupt_handler!(15, false); 78 | interrupt_handler!(16, true); 79 | interrupt_handler!(17, true); 80 | interrupt_handler!(18, false); 81 | interrupt_handler!(19, false); 82 | 
interrupt_handler!(20, false); 83 | interrupt_handler!(21, true); 84 | interrupt_handler!(22, false); 85 | interrupt_handler!(23, false); 86 | interrupt_handler!(24, false); 87 | interrupt_handler!(25, false); 88 | interrupt_handler!(26, false); 89 | interrupt_handler!(27, false); 90 | interrupt_handler!(28, false); 91 | interrupt_handler!(29, true); 92 | interrupt_handler!(30, true); 93 | 94 | seq!(N in 31..256 { interrupt_handler!(N, false); }); 95 | 96 | #[repr(C, packed)] 97 | struct Idtr { 98 | size: u16, 99 | offset: u64, 100 | } 101 | 102 | impl Idtr { 103 | #[inline] 104 | const fn new(size: u16, offset: u64) -> Self { 105 | Self { size, offset } 106 | } 107 | 108 | #[inline(always)] 109 | unsafe fn load(&self) { 110 | asm!("lidt [{}]", in(reg) self, options(nostack)); 111 | } 112 | } 113 | 114 | #[derive(Copy, Clone)] 115 | pub(super) enum IRQHandler { 116 | Handler(fn(&mut TrapFrame)), 117 | None, 118 | } 119 | 120 | #[repr(C, packed)] 121 | pub struct IDTEntry { 122 | offset_low: u16, 123 | selector: u16, 124 | ist: u8, 125 | flags: u8, 126 | offset_mid: u16, 127 | offset_hi: u32, 128 | reserved: u32, 129 | } 130 | 131 | impl IDTEntry { 132 | const NULL: Self = Self { 133 | offset_low: 0x00, 134 | selector: 0x00, 135 | ist: 0x00, 136 | flags: 0x00, 137 | offset_mid: 0x00, 138 | offset_hi: 0x00, 139 | reserved: 0x00, 140 | }; 141 | 142 | pub(crate) fn set_handler(&mut self, handler: *const u8) { 143 | self.offset_low = handler as u16; 144 | self.offset_mid = (handler as usize >> 16) as u16; 145 | self.offset_hi = (handler as usize >> 32) as u32; 146 | self.flags = 0x8e; 147 | self.selector = 8; 148 | } 149 | } 150 | 151 | static mut IDT: [IDTEntry; IDT_ENTRIES] = [IDTEntry::NULL; IDT_ENTRIES]; 152 | 153 | pub fn init() { 154 | unsafe { 155 | seq!(N in 0..256 { 156 | #( 157 | IDT[N].set_handler(interrupt_handler~N as *const u8); 158 | )* 159 | }); 160 | 161 | let idtr = Idtr::new( 162 | (IDT_ENTRIES * size_of::() - 1) as u16, 163 | (addr_of!(IDT) as *const 
_) as u64, 164 | ); 165 | 166 | idtr.load(); 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/interrupts/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod idt; 2 | 3 | use crate::arch::amd64::apic::APIC; 4 | use crate::arch::amd64::{ports::outb, read_cr2}; 5 | use crate::dpc::dispatch_dpcs; 6 | use crate::drivers::ps2::keyboard::keyboard; 7 | use crate::sched::context::TrapFrame; 8 | use core::arch::asm; 9 | use core::sync::atomic::{compiler_fence, Ordering}; 10 | use idt::{IRQHandler, IDT_ENTRIES}; 11 | use libxernel::ipl::{get_ipl, raise_ipl, splx, IPL}; 12 | 13 | use super::apic::apic_spurious_interrupt; 14 | use libxernel::sync::SpinlockIRQ; 15 | 16 | static INTERRUPT_HANDLERS: SpinlockIRQ<[IRQHandler; IDT_ENTRIES]> = SpinlockIRQ::new([IRQHandler::None; IDT_ENTRIES]); 17 | 18 | pub fn init() { 19 | idt::init(); 20 | 21 | let mut handlers = INTERRUPT_HANDLERS.lock(); 22 | 23 | handlers[0xD] = IRQHandler::Handler(general_fault_handler); 24 | handlers[0xE] = IRQHandler::Handler(page_fault_handler); 25 | handlers[0x8] = IRQHandler::Handler(double_fault_handler); 26 | handlers[0xF0] = IRQHandler::Handler(apic_spurious_interrupt); 27 | // TODO: allocate vectors accordingly or manually set all known interrupt handlers here 28 | handlers[0x2f] = IRQHandler::Handler(dispatch_dpcs); 29 | handlers[0xd0] = IRQHandler::Handler(keyboard); 30 | } 31 | 32 | #[no_mangle] 33 | extern "sysv64" fn generic_interrupt_handler(isr: usize, ctx: *mut TrapFrame) { 34 | let new_ipl = IPL::from(isr >> 4); 35 | let current_ipl = get_ipl(); 36 | 37 | if (new_ipl as u8) < (current_ipl as u8) { 38 | panic!("IPL not less or equal"); 39 | } 40 | 41 | raise_ipl(new_ipl); 42 | enable(); 43 | 44 | let handlers = INTERRUPT_HANDLERS.lock(); 45 | 46 | let ctx = unsafe { &mut *ctx }; 47 | 48 | match &handlers[isr] { 49 | IRQHandler::Handler(handler) => { 50 | let handler = 
*handler; 51 | handlers.unlock(); 52 | handler(ctx); 53 | } 54 | 55 | IRQHandler::None => panic!("unhandled interrupt {}", isr), 56 | } 57 | 58 | if isr > 32 { 59 | APIC.eoi(); 60 | } 61 | 62 | disable(); 63 | 64 | splx(current_ipl); 65 | } 66 | 67 | #[inline] 68 | pub fn enable() { 69 | compiler_fence(Ordering::Release); 70 | unsafe { 71 | asm!("sti", options(nomem, nostack)); 72 | } 73 | } 74 | 75 | #[inline] 76 | pub fn disable() { 77 | compiler_fence(Ordering::Acquire); 78 | unsafe { 79 | asm!("cli", options(nomem, nostack)); 80 | } 81 | } 82 | 83 | pub fn allocate_vector(ipl: IPL) -> Option { 84 | let starting = core::cmp::max((ipl as u8) << 4, 32); 85 | 86 | let handlers = INTERRUPT_HANDLERS.lock(); 87 | 88 | for i in starting..starting + 16 { 89 | if let IRQHandler::None = handlers[i as usize] { 90 | return Some(i); 91 | } 92 | } 93 | 94 | None 95 | } 96 | 97 | pub fn register_handler(vector: u8, handler: fn(&mut TrapFrame)) { 98 | let mut handlers = INTERRUPT_HANDLERS.lock(); 99 | 100 | match handlers[vector as usize] { 101 | IRQHandler::None => {} 102 | _ => panic!("register_handler: handler has already been registered"), 103 | } 104 | 105 | handlers[vector as usize] = IRQHandler::Handler(handler); 106 | } 107 | 108 | fn double_fault_handler(frame: &mut TrapFrame) { 109 | dbg!("EXCEPTION: DOUBLE FAULT"); 110 | dbg!("{:#?}", frame); 111 | dbg!("{}", frame.error_code); 112 | println!("EXCEPTION: DOUBLE FAULT"); 113 | println!("{:#?}", frame); 114 | println!("{}", frame.error_code); 115 | loop { 116 | unsafe { 117 | asm!("hlt"); 118 | } 119 | } 120 | } 121 | 122 | fn page_fault_handler(frame: &mut TrapFrame) { 123 | dbg!("EXCEPTION: PAGE FAULT"); 124 | dbg!("Accessed Address: {:?}", read_cr2()); 125 | dbg!("Error Code: {:?}", frame.error_code); 126 | dbg!("{:#?}", frame); 127 | println!("EXCEPTION: PAGE FAULT"); 128 | println!("Accessed Address: {:?}", read_cr2()); 129 | println!("Error Code: {:?}", frame.error_code); 130 | println!("{:#?}", frame); 131 | 
loop { 132 | unsafe { 133 | asm!("hlt"); 134 | } 135 | } 136 | } 137 | 138 | fn general_fault_handler(frame: &mut TrapFrame) { 139 | dbg!("EXCEPTION: GENERAL PROTECTION FAULT"); 140 | dbg!("{:?}", frame); 141 | dbg!("{:b}", frame.error_code); 142 | println!("EXCEPTION: GENERAL PROTECTION FAULT"); 143 | println!("{:?}", frame); 144 | println!("{}", frame.error_code); 145 | unsafe { 146 | asm!("hlt"); 147 | } 148 | } 149 | 150 | /// Disable Programmable Interrupt Controller. 151 | pub fn disable_pic() { 152 | unsafe { 153 | // Set ICW1 154 | outb(0x20, 0x11); 155 | outb(0xa0, 0x11); 156 | 157 | // Set IWC2 (IRQ base offsets) 158 | outb(0x21, 0x20); 159 | outb(0xa1, 0x28); 160 | 161 | // Set ICW3 162 | outb(0x21, 4); 163 | outb(0xa1, 2); 164 | 165 | // Set ICW4 166 | outb(0x21, 1); 167 | outb(0xa1, 1); 168 | 169 | // Set OCW1 (interrupt masks) 170 | outb(0x21, 0xff); 171 | outb(0xa1, 0xff); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/ioapic.rs: -------------------------------------------------------------------------------- 1 | use acpi_parsing::platform::interrupt::Apic; 2 | use alloc::vec::Vec; 3 | use x86_64::structures::paging::PageTableFlags; 4 | use x86_64::{PhysAddr, VirtAddr}; 5 | 6 | use crate::acpi::ACPI; 7 | use crate::mem::{paging::KERNEL_PAGE_MAPPER, HIGHER_HALF_OFFSET}; 8 | use crate::{dbg, debug}; 9 | 10 | pub struct IOApic { 11 | id: u8, 12 | address: u64, 13 | interrupt_base: u32, 14 | } 15 | 16 | impl IOApic { 17 | pub unsafe fn read(&self, reg: u32) -> u32 { 18 | ((self.address) as *mut u32).write_volatile(reg); 19 | ((self.address + 0x10) as *const u32).read_volatile() 20 | } 21 | 22 | pub unsafe fn write(&mut self, reg: u32, val: u32) { 23 | ((self.address) as *mut u32).write_volatile(reg); 24 | ((self.address + 0x10) as *mut u32).write_volatile(val); 25 | } 26 | 27 | pub unsafe fn mask_irq(&mut self) { 28 | todo!(); 29 | } 30 | 31 | pub unsafe fn unmask_irq(&mut 
self) { 32 | todo!(); 33 | } 34 | 35 | pub unsafe fn write_irq( 36 | &mut self, 37 | irq_number: u8, 38 | irq_vector: u8, 39 | apic_id: u8, 40 | level_sensitive: bool, 41 | low_priority: bool, 42 | ) { 43 | let redirection_entry = (0x10 + irq_number * 2) as u32; 44 | 45 | if !(0x10..=0xFE).contains(&irq_vector) { 46 | dbg!("[IOAPIC] write_irq: bad irq_vector {}", irq_vector); 47 | } 48 | 49 | if apic_id > 15 { 50 | dbg!("[IOAPIC] write_irq: bad apic_id {}", apic_id); 51 | } 52 | 53 | let mut val = irq_vector as _; 54 | 55 | if low_priority { 56 | val |= 1 << 8; 57 | } 58 | 59 | // level_sensitive describes if edge or level sensitive 60 | // true stands for level sensitive therefore setting the according bit 61 | if level_sensitive { 62 | val |= 1 << 15; 63 | } 64 | 65 | // redirection_entry has to be accessed as two 32-bit registers 66 | // creating own write value for higher reigster, since only receiver apic id is set in the 67 | // reg 68 | let destination_field: u32 = (apic_id as u32) << 24; 69 | 70 | self.write(redirection_entry, val); 71 | self.write(redirection_entry + 1, destination_field); 72 | } 73 | 74 | pub fn init(&mut self, apic_info: &Apic) { 75 | debug!("{:?}", apic_info.io_apics); 76 | 77 | let mut mapper = KERNEL_PAGE_MAPPER.lock(); 78 | mapper.map_range( 79 | PhysAddr::new(self.address - *HIGHER_HALF_OFFSET), 80 | VirtAddr::new(self.address), 81 | 0x2000, 82 | PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE, 83 | true, 84 | ); 85 | 86 | unsafe { 87 | self.write_irq(1, 0xd0, 0, false, true); 88 | debug!("IOAPICID: {:b}", self.read(0)); 89 | debug!("IOAPICVER: {:b}", self.read(1)); 90 | debug!("IOAPICARB: {:b}", self.read(2)); 91 | } 92 | } 93 | } 94 | 95 | pub fn init(io_apics: &mut Vec) { 96 | let apic_info = ACPI.get_apic(); 97 | 98 | for ioapic in apic_info.io_apics.iter() { 99 | io_apics.push(IOApic { 100 | id: ioapic.id, 101 | address: (ioapic.address as u64) + *HIGHER_HALF_OFFSET, 102 | interrupt_base: 
ioapic.global_system_interrupt_base, 103 | }); 104 | } 105 | 106 | let ioapic = io_apics.first_mut().unwrap(); 107 | ioapic.init(&apic_info); 108 | } 109 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/lapic.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::Ordering; 2 | use core::time::Duration; 3 | 4 | use crate::acpi::hpet; 5 | use crate::arch::amd64::rdmsr; 6 | use crate::mem::paging::KERNEL_PAGE_MAPPER; 7 | use crate::mem::HIGHER_HALF_OFFSET; 8 | use x86_64::structures::paging::{Page, PageTableFlags, PhysFrame, Size4KiB}; 9 | use x86_64::{PhysAddr, VirtAddr}; 10 | 11 | use super::tsc::TSC_TICKS_PER_MS; 12 | use super::wrmsr; 13 | 14 | const IA32_APIC_BASE_MSR: u32 = 0x1B; 15 | const IA32_TSC_DEADLINE_MSR: u32 = 0x6E0; 16 | 17 | const LAPICRegID: u64 = 0x20; 18 | const LAPICRegTPR: u64 = 0x80; // Task Priority Register 19 | const LAPICRegEOI: u64 = 0xB0; 20 | const LAPICRegSpurious: u64 = 0xF0; 21 | const LAPICRegICR0: u64 = 0x300; // Interrupt Command Register 22 | const LAPICRegICR1: u64 = 0x310; 23 | const LAPICRegTimer: u64 = 0x320; 24 | const LAPICRegTimerInitial: u64 = 0x380; 25 | const LAPICRegTimerCurrentCount: u64 = 0x390; 26 | const LAPICRegTimerDivider: u64 = 0x3e0; 27 | 28 | pub struct LocalApic { 29 | address: u64, 30 | frequency: u64, 31 | } 32 | 33 | impl LocalApic { 34 | pub fn new() -> Self { 35 | let mut mapper = KERNEL_PAGE_MAPPER.lock(); 36 | 37 | let mut apic_base = unsafe { rdmsr(IA32_APIC_BASE_MSR) }; 38 | 39 | // INFO: IA32_APIC_BASE_MSR contains two flags on bit 8 and bit 11 40 | // BSP flag, bit 8 ⎯ Indicates if the processor is the bootstrap processor (BSP). 
41 | // APIC Global Enable flag, bit 11 ⎯ Enables or disables the local APIC 42 | // To get the local apic base address, bit range 12 - 35, we set the flag bits to zero 43 | apic_base &= !(1 << 8); 44 | apic_base &= !(1 << 11); 45 | 46 | debug!("apic base: {:x}", apic_base); 47 | 48 | mapper.map::( 49 | PhysFrame::containing_address(PhysAddr::new(apic_base)), 50 | Page::containing_address(VirtAddr::new(apic_base + *HIGHER_HALF_OFFSET)), 51 | PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE, 52 | true, 53 | ); 54 | 55 | let mut lapic = LocalApic { 56 | address: apic_base + *HIGHER_HALF_OFFSET, 57 | frequency: 0, 58 | }; 59 | 60 | lapic.enable_apic(); 61 | lapic.init_timer_frequency(); 62 | 63 | lapic 64 | } 65 | 66 | pub unsafe fn read(&self, reg: u64) -> u32 { 67 | ((self.address + reg) as *const u32).read_volatile() 68 | } 69 | 70 | pub unsafe fn write(&self, reg: u64, val: u32) { 71 | ((self.address + reg) as *mut u32).write_volatile(val); 72 | } 73 | 74 | pub fn lapic_id(&self) -> u32 { 75 | unsafe { self.read(LAPICRegID) } 76 | } 77 | 78 | pub fn eoi(&self) { 79 | unsafe { self.write(LAPICRegEOI, 0) } 80 | } 81 | 82 | // Spurious interrupt vector 83 | pub fn siv(&self) -> u32 { 84 | unsafe { self.read(LAPICRegSpurious) } 85 | } 86 | 87 | pub fn set_siv(&self, value: u32) { 88 | unsafe { self.write(LAPICRegSpurious, value) } 89 | } 90 | 91 | pub fn enable_apic(&self) { 92 | unsafe { 93 | self.set_siv(0x1ff); 94 | 95 | // set the task priority to 0 96 | self.write(LAPICRegTPR, 0); 97 | } 98 | } 99 | 100 | pub fn periodic_timer(&self, int_no: u8, period: &Duration) { 101 | let mut apic_ticks = self.frequency * period.as_micros() as u64 / (1000 * 1000); 102 | apic_ticks /= 16; 103 | 104 | unsafe { 105 | // set divider to 16 106 | self.write(LAPICRegTimerDivider, 3); 107 | 108 | // set the interrupt vector & oneshot mode 109 | self.write(LAPICRegTimer, (1 << 17) | int_no as u32); 110 | 111 | // set the counter to the calculated value 
112 | self.write(LAPICRegTimerInitial, apic_ticks as u32); 113 | } 114 | } 115 | 116 | pub fn oneshot(&self, int_no: u8, deadline: &Duration) { 117 | let mut apic_ticks = self.frequency * deadline.as_micros() as u64 / (1000 * 1000); 118 | apic_ticks /= 16; 119 | 120 | unsafe { 121 | // set divider to 16 122 | self.write(LAPICRegTimerDivider, 3); 123 | 124 | // set the interrupt vector & periodic mode 125 | self.write(LAPICRegTimer, int_no as u32); 126 | 127 | // set the counter to the calculated value 128 | self.write(LAPICRegTimerInitial, apic_ticks as u32); 129 | } 130 | } 131 | 132 | pub fn deadline(&self, int_no: u8, deadline: &Duration) { 133 | unsafe { 134 | // set the interrupt vector & deadline mode 135 | self.write(LAPICRegTimer, (2 << 17) | int_no as u32); 136 | 137 | wrmsr( 138 | IA32_TSC_DEADLINE_MSR, 139 | deadline.as_millis() as u64 * TSC_TICKS_PER_MS.load(Ordering::Acquire), 140 | ); 141 | } 142 | } 143 | 144 | pub fn stop(&self) { 145 | unsafe { 146 | self.write(LAPICRegTimerInitial, 0); 147 | } 148 | } 149 | 150 | pub fn send_ipi(&self, lapic_id: u32, vec: u32) { 151 | unsafe { 152 | self.write(LAPICRegICR1, lapic_id << 24); 153 | self.write(LAPICRegICR0, vec); 154 | } 155 | } 156 | 157 | pub fn init_timer_frequency(&mut self) { 158 | unsafe { 159 | // set the divisor to 1 160 | self.write(LAPICRegTimerDivider, 0b1011); 161 | 162 | let hpet_cycles_to_wait = hpet::frequency() / 1000; 163 | 164 | let hpet_start_counter = hpet::read_main_counter(); 165 | 166 | // set the initial count to 0xffffffff 167 | self.write(LAPICRegTimerInitial, 0xffffffff); 168 | 169 | // wait for 10 ms 170 | while hpet::read_main_counter() - hpet_start_counter < hpet_cycles_to_wait {} 171 | 172 | let apic_ticks = 0xffffffff - self.read(LAPICRegTimerCurrentCount); 173 | 174 | let hpet_end_counter = hpet::read_main_counter(); 175 | 176 | let hpet_ticks = hpet_end_counter - hpet_start_counter; 177 | 178 | let apic_frequency = apic_ticks as u64 * hpet::frequency() / hpet_ticks; 
179 | 180 | debug!("APIC Ticks {} in 10ms", apic_ticks); 181 | debug!("APIC Ticks per ms {}", apic_ticks); 182 | debug!("APIC Frequency: {} Hz", apic_frequency); 183 | debug!("HPET Frequency: {} Hz", hpet::frequency()); 184 | debug!("HPET Ticks {}", hpet_ticks); 185 | self.frequency = apic_frequency; 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod apic; 2 | pub mod cpuid; 3 | pub mod gdt; 4 | pub mod interrupts; 5 | mod ioapic; 6 | mod lapic; 7 | pub mod ports; 8 | pub mod tsc; 9 | 10 | use crate::arch::amd64::apic::APIC; 11 | use crate::cpu::register_cpu; 12 | use crate::sched::context::Context; 13 | use crate::KERNEL_PAGE_MAPPER; 14 | use core::arch::{asm, global_asm}; 15 | use libxernel::ipl::IPL; 16 | use limine::SmpInfo; 17 | use x86_64::VirtAddr; 18 | 19 | global_asm!(include_str!("switch.S")); 20 | 21 | extern "C" { 22 | pub fn switch_context(old: *mut *mut Context, new: *const Context); 23 | } 24 | 25 | #[no_mangle] 26 | pub extern "C" fn x86_64_ap_main(boot_info: *const SmpInfo) -> ! 
{ 27 | let boot_info = unsafe { &*boot_info }; 28 | let ap_id = boot_info.processor_id as usize; 29 | 30 | { 31 | let kernel_page_mapper = KERNEL_PAGE_MAPPER.lock(); 32 | unsafe { 33 | kernel_page_mapper.load_pt(); 34 | } 35 | } 36 | 37 | info!("booting CPU {:?}", boot_info); 38 | 39 | gdt::init_ap(ap_id); 40 | info!("CPU{}: gdt initialized", ap_id); 41 | 42 | interrupts::init(); 43 | info!("CPU{}: idt initialized", ap_id); 44 | 45 | register_cpu(); 46 | info!("CPU{}: cpu registered", ap_id); 47 | 48 | APIC.enable_apic(); 49 | info!("CPU{}: apic initialized", ap_id); 50 | 51 | hcf(); 52 | } 53 | 54 | #[inline] 55 | pub fn read_cr2() -> VirtAddr { 56 | let value: u64; 57 | 58 | unsafe { 59 | asm!("mov {}, cr2", out(reg) value, options(nomem, nostack, preserves_flags)); 60 | 61 | VirtAddr::new(value) 62 | } 63 | } 64 | 65 | #[inline] 66 | pub fn read_cr8() -> IPL { 67 | let value: u64; 68 | 69 | unsafe { 70 | asm!("mov {}, cr8", out(reg) value, options(nomem, nostack, preserves_flags)); 71 | } 72 | IPL::from(value) 73 | } 74 | 75 | #[inline] 76 | pub fn write_cr8(ipl: IPL) { 77 | unsafe { 78 | asm!("mov cr8, {}", in(reg) ipl as u64, options(nomem, nostack, preserves_flags)); 79 | } 80 | } 81 | 82 | pub const FS_BASE: u32 = 0xC0000100; 83 | pub const GS_BASE: u32 = 0xC0000101; 84 | pub const KERNEL_GS_BASE: u32 = 0xC0000102; 85 | 86 | #[inline] 87 | pub unsafe fn wrmsr(msr: u32, value: u64) { 88 | let low = value as u32; 89 | let high = (value >> 32) as u32; 90 | asm!("wrmsr", in("ecx") msr, in("eax") low, in("edx") high); 91 | } 92 | 93 | #[inline] 94 | pub unsafe fn rdmsr(msr: u32) -> u64 { 95 | let (high, low): (u32, u32); 96 | unsafe { 97 | asm!("rdmsr", out("eax") low, out("edx") high, in("ecx") msr); 98 | } 99 | ((high as u64) << 32) | (low as u64) 100 | } 101 | 102 | pub fn hcf() -> ! 
{ 103 | unsafe { 104 | loop { 105 | asm!("hlt"); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/ports.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | #[inline] 4 | pub unsafe fn outb(port: u16, value: u8) { 5 | asm!( 6 | "out dx, al", 7 | in("dx") port, 8 | in("al") value, 9 | options(preserves_flags, nomem, nostack) 10 | ); 11 | } 12 | 13 | #[inline] 14 | pub unsafe fn inb(port: u16) -> u8 { 15 | let ret: u8; 16 | 17 | asm!( 18 | "in al, dx", 19 | in("dx") port, 20 | out("al") ret, 21 | options(preserves_flags, nomem, nostack) 22 | ); 23 | 24 | ret 25 | } 26 | 27 | #[inline] 28 | pub unsafe fn outw(port: u16, value: u16) { 29 | asm!( 30 | "out dx, ax", 31 | in("dx") port, 32 | in("ax") value, 33 | options(preserves_flags, nomem, nostack) 34 | ); 35 | } 36 | 37 | #[inline] 38 | pub unsafe fn inw(port: u16) -> u16 { 39 | let ret: u16; 40 | 41 | asm!( 42 | "in ax, dx", 43 | out("ax") ret, 44 | in("dx") port, 45 | options(nomem, nostack, preserves_flags) 46 | ); 47 | 48 | ret 49 | } 50 | 51 | #[inline] 52 | pub unsafe fn outl(port: u16, value: u32) { 53 | asm!( 54 | "out dx, eax", 55 | in("dx") port, 56 | in("eax") value, 57 | options(preserves_flags, nomem, nostack) 58 | ); 59 | } 60 | 61 | #[inline] 62 | pub unsafe fn inl(port: u16) -> u32 { 63 | let ret: u32; 64 | 65 | asm!( 66 | "in eax, dx", 67 | in("dx") port, 68 | out("eax") ret, 69 | options(nomem, nostack, preserves_flags) 70 | ); 71 | 72 | ret 73 | } 74 | -------------------------------------------------------------------------------- /kernel/src/arch/amd64/switch.S: -------------------------------------------------------------------------------- 1 | .global switch_context 2 | 3 | switch_context: 4 | sub rsp, 0x30 5 | mov [rsp + 0x00], rbx 6 | mov [rsp + 0x08], rbp 7 | mov [rsp + 0x10], r12 8 | mov [rsp + 0x18], r13 9 | mov [rsp + 0x20], r14 10 | mov 
[rsp + 0x28], r15 11 | mov [rdi], rsp 12 | 13 | mov rsp, rsi 14 | mov rbx, [rsp + 0x00] 15 | mov rbp, [rsp + 0x08] 16 | mov r12, [rsp + 0x10] 17 | mov r13, [rsp + 0x18] 18 | mov r14, [rsp + 0x20] 19 | mov r15, [rsp + 0x28] 20 | add rsp, 0x30 21 | ret -------------------------------------------------------------------------------- /kernel/src/arch/amd64/tsc.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | arch::asm, 3 | sync::atomic::{AtomicU64, Ordering}, 4 | }; 5 | 6 | use crate::acpi::hpet; 7 | 8 | pub static TSC_TICKS_PER_MS: AtomicU64 = AtomicU64::new(0); 9 | 10 | // TODO: Use TSC Deadshot mode for apic 11 | pub fn calibrate_tsc() { 12 | let start: u64 = rdtsc(); 13 | hpet::sleep(10_000_000); 14 | let end: u64 = rdtsc(); 15 | 16 | println!("start: {} end: {}", start, end); 17 | 18 | let ticks_per_ms = (end - start) / 10; 19 | 20 | println!("ticks_per_ms: {}", ticks_per_ms); 21 | 22 | TSC_TICKS_PER_MS.store(ticks_per_ms, Ordering::SeqCst); 23 | } 24 | 25 | pub fn rdtsc() -> u64 { 26 | let ret: u64; 27 | 28 | unsafe { 29 | asm!("rdtsc", lateout("rax") ret); 30 | } 31 | 32 | ret 33 | } 34 | -------------------------------------------------------------------------------- /kernel/src/arch/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod amd64; 2 | -------------------------------------------------------------------------------- /kernel/src/cpu.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::amd64::apic::APIC; 2 | use crate::arch::amd64::{rdmsr, wrmsr, KERNEL_GS_BASE}; 3 | use crate::dpc::{DpcCall, DpcQueue}; 4 | use crate::sched::process::Process; 5 | use crate::sched::thread::Thread; 6 | use crate::timer::timer_event::TimerEvent; 7 | use crate::timer::timer_queue::TimerQueue; 8 | use alloc::boxed::Box; 9 | use alloc::collections::VecDeque; 10 | use alloc::sync::Arc; 11 | use alloc::vec::Vec; 12 | use 
core::cell::{Cell, UnsafeCell}; 13 | use core::ops::Deref; 14 | use core::pin::Pin; 15 | use core::sync::atomic::{AtomicUsize, Ordering}; 16 | use libxernel::ipl::IPL; 17 | use libxernel::sync::{Once, Spinlock}; 18 | 19 | static CPU_ID_COUNTER: AtomicUsize = AtomicUsize::new(0); 20 | 21 | pub static CPU_COUNT: Once = Once::new(); 22 | 23 | pub struct PerCpu { 24 | data: UnsafeCell>, 25 | } 26 | 27 | unsafe impl Send for PerCpu {} 28 | unsafe impl Sync for PerCpu {} 29 | 30 | impl PerCpu { 31 | pub const fn new() -> Self { 32 | Self { 33 | data: UnsafeCell::new(Vec::::new()), 34 | } 35 | } 36 | 37 | pub fn init(&self, init_fn: fn() -> T) { 38 | assert_eq!(*CPU_COUNT, CPU_ID_COUNTER.load(Ordering::SeqCst)); 39 | 40 | let vec = unsafe { &mut *self.data.get() }; 41 | 42 | for _ in 0..*CPU_COUNT { 43 | vec.push(init_fn()); 44 | } 45 | } 46 | 47 | pub fn wait_until_initialized(&self) { 48 | loop { 49 | let vec = unsafe { &*self.data.get() }; 50 | 51 | if vec.len() == *CPU_COUNT { 52 | break; 53 | } 54 | 55 | core::hint::spin_loop(); 56 | } 57 | } 58 | 59 | fn check_initialized(&self) { 60 | let vec = unsafe { &*self.data.get() }; 61 | 62 | assert_eq!(vec.len(), *CPU_COUNT); 63 | } 64 | 65 | pub fn get(&self) -> &T { 66 | self.check_initialized(); 67 | 68 | let cpu_id = current_cpu().cpu_id; 69 | let vec = unsafe { &mut *self.data.get() }; 70 | &vec[cpu_id] 71 | } 72 | 73 | #[allow(clippy::mut_from_ref)] 74 | pub fn get_mut(&self) -> &mut T { 75 | self.check_initialized(); 76 | 77 | let cpu_id = current_cpu().cpu_id; 78 | let vec = unsafe { &mut *self.data.get() }; 79 | &mut vec[cpu_id] 80 | } 81 | 82 | pub unsafe fn get_index(&self, index: usize) -> &T { 83 | self.check_initialized(); 84 | 85 | let vec = unsafe { &mut *self.data.get() }; 86 | &vec[index] 87 | } 88 | 89 | #[allow(clippy::mut_from_ref)] 90 | pub unsafe fn get_index_mut(&self, index: usize) -> &mut T { 91 | self.check_initialized(); 92 | 93 | let vec = unsafe { &mut *self.data.get() }; 94 | &mut vec[index] 
95 | } 96 | 97 | pub unsafe fn get_all(&self) -> &Vec { 98 | self.check_initialized(); 99 | 100 | &*self.data.get() 101 | } 102 | 103 | #[allow(clippy::mut_from_ref)] 104 | pub unsafe fn get_all_mut(&self) -> &mut Vec { 105 | self.check_initialized(); 106 | 107 | &mut *self.data.get() 108 | } 109 | } 110 | 111 | impl Deref for PerCpu { 112 | type Target = T; 113 | 114 | fn deref(&self) -> &Self::Target { 115 | self.get() 116 | } 117 | } 118 | 119 | #[repr(C, align(8))] 120 | pub struct Cpu { 121 | // NOTE: don't move these variables as we need to access them from assembly 122 | user_space_stack: usize, 123 | pub kernel_stack: Cell, 124 | 125 | cpu_id: usize, 126 | pub lapic_id: u32, 127 | pub run_queue: Spinlock>>, 128 | pub wait_queue: Spinlock>>, 129 | pub current_thread: Spinlock>>, 130 | pub idle_thread: Arc, 131 | 132 | pub timer_queue: Spinlock, 133 | pub dpc_queue: Spinlock, 134 | pub next: Spinlock>>, 135 | } 136 | 137 | impl Cpu { 138 | pub fn enqueue_timer(&self, event: TimerEvent) { 139 | self.timer_queue.aquire_at(IPL::High).enqueue(event); 140 | } 141 | 142 | pub fn enqueue_dpc(&self, dpc: Box) { 143 | self.dpc_queue.aquire_at(IPL::High).enqueue(dpc); 144 | } 145 | 146 | pub fn enqueue_thread(&self, thread: Arc) { 147 | self.run_queue.aquire().push_back(thread) 148 | } 149 | } 150 | 151 | pub fn register_cpu() { 152 | let cpu_id = CPU_ID_COUNTER.fetch_add(1, Ordering::SeqCst); 153 | let lapic_id = APIC.lapic_id(); 154 | 155 | let cpu_data = Box::leak(Box::new(Cpu { 156 | user_space_stack: 0, 157 | kernel_stack: Cell::new(0), 158 | cpu_id, 159 | lapic_id, 160 | run_queue: Spinlock::new(VecDeque::new()), 161 | wait_queue: Spinlock::new(VecDeque::new()), 162 | current_thread: Spinlock::new(None), 163 | idle_thread: Arc::new(Thread::idle_thread()), 164 | timer_queue: Spinlock::new(TimerQueue::new()), 165 | dpc_queue: Spinlock::new(DpcQueue::new()), 166 | next: Spinlock::new(None), 167 | })); 168 | 169 | // use KERNEL_GS_BASE to store the cpu_data 170 | 
unsafe { wrmsr(KERNEL_GS_BASE, (cpu_data as *const Cpu).expose_provenance() as u64) } 171 | } 172 | 173 | pub fn current_cpu() -> Pin<&'static Cpu> { 174 | if !CPU_COUNT.is_completed() || *CPU_COUNT != CPU_ID_COUNTER.load(Ordering::SeqCst) { 175 | panic!("current_cpu called before all cpus registered"); 176 | } 177 | 178 | unsafe { Pin::new_unchecked(&*core::ptr::with_exposed_provenance(rdmsr(KERNEL_GS_BASE) as usize)) } 179 | } 180 | 181 | pub fn current_thread() -> Arc { 182 | current_cpu() 183 | .current_thread 184 | .aquire() 185 | .clone() 186 | .unwrap_or(current_cpu().idle_thread.clone()) 187 | } 188 | 189 | pub fn current_process() -> Arc> { 190 | current_thread() 191 | .get_process() 192 | .unwrap_or_else(|| panic!("current_process called with no current process")) 193 | } 194 | 195 | pub fn wait_until_cpus_registered() { 196 | while CPU_ID_COUNTER.load(Ordering::SeqCst) != *CPU_COUNT { 197 | core::hint::spin_loop(); 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /kernel/src/dpc.rs: -------------------------------------------------------------------------------- 1 | use core::ops::RangeBounds; 2 | 3 | use alloc::{boxed::Box, collections::VecDeque}; 4 | use libxernel::ipl::{get_ipl, raise_ipl, splx, IPL}; 5 | 6 | use crate::{ 7 | arch::amd64::{apic::APIC, write_cr8}, 8 | cpu::current_cpu, 9 | sched::{context::TrapFrame, scheduler::switch_threads}, 10 | }; 11 | 12 | pub trait DpcCall { 13 | fn call(self: Box); 14 | } 15 | 16 | pub enum DpcState { 17 | Unbound, 18 | Bound, 19 | Running, 20 | } 21 | 22 | pub struct Dpc { 23 | pub callback: fn(T), 24 | pub arg: T, 25 | state: DpcState, 26 | } 27 | 28 | pub struct DpcQueue { 29 | pub dpcs: VecDeque>, 30 | } 31 | 32 | impl DpcCall for Dpc { 33 | fn call(self: Box) { 34 | (self.callback)(self.arg) 35 | } 36 | } 37 | 38 | impl Dpc { 39 | pub fn new(callback: fn(T), data: T) -> Self { 40 | Self { 41 | callback, 42 | arg: data, 43 | state: DpcState::Unbound, 44 
| } 45 | } 46 | } 47 | 48 | impl DpcQueue { 49 | pub fn new() -> Self { 50 | Self { dpcs: VecDeque::new() } 51 | } 52 | 53 | pub fn enqueue(&mut self, dpc: Box) { 54 | self.dpcs.push_back(dpc); 55 | } 56 | 57 | pub fn drain(&mut self, range: R) -> VecDeque> 58 | where 59 | R: RangeBounds, 60 | { 61 | let mut dpcs: VecDeque> = VecDeque::new(); 62 | 63 | self.dpcs.drain(range).for_each(|dpc| dpcs.push_front(dpc)); 64 | dpcs 65 | } 66 | 67 | pub fn dequeue(&mut self) -> Option> { 68 | self.dpcs.pop_front() 69 | } 70 | } 71 | 72 | pub fn enqueue_dpc(dpc: Box) { 73 | if get_ipl() < IPL::DPC { 74 | let ipl = raise_ipl(IPL::DPC); 75 | 76 | dpc.call(); 77 | 78 | splx(ipl); 79 | return; 80 | } 81 | 82 | current_cpu().enqueue_dpc(dpc); 83 | raise_dpc_interrupt() 84 | } 85 | 86 | pub fn raise_dpc_interrupt() { 87 | APIC.send_ipi(current_cpu().lapic_id, 0x2f) 88 | } 89 | 90 | pub fn dispatch_dpcs(_: &mut TrapFrame) { 91 | APIC.eoi(); 92 | let cpu = current_cpu(); 93 | 94 | assert!(get_ipl() == IPL::DPC); 95 | 96 | while let Some(dpc) = { 97 | let old = raise_ipl(IPL::High); 98 | let mut lock = cpu.dpc_queue.lock(); 99 | let dpc = lock.dequeue(); 100 | write_cr8(old); 101 | dpc 102 | } { 103 | dpc.call(); 104 | } 105 | 106 | let old = cpu.current_thread.aquire().clone(); 107 | let new = cpu.next.aquire().clone(); 108 | 109 | if old.is_some() && new.is_some() { 110 | **cpu.next.aquire() = None; 111 | let ipl = get_ipl(); 112 | 113 | switch_threads(old.unwrap(), new.unwrap()); 114 | splx(ipl); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /kernel/src/drivers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod ps2; 2 | -------------------------------------------------------------------------------- /kernel/src/drivers/ps2/keyboard.rs: -------------------------------------------------------------------------------- 1 | use crate::{arch::amd64::ports::inb, 
sched::context::TrapFrame}; 2 | 3 | pub fn keyboard(_: &mut TrapFrame) { 4 | dbg!("keyboard hit"); 5 | let scancode = unsafe { inb(0x60) }; 6 | dbg!("scancode: {}", scancode); 7 | debug!("scancode: {}", scancode); 8 | } 9 | -------------------------------------------------------------------------------- /kernel/src/drivers/ps2/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod keyboard; 2 | -------------------------------------------------------------------------------- /kernel/src/framebuffer/mod.rs: -------------------------------------------------------------------------------- 1 | mod font; 2 | 3 | use core::ptr::copy; 4 | 5 | use crate::{framebuffer::font::FONT, utils::limine_module}; 6 | use libxernel::sync::{Once, Spinlock}; 7 | use limine::{File, FramebufferRequest}; 8 | 9 | use limine::Framebuffer as LimineFramebuffer; 10 | 11 | /// A struct providing information about the framebuffer 12 | pub struct Framebuffer { 13 | /// Current position (in byte) in the pixel framebuffer 14 | cursor: u64, 15 | /// How many characters were printed on the line already 16 | char_current_line: u8, 17 | /// Current selected color 18 | color: Color, 19 | /// Address where the framebuffer should print 20 | address: *mut u8, 21 | } 22 | 23 | /// Type to represent a RGB color value 24 | pub struct Color { 25 | r: u8, 26 | g: u8, 27 | b: u8, 28 | } 29 | 30 | static FRAMEBUFFER_REQUEST: FramebufferRequest = FramebufferRequest::new(0); 31 | 32 | /// [`Framebuffer`] wrapped in a [`Spinlock`] for static usage 33 | pub static FRAMEBUFFER: Spinlock = Spinlock::new(Framebuffer { 34 | cursor: 0, 35 | char_current_line: 0, 36 | color: Color { 37 | r: 0xff, 38 | g: 0xff, 39 | b: 0xff, 40 | }, 41 | address: core::ptr::null_mut(), 42 | }); 43 | 44 | pub static FRAMEBUFFER_DATA: Once<&'static LimineFramebuffer> = Once::new(); 45 | 46 | pub fn init() { 47 | FRAMEBUFFER_DATA.set_once( 48 | FRAMEBUFFER_REQUEST 49 | .get_response() 50 | .get() 51 
| .expect("limine-protocol: invalid framebuffer response") 52 | .framebuffers() 53 | .first() 54 | .expect("limine-protocol: could not get first framebuffer"), 55 | ); 56 | 57 | // show start image 58 | let img_file = limine_module::get_limine_module("logo").unwrap(); 59 | 60 | unsafe { 61 | let mut framebuffer = FRAMEBUFFER.lock(); 62 | 63 | framebuffer.address = FRAMEBUFFER_DATA 64 | .address 65 | .as_ptr() 66 | .expect("Could not get framebuffer address") 67 | .cast::(); 68 | 69 | framebuffer.show_bitmap_image(img_file); 70 | } 71 | } 72 | 73 | impl Framebuffer { 74 | /// Prints a single character to the framebuffer 75 | /// 76 | /// Writes a single given character (from the included FONT) to the framebuffer 77 | /// Also implements the support for downscrolling the framebuffer 78 | unsafe fn putc(&mut self, character: char) { 79 | debug_assert!(character.is_ascii()); 80 | 81 | let c = character as u8; 82 | 83 | let mut index: u16 = 0; 84 | 85 | if u64::from(self.char_current_line) == (FRAMEBUFFER_DATA.width / 9) { 86 | self.char_current_line = 0; 87 | 88 | self.cursor -= self.cursor % FRAMEBUFFER_DATA.pitch; 89 | self.cursor += FRAMEBUFFER_DATA.pitch * 17; 90 | } 91 | 92 | if self.cursor >= self.length() - FRAMEBUFFER_DATA.pitch * 17 { 93 | self.cursor -= FRAMEBUFFER_DATA.pitch * 17; 94 | 95 | copy( 96 | self.address.add((FRAMEBUFFER_DATA.pitch * 17) as usize), 97 | self.address, 98 | (self.length() - FRAMEBUFFER_DATA.pitch * 17) as usize, 99 | ); 100 | 101 | for i in 0..FRAMEBUFFER_DATA.pitch * 17 { 102 | self.address.add((self.cursor + i) as usize).write_volatile(0x00); 103 | } 104 | } 105 | 106 | if character == '\n' { 107 | self.cursor -= self.cursor % FRAMEBUFFER_DATA.pitch; 108 | self.cursor += FRAMEBUFFER_DATA.pitch * 17; 109 | 110 | self.char_current_line = 0; 111 | 112 | return; 113 | } 114 | 115 | if character == '\t' { 116 | self.cursor += 32 * 4 * 4; 117 | 118 | return; 119 | } 120 | 121 | if character != ' ' { 122 | index = (c as u16 - 32) * 16; 
123 | } 124 | 125 | self.char_current_line += 1; 126 | 127 | for i in index..index + 16 { 128 | let bitmap: u8 = FONT[i as usize]; 129 | 130 | for j in 0..8 { 131 | if (bitmap & (1 << (7 - j))) >= 1 { 132 | self.address.add(self.cursor as usize).write_volatile(self.color.b); 133 | self.address 134 | .add((self.cursor + 1) as usize) 135 | .write_volatile(self.color.g); 136 | self.address 137 | .add((self.cursor + 2) as usize) 138 | .write_volatile(self.color.r); 139 | } 140 | self.cursor += (FRAMEBUFFER_DATA.bpp / 8) as u64; 141 | } 142 | self.cursor -= FRAMEBUFFER_DATA.bpp as u64; 143 | self.cursor += FRAMEBUFFER_DATA.pitch; 144 | } 145 | 146 | self.cursor += FRAMEBUFFER_DATA.bpp as u64; 147 | self.cursor += (FRAMEBUFFER_DATA.bpp / 8) as u64; 148 | 149 | self.cursor -= FRAMEBUFFER_DATA.pitch * 16; 150 | } 151 | 152 | /// Prints a string to the framebuffer 153 | /// 154 | /// Iterates over a string and calls [`Framebuffer::putc`] for every character. 155 | pub fn puts(&mut self, string: &str) { 156 | unsafe { 157 | for c in string.chars() { 158 | self.putc(c); 159 | } 160 | } 161 | } 162 | 163 | /// Returns the framebuffer size in bytes 164 | pub fn length(&self) -> u64 { 165 | FRAMEBUFFER_DATA.height * FRAMEBUFFER_DATA.pitch 166 | } 167 | 168 | /// Sets the color which the framebuffer uses for writing 169 | /// 170 | /// Accepts three [`u8`] arguments which represent the values of the rgb color model 171 | pub fn set_color(&mut self, r: u8, g: u8, b: u8) { 172 | self.color.r = r; 173 | self.color.g = g; 174 | self.color.b = b; 175 | } 176 | 177 | /// Sets the cursor to the start of the next pixel line 178 | pub fn new_line(&mut self) { 179 | self.cursor -= self.cursor % FRAMEBUFFER_DATA.pitch; 180 | self.cursor += FRAMEBUFFER_DATA.pitch; 181 | } 182 | 183 | /// Displays a given bitmap image on the framebuffer 184 | pub unsafe fn show_bitmap_image(&mut self, image_data: &File) { 185 | let address = FRAMEBUFFER_DATA.address.as_ptr().unwrap().cast::(); 186 | 187 | let 
file_base = image_data.base.as_ptr().unwrap(); 188 | 189 | let bpp = file_base.offset(0x1c).read(); 190 | 191 | let img_data_offset = file_base.offset(0xa).read() as u32; 192 | 193 | let img_base = file_base.add(img_data_offset as usize); 194 | 195 | let mut image_addr = img_base; 196 | 197 | let width = file_base.offset(0x12).read() as u16; 198 | let height = file_base.offset(0x16).read() as u16; 199 | 200 | self.new_line(); 201 | 202 | for i in 0..(width * height) { 203 | address 204 | .add(self.cursor as usize) 205 | .write_volatile(image_addr.offset(0).read()); 206 | address 207 | .add((self.cursor + 1) as usize) 208 | .write_volatile(image_addr.offset(1).read()); 209 | address 210 | .add((self.cursor + 2) as usize) 211 | .write_volatile(image_addr.offset(2).read()); 212 | 213 | image_addr = image_addr.add((bpp / 8).into()); 214 | self.cursor += FRAMEBUFFER_DATA.bpp as u64 / 8; 215 | 216 | if i % width == 0 && i != 0 { 217 | self.new_line(); 218 | } 219 | } 220 | 221 | self.new_line(); 222 | self.new_line(); 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /kernel/src/fs/file.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Arc; 2 | use libxernel::sync::Spinlock; 3 | 4 | use super::vnode::VNode; 5 | 6 | pub struct File { 7 | node: Arc>, 8 | offset: usize, 9 | } 10 | 11 | impl File { 12 | pub fn new(node: Arc>) -> Self { 13 | Self { node, offset: 0 } 14 | } 15 | 16 | pub fn get_node(&self) -> Arc> { 17 | self.node.clone() 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /kernel/src/fs/initramfs.rs: -------------------------------------------------------------------------------- 1 | use alloc::{ 2 | collections::btree_map::BTreeMap, 3 | string::{String, ToString}, 4 | vec::Vec, 5 | }; 6 | use libxernel::sync::Spinlock; 7 | 8 | use crate::utils::limine_module; 9 | 10 | static initramfs: Spinlock>> = 
Spinlock::new(BTreeMap::new());

/// Parses the "initramfs" limine module into the global `initramfs` map.
///
/// Archive layout: a sequence of records, each
/// `[16-byte NUL-padded name][8-byte little-endian size][size bytes of data]`.
pub fn load_initramfs() {
    let file = limine_module::get_limine_module("initramfs").unwrap();
    // SAFETY: the bootloader guarantees the module spans `length` bytes at `base`.
    let data = unsafe { core::slice::from_raw_parts(file.base.as_ptr().unwrap(), file.length as usize) };

    let mut idx: usize = 0;

    while idx < file.length as usize {
        // File name: first 16 bytes, NUL-padded.
        let name = String::from_utf8(data[idx..idx + 16].iter().take_while(|&&b| b != 0).copied().collect())
            .expect("Invalid UTF-8 in the name of a file in initramfs");
        idx += 16;

        // File size: 8-byte little-endian length prefix. `from_le_bytes`
        // replaces the previous manual byte-by-byte assembly.
        let size = u64::from_le_bytes(data[idx..idx + 8].try_into().unwrap()) as usize;
        idx += 8;

        let file_data = data[idx..idx + size].to_vec();
        idx += size;

        initramfs.lock().insert(name, file_data);
    }
}

/// Returns a copy of the file stored under `path`, or `None` if absent.
pub fn initramfs_read(path: &str) -> Option<Vec<u8>> {
    // `String: Borrow<str>` lets a `BTreeMap<String, _>` be queried with a
    // `&str` directly — no temporary `String` allocation as before.
    initramfs.lock().get(path).cloned()
}
--------------------------------------------------------------------------------
/kernel/src/fs/mod.rs:
--------------------------------------------------------------------------------
//! The design and implementation of this virtual file system is heavily influenced by BSD.
2 | 3 | #[derive(Debug)] 4 | pub enum Error { 5 | VNodeNotFound, 6 | NotADirectory, 7 | IsADirectory, 8 | NoSpace, 9 | NotEmpty, 10 | EntryNotFound, 11 | MountPointNotFound, 12 | FileSystemNotFound, 13 | } 14 | 15 | pub type Result = core::result::Result; 16 | 17 | pub mod file; 18 | pub mod initramfs; 19 | mod mount; 20 | pub mod pathbuf; 21 | pub mod tmpfs; 22 | pub mod vfs; 23 | pub mod vfs_syscalls; 24 | pub mod vnode; 25 | -------------------------------------------------------------------------------- /kernel/src/fs/mount.rs: -------------------------------------------------------------------------------- 1 | use super::{pathbuf::PathBuf, vnode::VNode, Result}; 2 | use alloc::{string::String, sync::Arc}; 3 | use libxernel::sync::Spinlock; 4 | 5 | // According to BSD each Mount object has a pointer to vfsops and to private data 6 | // As in vnode we combine the member which holds the vfs operations and the private data which is used by the file system 7 | pub struct Mount { 8 | /// Operations vector including private data for file system 9 | mnt_op_data: Arc>, 10 | /// VNode we are mounted on 11 | /// None if root node 12 | vnode_covered: Option>>, 13 | flags: u64, 14 | } 15 | 16 | impl Mount { 17 | pub fn new(driver: Arc>, vnode_covered: Option>>) -> Self { 18 | Mount { 19 | mnt_op_data: driver, 20 | vnode_covered, 21 | flags: 0, 22 | } 23 | } 24 | } 25 | 26 | impl Mount { 27 | pub fn vfs_mount(&mut self, path: String) { 28 | self.mnt_op_data.lock().vfs_mount(path) 29 | } 30 | 31 | pub fn vfs_start(&mut self) { 32 | self.mnt_op_data.lock().vfs_start() 33 | } 34 | 35 | pub fn vfs_unmount(&self) { 36 | self.mnt_op_data.lock().vfs_unmount() 37 | } 38 | 39 | pub fn vfs_root(&self) -> Result>> { 40 | self.mnt_op_data.lock().vfs_root() 41 | } 42 | 43 | pub fn vfs_quotactl(&self) { 44 | self.mnt_op_data.lock().vfs_quotactl() 45 | } 46 | 47 | pub fn vfs_statvfs(&self) { 48 | self.mnt_op_data.lock().vfs_statvfs() 49 | } 50 | 51 | pub fn vfs_sync(&self) { 52 | 
self.mnt_op_data.lock().vfs_sync() 53 | } 54 | 55 | pub fn vfs_vget(&self) { 56 | self.mnt_op_data.lock().vfs_vget() 57 | } 58 | 59 | pub fn vfs_lookup(&self, path: &PathBuf) -> Result>> { 60 | self.mnt_op_data.lock().vfs_lookup(path) 61 | } 62 | 63 | pub fn vfs_fhtovp(&self) { 64 | self.mnt_op_data.lock().vfs_fhtovp() 65 | } 66 | 67 | pub fn vfs_vptofh(&self) { 68 | self.mnt_op_data.lock().vfs_vptofh() 69 | } 70 | 71 | pub fn vfs_init(&mut self) { 72 | self.mnt_op_data.lock().vfs_init() 73 | } 74 | 75 | pub fn vfs_done(&self) { 76 | self.mnt_op_data.lock().vfs_done() 77 | } 78 | 79 | pub fn vfs_extattrctl(&self) { 80 | self.mnt_op_data.lock().vfs_extattrctl() 81 | } 82 | 83 | pub fn vfs_name(&self) -> String { 84 | self.mnt_op_data.lock().vfs_name() 85 | } 86 | } 87 | 88 | /// Operations supported on mounted file system 89 | /// Has an extra method called `name` since Rust traits don't support variables, with trait objects, the `name` method returns the vfs_name 90 | pub trait VfsOps { 91 | /// Mounts a new instance of the file system. 92 | fn vfs_mount(&mut self, path: String); 93 | 94 | /// Makes the file system operational. 95 | fn vfs_start(&mut self); 96 | 97 | /// Unmounts an instance of the file system. 98 | fn vfs_unmount(&self); 99 | 100 | /// Gets the file system root vnode. 101 | fn vfs_root(&self) -> Result>>; 102 | 103 | /// Queries or modifies space quotas. 104 | fn vfs_quotactl(&self) { 105 | unimplemented!("{} does not implement vfs_quotactl", self.vfs_name()); 106 | } 107 | 108 | /// Gets file system statistics. 109 | fn vfs_statvfs(&self) { 110 | unimplemented!("{} does not implement vfs_statvfs", self.vfs_name()); 111 | } 112 | 113 | /// Flushes file system buffers. 114 | fn vfs_sync(&self); 115 | 116 | /// Gets a vnode from a file identifier. 117 | fn vfs_vget(&self); 118 | 119 | fn vfs_lookup(&self, path: &PathBuf) -> Result>>; 120 | 121 | /// Converts a NFS file handle to a vnode. 
122 | fn vfs_fhtovp(&self) { 123 | unimplemented!("{} does not implement vfs_fhtovp", self.vfs_name()); 124 | } 125 | 126 | /// Converts a vnode to a NFS file handle. 127 | fn vfs_vptofh(&self) { 128 | unimplemented!("{} does not implement vfs_vptofh", self.vfs_name()); 129 | } 130 | 131 | /// Initializes the file system driver. 132 | fn vfs_init(&mut self); 133 | 134 | /// Reinitializes the file system driver. 135 | fn vfs_reinit(&self) { 136 | unimplemented!("{} does not implement vfs_reinit", self.vfs_name()); 137 | } 138 | 139 | /// Finalizes the file system driver. 140 | fn vfs_done(&self); 141 | 142 | /// Mounts an instance of the file system as the root file system. 143 | fn vfs_mountroot(&self) { 144 | unimplemented!("{} does not implement vfs_mountroot", self.vfs_name()); 145 | } 146 | 147 | /// Controls extended attributes. 148 | // The generic vfs_stdextattrctl function is provided as a simple hook for file system that do not support this operation 149 | // TODO: create a generic vfs_stdextattrctl function 150 | fn vfs_extattrctl(&self) { 151 | unimplemented!("{} does not implement vfs_extattrctl", self.vfs_name()); 152 | } 153 | 154 | /// Returns the name of the file system 155 | fn vfs_name(&self) -> String; 156 | } 157 | -------------------------------------------------------------------------------- /kernel/src/fs/pathbuf.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Display; 2 | 3 | use alloc::{ 4 | string::{String, ToString}, 5 | vec::Vec, 6 | }; 7 | 8 | use super::vfs::VFS; 9 | 10 | #[derive(Debug)] 11 | pub struct PathBuf { 12 | inner: String, 13 | } 14 | 15 | impl PathBuf { 16 | pub fn new() -> PathBuf { 17 | PathBuf { inner: String::new() } 18 | } 19 | 20 | pub fn with_capacity(capacity: usize) -> PathBuf { 21 | PathBuf { 22 | inner: String::with_capacity(capacity), 23 | } 24 | } 25 | 26 | pub fn components(&self) -> Vec { 27 | self.inner.split_inclusive('/').map(|s| 
s.to_string()).collect() 28 | } 29 | 30 | pub fn into_string(self) -> String { 31 | self.inner 32 | } 33 | 34 | pub fn capacity(&self) -> usize { 35 | self.inner.capacity() 36 | } 37 | 38 | pub fn clear(&mut self) { 39 | self.inner.clear() 40 | } 41 | 42 | pub fn starts_with(&self, pat: &PathBuf) -> bool { 43 | self.inner.starts_with(&pat.as_string()) 44 | } 45 | 46 | pub fn as_string(&self) -> String { 47 | self.inner.clone() 48 | } 49 | 50 | pub fn len(&self) -> usize { 51 | self.inner.len() 52 | } 53 | 54 | pub fn exists(&self) -> bool { 55 | VFS.lock().lookuppn(self.inner.clone()).is_ok() 56 | } 57 | 58 | pub fn is_empty(&self) -> bool { 59 | self.inner.is_empty() 60 | } 61 | 62 | pub fn strip_prefix(&self, prefix: &PathBuf) -> PathBuf { 63 | PathBuf::from( 64 | self.inner 65 | .strip_prefix(&prefix.as_string()) 66 | .expect("Stripping prefix of pathbuf paniced!"), 67 | ) 68 | } 69 | 70 | // TODO: 71 | pub fn push(&mut self) {} 72 | 73 | // TODO: 74 | pub fn pop(&mut self) {} 75 | } 76 | 77 | impl From for PathBuf { 78 | fn from(path: String) -> Self { 79 | PathBuf { inner: path } 80 | } 81 | } 82 | 83 | impl From<&str> for PathBuf { 84 | fn from(path: &str) -> Self { 85 | PathBuf { 86 | inner: path.to_string(), 87 | } 88 | } 89 | } 90 | 91 | impl From<&String> for PathBuf { 92 | fn from(path: &String) -> Self { 93 | PathBuf { inner: path.clone() } 94 | } 95 | } 96 | 97 | impl PartialEq for PathBuf { 98 | fn eq(&self, other: &Self) -> bool { 99 | self.inner == other.inner 100 | } 101 | } 102 | 103 | impl PartialEq for PathBuf { 104 | fn eq(&self, other: &String) -> bool { 105 | &self.inner == other 106 | } 107 | } 108 | 109 | impl PartialEq for &PathBuf { 110 | fn eq(&self, other: &String) -> bool { 111 | &self.inner == other 112 | } 113 | } 114 | 115 | impl PartialEq<&str> for &PathBuf { 116 | fn eq(&self, other: &&str) -> bool { 117 | &self.inner == other 118 | } 119 | } 120 | 121 | impl Clone for PathBuf { 122 | fn clone(&self) -> Self { 123 | Self { 124 | 
inner: self.inner.clone(), 125 | } 126 | } 127 | } 128 | 129 | impl Display for PathBuf { 130 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 131 | f.write_str(&self.inner) 132 | } 133 | } 134 | 135 | impl Default for PathBuf { 136 | fn default() -> Self { 137 | Self::new() 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /kernel/src/fs/tmpfs.rs: -------------------------------------------------------------------------------- 1 | use alloc::{ 2 | string::{String, ToString}, 3 | sync::Arc, 4 | sync::Weak, 5 | vec::Vec, 6 | }; 7 | use libxernel::{boot::InitAtBoot, sync::Spinlock}; 8 | 9 | use crate::{fs::Error, fs::Result}; 10 | 11 | use super::{ 12 | mount::{Mount, VfsOps}, 13 | pathbuf::PathBuf, 14 | vnode::{VNode, VNodeOperations, VType}, 15 | }; 16 | 17 | pub struct Tmpfs { 18 | root_node: InitAtBoot>>, 19 | mounted_on: Option, 20 | mount: Option>, 21 | } 22 | 23 | impl Tmpfs { 24 | pub fn new() -> Self { 25 | Self { 26 | root_node: InitAtBoot::Uninitialized, 27 | mounted_on: None, 28 | mount: None, 29 | } 30 | } 31 | } 32 | 33 | impl VfsOps for Tmpfs { 34 | fn vfs_mount(&mut self, path: String) { 35 | println!("mounting tmpfs on {}", path); 36 | 37 | self.mounted_on = Some(path); 38 | } 39 | 40 | fn vfs_start(&mut self) { 41 | self.root_node 42 | .lock() 43 | .create("test.txt".to_string(), VType::Regular) 44 | .expect("Creation of root node in tmpfs failed"); 45 | } 46 | 47 | fn vfs_unmount(&self) { 48 | todo!() 49 | } 50 | 51 | fn vfs_root(&self) -> Result>> { 52 | Ok(self.root_node.clone()) 53 | } 54 | 55 | fn vfs_vget(&self) { 56 | todo!() 57 | } 58 | 59 | fn vfs_init(&mut self) { 60 | let tmpfs_node = TmpfsNode::new(VType::Directory); 61 | 62 | let root = Arc::new(Spinlock::new(VNode::new( 63 | Weak::new(), 64 | Arc::new(Spinlock::new(tmpfs_node)), 65 | VType::Directory, 66 | None, 67 | ))); 68 | 69 | self.root_node = InitAtBoot::Initialized(root); 70 | } 71 | 72 | fn 
vfs_done(&self) { 73 | todo!() 74 | } 75 | 76 | fn vfs_name(&self) -> String { 77 | "tmpfs".to_string() 78 | } 79 | 80 | fn vfs_lookup(&self, path: &PathBuf) -> Result>> { 81 | if path == "/" || path.is_empty() { 82 | return Ok(self.root_node.clone()); 83 | } 84 | 85 | self.root_node.lock().lookup(path) 86 | } 87 | 88 | fn vfs_sync(&self) { 89 | todo!() 90 | } 91 | } 92 | 93 | enum TmpfsNodeData { 94 | Children(Vec<(PathBuf, Arc>)>), 95 | Data(Vec), 96 | } 97 | 98 | pub struct TmpfsNode { 99 | parent: Option>>, 100 | data: TmpfsNodeData, 101 | } 102 | 103 | impl TmpfsNode { 104 | pub fn new(vtype: VType) -> Self { 105 | if vtype == VType::Directory { 106 | Self { 107 | parent: None, 108 | data: TmpfsNodeData::Children(Vec::new()), 109 | } 110 | } else { 111 | Self { 112 | parent: None, 113 | data: TmpfsNodeData::Data(Vec::new()), 114 | } 115 | } 116 | } 117 | } 118 | 119 | impl VNodeOperations for TmpfsNode { 120 | fn close(&self) { 121 | todo!() 122 | } 123 | 124 | fn create( 125 | &mut self, 126 | file_name: String, 127 | v_type: VType, 128 | mount: Weak>, 129 | ) -> Result>> { 130 | let new_node = Arc::new(Spinlock::new(VNode::new( 131 | mount, 132 | Arc::new(Spinlock::new(TmpfsNode::new(v_type))), 133 | v_type, 134 | None, 135 | ))); 136 | 137 | if let TmpfsNodeData::Children(children) = &mut self.data { 138 | children.push((PathBuf::from(file_name), new_node.clone())); 139 | } else { 140 | return Err(Error::NotADirectory); 141 | } 142 | 143 | Ok(new_node) 144 | } 145 | 146 | fn ioctl(&self) { 147 | todo!() 148 | } 149 | 150 | fn lookup(&self, path: &PathBuf) -> Result>> { 151 | println!("tmpfs path lookup: {}", path); 152 | 153 | let stripped_path = if path.starts_with(&PathBuf::from("/")) { 154 | path.strip_prefix(&PathBuf::from("/")) 155 | } else { 156 | path.clone() 157 | }; 158 | 159 | let components = stripped_path.components(); 160 | 161 | if let TmpfsNodeData::Children(children) = &self.data { 162 | match components.len().cmp(&1) { 163 | 
core::cmp::Ordering::Equal => { 164 | let node = children 165 | .iter() 166 | .find(|(pt, _)| pt == components[0]) 167 | .map(|(_, node)| node.clone()); 168 | node.ok_or(Error::EntryNotFound) 169 | } 170 | core::cmp::Ordering::Greater => { 171 | let node = children 172 | .iter() 173 | .find(|(pt, _)| pt == components[0]) 174 | .map(|(_, node)| node.clone()); 175 | 176 | if let Some(node) = node { 177 | return node.lock().lookup(&stripped_path); 178 | } else { 179 | Err(Error::EntryNotFound) 180 | } 181 | } 182 | core::cmp::Ordering::Less => todo!(), 183 | } 184 | } else { 185 | Err(Error::NotADirectory) 186 | } 187 | } 188 | 189 | fn mknod(&self) { 190 | todo!() 191 | } 192 | 193 | fn open(&self) { 194 | println!("opening file on tmpfs"); 195 | } 196 | 197 | fn read(&self, buf: &mut [u8]) -> Result { 198 | if let TmpfsNodeData::Data(data) = &self.data { 199 | let max_read = if buf.len() > data.len() { data.len() } else { buf.len() }; 200 | 201 | buf[..max_read].copy_from_slice(&data[..max_read]); 202 | 203 | Ok(max_read) 204 | } else { 205 | Err(Error::IsADirectory) 206 | } 207 | } 208 | 209 | fn write(&mut self, buf: &mut [u8]) -> Result { 210 | if let TmpfsNodeData::Data(ref mut data) = &mut self.data { 211 | data.resize(data.len() + buf.len(), 0); 212 | 213 | let max_write = if buf.len() > data.len() { data.len() } else { buf.len() }; 214 | 215 | data.reserve(max_write); 216 | 217 | data[..max_write].copy_from_slice(&buf[..max_write]); 218 | 219 | Ok(max_write) 220 | } else { 221 | Err(Error::IsADirectory) 222 | } 223 | } 224 | 225 | fn readdir(&self) { 226 | todo!() 227 | } 228 | 229 | fn readlink(&self) { 230 | todo!() 231 | } 232 | 233 | fn reclaim(&self) { 234 | todo!() 235 | } 236 | 237 | fn remove(&self) { 238 | todo!() 239 | } 240 | 241 | fn rename(&self) { 242 | todo!() 243 | } 244 | 245 | fn mkdir(&self) { 246 | todo!() 247 | } 248 | 249 | fn rmdir(&self) { 250 | todo!() 251 | } 252 | 253 | fn symlink(&self) { 254 | todo!() 255 | } 256 | } 257 | 
-------------------------------------------------------------------------------- /kernel/src/fs/vfs.rs: -------------------------------------------------------------------------------- 1 | use alloc::{ 2 | string::{String, ToString}, 3 | sync::Arc, 4 | vec, 5 | vec::Vec, 6 | }; 7 | use libxernel::boot::InitAtBoot; 8 | use libxernel::sync::Spinlock; 9 | 10 | use super::{ 11 | mount::{Mount, VfsOps}, 12 | pathbuf::PathBuf, 13 | tmpfs::Tmpfs, 14 | vnode::VNode, 15 | {Error, Result}, 16 | }; 17 | 18 | pub static VFS: Spinlock = Spinlock::new(Vfs::new()); 19 | 20 | pub struct Vfs { 21 | mount_point_list: Vec<(PathBuf, Arc>)>, 22 | drivers: Vec<(String, Arc>)>, 23 | free_vnodes: Vec>, 24 | root: InitAtBoot>>, 25 | } 26 | 27 | impl Vfs { 28 | // get virtual node by asking the file system driver (use mount point list to see which driver to ask) 29 | // veneer layer gets implemented here 30 | pub const fn new() -> Self { 31 | Vfs { 32 | mount_point_list: Vec::new(), 33 | drivers: Vec::new(), 34 | free_vnodes: Vec::new(), 35 | root: InitAtBoot::Uninitialized, 36 | } 37 | } 38 | 39 | pub fn root_node(&self) -> Arc> { 40 | self.root.clone() 41 | } 42 | 43 | pub fn get_mount(&self, mounted_on: &PathBuf) -> Result>> { 44 | self.mount_point_list 45 | .iter() 46 | .find(|(pt, _)| pt == mounted_on) 47 | .map(|(_, mnt)| mnt) 48 | .ok_or(Error::MountPointNotFound) 49 | .cloned() 50 | } 51 | 52 | pub fn register_filesystem(&mut self, name: String, operations: Arc>) { 53 | self.drivers.push((name, operations)); 54 | } 55 | 56 | pub fn vn_mount(&mut self, name_of_fs: &str, where_to_mount: &str) -> Result<()> { 57 | let driver = self 58 | .drivers 59 | .iter() 60 | .find(|(name, _)| name == name_of_fs) 61 | .map(|(_, driver)| driver) 62 | .ok_or(Error::FileSystemNotFound)?; 63 | 64 | let node_covered = if where_to_mount == "/" { 65 | None 66 | } else { 67 | // get vnode to mount on 68 | if let Ok(node) = self.lookuppn(where_to_mount.to_string()) { 69 | Some(node) 70 | } else { 71 | 
return Err(Error::EntryNotFound); 72 | } 73 | }; 74 | 75 | let mount = Arc::new(Spinlock::new(Mount::new(driver.clone(), node_covered))); 76 | 77 | let root_node = mount.lock().vfs_root().expect("root node not found"); 78 | 79 | root_node.lock().vfsp = Arc::downgrade(&mount); 80 | 81 | mount.lock().vfs_mount(where_to_mount.to_string()); 82 | 83 | mount.lock().vfs_start(); 84 | 85 | self.mount_point_list.push((PathBuf::from(where_to_mount), mount)); 86 | 87 | Ok(()) 88 | } 89 | 90 | /// Lookup path name 91 | pub fn lookuppn(&self, path: String) -> Result>> { 92 | let path = PathBuf::from(path); 93 | 94 | let mnt_point = self.get_mount_point(&path)?; 95 | 96 | let mnt = self 97 | .mount_point_list 98 | .iter() 99 | .find(|(pt, _)| pt == mnt_point) 100 | .map(|(_, mnt)| mnt) 101 | .ok_or(Error::MountPointNotFound)?; 102 | 103 | mnt.lock().vfs_lookup(&path.strip_prefix(mnt_point)) 104 | } 105 | 106 | fn get_mount_point(&self, path: &PathBuf) -> Result<&PathBuf> { 107 | let mnt_point = self 108 | .mount_point_list 109 | .iter() 110 | .filter(|(pt, _)| path.starts_with(pt)) 111 | .max_by_key(|(pt, _)| pt.len()) 112 | .map(|(pt, _)| pt) 113 | .ok_or(Error::MountPointNotFound)?; 114 | 115 | Ok(mnt_point) 116 | } 117 | 118 | pub fn vn_open(&self, path: String, _mode: u64) -> Result>> { 119 | let node = self.lookuppn(path)?; 120 | 121 | node.lock().open(); 122 | 123 | Ok(node) 124 | } 125 | 126 | pub fn vn_close(&mut self) {} 127 | 128 | // TODO: When available, replace node with filedescriptor 129 | pub fn vn_read(&self, node: Arc>, buf: &mut [u8]) -> Result { 130 | node.lock().read(buf) 131 | } 132 | 133 | pub fn vn_write(&self, node: Arc>, buf: &mut [u8]) -> Result { 134 | node.lock().write(buf) 135 | } 136 | 137 | pub fn vn_create(&mut self) {} 138 | 139 | pub fn vn_remove(&mut self) {} 140 | 141 | pub fn vn_link(&mut self) {} 142 | 143 | pub fn vn_rename(&mut self) {} 144 | } 145 | 146 | pub fn init() { 147 | let mut vfs = VFS.lock(); 148 | 149 | let tmpfs = 
Arc::new(Spinlock::new(Tmpfs::new())); 150 | 151 | tmpfs.lock().vfs_init(); 152 | 153 | vfs.register_filesystem(String::from("tmpfs"), tmpfs.clone()); 154 | 155 | vfs.root = InitAtBoot::Initialized(tmpfs.lock().vfs_root().unwrap()); 156 | 157 | vfs.vn_mount("tmpfs", "/").expect("Mounting tmpfs on / failed"); 158 | } 159 | 160 | pub fn test() { 161 | let t = VFS.lock().vn_open("/test.txt".to_string(), 0).unwrap(); 162 | 163 | let mut write_buf: Vec = vec![5; 10]; 164 | 165 | VFS.lock() 166 | .vn_write(t.clone(), &mut write_buf) 167 | .expect("write to file failed"); 168 | 169 | let mut read_buf: Vec = vec![0; 5]; 170 | 171 | VFS.lock().vn_read(t.clone(), &mut read_buf).expect("read failed"); 172 | 173 | println!( 174 | "name of fs where node is mounted: {}", 175 | t.lock().vfsp.upgrade().unwrap().lock().vfs_name() 176 | ); 177 | println!("{:?}", write_buf); 178 | println!("{:?}", read_buf); 179 | } 180 | -------------------------------------------------------------------------------- /kernel/src/fs/vfs_syscalls.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::String; 2 | 3 | use crate::{cpu::current_process, syscall::Result}; 4 | 5 | use super::{file::File, vfs::VFS}; 6 | 7 | pub fn sys_open(path: String, mode: u64) -> Result { 8 | let vfs = VFS.lock(); 9 | 10 | let node = vfs.vn_open(path, mode)?; 11 | 12 | let file_handle = File::new(node); 13 | 14 | let process = current_process(); 15 | let mut process = process.lock(); 16 | 17 | let fd = process.append_fd(file_handle); 18 | 19 | Ok(fd as isize) 20 | } 21 | 22 | pub fn sys_close(fd: usize) -> Result { 23 | let process = current_process(); 24 | let process = process.lock(); 25 | 26 | let file_handle = process.get_filehandle_from_fd(fd); 27 | 28 | let node = file_handle.get_node(); 29 | 30 | node.lock().close(); 31 | 32 | Ok(0) 33 | } 34 | 35 | pub fn sys_read(fd: usize, buf: &mut [u8]) -> Result { 36 | let vfs = VFS.lock(); 37 | 38 | let process = 
current_process();
    let process = process.lock();

    let file_handle = process.get_filehandle_from_fd(fd);

    let node = file_handle.get_node();

    let res = vfs.vn_read(node, buf)?;

    Ok(res as isize)
}

/// write syscall: writes `buf` to the vnode behind file descriptor `fd`
/// of the current process and returns the number of bytes written.
pub fn sys_write(fd: usize, buf: &mut [u8]) -> Result<isize> {
    let vfs = VFS.lock();

    let process = current_process();
    let process = process.lock();

    let file_handle = process.get_filehandle_from_fd(fd);
    let node = file_handle.get_node();

    let written = vfs.vn_write(node, buf)?;

    Ok(written as isize)
}
--------------------------------------------------------------------------------
/kernel/src/fs/vnode.rs:
--------------------------------------------------------------------------------
use super::mount::Mount;
use super::pathbuf::PathBuf;
use super::Result;
use alloc::string::String;
use alloc::{sync::Arc, sync::Weak};
use libxernel::sync::Spinlock;

/// Type of a vnode, mirroring the classic BSD `vtype` enumeration.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum VType {
    Non,
    Regular,
    Directory,
    BlockDevice,
    CharacterDevice,
    SymbolicLink,
    Socket,
    Fifo,
    Bad,
}

// Each Vnode gets a field file system specific handler which is a struct given by the file system driver which implements the VNode Operations trait
// since this struct can also be used for the file system to store file system specific data we combine the fields v_data and v_op of the mount struct from NetBSD.
23 | pub struct VNode { 24 | /// ptr to vfs we are in 25 | /// filesystem to which the vnode (we are mounted to) belongs to 26 | pub vfsp: Weak>, 27 | /// Holds the vnode operations vector and the private data for fs in one member 28 | /// since the struct, which each fs, which implements the VNodeOperations trait can directly own the private fs data 29 | v_data_op: Arc>, 30 | v_type: VType, 31 | flags: u64, 32 | // TODO: add attributes 33 | // maybe like netbsd, use union https://github.com/NetBSD/src/blob/trunk/sys/sys/vnode.h#L172 34 | // used if vnode is mountpoint, v_mounted_here points to the other file system 35 | v_mounted_here: Option>, 36 | } 37 | 38 | impl VNode { 39 | pub fn new( 40 | vfsp: Weak>, 41 | data_op: Arc>, 42 | v_type: VType, 43 | v_mounted_here: Option>, 44 | ) -> Self { 45 | VNode { 46 | vfsp, 47 | v_data_op: data_op, 48 | v_type, 49 | v_mounted_here, 50 | flags: 0, 51 | } 52 | } 53 | } 54 | 55 | impl VNode { 56 | pub fn close(&self) { 57 | self.v_data_op.lock().close(); 58 | } 59 | 60 | pub fn access(&self) { 61 | self.v_data_op.lock().access() 62 | } 63 | 64 | pub fn bmap(&self) { 65 | self.v_data_op.lock().bmap() 66 | } 67 | 68 | pub fn create(&mut self, path: String, v_type: VType) -> Result>> { 69 | self.v_data_op.lock().create(path, v_type, self.vfsp.clone()) 70 | } 71 | 72 | pub fn fsync(&self) { 73 | self.v_data_op.lock().fsync() 74 | } 75 | 76 | pub fn getattr(&self) { 77 | self.v_data_op.lock().getattr() 78 | } 79 | 80 | pub fn inactive(&self) { 81 | self.v_data_op.lock().inactive() 82 | } 83 | 84 | pub fn ioctl(&self) { 85 | self.v_data_op.lock().ioctl() 86 | } 87 | 88 | pub fn link(&self) { 89 | self.v_data_op.lock().link() 90 | } 91 | 92 | pub fn lookup(&self, path: &PathBuf) -> Result>> { 93 | self.v_data_op.lock().lookup(path) 94 | } 95 | 96 | pub fn mknod(&self) { 97 | self.v_data_op.lock().mknod() 98 | } 99 | 100 | pub fn open(&self) { 101 | self.v_data_op.lock().open() 102 | } 103 | 104 | pub fn pathconf(&self) { 105 | 
self.v_data_op.lock().pathconf() 106 | } 107 | 108 | pub fn read(&self, buf: &mut [u8]) -> Result { 109 | self.v_data_op.lock().read(buf) 110 | } 111 | 112 | pub fn readdir(&self) { 113 | self.v_data_op.lock().readdir() 114 | } 115 | 116 | pub fn readlink(&self) { 117 | self.v_data_op.lock().readlink() 118 | } 119 | 120 | pub fn reclaim(&self) { 121 | self.v_data_op.lock().reclaim() 122 | } 123 | 124 | pub fn remove(&self) { 125 | self.v_data_op.lock().remove() 126 | } 127 | 128 | pub fn rename(&self) { 129 | self.v_data_op.lock().rename() 130 | } 131 | 132 | pub fn mkdir(&self) { 133 | self.v_data_op.lock().mkdir() 134 | } 135 | 136 | pub fn rmdir(&self) { 137 | self.v_data_op.lock().rmdir() 138 | } 139 | 140 | pub fn setattr(&self) { 141 | self.v_data_op.lock().setattr() 142 | } 143 | 144 | pub fn symlink(&self) { 145 | self.v_data_op.lock().symlink() 146 | } 147 | 148 | pub fn write(&self, buf: &mut [u8]) -> Result { 149 | self.v_data_op.lock().write(buf) 150 | } 151 | 152 | pub fn kqfilter(&self) { 153 | self.v_data_op.lock().kqfilter() 154 | } 155 | } 156 | 157 | /// This trait maps logical operations to real functions. It is file system specific as the actions taken by each operation depend heavily on the file system where the file resides. 158 | pub trait VNodeOperations { 159 | /// Aborts an in-progress operation. 160 | fn abortop(&self) { 161 | unimplemented!() 162 | } 163 | 164 | /// Checks access permissions on a file. 165 | fn access(&self) { 166 | unimplemented!() 167 | } 168 | 169 | fn advlock(&self) { 170 | unimplemented!() 171 | } 172 | 173 | /// Maps a logical block number to a physical block number. 174 | fn bmap(&self) { 175 | unimplemented!() 176 | } 177 | 178 | /// Writes a system buffer. 179 | fn bwrite(&self) { 180 | unimplemented!() 181 | } 182 | 183 | /// Closes a file. 184 | fn close(&self); 185 | 186 | /// Creates a new file. 
187 | fn create(&mut self, path: String, v_type: VType, mount: Weak>) -> Result>>; 188 | 189 | /// Synchronizes the file with on-disk contents. 190 | fn fsync(&self) { 191 | unimplemented!() 192 | } 193 | 194 | /// Gets a file's attributes. 195 | fn getattr(&self) { 196 | unimplemented!() 197 | } 198 | 199 | /// Marks the vnode as inactive. 200 | fn inactive(&self) { 201 | unimplemented!() 202 | } 203 | 204 | /// Performs an ioctl on a file. 205 | fn ioctl(&self); 206 | 207 | /// Creates a new hard link for a file. 208 | fn link(&self) { 209 | unimplemented!() 210 | } 211 | 212 | /// Performs a path name lookup. 213 | fn lookup(&self, path: &PathBuf) -> Result>>; 214 | 215 | /// Creates a new special file (a device or a named pipe). 216 | fn mknod(&self); 217 | 218 | /// Opens a file. 219 | fn open(&self); 220 | 221 | /// Returns pathconf information. 222 | fn pathconf(&self) { 223 | unimplemented!() 224 | } 225 | 226 | /// Reads a chunk of data from a file. 227 | fn read(&self, buf: &mut [u8]) -> Result; 228 | 229 | /// Reads directory entries from a directory. 230 | fn readdir(&self); 231 | 232 | /// Reads the contents of a symbolic link. 233 | fn readlink(&self); 234 | 235 | /// Reclaims the vnode. 236 | fn reclaim(&self); 237 | 238 | /// Removes a file. 239 | fn remove(&self); 240 | 241 | /// Renames a file. 242 | fn rename(&self); 243 | 244 | /// Creates a new directory. 245 | fn mkdir(&self); 246 | 247 | /// Removes a directory. 248 | fn rmdir(&self); 249 | 250 | /// Sets a file's attributes. 251 | fn setattr(&self) { 252 | unimplemented!() 253 | } 254 | 255 | /// Performs a file transfer between the file system's backing store and memory. 256 | fn strategy(&self) { 257 | unimplemented!() 258 | } 259 | 260 | /// Creates a new symbolic link for a file. 261 | fn symlink(&self); 262 | 263 | /// Writes a chunk of data to a file. 
264 | fn write(&mut self, buf: &mut [u8]) -> Result; 265 | 266 | fn kqfilter(&self) { 267 | unimplemented!() 268 | } 269 | 270 | fn print(&self) { 271 | unimplemented!() 272 | } // OpenBSD has it, NetBSD not?! 273 | 274 | /// Performs a fcntl on a file. 275 | fn fcntl(&self) { 276 | unimplemented!() 277 | } // NetBSD has it, OpenBSD not?! 278 | /// Performs a poll on a file. 279 | fn poll(&self) { 280 | unimplemented!() 281 | } // NetBSD has it, OpenBSD not?! 282 | /// Revoke access to a vnode and all aliases. 283 | fn revoke(&self) { 284 | unimplemented!() 285 | } // NetBSD has it, OpenBSD not?! 286 | /// Maps a file on a memory region. 287 | fn mmap(&self) { 288 | unimplemented!() 289 | } // NetBSD has it, OpenBSD not?! 290 | /// Test and inform file system of seek 291 | fn seek(&self) { 292 | unimplemented!() 293 | } // NetBSD has it, OpenBSD not?! 294 | /// Truncates a file. 295 | fn truncate(&self) { 296 | unimplemented!() 297 | } // NetBSD has it, OpenBSD not?! 298 | /// Updates a file's times. 299 | fn update(&self) { 300 | unimplemented!() 301 | } // NetBSD has it, OpenBSD not?! 302 | /// Reads memory pages from the file. 303 | fn getpages(&self) { 304 | unimplemented!() 305 | } // NetBSD has it, OpenBSD not?! 306 | /// Writes memory pages to the file. 307 | fn putpages(&self) { 308 | unimplemented!() 309 | } // NetBSD has it, OpenBSD not?! 
}
--------------------------------------------------------------------------------
/kernel/src/logger.rs:
--------------------------------------------------------------------------------
use core::fmt;
use core::fmt::Write;

use x86_64::instructions::port::Port;

// Writer that sends all output to I/O port 0xE9 (QEMU's debug console).
struct Writer;

impl core::fmt::Write for Writer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let mut port = Port::new(0xe9);

        // NOTE(review): `c as u8` truncates non-ASCII chars — presumably
        // acceptable for debug-only output; confirm.
        for c in s.chars() {
            unsafe {
                port.write(c as u8);
            }
        }

        Ok(())
    }
}

// Writes the formatted arguments plus a trailing newline to the debug port.
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    let mut writer = Writer;
    // UNWRAP: We always return `Ok(())` inside `write_str` so this is unreachable.
    writer.write_fmt(args).unwrap();
    writer.write_char('\n').unwrap();
}

// Debug logging macro: formats like `println!` and emits via `_print`.
#[macro_export]
macro_rules! dbg {
    ($($arg:tt)*) => ($crate::logger::_print(format_args!($($arg)*)));
}
--------------------------------------------------------------------------------
/kernel/src/main.rs:
--------------------------------------------------------------------------------
#![no_std]
#![no_main]
#![feature(abi_x86_interrupt)]
#![feature(naked_functions)]
#![feature(let_chains)]
#![allow(dead_code)]
#![allow(clippy::fn_to_numeric_cast)]
#![allow(non_upper_case_globals)]
extern crate alloc;

#[macro_use]
mod writer;

#[macro_use]
mod logger;

#[macro_use]
mod utils;

mod acpi;
mod allocator;
mod arch;
mod cpu;
mod dpc;
mod drivers;
mod framebuffer;
mod fs;
mod mem;
mod sched;
mod syscall;
mod timer;
mod userland;

use alloc::sync::Arc;
use core::arch::{asm, naked_asm};
use core::panic::PanicInfo;
use core::time::Duration;
use fs::initramfs;
use libxernel::sync::Spinlock;
use limine::*;
use x86_64::instructions::interrupts;

use arch::amd64::gdt;

use x86_64::structures::paging::Page;
use x86_64::structures::paging::PageTableFlags;
use x86_64::structures::paging::Size2MiB;
use x86_64::VirtAddr;

use crate::acpi::hpet;
use crate::arch::amd64;
use crate::arch::amd64::apic;
use crate::arch::amd64::hcf;
use crate::cpu::wait_until_cpus_registered;
use crate::cpu::CPU_COUNT;
use crate::cpu::{current_cpu, register_cpu};
use crate::fs::vfs;
use crate::fs::vfs::VFS;
use crate::mem::frame::FRAME_ALLOCATOR;
use crate::mem::paging::KERNEL_PAGE_MAPPER;
use crate::sched::process::Process;
use crate::sched::process::KERNEL_PROCESS;
use crate::sched::scheduler::reschedule;
use crate::sched::thread::Thread;
use crate::timer::hardclock;
use crate::timer::timer_event::TimerEvent;
use crate::utils::backtrace;
use crate::utils::rtc::Rtc;
// Limine boot-protocol requests; the bootloader fills the responses before
// `kernel_main` runs.
static BOOTLOADER_INFO: BootInfoRequest = BootInfoRequest::new(0);
static SMP_REQUEST: SmpRequest = SmpRequest::new(0);

// Kernel panic handler: logs the panic info to both the debug port (`dbg!`)
// and the framebuffer (`error!`), then spins forever.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    // disable interrupts in panic handler to prevent getting scheduled again
    interrupts::disable();

    // TODO: check which task paniced and kill it

    dbg!("Kernel PANIC !!!");
    dbg!("panic info: {:#?}", info);

    // print the panic info
    // NOTE: this might panic again, but it is better than printing nothing
    error!("Kernel PANIC !!!");
    error!("panic info: {:#?}", info);
    loop {}
}

// define the kernel's entry point function
#[no_mangle]
extern "C" fn kernel_main() -> !
{
    framebuffer::init();
    info!("framebuffer initialized");

    gdt::init();
    info!("GDT loaded");
    amd64::interrupts::init();
    info!("IDT loaded");
    amd64::interrupts::disable_pic();

    mem::init();

    acpi::init();
    info!("acpi initialized");

    backtrace::init();
    info!("backtrace initialized");

    hpet::init();

    apic::init();

    syscall::init();

    vfs::init();

    vfs::test();

    initramfs::load_initramfs();
    info!("initramfs loaded");

    let bootloader_info = BOOTLOADER_INFO
        .get_response()
        .get()
        // FIX: corrected "recieved" typo in the panic message
        .expect("barebones: received no bootloader info");

    info!(
        "bootloader: (name={:?}, version={:?})",
        bootloader_info.name.to_str().unwrap(),
        bootloader_info.version.to_str().unwrap()
    );

    Rtc::read();

    KERNEL_PROCESS.set_once(Arc::new(Spinlock::new(Process::new(None))));

    let smp_response = SMP_REQUEST.get_response().get_mut().unwrap();

    let bsp_lapic_id = smp_response.bsp_lapic_id;

    CPU_COUNT.set_once(smp_response.cpu_count as usize);

    register_cpu();

    // Send every AP to the kernel's AP entry point; the BSP continues here.
    for cpu in smp_response.cpus().iter_mut() {
        if cpu.lapic_id != bsp_lapic_id {
            cpu.goto_address = arch::amd64::x86_64_ap_main;
        }
    }

    wait_until_cpus_registered();

    timer::init();
    info!("scheduler initialized");

    let process = Arc::new(Spinlock::new(Process::new(Some(KERNEL_PROCESS.clone()))));

    let _user_task = Thread::new_user_thread(process.clone(), VirtAddr::new(0x200000));

    // NOTE(review): the generic argument was stripped by extraction and is
    // reconstructed as Size2MiB (the frame is mapped as one 2 MiB page below)
    // — confirm against the original source.
    let page = FRAME_ALLOCATOR.aquire().allocate_frame::<Size2MiB>().unwrap();

    // Map the user code page into the kernel page table …
    KERNEL_PAGE_MAPPER.aquire().map(
        page,
        Page::from_start_address(VirtAddr::new(0x200000)).unwrap(),
        PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
        true,
    );

    // … and into the new process's own page table.
    let mut process = process.aquire();
    let pm = process.get_page_table().as_mut().unwrap();
    pm.map(
        page,
        Page::from_start_address(VirtAddr::new(0x200000)).unwrap(),
        PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
        true,
    );

    process.unlock();

    // unsafe {
    //     let start_address_fn = test_userspace_fn as usize;
    //
    //     // the `test_userspace_fn` is very small and should fit in 512 bytes
    //     for i in 0..512 {
    //         let ptr = (0x200000 + i) as *mut u8;
    //         let val = (start_address_fn + i) as *mut u8;
    //
    //         ptr.write_volatile(val.read_volatile());
    //     }
    // }

    let main_task = Thread::kernel_thread_from_fn(kmain_thread);

    // let kernel_task = Thread::kernel_thread_from_fn(task1);

    // let kernel_task2 = Thread::kernel_thread_from_fn(task2);

    current_cpu().enqueue_thread(Arc::new(main_task));
    // current_cpu().enqueue_thread(Arc::new(kernel_task));
    // current_cpu().enqueue_thread(Arc::new(kernel_task2));

    userland::init();
    info!("userland initialized");

    // Once-per-second timekeeping tick.
    let timekeeper = TimerEvent::new(hardclock, (), Duration::from_secs(1), false);

    current_cpu().enqueue_timer(timekeeper);

    // 5 ms preemption timer driving the scheduler.
    let resched = TimerEvent::new(reschedule, (), Duration::from_millis(5), false);

    current_cpu().enqueue_timer(resched);

    amd64::interrupts::enable();
    hcf();
}

/// First kernel thread: prints an incrementing heartbeat counter forever.
pub fn kmain_thread() {
    let mut var = 1;

    loop {
        for _ in 0..i16::MAX {
            unsafe {
                asm!("nop");
            }
        }

        dbg!("hello from main {}", var);
        var += 1;
    }
}

// Tiny userspace stub: loads syscall arguments and executes `syscall`.
#[naked]
pub extern "C" fn test_userspace_fn() {
    //loop {
    unsafe {
        naked_asm!(
            "\
            mov rax, 0
            mov rdi, 2
            mov rsi, 3
            mov rdx, 4
            syscall
            mov rax, 0
            "
| ); 246 | } 247 | //} 248 | } 249 | 250 | #[no_mangle] 251 | fn task1() { 252 | let mut var = 1; 253 | 254 | loop { 255 | for _ in 0..i16::MAX { 256 | unsafe { 257 | asm!("nop"); 258 | } 259 | } 260 | 261 | dbg!("hello from task1 {}", var); 262 | var += 1; 263 | } 264 | } 265 | 266 | fn task2() { 267 | let mut var = -1; 268 | 269 | loop { 270 | for _ in 0..i16::MAX { 271 | unsafe { 272 | asm!("nop"); 273 | } 274 | } 275 | 276 | dbg!("hello from task2 {}", var); 277 | var -= 1; 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /kernel/src/mem/frame.rs: -------------------------------------------------------------------------------- 1 | use core::ptr::NonNull; 2 | 3 | use crate::{allocator::buddy::BuddyAllocator, mem::HIGHER_HALF_OFFSET}; 4 | use libxernel::sync::{Once, Spinlock}; 5 | use limine::{MemmapEntry, MemmapRequest, MemoryMapEntryType, NonNullPtr}; 6 | use x86_64::{ 7 | structures::paging::{PageSize, PhysFrame}, 8 | PhysAddr, 9 | }; 10 | 11 | static MMAP_REQUEST: MemmapRequest = MemmapRequest::new(0); 12 | 13 | pub static MEMORY_MAP: Once<&'static [NonNullPtr]> = Once::new(); 14 | 15 | pub struct PhysFrameAllocator(BuddyAllocator<{ super::FRAME_SIZE as usize }, 12>); // maximum allocation size is 16mb 16 | 17 | pub static FRAME_ALLOCATOR: Spinlock = Spinlock::new(PhysFrameAllocator(BuddyAllocator::new())); 18 | 19 | impl PhysFrameAllocator { 20 | pub fn allocate_frame(&mut self) -> Option> { 21 | let order = self.0.order_for_size(P::SIZE as usize); 22 | 23 | let frame = self.0.allocate(order); 24 | let start_addr = frame.unwrap().as_ptr() as u64 - *HIGHER_HALF_OFFSET; 25 | let pframe = PhysFrame::from_start_address(PhysAddr::new(start_addr)); 26 | pframe.ok() 27 | } 28 | 29 | pub unsafe fn deallocate_frame(&mut self, frame: PhysFrame

) { 30 | let order = self.0.order_for_size(P::SIZE as usize); 31 | 32 | self.0 33 | .deallocate( 34 | NonNull::new((frame.start_address().as_u64() + *HIGHER_HALF_OFFSET) as *mut u8).unwrap(), 35 | order, 36 | ) 37 | .unwrap(); 38 | } 39 | } 40 | 41 | pub fn init() { 42 | let mut buddy = FRAME_ALLOCATOR.lock(); 43 | 44 | MEMORY_MAP.set_once( 45 | MMAP_REQUEST 46 | .get_response() 47 | .get() 48 | .expect("barebones: recieved no mmap") 49 | .memmap(), 50 | ); 51 | 52 | for entry in *MEMORY_MAP { 53 | if entry.typ == MemoryMapEntryType::Usable { 54 | unsafe { 55 | buddy 56 | .0 57 | .add_region( 58 | NonNull::new((entry.base + *HIGHER_HALF_OFFSET) as *mut u8).unwrap(), 59 | NonNull::new((entry.base + *HIGHER_HALF_OFFSET + entry.len) as *mut u8).unwrap(), 60 | ) 61 | .unwrap(); 62 | } 63 | } 64 | } 65 | 66 | dbg!("{}", buddy.0.stats); 67 | } 68 | -------------------------------------------------------------------------------- /kernel/src/mem/heap.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::GlobalAlloc; 2 | use core::ptr::NonNull; 3 | 4 | use libxernel::sync::Spinlock; 5 | use linked_list_allocator::Heap; 6 | use x86_64::structures::paging::{Page, PageSize, PageTableFlags, PhysFrame, Size2MiB}; 7 | use x86_64::VirtAddr; 8 | 9 | use crate::allocator::align_up; 10 | 11 | use super::HEAP_START_ADDR; 12 | use super::{frame::FRAME_ALLOCATOR, paging::KERNEL_PAGE_MAPPER}; 13 | 14 | // TODO: Replace heap by self written slab allocator 15 | static HEAP: Spinlock = Spinlock::new(Heap::empty()); 16 | 17 | const HEAP_INITIAL_PAGE_COUNT: u64 = 2; // 4 MiB 18 | 19 | struct Allocator; 20 | 21 | #[global_allocator] 22 | static ALLOCATOR: Allocator = Allocator; 23 | 24 | unsafe impl GlobalAlloc for Allocator { 25 | unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { 26 | let mut heap = HEAP.lock(); 27 | 28 | if let Ok(ptr) = heap.allocate_first_fit(layout) { 29 | ptr.as_ptr() 30 | } else { 31 | // expand heap 32 
| let expansion_size = align_up(layout.size(), Size2MiB::SIZE as usize); 33 | 34 | info!("expanding heap by {} MiB", expansion_size / 1024 / 1024); 35 | 36 | let current_top = align_up(heap.top() as usize, Size2MiB::SIZE as usize); 37 | 38 | for start_address in (current_top..current_top + expansion_size).step_by(Size2MiB::SIZE as usize) { 39 | let page = { 40 | let mut allocator = FRAME_ALLOCATOR.lock(); 41 | allocator.allocate_frame::().unwrap() 42 | }; 43 | 44 | KERNEL_PAGE_MAPPER.lock().map::( 45 | PhysFrame::containing_address(page.start_address()), 46 | Page::containing_address(VirtAddr::new(start_address as u64)), 47 | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE | PageTableFlags::PRESENT, 48 | true, 49 | ); 50 | 51 | heap.extend(Size2MiB::SIZE as usize); 52 | } 53 | 54 | // try to allocate again 55 | heap.allocate_first_fit(layout) 56 | .expect("heap allocation failed after expansion") 57 | .as_ptr() 58 | } 59 | } 60 | 61 | unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { 62 | if ptr.is_null() { 63 | return; 64 | } 65 | 66 | let mut heap = HEAP.lock(); 67 | 68 | heap.deallocate(NonNull::new(ptr).unwrap(), layout); 69 | } 70 | } 71 | 72 | pub fn init() { 73 | let mut heap = HEAP.lock(); 74 | let mut page_mapper = KERNEL_PAGE_MAPPER.lock(); 75 | 76 | for start_address in (HEAP_START_ADDR..HEAP_START_ADDR + (HEAP_INITIAL_PAGE_COUNT * Size2MiB::SIZE) as usize) 77 | .step_by(Size2MiB::SIZE as usize) 78 | { 79 | let page = { 80 | let mut allocator = FRAME_ALLOCATOR.lock(); 81 | allocator.allocate_frame::().unwrap() 82 | }; 83 | 84 | page_mapper.map::( 85 | PhysFrame::containing_address(page.start_address()), 86 | Page::containing_address(VirtAddr::new(start_address as u64)), 87 | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE | PageTableFlags::PRESENT, 88 | true, 89 | ); 90 | } 91 | 92 | unsafe { 93 | heap.init( 94 | HEAP_START_ADDR as *mut u8, 95 | (HEAP_INITIAL_PAGE_COUNT * Size2MiB::SIZE) as usize, 96 | ); 97 | } 98 | } 99 
| -------------------------------------------------------------------------------- /kernel/src/mem/mmap.rs: -------------------------------------------------------------------------------- 1 | use libxernel::syscall::{MapFlags, ProtectionFlags, SyscallError}; 2 | use x86_64::{ 3 | structures::{ 4 | idt::PageFaultErrorCode, 5 | paging::{Page, PageSize, Size4KiB}, 6 | }, 7 | VirtAddr, 8 | }; 9 | 10 | use crate::{allocator::align_up, cpu::current_process}; 11 | 12 | use super::{frame::FRAME_ALLOCATOR, vm::ptflags_from_protflags}; 13 | 14 | #[allow(unused_variables)] 15 | pub fn mmap( 16 | addr: usize, 17 | len: usize, 18 | prot: usize, 19 | flags: usize, 20 | fd: usize, 21 | offset: usize, 22 | ) -> Result { 23 | let addr = VirtAddr::new(addr as u64); 24 | let prot = ProtectionFlags::from_bits(prot as u8).ok_or(SyscallError::InvalidArgument)?; 25 | let flags = MapFlags::from_bits(flags as u8).ok_or(SyscallError::InvalidArgument)?; 26 | let len = align_up(len, Size4KiB::SIZE as usize); 27 | 28 | let process = current_process(); 29 | let mut process = process.lock(); 30 | 31 | match flags { 32 | MapFlags::ANONYMOUS => { 33 | let start_address = process.vm().create_entry_at(addr, len, prot, flags); 34 | 35 | Ok(start_address.as_u64() as isize) 36 | } 37 | _ => todo!("mmap: implement MAP_SHARED and MAP_PRIVATE"), 38 | } 39 | } 40 | 41 | /// Handles a page fault and returns whether the fault was handled successfully 42 | pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool { 43 | let process = current_process(); 44 | let mut process = process.lock(); 45 | 46 | let vm_entry = process.vm().get_entry_from_address(addr); 47 | 48 | if let Some(vm_entry) = vm_entry { 49 | if vm_entry.flags != MapFlags::ANONYMOUS { 50 | todo!("handle_page_fault: implement non-anonymous mappings"); 51 | } 52 | 53 | // If the page is present we don't need to map it 54 | // FIXME: this doesn't work when COW is implemented 55 | if 
error_code.contains(PageFaultErrorCode::PROTECTION_VIOLATION) {
            return false;
        }

        let base_addr = addr.align_down(Size4KiB::SIZE);
        // NOTE(review): the generic argument was stripped by extraction and is
        // reconstructed as Size4KiB (matching the 4 KiB alignment above) —
        // confirm against the original source.
        let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();

        let pt_flags = ptflags_from_protflags(vm_entry.prot, process.page_table.is_some());
        let pt = process.get_page_table().as_mut().unwrap();

        pt.map::<Size4KiB>(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true);

        true
    } else {
        // No VM entry covers this address: genuine fault, not demand paging.
        false
    }
}
--------------------------------------------------------------------------------
/kernel/src/mem/mod.rs:
--------------------------------------------------------------------------------
pub mod frame;
pub mod heap;
pub mod mmap;
pub mod paging;
pub mod vm;

use libxernel::sync::Once;
use limine::HhdmRequest;

static HHDM_REQUEST: HhdmRequest = HhdmRequest::new(0);

/// Offset of the higher-half direct map, provided by the bootloader.
// NOTE(review): generic reconstructed as Once<u64> (`offset` is a u64).
pub static HIGHER_HALF_OFFSET: Once<u64> = Once::new();

pub const KERNEL_OFFSET: u64 = 0xffff_ffff_8000_0000;
pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000;

// NOTE: stack grows down
pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000;

pub const PROCESS_START: u64 = 0x0000_0000_0040_0000;
pub const PROCESS_END: u64 = 0x0000_7fff_ffff_f000;

pub const STACK_SIZE: u64 = 0x40000;
pub const FRAME_SIZE: u64 = 4096;

/// Initializes the memory subsystems in dependency order:
/// physical frame allocator, kernel page tables, then the kernel heap.
pub fn init() {
    HIGHER_HALF_OFFSET.set_once(HHDM_REQUEST.get_response().get().unwrap().offset);

    frame::init();
    info!("pm initialized");

    paging::init();
    info!("vm initialized");

    heap::init();
    info!("heap initialized");
}
--------------------------------------------------------------------------------
/kernel/src/mem/vm.rs:
--------------------------------------------------------------------------------
use alloc::collections::BTreeMap;
use libxernel::syscall::{MapFlags, ProtectionFlags};
use x86_64::structures::paging::{PageTableFlags, PhysFrame}; 4 | use x86_64::{ 5 | structures::paging::{PageSize, Size4KiB}, 6 | VirtAddr, 7 | }; 8 | 9 | use crate::cpu::current_process; 10 | use crate::mem::PROCESS_END; 11 | 12 | use super::frame::FRAME_ALLOCATOR; 13 | use super::{PROCESS_START, STACK_SIZE}; 14 | 15 | pub struct VmEntry { 16 | pub start: VirtAddr, 17 | pub length: usize, 18 | pub prot: ProtectionFlags, 19 | pub flags: MapFlags, 20 | // TODO: add something to represent to which file this entry belongs to 21 | file: Option<()>, 22 | } 23 | 24 | impl VmEntry { 25 | pub fn end(&self) -> VirtAddr { 26 | self.start + self.length as u64 27 | } 28 | 29 | pub fn unmap(&self) { 30 | let process = current_process(); 31 | let mut process = process.lock(); 32 | 33 | // SAFETY: only userspace processes should have Vm mappings 34 | let page_mapper = process.get_page_table().as_mut().unwrap(); 35 | let mut frame_allocator = FRAME_ALLOCATOR.lock(); 36 | 37 | for page in (self.start..self.end()).step_by(Size4KiB::SIZE as usize) { 38 | if let Some(phys_addr) = page_mapper.translate(page) { 39 | unsafe { 40 | frame_allocator.deallocate_frame(PhysFrame::::containing_address(phys_addr)); 41 | } 42 | } 43 | 44 | page_mapper.unmap(page); 45 | } 46 | } 47 | } 48 | 49 | pub struct Vm { 50 | entries: BTreeMap, 51 | } 52 | 53 | impl Vm { 54 | pub const fn new() -> Self { 55 | Self { 56 | entries: BTreeMap::new(), 57 | } 58 | } 59 | 60 | fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) { 61 | let entry = VmEntry { 62 | start, 63 | length, 64 | prot, 65 | flags, 66 | file: None, 67 | }; 68 | 69 | self.entries.insert(start, entry); 70 | } 71 | 72 | pub fn is_available(&self, start: VirtAddr, length: usize) -> bool { 73 | let start = start.as_u64(); 74 | 75 | !self.entries.iter().any(|(_, entry)| { 76 | entry.start.as_u64() < start && entry.end().as_u64() + Size4KiB::SIZE > start 77 | || start + length as u64 + Size4KiB::SIZE > 
entry.start.as_u64() 78 | && (start + length as u64 + Size4KiB::SIZE) < entry.end().as_u64() + Size4KiB::SIZE 79 | }) 80 | } 81 | 82 | pub fn create_entry_low(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr { 83 | self.create_entry_at(VirtAddr::new(PROCESS_START), length, prot, flags) 84 | } 85 | 86 | pub fn create_entry_high(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr { 87 | let mut start_address = VirtAddr::new(PROCESS_END - length as u64); 88 | 89 | loop { 90 | if self.is_available(start_address, length) { 91 | if start_address.as_u64() < PROCESS_START { 92 | panic!( 93 | "create_entry_high: {:x}(length = {}) is out of bounds", 94 | start_address, length 95 | ); 96 | } 97 | 98 | self.add_entry(start_address, length, prot, flags); 99 | return start_address; 100 | } 101 | 102 | // NOTE: at the moment only a stack should be create at the high end of the process address space 103 | start_address -= STACK_SIZE; 104 | } 105 | } 106 | 107 | /// A new entry is created at the given address or higher 108 | pub fn create_entry_at( 109 | &mut self, 110 | mut start: VirtAddr, 111 | length: usize, 112 | prot: ProtectionFlags, 113 | flags: MapFlags, 114 | ) -> VirtAddr { 115 | if start.as_u64() + length as u64 > PROCESS_END { 116 | panic!("create_entry_at: {:x}(length = {}) is out of bounds", start, length); 117 | } 118 | 119 | if !start.is_aligned(Size4KiB::SIZE) { 120 | panic!("create_entry_at: {:x} is not aligned", start); 121 | } 122 | 123 | if start.as_u64() < PROCESS_START { 124 | start = VirtAddr::new(PROCESS_START); 125 | } 126 | 127 | if self.is_available(start, length) { 128 | self.add_entry(start, length, prot, flags); 129 | return start; 130 | } 131 | 132 | let mut values_iter = self.entries.values(); 133 | let mut previous = values_iter.next().unwrap(); 134 | let current = values_iter.next(); 135 | 136 | if current.is_none() { 137 | let new_start = previous.end() + Size4KiB::SIZE; 138 | let new_start 
= new_start.align_up(Size4KiB::SIZE); 139 | 140 | self.add_entry(new_start, length, prot, flags); 141 | return new_start; 142 | } 143 | 144 | let mut current = current.unwrap(); 145 | 146 | loop { 147 | if current.start - previous.end() >= length as u64 + 2 * Size4KiB::SIZE { 148 | let new_start = previous.end() + Size4KiB::SIZE; 149 | let new_start = new_start.align_up(Size4KiB::SIZE); 150 | 151 | self.add_entry(new_start, length, prot, flags); 152 | return new_start; 153 | } 154 | 155 | previous = current; 156 | let current_opt = values_iter.next(); 157 | 158 | if current_opt.is_none() { 159 | let new_start = previous.end() + Size4KiB::SIZE; 160 | let new_start = new_start.align_up(Size4KiB::SIZE); 161 | 162 | if new_start.as_u64() + length as u64 > PROCESS_END { 163 | panic!( 164 | "create_entry_at: {:x}(length = {}) is out of bounds! Vm space is exhausted", 165 | new_start, length 166 | ); 167 | } 168 | 169 | self.add_entry(new_start, length, prot, flags); 170 | return new_start; 171 | } 172 | 173 | current = current_opt.unwrap(); 174 | } 175 | } 176 | 177 | pub fn get_entry_from_address(&self, addr: VirtAddr) -> Option<&VmEntry> { 178 | self.entries 179 | .iter() 180 | .find(|(_, entry)| entry.start <= addr && entry.end() > addr) 181 | .map(|(_, entry)| entry) 182 | } 183 | 184 | pub fn clean_up(&mut self) { 185 | self.entries.values().for_each(|value| value.unmap()); 186 | self.entries.clear(); 187 | } 188 | } 189 | 190 | pub fn ptflags_from_protflags(flags: ProtectionFlags, user_accessible: bool) -> PageTableFlags { 191 | let mut new_flags = PageTableFlags::PRESENT; 192 | 193 | if user_accessible { 194 | new_flags |= PageTableFlags::USER_ACCESSIBLE; 195 | } 196 | 197 | if !flags.contains(ProtectionFlags::READ) { 198 | // NOTE: it is not possible to remove read access from a page 199 | } 200 | 201 | if flags.contains(ProtectionFlags::WRITE) { 202 | new_flags |= PageTableFlags::WRITABLE; 203 | } 204 | 205 | if !flags.contains(ProtectionFlags::EXECUTE) { 206 | 
new_flags |= PageTableFlags::NO_EXECUTE; 207 | } 208 | 209 | new_flags 210 | } 211 | 212 | pub fn protflags_from_ptflags(flags: PageTableFlags) -> ProtectionFlags { 213 | let mut new_flags = ProtectionFlags::empty(); 214 | 215 | if flags.contains(PageTableFlags::WRITABLE) { 216 | new_flags |= ProtectionFlags::WRITE; 217 | } 218 | 219 | if !flags.contains(PageTableFlags::NO_EXECUTE) { 220 | new_flags |= ProtectionFlags::EXECUTE; 221 | } 222 | 223 | new_flags 224 | } 225 | -------------------------------------------------------------------------------- /kernel/src/sched/context.rs: -------------------------------------------------------------------------------- 1 | use core::arch::naked_asm; 2 | 3 | #[derive(Debug, Clone, Copy, Default)] 4 | #[repr(C)] 5 | pub struct Context { 6 | pub rbx: u64, 7 | pub rbp: u64, 8 | 9 | pub r12: u64, 10 | pub r13: u64, 11 | pub r14: u64, 12 | pub r15: u64, 13 | 14 | pub rip: u64, 15 | } 16 | 17 | impl Context { 18 | pub const fn new() -> Self { 19 | Self { 20 | r15: 0, 21 | r14: 0, 22 | r13: 0, 23 | r12: 0, 24 | rbx: 0, 25 | rbp: 0, 26 | rip: 0, 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Copy, Default)] 32 | #[repr(C)] 33 | pub struct TrapFrame { 34 | pub rbp: u64, 35 | pub rax: u64, 36 | pub rbx: u64, 37 | pub rcx: u64, 38 | pub rdx: u64, 39 | pub rsi: u64, 40 | pub rdi: u64, 41 | pub r8: u64, 42 | pub r9: u64, 43 | pub r10: u64, 44 | pub r11: u64, 45 | pub r12: u64, 46 | pub r13: u64, 47 | pub r14: u64, 48 | pub r15: u64, 49 | pub error_code: u64, // might be fake 50 | pub rip: u64, 51 | pub cs: u64, 52 | pub rflags: u64, 53 | pub rsp: u64, 54 | pub ss: u64, 55 | } 56 | 57 | impl TrapFrame { 58 | /// Creates a new, zero-initialized context 59 | pub const fn new() -> Self { 60 | Self { 61 | rbp: 0, 62 | rax: 0, 63 | rbx: 0, 64 | rcx: 0, 65 | rdx: 0, 66 | rsi: 0, 67 | rdi: 0, 68 | r8: 0, 69 | r9: 0, 70 | r10: 0, 71 | r11: 0, 72 | r12: 0, 73 | r13: 0, 74 | r14: 0, 75 | r15: 0, 76 | error_code: 0, 77 | rip: 0, 78 | cs: 0, 79 
| rflags: 0, 80 | rsp: 0, 81 | ss: 0, 82 | } 83 | } 84 | } 85 | 86 | // TODO: Maybe rework switching to new thread 87 | // TODO: Move to switch.S since platform dependant 88 | #[naked] 89 | /// Restores the gives TrapFrame and jumps to new RIP via iretq 90 | /// Is used to startup a new thread when it's first executed 91 | pub extern "C" fn thread_trampoline() -> ! { 92 | unsafe { 93 | naked_asm!( 94 | "mov rax, 0; 95 | mov cr8, rax; 96 | mov rsp, rbx; 97 | pop rbp; 98 | pop rax; 99 | pop rbx; 100 | pop rcx; 101 | pop rdx; 102 | pop rsi; 103 | pop rdi; 104 | pop r8; 105 | pop r9; 106 | pop r10; 107 | pop r11; 108 | pop r12; 109 | pop r13; 110 | pop r14; 111 | pop r15; 112 | add rsp, 0x8; 113 | iretq;" 114 | ) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /kernel/src/sched/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod context; 2 | pub mod process; 3 | pub mod scheduler; 4 | pub mod thread; 5 | -------------------------------------------------------------------------------- /kernel/src/sched/process.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Weak; 2 | use core::sync::atomic::{AtomicUsize, Ordering}; 3 | use libxernel::syscall::{MapFlags, ProtectionFlags}; 4 | use x86_64::structures::paging::{Page, PageSize, PageTableFlags, Size4KiB}; 5 | use x86_64::{align_down, align_up, VirtAddr}; 6 | 7 | use crate::fs::file::File; 8 | use crate::fs::vnode::VNode; 9 | use crate::mem::frame::FRAME_ALLOCATOR; 10 | use crate::mem::vm::{protflags_from_ptflags, Vm}; 11 | use crate::mem::{HIGHER_HALF_OFFSET, KERNEL_THREAD_STACK_TOP, PROCESS_START, STACK_SIZE}; 12 | use crate::VFS; 13 | use alloc::collections::BTreeMap; 14 | use alloc::sync::Arc; 15 | use alloc::vec::Vec; 16 | 17 | use libxernel::sync::{Once, Spinlock}; 18 | 19 | use crate::mem::paging::{Pagemap, KERNEL_PAGE_MAPPER}; 20 | use 
crate::sched::thread::Thread; 21 | 22 | /// Ongoing counter for the ProcessID 23 | static PROCESS_ID_COUNTER: AtomicUsize = AtomicUsize::new(0); 24 | 25 | pub static KERNEL_PROCESS: Once>> = Once::new(); 26 | 27 | pub struct Process { 28 | pub pid: usize, 29 | /// A kernel process has no page table 30 | pub page_table: Option, 31 | pub parent: Weak>, 32 | pub children: Vec>>, 33 | pub threads: Vec>>, 34 | pub fds: BTreeMap, 35 | pub kernel_thread_stack_top: usize, 36 | pub thread_id_counter: usize, 37 | pub vm: Vm, 38 | pub cwd: Arc>, 39 | } 40 | 41 | impl Process { 42 | pub fn new(parent_process: Option>>) -> Self { 43 | let mut page_map = Pagemap::new(None); 44 | page_map.fill_with_kernel_entries(); 45 | 46 | let parent = match parent_process { 47 | Some(p) => Arc::downgrade(&p), 48 | None => Weak::new(), 49 | }; 50 | 51 | Self { 52 | pid: PROCESS_ID_COUNTER.fetch_add(1, Ordering::AcqRel), 53 | page_table: Some(page_map), 54 | parent, 55 | children: Vec::new(), 56 | threads: Vec::new(), 57 | fds: BTreeMap::new(), 58 | kernel_thread_stack_top: KERNEL_THREAD_STACK_TOP as usize, 59 | thread_id_counter: 0, 60 | vm: Vm::new(), 61 | cwd: VFS.lock().root_node(), 62 | } 63 | } 64 | 65 | pub fn new_kernel_stack(&mut self) -> usize { 66 | let stack_top = self.kernel_thread_stack_top; 67 | self.kernel_thread_stack_top -= STACK_SIZE as usize; 68 | let stack_bottom = self.kernel_thread_stack_top; 69 | 70 | // create guard page 71 | self.kernel_thread_stack_top -= Size4KiB::SIZE as usize; 72 | 73 | for addr in (stack_bottom..stack_top).step_by(Size4KiB::SIZE as usize) { 74 | let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); 75 | let virt_page = Page::from_start_address(VirtAddr::new(addr as u64)).unwrap(); 76 | 77 | KERNEL_PAGE_MAPPER.lock().map( 78 | phys_page, 79 | virt_page, 80 | PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE, 81 | true, 82 | ); 83 | } 84 | 85 | stack_top 86 | } 87 | 88 | pub fn new_user_stack(&mut self) -> 
usize { 89 | let stack_bottom = self 90 | .vm 91 | .create_entry_high( 92 | STACK_SIZE as usize, 93 | ProtectionFlags::READ | ProtectionFlags::WRITE, 94 | MapFlags::ANONYMOUS, 95 | ) 96 | .as_u64() as usize; 97 | let stack_top = STACK_SIZE as usize + stack_bottom; 98 | 99 | for addr in (stack_bottom..stack_top).step_by(Size4KiB::SIZE as usize) { 100 | let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); 101 | let virt_page = Page::from_start_address(VirtAddr::new(addr as u64)).unwrap(); 102 | 103 | self.page_table.as_mut().unwrap().map( 104 | phys_page, 105 | virt_page, 106 | PageTableFlags::PRESENT 107 | | PageTableFlags::WRITABLE 108 | | PageTableFlags::USER_ACCESSIBLE 109 | | PageTableFlags::NO_EXECUTE, 110 | false, 111 | ); 112 | } 113 | 114 | stack_top 115 | } 116 | 117 | /// Load an ELF file into the process memory 118 | /// 119 | /// Returns the entry point of the ELF file 120 | pub fn load_elf(&mut self, elf_data: &[u8]) -> VirtAddr { 121 | let elf = elf::ElfBytes::::minimal_parse(elf_data).expect("Failed to parse ELF"); 122 | 123 | for ph in elf.segments().expect("Failed to get program headers") { 124 | if ph.p_type == elf::abi::PT_LOAD { 125 | let start = ph.p_vaddr + PROCESS_START; 126 | let end = start + ph.p_memsz; 127 | 128 | let page_start = align_down(start, Size4KiB::SIZE); 129 | let page_end = align_up(end, Size4KiB::SIZE); 130 | 131 | let mut flags = PageTableFlags::PRESENT | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::WRITABLE; 132 | 133 | if ph.p_flags & elf::abi::PF_X == 0 { 134 | // flags |= PageTableFlags::NO_EXECUTE; TODO: fix NO_EXECUTE in page mapper 135 | } 136 | if ph.p_flags & elf::abi::PF_W != 0 { 137 | flags |= PageTableFlags::WRITABLE; 138 | } 139 | 140 | for addr in (page_start..page_end).step_by(Size4KiB::SIZE as usize) { 141 | let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); 142 | let virt_page = Page::from_start_address(VirtAddr::new(addr)).unwrap(); 143 | 144 | self.page_table 145 | 
.as_mut() 146 | .unwrap() 147 | .map(phys_page, virt_page, flags, false); 148 | 149 | // write data to the page 150 | let page_offset = if addr.overflowing_sub(start).1 { start - addr } else { 0 }; 151 | let data_len = Size4KiB::SIZE - page_offset; 152 | let segment_offset: u64 = addr + page_offset - start; 153 | 154 | let data = &elf_data 155 | [(ph.p_offset + segment_offset) as usize..(ph.p_offset + segment_offset + data_len) as usize]; 156 | 157 | unsafe { 158 | core::ptr::copy( 159 | data.as_ptr(), 160 | (phys_page.start_address().as_u64() + page_offset + *HIGHER_HALF_OFFSET) as *mut u8, 161 | data_len as usize, 162 | ); 163 | } 164 | } 165 | 166 | self.vm.create_entry_at( 167 | VirtAddr::new(page_start), 168 | (page_end - page_start) as usize, 169 | protflags_from_ptflags(flags), 170 | MapFlags::ANONYMOUS, 171 | ); 172 | } 173 | } 174 | 175 | VirtAddr::new(elf.ehdr.e_entry + PROCESS_START) 176 | } 177 | 178 | pub fn next_tid(&mut self) -> usize { 179 | let tid = self.thread_id_counter; 180 | self.thread_id_counter += 1; 181 | 182 | tid 183 | } 184 | 185 | pub fn append_fd(&mut self, file_handle: File) -> u32 { 186 | let mut counter = 0; 187 | 188 | let fd = loop { 189 | if let alloc::collections::btree_map::Entry::Vacant(e) = self.fds.entry(counter) { 190 | e.insert(file_handle); 191 | break counter; 192 | } 193 | 194 | counter += 1; 195 | }; 196 | 197 | fd as u32 198 | } 199 | 200 | pub fn get_filehandle_from_fd(&self, fd: usize) -> &File { 201 | let handle = self.fds.get(&fd).expect("Failed to get FileHandle for fd"); 202 | 203 | handle 204 | } 205 | 206 | pub fn get_page_table(&mut self) -> &mut Option { 207 | &mut self.page_table 208 | } 209 | 210 | pub fn vm(&mut self) -> &mut Vm { 211 | &mut self.vm 212 | } 213 | } 214 | 215 | impl Drop for Process { 216 | fn drop(&mut self) { 217 | self.vm.clean_up(); 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /kernel/src/sched/scheduler.rs: 
--------------------------------------------------------------------------------
use crate::arch::amd64::gdt::GDT_BSP;
use crate::arch::amd64::switch_context;
use crate::cpu::current_cpu;
use crate::timer::timer_event::TimerEvent;
use alloc::sync::Arc;
use core::time::Duration;
use x86_64::registers::control::Cr3;
use x86_64::registers::segmentation::{Segment, DS};

use super::thread::{Thread, ThreadStatus};

/// Timer-driven scheduling decision for the current CPU.
///
/// Pops the head of the run queue (immediately rotating it to the back for
/// round-robin), falls back to the idle thread when the queue is empty, and
/// records the chosen thread in `cpu.next` for the context-switch path.
/// Re-arms the reschedule timer with the chosen thread's priority time slice.
pub fn reschedule(_: ()) {
    let cpu = current_cpu();

    let next_ref = cpu.run_queue.aquire().pop_front();

    let current_ref = cpu.current_thread.aquire().clone();

    let old = if let Some(current_thread) = current_ref {
        current_thread.clone()
    } else {
        // No current thread recorded — treat the idle thread as "old".
        **cpu.current_thread.aquire() = Some(cpu.idle_thread.clone());
        cpu.idle_thread.clone()
    };

    let new = if let Some(next_thread) = next_ref {
        // Round-robin: the dequeued thread is re-queued at the back.
        cpu.run_queue.aquire().push_back(next_thread.clone());

        next_thread.clone()
    } else {
        cpu.idle_thread.clone()
    };

    register_reschedule_event(new.priority.ms());

    // Same thread keeps running — nothing to switch.
    if Arc::ptr_eq(&old, &new) {
        return;
    }

    **cpu.next.aquire() = Some(new);
}

/// Adds a thread to the current CPU's run queue.
pub fn enqueue_thread(thread: Thread) {
    current_cpu().run_queue.aquire().push_back(Arc::new(thread));
}

/// Removes the given thread from the current CPU's run queue.
///
/// Returns the removed thread, or `None` if it was not queued on this CPU.
pub fn dequeue_thread(thread: Arc<Thread>) -> Option<Arc<Thread>> {
    let cpu = current_cpu();

    // Hold the queue lock across search AND removal so the index stays valid.
    // The previous implementation released the lock between the two steps and,
    // worse, removed index 0 when the thread was not found at all, evicting an
    // unrelated thread from the queue.
    let mut run_queue = cpu.run_queue.aquire();

    let index = run_queue.iter().position(|thrd| Arc::ptr_eq(&thread, thrd))?;

    run_queue.remove(index)
}

/// Performs the actual context switch from `old` to `new`.
pub fn switch_threads(old: Arc<Thread>, new: Arc<Thread>) {
    old.status.set(ThreadStatus::Ready);

    new.status.set(ThreadStatus::Running);

    if !new.is_kernel_thread() {
        unsafe {
            let process = new.process.upgrade().unwrap();
            let mut process =
process.lock(); 72 | 73 | // SAFETY: A user thread always has a page table 74 | let pt = process.get_page_table().as_mut().unwrap(); 75 | 76 | let cr3 = Cr3::read_raw(); 77 | 78 | let cr3 = cr3.0.start_address().as_u64() | cr3.1 as u64; 79 | 80 | if cr3 != pt.pml4().as_u64() { 81 | pt.load_pt(); 82 | } 83 | 84 | DS::set_reg(GDT_BSP.1.user_data_selector); 85 | 86 | current_cpu() 87 | .kernel_stack 88 | .set(new.kernel_stack.as_ref().unwrap().kernel_stack_top); 89 | } 90 | } 91 | 92 | **current_cpu().current_thread.aquire() = Some(new.clone()); 93 | 94 | unsafe { 95 | switch_context(old.context.get(), *new.context.get()); 96 | } 97 | } 98 | 99 | fn register_reschedule_event(millis: u64) { 100 | let event = TimerEvent::new(reschedule, (), Duration::from_millis(millis), false); 101 | 102 | let cpu = current_cpu(); 103 | 104 | cpu.enqueue_timer(event); 105 | } 106 | -------------------------------------------------------------------------------- /kernel/src/sched/thread.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use alloc::sync::Arc; 3 | use alloc::sync::Weak; 4 | use core::cell::{Cell, UnsafeCell}; 5 | use core::pin::Pin; 6 | 7 | use x86_64::VirtAddr; 8 | 9 | use libxernel::sync::Spinlock; 10 | use x86_64::structures::paging::{Page, PageSize, PhysFrame, Size4KiB}; 11 | 12 | use crate::mem::frame::FRAME_ALLOCATOR; 13 | use crate::mem::paging::KERNEL_PAGE_MAPPER; 14 | use crate::mem::STACK_SIZE; 15 | 16 | use super::context::thread_trampoline; 17 | use super::context::{Context, TrapFrame}; 18 | use super::process::{Process, KERNEL_PROCESS}; 19 | 20 | #[derive(Debug, Clone, PartialEq, Eq, Copy)] 21 | /// Current status of the thread 22 | pub enum ThreadStatus { 23 | Initial, 24 | Running, 25 | Ready, 26 | Sleeping, 27 | BlockingOnIo, 28 | // TODO: better name 29 | Done, 30 | } 31 | 32 | #[derive(Debug, Clone, Copy)] 33 | /// Priority level of the thread 34 | pub enum ThreadPriority { 35 | Low, 36 | 
Normal, 37 | High, 38 | } 39 | 40 | impl ThreadPriority { 41 | /// Get the number of ms the thread can run from the priority 42 | pub fn ms(&self) -> u64 { 43 | match *self { 44 | Self::Low => 20, 45 | Self::Normal => 35, 46 | Self::High => 50, 47 | } 48 | } 49 | } 50 | 51 | #[derive(Debug, Copy, Clone)] 52 | #[repr(C, packed)] 53 | pub struct KernelStack { 54 | pub user_space_stack: usize, 55 | pub kernel_stack_top: usize, 56 | } 57 | 58 | pub struct Thread { 59 | pub id: usize, 60 | pub process: Weak>, 61 | pub status: Cell, 62 | pub priority: ThreadPriority, 63 | pub context: UnsafeCell<*mut Context>, 64 | pub trap_frame: UnsafeCell<*mut TrapFrame>, 65 | // pub affinity 66 | pub thread_stack: usize, 67 | /// Only a user space thread has a kernel stack 68 | pub kernel_stack: Option>>, 69 | } 70 | 71 | unsafe impl Sync for Thread {} 72 | unsafe impl Send for Thread {} 73 | 74 | impl Thread { 75 | pub fn new_kernel_thread(entry_point: VirtAddr) -> Self { 76 | let thread_stack = KERNEL_PROCESS.lock().new_kernel_stack(); 77 | 78 | let mut trap_frame = TrapFrame::new(); 79 | 80 | trap_frame.ss = 0x10; // kernel stack segment 81 | trap_frame.cs = 0x8; // kernel code segment 82 | trap_frame.rip = entry_point.as_u64(); 83 | trap_frame.rflags = 0x202; 84 | 85 | let mut context = Context::new(); 86 | 87 | context.rip = thread_trampoline as u64; 88 | 89 | let (trap_ptr, ctx_ptr) = unsafe { Thread::setup_stack(thread_stack, trap_frame, context, false) }; 90 | 91 | let mut parent = KERNEL_PROCESS.lock(); 92 | 93 | let tid = parent.next_tid(); 94 | 95 | Self { 96 | id: tid, 97 | process: Arc::downgrade(&KERNEL_PROCESS), 98 | status: Cell::new(ThreadStatus::Initial), 99 | priority: ThreadPriority::Normal, 100 | context: UnsafeCell::new(ctx_ptr), 101 | trap_frame: UnsafeCell::new(trap_ptr), 102 | thread_stack, 103 | kernel_stack: None, 104 | } 105 | } 106 | 107 | pub fn kernel_thread_from_fn(entry: fn()) -> Self { 108 | let thread_stack = 
KERNEL_PROCESS.lock().new_kernel_stack(); 109 | 110 | let mut trap_frame = TrapFrame::new(); 111 | 112 | trap_frame.ss = 0x10; // kernel stack segment 113 | trap_frame.cs = 0x8; // kernel code segment 114 | trap_frame.rip = entry as u64; 115 | trap_frame.rflags = 0x202; 116 | 117 | let mut context = Context::new(); 118 | 119 | context.rip = thread_trampoline as u64; 120 | 121 | let (trap_ptr, ctx_ptr) = unsafe { Thread::setup_stack(thread_stack, trap_frame, context, false) }; 122 | 123 | let mut parent = KERNEL_PROCESS.lock(); 124 | 125 | let tid = parent.next_tid(); 126 | 127 | Self { 128 | id: tid, 129 | process: Arc::downgrade(&KERNEL_PROCESS), 130 | status: Cell::new(ThreadStatus::Initial), 131 | priority: ThreadPriority::Normal, 132 | trap_frame: UnsafeCell::new(trap_ptr), 133 | context: UnsafeCell::new(ctx_ptr), 134 | thread_stack, 135 | kernel_stack: None, 136 | } 137 | } 138 | 139 | pub fn new_user_thread(parent_process: Arc>, entry_point: VirtAddr) -> Self { 140 | let thread_stack = parent_process.lock().new_user_stack(); 141 | let kernel_stack_end = parent_process.lock().new_kernel_stack(); 142 | 143 | let mut trap_frame = TrapFrame::new(); 144 | 145 | trap_frame.ss = 0x2b; // user stack segment 146 | trap_frame.cs = 0x33; // user code segment 147 | trap_frame.rip = entry_point.as_u64(); 148 | trap_frame.rsp = thread_stack as u64; 149 | trap_frame.rflags = 0x202; 150 | 151 | let mut context = Context::new(); 152 | 153 | context.rip = thread_trampoline as u64; 154 | 155 | let mut parent = parent_process.lock(); 156 | 157 | let (trap_ptr, ctx_ptr) = unsafe { Thread::setup_stack(kernel_stack_end, trap_frame, context, true) }; 158 | 159 | Self { 160 | id: parent.next_tid(), 161 | thread_stack, 162 | process: Arc::downgrade(&parent_process), 163 | status: Cell::new(ThreadStatus::Initial), 164 | priority: ThreadPriority::Normal, 165 | trap_frame: UnsafeCell::new(trap_ptr), 166 | context: UnsafeCell::new(ctx_ptr), 167 | kernel_stack: Some(Box::pin(KernelStack { 
168 | user_space_stack: 0, 169 | kernel_stack_top: kernel_stack_end - 27, 170 | })), 171 | } 172 | } 173 | 174 | unsafe fn setup_stack( 175 | stack: usize, 176 | trap_frame: TrapFrame, 177 | ctx: Context, 178 | is_user: bool, 179 | ) -> (*mut TrapFrame, *mut Context) { 180 | let ptr = (stack as *mut u64).offset(-1); 181 | 182 | let ctx_begin = -27; 183 | let frame_begin = -20; 184 | let end_of_combined_frame = -27; 185 | 186 | (ptr.offset(frame_begin) as *mut TrapFrame).write(trap_frame); 187 | 188 | if is_user { 189 | ptr.offset(frame_begin + 19).write(trap_frame.rsp); 190 | } else { 191 | ptr.offset(frame_begin + 19) 192 | .write(ptr.offset(end_of_combined_frame) as u64); 193 | } 194 | 195 | (ptr.offset(ctx_begin) as *mut Context).write(ctx); 196 | ptr.offset(ctx_begin).write(ptr.offset(frame_begin) as u64); 197 | 198 | ( 199 | ptr.offset(frame_begin) as *mut TrapFrame, 200 | ptr.offset(ctx_begin) as *mut Context, 201 | ) 202 | } 203 | 204 | pub fn idle_thread() -> Self { 205 | let thread_stack = KERNEL_PROCESS.lock().new_kernel_stack(); 206 | 207 | let mut parent = KERNEL_PROCESS.lock(); 208 | 209 | Self { 210 | id: parent.next_tid(), 211 | process: Arc::downgrade(&KERNEL_PROCESS), 212 | status: Cell::new(ThreadStatus::Ready), 213 | priority: ThreadPriority::Low, 214 | context: UnsafeCell::new(core::ptr::null_mut()), 215 | trap_frame: UnsafeCell::new(core::ptr::null_mut()), 216 | thread_stack, 217 | kernel_stack: None, 218 | } 219 | } 220 | 221 | pub fn set_priority(&mut self, priority: ThreadPriority) { 222 | self.priority = priority; 223 | } 224 | 225 | pub fn is_kernel_thread(&self) -> bool { 226 | unsafe { 227 | let trap_frame_ptr = self.trap_frame.get(); 228 | if !trap_frame_ptr.is_null() { 229 | let trap_frame_ref = *trap_frame_ptr; 230 | (*trap_frame_ref).cs == 0x8 && (*trap_frame_ref).ss == 0x10 231 | } else { 232 | false 233 | } 234 | } 235 | } 236 | 237 | pub fn get_process(&self) -> Option>> { 238 | self.process.upgrade() 239 | } 240 | } 241 | 242 | 
impl Drop for Thread { 243 | fn drop(&mut self) { 244 | if self.is_kernel_thread() { 245 | let mut page_mapper = KERNEL_PAGE_MAPPER.lock(); 246 | let mut frame_allocator = FRAME_ALLOCATOR.lock(); 247 | 248 | for addr in (self.thread_stack..self.thread_stack + STACK_SIZE as usize).step_by(Size4KiB::SIZE as usize) { 249 | unsafe { 250 | let page = Page::::from_start_address(VirtAddr::new(addr as u64)).unwrap(); 251 | let phys_addr = page_mapper.translate(page.start_address()).unwrap(); 252 | 253 | frame_allocator.deallocate_frame(PhysFrame::::containing_address(phys_addr)); 254 | page_mapper.unmap(page.start_address()); 255 | } 256 | } 257 | } 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /kernel/src/syscall/mod.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::{String, ToString}; 2 | use core::{ 3 | arch::naked_asm, 4 | ffi::{c_char, CStr}, 5 | }; 6 | use libxernel::syscall::{SyscallError, SYS_CLOSE, SYS_LOG, SYS_MMAP, SYS_OPEN, SYS_READ, SYS_WRITE}; 7 | use x86_64::{ 8 | registers::{ 9 | model_specific::{Efer, EferFlags, LStar, Star}, 10 | rflags::RFlags, 11 | }, 12 | VirtAddr, 13 | }; 14 | 15 | use crate::{ 16 | arch::amd64::gdt::GDT_BSP, 17 | fs::{self, vfs_syscalls}, 18 | mem::mmap::mmap, 19 | }; 20 | 21 | impl From for SyscallError { 22 | fn from(err: fs::Error) -> SyscallError { 23 | match err { 24 | fs::Error::VNodeNotFound => SyscallError::VNodeNotFound, 25 | fs::Error::NotADirectory => SyscallError::NotADirectory, 26 | fs::Error::IsADirectory => SyscallError::IsADirectory, 27 | fs::Error::NoSpace => SyscallError::NoSpace, 28 | fs::Error::NotEmpty => SyscallError::NotEmpty, 29 | fs::Error::EntryNotFound => SyscallError::EntryNotFound, 30 | fs::Error::MountPointNotFound => SyscallError::MountPointNotFound, 31 | fs::Error::FileSystemNotFound => SyscallError::FileSystemNotFound, 32 | } 33 | } 34 | } 35 | 36 | pub type Result = core::result::Result; 
37 | 38 | pub fn init() { 39 | // set IA32_STAR 40 | Star::write( 41 | GDT_BSP.1.user_code_selector, 42 | GDT_BSP.1.user_data_selector, 43 | GDT_BSP.1.code_selector, 44 | GDT_BSP.1.data_selector, 45 | ) 46 | .unwrap(); 47 | 48 | // enable IA32_EFER 49 | unsafe { 50 | Efer::write(Efer::read() | EferFlags::SYSTEM_CALL_EXTENSIONS); 51 | } 52 | 53 | LStar::write(VirtAddr::new(asm_syscall_handler as u64)); 54 | 55 | // disable interrupts when syscall handler is called 56 | x86_64::registers::model_specific::SFMask::write(RFlags::INTERRUPT_FLAG); 57 | } 58 | 59 | #[derive(Debug)] 60 | #[repr(C)] 61 | struct SyscallData { 62 | syscall_number: usize, 63 | arg0: usize, 64 | arg1: usize, 65 | arg2: usize, 66 | arg3: usize, 67 | arg4: usize, 68 | arg5: usize, 69 | eflags: usize, 70 | return_address: usize, 71 | } 72 | 73 | /* 74 | * Register setup: 75 | * rax system call number 76 | * rdi arg0 77 | * rsi arg1 78 | * rdx arg2 79 | * r10 arg3 80 | * r8 arg4 81 | * r9 arg5 82 | * r11 eflags for syscall/sysret 83 | * rcx return address for syscall/sysret 84 | */ 85 | 86 | #[naked] 87 | unsafe extern "C" fn asm_syscall_handler() { 88 | naked_asm!( 89 | " 90 | swapgs # gs contains the stackpointer for this thread now 91 | 92 | mov gs:0, rsp # save the stackpointer for this task 93 | mov rsp, gs:8 # load the kernel stackpointer for this task 94 | 95 | swapgs # TODO: fix the kernel to not rely on the KERNEL_GS_BASE MSR containing the cpu_data 96 | 97 | # backup registers for sysretq 98 | push rbp 99 | push rbx # save callee-saved registers 100 | push r12 101 | push r13 102 | push r14 103 | push r15 104 | 105 | # save the syscall data 106 | push rcx 107 | push r11 108 | push r9 109 | push r8 110 | push r10 111 | push rdx 112 | push rsi 113 | push rdi 114 | push rax 115 | 116 | mov rdi, rsp # pass the SyscallData struct to the syscall handler 117 | 118 | sti # enable interrupts 119 | 120 | call general_syscall_handler 121 | 122 | cli # disable interrupts, interrupts are automatically 
re-enabled when the syscall handler returns 123 | 124 | # restore the syscall data 125 | pop rdi # we don't restore rax as it's the return value of the syscall 126 | pop rdi 127 | pop rsi 128 | pop rdx 129 | pop r10 130 | pop r8 131 | pop r9 132 | pop r11 133 | pop rcx 134 | 135 | pop r15 # restore callee-saved registers 136 | pop r14 137 | pop r13 138 | pop r12 139 | pop rbx 140 | pop rbp # restore stack and registers for sysretq 141 | 142 | swapgs # TODO: fix the kernel to not rely on the KERNEL_GS_BASE MSR containing the cpu_data 143 | 144 | mov rsp, gs:0 # load the stackpointer for this task 145 | 146 | swapgs 147 | sysretq 148 | " 149 | ); 150 | } 151 | 152 | fn syscall_arg_to_slice<'a, T>(ptr: usize, len: usize) -> &'a mut [T] { 153 | unsafe { core::slice::from_raw_parts_mut(ptr as *mut T, len) } 154 | } 155 | 156 | fn syscall_arg_to_reference<'a, T>(ptr: usize) -> &'a mut T { 157 | unsafe { &mut *(ptr as *mut T) } 158 | } 159 | 160 | fn syscall_arg_to_string(ptr: usize) -> Option { 161 | unsafe { 162 | CStr::from_ptr(ptr as *const c_char) 163 | .to_str() 164 | .ok() 165 | .map(|s| s.to_string()) 166 | } 167 | } 168 | 169 | #[no_mangle] 170 | extern "sysv64" fn general_syscall_handler(data: SyscallData) -> i64 { 171 | // println!("general_syscall_handler: {:#x?}", data); 172 | 173 | let result = match data.syscall_number { 174 | SYS_READ => vfs_syscalls::sys_read(data.arg0, syscall_arg_to_slice(data.arg1, data.arg2)), 175 | SYS_WRITE => vfs_syscalls::sys_write(data.arg0, syscall_arg_to_slice(data.arg1, data.arg2)), 176 | SYS_OPEN => { 177 | let path = syscall_arg_to_string(data.arg0); 178 | 179 | match path { 180 | Some(path) => vfs_syscalls::sys_open(path, data.arg1 as u64), 181 | None => Err(SyscallError::MalformedPath), 182 | } 183 | } 184 | SYS_CLOSE => vfs_syscalls::sys_close(data.arg0), 185 | SYS_MMAP => mmap(data.arg0, data.arg1, data.arg2, data.arg3, data.arg4, data.arg5), 186 | SYS_LOG => { 187 | let message = syscall_arg_to_string(data.arg0); 188 | 
189 | match message { 190 | Some(message) => { 191 | info!("{}", message); 192 | Ok(0) 193 | } 194 | None => Err(SyscallError::InvalidArgument), 195 | } 196 | } 197 | _ => { 198 | unimplemented!("unknown syscall: {:x?}", data); 199 | } 200 | }; 201 | 202 | match result { 203 | Ok(value) => value as i64, 204 | Err(error) => error as i64, 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /kernel/src/timer/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod timer_event; 2 | pub mod timer_queue; 3 | 4 | use core::{ 5 | sync::atomic::{AtomicUsize, Ordering}, 6 | time::Duration, 7 | }; 8 | 9 | use crate::{arch::amd64::interrupts::allocate_vector, cpu::current_cpu}; 10 | 11 | use self::timer_event::TimerEvent; 12 | 13 | use crate::amd64::interrupts::register_handler; 14 | use crate::amd64::tsc; 15 | use crate::apic::APIC; 16 | use crate::sched::context::TrapFrame; 17 | use libxernel::{ipl::IPL, sync::Once}; 18 | 19 | static UPTIME: AtomicUsize = AtomicUsize::new(0); 20 | static TIMER_VECTOR: Once = Once::new(); 21 | 22 | pub fn init() { 23 | tsc::calibrate_tsc(); 24 | 25 | if let Some(vec) = allocate_vector(IPL::Clock) { 26 | TIMER_VECTOR.set_once(vec); 27 | } else { 28 | panic!("Could not allocate timer vector"); 29 | } 30 | 31 | register_handler(*TIMER_VECTOR, timer_interrupt_handler); 32 | } 33 | 34 | pub fn timer_interrupt_handler(_frame: &mut TrapFrame) { 35 | // if periodic, add again to queue 36 | // set timer to next event in queue 37 | 38 | let cpu = current_cpu(); 39 | 40 | let mut timer_queue = cpu.timer_queue.aquire_at(IPL::High); 41 | 42 | //timer_queue.deadlines(); 43 | 44 | timer_queue.event_dispatch(); 45 | 46 | let next_event = timer_queue.events.front(); 47 | 48 | if let Some(event) = next_event { 49 | APIC.oneshot(*TIMER_VECTOR, &event.deadline); 50 | 51 | // TODO: Find a way to clone event 52 | if event.periodic { 53 | 
//timer_queue.queue_event(event.clone()); 54 | } 55 | } else { 56 | // No event in event queue? 57 | } 58 | } 59 | 60 | pub fn hardclock(_: ()) { 61 | println!("hardclock event with uptime {:?}", UPTIME); 62 | UPTIME.fetch_add(1, Ordering::SeqCst); 63 | let event = TimerEvent::new(hardclock, (), Duration::from_secs(1), false); 64 | 65 | current_cpu().enqueue_timer(event); 66 | } 67 | -------------------------------------------------------------------------------- /kernel/src/timer/timer_event.rs: -------------------------------------------------------------------------------- 1 | use crate::dpc::{enqueue_dpc, Dpc, DpcCall}; 2 | use core::time::Duration; 3 | 4 | use crate::current_cpu; 5 | use alloc::boxed::Box; 6 | 7 | pub trait EventExecutor { 8 | fn dispatch(self); 9 | } 10 | 11 | enum EventState { 12 | Waiting, 13 | Running, 14 | } 15 | 16 | pub struct TimerEvent { 17 | dpc: Box, 18 | // nanosecs: usize, 19 | pub deadline: Duration, 20 | state: EventState, 21 | callback_core: u32, 22 | pub periodic: bool, 23 | } 24 | 25 | impl EventExecutor for TimerEvent { 26 | fn dispatch(self) { 27 | enqueue_dpc(self.dpc) 28 | } 29 | } 30 | 31 | impl TimerEvent { 32 | pub fn new(callback: fn(T), data: T, deadline: Duration, periodic: bool) -> Self { 33 | let dpc = Dpc::new(callback, data); 34 | Self { 35 | dpc: Box::new(dpc), 36 | deadline, 37 | state: EventState::Waiting, 38 | callback_core: current_cpu().lapic_id, 39 | periodic, 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /kernel/src/timer/timer_queue.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::amd64::apic::APIC; 2 | use crate::timer::timer_event::EventExecutor; 3 | use crate::timer::timer_event::TimerEvent; 4 | use crate::timer::TIMER_VECTOR; 5 | use alloc::collections::VecDeque; 6 | use alloc::vec::Vec; 7 | use core::time::Duration; 8 | 9 | pub struct TimerQueue { 10 | pub events: VecDeque, 11 | pub 
is_timer_set: bool, 12 | } 13 | 14 | impl TimerQueue { 15 | pub fn new() -> Self { 16 | Self { 17 | events: VecDeque::new(), 18 | is_timer_set: false, 19 | } 20 | } 21 | 22 | pub fn event_dispatch(&mut self) { 23 | let mut deadline = Duration::ZERO; 24 | 25 | if let Some(event) = self.events.pop_front() { 26 | deadline = event.deadline; 27 | event.dispatch(); 28 | } 29 | 30 | let mut indices_to_remove: Vec = Vec::new(); 31 | 32 | for (index, ev) in self.events.iter_mut().enumerate() { 33 | ev.deadline -= deadline; 34 | 35 | if ev.deadline.is_zero() { 36 | indices_to_remove.push(index); 37 | } 38 | } 39 | 40 | for &index in indices_to_remove.iter().rev() { 41 | if let Some(event) = self.events.remove(index) { 42 | event.dispatch(); 43 | } 44 | } 45 | } 46 | 47 | pub fn enqueue(&mut self, event: TimerEvent) { 48 | if self.events.is_empty() { 49 | APIC.oneshot(*TIMER_VECTOR, &event.deadline); 50 | self.events.push_front(event); 51 | } else { 52 | if event.deadline < self.events.front().unwrap().deadline { 53 | APIC.stop(); 54 | APIC.oneshot(*TIMER_VECTOR, &event.deadline); 55 | } 56 | 57 | let insert_index = self 58 | .events 59 | .iter() 60 | .position(|i| i.deadline >= event.deadline) 61 | .unwrap_or(self.events.len()); 62 | self.events.insert(insert_index, event); 63 | } 64 | } 65 | 66 | pub fn len(&self) -> usize { 67 | self.events.len() 68 | } 69 | 70 | pub fn deadlines(&self) { 71 | println!("==="); 72 | self.events 73 | .iter() 74 | .for_each(|i| println!("event deadline: {:?}", i.deadline)); 75 | 76 | if let Some(event) = self.events.front() { 77 | let first = event.deadline; 78 | if self.events.iter().all(|ev| ev.deadline == first) { 79 | println!("all have the same deadline"); 80 | } 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /kernel/src/userland.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Arc; 2 | use libxernel::sync::Spinlock; 3 | 4 
| use crate::{ 5 | cpu::current_cpu, 6 | fs::initramfs::initramfs_read, 7 | sched::{ 8 | process::{Process, KERNEL_PROCESS}, 9 | thread::Thread, 10 | }, 11 | }; 12 | 13 | pub fn init() { 14 | let init_elf = initramfs_read("init").expect("init process not found in initramfs"); 15 | let init_process = Arc::new(Spinlock::new(Process::new(Some(KERNEL_PROCESS.clone())))); 16 | KERNEL_PROCESS.lock().children.push(init_process.clone()); 17 | 18 | let entry_point = init_process.aquire().load_elf(&init_elf); 19 | 20 | dbg!("init process entry point: {:#x}", entry_point); 21 | 22 | let init_thread = Thread::new_user_thread(init_process.clone(), entry_point); 23 | current_cpu().enqueue_thread(Arc::new(init_thread)); 24 | } 25 | -------------------------------------------------------------------------------- /kernel/src/utils/backtrace.rs: -------------------------------------------------------------------------------- 1 | pub fn init() {} 2 | 3 | // TODO: print symbol names 4 | pub fn log_backtrace(initial_rbp: usize) { 5 | dbg!("==================== BACKTRACE ===================="); 6 | 7 | let mut rbp = initial_rbp; 8 | 9 | while rbp != 0 { 10 | if unsafe { *(rbp as *const usize) } == 0 { 11 | break; 12 | } 13 | 14 | let rip = unsafe { *(rbp as *const usize).offset(1) }; 15 | 16 | if rip == 0 { 17 | break; 18 | } 19 | 20 | let symbol = ""; // crate::symbols::get_symbol(rip); 21 | dbg!("0x{:x} {}", rip, symbol); 22 | 23 | rbp = unsafe { *(rbp as *const usize) }; 24 | } 25 | 26 | dbg!("=================================================="); 27 | } 28 | -------------------------------------------------------------------------------- /kernel/src/utils/defer.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! on_drop { 3 | ($name:ident, $t:expr) => {{ 4 | OnDrop::new($name, $t) 5 | }}; 6 | ($name:expr, $t:expr) => {{ 7 | OnDrop::new($name, $t) 8 | }}; 9 | } 10 | 11 | #[macro_export] 12 | macro_rules! 
defer {
    ($t:expr) => {
        let _guard = OnDrop::new((), $t);
    };
    ($t:tt) => {
        let _guard = OnDrop::new((), || $t);
    };
}
--------------------------------------------------------------------------------
/kernel/src/utils/limine_module.rs:
--------------------------------------------------------------------------------
use limine::{File, ModuleRequest};

static MODULE_REQUEST: ModuleRequest = ModuleRequest::new(0);

/// Looks up a limine boot module by the name given on its cmdline.
pub fn get_limine_module(name: &str) -> Option<&File> {
    let modules = MODULE_REQUEST.get_response().get().unwrap().modules();

    for m in modules {
        // NOTE: the cmdline is wrapped in quotes, so we need to remove them
        let cmdline = m.cmdline.to_str().unwrap().to_str().unwrap();
        let mut cmd_chars = cmdline.chars();
        cmd_chars.next();
        cmd_chars.next_back();

        let cmdline_name = cmd_chars.as_str();

        if cmdline_name == name {
            return Some(unsafe { &*m.as_ptr() });
        }
    }

    None
}
--------------------------------------------------------------------------------
/kernel/src/utils/mod.rs:
--------------------------------------------------------------------------------
pub mod backtrace;
pub mod defer;
pub mod limine_module;
pub mod rtc;
--------------------------------------------------------------------------------
/kernel/src/utils/rtc.rs:
--------------------------------------------------------------------------------
use crate::arch::amd64::ports::{inb, outb};
use core::arch::asm;
const CMOSAddress: u16 = 0x70;
const CMOSData: u16 = 0x71;

pub struct Rtc;

impl Rtc {
    /// Reads the current date/time from the CMOS RTC and prints it.
    pub fn read() {
        let status: u8 = Rtc::read_cmos(0x0b);

        // Status register B bit 2: set = binary mode, clear = BCD mode.
        // The previous test `!(status & 0x04) > 0` applied bitwise NOT (on u8,
        // `!` is bitwise and binds before `>`), making the expression true in
        // both cases, so binary-mode clocks were misdecoded as BCD.
        let bcd: bool = status & 0x04 == 0;

        // Wait until the RTC is not mid-update (status register A, bit 7).
        while Rtc::read_cmos(0x0A) & 0x80 > 0 {
            unsafe {
                asm!("pause");
            }
        }

        let second = Rtc::decode(Rtc::read_cmos(0x0), bcd);
        let minute =
Rtc::decode(Rtc::read_cmos(0x02), bcd); 22 | let hour = Rtc::decode(Rtc::read_cmos(0x04), bcd); 23 | let day = Rtc::decode(Rtc::read_cmos(0x07), bcd); 24 | let month = Rtc::decode(Rtc::read_cmos(0x08), bcd); 25 | let year = Rtc::decode(Rtc::read_cmos(0x09), bcd) + 2000; 26 | 27 | println!( 28 | "Booted at: {}-{}-{} {}:{}:{} GMT", 29 | year, month, day, hour, minute, second 30 | ); 31 | } 32 | 33 | fn decode(value: u8, bcd: bool) -> i64 { 34 | if bcd { 35 | ((value & 0x0f) + ((value / 16) * 10)).into() 36 | } else { 37 | value as i64 38 | } 39 | } 40 | 41 | fn read_cmos(reg: u8) -> u8 { 42 | unsafe { 43 | outb(CMOSAddress, reg); 44 | inb(CMOSData) 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /kernel/src/writer.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::fmt::Write; 3 | 4 | use libxernel::sync::SpinlockIRQ; 5 | 6 | use crate::framebuffer::FRAMEBUFFER; 7 | 8 | struct Writer; 9 | 10 | static WRITER: SpinlockIRQ = SpinlockIRQ::new(Writer); 11 | 12 | impl core::fmt::Write for Writer { 13 | fn write_str(&mut self, s: &str) -> fmt::Result { 14 | let mut fb = FRAMEBUFFER.lock(); 15 | fb.puts(s); 16 | Ok(()) 17 | } 18 | } 19 | 20 | #[macro_export] 21 | macro_rules! println { 22 | ($($arg:tt)*) => ($crate::writer::_println(format_args!($($arg)*))); 23 | } 24 | 25 | #[macro_export] 26 | macro_rules! debug { 27 | ($($arg:tt)*) => ($crate::writer::_log_print(format_args!($($arg)*), "DEBUG", 0x03, 0xe8, 0xfc)); 28 | } 29 | 30 | #[macro_export] 31 | macro_rules! info { 32 | ($($arg:tt)*) => ($crate::writer::_log_print(format_args!($($arg)*), "INFO", 0x03, 0xfc, 0x0b)); 33 | } 34 | 35 | #[macro_export] 36 | macro_rules! error { 37 | ($($arg:tt)*) => ($crate::writer::_log_print(format_args!($($arg)*), "ERROR", 0xfc, 0x03, 0x0f)); 38 | } 39 | 40 | #[macro_export] 41 | macro_rules! 
warning { 42 | ($($arg:tt)*) => ($crate::writer::_log_print(format_args!($($arg)*), "WARNING", 0xfc, 0xca, 0x03)); 43 | } 44 | 45 | #[macro_export] 46 | macro_rules! print { 47 | ($($arg:tt)*) => ($crate::writer::_print(format_args!($($arg)*))); 48 | } 49 | 50 | #[macro_export] 51 | macro_rules! log { 52 | () => { $crate::writer::_log_print(format_args!(""), file!(), 0xFF, 0xFF, 0xFF) }; 53 | ($($arg:tt)*) => { $crate::writer::_log_print(format_args!($($arg)*), file!(), 0xFF, 0xFF, 0xFF) }; 54 | } 55 | 56 | #[doc(hidden)] 57 | pub fn _print(args: fmt::Arguments) { 58 | let mut writer = WRITER.lock(); 59 | // UNWRAP: We always return `Ok(())` inside `write_str` so this is unreachable. 60 | writer.write_fmt(args).unwrap(); 61 | } 62 | 63 | #[doc(hidden)] 64 | pub fn _println(args: fmt::Arguments) { 65 | let mut writer = WRITER.lock(); 66 | // UNWRAP: We always return `Ok(())` inside `write_str` so this is unreachable. 67 | writer.write_fmt(args).unwrap(); 68 | writer.write_char('\n').unwrap(); 69 | } 70 | 71 | #[doc(hidden)] 72 | pub fn _log_print(args: fmt::Arguments, level: &str, r: u8, g: u8, b: u8) { 73 | // UNWRAP: We always return `Ok(())` inside `write_str` so this is unreachable. 74 | let mut writer = WRITER.lock(); 75 | 76 | writer.write_char('[').unwrap(); 77 | 78 | FRAMEBUFFER.lock().set_color(r, g, b); 79 | writer.write_str(level).unwrap(); 80 | FRAMEBUFFER.lock().set_color(0xff, 0xff, 0xff); 81 | writer.write_str("] ").unwrap(); 82 | writer.write_fmt(args).unwrap(); 83 | 84 | writer.write_char('\n').unwrap(); 85 | } 86 | -------------------------------------------------------------------------------- /kernel/uefi-edk2/License.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019, TianoCore and contributors. All rights reserved. 
2 | 3 | SPDX-License-Identifier: BSD-2-Clause-Patent 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, 9 | this list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | Subject to the terms and conditions of this license, each copyright holder 16 | and contributor hereby grants to those receiving rights under this license 17 | a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable 18 | (except for failure to satisfy the conditions of this license) patent 19 | license to make, have made, use, offer to sell, sell, import, and otherwise 20 | transfer this software, where such license applies only to those patent 21 | claims, already acquired or hereafter acquired, licensable by such copyright 22 | holder or contributor that are necessarily infringed by: 23 | 24 | (a) their Contribution(s) (the licensed copyrights of copyright holders and 25 | non-copyrightable additions of contributors, in source or binary form) 26 | alone; or 27 | 28 | (b) combination of their Contribution(s) with the work of authorship to 29 | which such Contribution(s) was added by such copyright holder or 30 | contributor, if, at the time the Contribution is added, such addition 31 | causes such combination to be necessarily infringed. The patent license 32 | shall not apply to any other combinations which include the 33 | Contribution. 34 | 35 | Except as expressly stated above, no rights or licenses from any copyright 36 | holder or contributor is granted under this license, whether expressly, by 37 | implication, estoppel or otherwise. 
38 | 39 | DISCLAIMER 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 42 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE 45 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 46 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 47 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 48 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 49 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 50 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 51 | POSSIBILITY OF SUCH DAMAGE. 52 | -------------------------------------------------------------------------------- /kernel/uefi-edk2/OVMF.fd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anubis-rs/xernel/abf71c1983b5819dd4471c2c9a492b11b3a594b9/kernel/uefi-edk2/OVMF.fd -------------------------------------------------------------------------------- /kernel/uefi-edk2/OvmfPkg.License.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved. 2 | 3 | SPDX-License-Identifier: BSD-2-Clause-Patent 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, 9 | this list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 
14 | 15 | Subject to the terms and conditions of this license, each copyright holder 16 | and contributor hereby grants to those receiving rights under this license 17 | a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable 18 | (except for failure to satisfy the conditions of this license) patent 19 | license to make, have made, use, offer to sell, sell, import, and otherwise 20 | transfer this software, where such license applies only to those patent 21 | claims, already acquired or hereafter acquired, licensable by such copyright 22 | holder or contributor that are necessarily infringed by: 23 | 24 | (a) their Contribution(s) (the licensed copyrights of copyright holders and 25 | non-copyrightable additions of contributors, in source or binary form) 26 | alone; or 27 | 28 | (b) combination of their Contribution(s) with the work of authorship to 29 | which such Contribution(s) was added by such copyright holder or 30 | contributor, if, at the time the Contribution is added, such addition 31 | causes such combination to be necessarily infringed. The patent license 32 | shall not apply to any other combinations which include the 33 | Contribution. 34 | 35 | Except as expressly stated above, no rights or licenses from any copyright 36 | holder or contributor is granted under this license, whether expressly, by 37 | implication, estoppel or otherwise. 38 | 39 | DISCLAIMER 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 42 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 | ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE 45 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 46 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 47 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 48 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 49 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 50 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 51 | POSSIBILITY OF SUCH DAMAGE. 52 | 53 | ================================================================================ 54 | 55 | Some files are subject to the following license, the MIT license. Those files 56 | are located in: 57 | - OvmfPkg/Include/IndustryStandard/Xen/ 58 | - OvmfPkg/XenBusDxe/ 59 | 60 | SPDX-License-Identifier: MIT 61 | 62 | Permission is hereby granted, free of charge, to any person obtaining a copy 63 | of this software and associated documentation files (the "Software"), to deal 64 | in the Software without restriction, including without limitation the rights 65 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 66 | copies of the Software, and to permit persons to whom the Software is 67 | furnished to do so, subject to the following conditions: 68 | 69 | The above copyright notice and this permission notice (including the next 70 | paragraph) shall be included in all copies or substantial portions of the 71 | Software. 72 | 73 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 74 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 75 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 76 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 77 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 78 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 79 | THE SOFTWARE. 80 | -------------------------------------------------------------------------------- /logo.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anubis-rs/xernel/abf71c1983b5819dd4471c2c9a492b11b3a594b9/logo.bmp -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 -------------------------------------------------------------------------------- /status_quo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anubis-rs/xernel/abf71c1983b5819dd4471c2c9a492b11b3a594b9/status_quo.png -------------------------------------------------------------------------------- /userland/init/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "init" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | -------------------------------------------------------------------------------- /userland/init/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | 4 | use core::arch::asm; 5 | 6 | #[panic_handler] 7 | fn panic(__info: &core::panic::PanicInfo) -> ! { 8 | loop {} 9 | } 10 | 11 | #[no_mangle] 12 | pub extern "C" fn _start() -> ! 
{ 13 | main(); 14 | loop { 15 | for _ in 0..i16::MAX { 16 | unsafe { 17 | asm!("nop"); 18 | } 19 | } 20 | 21 | main(); 22 | } 23 | } 24 | 25 | fn main() { 26 | let hello_str = "Hello from userspace :)\0"; 27 | unsafe { 28 | asm!( 29 | "syscall", 30 | in("rdi") hello_str.as_ptr(), 31 | in("rax") 5 32 | ); 33 | } 34 | } 35 | --------------------------------------------------------------------------------