├── .dir-locals.el ├── .envrc ├── .gdbinit ├── .gitignore ├── Makefile ├── README.md ├── crates ├── bitmap-alloc │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ ├── alloc.rs │ │ ├── bootstrap.rs │ │ └── lib.rs ├── ring_buffer │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── test-infra │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── test-macro │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ └── lib.rs ├── flake ├── flake.lock ├── flake.nix └── update-and-diff-profiles.sh ├── img └── demo.gif ├── kernel ├── .cargo │ └── config.toml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── linker.ld ├── src │ ├── acpi.rs │ ├── ansiterm.rs │ ├── apic.rs │ ├── barrier.rs │ ├── block.rs │ ├── boot_info.rs │ ├── debug.rs │ ├── elf.rs │ ├── fs │ │ ├── ext2 │ │ │ ├── block_group.rs │ │ │ ├── directory.rs │ │ │ ├── file_system.rs │ │ │ ├── inode.rs │ │ │ ├── mod.rs │ │ │ ├── strings.rs │ │ │ ├── superblock.rs │ │ │ └── vfs.rs │ │ ├── fat.rs │ │ ├── mod.rs │ │ └── sysfs.rs │ ├── gdt.rs │ ├── graphics │ │ ├── font.rs │ │ ├── framebuffer.rs │ │ ├── mod.rs │ │ └── text_buffer.rs │ ├── hpet.rs │ ├── interrupts.rs │ ├── ioapic.rs │ ├── keyboard.rs │ ├── lib.rs │ ├── logging.rs │ ├── main.rs │ ├── memory │ │ ├── address.rs │ │ ├── heap.rs │ │ ├── mapping.rs │ │ ├── mod.rs │ │ ├── page.rs │ │ ├── page_table.rs │ │ └── physical.rs │ ├── pci │ │ ├── capabilities.rs │ │ ├── device.rs │ │ ├── device_id.rs │ │ ├── location.rs │ │ └── mod.rs │ ├── percpu.rs │ ├── qemu.rs │ ├── registers.rs │ ├── sched │ │ ├── mod.rs │ │ ├── preempt.rs │ │ ├── schedcore.rs │ │ ├── stack.rs │ │ ├── syscall.rs │ │ ├── task.rs │ │ └── userspace.rs │ ├── serial.rs │ ├── shell.rs │ ├── strings.rs │ ├── sync │ │ ├── atomic_int.rs │ │ ├── init_cell.rs │ │ ├── mod.rs │ │ ├── mutex.rs │ │ ├── once_cell.rs │ │ ├── once_channel.rs │ │ ├── spin_lock.rs │ │ └── wait_cell.rs │ ├── tests │ │ ├── magic.rs │ │ └── mod.rs │ ├── tick.rs │ ├── transmute.rs │ ├── vfs │ │ ├── fs.rs │ │ ├── mod.rs │ │ └── path.rs │ └── virtio │ │ ├── block.rs │ │ ├── config.rs │ │ ├── device.rs │ │ ├── features.rs │ │ ├── mod.rs │ │ ├── queue.rs │ │ └── rng.rs └── x86_64-rust_os.json ├── limine.cfg ├── old-code └── grub-64-bit-boot │ ├── README.md │ ├── boot.asm │ ├── grub.cfg │ ├── linker.ld │ ├── long_mode_init.asm │ └── multiboot_header.asm ├── scripts ├── create-boot-image.sh ├── create-test-ext2-image.sh └── create-test-fat-image.sh ├── shell.nix └── userspace ├── hello ├── .gitignore ├── Makefile └── hello.asm └── primes ├── .gitignore ├── Makefile ├── primes.c └── runtime.asm /.dir-locals.el: -------------------------------------------------------------------------------- 1 | ;; Fixes rust-analyzer complaining about a lack of a crate for `test`. See 2 | ;; https://github.com/rust-lang/rust-analyzer/issues/3801. Getting the actual 3 | ;; target seems to work, thanks to 4 | ;; https://github.com/rust-lang/rust-analyzer/pull/8774, but we still need to 5 | ;; disable _all_ targets to avoid the error. 6 | ((rust-mode . ((lsp-rust-analyzer-check-all-targets . 
nil)))) 7 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use nix 2 | -------------------------------------------------------------------------------- /.gdbinit: -------------------------------------------------------------------------------- 1 | # Pretty printing 2 | set print pretty on 3 | 4 | # My layout 5 | tui new-layout rust-os {-horizontal src 1 {asm 1 regs 1} 1} 2 status 0 cmd 1 6 | tui layout rust-os 7 | 8 | # Log output to gdb.txt so we don't need to scroll in TUI mode 9 | set logging file gdb.log 10 | set logging enabled on 11 | set trace-commands on 12 | 13 | # Helper functions 14 | 15 | # I used this during an investigation into a General Protection Fault caused by 16 | # the ret instruction in the task switching switch_to_task assembly. I wanted to 17 | # see the stack (specifically just the instruction pointer we are returning to) 18 | # and registers at the point of the fault. This command will dump the registers 19 | # and stack, then continue execution. 20 | define dump_continue 21 | info reg rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 rip eflags cs ss ds es fs gs gs_base k_gs_base cr3 22 | info stack 23 | x /1xg $rsp 24 | continue 25 | end 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /iso_root 3 | /kernel.elf 4 | /kernel.iso 5 | /*.hdd 6 | /gdb.log 7 | /.ccls-cache/ 8 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | KERNEL_HDD = kernel.hdd 2 | OVMF = $(shell nix build ./flake#OVMF --print-out-paths --no-link)/OVMF.fd 3 | QEMU_DEBUG_BIN = $(shell nix build ./flake#qemu-x86_64-debug --print-out-paths --no-link)/bin/qemu-system-x86_64 4 | QEMU_SOURCE_CODE = $(shell nix build ./flake#qemu-x86_64-debug --print-out-paths --no-link)/raw 5 | 6 | TEST_FAT_HDD = test_fat.hdd 7 | TEST_EXT2_HDD = test_ext2.hdd 8 | 9 | RUST_BUILD_MODE = debug 10 | RUST_BUILD_MODE_FLAG = 11 | ifeq ($(RUST_BUILD_MODE),release) 12 | RUST_BUILD_MODE_FLAG = --release 13 | endif 14 | 15 | KERNEL = kernel/target/x86_64-rust_os/$(RUST_BUILD_MODE)/rust-os 16 | 17 | # Not all crates support `cargo test` 18 | TEST_CRATES += crates/bitmap-alloc 19 | TEST_CRATES += crates/ring_buffer 20 | TEST_CRATES += crates/test-infra 21 | TEST_CRATES += crates/test-macro 22 | ALL_CRATES = $(TEST_CRATES) kernel 23 | 24 | .DEFAULT_GOAL := all 25 | .PHONY: all 26 | all: $(KERNEL_HDD) 27 | 28 | QEMU=qemu-system-x86_64 29 | RUN_QEMU_GDB=no 30 | ifeq ($(RUN_QEMU_GDB),yes) 31 | QEMU=gdb --directory $(QEMU_SOURCE_CODE)/build --args $(QEMU_DEBUG_BIN) 32 | else 33 | # GTK is a much nicer display than SDL, but to compile QEMU with debug symbols 34 | # in Nix, we had to disable the GTK wrappers. 
35 | QEMU_COMMON_ARGS += -display gtk,zoom-to-fit=on 36 | endif 37 | 38 | # Good reference for QEMU options: https://wiki.gentoo.org/wiki/QEMU/Options 39 | UEFI = on 40 | ifeq ($(UEFI),on) 41 | $(info UEFI is enabled) 42 | QEMU_COMMON_ARGS += -bios $(OVMF) 43 | else 44 | $(info UEFI is disabled) 45 | endif 46 | 47 | GRAPHICS=off 48 | ifeq ($(GRAPHICS),on) 49 | $(info QEMU graphics are enabled) 50 | QEMU_COMMON_ARGS += -vga virtio # More modern, better performance than default -vga std 51 | QEMU_COMMON_ARGS += -serial stdio # Send serial output to terminal 52 | else 53 | $(info QEMU graphics are disabled) 54 | QEMU_COMMON_ARGS += -nographic 55 | # N.B. -nographic implies -serial stdio 56 | endif 57 | 58 | # Use virtio for the disk: 59 | QEMU_COMMON_ARGS += -drive file=$(KERNEL_HDD),if=none,id=drive-virtio-disk0,format=raw -device virtio-blk-pci,scsi=off,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=0,serial=hello-blk 60 | QEMU_COMMON_ARGS += -drive file=$(TEST_FAT_HDD),if=none,id=drive-virtio-disk1,format=raw -device virtio-blk-pci,scsi=off,drive=drive-virtio-disk1,id=virtio-disk1,serial=test-fat 61 | QEMU_COMMON_ARGS += -drive file=$(TEST_EXT2_HDD),if=none,id=drive-virtio-disk2,format=raw -device virtio-blk-pci,scsi=off,drive=drive-virtio-disk2,id=virtio-disk2,serial=test-ext2 62 | QEMU_COMMON_ARGS += -smp 4 # Use 4 cores 63 | QEMU_COMMON_ARGS += -m 2G # More memory 64 | QEMU_COMMON_ARGS += -device virtio-rng-pci-non-transitional # RNG is the simplest virtio device. Good for testing. 65 | QEMU_COMMON_ARGS += -device isa-debug-exit,iobase=0xf4,iosize=0x04 # Exit QEMU when the kernel writes to port 0xf4 66 | 67 | QEMU_ARGS += $(QEMU_COMMON_ARGS) 68 | QEMU_ARGS += -M q35,accel=kvm # Use the q35 chipset. accel=kvm enables hardware acceleration, makes things way faster. 69 | 70 | .PHONY: run 71 | run: $(KERNEL_HDD) $(TEST_FAT_HDD) $(TEST_EXT2_HDD) 72 | $(QEMU) $(QEMU_ARGS) 73 | 74 | # N.B. Run `make run-debug` in one terminal, and `make gdb` in another. 75 | QEMU_DEBUG_ARGS += $(QEMU_COMMON_ARGS) 76 | QEMU_DEBUG_ARGS += -M q35 # Use the q35 chipset, but don't use kvm acceleration for debug mode because it makes logging interrupts give less info. 77 | QEMU_DEBUG_ARGS += -d cpu_reset,guest_errors # Log some unexpected things. Run qemu-system-x86_64 -d help to see more. Add `int` for interrupts 78 | 79 | .PHONY: run-debug 80 | run-debug: $(KERNEL_HDD) $(TEST_FAT_HDD) $(TEST_EXT2_HDD) 81 | qemu-system-x86_64 $(QEMU_DEBUG_ARGS) -s -S 82 | 83 | .PHONY: gdb 84 | gdb: # No deps because we don't want an accidental rebuild if `make debug` already ran. 
85 | rust-gdb $(KERNEL) -ex "target remote :1234" 86 | 87 | .PHONY: kernel 88 | kernel: 89 | cd kernel && cargo build $(RUST_BUILD_MODE_FLAG) 90 | 91 | CMDLINE= 92 | 93 | # Adapted from https://github.com/limine-bootloader/limine-barebones/blob/trunk/GNUmakefile 94 | .PHONY: $(KERNEL_HDD) 95 | $(KERNEL_HDD): kernel 96 | ./scripts/create-boot-image.sh $(KERNEL_HDD) $(KERNEL) "$(CMDLINE)" 97 | 98 | $(TEST_FAT_HDD): 99 | ./scripts/create-test-fat-image.sh $(TEST_FAT_HDD) 100 | 101 | .PHONY: $(TEST_EXT2_HDD) 102 | $(TEST_EXT2_HDD): 103 | ./scripts/create-test-ext2-image.sh $(TEST_EXT2_HDD) 104 | 105 | .PHONY: test 106 | test: 107 | for crate in $(TEST_CRATES); do \ 108 | (cd $$crate && cargo test) \ 109 | done 110 | 111 | for crate in $(ALL_CRATES); do \ 112 | (cd $$crate && cargo clippy -- -D warnings && cargo fmt --check) \ 113 | done 114 | 115 | (cd kernel && cargo clippy --no-default-features -- -D warnings) # Ensure there isn't dead code due to tests 116 | 117 | .PHONY: clean 118 | clean: 119 | rm -rf target img_mount iso_root *.iso *.elf *.hdd 120 | for crate in $(ALL_CRATES); do \ 121 | (cd $$crate && cargo clean) \ 122 | done 123 | -------------------------------------------------------------------------------- /crates/bitmap-alloc/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | -------------------------------------------------------------------------------- /crates/bitmap-alloc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bitmap-alloc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | 8 | [dev-dependencies] 9 | proptest = "1" 10 | -------------------------------------------------------------------------------- /crates/bitmap-alloc/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Code for bitmap allocator, used for physical memory allocation in the 2 | //! kernel. 3 | 4 | #![cfg_attr(not(test), no_std)] 5 | #![feature(int_roundings)] 6 | #![warn(clippy::all, clippy::pedantic, clippy::nursery, clippy::cargo)] 7 | #![allow( 8 | clippy::cast_possible_truncation, 9 | clippy::cast_precision_loss, 10 | clippy::cargo_common_metadata, 11 | clippy::doc_markdown, 12 | clippy::implicit_hasher, 13 | clippy::implicit_return, 14 | clippy::len_without_is_empty, 15 | clippy::missing_const_for_fn, 16 | clippy::missing_errors_doc, 17 | clippy::missing_panics_doc, 18 | clippy::module_name_repetitions, 19 | clippy::multiple_crate_versions, 20 | clippy::must_use_candidate, 21 | clippy::new_without_default, 22 | clippy::redundant_pub_crate, 23 | clippy::suboptimal_flops, 24 | clippy::upper_case_acronyms, 25 | clippy::wildcard_imports 26 | )] 27 | 28 | mod alloc; 29 | mod bootstrap; 30 | 31 | pub use alloc::*; 32 | pub use bootstrap::*; 33 | -------------------------------------------------------------------------------- /crates/ring_buffer/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /crates/ring_buffer/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "ring_buffer" 7 | version = "0.1.0" 8 | -------------------------------------------------------------------------------- /crates/ring_buffer/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ring_buffer" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | -------------------------------------------------------------------------------- /crates/ring_buffer/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This module implements a simple, static, append-only ring buffer. 2 | //! 3 | //! The ring buffer is implemented as a fixed-size array of elements with a 4 | //! pointer to the next free location. 5 | 6 | #![cfg_attr(not(test), no_std)] 7 | #![warn(clippy::all, clippy::pedantic, clippy::nursery, clippy::cargo)] 8 | #![allow( 9 | clippy::cast_possible_truncation, 10 | clippy::cast_precision_loss, 11 | clippy::cargo_common_metadata, 12 | clippy::implicit_hasher, 13 | clippy::implicit_return, 14 | clippy::missing_const_for_fn, 15 | clippy::missing_errors_doc, 16 | clippy::missing_panics_doc, 17 | clippy::module_name_repetitions, 18 | clippy::multiple_crate_versions, 19 | clippy::must_use_candidate, 20 | clippy::new_without_default, 21 | clippy::suboptimal_flops, 22 | clippy::wildcard_imports 23 | )] 24 | 25 | /// A simple, static, append-only ring buffer. 26 | #[derive(Debug)] 27 | pub struct RingBuffer { 28 | /// The fixed-size array of elements. 29 | elements: [Option; N], 30 | 31 | /// The index of the next free location. 32 | next_free: usize, 33 | } 34 | 35 | // TODO: Get rid of Copy requirement. 36 | impl RingBuffer { 37 | pub const fn new() -> Self { 38 | Self { 39 | elements: [None; N], 40 | next_free: 0, 41 | } 42 | } 43 | 44 | #[allow(clippy::len_without_is_empty)] 45 | pub fn len(&self) -> usize { 46 | // If we have wrapped around, then the length is the number of elements. 47 | if self.elements[self.next_free].is_some() { 48 | N 49 | } else { 50 | self.next_free 51 | } 52 | } 53 | 54 | /// Append an element to the ring buffer. This will overwrite the oldest 55 | /// element if the buffer is full. 56 | pub fn push(&mut self, element: T) { 57 | self.elements[self.next_free] = Some(element); 58 | self.next_free = (self.next_free + 1) % N; 59 | } 60 | 61 | /// Get the element at the given index, where the index counts backwards 62 | /// from the latest element. For example, `0` is the element most recently 63 | /// pushed, `1` is the second most recent element, and `N-1` is the oldest 64 | /// element. Returns `None` if no element exists at the given index. 65 | pub fn get(&self, index: usize) -> Option<&T> { 66 | // Out of bounds 67 | if index >= self.elements.len() { 68 | return None; 69 | } 70 | 71 | if index < self.next_free { 72 | self.elements[self.next_free - index - 1].as_ref() 73 | } else { 74 | self.elements[N - 1 - (index - self.next_free)].as_ref() 75 | } 76 | } 77 | 78 | /// Same as `get`, but returns a mutable reference. 79 | pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { 80 | // TODO: Use a macro to avoid code duplication with `get`. 
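// (Illustrative worked example, not in the original source: with N = 3,
// pushing 1, 2, 3, 4 leaves `elements = [Some(4), Some(2), Some(3)]` and
// `next_free = 1`. Index 0 takes the `index < next_free` branch and reads
// `elements[next_free - 0 - 1] = elements[0] = 4` (newest); index 1 takes
// the other branch and reads `elements[N - 1 - (index - next_free)]
// = elements[3 - 1 - 0] = elements[2] = 3`; index 2 reads
// `elements[3 - 1 - 1] = elements[1] = 2` (oldest). The same mapping is
// used by `get` above.)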
81 | 82 | // Out of bounds 83 | if index >= self.elements.len() { 84 | return None; 85 | } 86 | 87 | if index < self.next_free { 88 | self.elements[self.next_free - index - 1].as_mut() 89 | } else { 90 | self.elements[N - 1 - (index - self.next_free)].as_mut() 91 | } 92 | } 93 | 94 | pub fn iter(&self) -> RingBufferIter { 95 | RingBufferIter { 96 | buffer: self, 97 | index: 0, 98 | } 99 | } 100 | } 101 | 102 | /// An iterator over the elements of a ring buffer. The iterator yields the 103 | /// elements in order from newest to oldest. 104 | pub struct RingBufferIter<'a, T, const N: usize> { 105 | buffer: &'a RingBuffer, 106 | index: usize, 107 | } 108 | 109 | impl<'a, T: Copy, const N: usize> Iterator for RingBufferIter<'a, T, N> { 110 | type Item = &'a T; 111 | 112 | fn next(&mut self) -> Option { 113 | if self.index >= self.buffer.len() { 114 | return None; 115 | } 116 | 117 | let item = self.buffer.get(self.index); 118 | self.index += 1; 119 | item 120 | } 121 | } 122 | 123 | #[cfg(test)] 124 | mod tests { 125 | use super::*; 126 | 127 | // TODO proptest that compares this with a fixed-size Deque 128 | 129 | #[test] 130 | fn push_and_get() { 131 | let mut buffer = RingBuffer::::new(); 132 | assert_eq!(buffer.len(), 0); 133 | 134 | buffer.push(1); 135 | assert_eq!(buffer.len(), 1); 136 | assert_eq!(buffer.get_mut(0), Some(&mut 1)); 137 | assert_eq!(buffer.get_mut(1), None); 138 | 139 | assert_eq!(buffer.get(0), Some(&1)); 140 | assert_eq!(buffer.get(1), None); 141 | 142 | buffer.push(2); 143 | assert_eq!(buffer.len(), 2); 144 | assert_eq!(buffer.get_mut(0), Some(&mut 2)); 145 | assert_eq!(buffer.get_mut(1), Some(&mut 1)); 146 | assert_eq!(buffer.get_mut(2), None); 147 | 148 | assert_eq!(buffer.get(0), Some(&2)); 149 | assert_eq!(buffer.get(1), Some(&1)); 150 | assert_eq!(buffer.get(2), None); 151 | 152 | buffer.push(3); 153 | assert_eq!(buffer.len(), 3); 154 | assert_eq!(buffer.get_mut(0), Some(&mut 3)); 155 | assert_eq!(buffer.get_mut(1), Some(&mut 2)); 156 | assert_eq!(buffer.get_mut(2), Some(&mut 1)); 157 | assert_eq!(buffer.get_mut(3), None); 158 | 159 | assert_eq!(buffer.get(0), Some(&3)); 160 | assert_eq!(buffer.get(1), Some(&2)); 161 | assert_eq!(buffer.get(2), Some(&1)); 162 | assert_eq!(buffer.get(3), None); 163 | 164 | // Wrap around 165 | buffer.push(4); 166 | assert_eq!(buffer.len(), 3); 167 | assert_eq!(buffer.get_mut(0), Some(&mut 4)); 168 | assert_eq!(buffer.get_mut(1), Some(&mut 3)); 169 | assert_eq!(buffer.get_mut(2), Some(&mut 2)); 170 | assert_eq!(buffer.get_mut(3), None); 171 | 172 | assert_eq!(buffer.get(0), Some(&4)); 173 | assert_eq!(buffer.get(1), Some(&3)); 174 | assert_eq!(buffer.get(2), Some(&2)); 175 | assert_eq!(buffer.get(3), None); 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /crates/test-infra/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | -------------------------------------------------------------------------------- /crates/test-infra/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "test-infra" 7 | version = "0.1.0" 8 | -------------------------------------------------------------------------------- /crates/test-infra/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-infra" 3 | version = "0.1.0" 4 | edition = "2021" 5 | -------------------------------------------------------------------------------- /crates/test-infra/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Holds definitions and functions used in kernel tests. It is only outside the 2 | //! kernel so we can create proc macros for tests. 3 | 4 | #![no_std] 5 | 6 | use core::fmt; 7 | 8 | /// Holds a single test. 9 | pub struct SimpleTest { 10 | pub name: &'static str, 11 | pub module: &'static str, 12 | pub file: &'static str, 13 | pub line: u32, 14 | pub column: u32, 15 | pub test_fn: fn(), 16 | } 17 | 18 | impl fmt::Debug for SimpleTest { 19 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 20 | f.debug_struct("SimpleTest") 21 | .field("name", &self.name) 22 | .field("module", &self.module) 23 | .field("file", &self.file) 24 | .field("line", &self.line) 25 | .field("column", &self.column) 26 | .finish() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /crates/test-macro/.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | -------------------------------------------------------------------------------- /crates/test-macro/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "proc-macro2" 7 | version = "1.0.60" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" 10 | dependencies = [ 11 | "unicode-ident", 12 | ] 13 | 14 | [[package]] 15 | name = "quote" 16 | version = "1.0.28" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" 19 | dependencies = [ 20 | "proc-macro2", 21 | ] 22 | 23 | [[package]] 24 | name = "syn" 25 | version = "2.0.18" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e" 28 | dependencies = [ 29 | "proc-macro2", 30 | "quote", 31 | "unicode-ident", 32 | ] 33 | 34 | [[package]] 35 | name = "test-infra" 36 | version = "0.1.0" 37 | 38 | [[package]] 39 | name = "test-macro" 40 | version = "0.1.0" 41 | dependencies = [ 42 | "proc-macro2", 43 | "quote", 44 | "syn", 45 | "test-infra", 46 | ] 47 | 48 | [[package]] 49 | name = "unicode-ident" 50 | version = "1.0.9" 51 | source = "registry+https://github.com/rust-lang/crates.io-index" 52 | checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" 53 | -------------------------------------------------------------------------------- /crates/test-macro/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-macro" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [lib] 7 | proc-macro = true 8 | 9 | [dependencies] 10 | proc-macro2 = "1.0" 11 | quote = "1.0" 12 | syn = { version = "2.0", features = ["full"] } 13 | test-infra = { path = "../test-infra" } 14 | -------------------------------------------------------------------------------- /crates/test-macro/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate exists to export proc macros for kernel tests. 2 | 3 | #![feature(proc_macro_span)] 4 | 5 | extern crate proc_macro; 6 | 7 | use proc_macro::TokenStream; 8 | use quote::{format_ident, quote}; 9 | use syn::{parse_macro_input, ItemFn}; 10 | 11 | #[proc_macro_attribute] 12 | pub fn kernel_test(args: TokenStream, item: TokenStream) -> TokenStream { 13 | assert!(args.is_empty(), "kernel_test attribute takes no arguments"); 14 | let original_item = item.clone(); 15 | 16 | let parsed_item = parse_macro_input!(item as ItemFn); 17 | let fn_name_ident = parsed_item.sig.ident; 18 | let fn_name_str = fn_name_ident.to_string(); 19 | let struct_ident = format_ident!("TEST_{}", fn_name_ident); 20 | 21 | let test_struct: TokenStream = quote! 
{ 22 | #[used] 23 | #[link_section = ".init_test_array"] 24 | #[allow(non_upper_case_globals)] 25 | static #struct_ident: ::test_infra::SimpleTest = ::test_infra::SimpleTest { 26 | name: #fn_name_str, 27 | file: core::file!(), 28 | line: core::line!(), 29 | column: core::column!(), 30 | module: core::module_path!(), 31 | test_fn: #fn_name_ident, 32 | }; 33 | } 34 | .into(); 35 | 36 | original_item.into_iter().chain(test_struct).collect() 37 | } 38 | -------------------------------------------------------------------------------- /flake/flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1681202837, 9 | "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "cfacdce06f30d2b68473a46042957675eebb3401", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1681358109, 24 | "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixpkgs-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "nixpkgs-unstable": { 38 | "locked": { 39 | "lastModified": 1687502512, 40 | "narHash": "sha256-dBL/01TayOSZYxtY4cMXuNCBk8UMLoqRZA+94xiFpJA=", 41 | "owner": "nixos", 42 | "repo": "nixpkgs", 43 | "rev": "3ae20aa58a6c0d1ca95c9b11f59a2d12eebc511f", 44 | "type": "github" 45 | }, 46 | "original": { 47 | "owner": "nixos", 48 | "ref": "nixos-unstable", 49 | "repo": "nixpkgs", 50 | "type": "github" 51 | } 52 | }, 53 | "root": { 54 | "inputs": { 55 | "nixpkgs-unstable": "nixpkgs-unstable", 56 | "rust-overlay": "rust-overlay" 57 | } 58 | }, 59 | "rust-overlay": { 60 | "inputs": { 61 | "flake-utils": "flake-utils", 62 | "nixpkgs": "nixpkgs" 63 | }, 64 | "locked": { 65 | "lastModified": 1687573996, 66 | "narHash": "sha256-F7pDERmi8MomkMhcUW88IW6RRrxAk7QO2PXs+LMpxpI=", 67 | "owner": "oxalica", 68 | "repo": "rust-overlay", 69 | "rev": "ec002586144fe0f48039dced270c188f0b8213ab", 70 | "type": "github" 71 | }, 72 | "original": { 73 | "owner": "oxalica", 74 | "repo": "rust-overlay", 75 | "type": "github" 76 | } 77 | }, 78 | "systems": { 79 | "locked": { 80 | "lastModified": 1681028828, 81 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 82 | "owner": "nix-systems", 83 | "repo": "default", 84 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 85 | "type": "github" 86 | }, 87 | "original": { 88 | "owner": "nix-systems", 89 | "repo": "default", 90 | "type": "github" 91 | } 92 | } 93 | }, 94 | "root": "root", 95 | "version": 7 96 | } 97 | -------------------------------------------------------------------------------- /flake/flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable"; 4 | rust-overlay.url = "github:oxalica/rust-overlay"; 5 | }; 6 | 7 | outputs = { self, nixpkgs-unstable, rust-overlay }: 8 | let 9 | system = "x86_64-linux"; 10 | pkgs = import nixpkgs-unstable { 11 | inherit system; 12 | overlays = [ rust-overlay.overlays.default ]; 13 | config = { allowUnfree = true; }; 14 | }; 15 | 16 
| # Compile QEMU with debug symbols so we can run QEMU itself with GDB. 17 | qemu-x86_64 = pkgs.qemu.override { 18 | # We only need x86_64. This keeps compile times down. 19 | hostCpuOnly = true; 20 | 21 | # Wrapping the qemu-system-* binaries with the GTK wrapper in nixpkgs 22 | # removes debug symbols. 23 | gtkSupport = false; 24 | }; 25 | qemu-x86_64-debug = qemu-x86_64.overrideAttrs (finalAttrs: previousAttrs: { 26 | # QEMU-specific flags to add debug info. See https://www.cnblogs.com/root-wang/p/8005212.html 27 | configureFlags = previousAttrs.configureFlags ++ [ 28 | "--enable-debug" 29 | # --enable-debug uses -g, we want even more debug info. 30 | "--extra-cflags=-g3" 31 | # -fdebug-prefix-map is needed to make sure the debug info is relative 32 | # to the output directory. Otherwise it will be relative to the build 33 | # directory, which is not what we want because that directory won't 34 | # exist after the build, and GDB will get confused because the build 35 | # uses absolute paths. 36 | # 37 | # NOTE: Nix package builds appear to build in a /build directory. I 38 | # think the subdirectory under /build is dependent on the source you 39 | # download, which in our case is qemu-{version}.tar.xz, which untars 40 | # to qemu-{version}. I can't use $(pwd) because that gets escaped by 41 | # something before it gets turned into CFLAGS. 42 | "--extra-cflags=-fdebug-prefix-map=/build/qemu-${previousAttrs.version}=.." 43 | "--disable-pie" 44 | ]; 45 | 46 | # Disable default hardening flags. These are very confusing when doing 47 | # development and they break builds of packages/systems that don't 48 | # expect these flags to be on. Automatically enables stuff like 49 | # FORTIFY_SOURCE, -Werror=format-security, -fPIE, etc. See: 50 | # - https://nixos.org/manual/nixpkgs/stable/#sec-hardening-in-nixpkgs 51 | # - https://nixos.wiki/wiki/C#Hardening_flags 52 | hardeningDisable = ["all"]; 53 | 54 | # Don't strip debug info from executables. 55 | dontStrip = true; 56 | 57 | # By default some script goes and separates debug info from the 58 | # binaries. We don't want that. 59 | separateDebugInfo = false; 60 | 61 | # Store all of the source artifacts so GDB can use them. 62 | # 63 | # Note that gdb expects us to be in the build/ sub-directory, and some 64 | # paths are still absolute. See 65 | # https://github.com/mesonbuild/meson/issues/10533 for possible 66 | # alternatives like -fdebug-prefix-map. Also see 67 | # https://alex.dzyoba.com/blog/gdb-source-path/ 68 | postFixup = (previousAttrs.postFixup or "") + '' 69 | mkdir -p $out/raw 70 | # In Meson we are in a build/ subdirectory 71 | cp -r .. $out/raw/ 72 | ''; 73 | }); 74 | in 75 | with pkgs; 76 | { 77 | devShells.x86_64-linux.default = pkgs.mkShell { 78 | nativeBuildInputs = [ 79 | (rust-bin.selectLatestNightlyWith (toolchain: toolchain.default.override { 80 | extensions = [ 81 | "rust-src" # Needed to rebuild core with build-std. See https://doc.rust-lang.org/cargo/reference/unstable.html#build-std 82 | "llvm-tools-preview" 83 | ]; 84 | targets = [ 85 | "x86_64-unknown-none" 86 | ]; 87 | })) 88 | 89 | # Debugging 90 | gdb 91 | 92 | # Assembly 93 | nasm 94 | 95 | # For emulation 96 | qemu 97 | screen # For serial ports 98 | 99 | # Build 100 | xorriso 101 | parted 102 | ]; 103 | }; 104 | 105 | packages.${system} = { 106 | inherit qemu-x86_64-debug; 107 | 108 | # Nix has an OVMF package, but it doesn't seem to include OVMF.fd. 
We 109 | # use the zip file that the limine barebones build uses 110 | # https://github.com/limine-bootloader/limine-barebones/blob/e08f355a22fbefb27cfea4e3d890eb9551bdac1b/GNUmakefile#L28-L30 111 | OVMF = pkgs.stdenv.mkDerivation { 112 | name = "OVMF"; 113 | src = pkgs.fetchzip { 114 | url = "https://efi.akeo.ie/OVMF/OVMF-X64.zip"; 115 | sha256 = "sha256-dF+HQJ9TREfqxnUSAHWzkbkw93ifLIqmROhv3uM4Rss="; 116 | stripRoot = false; 117 | }; 118 | 119 | installPhase = '' 120 | cp -r . $out/ 121 | ''; 122 | }; 123 | 124 | limine = 125 | let 126 | # See https://github.com/limine-bootloader/limine/releases for 127 | # releases. Make sure to use the "-binary" version! 128 | version = "v4.20230428.0-binary"; 129 | in pkgs.stdenv.mkDerivation { 130 | pname = "limine"; 131 | inherit version; 132 | 133 | src = pkgs.fetchFromGitHub { 134 | owner = "limine-bootloader"; 135 | repo = "limine"; 136 | rev = version; 137 | sha256 = "sha256-QnmKKRzcjDIDNO6YbbBpyFS09imdhYw046miFkQ1/Rw="; 138 | }; 139 | 140 | buildPhase = '' 141 | make 142 | ''; 143 | 144 | installPhase = '' 145 | cp -r . $out/ 146 | ''; 147 | }; 148 | }; 149 | }; 150 | } 151 | -------------------------------------------------------------------------------- /flake/update-and-diff-profiles.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Updates the nix flake and diffs the profiles 4 | 5 | set -eu 6 | 7 | shell_names=$(nix eval --impure --expr 'builtins.attrNames (builtins.getFlake (toString ./.)).outputs.devShells.x86_64-linux' --json | jq .[] -r) 8 | 9 | generate_profile() { 10 | shell=$1 11 | profile_prefix=$2 12 | 13 | profile_name="profile-${profile_prefix}-${shell}" 14 | rm -f "$profile_name" 15 | 16 | nix build ".#devShells.x86_64-linux.$shell" --out-link "./$profile_name" 17 | } 18 | 19 | for shell in $shell_names; do 20 | echo "Generating old profile for shell: $shell" 21 | generate_profile "$shell" old 22 | done 23 | 24 | echo "Updating flake" 25 | nix flake update 26 | 27 | for shell in $shell_names; do 28 | echo "Generating new profile for shell: $shell" 29 | generate_profile "$shell" new 30 | done 31 | 32 | LIGHT_BLUE="\e[94m" 33 | END_COLOR="\e[0m" 34 | 35 | for shell in $shell_names; do 36 | echo "" 37 | echo -e "${LIGHT_BLUE}Diff for shell: $shell${END_COLOR}" 38 | 39 | old_profile="profile-old-$shell" 40 | new_profile="profile-new-$shell" 41 | 42 | nix store diff-closures "./$old_profile" "./$new_profile" 43 | 44 | rm -f "$old_profile" "$new_profile" 45 | done 46 | -------------------------------------------------------------------------------- /img/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jdreaver/rust-os/b65c9648457625a2a7f26a292141ab2383157a92/img/demo.gif -------------------------------------------------------------------------------- /kernel/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "x86_64-rust_os.json" 3 | 4 | [unstable] 5 | build-std = ["core", "compiler_builtins", "alloc"] 6 | build-std-features = ["compiler-builtins-mem"] 7 | 8 | [target.x86_64-rust_os] 9 | rustflags = [ 10 | "-C", "link-arg=-Tlinker.ld", 11 | # Simplify building stack traces 12 | "-Cforce-frame-pointers" 13 | ] 14 | -------------------------------------------------------------------------------- /kernel/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 
-------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-os" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | acpi = "4.1" 8 | bitfield-struct = "0.3" 9 | bitflags = "2.3" 10 | bitmap-alloc = { path = "../crates/bitmap-alloc" } 11 | bitvec = { version = "1", default-features = false, features = ["atomic", "alloc"] } 12 | elf = { version = "0.7", default-features = false } 13 | limine = "=0.1.10" 14 | linked_list_allocator = "0.9" 15 | log = "0.4" 16 | paste = "1.0" 17 | pc-keyboard = "0.5" 18 | pic8259 = "0.10" 19 | ring_buffer = { path = "../crates/ring_buffer" } 20 | seq-macro = "0.3" 21 | spin = "0.9" 22 | test-infra = { path = "../crates/test-infra" } 23 | test-macro = { path = "../crates/test-macro" } 24 | x86_64 = "0.14" 25 | # Latest version of zerocopy was like 2 years old when I added it, but I needed 26 | # some newer features, like 27 | # https://github.com/google/zerocopy/commit/a728cb9ecc5eef30462969da09820f0053bc0d62 28 | zerocopy = { git = "https://github.com/google/zerocopy", rev = "b5b30d0287734c004bb2c1c97cc53a460443a57f" } 29 | 30 | # Test dependencies 31 | proptest = { version = "1", default-features = false, features = ["alloc", "unstable"], optional = true } 32 | 33 | [features] 34 | default = ["tests"] 35 | tests = ["proptest"] 36 | -------------------------------------------------------------------------------- /kernel/linker.ld: -------------------------------------------------------------------------------- 1 | /* Tell the linker that we want an x86_64 ELF64 output file */ 2 | OUTPUT_FORMAT(elf64-x86-64) 3 | OUTPUT_ARCH(i386:x86-64) 4 | 5 | /* We want the symbol _start to be our entry point */ 6 | ENTRY(_start) 7 | 8 | /* Define the program headers we want so the bootloader gives us the right */ 9 | /* MMU permissions */ 10 | PHDRS 11 | { 12 | text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */ 13 | rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */ 14 | data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ 15 | dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ 16 | percpu PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ 17 | } 18 | 19 | SECTIONS 20 | { 21 | /* We wanna be placed in the topmost 2GiB of the address space, for optimisations */ 22 | /* and because that is what the Limine spec mandates. */ 23 | /* Any address in this region will do, but often 0xffffffff80000000 is chosen as */ 24 | /* that is the beginning of the region. */ 25 | . = 0xffffffff80000000; 26 | 27 | .text : { 28 | *(.text .text.*) 29 | } :text 30 | 31 | /* Move to the next memory page for .rodata */ 32 | . += CONSTANT(MAXPAGESIZE); 33 | 34 | /* The built-in `x86_64-unknown-none` target generates relocatable executables */ 35 | /* by default, so we need to include the relocation information (.dynstr, .dynsym, */ 36 | /* and .rela) for the bootloader too properly load the kernel at runtime. */ 37 | .dynsym : { 38 | *(.dynsym) 39 | } :rodata 40 | 41 | .dynstr : { 42 | *(.dynstr) 43 | } :rodata 44 | 45 | .rela : { 46 | *(.rela*) 47 | } :rodata 48 | 49 | .rodata : { 50 | *(.rodata .rodata.*) 51 | } :rodata 52 | 53 | .debug_gdb_scripts : { 54 | KEEP(*(.debug_gdb_scripts)) 55 | } :rodata 56 | 57 | /* The percpu section starts at a vaddr of 0 so that all of the percpu */ 58 | /* variables have addresses that are an _offset_ into the percpu section. 
*/ 59 | _percpu_load = .; 60 | .percpu 0 : AT(_percpu_load) { 61 | _percpu_start = .; 62 | *(.percpu) 63 | _percpu_end = .; 64 | } :percpu 65 | . = _percpu_load + SIZEOF(.percpu); 66 | 67 | /* Move to the next memory page for .data */ 68 | . += CONSTANT(MAXPAGESIZE); 69 | 70 | /* The dynamic table is used to find the relocation info (declared above), so it */ 71 | /* must be included both in the :data and :dynamic segments. */ 72 | .dynamic : { 73 | *(.dynamic) 74 | } :data :dynamic 75 | 76 | .got : { 77 | *(.got) 78 | } :data 79 | 80 | .data : { 81 | *(.data.rel.ro .data.rel.ro.*) 82 | *(.data .data.*) 83 | } :data 84 | 85 | .init : { 86 | /* Testing infrastructure */ 87 | _start_init_test_array = .; 88 | KEEP(*(.init_test_array)) 89 | _end_init_test_array = .; 90 | } :data 91 | 92 | /* NOTE: .bss needs to be the last thing mapped to :data, otherwise lots of */ 93 | /* unnecessary zeros will be written to the binary. */ 94 | /* If you need, for example, .init_array and .fini_array, those should be placed */ 95 | /* above this. */ 96 | .bss : { 97 | *(COMMON) 98 | *(.dynbss) 99 | *(.bss .bss.*) 100 | } :data 101 | 102 | /* Discard .note.* and .eh_frame since they may cause issues on some hosts. */ 103 | /DISCARD/ : { 104 | *(.eh_frame) 105 | *(.note .note.*) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /kernel/src/ansiterm.rs: -------------------------------------------------------------------------------- 1 | //! ANSI terminal escape codes. See: 2 | //! - 3 | //! - 4 | 5 | use core::fmt; 6 | 7 | /// Start of an ANSI escape or command sequence. Often represented as `\033` (octal) or 8 | /// `\x1B` (hexadecimal), but equal to 27. 9 | pub(crate) const ANSI_ESCAPE: u8 = 27; 10 | 11 | pub(crate) const CLEAR_FORMAT: AnsiEscapeSequence = 12 | AnsiEscapeSequence::SelectGraphicRendition(SelectGraphicRendition::Reset); 13 | 14 | pub(crate) const BOLD: AnsiEscapeSequence = 15 | AnsiEscapeSequence::SelectGraphicRendition(SelectGraphicRendition::Bold); 16 | 17 | pub(crate) const GREEN: AnsiEscapeSequence = AnsiEscapeSequence::SelectGraphicRendition( 18 | SelectGraphicRendition::ForegroundColor(Color::Green), 19 | ); 20 | 21 | /// An ANSI escape sequence value that can be used in format strings. The meat 22 | /// of the logic for printing the sequence is in the `Display` trait 23 | /// implementation. 24 | pub(crate) enum AnsiEscapeSequence { 25 | SelectGraphicRendition(SelectGraphicRendition), 26 | MoveCursorTopLeft, 27 | ClearScreenFromCursorToEnd, 28 | ClearEntireLine, 29 | } 30 | 31 | impl fmt::Display for AnsiEscapeSequence { 32 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 33 | write!(f, "\x1B[")?; 34 | match self { 35 | Self::SelectGraphicRendition(sgr) => write!(f, "{sgr}")?, 36 | Self::MoveCursorTopLeft => write!(f, "H")?, 37 | Self::ClearScreenFromCursorToEnd => write!(f, "J")?, 38 | Self::ClearEntireLine => write!(f, "2K")?, 39 | } 40 | Ok(()) 41 | } 42 | } 43 | 44 | /// 45 | #[allow(dead_code)] 46 | pub(crate) enum SelectGraphicRendition { 47 | Reset, 48 | Bold, 49 | ForegroundColor(Color), 50 | BackgroundColor(Color), 51 | DefaultForegroundColor, 52 | DefaultBackgroundColor, 53 | } 54 | 55 | impl fmt::Display for SelectGraphicRendition { 56 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 57 | // N.B. 
\x1B[ already added from outer `AnsiEscapeSequence` impl 58 | match self { 59 | Self::Reset => write!(f, "0")?, 60 | Self::Bold => write!(f, "1")?, 61 | Self::ForegroundColor(color) => write!(f, "{}", color.foreground_byte())?, 62 | Self::BackgroundColor(color) => write!(f, "{}", color.background_byte())?, 63 | Self::DefaultForegroundColor => write!(f, "39")?, 64 | Self::DefaultBackgroundColor => write!(f, "49")?, 65 | } 66 | write!(f, "m") 67 | } 68 | } 69 | 70 | #[derive(Debug, Clone, Copy)] 71 | #[allow(dead_code)] 72 | pub(crate) enum Color { 73 | Black, 74 | Red, 75 | Green, 76 | Yellow, 77 | Blue, 78 | Magenta, 79 | Cyan, 80 | White, 81 | } 82 | 83 | impl Color { 84 | fn foreground_byte(self) -> u8 { 85 | match self { 86 | Self::Black => 30, 87 | Self::Red => 31, 88 | Self::Green => 32, 89 | Self::Yellow => 33, 90 | Self::Blue => 34, 91 | Self::Magenta => 35, 92 | Self::Cyan => 36, 93 | Self::White => 37, 94 | } 95 | } 96 | 97 | fn background_byte(self) -> u8 { 98 | match self { 99 | Self::Black => 40, 100 | Self::Red => 41, 101 | Self::Green => 42, 102 | Self::Yellow => 43, 103 | Self::Blue => 44, 104 | Self::Magenta => 45, 105 | Self::Cyan => 46, 106 | Self::White => 47, 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /kernel/src/barrier.rs: -------------------------------------------------------------------------------- 1 | //! Functions for dealing with memory barriers/fences. These are needed 2 | //! particularly during memory-mapped device IO to ensure stores and loads are 3 | //! performed in the correct order, and the CPU is not allowed to reorder them. 4 | //! See: 5 | //! 6 | //! - 7 | //! - 8 | 9 | use core::arch::x86_64; 10 | 11 | /// A memory barrier that prevents the CPU from reordering loads and stores 12 | /// across it. 13 | /// 14 | /// This is a full memory barrier, and on x86_64 is implemented using the 15 | /// `mfence` instruction. 16 | #[inline] 17 | pub(crate) fn barrier() { 18 | unsafe { x86_64::_mm_mfence() } 19 | } 20 | 21 | /// A memory barrier that prevents the CPU from reordering loads across it. 22 | /// 23 | /// This is a read memory barrier, and on x86_64 is implemented using the 24 | /// `lfence` instruction. 25 | #[inline] 26 | #[allow(dead_code)] 27 | pub(crate) fn read_barrier() { 28 | unsafe { x86_64::_mm_lfence() } 29 | } 30 | 31 | /// A memory barrier that prevents the CPU from reordering stores across it. 32 | /// 33 | /// This is a write memory barrier, and on x86_64 is implemented using the 34 | /// `sfence` instruction. 35 | #[inline] 36 | #[allow(dead_code)] 37 | pub(crate) fn write_barrier() { 38 | unsafe { x86_64::_mm_sfence() } 39 | } 40 | -------------------------------------------------------------------------------- /kernel/src/block.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | use core::ops::{Add, Deref, DerefMut}; 3 | 4 | use alloc::boxed::Box; 5 | use alloc::sync::Arc; 6 | use zerocopy::FromBytes; 7 | 8 | use crate::transmute::{TransmuteCollection, TransmuteView}; 9 | use crate::virtio; 10 | 11 | /// Wrapper around a `BlockDeviceDriver` implementation. 12 | #[derive(Debug)] 13 | pub(crate) struct BlockDevice { 14 | driver: Arc, 15 | } 16 | 17 | impl BlockDevice { 18 | pub(crate) fn new(driver: D) -> Self { 19 | Self { 20 | driver: Arc::new(driver), 21 | } 22 | } 23 | 24 | /// Number of blocks to read using the given block size. 
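    /// (Illustrative note added here, not in the original: with a 1024-byte
    /// filesystem block size on a 512-byte-sector virtio device,
    /// `read_blocks(BlockSize::new(1024), BlockIndex::new(5), 2)` translates to
    /// device start block 5 * (1024 / 512) = 10 and
    /// 2 * ceil(1024 / 512) = 4 device blocks, i.e. device blocks 10..14.)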
25 | pub(crate) fn read_blocks( 26 | &self, 27 | block_size: BlockSize, 28 | start_block: BlockIndex, 29 | num_blocks: usize, 30 | ) -> BlockBuffer { 31 | let block_size: u16 = block_size.0; 32 | let device_block_size: u16 = self.driver.device_block_size().0; 33 | 34 | let device_start_block = 35 | BlockIndex(start_block.0 * u64::from(block_size / device_block_size)); 36 | let device_num_blocks = num_blocks * block_size.div_ceil(device_block_size) as usize; 37 | let data = self 38 | .driver 39 | .read_device_blocks(device_start_block, device_num_blocks); 40 | 41 | BlockBuffer { 42 | device_start_block, 43 | _device_num_blocks: device_num_blocks, 44 | data, 45 | driver: self.driver.clone(), 46 | } 47 | } 48 | } 49 | 50 | pub(crate) trait BlockDeviceDriver: Debug { 51 | fn device_block_size(&self) -> BlockSize; 52 | 53 | /// Number of _device_ blocks to read, using the device's block size. 54 | fn read_device_blocks(&self, start_block: BlockIndex, num_blocks: usize) -> Box<[u8]>; 55 | 56 | fn write_device_blocks(&self, start_block: BlockIndex, data: &[u8]); 57 | } 58 | 59 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 60 | pub(crate) struct BlockSize(u16); 61 | 62 | impl BlockSize { 63 | pub(crate) const fn new(value: u16) -> Self { 64 | Self(value) 65 | } 66 | } 67 | 68 | impl From for BlockSize { 69 | fn from(value: u16) -> Self { 70 | Self(value) 71 | } 72 | } 73 | 74 | impl From for u16 { 75 | fn from(value: BlockSize) -> Self { 76 | value.0 77 | } 78 | } 79 | 80 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 81 | pub(crate) struct BlockIndex(u64); 82 | 83 | impl BlockIndex { 84 | pub(crate) const fn new(value: u64) -> Self { 85 | Self(value) 86 | } 87 | } 88 | 89 | impl From for BlockIndex { 90 | fn from(value: u64) -> Self { 91 | Self(value) 92 | } 93 | } 94 | 95 | impl From for u64 { 96 | fn from(value: BlockIndex) -> Self { 97 | value.0 98 | } 99 | } 100 | 101 | impl Add for BlockIndex { 102 | type Output = Self; 103 | 104 | fn add(self, rhs: Self) -> Self::Output { 105 | Self(self.0 + rhs.0) 106 | } 107 | } 108 | 109 | /// In-memory buffer for a disk block. 
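/// (Hypothetical usage sketch, added for illustration — the device id and
/// block size below are made-up values, and only APIs defined in this file
/// are used:)
///
/// ```ignore
/// let device = virtio_block_device(0);
/// let mut buf = device.read_blocks(BlockSize::new(1024), BlockIndex::new(2), 1);
/// buf.data_mut()[0] = 0xAB; // mutate the in-memory copy
/// buf.flush();              // write the backing device blocks back to disk
/// ```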
110 | #[derive(Debug)] 111 | pub(crate) struct BlockBuffer { 112 | device_start_block: BlockIndex, 113 | _device_num_blocks: usize, 114 | data: Box<[u8]>, 115 | driver: Arc, 116 | } 117 | 118 | impl BlockBuffer { 119 | pub(crate) fn data(&self) -> &[u8] { 120 | &self.data 121 | } 122 | 123 | pub(crate) fn data_mut(&mut self) -> &mut [u8] { 124 | &mut self.data 125 | } 126 | 127 | pub(crate) fn into_view(self) -> Option> { 128 | TransmuteView::new(self) 129 | } 130 | 131 | pub(crate) fn into_collection(self) -> TransmuteCollection { 132 | TransmuteCollection::new(self) 133 | } 134 | 135 | /// Flushes the block back to disk 136 | pub(crate) fn flush(&self) { 137 | assert!( 138 | self.data.len() % self.driver.device_block_size().0 as usize == 0, 139 | "BlockBuffer flush: data buffer is not a multiple of the device block size" 140 | ); 141 | self.driver 142 | .write_device_blocks(self.device_start_block, &self.data); 143 | } 144 | } 145 | 146 | impl Deref for BlockBuffer { 147 | type Target = [u8]; 148 | 149 | fn deref(&self) -> &Self::Target { 150 | &self.data 151 | } 152 | } 153 | 154 | impl DerefMut for BlockBuffer { 155 | fn deref_mut(&mut self) -> &mut Self::Target { 156 | &mut self.data 157 | } 158 | } 159 | 160 | impl AsRef for BlockBuffer 161 | where 162 | T: ?Sized, 163 | ::Target: AsRef, 164 | { 165 | fn as_ref(&self) -> &T { 166 | self.deref().as_ref() 167 | } 168 | } 169 | 170 | impl AsMut for BlockBuffer 171 | where 172 | T: ?Sized, 173 | ::Target: AsMut, 174 | { 175 | fn as_mut(&mut self) -> &mut T { 176 | self.deref_mut().as_mut() 177 | } 178 | } 179 | 180 | pub(crate) fn virtio_block_device(device_id: usize) -> BlockDevice { 181 | BlockDevice::new(VirtioBlockDevice::new(device_id)) 182 | } 183 | 184 | #[derive(Debug)] 185 | pub(crate) struct VirtioBlockDevice { 186 | device_id: usize, 187 | } 188 | 189 | impl VirtioBlockDevice { 190 | fn new(device_id: usize) -> Self { 191 | Self { device_id } 192 | } 193 | } 194 | 195 | impl BlockDeviceDriver for VirtioBlockDevice { 196 | fn device_block_size(&self) -> BlockSize { 197 | BlockSize::try_from(virtio::VIRTIO_BLOCK_SECTOR_SIZE_BYTES as u16) 198 | .expect("invalid virtio block size") 199 | } 200 | 201 | fn read_device_blocks(&self, start_block: BlockIndex, num_blocks: usize) -> Box<[u8]> { 202 | let response = virtio::virtio_block_read(self.device_id, start_block.0, num_blocks as u32) 203 | .wait_sleep(); 204 | let virtio::VirtIOBlockResponse::Read(mut response) = response else { 205 | panic!("unexpected virtio block response: {:?}", response); 206 | }; 207 | Box::from(&*response.data()) 208 | } 209 | 210 | fn write_device_blocks(&self, start_block: BlockIndex, data: &[u8]) { 211 | let response = virtio::virtio_block_write(self.device_id, start_block.0, data).wait_sleep(); 212 | let virtio::VirtIOBlockResponse::Write = response else { 213 | panic!("unexpected virtio block response: {:?}", response); 214 | }; 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /kernel/src/debug.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | use crate::boot_info; 4 | 5 | /// This is a hack to get GDB to load our pretty printers. See 6 | /// 7 | #[used] 8 | #[link_section = ".debug_gdb_scripts"] 9 | static GDB_PRETTY_PRINTERS: [u8; 34] = *b"\x01gdb_load_rust_pretty_printers.py\0"; 10 | 11 | /// Generates a stack trace by iterating over the stack frame pointers. 
Requires 12 | /// `-C force-frame-pointers=yes` to be passed to rustc, otherwise Rust tends to 13 | /// treat `rbp` as a general purpose register. 14 | /// 15 | /// See: 16 | /// - 17 | /// - 18 | /// - 19 | pub(crate) fn print_stack_trace() { 20 | let boot_info_data = boot_info::boot_info(); 21 | 22 | log::warn!("Stack trace:"); 23 | let mut rbp: *const u64; 24 | unsafe { 25 | asm!("mov {}, rbp", out(reg) rbp); 26 | } 27 | while !rbp.is_null() { 28 | let return_address = unsafe { *(rbp.offset(1)) }; 29 | let location = find_symbol_in_map_file(boot_info_data, return_address).unwrap_or("???"); 30 | log::warn!(" {return_address:#x} [{location}]"); 31 | rbp = unsafe { *(rbp) as *const u64 }; 32 | } 33 | } 34 | 35 | fn find_symbol_in_map_file( 36 | boot_info_data: &boot_info::BootInfo, 37 | address: u64, 38 | ) -> Option<&'static str> { 39 | let map_file = boot_info_data.kernel_symbol_map_file.as_ref()?; 40 | map_file.find_function_symbol_for_instruction_address(address) 41 | } 42 | -------------------------------------------------------------------------------- /kernel/src/elf.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::String; 2 | use alloc::vec::Vec; 3 | use core::fmt; 4 | 5 | use bitflags::bitflags; 6 | use elf::endian::AnyEndian; 7 | use elf::segment::ProgramHeader; 8 | use elf::{ElfBytes, ParseError}; 9 | use x86_64::VirtAddr; 10 | 11 | use crate::memory::PageTableEntryFlags; 12 | 13 | /// Wrapper around a parsed ELF header for executables. 14 | pub(crate) struct ElfExecutableHeader<'a> { 15 | pub(crate) parsed: ElfBytes<'a, AnyEndian>, 16 | pub(crate) entrypoint: VirtAddr, 17 | pub(crate) loadable_segments: Vec, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub(crate) enum ElfExecutableHeaderError { 22 | ParseError(ParseError), 23 | Other(String), 24 | } 25 | 26 | impl<'a> ElfExecutableHeader<'a> { 27 | pub(crate) fn parse(bytes: &'a [u8]) -> Result { 28 | let parsed = ElfBytes::::minimal_parse(bytes) 29 | .map_err(ElfExecutableHeaderError::ParseError)?; 30 | 31 | if parsed.ehdr.e_type != elf::abi::ET_EXEC { 32 | return Err(ElfExecutableHeaderError::Other(format!( 33 | "expected ET_EXEC but found {:?}", 34 | parsed.ehdr.e_type 35 | ))); 36 | } 37 | 38 | if parsed.ehdr.e_machine != elf::abi::EM_X86_64 { 39 | return Err(ElfExecutableHeaderError::Other(format!( 40 | "expected EM_X86_64 but found {:?}", 41 | parsed.ehdr.e_machine 42 | ))); 43 | } 44 | 45 | let entrypoint = VirtAddr::new(parsed.ehdr.e_entry); 46 | 47 | let Some(segments) = parsed 48 | .segments() else { 49 | return Err(ElfExecutableHeaderError::Other( 50 | String::from("no segments found") 51 | )); 52 | }; 53 | 54 | let mut loadable_segments = Vec::new(); 55 | for program_header in segments { 56 | if program_header.p_type != elf::abi::PT_LOAD { 57 | continue; 58 | } 59 | 60 | if program_header.p_paddr > 0 && program_header.p_paddr != program_header.p_vaddr { 61 | return Err(ElfExecutableHeaderError::Other(format!( 62 | "invalid p_addr: {program_header:?}" 63 | ))); 64 | } 65 | 66 | let file_offset = program_header.p_offset; 67 | let vaddr = VirtAddr::new(program_header.p_vaddr); 68 | let mem_size = program_header.p_memsz; 69 | let flags = 70 | LoadableSegmentFlags::from_bits(program_header.p_flags).ok_or_else(|| { 71 | ElfExecutableHeaderError::Other(format!("invalid flags: {program_header:?}")) 72 | })?; 73 | let alignment = program_header.p_align; 74 | 75 | loadable_segments.push(LoadableSegment { 76 | raw_header: program_header, 77 | file_offset, 78 | vaddr, 79 | 
mem_size, 80 | flags, 81 | alignment, 82 | }); 83 | } 84 | 85 | Ok(Self { 86 | parsed, 87 | entrypoint, 88 | loadable_segments, 89 | }) 90 | } 91 | } 92 | 93 | impl fmt::Debug for ElfExecutableHeader<'_> { 94 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 95 | f.debug_struct("ElfExecutableHeader") 96 | .field("header", &self.parsed.ehdr) 97 | .field("entrypoint", &self.entrypoint) 98 | .field("loadable_segments", &self.loadable_segments) 99 | .finish() 100 | } 101 | } 102 | 103 | #[derive(Debug)] 104 | #[allow(dead_code)] // TODO: Remove allow(dead_code) when we actually use this 105 | pub(crate) struct LoadableSegment { 106 | pub(crate) raw_header: ProgramHeader, 107 | pub(crate) file_offset: u64, 108 | pub(crate) vaddr: VirtAddr, 109 | pub(crate) mem_size: u64, 110 | pub(crate) flags: LoadableSegmentFlags, 111 | pub(crate) alignment: u64, 112 | } 113 | 114 | bitflags! { 115 | #[derive(Debug)] 116 | #[repr(transparent)] 117 | pub(super) struct LoadableSegmentFlags: u32 { 118 | const EXECUTABLE = 1; 119 | const WRITABLE = 2; 120 | const READABLE = 4; 121 | } 122 | } 123 | 124 | impl LoadableSegmentFlags { 125 | pub(crate) fn page_table_entry_flags(&self) -> PageTableEntryFlags { 126 | let mut flags = PageTableEntryFlags::empty(); 127 | 128 | if !self.contains(Self::EXECUTABLE) { 129 | flags |= PageTableEntryFlags::NO_EXECUTE; 130 | } 131 | 132 | if self.contains(Self::WRITABLE) { 133 | flags |= PageTableEntryFlags::WRITABLE; 134 | } 135 | 136 | if self.contains(Self::READABLE) { 137 | flags |= PageTableEntryFlags::PRESENT; 138 | } 139 | 140 | flags 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /kernel/src/fs/ext2/block_group.rs: -------------------------------------------------------------------------------- 1 | use zerocopy::{AsBytes, FromBytes, FromZeroes}; 2 | 3 | use super::superblock::{BlockAddress, LocalInodeIndex}; 4 | 5 | /// See 6 | #[repr(C, packed)] 7 | #[derive(Debug, FromZeroes, FromBytes, AsBytes)] 8 | pub(super) struct BlockGroupDescriptor { 9 | pub(super) block_bitmap: BlockAddress, 10 | pub(super) inode_bitmap: BlockAddress, 11 | pub(super) inode_table: InodeTableBlockAddress, 12 | pub(super) free_blocks_count: u16, 13 | pub(super) free_inodes_count: u16, 14 | pub(super) used_dirs_count: u16, 15 | _pad: u16, 16 | _reserved: [u8; 12], 17 | } 18 | 19 | #[repr(transparent)] 20 | #[derive(Debug, Clone, Copy, FromZeroes, FromBytes, AsBytes)] 21 | pub(super) struct InodeTableBlockAddress(pub(super) BlockAddress); 22 | 23 | /// See 24 | /// 25 | /// Each bit represent the current state of a block within that block group, 26 | /// where 1 means "used" and 0 "free/available". The first block of this block 27 | /// group is represented by bit 0 of byte 0, the second by bit 1 of byte 0. The 28 | /// 8th block is represented by bit 7 (most significant bit) of byte 0 while the 29 | /// 9th block is represented by bit 0 (least significant bit) of byte 1. 30 | #[derive(Debug)] 31 | pub(super) struct BlockBitmap<'a>(Bitmap<'a>); 32 | 33 | impl<'a> BlockBitmap<'a> { 34 | pub(super) fn new(bytes: &'a mut [u8]) -> Self { 35 | Self(Bitmap(bytes)) 36 | } 37 | 38 | pub(super) fn reserve_next_free(&mut self) -> Option { 39 | let index = self.0.reserve_next_free()?; 40 | Some(BlockAddress(index as u32)) 41 | } 42 | } 43 | 44 | /// See 45 | /// 46 | /// The "Inode Bitmap" works in a similar way as the "Block Bitmap", difference 47 | /// being in each bit representing an inode in the "Inode Table" rather than a 48 | /// block. 
Since inode numbers start from 1 rather than 0, the first bit in the 49 | /// first block group's inode bitmap represent inode number 1. 50 | #[derive(Debug)] 51 | pub(super) struct InodeBitmap<'a>(Bitmap<'a>); 52 | 53 | impl<'a> InodeBitmap<'a> { 54 | pub(super) fn new(bytes: &'a mut [u8]) -> Self { 55 | Self(Bitmap(bytes)) 56 | } 57 | 58 | pub(super) fn is_used(&self, local_index: LocalInodeIndex) -> Option { 59 | self.0.is_used(local_index.0 as usize) 60 | } 61 | 62 | pub(super) fn reserve_next_free(&mut self) -> Option { 63 | let index = self.0.reserve_next_free()?; 64 | Some(LocalInodeIndex(index as u32)) 65 | } 66 | } 67 | 68 | #[derive(Debug)] 69 | struct Bitmap<'a>(pub(super) &'a mut [u8]); 70 | 71 | impl<'a> Bitmap<'a> { 72 | pub(super) fn is_used(&self, index: usize) -> Option { 73 | let byte = self.0.get(index / 8)?; 74 | let bit = index % 8; 75 | let mask = 1 << bit; 76 | Some(byte & mask != 0) 77 | } 78 | 79 | /// Finds the next open entry in the bitmap, and returns its index. Returns 80 | /// `None` if there are no more remaining entries. 81 | pub(super) fn reserve_next_free(&mut self) -> Option { 82 | for (index, byte) in self.0.iter_mut().enumerate() { 83 | if *byte == 0xFF { 84 | continue; 85 | } 86 | for bit in 0..8 { 87 | let mask = 1 << bit; 88 | if *byte & mask == 0 { 89 | *byte |= mask; 90 | return Some(index * 8 + bit); 91 | } 92 | } 93 | } 94 | None 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /kernel/src/fs/ext2/inode.rs: -------------------------------------------------------------------------------- 1 | use bitflags::bitflags; 2 | use zerocopy::{AsBytes, FromBytes, FromZeroes}; 3 | 4 | use super::superblock::BlockAddress; 5 | 6 | /// See 7 | #[repr(C, packed)] 8 | #[derive(Debug, Clone, FromZeroes, FromBytes, AsBytes)] 9 | pub(super) struct Inode { 10 | pub(super) mode: InodeMode, 11 | pub(super) uid: u16, 12 | pub(super) size_low: u32, 13 | pub(super) atime: u32, 14 | pub(super) ctime: u32, 15 | pub(super) mtime: u32, 16 | pub(super) dtime: u32, 17 | pub(super) gid: u16, 18 | pub(super) links_count: u16, 19 | pub(super) blocks: u32, 20 | pub(super) flags: u32, 21 | pub(super) osd1: u32, 22 | pub(super) direct_blocks: InodeDirectBlocks, 23 | pub(super) singly_indirect_block: BlockAddress, 24 | pub(super) doubly_indirect_block: BlockAddress, 25 | pub(super) triply_indirect_block: BlockAddress, 26 | pub(super) generation: u32, 27 | pub(super) file_acl: u32, 28 | /// High 32 bits of file size. This is dir_acl in revision 0. 29 | pub(super) size_high: u32, 30 | pub(super) faddr: u32, 31 | pub(super) osd2: [u8; 12], 32 | } 33 | 34 | impl Inode { 35 | pub(super) fn is_dir(&self) -> bool { 36 | let mode = self.mode; 37 | mode.contains(InodeMode::IFDIR) 38 | } 39 | 40 | pub(super) fn is_file(&self) -> bool { 41 | let mode = self.mode; 42 | mode.contains(InodeMode::IFREG) 43 | } 44 | } 45 | 46 | #[derive(Debug, Copy, Clone, FromZeroes, FromBytes, AsBytes)] 47 | #[repr(transparent)] 48 | /// 49 | pub(super) struct InodeMode(u16); 50 | 51 | bitflags! 
{ 52 | impl InodeMode: u16 { 53 | // Access rights 54 | 55 | /// Others execute 56 | const IXOTH = 0x001; 57 | 58 | /// Others write 59 | const IWOTH = 0x002; 60 | 61 | /// Others read 62 | const IROTH = 0x004; 63 | 64 | /// Group execute 65 | const IXGRP = 0x008; 66 | 67 | /// Group write 68 | const IWGRP = 0x010; 69 | 70 | /// Group read 71 | const IRGRP = 0x020; 72 | 73 | /// User execute 74 | const IXUSR = 0x040; 75 | 76 | /// User write 77 | const IWUSR = 0x080; 78 | 79 | /// User read 80 | const IRUSR = 0x100; 81 | 82 | // Process execution user/group override 83 | 84 | /// Sticky bit 85 | const ISVTX = 0x200; 86 | 87 | /// Set process group id 88 | const ISGID = 0x400; 89 | 90 | /// Set process user id 91 | const ISUID = 0x800; 92 | 93 | // File format 94 | 95 | /// FIFO 96 | const IFIFO = 0x1000; 97 | 98 | /// Character device 99 | const IFCHR = 0x2000; 100 | 101 | /// Directory 102 | const IFDIR = 0x4000; 103 | 104 | /// Block device 105 | const IFBLK = 0x6000; 106 | 107 | /// Regular file 108 | const IFREG = 0x8000; 109 | 110 | /// Symbolic link 111 | const IFLNK = 0xA000; 112 | 113 | /// Socket 114 | const IFSOCK = 0xC000; 115 | } 116 | } 117 | 118 | #[derive(Debug, Clone, Copy, FromZeroes, FromBytes, AsBytes)] 119 | #[repr(transparent)] 120 | pub(super) struct InodeDirectBlocks(pub(super) [BlockAddress; 12]); 121 | 122 | impl InodeDirectBlocks { 123 | pub(super) fn empty() -> Self { 124 | Self([BlockAddress(0); 12]) 125 | } 126 | 127 | pub(super) fn insert(&mut self, index: usize, block: BlockAddress) { 128 | assert!(index < self.0.len(), "index {index} out of bounds"); 129 | self.0[index] = block; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /kernel/src/fs/ext2/mod.rs: -------------------------------------------------------------------------------- 1 | //! Code for interacting with ext2 filesystems. 2 | //! 3 | //! # Resources 4 | //! 5 | //! - 6 | //! - 7 | //! - 8 | //! - 9 | //! - "CHAPTER 18: The Ext2 and Ext3 Filesystems" in "Understanding the Linux Kernel - Bovet (3rd ed, 2005)" 10 | 11 | mod block_group; 12 | mod directory; 13 | mod file_system; 14 | mod inode; 15 | mod strings; 16 | mod superblock; 17 | mod vfs; 18 | 19 | pub(crate) use vfs::*; 20 | -------------------------------------------------------------------------------- /kernel/src/fs/ext2/strings.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use zerocopy::{AsBytes, FromBytes, FromZeroes}; 4 | 5 | /// Wrapper around a byte array that represents a nul-terminated string. 6 | #[derive(Copy, Clone, FromZeroes, FromBytes, AsBytes)] 7 | #[repr(transparent)] 8 | pub(super) struct CStringBytes(B); 9 | 10 | impl CStringBytes<[u8; N]> { 11 | pub(super) fn as_str(&self) -> &str { 12 | c_str_from_bytes(&self.0) 13 | } 14 | } 15 | 16 | impl fmt::Debug for CStringBytes<[u8; N]> { 17 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 18 | f.debug_tuple("CStringBytes").field(&self.as_str()).finish() 19 | } 20 | } 21 | 22 | /// Creates a null-terminated string from a byte slice. 
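/// Scanning stops at the first NUL byte (or uses the whole slice if no NUL is
/// present), and invalid UTF-8 falls back to an empty string. Illustrative
/// example:
///
/// ```ignore
/// assert_eq!(c_str_from_bytes(b"lost+found\0\0"), "lost+found");
/// assert_eq!(c_str_from_bytes(b"no-nul"), "no-nul");
/// ```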
23 | pub(super) fn c_str_from_bytes(bytes: &[u8]) -> &str { 24 | let nul_location = bytes.iter().position(|&c| c == 0).unwrap_or(bytes.len()); 25 | core::str::from_utf8(&bytes[..nul_location]).unwrap_or("") 26 | } 27 | -------------------------------------------------------------------------------- /kernel/src/fs/fat.rs: -------------------------------------------------------------------------------- 1 | use zerocopy::{AsBytes, FromBytes, FromZeroes}; 2 | 3 | /// The BIOS parameter block is the first part of the boot sector. 4 | #[derive(Debug, AsBytes, FromBytes, FromZeroes)] 5 | #[repr(C, packed)] 6 | pub struct BIOSParameterBlock { 7 | jmp_boot: [u8; 3], 8 | oem_name: [u8; 8], 9 | bytes_per_sector: u16, 10 | sectors_per_cluster: u8, 11 | reserved_sectors: u16, 12 | fat_count: u8, 13 | root_dir_entries: u16, 14 | total_sectors: u16, 15 | media_descriptor: u8, 16 | sectors_per_fat: u16, 17 | sectors_per_track: u16, 18 | head_count: u16, 19 | hidden_sectors: u32, 20 | total_sectors_large: u32, 21 | } 22 | -------------------------------------------------------------------------------- /kernel/src/fs/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod ext2; 2 | pub(crate) mod fat; 3 | pub(crate) mod sysfs; 4 | -------------------------------------------------------------------------------- /kernel/src/fs/sysfs.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use alloc::format; 3 | use alloc::string::String; 4 | use alloc::vec; 5 | 6 | use crate::sched::TaskId; 7 | use crate::{sched, vfs}; 8 | 9 | #[derive(Debug)] 10 | pub(crate) struct Sysfs; 11 | 12 | impl vfs::FileSystem for Sysfs { 13 | fn read_root(&mut self) -> vfs::Inode { 14 | vfs::Inode { 15 | inode_type: vfs::InodeType::Directory(Box::new(VFSRootInode)), 16 | } 17 | } 18 | } 19 | 20 | #[derive(Debug)] 21 | struct VFSRootInode; 22 | 23 | impl vfs::DirectoryInode for VFSRootInode { 24 | fn subdirectories(&mut self) -> alloc::vec::Vec> { 25 | vec![Box::new(VFSTasksDirectory)] 26 | } 27 | } 28 | 29 | /// Holds a subdirectory per running task. 30 | #[derive(Debug)] 31 | struct VFSTasksDirectory; 32 | 33 | impl vfs::DirectoryEntry for VFSTasksDirectory { 34 | fn name(&self) -> String { 35 | String::from("tasks") 36 | } 37 | 38 | fn entry_type(&self) -> vfs::DirectoryEntryType { 39 | vfs::DirectoryEntryType::Directory 40 | } 41 | 42 | fn get_inode(&mut self) -> vfs::Inode { 43 | vfs::Inode { 44 | inode_type: vfs::InodeType::Directory(Box::new(Self)), 45 | } 46 | } 47 | } 48 | 49 | impl vfs::DirectoryInode for VFSTasksDirectory { 50 | fn subdirectories(&mut self) -> alloc::vec::Vec> { 51 | sched::TASKS 52 | .lock() 53 | .task_ids() 54 | .into_iter() 55 | .map(|task_id| Box::new(VFSTaskDirectory { task_id }) as Box) 56 | .collect() 57 | } 58 | } 59 | 60 | /// Subdirectory for a specific task. 
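/// The directory is named after the numeric task ID and currently exposes a
/// single `info` file containing the task's debug representation (see below).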
61 | #[derive(Debug, Clone)] 62 | struct VFSTaskDirectory { 63 | task_id: TaskId, 64 | } 65 | 66 | impl vfs::DirectoryEntry for VFSTaskDirectory { 67 | fn name(&self) -> String { 68 | format!("{}", u32::from(self.task_id)) 69 | } 70 | 71 | fn entry_type(&self) -> vfs::DirectoryEntryType { 72 | vfs::DirectoryEntryType::Directory 73 | } 74 | 75 | fn get_inode(&mut self) -> vfs::Inode { 76 | vfs::Inode { 77 | inode_type: vfs::InodeType::Directory(Box::new(self.clone())), 78 | } 79 | } 80 | } 81 | 82 | impl vfs::DirectoryInode for VFSTaskDirectory { 83 | fn subdirectories(&mut self) -> alloc::vec::Vec> { 84 | vec![Box::new(VFSTaskInfoFile { 85 | task_id: self.task_id, 86 | })] 87 | } 88 | } 89 | 90 | /// General info about a task 91 | #[derive(Debug)] 92 | struct VFSTaskInfoFile { 93 | task_id: TaskId, 94 | } 95 | 96 | impl VFSTaskInfoFile { 97 | fn data(&self) -> String { 98 | sched::TASKS 99 | .lock_disable_interrupts() 100 | .get_task(self.task_id) 101 | .map_or_else( 102 | || format!("task not found..."), 103 | |task| format!("{:#X?}", task), 104 | ) 105 | } 106 | } 107 | 108 | impl vfs::DirectoryEntry for VFSTaskInfoFile { 109 | fn name(&self) -> String { 110 | String::from("info") 111 | } 112 | 113 | fn entry_type(&self) -> vfs::DirectoryEntryType { 114 | vfs::DirectoryEntryType::File 115 | } 116 | 117 | fn get_inode(&mut self) -> vfs::Inode { 118 | vfs::Inode { 119 | inode_type: vfs::InodeType::File(Box::new(Self { 120 | task_id: self.task_id, 121 | })), 122 | } 123 | } 124 | } 125 | 126 | impl vfs::FileInode for VFSTaskInfoFile { 127 | fn read(&mut self, buffer: &mut [u8], offset: usize) -> vfs::FileInodeReadResult { 128 | sysfs_read_file(&self.data(), buffer, offset) 129 | } 130 | } 131 | 132 | /// Generic code to implement a sysfs file read that just reads from a string. 
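/// The `offset` and the buffer length define a read window into the string.
/// Worked example with made-up sizes: reading 10 bytes of content through an
/// 8-byte buffer, the call at offset 0 copies bytes 0..8 and returns `Success`;
/// the call at offset 8 copies the final 2 bytes and returns `Done` with
/// `bytes_read` equal to the full content length.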
133 | fn sysfs_read_file( 134 | file_content: &str, 135 | buffer: &mut [u8], 136 | offset: usize, 137 | ) -> vfs::FileInodeReadResult { 138 | let data = file_content.as_bytes(); 139 | let start = offset.min(data.len()); 140 | let end = (offset + buffer.len()).min(data.len()); 141 | let copy_data = &data[start..end]; 142 | buffer[..copy_data.len()].copy_from_slice(copy_data); 143 | if end == data.len() { 144 | vfs::FileInodeReadResult::Done { 145 | bytes_read: file_content.len(), 146 | } 147 | } else { 148 | vfs::FileInodeReadResult::Success 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /kernel/src/graphics/mod.rs: -------------------------------------------------------------------------------- 1 | mod font; 2 | mod framebuffer; 3 | mod text_buffer; 4 | 5 | use core::fmt::Write; 6 | 7 | use crate::boot_info; 8 | use crate::sync::SpinLock; 9 | 10 | use framebuffer::VESAFramebuffer32Bit; 11 | use text_buffer::TextBuffer; 12 | 13 | static FRAMEBUFFER: SpinLock> = SpinLock::new(None); 14 | 15 | static TEXT_BUFFER: SpinLock = SpinLock::new(TextBuffer::new()); 16 | 17 | pub(crate) fn init(boot_info_data: &boot_info::BootInfo) { 18 | FRAMEBUFFER.lock().replace(unsafe { 19 | VESAFramebuffer32Bit::from_limine_framebuffer(boot_info_data.framebuffer) 20 | .expect("failed to create VESAFramebuffer32Bit") 21 | }); 22 | } 23 | 24 | pub(crate) fn write_text_buffer(text: &str) { 25 | let mut framebuffer_lock = FRAMEBUFFER.lock(); 26 | let framebuffer = framebuffer_lock 27 | .as_mut() 28 | .expect("framebuffer not initialized"); 29 | let mut text_buffer = TEXT_BUFFER.lock(); 30 | text_buffer 31 | .write_str(text) 32 | .expect("failed to write to text buffer"); 33 | text_buffer.flush(framebuffer); 34 | } 35 | -------------------------------------------------------------------------------- /kernel/src/graphics/text_buffer.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use bitvec::prelude::AsBits; 4 | use ring_buffer::RingBuffer; 5 | 6 | use super::font::{ 7 | FONT_HEIGHT_PIXELS, FONT_SPACE_CHARACTER_INDEX, FONT_START_CHAR_ASCII_CODE, FONT_WIDTH_PIXELS, 8 | OPENGL_FONT, 9 | }; 10 | use super::framebuffer::{ARGB32Bit, VESAFramebuffer32Bit, ARGB32BIT_BLACK, ARGB32BIT_WHITE}; 11 | 12 | /// ASCII character along with a color. 13 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 14 | pub(super) struct ColorChar { 15 | char_byte: u8, 16 | color: ARGB32Bit, 17 | } 18 | 19 | impl ColorChar { 20 | pub(super) fn new(char_byte: u8, color: ARGB32Bit) -> Self { 21 | Self { char_byte, color } 22 | } 23 | } 24 | 25 | /// A cursor-based text buffer that can print text to a framebuffer. 26 | pub(super) struct TextBuffer { 27 | /// Ring buffer that holds the text lines. 28 | buffer: RingBuffer<[ColorChar; W], N>, 29 | 30 | /// Cursor into the current line of text. 31 | cursor: usize, 32 | } 33 | 34 | impl TextBuffer { 35 | pub(super) const fn new() -> Self { 36 | Self { 37 | buffer: RingBuffer::new(), 38 | cursor: 0, 39 | } 40 | } 41 | 42 | fn new_line(&mut self) { 43 | self.buffer.push([ColorChar::new(0x00, ARGB32BIT_WHITE); W]); 44 | self.cursor = 0; 45 | } 46 | 47 | /// Writes a character to the internal `TextLineBuffer`, but doesn't flush 48 | /// the text to the framebuffer. You must call `flush` to draw the text to 49 | /// the framebuffer. 
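/// A `\n` byte starts a new line without being stored, and writing past the
/// line width `W` wraps to a fresh line before the character is placed.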
50 | pub(super) fn write_char(&mut self, c: ColorChar) { 51 | // Wrap text for newline and consume char 52 | if c.char_byte == b'\n' { 53 | self.new_line(); 54 | return; 55 | } 56 | 57 | // Wrap text to next line but don't consume char 58 | if self.cursor == W { 59 | self.new_line(); 60 | } 61 | 62 | // Get the current line, ensuring one exists. 63 | let current_line = if let Some(line) = self.buffer.get_mut(0) { 64 | line 65 | } else { 66 | self.new_line(); 67 | self.buffer.get_mut(0).unwrap() 68 | }; 69 | 70 | current_line[self.cursor] = c; 71 | self.cursor += 1; 72 | } 73 | 74 | /// Clear the framebuffer and then draw all the text that fits in the 75 | /// framebuffer. 76 | pub(super) fn flush(&mut self, framebuffer: &mut VESAFramebuffer32Bit) { 77 | framebuffer.clear(); 78 | 79 | // Start at the last line of the text buffer and draw lines until we run 80 | // out of space in the framebuffer or we run out of lines in the text 81 | // buffer. 82 | let mut pixel_y: usize = framebuffer.height_pixels(); 83 | 84 | // N.B. Our ring buffer implementation iterates from the most recently 85 | // inserted item to the oldest item. 86 | for line in self.buffer.iter() { 87 | // Find y coordinate for line. The +1 is for spacing between lines 88 | pixel_y = match pixel_y.checked_sub(FONT_HEIGHT_PIXELS + 1) { 89 | Some(y) => y, 90 | None => break, 91 | }; 92 | 93 | let mut x = 1; // A bit of space from left edge of screen 94 | for c in line.iter() { 95 | if x + FONT_WIDTH_PIXELS > framebuffer.width_pixels() { 96 | break; 97 | } 98 | draw_char(framebuffer, x, pixel_y, *c); 99 | x += FONT_WIDTH_PIXELS + 1; // +1 for padding between characters 100 | } 101 | } 102 | } 103 | } 104 | 105 | impl fmt::Write for TextBuffer { 106 | fn write_str(&mut self, s: &str) -> fmt::Result { 107 | for byte in s.bytes() { 108 | self.write_char(ColorChar { 109 | char_byte: byte, 110 | color: ARGB32BIT_WHITE, 111 | }); 112 | } 113 | Ok(()) 114 | } 115 | } 116 | 117 | fn draw_char(framebuffer: &mut VESAFramebuffer32Bit, x: usize, y: usize, c: ColorChar) { 118 | let index: usize = c 119 | .char_byte 120 | .checked_sub(FONT_START_CHAR_ASCII_CODE) 121 | .map_or(FONT_SPACE_CHARACTER_INDEX, |index| index as usize); 122 | 123 | let Some(char_bytes) = OPENGL_FONT.get(index) else { return }; 124 | let bitmap = char_bytes.as_bits::(); 125 | 126 | framebuffer.draw_bitmap(x, y, bitmap, FONT_WIDTH_PIXELS, c.color, ARGB32BIT_BLACK); 127 | } 128 | 129 | #[cfg(feature = "tests")] 130 | mod test { 131 | use super::*; 132 | 133 | use alloc::vec::Vec; 134 | 135 | use crate::tests::kernel_test; 136 | 137 | fn assert_line_text_equal(line: &[ColorChar], expected: &[u8]) { 138 | assert_eq!(line.len(), expected.len()); 139 | 140 | let left_chars = line 141 | .iter() 142 | .map(|c| c.char_byte as char) 143 | .collect::>(); 144 | let right_chars = expected.iter().map(|c| *c as char).collect::>(); 145 | 146 | assert_eq!(left_chars, right_chars); 147 | } 148 | 149 | #[kernel_test] 150 | fn test_text_buffer_writer() { 151 | use core::fmt::Write; 152 | let mut text_buffer: TextBuffer<4, 4> = TextBuffer::new(); 153 | writeln!(text_buffer, "abc").unwrap(); 154 | writeln!(text_buffer, "1234").unwrap(); 155 | 156 | assert_eq!(text_buffer.buffer.len(), 3); 157 | assert_line_text_equal(text_buffer.buffer.get_mut(0).unwrap(), &[0; 4]); 158 | assert_line_text_equal(text_buffer.buffer.get_mut(1).unwrap(), b"1234"); 159 | assert_line_text_equal(text_buffer.buffer.get_mut(2).unwrap(), b"abc\0"); 160 | } 161 | 162 | #[kernel_test] 163 | fn 
test_text_buffer_implicit_line_wrap() { 164 | use core::fmt::Write; 165 | let mut text_buffer: TextBuffer<5, 4> = TextBuffer::new(); 166 | writeln!(text_buffer, "abc").unwrap(); 167 | writeln!(text_buffer, "1234567").unwrap(); 168 | 169 | assert_eq!(text_buffer.buffer.len(), 4); 170 | assert_line_text_equal(text_buffer.buffer.get_mut(0).unwrap(), &[0; 4]); 171 | assert_line_text_equal(text_buffer.buffer.get_mut(1).unwrap(), b"567\0"); 172 | assert_line_text_equal(text_buffer.buffer.get_mut(2).unwrap(), b"1234"); 173 | assert_line_text_equal(text_buffer.buffer.get_mut(3).unwrap(), b"abc\0"); 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /kernel/src/ioapic.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use bitfield_struct::bitfield; 4 | 5 | use crate::acpi::ACPIInfo; 6 | use crate::interrupts::InterruptVector; 7 | use crate::register_struct; 8 | use crate::registers::RegisterRW; 9 | use crate::sync::InitCell; 10 | 11 | static IOAPIC: InitCell = InitCell::new(); 12 | 13 | pub(crate) fn init(acpi_info: &ACPIInfo) { 14 | let ioapic = IOAPIC::from_acpi_info(acpi_info); 15 | IOAPIC.init(ioapic); 16 | } 17 | 18 | pub(crate) fn install_irq(interrupt_vector: InterruptVector, irq_entry: IOAPICIRQNumber) { 19 | let ioapic = IOAPIC.get().expect("IOAPIC not initialized!"); 20 | 21 | ioapic.write_ioredtbl( 22 | irq_entry as u8, 23 | IOAPICRedirectionTableRegister::new() 24 | .with_interrupt_vector(interrupt_vector.0) 25 | .with_interrupt_mask(false) 26 | .with_delivery_mode(0) // Fixed 27 | .with_destination_mode(false) // Physical 28 | .with_delivery_status(false) 29 | .with_destination_field(ioapic.ioapic_id().id()), 30 | ); 31 | } 32 | 33 | /// Global list of registered IOAPIC IRQs to ensure we don't have collisions. 34 | #[derive(Debug, Clone, Copy)] 35 | #[repr(u8)] 36 | pub(crate) enum IOAPICIRQNumber { 37 | _Reserved = 0, 38 | 39 | /// Assumes that the keyboard IRQ for the IOAPIC is 1, which is the same as 40 | /// if we were using the 8259 PIC. If we wanted to determine this 41 | /// dynamically, we could read the IOAPIC redirection table entry for IRQ 1, 42 | /// or if that doesn't exist I think we need to parse some ACPI AML. 43 | Keyboard = 1, 44 | 45 | // Some reserved numbers in the middle. I don't trust that these aren't 46 | // already taken. 47 | Tick = 9, 48 | } 49 | 50 | /// See 51 | #[derive(Clone)] 52 | struct IOAPIC { 53 | id: u8, 54 | global_system_interrupt_base: u32, 55 | registers: IOAPICRegisters, 56 | } 57 | 58 | impl IOAPIC { 59 | fn from_acpi_info(acpi_info: &ACPIInfo) -> Self { 60 | let ioapic = acpi_info.ioapic_info(0); 61 | let registers = unsafe { IOAPICRegisters::from_address(ioapic.address) }; 62 | Self { 63 | id: ioapic.id, 64 | global_system_interrupt_base: ioapic.global_system_interrupt_base, 65 | registers, 66 | } 67 | } 68 | 69 | /// Reads an IOAPIC register by selecting it and then reading the IO window. 70 | fn read_32_bit_register(&self, register: u8) -> u32 { 71 | self.registers.io_register_select().write(register); 72 | self.registers.io_window().read() 73 | } 74 | 75 | /// Reads a 64 IOAPIC register by reading two 32 bit registers. 
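/// The register at `register` holds the low 32 bits and `register + 1` holds
/// the high 32 bits, so the halves are recombined as `(high << 32) | low`.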
76 | #[allow(dead_code)] // TODO: Remove once used 77 | fn read_64_bit_register(&self, register: u8) -> u64 { 78 | let low = self.read_32_bit_register(register); 79 | let high = self.read_32_bit_register(register + 1); 80 | (u64::from(high) << 32) | u64::from(low) 81 | } 82 | 83 | /// Writes an IOAPIC register by selecting it and then writing the IO window. 84 | fn write_32_bit_register(&self, register: u8, value: u32) { 85 | self.registers.io_register_select().write(register); 86 | self.registers.io_window().write(value); 87 | } 88 | 89 | /// Writes a 64-bit IOAPIC register by writing two 32-bit registers. 90 | fn write_64_bit_register(&self, register: u8, value: u64) { 91 | let low = value as u32; 92 | let high = (value >> 32) as u32; 93 | self.write_32_bit_register(register, low); 94 | self.write_32_bit_register(register + 1, high); 95 | } 96 | 97 | /// See "3.2.1.IOAPICID—IOAPIC IDENTIFICATION REGISTER". This register 98 | /// contains the 4-bit APIC ID. The ID serves as a physical name of the 99 | /// IOAPIC. All APIC devices using the APIC bus should have a unique APIC 100 | /// ID. The APIC bus arbitration ID for the I/O unit is also written during 101 | /// a write to the APICID Register (same data is loaded into both). This 102 | /// register must be programmed with the correct ID value before using the 103 | /// IOAPIC for message transmission. 104 | fn ioapic_id(&self) -> IOAPICID { 105 | let raw = self.read_32_bit_register(IOAPIC_ID_REGISTER_OFFSET); 106 | IOAPICID::from(raw) 107 | } 108 | 109 | /// See "3.2.2. IOAPICVER—IOAPIC VERSION REGISTER" 110 | fn ioapic_version(&self) -> IOAPICVersion { 111 | let raw = self.read_32_bit_register(IOAPIC_VERSION_REGISTER_OFFSET); 112 | IOAPICVersion::from(raw) 113 | } 114 | 115 | /// See "3.2.4. 82093AA (IOAPIC) IOREDTBL\[23:0\]—I/O REDIRECTION TABLE REGISTERS" 116 | #[allow(dead_code)] // TODO: Remove once used 117 | fn read_ioredtbl(&self, entry: u8) -> IOAPICRedirectionTableRegister { 118 | // Intel IOAPIC only has 24 entries 119 | assert!(entry < 24, "Intel IOAPIC only has 24 entries!"); 120 | let offset = IOAPIC_REDIRECTION_TABLE_REGISTER_OFFSET + (entry * 2); 121 | let raw = self.read_64_bit_register(offset); 122 | IOAPICRedirectionTableRegister::from(raw) 123 | } 124 | 125 | /// See "3.2.4. 
82093AA (IOAPIC) IOREDTBL\[23:0\]—I/O REDIRECTION TABLE REGISTERS" 126 | fn write_ioredtbl(&self, entry: u8, value: IOAPICRedirectionTableRegister) { 127 | // Intel IOAPIC only has 24 entries 128 | assert!(entry < 24, "Intel IOAPIC only has 24 entries!"); 129 | let offset = IOAPIC_REDIRECTION_TABLE_REGISTER_OFFSET + (entry * 2); 130 | self.write_64_bit_register(offset, value.into()); 131 | } 132 | } 133 | 134 | #[allow(clippy::missing_fields_in_debug)] 135 | impl fmt::Debug for IOAPIC { 136 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 137 | f.debug_struct("IOAPIC") 138 | .field("id", &self.id) 139 | .field( 140 | "global_system_interrupt_base", 141 | &self.global_system_interrupt_base, 142 | ) 143 | .field("ioapic_id", &self.ioapic_id()) 144 | .field("ioapic_version", &self.ioapic_version()) 145 | .finish() 146 | } 147 | } 148 | 149 | register_struct!( 150 | /// See and "82093AA I/O ADVANCED 151 | /// PROGRAMMABLE INTERRUPT CONTROLLER (IOAPIC) (1996)" 152 | IOAPICRegisters { 153 | 0x00 => io_register_select: RegisterRW, 154 | 0x10 => io_window: RegisterRW, 155 | } 156 | ); 157 | 158 | const IOAPIC_ID_REGISTER_OFFSET: u8 = 0x00; 159 | 160 | #[bitfield(u32)] 161 | /// See "3.2.1.IOAPICID—IOAPIC IDENTIFICATION REGISTER" 162 | struct IOAPICID { 163 | #[bits(24)] 164 | __reserved: u32, 165 | #[bits(4)] 166 | id: u8, 167 | #[bits(4)] 168 | __reserved: u8, 169 | } 170 | 171 | const IOAPIC_VERSION_REGISTER_OFFSET: u8 = 0x01; 172 | 173 | #[bitfield(u32)] 174 | /// See "3.2.2. IOAPICVER—IOAPIC VERSION REGISTER" 175 | struct IOAPICVersion { 176 | version: u8, 177 | __reserved: u8, 178 | max_redirection_entry: u8, 179 | __reserved: u8, 180 | } 181 | 182 | const IOAPIC_REDIRECTION_TABLE_REGISTER_OFFSET: u8 = 0x10; 183 | 184 | #[bitfield(u64)] 185 | /// See "3.2.4. 
82093AA (IOAPIC) IOREDTBL\[23:0\]—I/O REDIRECTION TABLE REGISTERS" 186 | struct IOAPICRedirectionTableRegister { 187 | interrupt_vector: u8, 188 | #[bits(3)] 189 | delivery_mode: u8, 190 | destination_mode: bool, 191 | delivery_status: bool, 192 | interrupt_input_pin_polarity: bool, 193 | remote_irr: bool, 194 | trigger_mode: bool, 195 | interrupt_mask: bool, 196 | #[bits(39)] 197 | __reserved: u64, 198 | destination_field: u8, 199 | } 200 | -------------------------------------------------------------------------------- /kernel/src/keyboard.rs: -------------------------------------------------------------------------------- 1 | use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1}; 2 | use x86_64::instructions::port::Port; 3 | 4 | use crate::interrupts::{InterruptHandlerID, InterruptVector}; 5 | use crate::sync::SpinLock; 6 | use crate::{interrupts, ioapic}; 7 | 8 | static KEYBOARD: SpinLock>> = SpinLock::new(None); 9 | 10 | pub(crate) fn init_keyboard() { 11 | KEYBOARD.lock().replace(Keyboard::new( 12 | layouts::Us104Key, 13 | ScancodeSet1, 14 | HandleControl::Ignore, 15 | )); 16 | 17 | let interrupt_vector = interrupts::install_interrupt_next_vector(1, keyboard_interrupt_handler); 18 | ioapic::install_irq(interrupt_vector, ioapic::IOAPICIRQNumber::Keyboard); 19 | } 20 | 21 | fn keyboard_interrupt_handler(_vector: InterruptVector, _handler_id: InterruptHandlerID) { 22 | // https://wiki.osdev.org/%228042%22_PS/2_Controller#PS.2F2_Controller_IO_Ports 23 | const KEYBOARD_PORT: u16 = 0x60; 24 | 25 | let mut lock = KEYBOARD.lock(); 26 | let keyboard = lock.as_mut().expect("keyboard not initialized"); 27 | let mut port = Port::new(KEYBOARD_PORT); 28 | 29 | // KEYBOARD has an internal state machine that processes e.g. modifier keys 30 | // like shift and caps lock. It needs to be fed with the scancodes of the 31 | // pressed keys. If the scancode is a valid key, the keyboard crate will 32 | // eventually return a `DecodedKey`. 
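    // Reading data port 0x60 pops the pending scancode out of the PS/2
    // controller's output buffer (see the OSDev link above).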
33 | let scancode: u8 = unsafe { port.read() }; 34 | if let Ok(Some(key_event)) = keyboard.add_byte(scancode) { 35 | if let Some(key) = keyboard.process_keyevent(key_event) { 36 | match key { 37 | DecodedKey::Unicode(character) => { 38 | log::info!("FOUND UNICODE CHAR {character}"); 39 | } 40 | DecodedKey::RawKey(key) => log::info!("FOUND RAW CHAR {key:?}"), 41 | } 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kernel/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(abi_x86_interrupt)] 3 | #![feature(allocator_api)] 4 | #![feature(asm_const)] 5 | #![feature(cell_leak)] 6 | #![feature(int_roundings)] 7 | #![feature(offset_of)] 8 | #![feature(maybe_uninit_uninit_array)] 9 | #![feature(const_maybe_uninit_uninit_array)] 10 | #![feature(naked_functions)] 11 | #![feature(pointer_is_aligned)] 12 | #![feature(strict_provenance)] 13 | #![feature(sync_unsafe_cell)] 14 | #![warn(clippy::all, clippy::pedantic, clippy::nursery, clippy::cargo)] 15 | #![allow( 16 | clippy::cast_possible_truncation, 17 | clippy::cast_precision_loss, 18 | clippy::cargo_common_metadata, 19 | clippy::doc_markdown, 20 | clippy::implicit_hasher, 21 | clippy::implicit_return, 22 | clippy::len_without_is_empty, 23 | clippy::missing_const_for_fn, 24 | clippy::missing_errors_doc, 25 | clippy::missing_panics_doc, 26 | clippy::module_name_repetitions, 27 | clippy::multiple_crate_versions, 28 | clippy::must_use_candidate, 29 | clippy::new_without_default, 30 | clippy::non_send_fields_in_send_ty, 31 | clippy::option_if_let_else, 32 | clippy::redundant_pub_crate, 33 | clippy::suboptimal_flops, 34 | clippy::upper_case_acronyms, 35 | clippy::wildcard_imports 36 | )] 37 | 38 | #[macro_use] // For format! macro 39 | #[allow(unused_imports)] // format! macro is unused at the time of writing 40 | extern crate alloc; 41 | 42 | pub(crate) mod acpi; 43 | pub(crate) mod ansiterm; 44 | pub(crate) mod apic; 45 | pub(crate) mod barrier; 46 | pub(crate) mod block; 47 | pub(crate) mod boot_info; 48 | pub(crate) mod debug; 49 | pub(crate) mod elf; 50 | pub(crate) mod fs; 51 | pub(crate) mod gdt; 52 | pub(crate) mod graphics; 53 | pub(crate) mod hpet; 54 | pub(crate) mod interrupts; 55 | pub(crate) mod ioapic; 56 | pub(crate) mod keyboard; 57 | pub(crate) mod logging; 58 | pub(crate) mod memory; 59 | pub(crate) mod pci; 60 | pub(crate) mod percpu; 61 | pub(crate) mod qemu; 62 | #[allow(dead_code)] // This could be its own crate 63 | pub(crate) mod registers; 64 | pub(crate) mod sched; 65 | pub(crate) mod serial; 66 | pub(crate) mod shell; 67 | pub(crate) mod strings; 68 | pub(crate) mod sync; 69 | #[cfg(feature = "tests")] 70 | pub(crate) mod tests; 71 | pub(crate) mod tick; 72 | pub(crate) mod transmute; 73 | pub(crate) mod vfs; 74 | pub(crate) mod virtio; 75 | 76 | use core::sync::atomic::{AtomicU8, Ordering}; 77 | 78 | use alloc::string::String; 79 | use apic::ProcessorID; 80 | 81 | pub fn start() -> ! 
{ 82 | serial::init(); 83 | logging::init(); 84 | 85 | let boot_info_data = boot_info::boot_info(); 86 | early_per_cpu_setup(ProcessorID( 87 | boot_info_data.bootstrap_processor_lapic_id as u8, 88 | )); 89 | 90 | log::info!("kernel cmdline: {}", boot_info_data.kernel_cmdline); 91 | global_setup(boot_info_data); 92 | 93 | // Finish bootstrapping current CPU 94 | later_per_cpu_setup(); 95 | 96 | // Bootstrap other CPUs 97 | for mut entry in boot_info::limine_smp_entries() { 98 | entry.bootstrap_cpu(bootstrap_secondary_cpu); 99 | } 100 | 101 | // Ensure that all CPUs have finished bootstrapping before continuing 102 | let cpu_count = boot_info::limine_smp_entries().count() as u8; 103 | while NUM_CPUS_BOOTSTRAPPED.load(Ordering::Acquire) < cpu_count { 104 | core::hint::spin_loop(); 105 | } 106 | 107 | tick::global_init(); 108 | 109 | sched::new_task( 110 | String::from("shell"), 111 | shell::run_serial_shell, 112 | core::ptr::null::<()>(), 113 | ); 114 | sched::start_scheduler(); 115 | } 116 | 117 | /// Records how many CPUs have been bootstrapped. Used as a synchronization 118 | /// point before continuing with init. 119 | static NUM_CPUS_BOOTSTRAPPED: AtomicU8 = AtomicU8::new(0); 120 | 121 | fn early_per_cpu_setup(processor_id: ProcessorID) { 122 | gdt::init_per_cpu_gdt(processor_id); 123 | interrupts::init_interrupts(); 124 | percpu::init_current_cpu(processor_id); 125 | tick::per_cpu_init(); 126 | } 127 | 128 | fn global_setup(boot_info_data: &boot_info::BootInfo) { 129 | unsafe { 130 | memory::init(boot_info_data, boot_info::limine_memory_regions); 131 | }; 132 | 133 | // N.B. Probing ACPI must happen after heap initialization because the Rust 134 | // `acpi` crate uses alloc. It would be nice to not need that... 135 | unsafe { acpi::init(boot_info_data.rsdp_address.expect("no rsdp address")) }; 136 | 137 | let acpi_info = acpi::acpi_info(); 138 | apic::global_init(acpi_info); 139 | ioapic::init(acpi_info); 140 | sched::global_init(); 141 | 142 | unsafe { 143 | hpet::init(acpi_info.hpet_address()); 144 | }; 145 | 146 | keyboard::init_keyboard(); 147 | 148 | // Initialize VirtIO devices 149 | let pci_config_region_base_address = acpi_info.pci_config_region_base_address(); 150 | pci::for_pci_devices_brute_force(pci_config_region_base_address, |device| { 151 | let Some(device_config) = virtio::VirtIODeviceConfig::from_pci_config(device) else { return; }; 152 | // TODO: Remove clone here 153 | virtio::try_init_virtio_rng(device_config.clone()); 154 | virtio::try_init_virtio_block(device_config); 155 | }); 156 | 157 | graphics::init(boot_info_data); 158 | } 159 | 160 | fn later_per_cpu_setup() { 161 | apic::per_cpu_init(); 162 | sched::per_cpu_init(); 163 | NUM_CPUS_BOOTSTRAPPED.fetch_add(1, Ordering::Release); 164 | } 165 | 166 | extern "C" fn bootstrap_secondary_cpu(info: *const limine::LimineSmpInfo) -> ! { 167 | let info = unsafe { &*info }; 168 | let processor_id = ProcessorID(info.lapic_id as u8); 169 | // log::info!("bootstrapping CPU: {info:#x?}"); 170 | early_per_cpu_setup(processor_id); 171 | later_per_cpu_setup(); 172 | sched::start_scheduler(); 173 | } 174 | 175 | pub fn panic_handler(info: &core::panic::PanicInfo) -> ! { 176 | logging::force_unlock_logger(); 177 | log::error!("PANIC: {info}"); 178 | debug::print_stack_trace(); 179 | 180 | let task_id = sched::current_task_id(); 181 | let processor_id = percpu::get_processor_id_no_guard(); 182 | log::error!("PANIC: task {task_id:?} on CPU {processor_id:?}"); 183 | 184 | hlt_loop() 185 | } 186 | 187 | pub(crate) fn hlt_loop() -> ! 
{ 188 | loop { 189 | x86_64::instructions::hlt(); 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /kernel/src/logging.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::SpinLock; 2 | use crate::{ansiterm, serial_println}; 3 | 4 | /// Dummy type to help us implement a logger using the `log` crate. 5 | struct Logger { 6 | writer: SpinLock, 7 | } 8 | 9 | static LOGGER: Logger = Logger { 10 | writer: SpinLock::new(LogSerialWriter), 11 | }; 12 | 13 | impl log::Log for Logger { 14 | fn enabled(&self, metadata: &log::Metadata) -> bool { 15 | metadata.level() <= log::max_level() 16 | } 17 | 18 | fn log(&self, record: &log::Record) { 19 | if self.enabled(record.metadata()) { 20 | self.writer.lock_disable_interrupts().print_record(record); 21 | } 22 | } 23 | 24 | fn flush(&self) {} 25 | } 26 | 27 | /// Dummy type just so we can put something in a `SpinLock` and ensure that we 28 | /// don't interleave log messages. 29 | struct LogSerialWriter; 30 | 31 | impl LogSerialWriter { 32 | #[allow(clippy::unused_self)] 33 | fn print_record(&self, record: &log::Record) { 34 | let color = match record.level() { 35 | log::Level::Error => ansiterm::Color::Red, 36 | log::Level::Warn => ansiterm::Color::Yellow, 37 | log::Level::Info => ansiterm::Color::Green, 38 | // White is actually kinda grey. Bright white is white. 39 | log::Level::Debug | log::Level::Trace => ansiterm::Color::White, 40 | }; 41 | let color_code = ansiterm::AnsiEscapeSequence::SelectGraphicRendition( 42 | ansiterm::SelectGraphicRendition::ForegroundColor(color), 43 | ); 44 | let clear = ansiterm::CLEAR_FORMAT; 45 | 46 | serial_println!("{color_code}[{}]{clear} {}", record.level(), record.args()); 47 | } 48 | } 49 | 50 | pub(crate) fn init() { 51 | log::set_logger(&LOGGER).unwrap(); 52 | log::set_max_level(log::LevelFilter::Info); 53 | log::debug!("Logging initialized"); 54 | } 55 | 56 | /// Force unlock the logger to ensure we can log something during a panic or CPU 57 | /// exception. 58 | pub(crate) fn force_unlock_logger() { 59 | unsafe { 60 | LOGGER.writer.force_unlock(); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /kernel/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(allocator_api)] 4 | 5 | extern crate alloc; 6 | 7 | use rust_os::{panic_handler, start}; 8 | 9 | #[no_mangle] 10 | extern "C" fn _start() -> ! { 11 | start(); 12 | } 13 | 14 | #[panic_handler] 15 | fn panic(info: &core::panic::PanicInfo) -> ! { 16 | panic_handler(info) 17 | } 18 | -------------------------------------------------------------------------------- /kernel/src/memory/address.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::ops::{Add, Sub}; 3 | 4 | use x86_64::{PhysAddr, VirtAddr}; 5 | 6 | use super::mapping::{KERNEL_PHYSICAL_MAPPING_END, KERNEL_PHYSICAL_MAPPING_START}; 7 | 8 | /// Physical address that has been mapped to the kernel physical address space. 9 | /// A `KernPhysAddr` is trivially convertible to and from a `PhysAddr` by using 10 | /// the `KERNEL_PHYSICAL_MAPPING_START` offset. 
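/// Illustrative round trip (a sketch; `0x1000` is an arbitrary frame address):
///
/// ```ignore
/// let phys = PhysAddr::new(0x1000);
/// let kern = KernPhysAddr::from_phys_addr(phys); // phys + KERNEL_PHYSICAL_MAPPING_START
/// assert_eq!(kern.to_phys_addr(), phys);
/// ```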
11 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 12 | #[repr(transparent)] 13 | pub(crate) struct KernPhysAddr(u64); 14 | 15 | impl KernPhysAddr { 16 | pub(crate) fn new(addr: u64) -> Self { 17 | assert!( 18 | (KERNEL_PHYSICAL_MAPPING_START..=KERNEL_PHYSICAL_MAPPING_END).contains(&addr), 19 | "physical address {addr:#x} is not in kernel physical mapping range" 20 | ); 21 | Self(addr) 22 | } 23 | 24 | pub(crate) fn as_u64(self) -> u64 { 25 | self.0 26 | } 27 | 28 | pub(crate) fn to_phys_addr(self) -> PhysAddr { 29 | let addr = self.0; 30 | assert!( 31 | (KERNEL_PHYSICAL_MAPPING_START..=KERNEL_PHYSICAL_MAPPING_END).contains(&addr), 32 | "physical address {addr:#x} is not in kernel physical mapping range" 33 | ); 34 | PhysAddr::new(addr - KERNEL_PHYSICAL_MAPPING_START) 35 | } 36 | 37 | pub(crate) fn from_phys_addr(addr: PhysAddr) -> Self { 38 | Self::new(addr.as_u64() + KERNEL_PHYSICAL_MAPPING_START) 39 | } 40 | 41 | pub(crate) fn align_down(self, align: u64) -> Self { 42 | Self(x86_64::align_down(self.0, align)) 43 | } 44 | 45 | pub(crate) fn as_ptr<T>(self) -> *const T { 46 | self.as_u64() as *const T 47 | } 48 | 49 | pub(crate) fn as_mut_ptr<T>(self) -> *mut T { 50 | self.as_ptr::<T>().cast_mut() 51 | } 52 | } 53 | 54 | impl fmt::Debug for KernPhysAddr { 55 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 56 | f.debug_tuple("KernPhysAddr") 57 | .field(&format_args!("{:#x}", self.0)) 58 | .finish() 59 | } 60 | } 61 | 62 | impl From<KernPhysAddr> for VirtAddr { 63 | fn from(addr: KernPhysAddr) -> Self { 64 | Self::new(addr.0) 65 | } 66 | } 67 | 68 | impl From<KernPhysAddr> for PhysAddr { 69 | fn from(addr: KernPhysAddr) -> Self { 70 | addr.to_phys_addr() 71 | } 72 | } 73 | 74 | impl From<PhysAddr> for KernPhysAddr { 75 | fn from(addr: PhysAddr) -> Self { 76 | Self::from_phys_addr(addr) 77 | } 78 | } 79 | 80 | impl Add<u64> for KernPhysAddr { 81 | type Output = Self; 82 | 83 | fn add(self, rhs: u64) -> Self::Output { 84 | Self::new(self.0 + rhs) 85 | } 86 | } 87 | 88 | impl Sub<u64> for KernPhysAddr { 89 | type Output = Self; 90 | 91 | fn sub(self, rhs: u64) -> Self::Output { 92 | Self::new(self.0 - rhs) 93 | } 94 | } 95 | 96 | impl Add<usize> for KernPhysAddr { 97 | type Output = Self; 98 | 99 | fn add(self, rhs: usize) -> Self::Output { 100 | Self::new(self.0 + rhs as u64) 101 | } 102 | } 103 | 104 | impl Sub<usize> for KernPhysAddr { 105 | type Output = Self; 106 | 107 | fn sub(self, rhs: usize) -> Self::Output { 108 | Self::new(self.0 - rhs as u64) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /kernel/src/memory/heap.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::{GlobalAlloc, Layout}; 2 | use core::ptr::NonNull; 3 | 4 | use linked_list_allocator::Heap; 5 | use x86_64::VirtAddr; 6 | 7 | use crate::memory::with_kernel_page_table_lock; 8 | use crate::sync::SpinLock; 9 | 10 | use super::mapping::{ 11 | allocate_and_map_pages, KERNEL_HEAP_REGION_MAX_SIZE, KERNEL_HEAP_REGION_START, 12 | }; 13 | use super::page::{Page, PageRange, PageSize}; 14 | use super::page_table::{MapError, PageTableEntryFlags}; 15 | 16 | #[global_allocator] 17 | static ALLOCATOR: LockedHeap = LockedHeap(SpinLock::new(Heap::empty())); 18 | 19 | /// Wrapper around `linked_list_allocator::Heap` that implements `GlobalAlloc`. 20 | /// The `LockedHeap` in that crate doesn't understand interrupts, so we wrap it 21 | /// in our own `SpinLock`. 
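/// Taking the lock with interrupts disabled also avoids a deadlock where an
/// interrupt handler allocates while the current CPU already holds the heap
/// lock.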
22 | struct LockedHeap(SpinLock); 23 | 24 | unsafe impl GlobalAlloc for LockedHeap { 25 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 26 | self.0 27 | .lock_disable_interrupts() 28 | .allocate_first_fit(layout) 29 | .ok() 30 | .map_or(core::ptr::null_mut::(), |allocation| { 31 | allocation.as_ptr() 32 | }) 33 | } 34 | 35 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 36 | self.0 37 | .lock_disable_interrupts() 38 | .deallocate(NonNull::new_unchecked(ptr), layout); 39 | } 40 | } 41 | 42 | const HEAP_START: usize = KERNEL_HEAP_REGION_START as usize; 43 | const HEAP_SIZE: usize = 10 * 1024 * 1024; // 10 MiB 44 | 45 | /// Maps pages for a kernel heap defined by `HEAP_START` and `HEAP_SIZE` and 46 | /// initializes `ALLOCATOR` with this heap. 47 | pub(super) fn init() -> Result<(), MapError> { 48 | assert!(HEAP_SIZE < KERNEL_HEAP_REGION_MAX_SIZE as usize); 49 | 50 | let heap_start_addr = VirtAddr::new(HEAP_START as u64); 51 | let heap_start = Page::containing_address(heap_start_addr, PageSize::Size4KiB); 52 | let page_range = PageRange::from_num_bytes(heap_start, HEAP_SIZE); 53 | let flags = PageTableEntryFlags::PRESENT | PageTableEntryFlags::WRITABLE; 54 | 55 | with_kernel_page_table_lock(|table| allocate_and_map_pages(table, page_range.iter(), flags))?; 56 | 57 | unsafe { 58 | // `init() actually writes to the heap, which is why we can only 59 | // initialize the allocator after we map the pages. 60 | ALLOCATOR 61 | .0 62 | .lock_disable_interrupts() 63 | .init(HEAP_START, HEAP_SIZE); 64 | } 65 | 66 | Ok(()) 67 | } 68 | -------------------------------------------------------------------------------- /kernel/src/memory/mod.rs: -------------------------------------------------------------------------------- 1 | mod address; 2 | mod heap; 3 | mod mapping; 4 | mod page; 5 | mod page_table; 6 | mod physical; 7 | 8 | pub(crate) use address::*; 9 | pub(crate) use mapping::*; 10 | pub(crate) use page::*; 11 | pub(crate) use page_table::*; 12 | pub(crate) use physical::*; 13 | 14 | use bitmap_alloc::MemoryRegion; 15 | 16 | use crate::boot_info::BootInfo; 17 | 18 | pub(crate) unsafe fn init(boot_info_data: &BootInfo, usable_memory_regions: R) 19 | where 20 | I: Iterator, 21 | R: Fn() -> I, 22 | { 23 | mapping::init(boot_info_data); 24 | physical::init(usable_memory_regions); 25 | mapping::clean_up_kernel_page_table(); 26 | heap::init().expect("failed to initialize heap"); 27 | } 28 | -------------------------------------------------------------------------------- /kernel/src/memory/page.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::ops::{Add, Sub}; 3 | 4 | use x86_64::{PhysAddr, VirtAddr}; 5 | 6 | use super::address::KernPhysAddr; 7 | 8 | /// A `Page` is a page of memory of a given address type `A` (e.g. `VirtAddr`, 9 | /// `PhysAddr`, etc). 
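/// For example, `Page::containing_address(addr, PageSize::Size4KiB)` rounds
/// `addr` down to the nearest 4 KiB boundary, while `from_start_addr` requires
/// the address to already be page-aligned (both defined below).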
10 | #[derive(Debug, Clone, Copy)] 11 | pub(crate) struct Page { 12 | start_addr: A, 13 | size: PageSize, 14 | } 15 | 16 | impl Page { 17 | pub(crate) fn from_start_addr(start_addr: A, size: PageSize) -> Self { 18 | assert!( 19 | start_addr.is_aligned(size.size_bytes() as u64), 20 | "start_addr {start_addr:x?} is not aligned to page size {size:?}" 21 | ); 22 | Self { start_addr, size } 23 | } 24 | 25 | pub(crate) fn containing_address(addr: A, size: PageSize) -> Self { 26 | let start_addr = addr.align_down(size.size_bytes() as u64); 27 | Self { start_addr, size } 28 | } 29 | 30 | pub(crate) fn start_addr(&self) -> A { 31 | self.start_addr 32 | } 33 | 34 | pub(crate) fn size(&self) -> PageSize { 35 | self.size 36 | } 37 | } 38 | 39 | impl Page { 40 | pub(crate) fn zero(&mut self) { 41 | let start_ptr = self.start_addr.as_mut_ptr::(); 42 | let size_bytes = self.size.size_bytes(); 43 | unsafe { 44 | // N.B. `write_bytes` is a highly optimized way to write zeroes. 45 | // Making a slice and doing `slice.fill(0)` is supposed to optimize 46 | // to this, but it doesn't seem to when compiling in debug mode and 47 | // it makes page table allocation very slow. 48 | start_ptr.write_bytes(0, size_bytes); 49 | } 50 | } 51 | } 52 | 53 | impl Page { 54 | pub(crate) fn flush(&self) { 55 | x86_64::instructions::tlb::flush(self.start_addr); 56 | } 57 | } 58 | 59 | #[derive(Debug)] 60 | pub(crate) struct PageRange { 61 | start: Page, 62 | num_pages: usize, 63 | } 64 | 65 | impl PageRange { 66 | pub(crate) fn new(start: Page, num_pages: usize) -> Self { 67 | Self { start, num_pages } 68 | } 69 | 70 | pub(crate) fn from_num_bytes(start: Page, num_bytes: usize) -> Self { 71 | let num_pages = num_bytes.div_ceil(start.size.size_bytes()); 72 | Self { start, num_pages } 73 | } 74 | 75 | pub(crate) fn start_addr(&self) -> A { 76 | self.start.start_addr 77 | } 78 | 79 | pub(crate) fn page_size(&self) -> PageSize { 80 | self.start.size 81 | } 82 | 83 | pub(crate) fn num_pages(&self) -> usize { 84 | self.num_pages 85 | } 86 | 87 | pub(crate) fn num_bytes(&self) -> usize { 88 | self.num_pages * self.start.size.size_bytes() 89 | } 90 | 91 | pub(crate) fn iter(&self) -> PageRangeIter { 92 | PageRangeIter { 93 | range: self, 94 | current_page: 0, 95 | } 96 | } 97 | } 98 | 99 | impl PageRange { 100 | pub(crate) fn as_byte_slice(&mut self) -> &mut [u8] { 101 | let start_ptr = self.start.start_addr.as_mut_ptr::(); 102 | let size_bytes = self.num_bytes(); 103 | unsafe { core::slice::from_raw_parts_mut(start_ptr, size_bytes) } 104 | } 105 | 106 | pub(crate) fn zero(&mut self) { 107 | let start_ptr = self.start.start_addr.as_mut_ptr::(); 108 | let size_bytes = self.num_bytes(); 109 | unsafe { 110 | // N.B. `write_bytes` is a highly optimized way to write zeroes. 111 | // Making a slice and doing `slice.fill(0)` is supposed to optimize 112 | // to this, but it doesn't seem to when compiling in debug mode and 113 | // it makes page table allocation very slow. 
114 | start_ptr.write_bytes(0, size_bytes); 115 | } 116 | } 117 | } 118 | 119 | #[derive(Debug)] 120 | pub(crate) struct PageRangeIter<'a, A> { 121 | range: &'a PageRange, 122 | current_page: usize, 123 | } 124 | 125 | impl<'a, A: Address> Iterator for PageRangeIter<'a, A> { 126 | type Item = Page; 127 | 128 | fn next(&mut self) -> Option { 129 | if self.current_page >= self.range.num_pages { 130 | return None; 131 | } 132 | 133 | let start_addr = 134 | self.range.start.start_addr + self.current_page * self.range.start.size.size_bytes(); 135 | let page = Page { 136 | start_addr, 137 | size: self.range.start.size, 138 | }; 139 | 140 | self.current_page += 1; 141 | Some(page) 142 | } 143 | } 144 | 145 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 146 | pub(crate) enum PageSize { 147 | Size4KiB, 148 | Size2MiB, 149 | Size1GiB, 150 | } 151 | 152 | impl PageSize { 153 | pub(crate) fn size_bytes(self) -> usize { 154 | match self { 155 | Self::Size4KiB => 4096, 156 | Self::Size2MiB => 2 * 1024 * 1024, 157 | Self::Size1GiB => 1024 * 1024 * 1024, 158 | } 159 | } 160 | } 161 | 162 | /// The `Address` trait abstracts over different address types. 163 | pub(crate) trait Address: 164 | Copy + Sized + PartialOrd + PartialEq + Eq + Add + Sub 165 | { 166 | fn as_u64(self) -> u64; 167 | 168 | fn align_down(self, align: u64) -> Self; 169 | 170 | fn is_aligned(self, align: u64) -> bool { 171 | self.align_down(align) == self 172 | } 173 | } 174 | 175 | impl Address for VirtAddr { 176 | fn as_u64(self) -> u64 { 177 | self.as_u64() 178 | } 179 | 180 | fn align_down(self, align: u64) -> Self { 181 | self.align_down(align) 182 | } 183 | } 184 | 185 | impl Address for PhysAddr { 186 | fn as_u64(self) -> u64 { 187 | self.as_u64() 188 | } 189 | 190 | fn align_down(self, align: u64) -> Self { 191 | self.align_down(align) 192 | } 193 | } 194 | 195 | impl Address for KernPhysAddr { 196 | fn as_u64(self) -> u64 { 197 | self.as_u64() 198 | } 199 | 200 | fn align_down(self, align: u64) -> Self { 201 | self.align_down(align) 202 | } 203 | } 204 | 205 | pub(crate) trait AsMutPtr { 206 | #[allow(clippy::wrong_self_convention)] 207 | fn as_mut_ptr(self) -> *mut T; 208 | } 209 | 210 | impl AsMutPtr for VirtAddr { 211 | fn as_mut_ptr(self) -> *mut T { 212 | Self::as_mut_ptr(self) 213 | } 214 | } 215 | 216 | impl AsMutPtr for KernPhysAddr { 217 | fn as_mut_ptr(self) -> *mut T { 218 | Self::as_mut_ptr(self) 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /kernel/src/memory/physical.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::AllocError; 2 | 3 | use x86_64::PhysAddr; 4 | 5 | use bitmap_alloc::{bootstrap_allocator, BitmapAllocator, MemoryRegion}; 6 | 7 | use crate::sync::SpinLock; 8 | 9 | use super::address::KernPhysAddr; 10 | use super::page::{Page, PageRange, PageSize}; 11 | 12 | /// Physical memory frame allocator used by all kernel contexts. 13 | pub(super) static KERNEL_PHYSICAL_ALLOCATOR: LockedPhysicalMemoryAllocator = 14 | LockedPhysicalMemoryAllocator::new(); 15 | 16 | pub(super) unsafe fn init(usable_memory_regions: R) 17 | where 18 | I: Iterator, 19 | R: Fn() -> I, 20 | { 21 | KERNEL_PHYSICAL_ALLOCATOR.init(usable_memory_regions); 22 | } 23 | 24 | /// Simply wraps `PhysicalMemoryAllocator` in a lock. This exists because some 25 | /// `x86_64` functions want a `&mut Allocator` and we can't have multiple 26 | /// mutable references to the same object. 
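/// Callers go through `with_lock` (below), which takes the lock, panics if the
/// allocator has not been initialized yet, and passes a mutable reference to
/// the provided closure.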
27 | pub(crate) struct LockedPhysicalMemoryAllocator<'a> { 28 | lock: SpinLock>>, 29 | } 30 | 31 | impl LockedPhysicalMemoryAllocator<'_> { 32 | const fn new() -> Self { 33 | Self { 34 | lock: SpinLock::new(None), 35 | } 36 | } 37 | 38 | unsafe fn init(&self, memory_regions: R) 39 | where 40 | I: Iterator, 41 | R: Fn() -> I, 42 | { 43 | let allocator = PhysicalMemoryAllocator::new(memory_regions); 44 | self.lock.lock().replace(allocator); 45 | } 46 | 47 | pub(super) fn with_lock(&self, f: impl FnOnce(&mut PhysicalMemoryAllocator) -> R) -> R { 48 | let mut lock_guard = self.lock.lock(); 49 | let allocator = lock_guard 50 | .as_mut() 51 | .expect("kernel memory allocator not initialized"); 52 | f(allocator) 53 | } 54 | } 55 | 56 | /// Wrapper around `BitmapAllocator` that knows how to deal with the kernel. 57 | pub(super) struct PhysicalMemoryAllocator<'a> { 58 | pub(super) allocator: BitmapAllocator<'a>, 59 | } 60 | 61 | pub(crate) const PAGE_SIZE: usize = 4096; // 4 KiB 62 | 63 | impl PhysicalMemoryAllocator<'_> { 64 | unsafe fn new(memory_regions: R) -> Self 65 | where 66 | I: Iterator, 67 | R: Fn() -> I, 68 | { 69 | let allocator = 70 | bootstrap_allocator(PAGE_SIZE, memory_regions, |bitmap_addr, bitmap_len| { 71 | // Make sure to use a kernel physical address pointer 72 | let phys_addr = PhysAddr::new(bitmap_addr as u64); 73 | let kern_phys_addr = KernPhysAddr::from_phys_addr(phys_addr); 74 | let ptr = kern_phys_addr.as_mut_ptr::(); 75 | core::slice::from_raw_parts_mut(ptr, bitmap_len) 76 | }); 77 | Self { allocator } 78 | } 79 | } 80 | 81 | impl PhysicalMemoryAllocator<'_> { 82 | pub(super) fn allocate_page(&mut self) -> Result, AllocError> { 83 | let pages = self.allocate_pages(1)?; 84 | let mut pages = pages.iter(); 85 | let page = pages.next().expect("somehow we got less than one page!"); 86 | assert!(pages.next().is_none(), "somehow we got more than one page!"); 87 | Ok(page) 88 | } 89 | 90 | pub(super) fn allocate_pages( 91 | &mut self, 92 | num_pages: usize, 93 | ) -> Result, AllocError> { 94 | let page = self 95 | .allocator 96 | .allocate_contiguous(num_pages) 97 | .ok_or(AllocError)?; 98 | 99 | assert!(page > 0, "we allocated the zero page, which shouldn't happen since the first page should be reserved"); 100 | 101 | let phys_addr = PhysAddr::new((page * PAGE_SIZE) as u64); 102 | let start_addr = KernPhysAddr::from(phys_addr); 103 | let start_page = Page::from_start_addr(start_addr, PageSize::Size4KiB); 104 | Ok(PageRange::new(start_page, num_pages)) 105 | } 106 | 107 | pub(super) fn free_page(&mut self, page: Page) { 108 | self.free_pages(&PageRange::new(page, 1)); 109 | } 110 | 111 | pub(super) fn free_pages(&mut self, pages: &PageRange) { 112 | let start_addr = PhysAddr::from(pages.start_addr()); 113 | let start_page = start_addr.as_u64() as usize / pages.page_size().size_bytes(); 114 | self.allocator 115 | .free_contiguous(start_page, pages.num_pages()); 116 | } 117 | } 118 | 119 | /// Physically contiguous buffer of memory. Allocates by page, so it can 120 | /// allocate more memory than requested. Useful for e.g. Direct Memory Access 121 | /// (DMA) like with VirtIO buffers. 122 | /// 123 | /// NOTE: This type implements `Drop` and will free the allocated memory when 124 | /// it goes out of scope. 
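/// Illustrative usage (a hypothetical caller; error handling elided):
///
/// ```ignore
/// let mut buf = PhysicalBuffer::allocate_zeroed(100)?; // rounds up to one 4 KiB page
/// buf.as_slice_mut()[0] = 0xAB;
/// let dma_addr = PhysAddr::from(buf.address()); // physical address to hand to a device
/// ```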
125 | #[derive(Debug)] 126 | pub(crate) struct PhysicalBuffer { 127 | pages: PageRange, 128 | } 129 | 130 | impl PhysicalBuffer { 131 | pub(crate) fn allocate_zeroed_pages(num_pages: usize) -> Result { 132 | let mut pages = 133 | KERNEL_PHYSICAL_ALLOCATOR.with_lock(|allocator| allocator.allocate_pages(num_pages))?; 134 | pages.zero(); 135 | Ok(Self { pages }) 136 | } 137 | 138 | pub(crate) fn allocate_zeroed(min_bytes: usize) -> Result { 139 | let num_pages = min_bytes.div_ceil(PAGE_SIZE); 140 | Self::allocate_zeroed_pages(num_pages) 141 | } 142 | 143 | pub(crate) fn as_slice_mut(&mut self) -> &mut [u8] { 144 | let ptr = self.address().as_mut_ptr::(); 145 | let len_bytes = self.pages.num_pages() * self.pages.page_size().size_bytes(); 146 | unsafe { core::slice::from_raw_parts_mut(ptr, len_bytes) } 147 | } 148 | 149 | pub(crate) fn address(&self) -> KernPhysAddr { 150 | self.pages.start_addr() 151 | } 152 | } 153 | 154 | impl Drop for PhysicalBuffer { 155 | fn drop(&mut self) { 156 | KERNEL_PHYSICAL_ALLOCATOR.with_lock(|allocator| { 157 | allocator.free_pages(&self.pages); 158 | }); 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /kernel/src/pci/location.rs: -------------------------------------------------------------------------------- 1 | use crate::memory::KernPhysAddr; 2 | 3 | /// Location within the PCI Express Enhanced Configuration Mechanism memory 4 | /// region. See "7.2.2 PCI Express Enhanced Configuration Access Mechanism 5 | /// (ECAM)" of the PCI Express Base Specification, as well as 6 | /// . 7 | #[derive(Debug, Clone)] 8 | pub(crate) struct PCIDeviceLocation { 9 | /// Physical address where the PCI Express extended configuration mechanism 10 | /// memory region starts for this device. 11 | pub(crate) ecam_base_address: KernPhysAddr, 12 | 13 | /// Which PCIe bus this device is on. 14 | pub(crate) bus_number: u8, 15 | 16 | /// Device number of the device within the bus. 17 | pub(crate) device_number: u8, 18 | 19 | /// Function number of the device if the device is a multifunction device. 
20 | pub(crate) function_number: u8, 21 | } 22 | 23 | impl PCIDeviceLocation { 24 | pub(crate) fn device_base_address(&self) -> KernPhysAddr { 25 | let bus = u64::from(self.bus_number); 26 | let device = u64::from(self.device_number); 27 | let function = u64::from(self.function_number); 28 | self.ecam_base_address + ((bus << 20) | (device << 15) | (function << 12)) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /kernel/src/pci/mod.rs: -------------------------------------------------------------------------------- 1 | mod capabilities; 2 | mod device; 3 | mod device_id; 4 | mod location; 5 | 6 | pub(crate) use capabilities::*; 7 | pub(crate) use device::*; 8 | -------------------------------------------------------------------------------- /kernel/src/qemu.rs: -------------------------------------------------------------------------------- 1 | use x86_64::instructions::port::Port; 2 | 3 | /// Custom exit codes because QEMU does a binary OR with 0x1 4 | #[derive(Debug, Clone, Copy)] 5 | #[repr(u32)] // Must match `iosize` for the `isa-debug-exit` device 6 | pub(crate) enum QEMUExitCode { 7 | Success = 0x10, 8 | // Failed = 0x11, 9 | } 10 | 11 | /// Exit QEMU with the given exit code 12 | /// 13 | /// Device must be created with `isa-debug-exit,iobase=0xf4,iosize=0x04` 14 | const QEMU_EXIT_PORT: u16 = 0xf4; 15 | 16 | pub(crate) fn exit_qemu(exit_code: QEMUExitCode) { 17 | unsafe { 18 | let mut port = Port::new(QEMU_EXIT_PORT); 19 | port.write(exit_code as u32); 20 | } 21 | log::error!("Exiting QEMU failed! Is the device `isa-debug-exit` missing?"); 22 | } 23 | -------------------------------------------------------------------------------- /kernel/src/sched/mod.rs: -------------------------------------------------------------------------------- 1 | mod preempt; 2 | mod schedcore; 3 | mod stack; 4 | mod syscall; 5 | mod task; 6 | mod userspace; 7 | 8 | pub(crate) use preempt::*; 9 | pub(crate) use schedcore::*; 10 | pub(crate) use stack::*; 11 | pub(crate) use task::*; 12 | pub(crate) use userspace::*; 13 | -------------------------------------------------------------------------------- /kernel/src/sched/preempt.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Deref, DerefMut}; 2 | 3 | use crate::barrier::barrier; 4 | use crate::define_per_cpu_i64; 5 | 6 | define_per_cpu_i64!( 7 | /// When preempt_count > 0, preemption is disabled, which means the 8 | /// scheduler will not switch off the current task. 9 | PREEMPT_COUNT 10 | ); 11 | 12 | pub(super) fn get_preempt_count_no_guard() -> i64 { 13 | get_per_cpu_no_guard_PREEMPT_COUNT() 14 | } 15 | 16 | pub(super) fn set_preempt_count(count: i64) { 17 | set_per_cpu_PREEMPT_COUNT(count); 18 | } 19 | 20 | /// Simple type that disables preemption while it is alive, and re-enables it 21 | /// when dropped. 
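/// Illustrative usage (hypothetical helper names):
///
/// ```ignore
/// let guarded = PreemptGuard::new(some_per_cpu_value());
/// use_value(&*guarded); // preemption stays disabled while the guard is alive
/// // dropping the guard decrements the per-CPU preempt count again
/// ```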
22 | #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] 23 | pub(crate) struct PreemptGuard { 24 | val: T, 25 | } 26 | 27 | impl PreemptGuard { 28 | #[allow(dead_code)] 29 | pub(crate) fn map U>(self, f: F) -> PreemptGuard { 30 | let val = self.val; 31 | PreemptGuard::new(f(val)) 32 | } 33 | } 34 | 35 | impl PreemptGuard { 36 | pub(crate) fn new(val: T) -> Self { 37 | inc_per_cpu_PREEMPT_COUNT(); 38 | barrier(); 39 | Self { val } 40 | } 41 | } 42 | 43 | impl Drop for PreemptGuard { 44 | fn drop(&mut self) { 45 | barrier(); 46 | dec_per_cpu_PREEMPT_COUNT(); 47 | } 48 | } 49 | 50 | impl Deref for PreemptGuard { 51 | type Target = T; 52 | 53 | #[allow(clippy::explicit_deref_methods)] 54 | fn deref(&self) -> &T { 55 | &self.val 56 | } 57 | } 58 | 59 | impl DerefMut for PreemptGuard { 60 | #[allow(clippy::explicit_deref_methods)] 61 | fn deref_mut(&mut self) -> &mut T { 62 | &mut self.val 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /kernel/src/sched/stack.rs: -------------------------------------------------------------------------------- 1 | use x86_64::VirtAddr; 2 | 3 | use bitmap_alloc::BitmapAllocator; 4 | 5 | use crate::memory::{self, with_kernel_page_table_lock}; 6 | use crate::memory::{Page, PageRange, PageSize, PageTableEntryFlags, UnmapError, PAGE_SIZE}; 7 | use crate::sync::SpinLock; 8 | 9 | /// Size of a kernel stack, including the guard page (so subtract one page to get 10 | /// usable stack space). 11 | /// 12 | /// N.B. This is quite large because apparently Rust programs compiled with 13 | /// debug mode use a ton of the stack. We don't need this much stack in release 14 | /// mode. 15 | const KERNEL_STACK_SIZE_PAGES: usize = 16; 16 | const KERNEL_STACK_PHYS_PAGES: usize = KERNEL_STACK_SIZE_PAGES - 1; 17 | const KERNEL_STACK_SIZE_BYTES: usize = KERNEL_STACK_SIZE_PAGES * PAGE_SIZE; 18 | const KERNEL_STACK_START_VIRT_ADDR: usize = memory::KERNEL_STACK_REGION_START as usize; 19 | 20 | const MAX_KERNEL_STACKS: usize = 256; 21 | const MAX_KERNEL_ALLOC_BIT_CHUNKS: usize = MAX_KERNEL_STACKS.div_ceil(u64::BITS as usize); 22 | 23 | static mut KERNEL_ALLOC_BIT_CHUNKS: [u64; MAX_KERNEL_ALLOC_BIT_CHUNKS] = 24 | [0; MAX_KERNEL_ALLOC_BIT_CHUNKS]; 25 | 26 | static KERNEL_STACK_ALLOCATOR: SpinLock> = SpinLock::new(None); 27 | 28 | pub(super) fn stack_init() { 29 | assert!( 30 | MAX_KERNEL_STACKS * KERNEL_STACK_SIZE_PAGES < memory::KERNEL_STACK_REGION_MAX_SIZE as usize 31 | ); 32 | 33 | let allocator = KernelStackAllocator::new(); 34 | KERNEL_STACK_ALLOCATOR.lock().replace(allocator); 35 | } 36 | 37 | pub(super) fn allocate_stack() -> KernelStack { 38 | let mut lock = KERNEL_STACK_ALLOCATOR.lock_disable_interrupts(); 39 | let allocator = lock 40 | .as_mut() 41 | .expect("kernel stack allocator not initialized"); 42 | allocator.allocate().expect("out of kernel stacks") 43 | } 44 | 45 | /// Allocator that hands out kernel stacks. All kernel stacks are the same size, 46 | /// and they have a guard page at the end of the stack. 47 | struct KernelStackAllocator<'a> { 48 | allocator: BitmapAllocator<'a>, 49 | } 50 | 51 | impl KernelStackAllocator<'_> { 52 | /// Create a new kernel stack allocator. 
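/// Backed by the static `KERNEL_ALLOC_BIT_CHUNKS` bitmap above, which holds
/// one bit per possible stack (up to `MAX_KERNEL_STACKS`).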
53 | fn new() -> Self { 54 | let bits = unsafe { &mut KERNEL_ALLOC_BIT_CHUNKS }; 55 | let allocator = BitmapAllocator::new(bits); 56 | Self { allocator } 57 | } 58 | 59 | fn allocate(&mut self) -> Option { 60 | // Allocate virtual memory 61 | let stack_index = self.allocator.allocate_contiguous(1)?; 62 | let start_addr = VirtAddr::new( 63 | (KERNEL_STACK_START_VIRT_ADDR + stack_index * KERNEL_STACK_SIZE_BYTES) as u64, 64 | ); 65 | let stack = KernelStack { start_addr }; 66 | 67 | // Map the guard page as invalid 68 | unsafe { 69 | with_kernel_page_table_lock(|table| { 70 | match memory::unmap_page(table, stack.guard_page()) { 71 | Err(UnmapError::PageNotMapped) => {} 72 | Ok(page) => panic!("somehow the guard page was physically mapped! {:?}", page), 73 | Err(e) => panic!("failed to unmap kernel stack guard page: {:?}", e), 74 | } 75 | }); 76 | } 77 | 78 | // Map the physical memory into the virtual address space 79 | let pages = stack.physically_mapped_pages(); 80 | let flags = PageTableEntryFlags::PRESENT | PageTableEntryFlags::WRITABLE; 81 | memory::with_kernel_page_table_lock(|table| { 82 | memory::allocate_and_map_pages(table, pages.iter(), flags) 83 | .expect("failed to map kernel stack pages"); 84 | }); 85 | 86 | // Zero out the memory 87 | unsafe { 88 | let ptr = start_addr.as_mut_ptr::().add(PAGE_SIZE); 89 | ptr.write_bytes(0, KERNEL_STACK_SIZE_BYTES - PAGE_SIZE); 90 | }; 91 | 92 | Some(stack) 93 | } 94 | 95 | fn free(&mut self, stack: &KernelStack) { 96 | let stack_index = (stack.start_addr.as_u64() - KERNEL_STACK_START_VIRT_ADDR as u64) 97 | / KERNEL_STACK_SIZE_BYTES as u64; 98 | self.allocator.free_contiguous(stack_index as usize, 1); 99 | 100 | for page in stack.physically_mapped_pages().iter() { 101 | unsafe { 102 | memory::with_kernel_page_table_lock(|table| { 103 | memory::unmap_and_free_page(table, page) 104 | .expect("failed to unmap kernel stack page"); 105 | }); 106 | }; 107 | } 108 | } 109 | } 110 | 111 | #[derive(Debug)] 112 | pub(super) struct KernelStack { 113 | /// Virtual address of the start of top of stack (highest address in the 114 | /// stack). 115 | start_addr: VirtAddr, 116 | } 117 | 118 | impl KernelStack { 119 | /// Get the virtual address of the top (highest memory address) of the 120 | /// stack. 121 | pub(crate) fn top_addr(&self) -> VirtAddr { 122 | self.start_addr + KERNEL_STACK_SIZE_BYTES 123 | } 124 | 125 | fn guard_page(&self) -> Page { 126 | assert!(self.start_addr.as_u64() % PAGE_SIZE as u64 == 0); 127 | Page::from_start_addr(self.start_addr, PageSize::Size4KiB) 128 | } 129 | 130 | fn physically_mapped_pages(&self) -> PageRange { 131 | let start_addr = self.start_addr + PAGE_SIZE; 132 | let start_page = Page::from_start_addr(start_addr, PageSize::Size4KiB); 133 | PageRange::new(start_page, KERNEL_STACK_PHYS_PAGES) 134 | } 135 | } 136 | 137 | impl Drop for KernelStack { 138 | fn drop(&mut self) { 139 | let mut lock = KERNEL_STACK_ALLOCATOR.lock_disable_interrupts(); 140 | let allocator = lock 141 | .as_mut() 142 | .expect("kernel stack allocator not initialized"); 143 | allocator.free(self); 144 | } 145 | } 146 | 147 | /// Useful function for page faults to determine if we hit a kernel guard page. 
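///
/// A page fault handler might use it roughly like this (a sketch; the handler
/// shape and the `fault_addr` variable are assumptions):
///
/// ```ignore
/// if is_kernel_guard_page(fault_addr) {
///     panic!("kernel stack overflow: {fault_addr:?} falls on a guard page");
/// }
/// ```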
148 | pub(crate) fn is_kernel_guard_page(addr: VirtAddr) -> bool { 149 | let above_kernel_stack = addr.as_u64() >= KERNEL_STACK_START_VIRT_ADDR as u64; 150 | let kernel_stack_size = KERNEL_STACK_SIZE_BYTES as u64 * MAX_KERNEL_STACKS as u64; 151 | let within_kernel_stack = 152 | addr.as_u64() < KERNEL_STACK_START_VIRT_ADDR as u64 + kernel_stack_size; 153 | 154 | if !(above_kernel_stack && within_kernel_stack) { 155 | return false; 156 | } 157 | 158 | // The guard page is the first page in each stack 159 | let relative_start = addr.as_u64() - KERNEL_STACK_START_VIRT_ADDR as u64; 160 | let stack_page_index = relative_start / PAGE_SIZE as u64; 161 | stack_page_index % KERNEL_STACK_SIZE_PAGES as u64 == 0 162 | } 163 | -------------------------------------------------------------------------------- /kernel/src/sched/syscall.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | use x86_64::registers::rflags::RFlags; 4 | use x86_64::VirtAddr; 5 | 6 | use crate::define_per_cpu_u64; 7 | use crate::gdt::{USER_CODE_SELECTOR, USER_DATA_SELECTOR}; 8 | use crate::percpu::get_processor_id_no_guard; 9 | 10 | use super::schedcore::{current_task_id, kill_current_task, run_scheduler}; 11 | use super::task::{TaskExitCode, TaskRegisters}; 12 | 13 | pub(super) fn syscall_init() { 14 | // N.B. There is some other initialization done when setting up the GDT for 15 | // the STAR register to set user and kernel mode segments. See gdt.rs for 16 | // more details. 17 | 18 | // Enable System Call Extensions (SCE) to allow userspace to use the syscall 19 | // instruction. 20 | unsafe { 21 | x86_64::registers::model_specific::Efer::update(|efer| { 22 | *efer |= x86_64::registers::model_specific::EferFlags::SYSTEM_CALL_EXTENSIONS; 23 | }); 24 | } 25 | 26 | // Use SFMASK register to disable interrupts when executing a syscall. This 27 | // is important because we use swapgs and we mess with the stack. 28 | x86_64::registers::model_specific::SFMask::write(RFlags::INTERRUPT_FLAG); 29 | 30 | // Set syscall handler address via LSTAR register 31 | let syscall_handler_addr = VirtAddr::new(syscall_handler as usize as u64); 32 | x86_64::registers::model_specific::LStar::write(syscall_handler_addr); 33 | } 34 | 35 | define_per_cpu_u64!( 36 | /// Used during syscalls to store and restore the top of the kernel stack. 37 | pub(super) TOP_OF_KERNEL_STACK 38 | ); 39 | 40 | define_per_cpu_u64!( 41 | /// Used during syscalls to store and restore the user stack 42 | pub(super) USER_STACK_SCRATCH 43 | ); 44 | 45 | #[naked] 46 | pub(super) unsafe extern "C" fn syscall_handler() { 47 | unsafe { 48 | asm!( 49 | // Swap out the user GS base for the kernel GS base and restore the 50 | // kernel stack. 51 | "swapgs", 52 | "mov gs:{user_stack_scratch}, rsp", 53 | "mov rsp, gs:{kernel_stack}", 54 | 55 | // Construct a pointer to the syscall arguments on the stack. Must 56 | // match TaskRegisters struct order (in reverse). 
57 | "push {user_data_selector}", // ss 58 | "push gs:{user_stack_scratch}", // rsp 59 | "push r11", // rflags, part of syscall convention 60 | "push {user_code_selector}", // cs 61 | "push rcx", // rip, part of syscall convention 62 | "push rdi", // syscall number 63 | // Callee-clobbered 64 | "push rdi", 65 | "push rsi", 66 | "push rdx", 67 | "push rcx", 68 | "push rax", 69 | "push r8", 70 | "push r9", 71 | "push r10", 72 | "push r11", 73 | // Callee-saved 74 | "push rbx", 75 | "push rbp", 76 | "push r12", 77 | "push r13", 78 | "push r14", 79 | "push r15", 80 | 81 | // First arg is pointer to syscall arguments on the stack 82 | "mov rdi, rsp", 83 | 84 | // Call the actual syscall handler 85 | "call {syscall_handler_inner}", 86 | 87 | // Restore registers and run systretq to get back to userland. 88 | // Callee-saved 89 | "pop r15", 90 | "pop r14", 91 | "pop r13", 92 | "pop r12", 93 | "pop rbp", 94 | "pop rbx", 95 | // Callee-clobbered 96 | "pop r11", 97 | "pop r10", 98 | "pop r9", 99 | "pop r8", 100 | "pop rax", 101 | "pop rcx", 102 | "pop rdx", 103 | "pop rsi", 104 | "pop rdi", 105 | // Syscall number 106 | "pop rdi", 107 | // iretq frame 108 | "pop rcx", 109 | "add rsp, 8", // cs, ignored 110 | "pop r11", 111 | "pop rax", // rsp, putting in rax for now so we can put it in rsp later 112 | "add rsp, 8", // ss, ignored 113 | 114 | // Store kernel stack and restore user stack 115 | "mov gs:{kernel_stack}, rsp", 116 | "mov rsp, rax", // rsp was popped into rax earlier 117 | "swapgs", 118 | 119 | // Return to userspace 120 | "sysretq", 121 | kernel_stack = sym TOP_OF_KERNEL_STACK, 122 | user_data_selector = const USER_DATA_SELECTOR.0, 123 | user_code_selector = const USER_CODE_SELECTOR.0, 124 | user_stack_scratch = sym USER_STACK_SCRATCH, 125 | syscall_handler_inner = sym syscall_handler_inner, 126 | options(noreturn), 127 | ) 128 | } 129 | } 130 | 131 | #[allow(clippy::similar_names)] 132 | extern "C" fn syscall_handler_inner(registers: &mut TaskRegisters) { 133 | let processor_id = get_processor_id_no_guard(); 134 | let task_id = current_task_id(); 135 | log::warn!( 136 | "syscall handler! 
processor: {processor_id:?}, task: {task_id:?}, registers: {registers:x?}" 137 | ); 138 | 139 | let syscall_num = registers.syscall_number_or_irq_or_error_code; 140 | 141 | let arg1 = registers.rsi; 142 | let arg2 = registers.rdx; 143 | let arg3 = registers.r10; 144 | let arg4 = registers.r8; 145 | let arg5 = registers.r9; 146 | 147 | let handler = SYSCALL_HANDLERS 148 | .get(syscall_num as usize) 149 | .into_iter() 150 | .flatten() 151 | .next(); 152 | #[allow(clippy::option_if_let_else)] 153 | match handler { 154 | Some(handler) => handler(arg1, arg2, arg3, arg4, arg5), 155 | None => { 156 | log::warn!( 157 | "Unknown syscall {syscall_num} called with args ({arg1}, {arg2}, {arg3}, {arg4}, {arg5})", 158 | ); 159 | } 160 | }; 161 | 162 | // Run scheduler after syscalls 163 | run_scheduler(); 164 | } 165 | 166 | type SyscallHandler = fn(u64, u64, u64, u64, u64); 167 | 168 | static SYSCALL_HANDLERS: [Option; 2] = [ 169 | Some(syscall_exit), // 0 170 | Some(syscall_print), 171 | ]; 172 | 173 | fn syscall_exit(exit_code: u64, _: u64, _: u64, _: u64, _: u64) { 174 | kill_current_task(TaskExitCode::from(exit_code)); 175 | } 176 | 177 | fn syscall_print(data_ptr: u64, data_len: u64, _: u64, _: u64, _: u64) { 178 | let s = unsafe { core::slice::from_raw_parts(data_ptr as *const u8, data_len as usize) }; 179 | let s = core::str::from_utf8(s).unwrap(); 180 | log::info!("PRINT SYSCALL: {}", s); 181 | } 182 | -------------------------------------------------------------------------------- /kernel/src/sched/userspace.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use alloc::string::String; 3 | use alloc::vec::Vec; 4 | use core::arch::asm; 5 | 6 | use x86_64::registers::rflags::RFlags; 7 | use x86_64::VirtAddr; 8 | 9 | use crate::memory::{ 10 | allocate_and_map_pages, set_page_flags, Page, PageRange, PageSize, PageTableEntryFlags, 11 | }; 12 | use crate::{elf, task_creator_box, vfs}; 13 | 14 | use super::schedcore::current_task; 15 | use super::syscall::TOP_OF_KERNEL_STACK; 16 | use super::task::TaskId; 17 | 18 | /// Parameters to create a new process. 19 | pub(crate) struct ExecParams { 20 | pub(crate) path: vfs::FilePath, 21 | pub(crate) args: Vec, 22 | } 23 | 24 | pub(crate) fn new_userspace_task(params: ExecParams) -> TaskId { 25 | create_userspace_task(params.path.as_string(), Box::new(params)) 26 | } 27 | 28 | task_creator_box!(create_userspace_task, ExecParams, task_userspace_setup); 29 | 30 | /// Kernel function that is called when we are starting a userspace task. This 31 | /// is the "entrypoint" to a userspace task, and performs some setup before 32 | /// actually jumping to userspace. 33 | extern "C" fn task_userspace_setup(params: Box) { 34 | let path = ¶ms.path; 35 | let inode = match vfs::get_path_inode(path) { 36 | Ok(inode) => inode, 37 | Err(e) => { 38 | log::warn!("Failed to get inode for path: {e:?}"); 39 | return; 40 | } 41 | }; 42 | 43 | let vfs::InodeType::File(mut file) = inode.inode_type else { 44 | log::warn!("Path {path} not a file"); 45 | return; 46 | }; 47 | 48 | let bytes = file.read_all(); 49 | let elf_exe = match elf::ElfExecutableHeader::parse(&bytes) { 50 | Ok(exe) => exe, 51 | Err(e) => { 52 | log::warn!("Failed to parse ELF: {e:?}"); 53 | return; 54 | } 55 | }; 56 | 57 | let instruction_ptr = elf_exe.entrypoint; 58 | let stack_ptr = set_up_elf_segments(&elf_exe, ¶ms); 59 | 60 | // N.B. 
It is important that jump_to_userspace is marked as returning !, 61 | // which means it never returns, because I _think_ that the compiler will 62 | // properly clean up all the other stuff in this function. Before I had `!` 63 | // I was getting some intermittent page faults. 64 | drop(params); 65 | drop(file); 66 | drop(elf_exe); 67 | drop(bytes); 68 | 69 | unsafe { 70 | jump_to_userspace(instruction_ptr, stack_ptr); 71 | }; 72 | } 73 | 74 | // Separate function so we can clean up before jump_to_userspace, which never returns 75 | fn set_up_elf_segments(elf_exe: &elf::ElfExecutableHeader, params: &ExecParams) -> VirtAddr { 76 | let task = current_task(); 77 | let mut table = task.page_table.lock(); 78 | 79 | // Map ELF segments to userspace addresses 80 | for segment in &elf_exe.loadable_segments { 81 | assert!(segment.alignment as usize == PageSize::Size4KiB.size_bytes()); 82 | 83 | let segment_data = elf_exe 84 | .parsed 85 | .segment_data(&segment.raw_header) 86 | .expect("failed to get segment data"); 87 | let start_page = Page::from_start_addr(segment.vaddr, PageSize::Size4KiB); 88 | let mut user_pages = PageRange::from_num_bytes(start_page, segment.mem_size as usize); 89 | 90 | let initial_flags = PageTableEntryFlags::PRESENT 91 | | PageTableEntryFlags::WRITABLE 92 | | PageTableEntryFlags::USER_ACCESSIBLE; 93 | 94 | allocate_and_map_pages(&mut table, user_pages.iter(), initial_flags) 95 | .expect("failed to map segment pages"); 96 | user_pages.as_byte_slice()[..segment_data.len()].copy_from_slice(segment_data); 97 | 98 | let user_flags = 99 | segment.flags.page_table_entry_flags() | PageTableEntryFlags::USER_ACCESSIBLE; 100 | set_page_flags(&mut table, user_pages.iter(), user_flags) 101 | .expect("failed to set segment flags"); 102 | } 103 | 104 | // Allocate a stack 105 | let stack_start = VirtAddr::new(0x2_1000_0000); 106 | let stack_page = Page::from_start_addr(stack_start, PageSize::Size4KiB); 107 | let stack_pages = PageRange::new(stack_page, 4); 108 | let stack_flags = PageTableEntryFlags::PRESENT 109 | | PageTableEntryFlags::WRITABLE 110 | | PageTableEntryFlags::USER_ACCESSIBLE; 111 | allocate_and_map_pages(&mut table, stack_pages.iter(), stack_flags) 112 | .expect("failed to map stack pages"); 113 | 114 | // Initialize stack. See "3.4 Process Initialization" in the System V AMD64 115 | // ABI spec, and https://lwn.net/Articles/631631/ for a good explanation. 
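    //
    // Concretely, the layout built below is a simplified subset of that ABI
    // (no envp or auxv yet). With the stack growing downward, the new task
    // sees:
    //
    //   higher addresses:   "<arg0>\0" "<arg1>\0" ...    argument strings
    //                       argv[argc-1] ... argv[0]     pointers to the strings
    //   rsp at entry    ->  argc                         argument count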
116 | 117 | let stack_ptr = stack_start + stack_pages.num_bytes(); 118 | let mut stack_ptr = stack_ptr.as_mut_ptr::(); 119 | 120 | // TODO: Add environment variables and auxiliary vector onto stack 121 | 122 | // Push args onto stack as nul-terminated strings (remember first arg is the program path) 123 | let first_arg = params 124 | .path 125 | .components 126 | .last() 127 | .map(|s| String::from(s.as_str())) 128 | .unwrap_or_default(); 129 | let all_args = core::iter::once(&first_arg).chain(params.args.iter()); 130 | let arg_locations = all_args 131 | .map(|arg| { 132 | // Write as nul-terminated string 133 | let arg_ptr = stack_ptr.wrapping_sub(arg.len() + 1); 134 | unsafe { 135 | arg_ptr.copy_from_nonoverlapping(arg.as_ptr(), arg.len()); 136 | arg_ptr.add(arg.len()).write(0); 137 | } 138 | stack_ptr = arg_ptr; 139 | arg_ptr as usize 140 | }) 141 | .collect::>(); 142 | 143 | // Align stack pointer _down_ to usize alignment (stack grows down) 144 | let mut stack_ptr: *mut usize = unsafe { 145 | #[allow(clippy::cast_ptr_alignment)] 146 | stack_ptr 147 | .sub(8) 148 | .add(stack_ptr.align_offset(8)) 149 | .cast::() 150 | }; 151 | assert!( 152 | stack_ptr as usize % core::mem::align_of::() == 0, 153 | "stack_ptr {stack_ptr:p} not aligned!" 154 | ); 155 | 156 | // Push argv onto stack 157 | arg_locations.iter().rev().for_each(|arg_ptr| unsafe { 158 | stack_ptr = stack_ptr.sub(1); 159 | stack_ptr.write(*arg_ptr); 160 | }); 161 | 162 | // Push argc onto stack 163 | unsafe { 164 | stack_ptr = stack_ptr.sub(1); 165 | stack_ptr.cast::().write(arg_locations.len()); 166 | } 167 | 168 | VirtAddr::new(stack_ptr as u64) 169 | } 170 | 171 | #[naked] 172 | pub(super) unsafe extern "C" fn jump_to_userspace( 173 | user_instruction_pointer: VirtAddr, 174 | user_stack_pointer: VirtAddr, 175 | ) { 176 | unsafe { 177 | asm!( 178 | // Store the kernel stack and switch to the user stack 179 | "mov gs:{kernel_stack}, rsp", 180 | "mov rsp, rsi", // Second argument, new stack pointer 181 | // Set up sysretq arguments 182 | "mov rcx, rdi", // First argument, new instruction pointer 183 | "mov r11, {rflags}", // rflags 184 | // Swap out the kernel GS base for the user's so userspace can't 185 | // mess with our GS base. 186 | "swapgs", 187 | // Jump to userspace 188 | "sysretq", 189 | kernel_stack = sym TOP_OF_KERNEL_STACK, 190 | rflags = const RFlags::INTERRUPT_FLAG.bits(), 191 | options(noreturn), 192 | ) 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /kernel/src/serial.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Write; 2 | 3 | use x86_64::instructions::port::{PortRead, PortWrite}; 4 | 5 | use crate::{ansiterm, sync::InitCell}; 6 | 7 | /// Reads and writes to a serial port. See 8 | /// 9 | /// Note that this doesn't do any sort of locking, so multiple writers may write 10 | /// concurrently. This is important so we can print inside of non-maskable 11 | /// exception handlers without worrying about deadlocks. Any desired locking 12 | /// should be done at a higher level. 
13 | struct SerialPort { 14 | // data is technically read/write, but we only use it for writing 15 | data: u16, 16 | int_en: u16, 17 | fifo_ctrl: u16, 18 | line_ctrl: u16, 19 | modem_ctrl: u16, 20 | line_sts: u16, 21 | } 22 | 23 | const COM1_PORT: u16 = 0x3F8; 24 | const COM2_PORT: u16 = 0x2F8; 25 | 26 | impl SerialPort { 27 | const LINE_STATUS_DATA_READY: u8 = 1 << 0; 28 | const LINE_STATUS_TRANSMITTER_EMPTY: u8 = 1 << 5; 29 | 30 | const fn new(com_port: u16) -> Self { 31 | Self { 32 | data: com_port, 33 | int_en: com_port + 1, 34 | fifo_ctrl: com_port + 2, 35 | line_ctrl: com_port + 3, 36 | modem_ctrl: com_port + 4, 37 | line_sts: com_port + 5, 38 | } 39 | } 40 | 41 | /// See for init options 42 | fn init(&self) { 43 | unsafe { 44 | // Disable interrupts 45 | u8::write_to_port(self.int_en, 0x00); 46 | 47 | // Enable DLAB 48 | u8::write_to_port(self.line_ctrl, 0x80); 49 | 50 | // Set maximum speed to 115200 bps by configuring DLL and DLM 51 | u8::write_to_port(self.data, 0x01); // Low byte 52 | u8::write_to_port(self.int_en, 0x00); // High byte 53 | 54 | // Disable DLAB and set data word length to 8 bits, no parity, one 55 | // stop bit 56 | u8::write_to_port(self.line_ctrl, 0x03); 57 | 58 | // Enable FIFO, clear them, with 14-byte threshold 59 | u8::write_to_port(self.fifo_ctrl, 0xC7); 60 | 61 | // Mark data terminal ready, signal request to send 62 | // and enable auxilliary output #2 (used as interrupt line for CPU) 63 | u8::write_to_port(self.modem_ctrl, 0x0B); 64 | 65 | // Enable interrupts 66 | // 67 | // TODO: Do we even need interrupts? 68 | u8::write_to_port(self.int_en, 0x01); 69 | } 70 | } 71 | 72 | fn is_transmit_empty(&self) -> bool { 73 | unsafe { u8::read_from_port(self.line_sts) & Self::LINE_STATUS_TRANSMITTER_EMPTY != 0 } 74 | } 75 | 76 | fn write(&self, byte: u8) { 77 | // Wait for line to clear 78 | while !self.is_transmit_empty() { 79 | core::hint::spin_loop(); 80 | } 81 | 82 | unsafe { 83 | u8::write_to_port(self.data, byte); 84 | } 85 | } 86 | 87 | fn write_str_bytes(&self, s: &str) { 88 | for byte in s.bytes() { 89 | self.write(byte); 90 | } 91 | } 92 | 93 | fn is_data_ready(&self) -> bool { 94 | unsafe { u8::read_from_port(self.line_sts) & Self::LINE_STATUS_DATA_READY != 0 } 95 | } 96 | 97 | fn read(&self) -> u8 { 98 | while !self.is_data_ready() { 99 | core::hint::spin_loop(); 100 | } 101 | 102 | unsafe { u8::read_from_port(self.data) } 103 | } 104 | } 105 | 106 | impl Write for SerialPort { 107 | fn write_str(&mut self, s: &str) -> core::fmt::Result { 108 | self.write_str_bytes(s); 109 | Ok(()) 110 | } 111 | } 112 | 113 | /// This type exists just so we can use the `Write` trait. Useful for use with 114 | /// the `write!` macro. We don't want to implement `Write` directly on 115 | /// `SerialPort` because we don't want to have to make a global mutable 116 | /// reference to it. 117 | pub(crate) struct SerialWriter(); 118 | 119 | impl Write for SerialWriter { 120 | fn write_str(&mut self, s: &str) -> core::fmt::Result { 121 | SERIAL1 122 | .get() 123 | .expect("SERIAL1 not initialized") 124 | .write_str_bytes(s); 125 | Ok(()) 126 | } 127 | } 128 | 129 | pub(crate) fn init() { 130 | SERIAL1.init(init_serial_port(COM1_PORT)); 131 | SERIAL2.init(init_serial_port(COM2_PORT)); 132 | } 133 | 134 | fn init_serial_port(com_port: u16) -> SerialPort { 135 | let mut serial_port = SerialPort::new(com_port); 136 | serial_port.init(); 137 | 138 | // When running in UEFI, the OVMF firmware prints a ton of crap 139 | // formatting characters to the serial port. 
This clears the screen 140 | // before printing so we don't have to look at that. Here it is for 141 | // posterity: 142 | // 143 | // [=3hBdsDxe: failed to load Boot0001 "UEFI QEMU DVD-ROM QM00005 " from PciRoot(0x0)/Pci(0x1F,0x2)/Sata(0x2,0xFFFF,0x0): Not Found 144 | // BdsDxe: loading Boot0002 "UEFI QEMU HARDDISK QM00001 " from PciRoot(0x0)/Pci(0x1F,0x2)/Sata(0x0,0xFFFF,0x0) 145 | // BdsDxe: starting Boot0002 "UEFI QEMU HARDDISK QM00001 " from PciRoot(0x0)/Pci(0x1F,0x2)/Sata(0x0,0xFFFF,0x0) 146 | //  147 | // 148 | // TODO: Find a way to clear the serial port so qemu doesn't see that 149 | // stuff in the first place. 150 | // 151 | // See https://gist.github.com/fnky/458719343aabd01cfb17a3a4f7296797 for 152 | // escape codes: 153 | // 154 | // - `[0m` resets all styles and colors 155 | // - `[H` moves the cursor to the top left 156 | // - `[J` clears the screen from the cursor down 157 | write!( 158 | serial_port, 159 | "{}{}{}", 160 | ansiterm::CLEAR_FORMAT, 161 | ansiterm::AnsiEscapeSequence::MoveCursorTopLeft, 162 | ansiterm::AnsiEscapeSequence::ClearScreenFromCursorToEnd, 163 | ) 164 | .expect("Failed to clear serial port"); 165 | 166 | serial_port 167 | } 168 | 169 | static SERIAL1: InitCell = InitCell::new(); 170 | static SERIAL2: InitCell = InitCell::new(); 171 | 172 | /// Fetch the global serial writer for use in `write!` macros. 173 | /// 174 | /// # Examples 175 | /// 176 | /// ``` 177 | /// writeln!(serial1_writer(), "Hello, world!"); 178 | /// ``` 179 | pub(crate) fn serial1_writer() -> SerialWriter { 180 | SerialWriter() 181 | } 182 | 183 | // Starts with underscore so no one tries to use it. It needs to be exported 184 | // though. 185 | #[doc(hidden)] 186 | pub(crate) fn _print(args: ::core::fmt::Arguments) { 187 | serial1_writer() 188 | .write_fmt(args) 189 | .expect("Printing to serial failed"); 190 | } 191 | 192 | /// Prints to the host through the serial interface. 193 | #[macro_export] 194 | macro_rules! serial_print { 195 | ($($arg:tt)*) => { 196 | $crate::serial::_print(format_args!($($arg)*)) 197 | }; 198 | } 199 | 200 | /// Prints to the host through the serial interface, appending a newline. 201 | #[macro_export] 202 | macro_rules! serial_println { 203 | () => { 204 | $crate::serial_print!("\n") 205 | }; 206 | 207 | ($fmt:expr) => { 208 | { 209 | $crate::serial_print!($fmt); 210 | $crate::serial_print!("\n"); 211 | } 212 | }; 213 | 214 | ($fmt:expr, $($arg:tt)*) => { 215 | { 216 | $crate::serial_print!($fmt, $( $arg )*); 217 | $crate::serial_print!("\n"); 218 | } 219 | }; 220 | } 221 | 222 | pub(crate) fn serial1_write_byte(byte: u8) { 223 | SERIAL1.get().expect("SERIAL1 not initialized").write(byte); 224 | } 225 | 226 | /// Read the next byte from the serial port. 227 | pub(crate) fn serial1_read_byte() -> u8 { 228 | SERIAL1.get().expect("SERIAL1 not initialized").read() 229 | } 230 | -------------------------------------------------------------------------------- /kernel/src/strings.rs: -------------------------------------------------------------------------------- 1 | /// # Safety 2 | /// 3 | /// If the string is not null-terminated, this will happily iterate through 4 | /// memory and print garbage until it finds a null byte or we hit a protection 5 | /// fault because we tried to ready a page we don't have access to. 
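///
/// Illustrative use with a buffer that is known to contain a nul byte (the
/// buffer here is made up for the example):
///
/// ```ignore
/// let bytes = b"hello\0trailing garbage";
/// let s = unsafe { c_str_from_pointer(bytes.as_ptr(), bytes.len()) };
/// assert_eq!(s, "hello");
/// ```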
6 | pub(crate) unsafe fn c_str_from_pointer(ptr: *const u8, max_size: usize) -> &'static str { 7 | let mut len: usize = 0; 8 | while len < max_size { 9 | let c = *ptr.add(len); 10 | if c == 0 { 11 | break; 12 | } 13 | len += 1; 14 | } 15 | 16 | let slice = core::slice::from_raw_parts(ptr, len); 17 | core::str::from_utf8(slice).unwrap_or("") 18 | } 19 | 20 | // /// A wrapper around a `Write` that handles indentation. 21 | // pub(crate) struct IndentWriter<'a, W: Write> { 22 | // writer: &'a mut W, 23 | // indent: usize, 24 | // indent_delta: usize, 25 | // on_start_of_line: bool, 26 | // } 27 | 28 | // impl IndentWriter<'_, W> { 29 | // pub(crate) fn new(writer: &mut W, indent_delta: usize) -> IndentWriter { 30 | // IndentWriter { 31 | // writer, 32 | // indent: 0, 33 | // indent_delta, 34 | // on_start_of_line: true, 35 | // } 36 | // } 37 | 38 | // pub(crate) fn indent(&mut self) { 39 | // self.indent += self.indent_delta; 40 | // } 41 | 42 | // pub(crate) fn unindent(&mut self) { 43 | // self.indent -= self.indent_delta; 44 | // } 45 | // } 46 | 47 | // impl Write for IndentWriter<'_, W> { 48 | // fn write_str(&mut self, s: &str) -> core::fmt::Result { 49 | // for c in s.chars() { 50 | // // Do indentation if we are at the start of a line. If we are about 51 | // // to print a newline though, don't worry about it. 52 | // if self.on_start_of_line && c != '\n' { 53 | // write!(self.writer, "{}", " ".repeat(self.indent))?; 54 | // self.on_start_of_line = false; 55 | // } 56 | 57 | // // Write the character 58 | // write!(self.writer, "{c}")?; 59 | 60 | // // If we just printed a newline, we are at the start of a line. 61 | // self.on_start_of_line = c == '\n'; 62 | // } 63 | // Ok(()) 64 | // } 65 | // } 66 | -------------------------------------------------------------------------------- /kernel/src/sync/atomic_int.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering}; 4 | 5 | /// Wrapper around an atomic integer type (via `AtomicInt`) that supports 6 | /// transparently converting to/from a specific type. 
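///
/// Sketch of use, shown here with the trivial `u64`-to-`u64` conversion so no
/// extra `From`/`Into` impls are assumed:
///
/// ```ignore
/// let counter: AtomicInt<u64, u64> = AtomicInt::new(0);
/// counter.store(7);
/// assert_eq!(counter.load(), 7);
/// assert_eq!(counter.swap(9), 7);
/// ```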
7 | pub(crate) struct AtomicInt 8 | where 9 | I: AtomicIntTrait, 10 | { 11 | atom: I::Atomic, 12 | _phantom: PhantomData, 13 | } 14 | 15 | impl AtomicInt 16 | where 17 | I: AtomicIntTrait + fmt::Display + Copy, 18 | T: From + Into, 19 | { 20 | pub(crate) fn new(val: T) -> Self { 21 | Self { 22 | atom: ::new(val.into()), 23 | _phantom: PhantomData, 24 | } 25 | } 26 | 27 | pub(crate) fn load(&self) -> T { 28 | let val = ::load(&self.atom, Ordering::Acquire); 29 | T::from(val) 30 | } 31 | 32 | pub(crate) fn store(&self, val: T) { 33 | ::store(&self.atom, val.into(), Ordering::Release); 34 | } 35 | 36 | pub(crate) fn swap(&self, val: T) -> T { 37 | let old_val = ::swap(&self.atom, val.into(), Ordering::Acquire); 38 | T::from(old_val) 39 | } 40 | } 41 | 42 | impl fmt::Debug for AtomicInt 43 | where 44 | I: AtomicIntTrait + fmt::Display + Copy, 45 | T: From + Into + fmt::Debug, 46 | { 47 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 48 | write!(f, "AtomicInt({:?})", self.load()) 49 | } 50 | } 51 | 52 | pub(crate) trait AtomicIntTrait { 53 | type Atomic; 54 | 55 | fn new(val: Self) -> Self::Atomic; 56 | fn load(atom: &Self::Atomic, order: Ordering) -> Self; 57 | fn store(atom: &Self::Atomic, val: Self, order: Ordering); 58 | fn swap(atom: &Self::Atomic, val: Self, order: Ordering) -> Self; 59 | } 60 | 61 | macro_rules! atomic_int_trait_impl { 62 | ($type:ty, $atom:ty) => { 63 | impl AtomicIntTrait for $type { 64 | type Atomic = $atom; 65 | 66 | fn new(val: Self) -> Self::Atomic { 67 | Self::Atomic::new(val) 68 | } 69 | 70 | fn load(atom: &Self::Atomic, order: Ordering) -> Self { 71 | atom.load(order) 72 | } 73 | 74 | fn store(atom: &Self::Atomic, val: Self, order: Ordering) { 75 | atom.store(val, order); 76 | } 77 | 78 | fn swap(atom: &Self::Atomic, val: Self, order: Ordering) -> Self { 79 | atom.swap(val, order) 80 | } 81 | } 82 | }; 83 | } 84 | 85 | atomic_int_trait_impl!(u8, AtomicU8); 86 | atomic_int_trait_impl!(u16, AtomicU16); 87 | atomic_int_trait_impl!(u32, AtomicU32); 88 | atomic_int_trait_impl!(u64, AtomicU64); 89 | 90 | /// Wrapper around `AtomicInt` that allows fallible conversion, which is super 91 | /// useful for enums. 
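///
/// Sketch of intended use with a hypothetical two-variant enum; the enum, its
/// `TryFrom<u8>`/`Into<u8>` impls, and the generic parameter order are all
/// assumptions for illustration:
///
/// ```ignore
/// #[derive(Debug, PartialEq)]
/// enum State { Idle = 0, Running = 1 }
///
/// let state: AtomicEnum<u8, State> = AtomicEnum::new(State::Idle);
/// assert_eq!(state.swap(State::Running), State::Idle);
/// assert_eq!(state.load(), State::Running);
/// ```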
92 | pub(crate) struct AtomicEnum 93 | where 94 | I: AtomicIntTrait, 95 | I::Atomic: fmt::Debug, 96 | { 97 | int: AtomicInt, 98 | _phantom: PhantomData, 99 | } 100 | 101 | impl AtomicEnum 102 | where 103 | I: AtomicIntTrait + fmt::Display + Copy, 104 | I::Atomic: fmt::Debug, 105 | T: TryFrom + Into, 106 | { 107 | pub(crate) fn new(val: T) -> Self { 108 | Self { 109 | int: AtomicInt::new(val.into()), 110 | _phantom: PhantomData, 111 | } 112 | } 113 | 114 | fn convert_from_integer(val: I) -> T { 115 | T::try_from(val).map_or_else( 116 | |_| { 117 | panic!("ERROR: Invalid enum value {val}"); 118 | }, 119 | |enum_val| enum_val, 120 | ) 121 | } 122 | 123 | pub(crate) fn load(&self) -> T { 124 | let val = self.int.load(); 125 | Self::convert_from_integer(val) 126 | } 127 | 128 | pub(crate) fn swap(&self, val: T) -> T { 129 | let old_val = self.int.swap(val.into()); 130 | Self::convert_from_integer(old_val) 131 | } 132 | } 133 | 134 | impl fmt::Debug for AtomicEnum 135 | where 136 | I: AtomicIntTrait + fmt::Display + Copy, 137 | I::Atomic: fmt::Debug, 138 | T: TryFrom + Into + fmt::Debug, 139 | { 140 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 141 | write!(f, "AtomicEnum({:?})", self.load()) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /kernel/src/sync/init_cell.rs: -------------------------------------------------------------------------------- 1 | use super::once_cell::OnceCell; 2 | 3 | /// A cell that can be initialized only once. This is useful because we can 4 | /// share it between multiple threads without having to use a mutex, and since 5 | /// the value can only be written once, we don't need a mutable reference to 6 | /// write to it, so we can store this value as a static. 7 | #[derive(Debug)] 8 | pub(crate) struct InitCell { 9 | cell: OnceCell, 10 | } 11 | 12 | impl InitCell { 13 | pub(crate) const fn new() -> Self { 14 | Self { 15 | cell: OnceCell::new(), 16 | } 17 | } 18 | 19 | pub(crate) fn init(&self, value: T) { 20 | self.cell.set(value); 21 | } 22 | 23 | pub(crate) fn get(&self) -> Option<&T> { 24 | self.cell.get_ref() 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /kernel/src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod atomic_int; 2 | pub(crate) mod init_cell; 3 | pub(crate) mod mutex; 4 | pub(crate) mod once_cell; 5 | pub(crate) mod once_channel; 6 | pub(crate) mod spin_lock; 7 | pub(crate) mod wait_cell; 8 | 9 | pub(crate) use atomic_int::*; 10 | pub(crate) use init_cell::*; 11 | pub(crate) use mutex::*; 12 | pub(crate) use once_channel::*; 13 | pub(crate) use spin_lock::*; 14 | pub(crate) use wait_cell::*; 15 | -------------------------------------------------------------------------------- /kernel/src/sync/mutex.rs: -------------------------------------------------------------------------------- 1 | use alloc::collections::VecDeque; 2 | use core::ops::{Deref, DerefMut}; 3 | 4 | use crate::sched; 5 | use crate::sched::TaskId; 6 | 7 | use super::spin_lock::{SpinLock, SpinLockGuard}; 8 | 9 | /// Mutex that puts processes to sleep while waiting for access. 10 | #[derive(Debug)] 11 | pub(crate) struct Mutex { 12 | /// The inner value. 13 | inner: SpinLock, 14 | 15 | /// The queue of processes waiting for access. 
16 | waiting_tasks: SpinLock>, 17 | } 18 | 19 | impl Mutex { 20 | pub(crate) const fn new(data: T) -> Self { 21 | Self { 22 | inner: SpinLock::new(data), 23 | waiting_tasks: SpinLock::new(VecDeque::new()), 24 | } 25 | } 26 | 27 | // TODO: It would be preferable to only wake up the next task, but we need 28 | // to be careful that the task we wake up actually takes the lock, or else 29 | // everyone else will wait. For example, if the next task actually died or 30 | // is sleeping for some other reason, we need to skip it and try the next 31 | // one. 32 | fn wake_tasks(&self) { 33 | let mut waiting_tasks = self.waiting_tasks.lock(); 34 | while let Some(task_id) = waiting_tasks.pop_front() { 35 | sched::awaken_task(task_id); 36 | } 37 | } 38 | 39 | /// Attempts to lock the mutex and sleeps while unsuccessful. 40 | pub(crate) fn lock(&self) -> MutexGuard<'_, T> { 41 | loop { 42 | // Set desired_state to sleeping before checking value to avoid race 43 | // condition where we get woken up before we go to sleep. 44 | let task_id = sched::prepare_to_sleep(); 45 | 46 | self.waiting_tasks 47 | .lock_disable_interrupts() 48 | .push_back(task_id); 49 | 50 | let guard = self.inner.try_lock_allow_preempt(); 51 | if let Some(guard) = guard { 52 | // TODO: We should probably remove ourselves from the waiting 53 | // task list here, but I think worst case is that we just have 54 | // to keep trying to wake up tasks. 55 | sched::awaken_task(task_id); 56 | return MutexGuard { 57 | inner_guard: guard, 58 | parent: self, 59 | }; 60 | } 61 | 62 | sched::run_scheduler(); 63 | } 64 | } 65 | } 66 | 67 | pub(crate) struct MutexGuard<'a, T> { 68 | inner_guard: SpinLockGuard<'a, T>, 69 | parent: &'a Mutex, 70 | } 71 | 72 | impl<'a, T> Deref for MutexGuard<'a, T> { 73 | type Target = T; 74 | 75 | #[allow(clippy::explicit_deref_methods)] 76 | fn deref(&self) -> &T { 77 | self.inner_guard.deref() 78 | } 79 | } 80 | 81 | impl<'a, T> DerefMut for MutexGuard<'a, T> { 82 | #[allow(clippy::explicit_deref_methods)] 83 | fn deref_mut(&mut self) -> &mut T { 84 | self.inner_guard.deref_mut() 85 | } 86 | } 87 | 88 | impl Drop for MutexGuard<'_, T> { 89 | fn drop(&mut self) { 90 | self.parent.wake_tasks(); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /kernel/src/sync/once_cell.rs: -------------------------------------------------------------------------------- 1 | use core::cell::{Ref, RefCell}; 2 | use core::sync::atomic::{AtomicBool, Ordering}; 3 | 4 | /// A thread-safe cell that can be written to only once. 5 | /// 6 | /// Uses atomics for speed, but the tradeoff is we panic if the cell is written 7 | /// to more than once. (If we used a spinlock we could ensure we only write once 8 | /// and return an error instead of panicking.) 9 | /// 10 | /// This should not be used directly in kernel code, but is instead a useful 11 | /// primitive for other synchronization primitives that are safer. 12 | #[derive(Debug)] 13 | pub(super) struct OnceCell { 14 | message: RefCell>, 15 | ready: AtomicBool, 16 | } 17 | 18 | unsafe impl Sync for OnceCell where T: Send {} 19 | 20 | impl OnceCell { 21 | pub(super) const fn new() -> Self { 22 | Self { 23 | message: RefCell::new(None), 24 | ready: AtomicBool::new(false), 25 | } 26 | } 27 | 28 | /// Write a value to the cell. 29 | /// 30 | /// This function can only be called once. 
This is important because writing 31 | /// a value discards the old value, and we will never drop the old value 32 | /// (this is a `MaybeUninit` feature/limitation). We panic if we call this 33 | /// function twice. 34 | pub(super) fn set(&self, message: T) { 35 | let _ = self.message.replace(Some(message)); 36 | let old = self.ready.swap(true, Ordering::Release); 37 | assert!(!old, "ERROR: Tried to write cell value twice"); 38 | } 39 | 40 | /// Extracts the value from the cell. 41 | /// 42 | /// This function can only be called once. It is itself "safe" because the 43 | /// invariant on `set` ensures we only call `set` once, and the function 44 | /// also uses the atomic bool `ready` to ensure we only read once. However, 45 | /// the caller should ensure that if `get_once` is ever called, then other 46 | /// functions that get references or copied to the value are _never_ called. 47 | pub(super) fn get_once(&self) -> Option { 48 | if self.ready.swap(false, Ordering::Acquire) { 49 | // Safety: We should only read a message once, since we are reifying 50 | // it from a single memory location. The swap above is an extra 51 | // safeguard to ensure we don't read the message twice. 52 | self.message.replace(None) 53 | } else { 54 | None 55 | } 56 | } 57 | 58 | /// Extracts a reference to the stored value. 59 | pub(super) fn get_ref(&self) -> Option<&T> { 60 | if self.ready.load(Ordering::Acquire) { 61 | // NOTE: We use `Ref::leak` here to avoid needing to wrap in `Ref`. 62 | // Using `leak` means writing to the `RefCell` will panic, which is 63 | // preferred to unsafe behavior. 64 | Ref::leak(self.message.borrow()).as_ref() 65 | } else { 66 | None 67 | } 68 | } 69 | } 70 | 71 | impl OnceCell { 72 | /// Extracts a clone of the stored value. 73 | pub(super) fn get_clone(&self) -> Option { 74 | if self.ready.load(Ordering::Acquire) { 75 | self.message.borrow().clone() 76 | } else { 77 | None 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /kernel/src/sync/once_channel.rs: -------------------------------------------------------------------------------- 1 | use alloc::sync::Arc; 2 | use core::marker::PhantomData; 3 | 4 | use crate::sched; 5 | use crate::sched::TaskId; 6 | 7 | use super::once_cell::OnceCell; 8 | 9 | /// Creates a `OnceSender` and `OnceReceiver` pair. The sender can send a single 10 | /// value (hence the "once") to the receiver, and the receiver can wait for the 11 | /// value to be sent. While waiting, the receiver is put to sleep, and the 12 | /// sender ensures the receiver is woken up when the value is sent. 13 | /// 14 | /// Note that this function must be called on the receiver's thread and the 15 | /// receiver can't be moved to another thread, since the `ThreadId` for the 16 | /// receiver is stored in `OnceSender`. This is enforced by `OnceReceiver` _not_ 17 | /// implementing `Send`. 18 | pub(crate) fn once_channel() -> (OnceSender, OnceReceiver) { 19 | let receiver_task_id = sched::current_task_id(); 20 | let cell = Arc::new(OnceCell::new()); 21 | let sender = OnceSender { 22 | cell: cell.clone(), 23 | receiver_task_id, 24 | }; 25 | let receiver = OnceReceiver { 26 | cell, 27 | _no_send: PhantomData, 28 | }; 29 | (sender, receiver) 30 | } 31 | 32 | /// Sender side of a `once_channel`. 33 | #[derive(Debug)] 34 | pub(crate) struct OnceSender { 35 | cell: Arc>, 36 | receiver_task_id: TaskId, 37 | } 38 | 39 | impl OnceSender { 40 | /// Write a value to the channel so the receiver can read it. 
This can only 41 | /// be called once because it consumes `self`. 42 | pub(crate) fn send(self, message: T) { 43 | // Safety: We only call this function once, which is enforced by this 44 | // function consuming `self`. 45 | self.cell.set(message); 46 | sched::awaken_task(self.receiver_task_id); 47 | } 48 | } 49 | 50 | /// Receiver side of a `once_channel`. 51 | #[derive(Debug)] 52 | pub(crate) struct OnceReceiver { 53 | cell: Arc>, 54 | 55 | /// This is a hack to make `OnceReceiver` not implement `Send`. This is 56 | /// necessary so the `TaskId` of the receiver doesn't change. If the 57 | /// `TaskId` changed, then the sender would wake up the wrong task. 58 | _no_send: PhantomData<*const ()>, 59 | } 60 | 61 | impl OnceReceiver { 62 | pub(crate) fn wait_sleep(self) -> T { 63 | loop { 64 | // Set desired_state to sleeping before checking value to avoid race 65 | // condition where we get woken up before we go to sleep. 66 | let task_id = sched::prepare_to_sleep(); 67 | 68 | let message = self.cell.get_once(); 69 | if let Some(message) = message { 70 | sched::awaken_task(task_id); 71 | return message; 72 | } 73 | sched::run_scheduler(); 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /kernel/src/sync/spin_lock.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Deref, DerefMut}; 2 | 3 | use spin::mutex::{SpinMutex, SpinMutexGuard}; 4 | 5 | use crate::sched::PreemptGuard; 6 | 7 | /// Wrapper around `spin::mutex::SpinMutex` with some added features, like 8 | /// handling disabling and enabling interrupts. 9 | #[derive(Debug)] 10 | pub(crate) struct SpinLock { 11 | mutex: SpinMutex, 12 | } 13 | 14 | impl SpinLock { 15 | pub(crate) const fn new(data: T) -> Self { 16 | Self { 17 | mutex: SpinMutex::new(data), 18 | } 19 | } 20 | 21 | pub(crate) fn lock(&self) -> SpinLockGuard<'_, T> { 22 | // Ordering is important! Disable preemption before taking the lock. 23 | let preempt_guard = PreemptGuard::new(()); 24 | SpinLockGuard { 25 | guard: self.mutex.lock(), 26 | _interrupt_guard: InterruptGuard { 27 | needs_enabling: false, 28 | }, 29 | _preempt_guard: Some(preempt_guard), 30 | } 31 | } 32 | 33 | /// Internal function to take a lock but not mess with the preemption count. 34 | /// Used in Mutexes. 35 | pub(super) fn try_lock_allow_preempt(&self) -> Option> { 36 | let guard = self.mutex.try_lock()?; 37 | Some(SpinLockGuard { 38 | guard, 39 | _interrupt_guard: InterruptGuard { 40 | needs_enabling: false, 41 | }, 42 | _preempt_guard: None, 43 | }) 44 | } 45 | 46 | /// Locks the mutex and disables interrupts while the lock is held. Restores 47 | /// interrupts to their previous state (enabled or disabled) once the lock 48 | /// is released. 49 | pub(crate) fn lock_disable_interrupts(&self) -> SpinLockGuard<'_, T> { 50 | // Ordering is important! Disable preemption before taking the lock. 51 | let preempt_guard = PreemptGuard::new(()); 52 | 53 | let saved_intpt_flag = x86_64::instructions::interrupts::are_enabled(); 54 | 55 | // If interrupts are enabled, disable them for now. They will be 56 | // re-enabled when the guard drops. 
57 | if saved_intpt_flag { 58 | x86_64::instructions::interrupts::disable(); 59 | } 60 | 61 | SpinLockGuard { 62 | guard: self.mutex.lock(), 63 | _interrupt_guard: InterruptGuard { 64 | needs_enabling: saved_intpt_flag, 65 | }, 66 | _preempt_guard: Some(preempt_guard), 67 | } 68 | } 69 | 70 | pub(crate) unsafe fn force_unlock(&self) { 71 | self.mutex.force_unlock(); 72 | } 73 | } 74 | 75 | /// Wrapper around `spin::mutex::SpinMutexGuard`, used with `SpinLock`. 76 | pub(crate) struct SpinLockGuard<'a, T: ?Sized + 'a> { 77 | guard: SpinMutexGuard<'a, T>, 78 | // Note: ordering is very important here! We want to restore interrupts to 79 | // their previous state (enabled or disabled) _after_ the spinlock guard is 80 | // dropped. Rust drops fields in order. 81 | _interrupt_guard: InterruptGuard, 82 | // We want to drop preemption after dropping the lock and enabling 83 | // interrupts. 84 | _preempt_guard: Option>, 85 | } 86 | 87 | impl<'a, T> Deref for SpinLockGuard<'a, T> { 88 | type Target = T; 89 | 90 | #[allow(clippy::explicit_deref_methods)] 91 | fn deref(&self) -> &T { 92 | self.guard.deref() 93 | } 94 | } 95 | 96 | impl<'a, T> DerefMut for SpinLockGuard<'a, T> { 97 | #[allow(clippy::explicit_deref_methods)] 98 | fn deref_mut(&mut self) -> &mut T { 99 | self.guard.deref_mut() 100 | } 101 | } 102 | 103 | struct InterruptGuard { 104 | needs_enabling: bool, 105 | } 106 | 107 | impl Drop for InterruptGuard { 108 | fn drop(&mut self) { 109 | if self.needs_enabling { 110 | x86_64::instructions::interrupts::enable(); 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /kernel/src/sync/wait_cell.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::sched; 4 | use crate::sched::TaskId; 5 | 6 | use super::once_cell::OnceCell; 7 | use super::spin_lock::SpinLock; 8 | 9 | /// A value that can be waited on by tasks. Tasks sleep while they wait, and 10 | /// they are woken up when the value is written. Each waiting task is given a 11 | /// copy of the value. It is common to use `Arc` as the value type, to make 12 | /// copies cheap. 13 | #[derive(Debug)] 14 | pub(crate) struct WaitCell { 15 | cell: OnceCell, 16 | waiting_tasks: SpinLock>, 17 | } 18 | 19 | impl WaitCell { 20 | pub(crate) fn new() -> Self { 21 | Self { 22 | cell: OnceCell::new(), 23 | waiting_tasks: SpinLock::new(Vec::new()), 24 | } 25 | } 26 | 27 | /// Sends value to all waiting tasks and wakes them up. 28 | pub(crate) fn send_all_consumers(&self, val: T) { 29 | self.cell.set(val); 30 | let mut task_ids = self.waiting_tasks.lock_disable_interrupts(); 31 | for task_id in task_ids.drain(..) { 32 | sched::awaken_task(task_id); 33 | } 34 | } 35 | 36 | /// Waits until the value is initialized, sleeping if necessary. 37 | pub(crate) fn wait_sleep(&self) -> T { 38 | loop { 39 | // Set desired_state to sleeping before checking value to avoid race 40 | // condition where we get woken up before we go to sleep. 41 | let task_id = sched::prepare_to_sleep(); 42 | 43 | // TODO: If we have a spurious wakeup we might add ourselves twice 44 | // because we would have never been removed from waiting_tasks in 45 | // the first place. Seems fine for now. 
46 | self.waiting_tasks.lock_disable_interrupts().push(task_id); 47 | 48 | let message = self.cell.get_clone(); 49 | if let Some(message) = message { 50 | sched::awaken_task(task_id); 51 | return message; 52 | } 53 | sched::run_scheduler(); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /kernel/src/tests/magic.rs: -------------------------------------------------------------------------------- 1 | //! This module is called magic because it uses linker script magic to register 2 | //! and run tests. Based on the Linux kernel [KUnit 3 | //! architecture](https://www.kernel.org/doc/html/latest/dev-tools/kunit/architecture.html). 4 | 5 | use proptest::{prop_assert, proptest}; 6 | 7 | use test_infra::SimpleTest; 8 | use test_macro::kernel_test; 9 | 10 | extern "C" { 11 | static _start_init_test_array: u8; 12 | static _end_init_test_array: u8; 13 | } 14 | 15 | pub(super) fn run_tests_from_linker() { 16 | log::info!("Running tests from linker..."); 17 | 18 | let tests = find_tests(); 19 | log::info!("{} tests found", tests.len()); 20 | 21 | for test in tests { 22 | log::info!("Running test {}::{}...", test.module, test.name); 23 | let test_fn = test.test_fn; 24 | test_fn(); 25 | } 26 | 27 | log::info!("Tests from linker complete!"); 28 | } 29 | 30 | pub fn find_tests() -> &'static [SimpleTest] { 31 | let test_array_start = unsafe { core::ptr::addr_of!(_start_init_test_array) }; 32 | let test_array_end = unsafe { core::ptr::addr_of!(_end_init_test_array) }; 33 | let test_array_size_bytes = test_array_end as usize - test_array_start as usize; 34 | assert!( 35 | test_array_size_bytes % core::mem::size_of::() == 0, 36 | "test array size must be a multiple of Test struct size" 37 | ); 38 | let num_tests = test_array_size_bytes / core::mem::size_of::(); 39 | 40 | let tests = unsafe { 41 | assert!( 42 | test_array_start as usize % core::mem::align_of::() == 0, 43 | "test array start must be aligned to Test struct alignment" 44 | ); 45 | #[allow(clippy::cast_ptr_alignment)] 46 | core::slice::from_raw_parts(test_array_start.cast::(), num_tests) 47 | }; 48 | tests 49 | } 50 | 51 | #[kernel_test] 52 | fn my_test_fn() { 53 | let x = 1; 54 | assert!(x == 1); 55 | } 56 | 57 | #[kernel_test] 58 | fn my_other_test() { 59 | let x = "hello"; 60 | assert!(x == "hello"); 61 | } 62 | 63 | proptest!( 64 | #[kernel_test] 65 | fn example_proptest_test(x in 0..100u8) { 66 | prop_assert!(x < 100); 67 | } 68 | ); 69 | -------------------------------------------------------------------------------- /kernel/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod magic; 2 | 3 | pub(crate) use test_macro::kernel_test; 4 | 5 | pub(crate) fn run_test_suite() { 6 | magic::run_tests_from_linker(); 7 | } 8 | -------------------------------------------------------------------------------- /kernel/src/tick.rs: -------------------------------------------------------------------------------- 1 | //! Global tick system that runs every `TICK_HZ` times per second. 2 | 3 | use alloc::boxed::Box; 4 | use alloc::collections::VecDeque; 5 | 6 | use crate::hpet::Milliseconds; 7 | use crate::interrupts::{InterruptVector, ReservedInterruptVector}; 8 | use crate::sync::SpinLock; 9 | use crate::{apic, hpet, interrupts, ioapic, sched}; 10 | 11 | /// Frequency of the global tick system. 
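///
/// With `TICK_HZ = 20`, `TICK_MILLIS` works out to `1000 / 20 = 50`
/// milliseconds between ticks.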
12 | const TICK_HZ: u64 = 20; 13 | 14 | const TICK_MILLIS: Milliseconds = Milliseconds::new(1000 / TICK_HZ); 15 | 16 | /// Global list of timers 17 | static TIMERS: SpinLock> = SpinLock::new(VecDeque::new()); 18 | 19 | #[allow(clippy::assertions_on_constants)] 20 | pub(crate) fn global_init() { 21 | assert!( 22 | 1000 % TICK_HZ == 0, 23 | "TICK_HZ must be a divisor of 1000 so we can evenly divide milliseconds into ticks" 24 | ); 25 | 26 | hpet::enable_periodic_timer_handler( 27 | 0, 28 | tick_broadcast_handler, 29 | ioapic::IOAPICIRQNumber::Tick, 30 | hpet::HPETTimerNumber::Tick, 31 | TICK_MILLIS, 32 | ); 33 | } 34 | 35 | pub(crate) fn per_cpu_init() { 36 | interrupts::install_interrupt_reserved_vector( 37 | ReservedInterruptVector::CPUTick, 38 | 0, 39 | cpu_tick_handler, 40 | ); 41 | } 42 | 43 | /// Handler for tick from the HPET. Broadcasts to all CPUs. 44 | fn tick_broadcast_handler(_vector: InterruptVector, _handler_id: interrupts::InterruptHandlerID) { 45 | // Iterate through all timers and fire off + remove ones that expired. 46 | TIMERS.lock().retain_mut(|timer| { 47 | if timer.expiration <= hpet::elapsed_milliseconds() { 48 | (timer.callback)(); 49 | false 50 | } else { 51 | true 52 | } 53 | }); 54 | 55 | // Send a tick to all CPUs 56 | apic::send_ipi_all_cpus(InterruptVector(ReservedInterruptVector::CPUTick as u8)); 57 | } 58 | 59 | fn cpu_tick_handler(_vector: InterruptVector, _handler_id: interrupts::InterruptHandlerID) { 60 | // Let the scheduler do accounting 61 | sched::scheduler_tick(TICK_MILLIS); 62 | } 63 | 64 | struct Timer { 65 | /// Expiration time in milliseconds since boot. 66 | expiration: Milliseconds, 67 | 68 | /// Callback to call when the timer expires. This function is called in an 69 | /// interrupt context, so it must be fast and it must not sleep, block, or 70 | /// take spin locks that shouldn't be taken in an interrupt context! 71 | /// 72 | /// TODO: Implement something akin to linux softirq so we can be more 73 | /// flexible with our timers. 74 | callback: Box, 75 | } 76 | 77 | /// Adds a timer to be called after the global milliseconds since boot reaches 78 | /// the given number of milliseconds. 79 | pub(crate) fn add_timer(expiration: Milliseconds, callback: F) 80 | where 81 | F: FnMut() + Send + 'static, 82 | { 83 | let mut timers = TIMERS.lock(); 84 | let timer = Timer { 85 | expiration, 86 | callback: Box::new(callback), 87 | }; 88 | timers.push_back(timer); 89 | } 90 | 91 | /// Adds a timer to be called after the given number of milliseconds. 92 | pub(crate) fn add_relative_timer(timeout: Milliseconds, callback: F) 93 | where 94 | F: FnMut() + Send + 'static, 95 | { 96 | let current_millis = hpet::elapsed_milliseconds(); 97 | let expiration = current_millis + timeout; 98 | add_timer(expiration, callback); 99 | } 100 | -------------------------------------------------------------------------------- /kernel/src/transmute.rs: -------------------------------------------------------------------------------- 1 | //! Utilities to cast between types and bytes. 2 | 3 | use core::marker::PhantomData; 4 | use core::ops::{Deref, DerefMut}; 5 | 6 | use zerocopy::{AsBytes, FromBytes, LayoutVerified}; 7 | 8 | /// Casts a slice of bytes to a reference of the given type. 9 | pub(crate) fn try_cast_bytes_ref(bytes: &[u8]) -> Option<&T> { 10 | Some(LayoutVerified::<_, T>::new_from_prefix(bytes)?.0.into_ref()) 11 | } 12 | 13 | /// Casts a slice of bytes to a reference of the given type, starting at the 14 | /// given offset. 
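///
/// For example (a sketch; `Superblock` stands in for any `FromBytes` type and
/// the offset is illustrative):
///
/// ```ignore
/// let superblock: Option<&Superblock> = try_cast_bytes_ref_offset(&buffer, 1024);
/// ```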
15 | pub(crate) fn try_cast_bytes_ref_offset(bytes: &[u8], offset: usize) -> Option<&T> { 16 | let bytes = bytes.get(offset..)?; 17 | try_cast_bytes_ref(bytes) 18 | } 19 | 20 | /// Casts a slice of bytes to a mutable reference of the given type. 21 | pub(crate) fn try_cast_bytes_ref_mut(bytes: &mut [u8]) -> Option<&mut T> { 22 | Some(LayoutVerified::<_, T>::new_from_prefix(bytes)?.0.into_mut()) 23 | } 24 | 25 | /// Casts a slice of bytes to a mutable reference of the given type, starting at 26 | /// the given offset. 27 | pub(crate) fn try_cast_bytes_ref_mut_offset( 28 | bytes: &mut [u8], 29 | offset: usize, 30 | ) -> Option<&mut T> { 31 | let bytes = bytes.get_mut(offset..)?; 32 | try_cast_bytes_ref_mut(bytes) 33 | } 34 | 35 | pub(crate) fn try_write_bytes_offset( 36 | bytes: &mut [u8], 37 | offset: usize, 38 | value: T, 39 | ) -> Option<()> { 40 | let val_ref = try_cast_bytes_ref_mut_offset(bytes, offset)?; 41 | *val_ref = value; 42 | Some(()) 43 | } 44 | 45 | /// Wrapper around a buffer `B` that interprets the underlying bytes as a given 46 | /// type. 47 | #[derive(Debug)] 48 | pub(crate) struct TransmuteView { 49 | buffer: B, 50 | _phantom: PhantomData, 51 | } 52 | 53 | impl TransmuteView { 54 | pub(crate) fn buffer(&self) -> &B { 55 | &self.buffer 56 | } 57 | } 58 | 59 | impl, T: FromBytes> TransmuteView { 60 | pub(crate) fn new(buffer: B) -> Option { 61 | // Assert that the conversion works. Ideally we could just store the 62 | // LayoutVerified here, but we can't do that _and_ store the buffer 63 | // itself because it makes the borrow checker unhappy. 64 | let _: &T = try_cast_bytes_ref(buffer.as_ref())?; 65 | 66 | Some(Self { 67 | buffer, 68 | _phantom: PhantomData, 69 | }) 70 | } 71 | } 72 | 73 | impl, T: FromBytes> Deref for TransmuteView { 74 | type Target = T; 75 | 76 | fn deref(&self) -> &Self::Target { 77 | // Invariant: the cast is supposed to be infallible because we checked 78 | // it in the constructor. 79 | try_cast_bytes_ref(self.buffer.as_ref()) 80 | .expect("INTERNAL ERROR: cast is supposed to be infallible") 81 | } 82 | } 83 | 84 | impl + AsMut<[u8]>, T: FromBytes + AsBytes> DerefMut for TransmuteView { 85 | fn deref_mut(&mut self) -> &mut Self::Target { 86 | // Invariant: the cast is supposed to be infallible because we checked 87 | // it in the constructor. 88 | try_cast_bytes_ref_mut(self.buffer.as_mut()) 89 | .expect("INTERNAL ERROR: cast is supposed to be infallible") 90 | } 91 | } 92 | 93 | impl AsRef for TransmuteView 94 | where 95 | T: FromBytes, 96 | B: AsRef<[u8]>, 97 | ::Target: AsRef, 98 | { 99 | #[allow(clippy::explicit_deref_methods)] 100 | fn as_ref(&self) -> &T { 101 | self.deref() 102 | } 103 | } 104 | 105 | impl + AsMut<[u8]>, T: FromBytes + AsBytes> AsMut for TransmuteView 106 | where 107 | ::Target: AsMut, 108 | { 109 | #[allow(clippy::explicit_deref_methods)] 110 | fn as_mut(&mut self) -> &mut T { 111 | self.deref_mut() 112 | } 113 | } 114 | 115 | /// Wrapper around a buffer `B` that interprets the underlying bytes as a 116 | /// collection of the given type. Supports indexing into this collection by a 117 | /// given offset. 
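///
/// Sketch of use; the `<B, T>` parameter order, element type, and offset are
/// illustrative, and casts return `None` when the offset is out of bounds or
/// misaligned for the target type:
///
/// ```ignore
/// let mut table = TransmuteCollection::<Vec<u8>, u32>::new(vec![0u8; 64]);
/// table.write(8, 0xdead_beef);
/// let value: Option<&u32> = table.get(8);
/// ```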
118 | #[derive(Debug)] 119 | pub(crate) struct TransmuteCollection { 120 | buffer: B, 121 | _phantom: PhantomData, 122 | } 123 | 124 | impl TransmuteCollection { 125 | pub(crate) fn new(buffer: B) -> Self { 126 | Self { 127 | buffer, 128 | _phantom: PhantomData, 129 | } 130 | } 131 | 132 | pub(crate) fn buffer(&self) -> &B { 133 | &self.buffer 134 | } 135 | } 136 | 137 | impl, T: FromBytes> TransmuteCollection { 138 | pub(crate) fn get(&self, offset: usize) -> Option<&T> { 139 | try_cast_bytes_ref_offset(self.buffer.as_ref(), offset) 140 | } 141 | } 142 | 143 | impl + AsMut<[u8]>, T: FromBytes + AsBytes> TransmuteCollection { 144 | pub(crate) fn get_mut(&mut self, offset: usize) -> Option<&mut T> { 145 | try_cast_bytes_ref_mut_offset(self.buffer.as_mut(), offset) 146 | } 147 | 148 | pub(crate) fn write(&mut self, offset: usize, value: T) -> Option<()> { 149 | try_write_bytes_offset(self.buffer.as_mut(), offset, value) 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /kernel/src/vfs/fs.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | 3 | use alloc::boxed::Box; 4 | use alloc::string::String; 5 | use alloc::vec::Vec; 6 | 7 | use crate::sync::{Mutex, MutexGuard}; 8 | 9 | use super::FilePath; 10 | 11 | static MOUNTED_ROOT_FILE_SYSTEM: Mutex>> = Mutex::new(None); 12 | 13 | pub(crate) fn mount_root_filesystem(fs: Box) { 14 | MOUNTED_ROOT_FILE_SYSTEM.lock().replace(fs); 15 | } 16 | 17 | pub(crate) fn unmount_root_filesystem() { 18 | MOUNTED_ROOT_FILE_SYSTEM.lock().take(); 19 | } 20 | 21 | pub(crate) fn root_filesystem_lock( 22 | ) -> MutexGuard<'static, Option>> { 23 | MOUNTED_ROOT_FILE_SYSTEM.lock() 24 | } 25 | 26 | /// Top level VFS abstraction for an underlying filesystem. 27 | pub(crate) trait FileSystem { 28 | fn read_root(&mut self) -> Inode; 29 | 30 | fn traverse_path(&mut self, path: &FilePath) -> Option { 31 | let mut inode = self.read_root(); 32 | for component in &path.components { 33 | let InodeType::Directory(mut dir) = inode.inode_type else { 34 | log::warn!("traverse_path: expected directory but found {:?}", inode.inode_type); 35 | return None; 36 | }; 37 | 38 | let mut entry = dir 39 | .subdirectories() 40 | .into_iter() 41 | .find(|entry| entry.name() == component.as_str())?; 42 | inode = entry.get_inode(); 43 | } 44 | Some(inode) 45 | } 46 | } 47 | 48 | #[derive(Debug)] 49 | pub(crate) struct Inode { 50 | pub(crate) inode_type: InodeType, 51 | } 52 | 53 | #[derive(Debug)] 54 | pub(crate) enum InodeType { 55 | File(Box), 56 | Directory(Box), 57 | } 58 | 59 | pub(crate) trait FileInode: Debug { 60 | fn read(&mut self, buffer: &mut [u8], offset: usize) -> FileInodeReadResult; 61 | 62 | fn read_all(&mut self) -> Vec { 63 | let mut buffer = Vec::new(); 64 | let mut offset = 0; 65 | loop { 66 | let mut read_buffer = vec![0u8; 4096]; 67 | match self.read(&mut read_buffer, offset) { 68 | FileInodeReadResult::Success => { 69 | buffer.extend_from_slice(&read_buffer); 70 | offset += read_buffer.len(); 71 | } 72 | FileInodeReadResult::Done { bytes_read } => { 73 | buffer.extend_from_slice(&read_buffer[..bytes_read]); 74 | break; 75 | } 76 | } 77 | } 78 | buffer 79 | } 80 | 81 | fn write(&mut self, _data: &[u8]) -> bool { 82 | false 83 | } 84 | } 85 | 86 | pub(crate) enum FileInodeReadResult { 87 | /// Success means entire buffer was filled. 88 | Success, 89 | 90 | /// Done means the file has been fully read. 
Not all bytes may have been 91 | /// read, so we return how many were read. 92 | Done { bytes_read: usize }, 93 | } 94 | 95 | pub(crate) trait DirectoryInode: Debug { 96 | // TODO: Return an iterator instead of a Vec (probably a dyn for some 97 | // iterator type to avoid an impl in the return position). 98 | fn subdirectories(&mut self) -> Vec<Box<dyn DirectoryEntry>>; 99 | 100 | fn create_file(&mut self, _name: &str) -> Option<Box<dyn FileInode>> { 101 | log::warn!("create_file: not implemented for {:?}", self); 102 | None 103 | } 104 | } 105 | 106 | pub(crate) trait DirectoryEntry: Debug { 107 | fn name(&self) -> String; 108 | fn entry_type(&self) -> DirectoryEntryType; 109 | fn get_inode(&mut self) -> Inode; 110 | } 111 | 112 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 113 | pub(crate) enum DirectoryEntryType { 114 | File, 115 | Directory, 116 | } 117 | 118 | pub(crate) fn get_path_inode(path: &FilePath) -> Result<Inode, String> { 119 | let mut lock = root_filesystem_lock(); 120 | let Some(filesystem) = lock.as_mut() else { 121 | return Err(String::from("No filesystem mounted. Run 'mount ' first.")); 122 | }; 123 | if !path.absolute { 124 | return Err(format!("Path must be absolute. Got {path}")); 125 | } 126 | 127 | let Some(inode) = filesystem.traverse_path(path) else { 128 | return Err(format!("No such file or directory: {path}")); 129 | }; 130 | Ok(inode) 131 | } 132 | -------------------------------------------------------------------------------- /kernel/src/vfs/mod.rs: -------------------------------------------------------------------------------- 1 | mod fs; 2 | mod path; 3 | 4 | pub(crate) use fs::*; 5 | pub(crate) use path::*; 6 | -------------------------------------------------------------------------------- /kernel/src/vfs/path.rs: -------------------------------------------------------------------------------- 1 | use core::str::FromStr; 2 | 3 | use alloc::fmt; 4 | use alloc::string::{String, ToString}; 5 | use alloc::vec::Vec; 6 | 7 | /// A path in the file system. 8 | #[derive(Debug, Clone)] 9 | pub(crate) struct FilePath { 10 | /// An absolute path starts from the root directory. 11 | pub(crate) absolute: bool, 12 | 13 | /// Components of a path not including separators (the `/` character). 14 | pub(crate) components: Vec<FilePathComponent>, 15 | } 16 | 17 | impl FilePath { 18 | pub(crate) fn split_dirname_filename(&self) -> Option<(Self, FilePathComponent)> { 19 | let (filename, parent) = self.components.split_last()?; 20 | let parent_path = Self { 21 | absolute: self.absolute, 22 | components: parent.to_vec(), 23 | }; 24 | Some((parent_path, filename.clone())) 25 | } 26 | 27 | pub(crate) fn as_string(&self) -> String { 28 | let mut s = String::new(); 29 | if self.absolute { 30 | s.push('/'); 31 | } 32 | s.push_str( 33 | &self 34 | .components 35 | .iter() 36 | .map(FilePathComponent::as_str) 37 | .collect::<Vec<_>>() 38 | .join("/"), 39 | ); 40 | s 41 | } 42 | } 43 | 44 | /// A component of a file path. Notably, this cannot include the `/` character, 45 | /// and is non-empty.
46 | #[derive(Debug, Clone)] 47 | pub(crate) struct FilePathComponent(String); 48 | 49 | impl FilePathComponent { 50 | fn new(s: &str) -> Option<Self> { 51 | assert!( 52 | !s.contains('/'), 53 | "constructed FilePathComponent with '/': {s}" 54 | ); 55 | if s.is_empty() { 56 | None 57 | } else { 58 | Some(Self(s.to_string())) 59 | } 60 | } 61 | 62 | pub(crate) fn as_str(&self) -> &str { 63 | &self.0 64 | } 65 | } 66 | 67 | impl fmt::Display for FilePathComponent { 68 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 69 | write!(f, "{}", self.0)?; 70 | Ok(()) 71 | } 72 | } 73 | 74 | impl FilePath { 75 | pub(crate) fn parse(s: &str) -> Option<Self> { 76 | let absolute = s.starts_with('/'); 77 | let components: Vec<FilePathComponent> = s 78 | .split('/') 79 | .filter(|s| !s.is_empty()) 80 | .filter_map(FilePathComponent::new) 81 | .collect(); 82 | if !absolute && components.is_empty() { 83 | None 84 | } else { 85 | Some(Self { 86 | absolute, 87 | components, 88 | }) 89 | } 90 | } 91 | } 92 | 93 | impl FromStr for FilePath { 94 | type Err = &'static str; 95 | 96 | fn from_str(s: &str) -> Result<Self, Self::Err> { 97 | Self::parse(s).ok_or("file path is empty") 98 | } 99 | } 100 | 101 | impl fmt::Display for FilePath { 102 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 103 | write!(f, "{}", self.as_string()) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /kernel/src/virtio/device.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | use core::cmp::min; 3 | 4 | use bitflags::Flags; 5 | 6 | use crate::apic::ProcessorID; 7 | use crate::barrier::barrier; 8 | use crate::interrupts; 9 | use crate::interrupts::{InterruptHandler, InterruptHandlerID}; 10 | 11 | use super::config::{VirtIOConfigStatus, VirtIODeviceConfig}; 12 | use super::features::{Features, ReservedFeatureBits}; 13 | use super::queue::{ 14 | VirtQueue, VirtQueueAvailRing, VirtQueueDescriptorTable, VirtQueueIndex, VirtQueueUsedRing, 15 | }; 16 | 17 | #[derive(Debug)] 18 | pub(super) struct VirtIOInitializedDevice<F> 19 | where 20 | F: Flags<Bits = u128>, 21 | { 22 | pub(super) config: VirtIODeviceConfig, 23 | pub(super) _features: Features<F>, 24 | } 25 | 26 | impl<F> VirtIOInitializedDevice<F> 27 | where 28 | F: Flags<Bits = u128>, 29 | { 30 | /// See "3 General Initialization And Device Operation" and "4.1.5 31 | /// PCI-specific Initialization And Device Operation" 32 | pub(super) fn new( 33 | device_config: VirtIODeviceConfig, 34 | negotiate_device_bits: impl FnOnce(&mut F), 35 | max_virtqueues: u16, 36 | ) -> (Self, Vec<VirtQueue>) { 37 | let config = device_config.common_virtio_config(); 38 | 39 | // Reset the VirtIO device by writing 0 to the status register (see 40 | // 4.1.4.3.1 Device Requirements: Common configuration structure layout) 41 | let mut status = VirtIOConfigStatus::new(); 42 | config.device_status().write(status); 43 | barrier(); 44 | 45 | // Set the ACKNOWLEDGE status bit to indicate that the driver knows 46 | // that the device is present. 47 | status.set_acknowledge(true); 48 | config.device_status().write(status); 49 | barrier(); 50 | 51 | // Set the DRIVER status bit to indicate that the driver is ready to 52 | // drive the device. 53 | status.set_driver(true); 54 | config.device_status().write(status); 55 | barrier(); 56 | 57 | // Feature negotiation. There are up to 128 feature bits, and 58 | // the feature registers are 32 bits wide, so we use the feature 59 | // selection registers 4 times to select features.
60 | let mut features = device_config.get_device_features::<F>(); 61 | 62 | // Disable some features 63 | // 64 | // TODO: Record that we did this in the virtqueues so they know to set 65 | // `used_event` or not in the avail ring, instead of just assuming that 66 | // we did this. 67 | features.negotiate_reserved_bits(|bits| { 68 | // Disable VIRTIO_F_EVENT_IDX so we don't need to mess with `used_event` 69 | // in avail ring. 70 | bits.remove(ReservedFeatureBits::EVENT_IDX); 71 | 72 | // Disable VIRTIO_F_NOTIFICATION_DATA so we don't need to deal with 73 | // extra offset information when notifying device of new avail ring 74 | // entries. 75 | bits.remove(ReservedFeatureBits::NOTIFICATION_DATA); 76 | 77 | // We don't use NOTIF_CONFIG_DATA 78 | bits.remove(ReservedFeatureBits::NOTIF_CONFIG_DATA); 79 | }); 80 | 81 | // Write the features we want to enable 82 | features.negotiate_device_bits(negotiate_device_bits); 83 | device_config.set_driver_features(&features); 84 | 85 | // Set the FEATURES_OK status bit to indicate that the driver has 86 | // written the feature bits. 87 | status.set_features_ok(true); 88 | config.device_status().write(status); 89 | 90 | // Re-read the status to ensure that the FEATURES_OK bit is still set. 91 | status = config.device_status().read(); 92 | assert!(status.features_ok(), "failed to set FEATURES_OK status bit"); 93 | 94 | // Initialize virtqueues 95 | let num_queues = config.num_queues().read(); 96 | assert!( 97 | num_queues > 0, 98 | "number of queues in a VirtIO device must be greater than 0" 99 | ); 100 | 101 | let num_queues = min(num_queues, max_virtqueues); 102 | let mut virtqueues = Vec::with_capacity(num_queues as usize); 103 | for i in 0..num_queues { 104 | let idx = VirtQueueIndex(i); 105 | config.queue_select().write(idx); 106 | 107 | let queue_size = config.queue_size().read(); 108 | assert!( 109 | queue_size > 0, 110 | "queue size for queue {i} must be greater than 0" 111 | ); 112 | 113 | let descriptors = unsafe { 114 | VirtQueueDescriptorTable::allocate(queue_size) 115 | .expect("failed to allocate driver ring buffer") 116 | }; 117 | config.queue_desc().write(descriptors.physical_address()); 118 | 119 | let avail_ring = unsafe { 120 | VirtQueueAvailRing::allocate(queue_size) 121 | .expect("failed to allocate driver ring buffer") 122 | }; 123 | config.queue_driver().write(avail_ring.physical_address()); 124 | 125 | let used_ring = unsafe { 126 | VirtQueueUsedRing::allocate(queue_size) 127 | .expect("failed to allocate driver ring buffer") 128 | }; 129 | config.queue_device().write(used_ring.physical_address()); 130 | 131 | // Enable the queue 132 | config.queue_enable().write(1); 133 | 134 | virtqueues.push(VirtQueue::new( 135 | idx, 136 | device_config.notify_config(), 137 | config.queue_notify_off().read(), 138 | descriptors, 139 | avail_ring, 140 | used_ring, 141 | )); 142 | } 143 | 144 | // TODO: Device-specific setup 145 | 146 | // Set the DRIVER_OK status bit to indicate that the driver 147 | // finished configuring the device.
148 | status.set_driver_ok(true); 149 | config.device_status().write(status); 150 | 151 | let device = Self { 152 | config: device_config, 153 | _features: features, 154 | }; 155 | (device, virtqueues) 156 | } 157 | 158 | pub(super) fn install_virtqueue_msix_handler( 159 | &mut self, 160 | virtqueue_index: VirtQueueIndex, 161 | msix_table_index: u16, 162 | processor_id: ProcessorID, 163 | handler_id: InterruptHandlerID, 164 | handler: InterruptHandler, 165 | ) { 166 | // Select the virtqueue and tell it to use the given MSI-X table index 167 | let common_config = self.config.common_virtio_config(); 168 | common_config.queue_select().write(virtqueue_index); 169 | common_config.queue_msix_vector().write(msix_table_index); 170 | 171 | // Read back the virtqueue's MSI-X table index to ensure that it was 172 | // set correctly 173 | assert_eq!( 174 | common_config.queue_msix_vector().read(), 175 | msix_table_index, 176 | "failed to set virtqueue's MSI-X table index" 177 | ); 178 | 179 | // Install the interrupt handler via MSI-X 180 | let msix = self 181 | .config 182 | .pci_type0_config() 183 | .msix_config() 184 | .expect("failed to get MSIX config for VirtIO device"); 185 | let interrupt_vector = interrupts::install_interrupt_next_vector(handler_id, handler); 186 | let mut table_entry = msix.table_entry(msix_table_index as usize); 187 | table_entry.set_interrupt_vector(processor_id, interrupt_vector); 188 | msix.enable(); 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /kernel/src/virtio/features.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::marker::PhantomData; 3 | 4 | use bitflags::{bitflags, Flags}; 5 | 6 | /// See "2.2 Feature Bits". The `F` type parameter is used to specify 7 | /// the device-specific feature bits via a `Flags` implementation. 8 | pub(super) struct Features<F> { 9 | bits: u128, 10 | _phantom: PhantomData<F>, 11 | } 12 | 13 | bitflags! { 14 | #[derive(Debug)] 15 | #[repr(transparent)] 16 | /// See "2.2 Feature Bits" and "6 Reserved Feature Bits" in the VirtIO spec. 17 | pub(super) struct ReservedFeatureBits: u128 { 18 | const INDIRECT_DESC = 1 << 28; 19 | const EVENT_IDX = 1 << 29; 20 | const VERSION_1 = 1 << 32; 21 | const ACCESS_PLATFORM = 1 << 33; 22 | const RING_PACKED = 1 << 34; 23 | const IN_ORDER = 1 << 35; 24 | const ORDER_PLATFORM = 1 << 36; 25 | const SR_IOV = 1 << 37; 26 | const NOTIFICATION_DATA = 1 << 38; 27 | const NOTIF_CONFIG_DATA = 1 << 39; 28 | const RING_RESET = 1 << 40; 29 | } 30 | } 31 | 32 | impl<F> Features<F> 33 | where 34 | F: Flags<Bits = u128>, 35 | { 36 | pub(super) fn new(bits: u128) -> Self { 37 | Self { 38 | bits, 39 | _phantom: PhantomData, 40 | } 41 | } 42 | 43 | pub(super) fn as_u128(&self) -> u128 { 44 | self.bits 45 | } 46 | 47 | pub(super) fn negotiate_reserved_bits(&mut self, f: impl FnOnce(&mut ReservedFeatureBits)) { 48 | self.negotiate_flags_impl(f); 49 | } 50 | 51 | pub(super) fn negotiate_device_bits(&mut self, f: impl FnOnce(&mut F)) 52 | where 53 | F: Flags, 54 | { 55 | self.negotiate_flags_impl(f); 56 | } 57 | 58 | // Separate function to make type parameter stuff clearer between reserved 59 | // and device flags.
60 | fn negotiate_flags_impl<I>(&mut self, f: impl FnOnce(&mut I)) 61 | where 62 | I: Flags<Bits = u128>, 63 | { 64 | let mut bits = I::from_bits_retain(self.bits); 65 | f(&mut bits); 66 | self.bits = bits.bits(); 67 | } 68 | } 69 | 70 | impl<F> fmt::Debug for Features<F> 71 | where 72 | F: Flags<Bits = u128> + fmt::Debug, 73 | { 74 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 75 | f.debug_struct("Features") 76 | .field( 77 | "reserved", 78 | &ReservedFeatureBits::from_bits_truncate(self.bits), 79 | ) 80 | .field("device_specific", &F::from_bits_truncate(self.bits)) 81 | .finish() 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /kernel/src/virtio/mod.rs: -------------------------------------------------------------------------------- 1 | mod block; 2 | mod config; 3 | mod device; 4 | mod features; 5 | mod queue; 6 | mod rng; 7 | 8 | pub(crate) use block::*; 9 | pub(crate) use config::*; 10 | pub(crate) use rng::*; 11 | -------------------------------------------------------------------------------- /kernel/src/virtio/rng.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use bitflags::bitflags; 3 | use x86_64::PhysAddr; 4 | 5 | use crate::apic::ProcessorID; 6 | use crate::interrupts::{InterruptHandlerID, InterruptVector}; 7 | use crate::memory::{KernPhysAddr, PhysicalBuffer}; 8 | use crate::sync::{once_channel, OnceReceiver, OnceSender, SpinLock}; 9 | 10 | use super::device::VirtIOInitializedDevice; 11 | use super::queue::{ 12 | ChainedVirtQueueDescriptorElem, VirtQueue, VirtQueueData, VirtQueueDescriptorFlags, 13 | }; 14 | use super::VirtIODeviceConfig; 15 | 16 | static VIRTIO_RNG: SpinLock<Option<VirtIORNG>> = SpinLock::new(None); 17 | 18 | pub(crate) fn try_init_virtio_rng(device_config: VirtIODeviceConfig) { 19 | let device_id = device_config.pci_config().device_id(); 20 | if device_id.vendor_id() != 0x1af4 { 21 | return; 22 | } 23 | if !VirtIORNG::VENDOR_IDS.contains(&device_id.device_id()) { 24 | return; 25 | } 26 | 27 | let mut virtio_rng = VirtIORNG::from_device(device_config); 28 | virtio_rng.enable_msix(ProcessorID(0)); 29 | 30 | VIRTIO_RNG.lock_disable_interrupts().replace(virtio_rng); 31 | } 32 | 33 | pub(crate) fn request_random_numbers(num_bytes: u32) -> OnceReceiver<Box<[u8]>> { 34 | let mut lock = VIRTIO_RNG.lock_disable_interrupts(); 35 | let rng = lock.as_mut().expect("VirtIO RNG not initialized"); 36 | rng.request_random_numbers(num_bytes) 37 | } 38 | 39 | /// See "5.4 Entropy Device" in the VirtIO spec. The virtio entropy device 40 | /// supplies high-quality randomness for guest use.
41 | #[derive(Debug)] 42 | struct VirtIORNG { 43 | initialized_device: VirtIOInitializedDevice<RNGFeatureBits>, 44 | virtqueue: VirtQueueData<VirtIORNGRequest>, 45 | } 46 | 47 | impl VirtIORNG { 48 | const VENDOR_IDS: [u16; 2] = [0x1005, 0x1044]; 49 | 50 | fn from_device(device_config: VirtIODeviceConfig) -> Self { 51 | let device_id = device_config.pci_config().device_id().device_id(); 52 | assert!( 53 | Self::VENDOR_IDS.contains(&device_id), 54 | "VirtIORNG: Device ID mismatch, got {device_id}" 55 | ); 56 | 57 | let (initialized_device, virtqueues) = 58 | VirtIOInitializedDevice::new(device_config, |_: &mut RNGFeatureBits| {}, 1); 59 | 60 | let num_virtqueues = virtqueues.len(); 61 | let virtqueue = if let Ok::<[VirtQueue; 1], _>([virtqueue]) = virtqueues.try_into() { 62 | VirtQueueData::new(virtqueue) 63 | } else { 64 | panic!("VirtIORNG: expected exactly one virtqueue, got {num_virtqueues}"); 65 | }; 66 | 67 | Self { 68 | initialized_device, 69 | virtqueue, 70 | } 71 | } 72 | 73 | fn enable_msix(&mut self, processor_id: ProcessorID) { 74 | let msix_table_id = 0; 75 | let handler_id = 1; // If we had multiple RNG devices, we could disambiguate them 76 | self.initialized_device.install_virtqueue_msix_handler( 77 | self.virtqueue.index(), 78 | msix_table_id, 79 | processor_id, 80 | handler_id, 81 | virtio_rng_interrupt, 82 | ); 83 | } 84 | 85 | fn request_random_numbers(&mut self, num_bytes: u32) -> OnceReceiver<Box<[u8]>> { 86 | assert!(num_bytes > 0, "cannot request zero bytes from RNG!"); 87 | 88 | // Create a descriptor chain for the buffer 89 | let buffer = PhysicalBuffer::allocate_zeroed(num_bytes as usize) 90 | .expect("failed to allocate rng buffer"); 91 | let desc = ChainedVirtQueueDescriptorElem { 92 | addr: PhysAddr::from(buffer.address()), 93 | len: num_bytes, 94 | flags: VirtQueueDescriptorFlags::new().with_device_write(true), 95 | }; 96 | let (sender, receiver) = once_channel(); 97 | let request = VirtIORNGRequest { 98 | _descriptor_buffer: buffer, 99 | sender, 100 | }; 101 | 102 | self.virtqueue.add_buffer(&[desc], request); 103 | self.virtqueue.notify_device(); 104 | 105 | receiver 106 | } 107 | } 108 | 109 | bitflags! { 110 | #[derive(Debug)] 111 | #[repr(transparent)] 112 | /// VirtIO RNG device has no device-specific feature bits. See "5.4.3 113 | /// Feature bits". 114 | struct RNGFeatureBits: u128 { 115 | } 116 | } 117 | 118 | #[derive(Debug)] 119 | struct VirtIORNGRequest { 120 | // Buffer is kept here so we can drop it when we are done with the request. 121 | _descriptor_buffer: PhysicalBuffer, 122 | sender: OnceSender<Box<[u8]>>, 123 | } 124 | 125 | fn virtio_rng_interrupt(_vector: InterruptVector, _handler_id: InterruptHandlerID) { 126 | let mut lock = VIRTIO_RNG.lock_disable_interrupts(); 127 | let rng = lock.as_mut().expect("VirtIO RNG not initialized"); 128 | 129 | rng.virtqueue 130 | .process_new_entries(|used_entry, mut descriptor_chain, request| { 131 | let Some(request) = request else { 132 | log::warn!("VirtIO RNG: no request for used entry: {used_entry:#x?}"); 133 | return; 134 | }; 135 | 136 | let descriptor = descriptor_chain.next().expect("no descriptor in chain"); 137 | assert!( 138 | descriptor_chain.next().is_none(), 139 | "more than one descriptor in RNG chain" 140 | ); 141 | 142 | // The used entry should be using the exact same buffer we just 143 | // created, but let's pretend we didn't know that.
144 | let addr = KernPhysAddr::from(descriptor.addr); 145 | let buffer = unsafe { 146 | core::slice::from_raw_parts( 147 | addr.as_ptr::<u8>(), 148 | // NOTE: Using the length from the used entry, not the buffer 149 | // length, b/c the RNG device might not have written the whole 150 | // thing! 151 | used_entry.len as usize, 152 | ) 153 | }; 154 | 155 | request.sender.send(Box::from(buffer)); 156 | }); 157 | } 158 | -------------------------------------------------------------------------------- /kernel/x86_64-rust_os.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", 4 | "arch": "x86_64", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "32", 8 | "os": "none", 9 | "executables": true, 10 | "linker-flavor": "ld.lld", 11 | "linker": "rust-lld", 12 | "panic-strategy": "abort", 13 | "disable-redzone": true, 14 | "features": "-mmx,-sse,+soft-float" 15 | } 16 | -------------------------------------------------------------------------------- /limine.cfg: -------------------------------------------------------------------------------- 1 | # Timeout in seconds that Limine will use before automatically booting. 2 | TIMEOUT=0 3 | 4 | # The entry name that will be displayed in the boot menu. 5 | :Rust OS 6 | # We use the Limine boot protocol. 7 | PROTOCOL=limine 8 | 9 | # Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located. 10 | KERNEL_PATH=boot:///kernel.elf 11 | 12 | # Load symbol table 13 | MODULE_PATH=boot:///kernel.symbols 14 | 15 | CMDLINE= 16 | -------------------------------------------------------------------------------- /old-code/grub-64-bit-boot/README.md: -------------------------------------------------------------------------------- 1 | # multiboot2/GRUB bootloader 2 | 3 | I replaced this with [limine](https://github.com/limine-bootloader/limine) in . 4 | -------------------------------------------------------------------------------- /old-code/grub-64-bit-boot/grub.cfg: -------------------------------------------------------------------------------- 1 | set timeout=0 2 | set default=0 3 | 4 | menuentry "my os" { 5 | multiboot2 /boot/kernel.bin 6 | boot 7 | } 8 | -------------------------------------------------------------------------------- /old-code/grub-64-bit-boot/linker.ld: -------------------------------------------------------------------------------- 1 | ENTRY(start) 2 | 3 | SECTIONS { 4 | . = 1M; 5 | 6 | .boot : 7 | { 8 | /* ensure that the multiboot header is at the beginning */ 9 | *(.multiboot_header) 10 | } 11 | 12 | .text : 13 | { 14 | *(.text .text.*) 15 | } 16 | 17 | .rodata : { 18 | *(.rodata .rodata.*) 19 | } 20 | 21 | .bss : { 22 | *(.bss .bss.*) 23 | } 24 | 25 | .data.rel.ro : { 26 | *(.data.rel.ro.local*) *(.data.rel.ro .data.rel.ro.*) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /old-code/grub-64-bit-boot/long_mode_init.asm: -------------------------------------------------------------------------------- 1 | global long_mode_start 2 | 3 | extern multiboot_header 4 | 5 | section .text 6 | bits 64 7 | long_mode_start: 8 | ; Store multiboot header location in rdi so it is passed as the first 9 | ; parameter to the kernel.
(rdi stores the first arg to a function in 10 | ; the x86_64 ABI) 11 | xor rdi, rdi ; Clear rdi so we can store the pointer in edi and not worry about the upper 32 bits 12 | mov edi, [multiboot_header] 13 | 14 | ; Call the kernel main function 15 | extern kmain 16 | call kmain 17 | 18 | ; print `OKAY` to screen 19 | mov rax, 0x2f592f412f4b2f4f 20 | mov qword [0xb8000], rax 21 | hlt 22 | -------------------------------------------------------------------------------- /old-code/grub-64-bit-boot/multiboot_header.asm: -------------------------------------------------------------------------------- 1 | section .multiboot_header 2 | header_start: 3 | dd 0xe85250d6 ; magic number (multiboot 2) 4 | dd 0 ; architecture 0 (protected mode i386) 5 | dd header_end - header_start ; header length 6 | ; checksum 7 | dd 0x100000000 - (0xe85250d6 + 0 + (header_end - header_start)) 8 | 9 | ; insert optional multiboot tags here 10 | 11 | ; required end tag 12 | dw 0 ; type 13 | dw 0 ; flags 14 | dd 8 ; size 15 | header_end: 16 | -------------------------------------------------------------------------------- /scripts/create-boot-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Used by the Makefile to create a bootable disk image with the kernel and limine 4 | # 5 | # Adapted from https://github.com/limine-bootloader/limine-barebones/blob/trunk/GNUmakefile 6 | 7 | set -eu 8 | 9 | if [ $# -ne 3 ]; then 10 | echo "Usage: $0 <kernel_hdd> <kernel_binary> <cmdline>" 11 | exit 1 12 | fi 13 | 14 | cd "$(dirname "$0")/.." 15 | 16 | kernel_hdd=$1 17 | kernel_binary=$2 18 | cmdline=$3 19 | 20 | limine_dir=$(nix build ./flake#limine --print-out-paths --no-link) 21 | 22 | echo "kernel_hdd: $kernel_hdd" 23 | echo "kernel_binary: $kernel_binary" 24 | echo "limine_dir: $limine_dir" 25 | echo "cmdline: $cmdline" 26 | 27 | rm -f "$kernel_hdd" 28 | dd if=/dev/zero bs=1M count=0 seek=64 of="$kernel_hdd" 29 | parted -s "$kernel_hdd" mklabel gpt 30 | parted -s "$kernel_hdd" mkpart ESP fat32 2048s 100% 31 | parted -s "$kernel_hdd" set 1 esp on 32 | "$limine_dir/limine-deploy" "$kernel_hdd" 33 | 34 | loopback_dev=$(sudo losetup -Pf --show "$kernel_hdd") 35 | echo "loopback_dev: $loopback_dev" 36 | sudo mkfs.fat -F 32 "${loopback_dev}p1" 37 | mkdir -p img_mount 38 | sudo mount "${loopback_dev}p1" img_mount 39 | sudo mkdir -p img_mount/EFI/BOOT 40 | sudo cp -v "$kernel_binary" img_mount/kernel.elf 41 | 42 | # Run nm to create a map of all the kernel's symbols. Useful for stack traces 43 | nm="nm" 44 | # N.B. llvm-nm should be better, but its --demangle doesn't actually work well for some reason 45 | # nm="$(rustc --print sysroot)/lib/rustlib/x86_64-unknown-linux-gnu/bin/llvm-nm" 46 | sudo "$nm" --demangle "$kernel_binary" | sudo tee img_mount/kernel.symbols > /dev/null 47 | 48 | sudo cp -v limine.cfg "$limine_dir/limine.sys" img_mount/ 49 | sudo sed -i "s|CMDLINE=|CMDLINE=$cmdline|" img_mount/limine.cfg 50 | sudo cp -v "$limine_dir/BOOTX64.EFI" img_mount/EFI/BOOT/ 51 | sync img_mount 52 | 53 | sudo umount img_mount 54 | sudo losetup -d "$loopback_dev" 55 | rm -rf img_mount 56 | -------------------------------------------------------------------------------- /scripts/create-test-ext2-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | if [ "$#" -ne 1 ]; then 6 | echo "Usage: $0 OUTPUT_FILE" >&2 7 | exit 1 8 | fi 9 | 10 | cd "$(dirname "$0")/.."
11 | 12 | output_file="$1" 13 | 14 | rm -f "$output_file" 15 | truncate -s 16M "$output_file" 16 | mkfs.ext2 "$output_file" 17 | 18 | # Mount the image to a temporary directory. 19 | mount_dir=/tmp/ext2-test-image-mount 20 | rm -rf "$mount_dir" 21 | mkdir -p "$mount_dir" 22 | sudo mount -oloop "$output_file" "$mount_dir" 23 | user=$USER 24 | sudo chown "$user" "$mount_dir" 25 | 26 | # Populate some files 27 | echo "Hello, world!" > "$mount_dir/hello.txt" 28 | mkdir "$mount_dir/nested-dir" 29 | echo "Nested hello" > "$mount_dir/nested-dir/nested.txt" 30 | 31 | # Include userspace files 32 | mkdir "$mount_dir/bin" 33 | 34 | make -C userspace/hello clean 35 | make -C userspace/hello 36 | cp userspace/hello/hello "$mount_dir/bin/hello" 37 | 38 | make -C userspace/primes clean 39 | make -C userspace/primes 40 | cp userspace/primes/primes "$mount_dir/bin/primes" 41 | 42 | # Unmount 43 | sudo exa --tree -lahgnimuU "$mount_dir" 44 | sync "$mount_dir" 45 | sudo umount "$mount_dir" 46 | rm -rf "$mount_dir" 47 | -------------------------------------------------------------------------------- /scripts/create-test-fat-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | if [ "$#" -ne 1 ]; then 6 | echo "Usage: $0 OUTPUT_FILE" >&2 7 | exit 1 8 | fi 9 | 10 | output_file="$1" 11 | 12 | dd if=/dev/zero of="$output_file" bs=1024 count=2000 13 | mformat -i "$output_file" 14 | 15 | long_txt=/tmp/long.txt 16 | rm -f "$long_txt" 17 | for i in $(seq 1 1000); do 18 | echo "Rust is cool!" >> "$long_txt" 19 | done 20 | mcopy -i "$output_file" "$long_txt" :: 21 | 22 | short_txt=/tmp/short.txt 23 | echo "Rust is cool!" > "$short_txt" 24 | mcopy -i "$output_file" "$short_txt" :: 25 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { system ? builtins.currentSystem }: 2 | 3 | # Flake is in a subdirectory so we don't copy this entire repo to /nix/store. 4 | # See: 5 | # - https://github.com/NixOS/nix/issues/3121 6 | # - https://discourse.nixos.org/t/tweag-nix-dev-update-31/19481 7 | # - https://github.com/NixOS/nix/issues/4097 8 | (builtins.getFlake (toString ./flake)).devShells.${system}.default 9 | -------------------------------------------------------------------------------- /userspace/hello/.gitignore: -------------------------------------------------------------------------------- 1 | /hello 2 | *.o 3 | *.lst 4 | -------------------------------------------------------------------------------- /userspace/hello/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all 2 | all: hello 3 | 4 | hello: hello.o 5 | ld -o $@ $? 6 | 7 | hello.o: hello.asm 8 | nasm -f elf64 -F dwarf -g $? 
9 | 10 | .PHONY: clean 11 | clean: 12 | rm -f hello hello.o 13 | -------------------------------------------------------------------------------- /userspace/hello/hello.asm: -------------------------------------------------------------------------------- 1 | global _start 2 | 3 | section .text 4 | 5 | _start: 6 | int3 ; Test interrupts 7 | 8 | mov rdi, 1 ; print( 9 | mov rsi, msg ; "Hello, world!\n", 10 | mov rdx, msglen ; sizeof("Hello, world!\n") 11 | syscall ; ); 12 | 13 | mov rdi, 0 ; exit( 14 | mov rsi, 0 ; EXIT_SUCCESS 15 | syscall ; ); 16 | 17 | section .rodata 18 | msg: db "Hello, world!", 10 19 | msglen: equ $ - msg 20 | -------------------------------------------------------------------------------- /userspace/primes/.gitignore: -------------------------------------------------------------------------------- 1 | /primes 2 | *.o 3 | -------------------------------------------------------------------------------- /userspace/primes/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all 2 | all: primes 3 | 4 | primes: runtime.o primes.o 5 | ld -static -z noexecstack -o $@ $? 6 | 7 | primes.o: primes.c 8 | gcc -nostdlib -g -O0 -fno-stack-protector -o $@ -c $< 9 | 10 | runtime: runtime.o 11 | ld -o $@ $? 12 | 13 | runtime.o: runtime.asm 14 | nasm -f elf64 -F dwarf -g $? 15 | 16 | .PHONY: clean 17 | clean: 18 | rm -f primes *.o 19 | -------------------------------------------------------------------------------- /userspace/primes/primes.c: -------------------------------------------------------------------------------- 1 | extern void syscall_print(char *str, int len); 2 | extern void syscall_exit(int exit_code); 3 | 4 | int is_prime(int x) { 5 | for (int i = 2; i < x; i++) { 6 | if (x % i == 0) { 7 | return 0; 8 | } 9 | } 10 | return 1; 11 | } 12 | 13 | int naive_nth_prime(int n) { 14 | int i = 2; 15 | int count = 0; 16 | while (count < n) { 17 | if (is_prime(i)) { 18 | count++; 19 | } 20 | i++; 21 | } 22 | return i; 23 | } 24 | 25 | int _strlen(char *str) { 26 | int len = 0; 27 | while (*str != '\0') { 28 | len++; 29 | str++; 30 | } 31 | return len; 32 | } 33 | 34 | char *_strcpy(char *dest, char *src) { 35 | int len = 0; 36 | while (*src != '\0') { 37 | *dest = *src; 38 | len++; 39 | src++; 40 | dest++; 41 | } 42 | *dest = '\0'; 43 | return dest; 44 | } 45 | 46 | void int_to_str(int x, char buffer[]) { 47 | // First dump the number into the buffer in reverse order 48 | int i = 0; 49 | while (x > 0) { 50 | buffer[i] = (x % 10) + '0'; 51 | x /= 10; 52 | i++; 53 | } 54 | buffer[i] = '\0'; 55 | 56 | // Now reverse the buffer 57 | int j = i - 1; 58 | i = 0; 59 | while (i < j) { 60 | char tmp = buffer[i]; 61 | buffer[i] = buffer[j]; 62 | buffer[j] = tmp; 63 | i++; 64 | j--; 65 | } 66 | } 67 | 68 | int str_to_int(char *str) { 69 | int x = 0; 70 | while (*str != '\0') { 71 | x *= 10; 72 | 73 | if (*str < '0' || *str > '9') { 74 | char *err = "Error: non-digit character in integer string\n"; 75 | syscall_print(err, _strlen(err)); 76 | syscall_exit(1); 77 | } 78 | 79 | x += *str - '0'; 80 | str++; 81 | } 82 | return x; 83 | } 84 | 85 | int main(int argc, char *argv[]) { 86 | // First argument is the index of the prime to find 87 | if (argc != 2) { 88 | char *usage = "Usage: primes \n"; 89 | syscall_print(usage, _strlen(usage)); 90 | syscall_exit(1); 91 | } 92 | char *n_str = argv[1]; 93 | int n = str_to_int(n_str); 94 | 95 | // Perform computation 96 | int nth_prime = naive_nth_prime(n); 97 | 98 | // Construct output message 99 | char 
buffer[100] = "The "; 100 | char *ptr = buffer + _strlen(buffer); 101 | ptr = _strcpy(ptr, n_str); 102 | ptr = _strcpy(ptr, "th prime is: "); 103 | int_to_str(nth_prime, ptr); 104 | 105 | syscall_print((char *)buffer, _strlen(buffer)); 106 | 107 | return 0; 108 | } 109 | -------------------------------------------------------------------------------- /userspace/primes/runtime.asm: -------------------------------------------------------------------------------- 1 | section .text 2 | 3 | extern main 4 | 5 | global _start: 6 | 7 | ;; See "3.4 Process Initialization" in the System V AMD64 ABI spec, and 8 | ;; https://lwn.net/Articles/631631/ for an explanation of what the stack looks 9 | ;; like. 10 | _start: 11 | ; Pop argc off the stack and into rdi (first argument to main) 12 | pop rdi 13 | 14 | ; Get argv from stack and put into rsi 15 | mov rsi, rsp 16 | 17 | ; Call main 18 | call main 19 | 20 | ; Exit the program with the exit code from main (return value is in rax) 21 | mov rdi, rax 22 | call syscall_exit 23 | 24 | global syscall_print 25 | 26 | ;; Calls the print syscall. String to be printed is in rdi, length of string is rsi. 27 | syscall_print: 28 | ; Shift args into correct registers 29 | mov rdx, rsi ; length of string 30 | mov rsi, rdi ; string to print 31 | mov rdi, 1 ; print syscall 32 | syscall 33 | ret 34 | 35 | global syscall_exit 36 | 37 | ;; Exit code is in rdi 38 | syscall_exit: 39 | ; Shift args into correct registers 40 | mov rsi, rdi ; exit code 41 | mov rdi, 0 ; exit syscall 42 | syscall 43 | --------------------------------------------------------------------------------