├── .cargo └── config ├── .gitignore ├── Cargo.toml ├── Makefile ├── README.md ├── boot └── x86_64 │ ├── boot.asm │ ├── grub.cfg │ └── linker.ld ├── kernel ├── .cargo │ └── config ├── Cargo.toml └── src │ ├── buddy_alloc.rs │ ├── elf.rs │ ├── fat16.rs │ ├── frame_alloc.rs │ ├── gdt.rs │ ├── global_alloc.rs │ ├── interrupts.rs │ ├── lib.rs │ ├── mem.rs │ ├── port.rs │ ├── scheduler.rs │ ├── serial_port.rs │ ├── syscalls.rs │ └── vga_buffer.rs ├── runner ├── .cargo │ └── config ├── Cargo.toml └── src │ └── main.rs ├── userspace ├── Cargo.toml └── src │ ├── boot.rs │ └── lib.rs └── x86_64-rust_os.json /.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "x86_64-rust_os.json" 3 | 4 | [target.'cfg(target_os = "none")'] 5 | runner = "bootimage runner" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | .idea 4 | Cargo.lock 5 | .gdb_history 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "kernel", 4 | "runner", 5 | "userspace" 6 | ] 7 | 8 | [profile.dev] 9 | panic = "abort" 10 | 11 | [profile.release] 12 | panic = "abort" 13 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | arch ?= x86_64 2 | kernel := target/kernel-$(arch).bin 3 | iso := target/rust-os-$(arch).iso 4 | 5 | linker_script := boot/$(arch)/linker.ld 6 | ld_mapfile := target/linker.map 7 | grub_cfg := boot/$(arch)/grub.cfg 8 | assembly_source_files := $(wildcard boot/$(arch)/*.asm) 9 | assembly_object_files := $(patsubst boot/$(arch)/%.asm, target/arch/$(arch)/%.o, $(assembly_source_files)) 10 | 
rust_os := target/x86_64-rust_os/release/librust_os.a 11 | userspace_src := userspace/src 12 | ubin1 := target/x86_64-rust_os/release/boot 13 | disk := target/disk.img 14 | 15 | .PHONY: all clean run debug iso 16 | 17 | all: $(kernel) 18 | 19 | clean: 20 | @rm -rf target 21 | 22 | test: 23 | @sed -Ei 's/^(crate-type = ).*/\1["lib"]/g' kernel/Cargo.toml 24 | @cargo xtest -p rust-os-runner --bin rust-os-runner 25 | @sed -Ei 's/^(crate-type = ).*/\1["staticlib"]/g' kernel/Cargo.toml 26 | 27 | $(disk): $(ubin1) 28 | @dd if=/dev/zero of=$(disk) bs=1000 count=100000 29 | @mkfs.fat -F 16 $(disk) 30 | @mcopy -o -i target/disk.img $(ubin1) ::/boot 31 | @mmd -i target/disk.img /dir1 32 | @mmd -i target/disk.img /dir2 33 | @mmd -i target/disk.img /dir3 34 | @mmd -i target/disk.img /dir1/sub1 35 | @mmd -i target/disk.img /dir1/sub2 36 | @sh -c "echo 'hello from nikos' | mcopy -o -i target/disk.img - ::/hi.txt" 37 | @sh -c "echo 'this file nested af' | mcopy -o -i target/disk.img - ::/dir1/sub1/nested.txt" 38 | 39 | run: $(iso) $(disk) 40 | @qemu-system-x86_64 -m size=8000 -serial stdio --no-reboot -cdrom $(iso) -drive file=$(disk),media=disk,format=raw,bus=0,unit=0 -boot d -display gtk,zoom-to-fit=on 41 | 42 | debug: $(iso) $(disk) 43 | @qemu-system-x86_64 -m size=8000 -monitor stdio -d int --no-reboot -s -S -cdrom $(iso) -drive file=$(disk),media=disk,format=raw,bus=0,unit=0 -boot d -display gtk,zoom-to-fit=on 44 | 45 | iso: $(iso) 46 | 47 | $(iso): $(kernel) $(grub_cfg) 48 | @mkdir -p target/isofiles/boot/grub 49 | @cp $(kernel) target/isofiles/boot/kernel.bin 50 | @cp $(grub_cfg) target/isofiles/boot/grub 51 | @grub-mkrescue -o $(iso) target/isofiles # 2> /dev/null 52 | @rm -r target/isofiles 53 | 54 | $(kernel): $(rust_os) $(assembly_object_files) $(linker_script) 55 | @mkdir -p target 56 | @ld -z noreloc-overflow -n -T $(linker_script) -o $(kernel) -Map=$(ld_mapfile) $(assembly_object_files) $(rust_os) 57 | 58 | # compile assembly files 59 | target/arch/$(arch)/%.o: 
boot/$(arch)/%.asm 60 | @mkdir -p $(shell dirname $@) 61 | @nasm -felf64 $< -o $@ 62 | 63 | # compile userspace programs 64 | $(ubin1): FORCE 65 | @cargo rustc -p userspace --bin boot -Z build-std=core,alloc --release -- --emit=obj -C relocation-model=static -C target-feature=+crt-static 66 | 67 | # compile rust OS 68 | $(rust_os): FORCE 69 | @cargo build -Z build-std=core,alloc -Z build-std-features=compiler-builtins-mem -p rust-os --release 70 | 71 | FORCE: ; 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-os 2 | 3 | A very secure OS in Rust! Guaranteed to be unhackable due to no networking. Sometimes it crashes but what can you do. 4 | 5 | ## Features 6 | 7 | ### Memory management 8 | Utilizes a buddy allocator, allowing the kernel to dynamically allocate memory after initializing (`buddy_alloc.rs`)! 9 | 10 | Memory is divided into huge blocks (as large as the memory areas), and whenever a block of size `x` is allocated, we divide the current smallest block into two buddy blocks recursively until we have a block just large enough for `x` to fit. Then we return that block's virtual address. 11 | 12 | Similarly, when two buddy blocks are freed, they are united into a larger block of double size. 13 | 14 | Before the buddy allocator is initialized, a frame allocator is used (`frame_alloc.rs`) which doesn't reclaim the freed pages. That's mostly to hold the data structures (ie. vectors) that the buddy allocator needs. 15 | 16 | The global allocator is declared in `global_alloc.rs` and allows for switching between the two implementations above. 17 | 18 | ### Virtual memory 19 | 20 | A recursive page table is used to map physical to virtual memory (`mem.rs`). Each process has its own page table and is mapped by default to 0x400000 like the 32 bit processes of old. 
The higher half of the kernel starting at 0xC0000000 maps the entire physical memory (4 GiB). 21 | 22 | ### Multiprocessing 23 | 24 | The Programmable Interval Timer (PIT) is used with default settings to switch to the next task for preemptive multitasking. That means that around 18 times a second, an interrupt fires and the kernel switches tasks in a round-robin fashion. The context is saved and the context of the next process is restored, then the processor `iretq`s to change to usermode (`scheduler.rs`). Right now executables simply live in the kernel itself (`userspace.rs`) until a filesystem exists and are mapped to 0x400000 to be executed in usermode. 25 | 26 | ### User interaction 27 | 28 | Stuff written on the keyboard causes an IRQ which is caught by the kernel (`interrupts.rs`). Currently the key pressed is simply written to the screen, so there's no real user interaction. 29 | 30 | ### System calls 31 | 32 | System calls are supported using the fast syscall mechanism (`syscall` opcode). When called, the handler saves most registers (except floating-point stuff etc.), allocates a stack and uses that for executing the syscall. After that it restores registers and returns to userspace via `sysretq` (`syscalls.rs`). 33 | 34 | ### Faults / interrupts 35 | 36 | An interrupt descriptor table is used to handle different kinds of interrupts / faults (`interrupts.rs`). Those that can be ignored are ignored, while more serious ones (page faults, double faults, GPFs) cause a hang. 
37 | 38 | ### Filesystem 39 | TODO 40 | 41 | ## How to run 42 | 43 | Need to install some stuff first: 44 | ``` 45 | sudo apt install grub-pc-bin xorriso nasm qemu-system 46 | rustup toolchain install nightly 47 | rustup default nightly 48 | ``` 49 | 50 | `make run` 51 | Might break between Rust toolchains :( 52 | Last tested with `rustc 1.50.0-nightly (1700ca07c 2020-12-08)` 53 | -------------------------------------------------------------------------------- /boot/x86_64/boot.asm: -------------------------------------------------------------------------------- 1 | extern ua64_mode_start 2 | 3 | global _start 4 | 5 | section .multiboot_header 6 | header_start: 7 | dd 0xe85250d6 ; multiboot 2 magic number 8 | dd 0 ; architecture 0 (protected mode i386) 9 | dd header_end - header_start ; header length 10 | dd 0x100000000 - (0xe85250d6 + 0 + (header_end - header_start)) ; checksum 11 | ; end tag 12 | dw 0 ; type 13 | dw 0 ; flags 14 | dd 8 ; size 15 | header_end: 16 | 17 | bits 32 18 | 19 | _start: 20 | mov esp, _stack_top_low 21 | push ebx 22 | call _multiboot_check 23 | call _cpuid_check 24 | call _long_mode_check 25 | call _setup_page_table 26 | call _enable_paging 27 | mov eax, _gdt64_pointer_low 28 | lgdt [eax] 29 | pop ebx 30 | call _ua64_mode_entry 31 | 32 | _boot_error: 33 | mov dword [0xb8000], 0x4f524f45 34 | mov dword [0xb8004], 0x4f3a4f52 35 | mov dword [0xb8008], 0x4f204f20 36 | mov byte [0xb800a], al 37 | hlt 38 | 39 | _multiboot_check: 40 | ; check if we were loaded by Multiboot compliant bootloader 41 | cmp eax, 0x36d76289 42 | jne .no_multiboot 43 | ret 44 | .no_multiboot: 45 | mov al, '0' 46 | jmp _boot_error 47 | 48 | _cpuid_check: 49 | ; Check if CPUID is supported by attempting to flip the ID bit (bit 21) 50 | ; in the FLAGS register. If we can flip it, CPUID is available. 
51 | 52 | ; Copy FLAGS in to EAX via stack 53 | pushfd 54 | pop eax 55 | 56 | ; Copy to ECX as well for comparing later on 57 | mov ecx, eax 58 | 59 | ; Flip the ID bit 60 | xor eax, 1 << 21 61 | 62 | ; Copy EAX to FLAGS via the stack 63 | push eax 64 | popfd 65 | 66 | ; Copy FLAGS back to EAX (with the flipped bit if CPUID is supported) 67 | pushfd 68 | pop eax 69 | 70 | ; Restore FLAGS from the old version stored in ECX (i.e. flipping the 71 | ; ID bit back if it was ever flipped). 72 | push ecx 73 | popfd 74 | 75 | ; Compare EAX and ECX. If they are equal then that means the bit 76 | ; wasn't flipped, and CPUID isn't supported. 77 | cmp eax, ecx 78 | je .no_cpuid 79 | ret 80 | .no_cpuid: 81 | mov al, "1" 82 | jmp _boot_error 83 | 84 | _long_mode_check: 85 | ; check if the processor supports long mode (via CPUID extended leaves) 86 | check_long_mode: 87 | ; test if extended processor info in available 88 | mov eax, 0x80000000 ; implicit argument for cpuid 89 | cpuid ; get highest supported argument 90 | cmp eax, 0x80000001 ; it needs to be at least 0x80000001 91 | jb .no_long_mode ; if it's less, the CPU is too old for long mode 92 | 93 | ; use extended info to test if long mode is available 94 | mov eax, 0x80000001 ; argument for extended processor info 95 | cpuid ; returns various feature bits in ecx and edx 96 | test edx, 1 << 29 ; test if the LM-bit is set in the D-register 97 | jz .no_long_mode ; If it's not set, there is no long mode 98 | ret 99 | .no_long_mode: 100 | mov al, '2' 101 | jmp _boot_error 102 | 103 | _setup_page_table: 104 | ; map first P4 entry to P3 table 105 | mov eax, _p3_table_low 106 | or eax, 7 ; present + writable + user 107 | mov [_p4_table_low], eax 108 | 109 | ; map first P3 entry to P2 table 110 | mov eax, _p2_table_0_low 111 | or eax, 3 ; present + writable 112 | mov [_p3_table_low], eax 113 | mov [_p3_table_low + 24], eax 114 | add eax, 4096 115 | mov [_p3_table_low + 32], eax 116 | add eax, 4096 117 | mov [_p3_table_low + 40], eax 118 | add eax, 4096 119 | mov 
[_p3_table_low + 48], eax 120 | 121 | ; map each P2 entry to a huge 2MiB page 122 | mov ecx, 0 ; counter variable 123 | 124 | .map_p2_table: 125 | ; map ecx-th P2 entry to a huge page that starts at address 2MiB*ecx 126 | mov eax, 0x200000 ; 2MiB 127 | mul ecx ; start address of ecx-th page 128 | or eax, 0x83 ; present + writable + huge 129 | mov esi, ecx 130 | shl esi, 3 131 | lea edi, [_p2_table_0_low] 132 | add esi, edi 133 | mov dword [esi], eax ; map ecx-th entry 134 | inc ecx ; increase counter 135 | cmp ecx, 2048 ; if counter == 2048, all four contiguous P2 tables (4 GiB) are mapped 136 | jne .map_p2_table ; else map the next entry 137 | 138 | ret 139 | 140 | _enable_paging: 141 | ; load P4 to cr3 register (cpu uses this to access the P4 table) 142 | mov eax, _p4_table_low 143 | mov cr3, eax 144 | xor eax, eax 145 | mov eax, cr3 146 | 147 | ; enable PAE-flag in cr4 (Physical Address Extension) 148 | mov eax, cr4 149 | or eax, 1 << 5 150 | mov cr4, eax 151 | 152 | ; set the long mode bit in the EFER MSR (model specific register) 153 | ; also enable System Call Extensions (SCE) to be able to use the syscall opcode 154 | mov ecx, 0xC0000080 155 | rdmsr 156 | or eax, 1 157 | or eax, 1 << 8 158 | wrmsr 159 | 160 | ; enable paging in the cr0 register 161 | mov eax, cr0 162 | or eax, 1 << 31 163 | mov cr0, eax 164 | 165 | ret 166 | 167 | _ua64_mode_entry: 168 | mov edx, 0xC0000000 169 | add esp, edx 170 | mov eax, _ua64_mode_entry_high 171 | jmp eax 172 | _ua64_mode_entry_high: 173 | mov dword [_p3_table], 0 174 | mov edi, ebx 175 | jmp _gdt64_code_off:ua64_mode_start 176 | 177 | section .rodata 178 | gdt64: 179 | dq 0 ; zero entry 180 | _gdt64_code_off: equ $ - gdt64 ; new 181 | dq (1<<43) | (1<<44) | (1<<47) | (1<<53) ; code segment 182 | _gdt64_pointer: 183 | dw $ - gdt64 - 1 184 | dq gdt64 185 | _gdt64_pointer_low: equ _gdt64_pointer - 0xC0000000 186 | 187 | section .bss 188 | align 4096 189 | _p4_table: 190 | resb 4096 191 | _p3_table: 192 | resb 4096 193 | _p2_table_0: 194 | resb 
4096 195 | _p2_table_1: 196 | resb 4096 197 | _p2_table_2: 198 | resb 4096 199 | _p2_table_3: 200 | resb 4096 201 | _stack_bottom: 202 | resb 1024*40 203 | _stack_top: 204 | 205 | _p4_table_low: equ _p4_table - 0xC0000000 206 | _p3_table_low: equ _p3_table - 0xC0000000 207 | _p2_table_0_low: equ _p2_table_0 - 0xC0000000 208 | _p2_table_1_low: equ _p2_table_1 - 0xC0000000 209 | _p2_table_2_low: equ _p2_table_2 - 0xC0000000 210 | _p2_table_3_low: equ _p2_table_3 - 0xC0000000 211 | _stack_top_low: equ _stack_top - 0xC0000000 212 | -------------------------------------------------------------------------------- /boot/x86_64/grub.cfg: -------------------------------------------------------------------------------- 1 | set timeout=0 2 | set default=0 3 | 4 | menuentry "rust_os" { 5 | multiboot2 /boot/kernel.bin 6 | boot 7 | } 8 | -------------------------------------------------------------------------------- /boot/x86_64/linker.ld: -------------------------------------------------------------------------------- 1 | ENTRY(_start) 2 | 3 | SECTIONS { 4 | . 
= 0xC0100000; 5 | 6 | .boot : AT (ADDR (.boot) - 0xC0000000) 7 | { 8 | _bootstart = .; 9 | *(.multiboot_header) 10 | _bootend = .; 11 | } 12 | .rodata : AT (ADDR (.rodata) - 0xC0000000) 13 | { 14 | _rodatastart = .; 15 | *(.rodata*) 16 | _rodataend = .; 17 | } 18 | .text : AT (ADDR (.text) - 0xC0000000) 19 | { 20 | _textstart = .; 21 | *(.text) 22 | _textend = .; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /kernel/.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "../x86_64-rust_os.json" 3 | -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-os" 3 | version = "0.1.0" 4 | authors = ["Nikos Filippakis "] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["staticlib"] 9 | 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | [dependencies] 12 | spin = "0.5.2" 13 | uart_16550 = "0.2.10" 14 | x86_64 = "0.15.2" 15 | pc-keyboard = "0.3.1" 16 | multiboot2 = "^0.23.0" 17 | if_chain = "1.0.0" 18 | 19 | [dependencies.lazy_static] 20 | version = "1.4.0" 21 | features = ["spin_no_std"] 22 | 23 | [features] 24 | "no-panic-handler" = [] 25 | -------------------------------------------------------------------------------- /kernel/src/buddy_alloc.rs: -------------------------------------------------------------------------------- 1 | use crate::frame_alloc::FrameSingleAllocator; 2 | use crate::mem::PhysAddr; 3 | use crate::mem::VirtAddr; 4 | use crate::mem::FRAME_SIZE; 5 | // use crate::serial_println; 6 | use alloc::alloc::{GlobalAlloc, Layout}; 7 | use alloc::vec::Vec; 8 | use core::cmp; 9 | use core::fmt::Display; 10 | use core::ptr::null_mut; 11 | use spin::{Mutex, RwLock}; 12 | 13 | pub struct BuddyAllocatorManager { 14 | buddy_allocators: 
RwLock>>, 15 | } 16 | 17 | enum MemAreaRequest { 18 | Success((PhysAddr, PhysAddr)), 19 | SmallerThanReq((PhysAddr, PhysAddr), Option<(PhysAddr, PhysAddr)>), 20 | Fail, 21 | } 22 | 23 | impl BuddyAllocatorManager { 24 | pub fn new() -> BuddyAllocatorManager { 25 | // Create an empty buddy allocator list. At this point we're still using the dumb page allocator. 26 | let buddy_allocators = RwLock::new(Vec::with_capacity(32)); 27 | BuddyAllocatorManager { buddy_allocators } 28 | } 29 | 30 | pub fn add_memory_area(&self, start_addr: PhysAddr, end_addr: PhysAddr, block_size: u16) { 31 | // Add a new buddy allocator to the list with these specs. 32 | // As each one has some dynamic internal structures, we try to make it so that none of these 33 | // has to use itself when allocating these. 34 | let new_buddy_alloc = Mutex::new(BuddyAllocator::new(start_addr, end_addr, block_size)); 35 | // On creation the buddy allocator constructor might lock the list of buddy allocators 36 | // due to the fact that it allocates memory for its internal structures (except for the very 37 | // first buddy allocator which still uses the previous, dumb allocator). 38 | // Therefore we first create it and then we lock the list in order to push the new 39 | // buddy allocator to the list. 40 | self.buddy_allocators.write().push(new_buddy_alloc); 41 | } 42 | 43 | pub fn add_mem_area_with_size( 44 | &self, 45 | frame_alloc: &mut dyn FrameSingleAllocator, 46 | mem_size: usize, 47 | block_size: u16, 48 | ) -> bool { 49 | // Find and create a buddy allocator with the memory area requested. 50 | // We use get_mem_area_with_size first to find the memory area. 51 | // That function might instead find one (or two) smaller memory areas if the current 52 | // memory block that we're pulling memory from isn't big enough. 53 | // In that case add these smaller ones but keep looping until we get a memory block 54 | // as big as the one requested. 55 | // If we run out of memory, we simply return false. 
56 | loop { 57 | match Self::get_mem_area_with_size(frame_alloc, mem_size) { 58 | // Success! Found a memory area big enough for our purposes. 59 | MemAreaRequest::Success((mem_start, mem_end)) => { 60 | // serial_println!( 61 | // "* Adding requested mem area to BuddyAlloc: {} to {} ({})", 62 | // mem_start, 63 | // mem_end, 64 | // mem_end.addr() - mem_start.addr() 65 | // ); 66 | self.add_memory_area(mem_start, mem_end, block_size); 67 | return true; 68 | } 69 | // Found one or two smaller memory areas instead, insert them and keep looking. 70 | MemAreaRequest::SmallerThanReq((mem_start, mem_end), second_area) => { 71 | self.add_memory_area(mem_start, mem_end, block_size); 72 | // serial_println!( 73 | // "* Adding smaller mem area to BuddyAlloc: {} to {} ({})", 74 | // mem_start, 75 | // mem_end, 76 | // mem_end.addr() - mem_start.addr() 77 | // ); 78 | if let Some((mem_start, mem_end)) = second_area { 79 | self.add_memory_area(mem_start, mem_end, block_size); 80 | // serial_println!( 81 | // "* Adding smaller mem area to BuddyAlloc: {} to {} ({})", 82 | // mem_start, 83 | // mem_end, 84 | // mem_end.addr() - mem_start.addr() 85 | // ); 86 | } 87 | } 88 | // Ran out of memory! Return false. 89 | MemAreaRequest::Fail => { 90 | // serial_println!( 91 | // "! Failed to find mem area big enough for BuddyAlloc: {}", 92 | // mem_size 93 | // ); 94 | return false; 95 | } 96 | } 97 | } 98 | } 99 | 100 | fn get_mem_area_with_size( 101 | frame_alloc: &mut dyn FrameSingleAllocator, 102 | mem_size: usize, 103 | ) -> MemAreaRequest { 104 | // This function tries to find a continuous memory area as big as the one requested by 105 | // pulling pages from the frame allocator. If it doesn't find an area big enough immediately, 106 | // it might return one or two smaller ones (so that we don't leave memory unused for no reason 107 | // if it doesn't fit our purposes). 
108 | if let Some(first_page) = unsafe { frame_alloc.allocate() } { 109 | let first_addr = first_page.addr(); 110 | let mut last_addr = first_addr + FRAME_SIZE; 111 | // Keep pulling pages from the frame allocator until we hit the required memory size 112 | // or until we run out of memory or we get a block that is not after the previous block received. 113 | while let Some(next_page) = unsafe { frame_alloc.allocate() } { 114 | if next_page.addr() == last_addr { 115 | last_addr += FRAME_SIZE; 116 | } else { 117 | break; 118 | } 119 | if last_addr - first_addr == mem_size { 120 | break; 121 | } 122 | } 123 | // If we found a memory area big enough, great! Return it. 124 | if last_addr - first_addr == mem_size { 125 | MemAreaRequest::Success((PhysAddr::new(first_addr), PhysAddr::new(last_addr))) 126 | } else { 127 | // If we found a smaller memory block, get the largest piece that is a power of 2 128 | // and also greater than a page size. We can use that to make a smaller buddy allocator. 129 | if let Some(first_memarea) = Self::get_largest_page_multiple(first_addr, last_addr) 130 | { 131 | // Try to form a second such block with the left-over memory to not waste it. 132 | let second_memarea = 133 | Self::get_largest_page_multiple(first_memarea.1.addr(), last_addr); 134 | MemAreaRequest::SmallerThanReq(first_memarea, second_memarea) 135 | } else { 136 | // This should never happen but let's be safe 137 | MemAreaRequest::Fail 138 | } 139 | } 140 | } else { 141 | // Couldn't even pull a single page from the frame allocator :( 142 | MemAreaRequest::Fail 143 | } 144 | } 145 | 146 | fn get_largest_page_multiple(start: usize, end: usize) -> Option<(PhysAddr, PhysAddr)> { 147 | // Given a start and end address, try to find the largest memory size that can fit into that 148 | // area that is also a left shift of a FRAME_SIZE (ie. 4096, 8192, 16384 etc.) 
149 | // We need this because our buddy allocator needs a memory area whose size is a power of 2 150 | // in order to be able to split it cleanly and efficiently. 151 | // Also, the smallest size of that memory area will be the FRAME_SIZE. 152 | let mem_len = end - start; 153 | if mem_len == 0 { 154 | None 155 | } else { 156 | // double page_mult while it still fits in this mem area 157 | let mut page_mult = FRAME_SIZE; 158 | while page_mult <= mem_len { 159 | page_mult <<= 1; 160 | } 161 | // we went over the limit so divide by two 162 | page_mult >>= 1; 163 | let start_addr = PhysAddr::new(start); 164 | Some((start_addr, start_addr.offset(page_mult))) 165 | } 166 | } 167 | } 168 | 169 | unsafe impl GlobalAlloc for BuddyAllocatorManager { 170 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 171 | // Loop through the list of buddy allocators until we can find one that can give us 172 | // the requested memory. 173 | let allocation = 174 | self.buddy_allocators 175 | .read() 176 | .iter() 177 | .enumerate() 178 | .find_map(|(_i, allocator)| { 179 | // for each allocator 180 | allocator.try_lock().and_then(|mut allocator| { 181 | allocator 182 | .alloc(layout.size(), layout.align()) 183 | .map(|allocation| { 184 | // try allocating until one succeeds and return this allocation 185 | // serial_println!( 186 | // " - BuddyAllocator #{} allocated {} bytes", 187 | // i, 188 | // layout.size() 189 | // ); 190 | // serial_println!("{}", *allocator); 191 | allocation 192 | }) 193 | }) 194 | }); 195 | // Convert physical address to virtual if we got an allocation, otherwise return null. 
196 | allocation 197 | .and_then(|phys| phys.to_virt()) 198 | .map(|virt| virt.addr() as *mut u8) 199 | .unwrap_or(null_mut()) 200 | } 201 | 202 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 203 | let virt_addr = VirtAddr::new(ptr as usize); 204 | if let Some((phys_addr, _)) = virt_addr.to_phys() { 205 | for (_i, allocator_mtx) in self.buddy_allocators.read().iter().enumerate() { 206 | // for each allocator 207 | if let Some(mut allocator) = allocator_mtx.try_lock() { 208 | // find the one whose memory range contains this address 209 | if allocator.contains(phys_addr) { 210 | // deallocate using this allocator! 211 | allocator.dealloc(phys_addr, layout.size(), layout.align()); 212 | // serial_println!( 213 | // " - BuddyAllocator #{} de-allocated {} bytes", 214 | // i, 215 | // layout.size() 216 | // ); 217 | // serial_println!("{}", *allocator); 218 | return; 219 | } 220 | } 221 | } 222 | } 223 | // serial_println!( 224 | // "! Could not de-allocate virtual address: {} / Memory lost", 225 | // virt_addr 226 | // ); 227 | } 228 | } 229 | 230 | struct BuddyAllocator { 231 | start_addr: PhysAddr, // the first physical address that this struct manages 232 | end_addr: PhysAddr, // one byte after the last physical address that this struct manages 233 | num_levels: u8, // the number of non-leaf levels 234 | block_size: u16, // the size of blocks on the leaf level 235 | free_lists: Vec>, // the list of free blocks on each level 236 | } 237 | 238 | impl BuddyAllocator { 239 | fn new(start_addr: PhysAddr, end_addr: PhysAddr, block_size: u16) -> BuddyAllocator { 240 | // number of levels excluding the leaf level 241 | let mut num_levels: u8 = 0; 242 | while ((block_size as usize) << num_levels as usize) < end_addr.addr() - start_addr.addr() { 243 | num_levels += 1; 244 | } 245 | // vector of free lists 246 | let mut free_lists: Vec> = Vec::with_capacity((num_levels + 1) as usize); 247 | // Initialize each free list with a small capacity (in order to use the 
current allocator 248 | // at least for the first few items and not the one that will be in use when we're actually 249 | // using this as the allocator as this might lead to this allocator using itself and locking) 250 | for _ in 0..(num_levels + 1) { 251 | free_lists.push(Vec::with_capacity(4)); 252 | } 253 | // The top-most block is (the only) free for now! 254 | free_lists[0].push(0); 255 | // We need 1< bool { 266 | // whether a given physical address belongs to this allocator 267 | addr.addr() >= self.start_addr.addr() && addr.addr() < self.end_addr.addr() 268 | } 269 | 270 | fn max_size(&self) -> usize { 271 | // max size that can be supported by this buddy allocator 272 | (self.block_size as usize) << (self.num_levels as usize) 273 | } 274 | 275 | fn req_size_to_level(&self, size: usize) -> Option { 276 | // Find the level of this allocator than can accommodate the required memory size. 277 | let max_size = self.max_size(); 278 | if size > max_size { 279 | // can't allocate more than the maximum size for this allocator! 280 | None 281 | } else { 282 | // find the largest block level that can support this size 283 | let mut next_level = 1; 284 | while (max_size >> next_level) >= size { 285 | next_level += 1; 286 | } 287 | // ...but not larger than the max level! 288 | let req_level = cmp::min(next_level - 1, self.num_levels as usize); 289 | Some(req_level) 290 | } 291 | } 292 | 293 | fn alloc(&mut self, size: usize, alignment: usize) -> Option { 294 | // We should always be aligned due to how the buddy allocator works 295 | // (everything will be aligned to block_size bytes). 296 | // If we need in some case that we are aligned to a greater size, 297 | // allocate a memory block of (alignment) bytes. 298 | let size = cmp::max(size, alignment); 299 | // find which level of this allocator can accommodate this amount of memory (if any) 300 | self.req_size_to_level(size).and_then(|req_level| { 301 | // We can accommodate it! 
Now to check if we actually have / can make a free block 302 | // or we're too full. 303 | self.get_free_block(req_level).map(|block| { 304 | // We got a free block! 305 | // get_free_block gives us the index of the block in the given level 306 | // so we need to find the size of each block in that level and multiply by the index 307 | // to get the offset of the memory that was allocated. 308 | let offset = block as usize * (self.max_size() >> req_level as usize) as usize; 309 | // Add the base address of this buddy allocator's block and return 310 | PhysAddr::new(self.start_addr.addr() + offset) 311 | }) 312 | }) 313 | } 314 | 315 | fn dealloc(&mut self, addr: PhysAddr, size: usize, alignment: usize) { 316 | // As above, find which size was used for this allocation so that we can find the level 317 | // that gave us this memory block. 318 | let size = cmp::max(size, alignment); 319 | // find which level of this allocator was used for this memory request 320 | if let Some(req_level) = self.req_size_to_level(size) { 321 | // find size of each block at this level 322 | let level_block_size = self.max_size() >> req_level; 323 | // calculate which # block was just freed by using the start address and block size 324 | let block_num = 325 | ((addr.addr() - self.start_addr.addr()) as usize / level_block_size) as u32; 326 | // push freed block to the free list so we can reuse it 327 | self.free_lists[req_level].push(block_num); 328 | // try merging buddy blocks now that we might have some to merge 329 | self.merge_buddies(req_level, block_num); 330 | } 331 | } 332 | 333 | fn merge_buddies(&mut self, level: usize, block_num: u32) { 334 | // toggle last bit to get buddy block 335 | let buddy_block = block_num ^ 1; 336 | // if buddy block in free list 337 | if let Some(buddy_idx) = self.free_lists[level] 338 | .iter() 339 | .position(|blk| *blk == buddy_block) 340 | { 341 | // remove current block (in last place) 342 | self.free_lists[level].pop(); 343 | // remove buddy 
block 344 | self.free_lists[level].remove(buddy_idx); 345 | // add free block to free list 1 level above 346 | self.free_lists[level - 1].push(block_num / 2); 347 | // repeat the process! 348 | self.merge_buddies(level - 1, block_num / 2) 349 | } 350 | } 351 | 352 | fn get_free_block(&mut self, level: usize) -> Option { 353 | // Get a block from the free list at this level or split a block above and 354 | // return one of the splitted blocks. 355 | self.free_lists[level] 356 | .pop() 357 | .or_else(|| self.split_level(level)) 358 | } 359 | 360 | fn split_level(&mut self, level: usize) -> Option { 361 | // We reached the maximum level, we can't split anymore! We can't support this allocation. 362 | if level == 0 { 363 | None 364 | } else { 365 | self.get_free_block(level - 1).map(|block| { 366 | // Get a block from 1 level above us and split it. 367 | // We push the second of the splitted blocks to the current free list 368 | // and we return the other one as we now have a block for this allocation! 
369 | self.free_lists[level].push(block * 2 + 1); 370 | block * 2 371 | }) 372 | } 373 | } 374 | } 375 | 376 | impl Display for BuddyAllocator { 377 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 378 | let mut res = writeln!( 379 | f, 380 | " Start: {:x?} / End: {:x?} / Levels: {} / Block size: {} / Max alloc: {}", 381 | self.start_addr, 382 | self.end_addr, 383 | self.num_levels + 1, 384 | self.block_size, 385 | (self.block_size as usize) << (self.num_levels as usize), 386 | ); 387 | res = res.and_then(|_| write!(f, " Free lists: ")); 388 | for i in 0usize..(self.num_levels as usize + 1) { 389 | res = res.and_then(|_| write!(f, "{} in L{} / ", self.free_lists[i].len(), i)); 390 | } 391 | res 392 | } 393 | } 394 | -------------------------------------------------------------------------------- /kernel/src/elf.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | use alloc::boxed::Box; 3 | use core::pin::Pin; 4 | use crate::{mem::VirtAddr, scheduler::Task, mem::PageTable, mem}; 5 | use core::convert::TryInto; 6 | use crate::serial_println; 7 | 8 | #[derive(Debug)] 9 | struct ProgramHeader { 10 | htype: u8, 11 | physical_offset: usize, 12 | load_address: VirtAddr, 13 | phys_size: usize, 14 | } 15 | 16 | pub struct Elf { 17 | data: Pin>, 18 | entry_point: VirtAddr, 19 | headers: Vec, 20 | } 21 | 22 | impl Into for Elf { 23 | fn into(self) -> Task { 24 | let mut task_pt = unsafe {PageTable::new()}; 25 | let phys_addr = unsafe {VirtAddr::new(self.data.as_ptr() as usize).to_phys().unwrap().0}; 26 | for header in self.headers.iter() { 27 | if header.htype != 1 { 28 | continue; 29 | } 30 | for page_idx in (0..header.phys_size).step_by(mem::FRAME_SIZE) { // map each page (frame) at a time for this header 31 | unsafe { 32 | let page_virt = header.load_address.offset(page_idx * mem::FRAME_SIZE); 33 | let page_phys = phys_addr.offset(header.physical_offset + page_idx * mem::FRAME_SIZE); 34 | 
serial_println!("ELF: Mapping {:x} to {:x}", page_virt.addr(), page_phys.addr()); 35 | task_pt.map_virt_to_phys( 36 | page_virt, // map the nth page corresponding to this header's loadable data 37 | page_phys, // locate the physical address of the prog data for this page 38 | mem::BIT_PRESENT | mem::BIT_USER); // mapped page is user accessible and present 39 | } 40 | } 41 | } 42 | 43 | let mut stack_space: Pin> = Pin::new(Box::new([0u8; mem::FRAME_SIZE])); // allocate some memory to use for the stack 44 | unsafe { 45 | let stack_space_phys = VirtAddr::new(stack_space.as_mut_ptr() as *const u8 as usize) 46 | .to_phys() 47 | .unwrap() 48 | .0; 49 | // take physical address of stack 50 | task_pt.map_virt_to_phys( 51 | mem::VirtAddr::new(0x800000), 52 | stack_space_phys, 53 | mem::BIT_PRESENT | mem::BIT_WRITABLE | mem::BIT_USER, 54 | ); // map the stack memory to 0x800000 55 | } 56 | Task::new( 57 | self.entry_point, 58 | mem::VirtAddr::new(0x801000), 59 | task_pt, 60 | self.data, 61 | stack_space, 62 | ) 63 | } 64 | } 65 | 66 | impl Elf { 67 | pub fn new(data: Vec) -> Self { 68 | let entry_point = VirtAddr::new(usize::from_le_bytes(data[24..32].try_into().unwrap())); 69 | let ph_off = usize::from_le_bytes(data[32..40].try_into().unwrap()); 70 | let ph_siz = u16::from_le_bytes(data[54..56].try_into().unwrap()) as usize; 71 | let ph_cnt = u16::from_le_bytes(data[56..58].try_into().unwrap()) as usize; 72 | 73 | let headers = (0..ph_cnt).map(|i| { 74 | let header_index = ph_off + i * ph_siz; 75 | let header = &data[header_index..header_index+ph_siz]; 76 | let htype = header[0] as u8; 77 | let physical_offset = usize::from_le_bytes(header[8..16].try_into().unwrap()); 78 | let load_address = VirtAddr::new(usize::from_le_bytes(header[16..24].try_into().unwrap())); 79 | let phys_size = usize::from_le_bytes(header[32..40].try_into().unwrap()); 80 | ProgramHeader { htype, physical_offset, load_address, phys_size } 81 | }).collect(); 82 | 83 | serial_println!("Elf headers: {:x?} 
EIP: {:x?}", headers, entry_point); 84 | 85 | Self { data: Pin::new(data.into_boxed_slice()), entry_point, headers } 86 | } 87 | } -------------------------------------------------------------------------------- /kernel/src/fat16.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::{self, Display, Debug}; 2 | use core::convert::TryInto; 3 | use alloc::vec::Vec; 4 | use crate::{port::Port, println}; 5 | const SECTOR_SIZE: usize = 512; 6 | pub const DIR_ENTRY_SIZE: usize = 32; 7 | 8 | pub struct SizedString([u8; N]); 9 | 10 | static IDE: IDE = IDE::new_primary_master(); 11 | 12 | impl SizedString { 13 | pub fn new(c: &[u8]) -> Self { 14 | Self(c.try_into().unwrap()) 15 | } 16 | } 17 | 18 | impl Display for SizedString { 19 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 20 | for c in self.0.iter() { 21 | write!(f, "{}", *c as char)?; 22 | } 23 | Ok(()) 24 | } 25 | } 26 | 27 | impl Debug for SizedString { 28 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 29 | Display::fmt(self, f) 30 | } 31 | } 32 | 33 | pub struct IDE { 34 | io_port: Port, 35 | sel_port: Port, 36 | err_io_port: Port, 37 | sec_count_port: Port, 38 | lba0: Port, 39 | lba1: Port, 40 | lba2: Port, 41 | ctl_port: Port, 42 | } 43 | 44 | impl IDE { 45 | pub const fn new_primary_master() -> IDE { 46 | IDE { // port numbers for primary IDE 47 | io_port: Port::::new(0x1F0), 48 | sel_port: Port::::new(0x1F6), 49 | err_io_port: Port::::new(0x1F1), 50 | sec_count_port: Port::::new(0x1F2), 51 | lba0: Port::::new(0x1F3), 52 | lba1: Port::::new(0x1F4), 53 | lba2: Port::::new(0x1F5), 54 | ctl_port: Port::::new(0x1F7), 55 | } 56 | } 57 | 58 | fn is_ready(&self) -> bool { 59 | let status = self.ctl_port.read(); 60 | if status & 0xA1 != 0 { // BSY or ERR or DF set 61 | false 62 | } else { 63 | status & 8 != 0 // DRQ set 64 | } 65 | } 66 | 67 | fn read_sectors(&self, lba: usize, cnt: usize, buf: &mut [u8]) { 68 | self.sel_port.write(0xE0 | ((lba >> 24) 
as u8 & 0xF)); // select master IDE 69 | self.err_io_port.write(0); // wait 70 | self.sec_count_port.write(cnt as u8); // read cnt sectors 71 | self.lba0.write(lba as u8); // write logical block address 72 | self.lba1.write((lba>>8) as u8); 73 | self.lba2.write((lba>>16) as u8); 74 | self.ctl_port.write(0x20); // read cmd 75 | 76 | 77 | for i in 0..cnt { 78 | while !self.is_ready() {} //wait for disk to be ready to transfer stuff 79 | 80 | for j in 0..SECTOR_SIZE/2 { 81 | let b = self.io_port.read(); // read 2 bytes of data 82 | buf[i*SECTOR_SIZE + j*2] = b as u8; 83 | buf[i*SECTOR_SIZE + j*2+1] = (b>>8) as u8; 84 | } 85 | 86 | for _ in 0..4 { 87 | self.ctl_port.read(); // wait a bit for status to be set 88 | } 89 | } 90 | 91 | self.is_ready(); 92 | } 93 | 94 | pub fn read(&self, address: usize, buf: &mut [u8]) { 95 | let mut v: Vec = Vec::new(); 96 | v.resize(buf.len() + SECTOR_SIZE*2, 0); 97 | 98 | let first_sector = address / SECTOR_SIZE; 99 | let read_sectors = v.len() / SECTOR_SIZE; 100 | let start_address = address % SECTOR_SIZE; 101 | 102 | self.read_sectors(first_sector, read_sectors, &mut v); 103 | 104 | buf.copy_from_slice(&v[start_address..(start_address+buf.len())]); 105 | } 106 | } 107 | 108 | #[allow(dead_code)] 109 | #[derive(Debug)] 110 | pub struct FAT16 { 111 | label: SizedString<11>, 112 | sector_size: u16, 113 | cluster_sectors: u8, 114 | reserved_sectors: u16, 115 | fat_cnt: u8, 116 | fat_size: u16, 117 | root_start: u16, 118 | root_entries: u16, 119 | data_start: usize, 120 | } 121 | 122 | impl FAT16 { 123 | pub fn new() -> FAT16 { 124 | let mut buf = [0u8; 512]; 125 | 126 | IDE.read(0, &mut buf); 127 | 128 | let sector_size = buf[11] as u16 + ((buf[12] as u16) << 8); 129 | let fat_cnt = buf[16]; 130 | let reserved_sectors = buf[14] as u16 + ((buf[15] as u16) << 8); 131 | let fat_size = buf[22] as u16 + ((buf[23] as u16) << 8); 132 | let root_start = fat_cnt as u16 * fat_size as u16 + reserved_sectors; 133 | let root_entries = buf[17] as u16 + 
((buf[18] as u16) << 8); 134 | FAT16 { 135 | label: SizedString::<11>::new(&buf[43..54]), 136 | sector_size, 137 | cluster_sectors: buf[13], 138 | reserved_sectors, 139 | fat_cnt, 140 | fat_size, 141 | root_start, 142 | root_entries, 143 | data_start: root_start as usize * sector_size as usize + root_entries as usize * DIR_ENTRY_SIZE, 144 | } 145 | } 146 | 147 | pub fn root(&self) -> DirEntry { 148 | self.at(0).unwrap() 149 | } 150 | 151 | pub fn root_addr(&self) -> usize { 152 | self.root_start as usize * self.sector_size as usize 153 | } 154 | 155 | fn next_cluster(&self, cluster: u16) -> Option { 156 | let mut buf = [0u8; 2]; 157 | let fat_start = self.reserved_sectors as usize * self.sector_size as usize; 158 | let fat_offset = cluster as usize * 2; 159 | IDE.read(fat_start + fat_offset, &mut buf); 160 | let next_cluster = buf[0] as u16 + ((buf[1] as u16) << 8); 161 | // println!("{:x} {} NEXT IS {}", fat_start, cluster, next_cluster); 162 | if next_cluster >= 0xFFF8 { 163 | None 164 | } else { 165 | Some(next_cluster) 166 | } 167 | } 168 | 169 | fn cluster_bytes(&self) -> usize { 170 | self.cluster_sectors as usize * self.sector_size as usize 171 | } 172 | 173 | pub fn read_data(&self, d: &DirEntry) -> Vec { 174 | let mut buf = Vec::new(); 175 | let mut to_read = d.size as usize; 176 | buf.resize(to_read, 0); 177 | let mut idx = 0usize; 178 | let cluster_bytes = self.cluster_bytes(); 179 | 180 | let mut cluster = d.cluster; 181 | while let Some(cl) = cluster { 182 | IDE.read(self.cluster_addr(cl), &mut buf[idx..idx + cluster_bytes.min(to_read)]); 183 | 184 | if to_read <= cluster_bytes { 185 | break; 186 | } 187 | to_read -= cluster_bytes; 188 | idx += cluster_bytes; 189 | 190 | cluster = self.next_cluster(cl); 191 | } 192 | 193 | buf 194 | } 195 | 196 | pub fn at(&self, index: u16) -> Option { 197 | if index == 0 { 198 | Some(DirEntry{ 199 | name: SizedString::<11>::new("ROOT ".as_bytes()), 200 | attr: 0x10, 201 | cluster: None, 202 | size: self.root_entries 
as usize * DIR_ENTRY_SIZE, 203 | index: 0, 204 | }) 205 | } else { 206 | let addr = self.root_addr() + (index-1) as usize * DIR_ENTRY_SIZE; 207 | self.at_addr(addr) 208 | } 209 | } 210 | 211 | pub fn at_addr(&self, addr: usize) -> Option { 212 | let mut buf = [0u8; DIR_ENTRY_SIZE]; 213 | IDE.read(addr, &mut buf); 214 | if buf[0] == 0 { 215 | None 216 | } else { 217 | let idx = 1 + ((addr - self.root_addr()) / DIR_ENTRY_SIZE) as u16; // get index of dir entry struct from start of root dir 218 | Some(DirEntry::new(&buf, idx)) 219 | } 220 | } 221 | 222 | fn cluster_addr(&self, cluster: u16) -> usize { 223 | (cluster as usize - 2) * self.cluster_bytes() as usize + self.data_start 224 | } 225 | 226 | pub fn ls(&self, e: &DirEntry) -> DirIter { 227 | if e.index == 0 { 228 | DirIter(self.root_addr(), self) 229 | } else { 230 | DirIter(self.cluster_addr(e.cluster.unwrap()), self) 231 | } 232 | } 233 | } 234 | 235 | pub struct DirIter<'a>(usize, &'a FAT16); 236 | 237 | impl Iterator for DirIter<'_> { 238 | type Item = DirEntry; 239 | 240 | fn next(&mut self) -> Option { 241 | let item = self.1.at_addr(self.0); 242 | self.0 += DIR_ENTRY_SIZE; 243 | item 244 | } 245 | } 246 | 247 | #[derive(Debug)] 248 | pub struct DirEntry { 249 | pub name: SizedString<11>, 250 | attr: u8, 251 | cluster: Option, 252 | pub size: usize, 253 | pub index: u16, 254 | } 255 | 256 | impl DirEntry { 257 | pub fn new(b: &[u8; DIR_ENTRY_SIZE], index: u16) -> Self { 258 | Self { 259 | name: SizedString::<11>::new(&b[0..11]), 260 | attr: b[11], 261 | cluster: Some(b[26] as u16 + ((b[27] as u16) << 8)), 262 | size: u32::from_le_bytes(b[28..32].try_into().unwrap()) as usize, 263 | index, 264 | } 265 | } 266 | 267 | pub fn is_archive(&self) -> bool { 268 | self.attr & 0x20 != 0 269 | } 270 | 271 | pub fn is_dir(&self) -> bool { 272 | self.attr & 0x10 != 0 273 | } 274 | } 275 | 276 | pub fn load_main() -> Option> { 277 | let f = FAT16::new(); 278 | // println!("FAT16: {:x?}", f); 279 | for i in 
f.ls(&f.root()) { 280 | println!("{:?}", i); 281 | if i.is_archive() { 282 | let v = f.read_data(&i); 283 | // println!("contents {:?}", v.iter().take(20).collect::>()); 284 | if &i.name.0[0..4] == "BOOT".as_bytes() { 285 | return Some(v); 286 | } 287 | } 288 | } 289 | None 290 | } -------------------------------------------------------------------------------- /kernel/src/frame_alloc.rs: -------------------------------------------------------------------------------- 1 | use crate::mem::{PhysAddr, VirtAddr, FRAME_SIZE}; 2 | use crate::serial_println; 3 | use core::cmp::max; 4 | use core::slice::Iter; 5 | use multiboot2::BootInformation; 6 | use multiboot2::MemoryArea; 7 | 8 | pub static mut BOOTINFO_ALLOCATOR: Option = None; 9 | 10 | pub trait FrameSingleAllocator: Send { 11 | unsafe fn allocate(&mut self) -> Option; 12 | } 13 | 14 | pub struct SimpleAllocator { 15 | kernel_end_phys: usize, // end address of our kernel sections (don't write before this!) 16 | mem_areas: Iter<'static, MemoryArea>, // memory areas from multiboot 17 | cur_area: Option<(usize, usize)>, // currently used area's bounds 18 | next_page: Option, // physical address of last page returned 19 | } 20 | 21 | unsafe impl core::marker::Send for SimpleAllocator {} // shh it's ok pointers are thread-safe 22 | 23 | impl SimpleAllocator { 24 | pub unsafe fn init(boot_info: &'static BootInformation<'static>) { 25 | let kernel_end = boot_info.end_address(); 26 | let kernel_end_phys = VirtAddr::new(kernel_end).to_phys().unwrap().0.addr(); 27 | let mem_tag = boot_info 28 | .memory_map_tag() 29 | .expect("Must have memory map tag"); 30 | let mut alloc = SimpleAllocator { 31 | kernel_end_phys, 32 | mem_areas: mem_tag.memory_areas().iter(), 33 | cur_area: None, 34 | next_page: None, 35 | }; 36 | alloc.next_area(); 37 | 38 | BOOTINFO_ALLOCATOR.replace(alloc); 39 | } 40 | 41 | fn next_area(&mut self) -> Option<(usize, usize)> { 42 | self.cur_area = self.mem_areas.next().map(|mem_area| { 43 | // get base addr 
and length for current area 44 | let base_addr = mem_area.start_address() as usize; 45 | let area_len = mem_area.size() as usize; 46 | // start after kernel end 47 | let mem_start = max(base_addr, self.kernel_end_phys); 48 | let mem_end = base_addr + area_len; 49 | // memory start addr aligned with page size 50 | let start_addr = ((mem_start + FRAME_SIZE - 1) / FRAME_SIZE) * FRAME_SIZE; 51 | // memory end addr aligned with page size 52 | let end_addr = (mem_end / FRAME_SIZE) * FRAME_SIZE; 53 | serial_println!( 54 | "- FrameAlloc: New area: {:x} to {:x} ({})", 55 | start_addr, 56 | end_addr, 57 | end_addr - start_addr 58 | ); 59 | self.next_page = Some(start_addr); 60 | (start_addr, end_addr) 61 | }); 62 | 63 | self.cur_area 64 | } 65 | } 66 | 67 | impl FrameSingleAllocator for SimpleAllocator { 68 | unsafe fn allocate(&mut self) -> Option { 69 | // return a page from this area 70 | let frame = PhysAddr::new(self.next_page?); 71 | // get current area end addr if we still have an area left 72 | let (_, end_addr) = self.cur_area?; 73 | // increment addr to the next page 74 | *(self.next_page.as_mut()?) += FRAME_SIZE; 75 | if self.next_page? 
<= end_addr { 76 | Some(frame) 77 | } else { 78 | // end of the frame is beyond the area limits, go to next area and try again 79 | self.next_area()?; 80 | self.allocate() 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /kernel/src/gdt.rs: -------------------------------------------------------------------------------- 1 | use crate::println; 2 | use lazy_static::lazy_static; 3 | use x86_64::instructions::segmentation::{Segment, CS,DS}; 4 | use x86_64::instructions::tables::load_tss; 5 | use x86_64::structures::gdt::{ 6 | Descriptor, DescriptorFlags, GlobalDescriptorTable, SegmentSelector, 7 | }; 8 | use x86_64::structures::tss::TaskStateSegment; 9 | use x86_64::{PrivilegeLevel, VirtAddr}; 10 | 11 | pub const DOUBLE_FAULT_IST_INDEX: u8 = 0; 12 | const STACK_SIZE: usize = 0x2000; 13 | pub static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; 14 | pub static mut PRIV_TSS_STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; 15 | 16 | lazy_static! { 17 | static ref TSS: TaskStateSegment = { 18 | let mut tss = TaskStateSegment::new(); 19 | tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = { 20 | let stack_start = VirtAddr::from_ptr(unsafe { &STACK }); 21 | let stack_end = stack_start + STACK_SIZE as u64; 22 | stack_end 23 | }; 24 | tss.privilege_stack_table[0] = { 25 | let stack_start = VirtAddr::from_ptr(unsafe { &PRIV_TSS_STACK }); 26 | let stack_end = stack_start + STACK_SIZE as u64; 27 | stack_end 28 | }; 29 | tss 30 | }; 31 | } 32 | 33 | lazy_static! 
{ 34 | static ref GDT: (GlobalDescriptorTable, [SegmentSelector; 5]) = { 35 | let mut gdt = GlobalDescriptorTable::new(); 36 | let kernel_data_flags = 37 | DescriptorFlags::USER_SEGMENT | DescriptorFlags::PRESENT | DescriptorFlags::WRITABLE; 38 | let code_sel = gdt.append(Descriptor::kernel_code_segment()); 39 | let data_sel = gdt.append(Descriptor::UserSegment(kernel_data_flags.bits())); 40 | let tss_sel = gdt.append(Descriptor::tss_segment(&TSS)); 41 | let user_data_sel = gdt.append(Descriptor::user_data_segment()); 42 | let user_code_sel = gdt.append(Descriptor::user_code_segment()); 43 | ( 44 | gdt, 45 | [code_sel, data_sel, tss_sel, user_data_sel, user_code_sel], 46 | ) 47 | }; 48 | } 49 | 50 | pub fn init_gdt() { 51 | GDT.0.load(); 52 | let stack = unsafe { &STACK as *const _ }; 53 | let user_stack = unsafe { &PRIV_TSS_STACK as *const _ }; 54 | println!( 55 | " - Loaded GDT: {:p} TSS: {:p} Stack {:p} User stack: {:p} CS segment: {} TSS segment: {}", 56 | &GDT.0 as *const _, &*TSS as *const _, stack, user_stack, GDT.1[0].0, GDT.1[1].0 57 | ); 58 | unsafe { 59 | CS::set_reg(GDT.1[0]); 60 | // set_cs(GDT.1[0]); 61 | // load_ds(GDT.1[1]); 62 | DS::set_reg(GDT.1[1]); 63 | load_tss(GDT.1[2]); 64 | } 65 | } 66 | 67 | #[inline(always)] 68 | pub unsafe fn set_usermode_segs() -> (u16, u16) { 69 | // set ds and tss, return cs and ds 70 | let (mut cs, mut ds) = (GDT.1[4], GDT.1[3]); 71 | cs.0 |= PrivilegeLevel::Ring3 as u16; 72 | ds.0 |= PrivilegeLevel::Ring3 as u16; 73 | // load_ds(ds); 74 | DS::set_reg(ds); 75 | (cs.0, ds.0) 76 | } 77 | -------------------------------------------------------------------------------- /kernel/src/global_alloc.rs: -------------------------------------------------------------------------------- 1 | use crate::buddy_alloc::BuddyAllocatorManager; 2 | use crate::frame_alloc::FrameSingleAllocator; 3 | use crate::mem::{PhysAddr, VirtAddr, FRAME_SIZE}; 4 | use crate::serial_println; 5 | use alloc::alloc::{GlobalAlloc, Layout}; 6 | use 
alloc::vec::Vec; 7 | use core::ptr::null_mut; 8 | use if_chain::if_chain; 9 | use lazy_static::lazy_static; 10 | use spin::{Mutex, RwLock}; 11 | 12 | struct AllocatorInfo { 13 | strategy: RwLock>, 14 | frame_allocator: Mutex>, 15 | free_frames: Mutex>>, 16 | } 17 | 18 | lazy_static! { 19 | static ref ALLOCATOR_INFO: AllocatorInfo = AllocatorInfo { 20 | strategy: RwLock::new(None), 21 | frame_allocator: Mutex::new(None), 22 | free_frames: Mutex::new(None), 23 | }; 24 | } 25 | 26 | pub struct Allocator; 27 | 28 | unsafe impl GlobalAlloc for Allocator { 29 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 30 | if_chain! { 31 | if let Some(ref strategy) = *ALLOCATOR_INFO.strategy.read(); 32 | then { 33 | return strategy.alloc(layout); 34 | } 35 | } 36 | if_chain! { 37 | // try locking the free_frames mutex (this locking fails when dealloc needs to allocate 38 | // more space for its Vec and calls this as it already holds this lock!) 39 | if let Some(ref mut guard) = ALLOCATOR_INFO.free_frames.try_lock(); 40 | // get as mutable 41 | if let Some(ref mut free) = guard.as_mut(); 42 | // get last page (if it exists) 43 | if let Some(page) = free.pop(); 44 | // if a page exists 45 | if let Some(virt) = page.to_virt(); 46 | // return the page 47 | then { 48 | serial_println!(" - GlobalAlloc: Reusing {:x}", virt.addr()); 49 | return virt.to_ref(); 50 | } 51 | } 52 | if_chain! { 53 | // lock the frame allocator 54 | if let Some(ref mut allocator) = ALLOCATOR_INFO.frame_allocator.lock().as_mut(); 55 | // get a physical page from it 56 | if let Some(page) = allocator.allocate(); 57 | // convert it to virtual (add 0xC0000000) 58 | if let Some(virt) = page.to_virt(); 59 | // return the page 60 | then { 61 | serial_println!(" - GlobalAlloc: Allocated {:x} {}", virt.addr(), layout.size()); 62 | return virt.to_ref(); 63 | } 64 | } 65 | null_mut() 66 | } 67 | 68 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 69 | if_chain! 
{ 70 | if let Some(ref strategy) = *ALLOCATOR_INFO.strategy.read(); 71 | then { 72 | return strategy.dealloc(ptr, layout); 73 | } 74 | } 75 | if_chain! { 76 | // try converting the deallocated virtual page address to the physical address 77 | if let Some((phys_addr, _)) = VirtAddr::new(ptr as usize).to_phys(); 78 | // try locking the free frames list (this fails if we've already locked free_frames 79 | // for some reason, i.e. if we're in the middle of reallocating it due to a push to it) 80 | if let Some(ref mut guard) = ALLOCATOR_INFO.free_frames.try_lock(); 81 | // get as mutable 82 | if let Some(ref mut free) = guard.as_mut(); 83 | // add the physical address to the free frames list 84 | then { 85 | free.push(phys_addr); 86 | } 87 | } 88 | serial_println!(" - GlobalAlloc: Deallocated {:x}", ptr as usize); 89 | } 90 | } 91 | 92 | pub fn init_allocator_info(frame_alloc: &'static mut dyn FrameSingleAllocator) { 93 | // set the frame allocator as our current allocator 94 | ALLOCATOR_INFO.frame_allocator.lock().replace(frame_alloc); 95 | let old_free_frames = ALLOCATOR_INFO.free_frames.lock().take(); 96 | // avoid dropping this inside a lock so we don't trigger a free 97 | // while holding the lock 98 | drop(old_free_frames); 99 | ALLOCATOR_INFO 100 | .free_frames 101 | .lock() 102 | .replace(Vec::with_capacity(200)); 103 | } 104 | 105 | pub fn init_global_alloc(frame_alloc: &'static mut dyn FrameSingleAllocator) { 106 | let first_page = unsafe { frame_alloc.allocate().unwrap() }; 107 | init_allocator_info(frame_alloc); 108 | // create our buddy allocator manager (holds a list of buddy allocators for memory regions) 109 | let manager = BuddyAllocatorManager::new(); 110 | // Create a buddy allocator over a single page which will be provided by our old allocator. 
111 | // This helps us have a single valid page from which our buddy allocator 112 | // will be able to give blocks away, as otherwise on its first allocation, the buddy allocator 113 | // would have to call itself in order to create its own internal structures 114 | // (ie. the free list for each level and the array that holds whether each block is split or not). 115 | // This way we have one buddy allocator with a single page, which will be used by the second 116 | // one which will be larger, which will then be used by a larger one until we can map most 117 | // of the memory. None of these allocators should therefore need to use itself in order to 118 | // allocate its internal structures which saves us some headaches. 119 | manager.add_memory_area(first_page, first_page.offset(FRAME_SIZE), 16); 120 | // Moment of truth! Start using our list of buddy allocators. 121 | ALLOCATOR_INFO.strategy.write().replace(manager); 122 | // Create a second, larger buddy allocator in our list which is supported by the first one, 123 | // as described above. 124 | let frame_alloc = ALLOCATOR_INFO.frame_allocator.lock().take().unwrap(); 125 | // Get our current buddy allocator 126 | ALLOCATOR_INFO 127 | .strategy 128 | .read() 129 | .as_ref() 130 | .map(|buddy_manager| { 131 | // Allocate increasingly large memory areas. 132 | // The previously created buddy allocator (which uses a single page) will be used to back 133 | // the first of these areas' internal structures to avoid the area having to use itself. 134 | // Then the first two areas will be used to support the third, etc. 135 | // Until we can support 1GiB buddy allocators (the final type) which need a big 136 | // amount of continuous backing memory (some MiB for the is_split bitmap plus 137 | // several Vecs for the free lists). 
138 | buddy_manager.add_mem_area_with_size(frame_alloc, FRAME_SIZE * 8, 16); 139 | buddy_manager.add_mem_area_with_size(frame_alloc, FRAME_SIZE * 64, 16); 140 | buddy_manager.add_mem_area_with_size(frame_alloc, 1 << 24, 16); 141 | while buddy_manager.add_mem_area_with_size(frame_alloc, 1 << 30, 16) {} 142 | }); 143 | } 144 | -------------------------------------------------------------------------------- /kernel/src/interrupts.rs: -------------------------------------------------------------------------------- 1 | use core::arch::{asm, naked_asm}; 2 | use alloc::borrow::ToOwned; 3 | use alloc::vec::Vec; 4 | use crate::port::{end_of_interrupt, Port}; 5 | use crate::scheduler; 6 | use crate::syscalls; 7 | use crate::{print, println, serial_println}; 8 | use lazy_static::lazy_static; 9 | use spin::Mutex; 10 | 11 | // use x86_64::registers::Segment; 12 | use x86_64::registers::segmentation::Segment; 13 | use x86_64::registers::segmentation::CS; 14 | use x86_64::instructions::tables::{lidt, DescriptorTablePointer}; 15 | use x86_64::structures::gdt::SegmentSelector; 16 | use x86_64::structures::idt::InterruptStackFrame; 17 | 18 | use pc_keyboard::{layouts, DecodedKey, Keyboard, ScancodeSet1}; 19 | 20 | use core::mem::size_of; 21 | use x86_64::VirtAddr; 22 | 23 | type IDTHandler = extern "x86-interrupt" fn(); 24 | 25 | lazy_static! { 26 | static ref KEYS_BUF: Mutex> = 27 | Mutex::new(Vec::::new()); 28 | static ref KEYBOARD: Mutex> = 29 | Mutex::new(Keyboard::new(layouts::Us104Key, ScancodeSet1)); 30 | } 31 | 32 | macro_rules! irq_fn { 33 | ($f: ident, $i: literal, $e:expr) => { 34 | unsafe extern "x86-interrupt" fn $f(_stack_frame: &mut InterruptStackFrame) { 35 | asm!("cli"); 36 | $e(); 37 | end_of_interrupt($i); 38 | asm!("sti"); 39 | } 40 | } 41 | } 42 | 43 | extern "x86-interrupt" fn div_by_zero(stack_frame: &mut InterruptStackFrame) { 44 | println!(" !! 
div by zero {:?}", stack_frame); 45 | } 46 | 47 | extern "x86-interrupt" fn breakpoint(stack_frame: &mut InterruptStackFrame) { 48 | println!(" !! int3 {:?}", stack_frame); 49 | } 50 | 51 | extern "x86-interrupt" fn page_fault(stack_frame: &mut InterruptStackFrame, err_code: u64) { 52 | println!(" !! page fault @ {:x}! err code: {} {:?}", stack_frame.instruction_pointer.as_u64(), err_code, stack_frame); 53 | loop {} 54 | } 55 | 56 | extern "x86-interrupt" fn gpf(stack_frame: &mut InterruptStackFrame, err_code: u64) { 57 | println!(" !! gpf! err code: {} {:?}", err_code, stack_frame); 58 | loop {} 59 | } 60 | 61 | extern "x86-interrupt" fn double_fault(stack_frame: &mut InterruptStackFrame, err_code: u64) { 62 | println!(" !! double fault! err code: {} {:?}", err_code, stack_frame); 63 | loop {} 64 | } 65 | 66 | extern "x86-interrupt" fn ide(_stack_frame: &mut InterruptStackFrame) { 67 | serial_println!(" # IDE IRQ"); 68 | } 69 | 70 | // timer interrupt function to change contexts 71 | #[naked] 72 | unsafe extern "sysv64" fn timer(_stack_frame: &mut InterruptStackFrame) { 73 | naked_asm!("\ 74 | push r15; push r14; push r13; push r12; push r11; push r10; push r9;\ 75 | push r8; push rdi; push rsi; push rdx; push rcx; push rbx; push rax; push rbp;\ 76 | mov rdi, rsp // first arg of context switch is the context which is all the registers saved above 77 | sub rsp, 0x400 78 | jmp {context_switch} 79 | ", context_switch = sym scheduler::context_switch); 80 | } 81 | 82 | irq_fn!(keyboard, 33, || { 83 | let port: Port = Port::new(0x60); 84 | let scancode = port.read(); 85 | let mut keybd = KEYBOARD.lock(); 86 | if let Ok(Some(key_evt)) = keybd.add_byte(scancode) { 87 | if let Some(key) = keybd.process_keyevent(key_evt) { 88 | match key { 89 | DecodedKey::Unicode('\n') | DecodedKey::RawKey(pc_keyboard::KeyCode::Enter) => { // on enter press 90 | let mut chars = KEYS_BUF.lock(); 91 | let mut chars_cp = Vec::new(); 92 | chars.clone_into(&mut chars_cp); 93 | 
*syscalls::STDIN_BUF.lock() = Some(chars_cp); // replace the stdin buf with the current recorded keys 94 | *chars = Vec::new(); // replace the keys buf with an empty char vec 95 | }, 96 | DecodedKey::Unicode(character) => { 97 | print!("{}", character); 98 | KEYS_BUF.lock().push(character as u8); 99 | }, 100 | DecodedKey::RawKey(key) => { 101 | print!("{:?}", key); 102 | }, 103 | } 104 | } 105 | } 106 | }); 107 | 108 | lazy_static! { 109 | static ref INTERRUPT_TABLE: InterruptDescriptorTable = { 110 | let mut vectors = [IDTEntry::empty(); 0x100]; 111 | macro_rules! idt_entry { 112 | ($i:literal, $e:expr) => { 113 | vectors[$i] = 114 | IDTEntry::new($e as *const IDTHandler, CS::get_reg(), 0, true, 0); 115 | }; 116 | } 117 | idt_entry!(0, div_by_zero); 118 | idt_entry!(3, breakpoint); 119 | vectors[8] = IDTEntry::new( 120 | double_fault as *const IDTHandler, 121 | CS::get_reg(), 122 | crate::gdt::DOUBLE_FAULT_IST_INDEX + 1, 123 | true, 124 | 0, 125 | ); 126 | idt_entry!(13, gpf); 127 | idt_entry!(14, page_fault); 128 | idt_entry!(32, timer); 129 | idt_entry!(33, keyboard); 130 | idt_entry!(46, ide); 131 | InterruptDescriptorTable(vectors) 132 | }; 133 | } 134 | 135 | #[repr(C, packed)] 136 | struct InterruptDescriptorTable([IDTEntry; 0x100]); 137 | 138 | impl InterruptDescriptorTable { 139 | fn load(&'static self) { 140 | let idt_ptr = DescriptorTablePointer { 141 | base: VirtAddr::from_ptr(self as *const _), 142 | limit: (size_of::() - 1) as u16, 143 | }; 144 | println!(" - Setting up IDT with {} entries", INTERRUPT_TABLE.0.len()); 145 | println!(" - IDT ptr address: {:x}", &idt_ptr as *const _ as u64); 146 | println!( 147 | " - IDT address: {:x}", 148 | &INTERRUPT_TABLE.0 as *const _ as u64 149 | ); 150 | unsafe { 151 | lidt(&idt_ptr); 152 | } 153 | } 154 | } 155 | 156 | #[derive(Copy, Clone)] 157 | #[repr(C, packed)] 158 | struct IDTEntry { 159 | handler_low: u16, 160 | gdt_selector: u16, 161 | options: u16, 162 | handler_mid: u16, 163 | handler_hi: u32, 164 | 
reserved: u32, 165 | } 166 | 167 | impl IDTEntry { 168 | fn new( 169 | handler: *const IDTHandler, 170 | gdt_selector: SegmentSelector, 171 | int_stack_idx: u8, 172 | disable_interrupts: bool, 173 | dpl_priv: u8, 174 | ) -> IDTEntry { 175 | let mut options: u16 = int_stack_idx as u16 & 0b111; 176 | if !disable_interrupts { 177 | options |= 1 << 8; 178 | } 179 | options |= 1 << 9; 180 | options |= 1 << 10; 181 | options |= 1 << 11; 182 | options |= (dpl_priv as u16 & 0b11) << 13; 183 | options |= 1 << 15; 184 | let handler_ptr = handler as u64; 185 | let handler_low = (handler_ptr & 0xFFFF) as u16; 186 | let handler_mid = ((handler_ptr >> 16) & 0xFFFF) as u16; 187 | let handler_hi = (handler_ptr >> 32) as u32; 188 | let gdt_selector = gdt_selector.0; 189 | IDTEntry { 190 | handler_low, 191 | handler_mid, 192 | handler_hi, 193 | options, 194 | gdt_selector, 195 | reserved: 0, 196 | } 197 | } 198 | 199 | fn empty() -> IDTEntry { 200 | IDTEntry { 201 | handler_low: 0, 202 | handler_mid: 0, 203 | handler_hi: 0, 204 | options: 0, 205 | gdt_selector: CS::get_reg().0, 206 | reserved: 0, 207 | } 208 | } 209 | } 210 | 211 | pub fn setup_idt() { 212 | INTERRUPT_TABLE.load(); 213 | } 214 | -------------------------------------------------------------------------------- /kernel/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(naked_functions)] 3 | #![feature(abi_x86_interrupt)] 4 | #![feature(alloc_error_handler)] 5 | #![feature(str_from_raw_parts)] 6 | #![allow(static_mut_refs)] 7 | 8 | extern crate alloc; 9 | extern crate multiboot2; 10 | extern crate pc_keyboard; 11 | extern crate x86_64; 12 | 13 | pub mod buddy_alloc; 14 | pub mod frame_alloc; 15 | mod gdt; 16 | pub mod global_alloc; 17 | pub mod interrupts; 18 | pub mod mem; 19 | pub mod port; 20 | pub mod scheduler; 21 | pub mod serial_port; 22 | pub mod syscalls; 23 | pub mod vga_buffer; 24 | pub mod fat16; 25 | pub mod elf; 26 | 27 | use 
core::arch::asm; 28 | use gdt::init_gdt; 29 | use interrupts::setup_idt; 30 | use vga_buffer::cls; 31 | 32 | use crate::port::init_pics; 33 | use crate::vga_buffer::set_color; 34 | use crate::vga_buffer::Color; 35 | use crate::elf::Elf; 36 | 37 | #[cfg(not(feature = "no-panic-handler"))] 38 | use core::panic::PanicInfo; 39 | use multiboot2::{BootInformationHeader, BootInformation}; 40 | 41 | #[global_allocator] 42 | static ALLOCATOR: global_alloc::Allocator = global_alloc::Allocator; 43 | 44 | static mut BOOT_INFO: Option = None; 45 | 46 | #[alloc_error_handler] 47 | fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! { 48 | panic!("allocation error: {:?}", layout) 49 | } 50 | 51 | /// This function is called on panic. 52 | #[cfg(not(feature = "no-panic-handler"))] 53 | #[panic_handler] 54 | fn panic(info: &PanicInfo) -> ! { 55 | println!("{}", info); 56 | loop {} 57 | } 58 | 59 | #[no_mangle] 60 | pub extern "C" fn ua64_mode_start() -> ! { 61 | let mut multiboot_info_addr: usize; 62 | unsafe { 63 | asm!("\ 64 | mov ax, 0 65 | mov ss, ax 66 | mov ds, ax 67 | mov es, ax 68 | mov fs, ax 69 | mov gs, ax 70 | ", out("rdi") multiboot_info_addr); 71 | } 72 | let boot_info = unsafe { 73 | BOOT_INFO = Some(BootInformation::load( 74 | mem::PhysAddr::new(multiboot_info_addr) 75 | .to_virt() 76 | .unwrap() 77 | .addr() as *const BootInformationHeader, 78 | ).unwrap_unchecked()); 79 | BOOT_INFO.as_ref().unwrap() 80 | }; 81 | start(boot_info); 82 | } 83 | 84 | pub fn start(boot_info: &'static BootInformation) -> ! 
{ 85 | cls(); 86 | init_gdt(); 87 | setup_idt(); 88 | unsafe { 89 | syscalls::init_syscalls(); 90 | } 91 | unsafe { 92 | let pt = mem::get_page_table(); 93 | println!("Page table: {:p}", pt); 94 | let entry0 = pt.get_entry(0); 95 | println!("Entry 0: {}", entry0); 96 | let entry03 = entry0.next_pt().get_entry(3); 97 | println!("Entry 0-3: {}", entry03); 98 | let entry032 = entry03.next_pt().get_entry(2); 99 | println!("Entry 0-3-2: {}", entry032); 100 | println!( 101 | "addr 0x172d05e00 is: {}", 102 | mem::VirtAddr::new(0x172d05e00).to_phys().unwrap().0 103 | ); 104 | } 105 | println!("Kernel end at: {:x}", boot_info.end_address()); 106 | unsafe { 107 | frame_alloc::SimpleAllocator::init(boot_info); 108 | global_alloc::init_global_alloc(frame_alloc::BOOTINFO_ALLOCATOR.as_mut().unwrap()); 109 | } 110 | set_color(Color::Green, Color::Black, false); 111 | init_pics(); 112 | 113 | let main = fat16::load_main().unwrap(); // load the /BOOT main program from fat16 114 | 115 | let elf = Elf::new(main); // parse the file as an elf to find loadable sections 116 | 117 | let sched = &scheduler::SCHEDULER; 118 | sched.schedule_task(elf.into()); // transform to a task and schedule it 119 | loop {} // no need to do anything here as we will be interrupted anyway 120 | } 121 | -------------------------------------------------------------------------------- /kernel/src/mem.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | use alloc::boxed::Box; 3 | use core::fmt::Display; 4 | 5 | const VIRT_OFFSET: usize = 0xC0000000; 6 | pub const FRAME_SIZE: usize = 0x1000; 7 | type EmptyFrame = [u8; FRAME_SIZE as usize]; 8 | 9 | #[repr(C)] 10 | #[derive(Copy, Clone)] 11 | pub struct PTEntry(usize); 12 | 13 | #[repr(C)] 14 | pub struct PageTable { 15 | entries: [PTEntry; 512], 16 | } 17 | 18 | pub const BIT_PRESENT: u16 = 1; 19 | pub const BIT_WRITABLE: u16 = 1 << 1; 20 | pub const BIT_USER: u16 = 1 << 2; 21 | pub const 
BIT_WRITE_THROUGH: u16 = 1 << 3; 22 | pub const BIT_NO_CACHE: u16 = 1 << 4; 23 | pub const BIT_ACCESSED: u16 = 1 << 5; 24 | pub const BIT_DIRTY: u16 = 1 << 6; 25 | pub const BIT_HUGE: u16 = 1 << 7; 26 | pub const BIT_GLOBAL: u16 = 1 << 8; 27 | 28 | impl PTEntry { 29 | pub fn get_bit(&self, bit: u16) -> bool { 30 | (self.0 & (bit as usize)) != 0 31 | } 32 | 33 | pub fn set_opts(&mut self, options: u16) { 34 | let val = (self.0 >> 9) << 9; 35 | self.0 = val | options as usize; 36 | } 37 | 38 | pub fn set_bit(&mut self, bit: u16, v: bool) { 39 | if ((self.0 & (bit as usize)) != 0) != v { 40 | self.0 ^= bit as usize; 41 | } 42 | } 43 | 44 | pub fn set_phys_addr(&mut self, addr: PhysAddr) { 45 | let val = self.0 & ((1 << 9) - 1); 46 | self.0 = addr.addr() | val; 47 | } 48 | 49 | pub fn phys_addr(&self) -> PhysAddr { 50 | PhysAddr::new(self.0 & (((1 << 40) - 1) * FRAME_SIZE)) 51 | } 52 | 53 | pub unsafe fn next_pt(&self) -> &'static mut PageTable { 54 | self.phys_addr().to_virt().unwrap().to_ref::() 55 | } 56 | } 57 | 58 | impl Display for PTEntry { 59 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 60 | if self.get_bit(BIT_PRESENT) { 61 | let res = write!(f, "{}", self.phys_addr()); 62 | if self.get_bit(BIT_WRITABLE) { 63 | write!(f, " writable").unwrap(); 64 | } 65 | if self.get_bit(BIT_USER) { 66 | write!(f, " user").unwrap(); 67 | } 68 | if self.get_bit(BIT_WRITE_THROUGH) { 69 | write!(f, " write_through").unwrap(); 70 | } 71 | if self.get_bit(BIT_NO_CACHE) { 72 | write!(f, " no_cache").unwrap(); 73 | } 74 | if self.get_bit(BIT_ACCESSED) { 75 | write!(f, " accessed").unwrap(); 76 | } 77 | if self.get_bit(BIT_DIRTY) { 78 | write!(f, " dirty").unwrap(); 79 | } 80 | if self.get_bit(BIT_HUGE) { 81 | write!(f, " huge").unwrap(); 82 | } 83 | if self.get_bit(BIT_GLOBAL) { 84 | write!(f, " global").unwrap(); 85 | } 86 | res 87 | } else { 88 | write!(f, "") 89 | } 90 | } 91 | } 92 | 93 | pub unsafe fn get_page_table() -> &'static mut PageTable { 94 | 
let mut p4: usize; 95 | asm!("mov rax, cr3", out("rax") p4); 96 | &mut *((p4 + VIRT_OFFSET) as *mut PageTable) 97 | } 98 | 99 | impl PageTable { 100 | pub unsafe fn new() -> Box { 101 | let mut pt = Box::new(PageTable { 102 | entries: [PTEntry(0); 512], 103 | }); // allocate the master PT struct 104 | pt.entries[0].set_phys_addr(Self::alloc_page()); // allocate page for the first child PT 105 | pt.entries[0].set_bit(BIT_PRESENT, true); 106 | pt.entries[0].set_bit(BIT_WRITABLE, true); 107 | pt.entries[0].set_bit(BIT_USER, true); // entry is present, writable and accessible by user 108 | let pt0 = pt.entries[0].next_pt(); // get the child PT we just allocated 109 | let cur_pt0 = get_page_table().entries[0].next_pt(); 110 | pt0.entries[3] = cur_pt0.entries[3].clone(); // copy over the entries 3, 4, 5, 6 from the equivalent 111 | pt0.entries[4] = cur_pt0.entries[4].clone(); // child PT that is currently in use 112 | pt0.entries[5] = cur_pt0.entries[5].clone(); // these correspond to the addresses our kernel uses 113 | pt0.entries[6] = cur_pt0.entries[6].clone(); // plus some more, so that the entire physical memory is mapped 114 | pt 115 | } 116 | 117 | pub unsafe fn phys_addr(&self) -> PhysAddr { 118 | let virt = VirtAddr::new(self as *const _ as usize); 119 | virt.to_phys().unwrap().0 120 | } 121 | 122 | pub unsafe fn enable(&self) { 123 | let phys_addr = self.phys_addr().addr(); 124 | asm!("mov cr3, rax", in("rax") phys_addr); 125 | } 126 | 127 | unsafe fn alloc_page() -> PhysAddr { 128 | let frame: Box = Box::new([0; FRAME_SIZE as usize]); 129 | VirtAddr::new(Box::into_raw(frame) as usize) 130 | .to_phys() 131 | .unwrap() 132 | .0 133 | } 134 | 135 | pub fn get_entry(&mut self, i: usize) -> &mut PTEntry { 136 | &mut self.entries[i] 137 | } 138 | pub unsafe fn map_virt_to_phys( 139 | &mut self, 140 | virt: VirtAddr, 141 | phys: PhysAddr, 142 | create_options: u16, 143 | ) -> &'static PTEntry { 144 | let create_huge = (create_options & BIT_HUGE) != 0; 145 | let 
p4_off = (virt.addr() >> 39) & 0b1_1111_1111; 146 | let pte = self.get_entry(p4_off as usize); 147 | if !pte.get_bit(BIT_PRESENT) { 148 | let new_frame = Self::alloc_page(); 149 | pte.set_phys_addr(new_frame); 150 | pte.set_bit(BIT_PRESENT, true); 151 | } 152 | if (create_options & BIT_WRITABLE) != 0 { 153 | pte.set_bit(BIT_WRITABLE, true); 154 | } 155 | if (create_options & BIT_USER) != 0 { 156 | pte.set_bit(BIT_USER, true); 157 | } 158 | let p3_off = (virt.addr() >> 30) & 0b1_1111_1111; 159 | let pte = pte.next_pt().get_entry(p3_off as usize); 160 | if !pte.get_bit(BIT_PRESENT) || pte.get_bit(BIT_HUGE) { 161 | let new_frame = Self::alloc_page(); 162 | pte.set_phys_addr(new_frame); 163 | pte.set_bit(BIT_PRESENT, true); 164 | } 165 | if (create_options & BIT_WRITABLE) != 0 { 166 | pte.set_bit(BIT_WRITABLE, true); 167 | } 168 | if (create_options & BIT_USER) != 0 { 169 | pte.set_bit(BIT_USER, true); 170 | } 171 | let p2_off = (virt.addr() >> 21) & 0b1_1111_1111; 172 | let pte = pte.next_pt().get_entry(p2_off as usize); 173 | if !pte.get_bit(BIT_PRESENT) || pte.get_bit(BIT_HUGE) { 174 | if create_huge { 175 | pte.set_phys_addr(phys); 176 | pte.set_opts(create_options); 177 | return pte; 178 | } else { 179 | let new_frame = Self::alloc_page(); 180 | pte.set_phys_addr(new_frame); 181 | pte.set_bit(BIT_PRESENT, true); 182 | } 183 | } 184 | if (create_options & BIT_WRITABLE) != 0 { 185 | pte.set_bit(BIT_WRITABLE, true); 186 | } 187 | if (create_options & BIT_USER) != 0 { 188 | pte.set_bit(BIT_USER, true); 189 | } 190 | let p1_off = (virt.addr() / FRAME_SIZE) & 0b1_1111_1111; 191 | let pte = pte.next_pt().get_entry(p1_off as usize); 192 | pte.set_phys_addr(phys); 193 | pte.set_opts(create_options); 194 | return pte; 195 | } 196 | } 197 | 198 | #[derive(Copy, Clone, Debug)] 199 | pub struct PhysAddr(usize); 200 | #[derive(Copy, Clone, Debug)] 201 | pub struct VirtAddr(usize); 202 | 203 | impl VirtAddr { 204 | pub fn new(addr: usize) -> Self { 205 | VirtAddr(addr) 206 | } 
207 | 208 | pub fn offset(&self, offset: usize) -> VirtAddr { 209 | VirtAddr::new(self.0 + offset) 210 | } 211 | 212 | pub unsafe fn to_ref(&self) -> &'static mut T { 213 | &mut *(self.0 as *mut T) 214 | } 215 | 216 | pub unsafe fn to_phys(&self) -> Option<(PhysAddr, &'static PTEntry)> { 217 | let p4_off = (self.0 >> 39) & 0b1_1111_1111; 218 | let pte = get_page_table().get_entry(p4_off as usize); 219 | if !pte.get_bit(BIT_PRESENT) { 220 | return None; 221 | } 222 | let p3_off = (self.0 >> 30) & 0b1_1111_1111; 223 | let pte = pte.next_pt().get_entry(p3_off as usize); 224 | if !pte.get_bit(BIT_PRESENT) { 225 | return None; 226 | } else if pte.get_bit(BIT_HUGE) { 227 | let page_off = self.0 & 0x3fffffff; // 1 GiB huge page 228 | return Some((pte.phys_addr().offset(page_off), &*pte)); 229 | } 230 | let p2_off = (self.0 >> 21) & 0b1_1111_1111; 231 | let pte = pte.next_pt().get_entry(p2_off as usize); 232 | if !pte.get_bit(BIT_PRESENT) { 233 | return None; 234 | } else if pte.get_bit(BIT_HUGE) { 235 | let page_off = self.0 & 0x1fffff; // 2 MiB huge page 236 | return Some((pte.phys_addr().offset(page_off), &*pte)); 237 | } 238 | let p1_off = (self.0 / FRAME_SIZE) & 0b1_1111_1111; 239 | let pte = pte.next_pt().get_entry(p1_off as usize); 240 | if !pte.get_bit(BIT_PRESENT) { 241 | return None; 242 | } else { 243 | let page_off = self.0 & 0xfff; // normal page 244 | return Some((pte.phys_addr().offset(page_off), &*pte)); 245 | } 246 | } 247 | 248 | pub fn addr(&self) -> usize { 249 | self.0 250 | } 251 | } 252 | 253 | impl PhysAddr { 254 | pub fn new(addr: usize) -> Self { 255 | PhysAddr(addr) 256 | } 257 | 258 | pub unsafe fn to_virt(&self) -> Option { 259 | if self.0 < 0x100000000 { 260 | Some(VirtAddr::new(self.0 + VIRT_OFFSET)) 261 | } else { 262 | None 263 | } 264 | } 265 | 266 | pub fn addr(&self) -> usize { 267 | self.0 268 | } 269 | 270 | pub fn offset(&self, offset: usize) -> PhysAddr { 271 | PhysAddr::new(self.0 + offset) 272 | } 273 | } 274 | 275 | impl Display 
for VirtAddr { 276 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 277 | write!(f, "VirtAddr <{:x}>", self.0) 278 | } 279 | } 280 | 281 | impl Display for PhysAddr { 282 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 283 | write!(f, "PhysAddr <{:x}>", self.0) 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /kernel/src/port.rs: -------------------------------------------------------------------------------- 1 | use crate::println; 2 | use core::arch::asm; 3 | use core::marker::PhantomData; 4 | 5 | pub trait InOut { 6 | unsafe fn port_in(port: u16) -> Self; 7 | unsafe fn port_out(port: u16, val: Self); 8 | } 9 | 10 | impl InOut for u8 { 11 | unsafe fn port_in(port: u16) -> Self { 12 | let mut val; 13 | asm!("in al, dx", out("al") val, in("dx") port); 14 | return val; 15 | } 16 | 17 | unsafe fn port_out(port: u16, val: Self) { 18 | asm!("out dx, al", in("al") val, in("dx") port); 19 | } 20 | } 21 | 22 | impl InOut for u16 { 23 | unsafe fn port_in(port: u16) -> Self { 24 | let mut val; 25 | asm!("in ax, dx", out("ax") val, in("dx") port); 26 | return val; 27 | } 28 | 29 | unsafe fn port_out(port: u16, val: Self) { 30 | asm!("out dx, ax", in("ax") val, in("dx") port); 31 | } 32 | } 33 | 34 | impl InOut for u32 { 35 | unsafe fn port_in(port: u16) -> Self { 36 | let mut val; 37 | asm!("in eax, dx", out("eax") val, in("dx") port); 38 | return val; 39 | } 40 | 41 | unsafe fn port_out(port: u16, val: Self) { 42 | asm!("out dx, eax", in("eax") val, in("dx") port); 43 | } 44 | } 45 | 46 | pub struct Port 47 | where 48 | T: InOut, 49 | { 50 | port: u16, 51 | pt: PhantomData, 52 | } 53 | 54 | impl Port 55 | where 56 | T: InOut, 57 | { 58 | pub const fn new(port: u16) -> Port { 59 | Port { 60 | port, 61 | pt: PhantomData, 62 | } 63 | } 64 | 65 | pub fn write(&self, val: T) { 66 | unsafe { 67 | T::port_out(self.port, val); 68 | } 69 | } 70 | 71 | pub fn read(&self) -> T { 72 | 
unsafe { T::port_in(self.port) } 73 | } 74 | } 75 | 76 | const PIC_MASTER_PORT: u16 = 0x20; 77 | const PIC_SLAVE_PORT: u16 = 0xA0; 78 | const WAIT_PORT: u16 = 0x11; 79 | 80 | const ICW1_ICW4: u8 = 0x01; // ICW4 (not) needed 81 | const ICW1_INIT: u8 = 0x10; // Initialization - required! 82 | const ICW4_8086: u8 = 0x01; // 8086/88 (MCS-80/85) mode 83 | 84 | const PIC_MASTER_NEW_OFFSET: u8 = 0x20; 85 | const PIC_SLAVE_NEW_OFFSET: u8 = 0x28; 86 | 87 | const END_OF_INTERRUPT: u8 = 0x20; 88 | 89 | pub fn init_pics() { 90 | let master_cmd: Port = Port::new(PIC_MASTER_PORT); 91 | let master_data: Port = Port::new(PIC_MASTER_PORT + 1); 92 | let slave_cmd: Port = Port::new(PIC_SLAVE_PORT); 93 | let slave_data: Port = Port::new(PIC_SLAVE_PORT + 1); 94 | let wait_port: Port = Port::new(WAIT_PORT); 95 | let wait = || wait_port.write(0); 96 | 97 | // save interrupt masks 98 | let a1 = master_data.read(); 99 | let a2 = slave_data.read(); 100 | 101 | println!(" - PIC interrupt masks: master {} slave {}", a1, a2); 102 | 103 | 104 | unsafe { 105 | asm!("cli"); 106 | } 107 | // begin initialization 108 | master_cmd.write(ICW1_INIT + ICW1_ICW4); 109 | wait(); 110 | slave_cmd.write(ICW1_INIT + ICW1_ICW4); 111 | wait(); 112 | 113 | // set interrupt offsets 114 | master_data.write(PIC_MASTER_NEW_OFFSET); 115 | wait(); 116 | slave_data.write(PIC_SLAVE_NEW_OFFSET); 117 | wait(); 118 | 119 | // chain slave PIC to master 120 | master_data.write(4); // tell master there is a slave PIC at IRQ2 121 | wait(); 122 | slave_data.write(2); // tell slave it's cascade 123 | wait(); 124 | 125 | // set mode 126 | master_data.write(ICW4_8086); 127 | wait(); 128 | slave_data.write(ICW4_8086); 129 | wait(); 130 | 131 | // restore interrupt masks 132 | master_data.write(a1); 133 | slave_data.write(a2); 134 | 135 | println!(" - Enabling interrupts"); 136 | unsafe { 137 | asm!("sti"); 138 | } 139 | println!(" - Interrupts enabled"); 140 | 141 | // disable_pit(); 142 | } 143 | 144 | pub fn 
end_of_interrupt(interrupt_id: u8) { 145 | if interrupt_id >= PIC_SLAVE_NEW_OFFSET && interrupt_id < PIC_SLAVE_NEW_OFFSET + 8 { 146 | Port::new(PIC_SLAVE_PORT).write(END_OF_INTERRUPT); 147 | } 148 | Port::new(PIC_MASTER_PORT).write(END_OF_INTERRUPT); 149 | } 150 | 151 | pub fn disable_pit() { 152 | const PIT_COMMAND_PORT: u16 = 0x43; 153 | const PIT_CHANNEL0_PORT: u16 = 0x40; 154 | 155 | Port::::new(PIT_COMMAND_PORT).write(0x30); // select channel 0 156 | let chan: Port = Port::new(PIT_CHANNEL0_PORT); 157 | chan.write(0); 158 | chan.write(0); // set freq to 0 159 | } -------------------------------------------------------------------------------- /kernel/src/scheduler.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | use crate::gdt; 3 | use crate::mem; 4 | use crate::port; 5 | use crate::serial_println; 6 | use alloc::boxed::Box; 7 | use alloc::vec::Vec; 8 | use core::fmt::Display; 9 | use core::pin::Pin; 10 | use lazy_static::lazy_static; 11 | use spin::Mutex; 12 | 13 | #[derive(Debug, Clone)] 14 | pub struct Context { 15 | pub rbp: u64, 16 | pub rax: u64, 17 | pub rbx: u64, 18 | pub rcx: u64, 19 | pub rdx: u64, 20 | pub rsi: u64, 21 | pub rdi: u64, 22 | pub r8: u64, 23 | pub r9: u64, 24 | pub r10: u64, 25 | pub r11: u64, 26 | pub r12: u64, 27 | pub r13: u64, 28 | pub r14: u64, 29 | pub r15: u64, 30 | pub rip: u64, 31 | pub cs: u64, 32 | pub rflags: u64, 33 | pub rsp: u64, 34 | pub ss: u64, 35 | } 36 | 37 | #[inline(always)] 38 | pub unsafe fn get_context() -> *const Context { 39 | let ctxp: *const Context; 40 | asm!("push r15; push r14; push r13; push r12; push r11; push r10; push r9;\ 41 | push r8; push rdi; push rsi; push rdx; push rcx; push rbx; push rax; push rbp;\ 42 | mov {}, rsp; sub rsp, 0x400;", 43 | out(reg) ctxp); 44 | ctxp 45 | } 46 | 47 | #[inline(always)] 48 | pub unsafe fn restore_context(ctxr: &Context) { 49 | asm!("mov rsp, {};\ 50 | pop rbp; pop rax; pop rbx; pop rcx; pop rdx; pop 
rsi; pop rdi; pop r8; pop r9;\ 51 | pop r10; pop r11; pop r12; pop r13; pop r14; pop r15; iretq;", 52 | in(reg) ctxr); 53 | } 54 | 55 | #[inline(never)] 56 | pub unsafe fn jmp_to_usermode(code: mem::VirtAddr, stack_end: mem::VirtAddr) { 57 | let (cs_idx, ds_idx) = gdt::set_usermode_segs(); 58 | x86_64::instructions::tlb::flush_all(); // flush the TLB after address-space switch 59 | asm!("\ 60 | push rax // stack segment 61 | push rsi // rsp 62 | push 0x200 // rflags (only interrupt bit set) 63 | push rdx // code segment 64 | push rdi // ret to virtual addr 65 | iretq", 66 | in("rdi") code.addr(), in("rsi") stack_end.addr(), in("dx") cs_idx, in("ax") ds_idx); 67 | } 68 | 69 | #[derive(Clone, Debug)] 70 | enum TaskState { 71 | // a task's state can either be 72 | SavedContext(Context), // a saved context 73 | StartingInfo(mem::VirtAddr, mem::VirtAddr), // or a starting instruction and stack pointer 74 | } 75 | 76 | pub struct Task { 77 | state: TaskState, // the current state of the task 78 | task_pt: Box, // the page table for this task 79 | _data_bytes: Pin>, // a vector to keep the task's data to be mapped 80 | _stack_bytes: Pin>, // a vector to keep the task's stack space 81 | } 82 | 83 | impl Task { 84 | pub fn new( 85 | exec_base: mem::VirtAddr, 86 | stack_end: mem::VirtAddr, 87 | task_pt: Box, 88 | _data_bytes: Pin>, 89 | _stack_bytes: Pin>, 90 | ) -> Task { 91 | // ask for the vecs to be pinned as we take the pointer to the data above 92 | // and we don't want the data to be moved around in physical memory while we've mapped it to virtual memory 93 | Task { 94 | state: TaskState::StartingInfo(exec_base, stack_end), 95 | task_pt, 96 | _data_bytes, 97 | _stack_bytes, 98 | } 99 | } 100 | } 101 | 102 | impl Display for Task { 103 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 104 | unsafe { 105 | write!( 106 | f, 107 | "PT: {}, Context: {:x?}", 108 | self.task_pt.phys_addr(), 109 | self.state 110 | ) 111 | } 112 | } 113 | } 114 | 115 | 
pub struct Scheduler { 116 | tasks: Mutex>, 117 | cur_task: Mutex>, 118 | } 119 | 120 | impl Scheduler { 121 | pub fn new() -> Scheduler { 122 | Scheduler { 123 | tasks: Mutex::new(Vec::new()), 124 | cur_task: Mutex::new(None), // so that next task is 0 125 | } 126 | } 127 | 128 | pub unsafe fn schedule_data(&self, prog_data: Vec, entry_offset: usize) { 129 | let prog_bytes = Pin::new(prog_data.into_boxed_slice()); 130 | let fn_addr = mem::VirtAddr::new(prog_bytes.as_ptr() as usize); 131 | let userspace_fn_phys = fn_addr.to_phys().unwrap().0; // virtual address to physical 132 | let page_phys_start = (userspace_fn_phys.addr() >> 12) << 12; // zero out page offset to get which page we should map 133 | let fn_page_offset = userspace_fn_phys.addr() - page_phys_start; // offset of function from page start 134 | let userspace_fn_virt_base = 0x400000; // target virtual address of page 135 | let userspace_fn_virt = userspace_fn_virt_base + fn_page_offset + entry_offset; // target virtual address of function 136 | serial_println!( 137 | "Mapping {:x} to {:x}", 138 | page_phys_start, 139 | userspace_fn_virt_base 140 | ); 141 | let mut task_pt = mem::PageTable::new(); // copy over the kernel's page tables 142 | task_pt.map_virt_to_phys( 143 | mem::VirtAddr::new(userspace_fn_virt_base), 144 | mem::PhysAddr::new(page_phys_start), 145 | mem::BIT_PRESENT | mem::BIT_USER, 146 | ); // map the program's code 147 | task_pt.map_virt_to_phys( 148 | mem::VirtAddr::new(userspace_fn_virt_base).offset(mem::FRAME_SIZE), 149 | mem::PhysAddr::new(page_phys_start).offset(mem::FRAME_SIZE), 150 | mem::BIT_PRESENT | mem::BIT_USER, 151 | ); // also map another page to be sure we got the entire function in 152 | let mut stack_space = Pin::new(Box::new([0u8; mem::FRAME_SIZE])); // allocate some pinned memory to use for the stack 153 | let stack_space_phys = mem::VirtAddr::new(stack_space.as_mut_ptr() as *const u8 as usize) 154 | .to_phys() 155 | .unwrap() 156 | .0; 157 | // take physical address of 
stack 158 | task_pt.map_virt_to_phys( 159 | mem::VirtAddr::new(0x800000), 160 | stack_space_phys, 161 | mem::BIT_PRESENT | mem::BIT_WRITABLE | mem::BIT_USER, 162 | ); // map the stack memory to 0x800000 163 | let task = Task::new( 164 | mem::VirtAddr::new(userspace_fn_virt), 165 | mem::VirtAddr::new(0x801000), 166 | task_pt, 167 | prog_bytes, 168 | stack_space, 169 | ); // create task struct 170 | self.schedule_task(task); // schedule the task 171 | } 172 | 173 | 174 | pub fn schedule_task(&self, task: Task) { 175 | self.tasks.lock().push(task); // push task struct to list of tasks 176 | } 177 | 178 | pub unsafe fn save_current_context(&self, ctxp: *const Context) { 179 | self.cur_task.lock().map(|cur_task_idx| { 180 | // if there is a current task 181 | let ctx = (*ctxp).clone(); 182 | self.tasks.lock()[cur_task_idx].state = TaskState::SavedContext(ctx); 183 | // replace its context with the given one 184 | }); 185 | } 186 | 187 | pub unsafe fn run_next(&self) { 188 | let tasks_len = self.tasks.lock().len(); // how many tasks are available 189 | if tasks_len > 0 { 190 | let task_state = { 191 | let mut cur_task_opt = self.cur_task.lock(); // lock the current task index 192 | let cur_task = cur_task_opt.get_or_insert(0); // default to 0 193 | let next_task = (*cur_task + 1) % tasks_len; // next task index 194 | *cur_task = next_task; 195 | let task = &self.tasks.lock()[next_task]; // get the next task 196 | serial_println!("Switching to task #{} ({})", next_task, task); 197 | task.task_pt.enable(); // enable task's page table 198 | task.state.clone() // clone task state information 199 | }; // release held locks 200 | match task_state { 201 | TaskState::SavedContext(ctx) => { 202 | restore_context(&ctx) // either restore the saved context 203 | } 204 | TaskState::StartingInfo(exec_base, stack_end) => { 205 | jmp_to_usermode(exec_base, stack_end) // or initialize the task with the given instruction, stack pointers 206 | } 207 | } 208 | } 209 | loop {} // no task to 
jump to 210 | } 211 | } 212 | 213 | lazy_static! { 214 | pub static ref SCHEDULER: Scheduler = Scheduler::new(); 215 | } 216 | 217 | pub unsafe extern "sysv64" fn context_switch(ctx: *const Context) { 218 | SCHEDULER.save_current_context(ctx); 219 | port::end_of_interrupt(32); 220 | SCHEDULER.run_next(); 221 | } -------------------------------------------------------------------------------- /kernel/src/serial_port.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | use spin::Mutex; 3 | use uart_16550::SerialPort; 4 | 5 | lazy_static! { 6 | pub static ref SERIAL1: Mutex = { 7 | let mut serial_port = unsafe { SerialPort::new(0x3F8) }; 8 | serial_port.init(); 9 | Mutex::new(serial_port) 10 | }; 11 | } 12 | 13 | pub fn _print(args: ::core::fmt::Arguments) { 14 | use core::fmt::Write; 15 | SERIAL1 16 | .try_lock() 17 | .map(|mut lock| lock.write_fmt(args).expect("Printing to serial failed")); 18 | } 19 | 20 | /// Prints to the host through the serial interface. 21 | #[macro_export] 22 | macro_rules! serial_print { 23 | ($($arg:tt)*) => { 24 | $crate::serial_port::_print(format_args!($($arg)*)); 25 | }; 26 | } 27 | 28 | /// Prints to the host through the serial interface, appending a newline. 29 | #[macro_export] 30 | macro_rules! 
serial_println { 31 | () => ($crate::serial_print!("\n")); 32 | ($fmt:expr) => ($crate::serial_print!(concat!($fmt, "\n"))); 33 | ($fmt:expr, $($arg:tt)*) => ($crate::serial_print!( 34 | concat!($fmt, "\n"), $($arg)*)); 35 | } 36 | -------------------------------------------------------------------------------- /kernel/src/syscalls.rs: -------------------------------------------------------------------------------- 1 | use core::arch::{asm, naked_asm}; 2 | use crate::{fat16, print}; 3 | use alloc::vec::Vec; 4 | use alloc::format; 5 | use alloc::string::{String, ToString}; 6 | use lazy_static::lazy_static; 7 | use spin::Mutex; 8 | 9 | // register for address of syscall handler 10 | const MSR_STAR: usize = 0xc0000081; 11 | const MSR_LSTAR: usize = 0xc0000082; 12 | const MSR_FMASK: usize = 0xc0000084; 13 | 14 | lazy_static! { 15 | pub static ref STDIN_BUF: Mutex>> = Mutex::new(None); 16 | } 17 | 18 | pub unsafe fn init_syscalls() { 19 | let handler_addr = handle_syscall_wrapper as *const () as u64; 20 | // clear Interrupt flag on syscall with AMD's MSR_FSTAR register 21 | asm!("\ 22 | xor rdx, rdx 23 | mov rax, 0x200 24 | wrmsr", in("rcx") MSR_FMASK, out("rdx") _); 25 | // write handler address to AMD's MSR_LSTAR register 26 | asm!("\ 27 | mov rdx, rax 28 | shr rdx, 32 29 | wrmsr", in("rax") handler_addr, in("rcx") MSR_LSTAR, out("rdx") _); 30 | // write segments to use on syscall/sysret to AMD'S MSR_STAR register 31 | asm!("\ 32 | xor rax, rax 33 | mov rdx, 0x230008 // use seg selectors 8, 16 for syscall and 43, 51 for sysret 34 | wrmsr", in("rcx") MSR_STAR, out("rax") _, out("rdx") _); 35 | } 36 | 37 | #[inline(never)] 38 | fn sys_print(str: u64, strlen: u64, i1: u64, i2: u64) -> u64 { 39 | let s = unsafe{core::str::from_raw_parts(str as *const u8, strlen as usize)}; 40 | if i1 != 0 && i2 != 0 { 41 | print!("{} {} {}", s, i1, i2); 42 | } else if i1 != 0 { 43 | print!("{} {}", s, i1); 44 | } else { 45 | print!("{}", s); 46 | } 47 | 1 48 | } 49 | 50 | #[inline(never)] 
51 | fn sys_getline(str: u64, strlen: u64) -> u64 { 52 | if let Some(mut v) = STDIN_BUF.try_lock().take() { 53 | if let Some(vv) = v.take() { 54 | let strptr = str as *mut u8; 55 | let cplen = vv.len().min(strlen as usize); 56 | unsafe {strptr.copy_from(vv.as_ptr() as *mut u8, cplen)}; 57 | return cplen as u64; 58 | } 59 | } 60 | 0 61 | } 62 | 63 | #[inline(never)] 64 | fn sys_read(inode: u64, out: u64, outlen: u64) -> u64 { 65 | let f = fat16::FAT16::new(); 66 | let outptr = out as *mut u8; 67 | if let Some(de) = f.at(inode as u16) { 68 | if de.is_dir() { 69 | let mut dir_contents = String::new(); 70 | f.ls(&de).for_each(|x| { 71 | let s = format!("{}: {}", x.name, x.index).to_string(); // make a string with the dir listings 72 | dir_contents.push_str(&s); 73 | dir_contents.push_str("\n"); 74 | }); 75 | 76 | let cplen = outlen.min(dir_contents.len() as u64); 77 | unsafe {outptr.copy_from(dir_contents.as_ptr() as *mut u8, cplen as usize)}; // write the dir contents to the out buffer 78 | cplen 79 | } else { 80 | let data = f.read_data(&de); 81 | let cplen = outlen.min(data.len() as u64); 82 | unsafe {outptr.copy_from(data.as_ptr() as *mut u8, cplen as usize)}; // write the data to the out buffer 83 | cplen 84 | } 85 | } else { 86 | 0 87 | } 88 | } 89 | 90 | #[inline(never)] 91 | fn sys_unhandled() -> u64 { 92 | panic!("bad syscall number!"); 93 | } 94 | 95 | 96 | // save the registers, handle the syscall and return to usermode 97 | #[naked] 98 | extern "C" fn handle_syscall_wrapper() { 99 | unsafe { 100 | naked_asm!("\ 101 | push rcx // backup registers for sysretq 102 | push r11 103 | push rbp // save callee-saved registers 104 | push rbx 105 | push r12 106 | push r13 107 | push r14 108 | push r15 109 | mov rbp, rsp // save rsp 110 | sub rsp, 0x400 // make some room in the stack 111 | mov rcx, r10 // move fourth syscall arg to rcx which is the fourth argument register in sysv64 112 | mov r8, rax // move syscall number to the 5th argument register 113 | call 
{syscall_alloc_stack} // call the handler with the syscall number in r8 114 | mov rsp, rbp // restore rsp from rbp 115 | pop r15 // restore callee-saved registers 116 | pop r14 117 | pop r13 118 | pop r12 119 | pop rbx 120 | pop rbp // restore stack and registers for sysretq 121 | pop r11 122 | pop rcx 123 | sysretq // back to userland", 124 | syscall_alloc_stack = sym syscall_alloc_stack); 125 | } 126 | } 127 | 128 | // allocate a temp stack and call the syscall handler 129 | unsafe extern "sysv64" fn syscall_alloc_stack(arg0: u64, arg1: u64, arg2: u64, arg3: u64, syscall: u64) -> u64 { 130 | let syscall_stack: Vec = Vec::with_capacity(0x10000); 131 | let stack_ptr = syscall_stack.as_ptr(); 132 | let retval = handle_syscall_with_temp_stack(arg0, arg1, arg2, arg3, syscall, stack_ptr); 133 | drop(syscall_stack); // we can now drop the syscall temp stack 134 | return retval; 135 | } 136 | 137 | #[inline(never)] 138 | extern "sysv64" fn handle_syscall_with_temp_stack(arg0: u64, arg1: u64, arg2: u64, arg3: u64, syscall: u64, temp_stack: *const u8) -> u64 { 139 | let old_stack: *const u8; 140 | unsafe { 141 | asm!("\ 142 | mov {old_stack}, rsp 143 | mov rsp, {temp_stack} // move our stack to the newly allocated one 144 | sti // enable interrupts", 145 | temp_stack = in(reg) temp_stack, old_stack = out(reg) old_stack); 146 | } 147 | let retval: u64 = match syscall { 148 | 0x1337 => sys_print(arg0, arg1, arg2, arg3), 149 | 0x1338 => sys_getline(arg0, arg1), 150 | 0x8EAD => sys_read(arg0, arg1, arg2), 151 | _ => sys_unhandled(), 152 | }; 153 | unsafe { 154 | asm!("\ 155 | cli // disable interrupts while restoring the stack 156 | mov rsp, {old_stack} // restore the old stack 157 | ", 158 | old_stack = in(reg) old_stack); 159 | } 160 | retval 161 | } 162 | -------------------------------------------------------------------------------- /kernel/src/vga_buffer.rs: -------------------------------------------------------------------------------- 1 | use crate::mem; 2 | use 
core::fmt; 3 | use lazy_static::lazy_static; 4 | use spin::Mutex; 5 | 6 | lazy_static! { 7 | pub static ref WRITER: Mutex = 8 | Mutex::new(ScreenWriter::new(mem::PhysAddr::new(0xb8000))); 9 | } 10 | 11 | #[macro_export] 12 | macro_rules! print { 13 | ($($arg:tt)*) => ($crate::vga_buffer::_print(format_args!($($arg)*))); 14 | } 15 | 16 | #[macro_export] 17 | macro_rules! println { 18 | () => ($crate::print!("\n")); 19 | ($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*))); 20 | } 21 | 22 | pub fn cls() { 23 | WRITER.try_lock().map(|mut lock| lock.clear()); 24 | } 25 | 26 | #[allow(dead_code)] 27 | pub fn set_color(fg: Color, bg: Color, blink: bool) { 28 | WRITER.try_lock().map(|mut lock| { 29 | lock.set_fg(fg); 30 | lock.set_bg(bg); 31 | lock.set_blink(blink); 32 | }); 33 | } 34 | 35 | #[inline(never)] 36 | pub fn _print(args: fmt::Arguments) { 37 | use core::fmt::Write; 38 | WRITER 39 | .try_lock() 40 | .map(|mut lock| lock.write_fmt(args).unwrap()); 41 | } 42 | 43 | #[derive(Clone, Copy, PartialEq, Eq)] 44 | #[repr(u8)] 45 | #[allow(dead_code)] 46 | pub enum Color { 47 | Black = 0, 48 | Blue = 1, 49 | Green = 2, 50 | Cyan = 3, 51 | Red = 4, 52 | Magenta = 5, 53 | Brown = 6, 54 | LightGray = 7, 55 | DarkGray = 8, 56 | LightBlue = 9, 57 | LightGreen = 10, 58 | LightCyan = 11, 59 | LightRed = 12, 60 | Pink = 13, 61 | Yellow = 14, 62 | White = 15, 63 | } 64 | 65 | #[repr(C)] 66 | #[derive(Copy, Clone)] 67 | pub struct ScreenChar { 68 | chr: u8, 69 | color: u8, 70 | } 71 | 72 | const BUFFER_HEIGHT: usize = 20; 73 | const BUFFER_WIDTH: usize = 80; 74 | 75 | #[repr(transparent)] 76 | struct Buffer { 77 | chars: [[ScreenChar; BUFFER_WIDTH]; BUFFER_HEIGHT], 78 | } 79 | 80 | pub struct ScreenWriter { 81 | col: usize, 82 | row: usize, 83 | fg_color: Color, 84 | bg_color: Color, 85 | blink: bool, 86 | buffer: &'static mut Buffer, 87 | } 88 | 89 | #[allow(dead_code)] 90 | impl ScreenWriter { 91 | pub fn new(phys_addr: mem::PhysAddr) -> ScreenWriter { 92 | 
ScreenWriter { 93 | col: 0, 94 | row: BUFFER_HEIGHT - 1, 95 | fg_color: Color::White, 96 | bg_color: Color::Black, 97 | blink: false, 98 | buffer: unsafe { phys_addr.to_virt().unwrap().to_ref() }, 99 | } 100 | } 101 | 102 | pub fn write(&mut self, byte: u8) { 103 | match byte { 104 | b'\n' => self.new_line(), 105 | b => { 106 | let fg_u4 = self.fg_color as u8 & 0b1111; 107 | let bg_u3 = self.bg_color as u8 & 0b1111; 108 | let blink = if self.blink { 1 } else { 0 }; 109 | let color_code: u8 = fg_u4 | bg_u3 << 4 | blink << 7; 110 | self.buffer.chars[self.row][self.col] = ScreenChar { 111 | chr: b, 112 | color: color_code, 113 | }; 114 | self.col += 1; 115 | if self.col == BUFFER_WIDTH { 116 | self.new_line(); 117 | } 118 | } 119 | } 120 | } 121 | 122 | pub fn new_line(&mut self) { 123 | for r in 0..BUFFER_HEIGHT - 1 { 124 | for c in 0..BUFFER_WIDTH { 125 | self.buffer.chars[r][c] = self.buffer.chars[r + 1][c]; 126 | } 127 | } 128 | self.clear_line(BUFFER_HEIGHT - 1); 129 | self.col = 0; 130 | } 131 | 132 | pub fn clear(&mut self) { 133 | for r in 0..BUFFER_HEIGHT { 134 | self.clear_line(r); 135 | } 136 | } 137 | 138 | pub fn clear_line(&mut self, line: usize) { 139 | self.buffer.chars[line] = [ScreenChar { 140 | chr: b' ', 141 | color: 0, 142 | }; BUFFER_WIDTH]; 143 | } 144 | 145 | pub fn set_fg(&mut self, color: Color) { 146 | self.fg_color = color; 147 | } 148 | 149 | pub fn set_bg(&mut self, color: Color) { 150 | self.bg_color = color; 151 | } 152 | 153 | pub fn set_blink(&mut self, blink: bool) { 154 | self.blink = blink; 155 | } 156 | 157 | pub fn get_line(&self, line: usize) -> [u8; BUFFER_WIDTH] { 158 | let mut line_buf = [0 as u8; BUFFER_WIDTH]; 159 | for (i, c) in self.buffer.chars[BUFFER_HEIGHT - 1 - line] 160 | .iter() 161 | .enumerate() 162 | { 163 | line_buf[i] = c.chr; 164 | } 165 | line_buf 166 | } 167 | } 168 | 169 | impl fmt::Write for ScreenWriter { 170 | fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { 171 | s.chars().for_each(|c| 
self.write(c as u8)); 172 | return Ok(()); 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /runner/.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "../x86_64-rust_os.json" 3 | 4 | [target.'cfg(target_os = "none")'] 5 | runner = "bootimage runner" 6 | -------------------------------------------------------------------------------- /runner/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust-os-runner" 3 | version = "0.1.0" 4 | authors = ["Nikos Filippakis "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | [dependencies] 9 | "rust-os" = { path = "../kernel", features = ["no-panic-handler"] } 10 | bootloader = { version = "0.9.8", features = ["map_physical_memory"] } 11 | x86_64 = "0.12.2" 12 | spin = "0.5.2" 13 | 14 | [dependencies.lazy_static] 15 | version = "1.0" 16 | features = ["spin_no_std"] 17 | 18 | [package.metadata.bootimage] 19 | test-args = ["-device", "isa-debug-exit,iobase=0xf4,iosize=0x04", "-serial", "stdio", "-display", "none"] 20 | test-success-exit-code = 33 # (0x10 << 1) | 1 21 | 22 | [package.metadata.bootloader] 23 | physical-memory-offset = "0xC0000000" 24 | -------------------------------------------------------------------------------- /runner/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(asm)] 4 | #![feature(lang_items)] 5 | #![feature(naked_functions)] 6 | #![feature(custom_test_frameworks)] 7 | #![test_runner(crate::test_runner)] 8 | #![reexport_test_harness_main = "test_main"] 9 | 10 | use bootloader::bootinfo::MemoryRegionType; 11 | use bootloader::BootInfo; 12 | use core::alloc::GlobalAlloc; 13 | use core::alloc::Layout; 14 | use core::panic::PanicInfo; 15 | use 
lazy_static::lazy_static; 16 | use rust_os::buddy_alloc::BuddyAllocatorManager; 17 | use rust_os::frame_alloc; 18 | use rust_os::frame_alloc::FrameSingleAllocator; 19 | use rust_os::global_alloc; 20 | use rust_os::interrupts::setup_idt; 21 | use rust_os::mem; 22 | use rust_os::mem::FRAME_SIZE; 23 | use rust_os::port::init_pics; 24 | use rust_os::vga_buffer::{cls, WRITER}; 25 | use rust_os::{println, serial_println}; 26 | use spin::Mutex; 27 | use x86_64::instructions::port::Port; 28 | 29 | lazy_static! { 30 | static ref BOOT_INFO: Mutex> = Mutex::new(None); 31 | } 32 | 33 | static mut DUMMY_ALLOCATOR: Option = None; 34 | 35 | #[cfg(test)] 36 | #[panic_handler] 37 | fn panic(info: &PanicInfo) -> ! { 38 | serial_println!("[failed]\n"); 39 | serial_println!("Error: {}\n", info); 40 | exit_qemu(QemuExitCode::Failed); 41 | loop {} 42 | } 43 | 44 | #[repr(u32)] 45 | pub enum QemuExitCode { 46 | Success = 0x10, 47 | Failed = 0x11, 48 | } 49 | 50 | pub fn exit_qemu(exit_code: QemuExitCode) { 51 | unsafe { 52 | let mut port = Port::new(0xf4); 53 | port.write(exit_code as u32); 54 | } 55 | } 56 | 57 | #[cfg(test)] 58 | fn test_runner(tests: &[&dyn Fn()]) { 59 | serial_println!("Running {} tests", tests.len()); 60 | for test in tests { 61 | test(); 62 | } 63 | exit_qemu(QemuExitCode::Success); 64 | loop {} 65 | } 66 | 67 | #[no_mangle] 68 | pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! 
{ 69 | { 70 | BOOT_INFO.lock().replace(boot_info); 71 | } 72 | #[cfg(test)] 73 | test_main(); 74 | loop {} 75 | } 76 | 77 | #[test_case] 78 | fn test_vga_out() { 79 | serial_println!("Testing: VGA output..."); 80 | for i in 0..200 { 81 | println!("output {}", i); 82 | } 83 | let line = WRITER.lock().get_line(1); 84 | assert_eq!(&line[0..10], "output 199".as_bytes()); 85 | serial_println!( 86 | "Last output: {} == 'output 199'", 87 | core::str::from_utf8(&line[0..10]).unwrap() 88 | ); 89 | serial_println!("[x] Test passed!"); 90 | } 91 | 92 | #[test_case] 93 | fn test_int3() { 94 | setup_idt(); 95 | cls(); 96 | serial_println!("Testing: INT3 breakpoint exception is handled..."); 97 | x86_64::instructions::interrupts::int3(); 98 | let mut found_int3 = false; 99 | for i in (1..20).rev() { 100 | let line = WRITER.lock().get_line(i); 101 | if line[0] != 0x20 { 102 | let line_s = core::str::from_utf8(&line).unwrap(); 103 | if line_s.contains("int3") { 104 | found_int3 = true; 105 | } 106 | serial_println!("Exception handler output: {}", line_s); 107 | } 108 | } 109 | assert!(found_int3); 110 | serial_println!("Found \"int3\" pattern"); 111 | serial_println!("[x] Test passed!"); 112 | } 113 | 114 | #[test_case] 115 | fn test_timer() { 116 | setup_idt(); 117 | cls(); 118 | serial_println!("Testing: Timer IRQ handler writes dots on screen..."); 119 | let line = WRITER.lock().get_line(0); 120 | assert!(!line.contains(&('.' as u8))); 121 | serial_println!("Line starts out with no dots..."); 122 | init_pics(); 123 | for _ in 0..1000000 {} 124 | let line = WRITER.lock().get_line(0); 125 | assert!(line.contains(&('.' 
as u8))); 126 | serial_println!( 127 | "Line has dots after some time: {}", 128 | core::str::from_utf8(&line).unwrap() 129 | ); 130 | serial_println!("[x] Test passed!"); 131 | } 132 | 133 | #[test_case] 134 | fn test_paging_table() { 135 | cls(); 136 | serial_println!("Testing: Paging table resolution..."); 137 | let phys = mem::PhysAddr::new(0x1000000); 138 | let virt = unsafe { phys.to_virt().unwrap() }; 139 | serial_println!("Testing {} phys to virt: {}", phys, virt); 140 | assert_eq!(virt.addr(), 0xC1000000); 141 | let (phys, pte) = unsafe { virt.to_phys().unwrap() }; 142 | serial_println!("Testing virt {} back to phys: {}", virt, phys); 143 | assert_eq!(phys.addr(), 0x1000000); 144 | serial_println!("Testing page table entry attrs: {}", pte); 145 | assert!(pte.get_bit(mem::BIT_PRESENT)); 146 | assert!(pte.get_bit(mem::BIT_WRITABLE)); 147 | assert!(pte.get_bit(mem::BIT_HUGE)); 148 | serial_println!("[x] Test passed!"); 149 | } 150 | 151 | struct DummyFrameAllocator(u64, u64); 152 | 153 | impl frame_alloc::FrameSingleAllocator for DummyFrameAllocator { 154 | unsafe fn allocate(&mut self) -> Option<mem::PhysAddr> { 155 | if self.0 < self.1 { 156 | let phys = mem::PhysAddr::new(self.0 * mem::FRAME_SIZE as u64); 157 | serial_println!(" - Allocated frame #{} ({})", self.0, phys); 158 | self.0 += 1; 159 | Some(phys) 160 | } else { 161 | serial_println!(" - Could not allocate frame!"); 162 | None 163 | } 164 | } 165 | } 166 | 167 | fn get_frame_allocator() -> DummyFrameAllocator { 168 | let frame_range = (*BOOT_INFO.lock().unwrap().memory_map) 169 | .iter() 170 | .rev() 171 | .find(|region| region.region_type == MemoryRegionType::Usable) 172 | .unwrap(); 173 | 174 | DummyFrameAllocator( 175 | frame_range.range.start_frame_number, 176 | frame_range.range.end_frame_number, 177 | ) 178 | } 179 | 180 | #[test_case] 181 | fn test_frame_allocation() { 182 | cls(); 183 | serial_println!("Testing: Frame allocation and mapping new paging table entries..."); 184 | 185 | unsafe { 186 | let 
allocator = get_frame_allocator(); 187 | DUMMY_ALLOCATOR.replace(allocator); 188 | // initialize frame allocator 189 | global_alloc::init_allocator_info(DUMMY_ALLOCATOR.as_mut().unwrap()); 190 | } 191 | 192 | let virt = mem::VirtAddr::new(0xB0000000); 193 | let phys = mem::PhysAddr::new(0xb8000); 194 | unsafe { 195 | serial_println!("Mapping physical frame {} to virtual {}", phys, virt); 196 | let pte = mem::get_page_table().map_virt_to_phys( 197 | virt, 198 | phys, 199 | mem::BIT_WRITABLE | mem::BIT_PRESENT, 200 | ); 201 | serial_println!("Mapping written in PT entry at addr {:p}: {}", pte, pte); 202 | serial_println!("Writing 'X' to virtual {}", virt); 203 | let cptr: &mut [u8; 100] = virt.to_ref(); 204 | cptr[0] = 'X' as u8; 205 | cptr[1] = 15u8; 206 | } 207 | let line = WRITER.lock().get_line(19); 208 | assert_eq!(line[0], 'X' as u8); 209 | serial_println!( 210 | "VGA buffer: {}", 211 | core::str::from_utf8(&line[0..10]).unwrap() 212 | ); 213 | 214 | serial_println!("[x] Test passed!"); 215 | } 216 | 217 | #[test_case] 218 | fn test_global_allocator() { 219 | cls(); 220 | serial_println!("Creating new buddy allocator manager"); 221 | unsafe { 222 | let mut allocator = get_frame_allocator(); 223 | let first_page = allocator.allocate().unwrap(); 224 | DUMMY_ALLOCATOR.replace(allocator); 225 | // initialize frame allocator 226 | global_alloc::init_allocator_info(DUMMY_ALLOCATOR.as_mut().unwrap()); 227 | // initialize buddy allocator with a single page 228 | let buddy_alloc_manager = BuddyAllocatorManager::new(); 229 | buddy_alloc_manager.add_memory_area(first_page, first_page.offset(FRAME_SIZE), 16); 230 | // allocate different block sizes 231 | let blk_16 = buddy_alloc_manager.alloc(Layout::from_size_align(16, 4).unwrap()); 232 | let blk_32 = buddy_alloc_manager.alloc(Layout::from_size_align(32, 4).unwrap()); 233 | let blk_64 = buddy_alloc_manager.alloc(Layout::from_size_align(64, 4).unwrap()); 234 | let blk_8 = 
buddy_alloc_manager.alloc(Layout::from_size_align(8, 4).unwrap()); 235 | // test relations of block sizes 236 | let diff = blk_32 as usize - blk_16 as usize; 237 | serial_println!( 238 | "32-block and 16-block must be 32 bytes apart: {:?} - {:?} = {}", 239 | blk_16, 240 | blk_32, 241 | diff 242 | ); 243 | assert_eq!(diff, 32); 244 | let diff = blk_64 as usize - blk_32 as usize; 245 | serial_println!( 246 | "64-block and 32-block must be 32 bytes apart: {:?} - {:?} = {}", 247 | blk_64, 248 | blk_32, 249 | diff 250 | ); 251 | assert_eq!(diff, 32); 252 | let diff = blk_8 as usize - blk_16 as usize; 253 | serial_println!( 254 | "8-block and 16-block must be 16 bytes apart: {:?} - {:?} = {}", 255 | blk_8, 256 | blk_16, 257 | diff 258 | ); 259 | assert_eq!(diff, 16); 260 | buddy_alloc_manager.dealloc(blk_16, Layout::from_size_align(16, 4).unwrap()); 261 | let blk_8_2 = buddy_alloc_manager.alloc(Layout::from_size_align(8, 8).unwrap()); 262 | serial_println!("After deallocating 16-block new 8-block should be in the same position as old 16-block: {:?} == {:?}", blk_16, blk_8_2); 263 | assert_eq!(blk_16, blk_8_2); 264 | buddy_alloc_manager.dealloc(blk_8_2, Layout::from_size_align(8, 8).unwrap()); 265 | buddy_alloc_manager.dealloc(blk_32, Layout::from_size_align(32, 4).unwrap()); 266 | buddy_alloc_manager.dealloc(blk_64, Layout::from_size_align(64, 4).unwrap()); 267 | buddy_alloc_manager.dealloc(blk_8, Layout::from_size_align(8, 4).unwrap()); 268 | // deallocate everything and allocate a 128-byte block 269 | let blk_128 = buddy_alloc_manager.alloc(Layout::from_size_align(128, 4).unwrap()); 270 | serial_println!( 271 | "After deallocating everything the blocks should have been merged together and \ 272 | a 128-block should be in the same location as the last 8-block: {:?} == {:?}", 273 | blk_128, 274 | blk_8_2 275 | ); 276 | assert_eq!(blk_128, blk_8_2); 277 | } 278 | serial_println!("[x] Test passed!"); 279 | } 280 | 
-------------------------------------------------------------------------------- /userspace/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "userspace" 3 | version = "0.1.0" 4 | edition = "2024" 5 | 6 | [[bin]] 7 | name = "boot" 8 | path = "src/boot.rs" 9 | 10 | [dependencies] 11 | x86_64 = "0.15.2" 12 | -------------------------------------------------------------------------------- /userspace/src/boot.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | use userspace::*; 4 | 5 | #[unsafe(no_mangle)] 6 | extern "C" fn _start() { 7 | let mut buf = [0u8; 1024]; 8 | printf("TOTALLY A TERMINAL\n", 0, 0); 9 | loop { 10 | printf("\n$ ",0, 0); 11 | let l = getline(&mut buf); 12 | printf("\n", 0, 0); 13 | let s = bytes_to_str(&buf, l); 14 | 15 | if prefix(s, "help") { 16 | printf("echo x -> print x\n", 0, 0); 17 | printf("read fno -> read file / list dir with this file no (root is 0)\n", 0, 0); 18 | printf("help -> show this\n", 0, 0); 19 | printf("exit -> shut down\n", 0, 0); 20 | } else if prefix(s, "echo ") { 21 | printf(&s[5..], 0, 0); 22 | } else if prefix(s, "read ") { 23 | match s[5..].parse::<u64>() { 24 | Ok(inode) => { 25 | printf("Reading inode", inode, 0); 26 | printf("\n\n", 0, 0); 27 | let l = readi(inode, &mut buf); 28 | let s = bytes_to_str(&buf, l); 29 | printf(s, 0, 0); 30 | }, 31 | Err(_) => { 32 | printf("Bad inode no", 0, 0); 33 | } 34 | } 35 | } else if prefix(s, "exit") { 36 | break; 37 | } else { 38 | printf("Unknown command, use help to see cmds", 0, 0); 39 | } 40 | 41 | sleep(10000); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /userspace/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(str_from_raw_parts)] 3 | use core::arch::asm; 4 | use core::str; 5 | use core::panic::PanicInfo; 6 | 7 | 
#[inline(never)] 8 | pub fn syscall( 9 | n: u64, 10 | arg1: u64, 11 | arg2: u64, 12 | arg3: u64, 13 | arg4: u64, 14 | ) -> u64 { 15 | let mut ret: u64; 16 | unsafe { 17 | asm!( 18 | "syscall", 19 | inlateout("rax") n as u64 => ret, 20 | in("rdi") arg1, 21 | in("rsi") arg2, 22 | in("rdx") arg3, 23 | in("r10") arg4, 24 | out("rcx") _, // rcx is used to store old rip 25 | out("r11") _, // r11 is used to store old rflags 26 | options(nostack, preserves_flags) 27 | ); 28 | } 29 | ret 30 | } 31 | 32 | #[panic_handler] 33 | fn panic(_info: &PanicInfo) -> ! { 34 | loop {} 35 | } 36 | 37 | pub fn sleep(c: u64) { 38 | for _ in 0..c { 39 | unsafe { 40 | asm!("nop"); 41 | } 42 | } 43 | } 44 | 45 | pub fn printf(str: &str, a1: u64, a2: u64) -> u64 { 46 | syscall(0x1337, str.as_ptr() as *const u8 as u64, str.len() as u64, a1, a2) 47 | } 48 | 49 | pub fn bytes_to_str(b: &[u8], l: usize) -> &str { 50 | unsafe { 51 | str::from_raw_parts(b.as_ptr(), l) 52 | } 53 | } 54 | 55 | pub fn getline(buf: &mut [u8]) -> usize { 56 | let mut l = 0; 57 | while l == 0 { 58 | l = syscall(0x1338, buf.as_ptr() as u64, buf.len() as u64, 0, 0) as usize; 59 | } 60 | l 61 | } 62 | 63 | pub fn prefix(s: &str, pre: &str) -> bool { 64 | if s.len() < pre.len() { 65 | return false; 66 | } 67 | for i in 0..pre.len() { 68 | if pre.bytes().nth(i) != s.bytes().nth(i) { 69 | return false; 70 | } 71 | } 72 | true 73 | } 74 | 75 | pub fn readi(inode: u64, out: &mut [u8]) -> usize { 76 | syscall(0x8EAD, inode, out.as_ptr() as u64, out.len() as u64, 0) as usize 77 | } 78 | 79 | #[unsafe(no_mangle)] 80 | pub fn memset(s: &mut [u8], c: u8) { 81 | for i in 0..s.len() { 82 | s[i] = c; 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /x86_64-rust_os.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "data-layout": 
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", 4 | "arch": "x86_64", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "32", 8 | "os": "none", 9 | "executables": true, 10 | "linker-flavor": "ld.lld", 11 | "linker": "rust-lld", 12 | "panic-strategy": "abort", 13 | "disable-redzone": true, 14 | "features": "-mmx,-sse,+soft-float" 15 | } 16 | --------------------------------------------------------------------------------