├── kernel ├── rust-toolchain ├── .cargo │ └── config.toml ├── src │ ├── sync │ │ ├── mod.rs │ │ ├── spinlock.rs │ │ ├── thread_block_guard.rs │ │ ├── cond_var_single.rs │ │ └── wait_queue.rs │ ├── tasking │ │ ├── mod.rs │ │ ├── scheme_container.rs │ │ ├── file.rs │ │ ├── scheme.rs │ │ ├── protection_domain.rs │ │ └── scheduler.rs │ ├── arch │ │ ├── x86_64 │ │ │ ├── grub.cfg │ │ │ ├── qemu.rs │ │ │ ├── thread_trampoline.s │ │ │ ├── link.ld │ │ │ ├── mem_funcs.s │ │ │ ├── macros.rs │ │ │ ├── tasking.rs │ │ │ ├── port.rs │ │ │ ├── serial.rs │ │ │ ├── context_switch.s │ │ │ ├── paging │ │ │ │ ├── entry.rs │ │ │ │ ├── frame.rs │ │ │ │ └── table.rs │ │ │ ├── simd.rs │ │ │ ├── address.rs │ │ │ └── mboot.s │ │ ├── mod.rs │ │ ├── cpu_data.rs │ │ ├── acpi │ │ │ ├── mod.rs │ │ │ ├── sdt.rs │ │ │ └── hpet.rs │ │ └── asid.rs │ ├── util │ │ ├── mod.rs │ │ ├── macros.rs │ │ ├── unchecked.rs │ │ ├── boot_module.rs │ │ ├── mem_funcs.rs │ │ ├── tar.rs │ │ └── lfb_text.rs │ ├── tests │ │ ├── interval_tree_test.rs │ │ ├── mod.rs │ │ ├── vmm_test.rs │ │ ├── buddy_test.rs │ │ └── heap_test.rs │ ├── wasm │ │ ├── mod.rs │ │ ├── table.rs │ │ ├── runtime.rs │ │ ├── reloc_sink.rs │ │ ├── vmctx.rs │ │ └── wasi │ │ │ ├── mod.rs │ │ │ └── definitions.rs │ ├── mm │ │ ├── mod.rs │ │ ├── mapper.rs │ │ ├── pmm.rs │ │ ├── buddy.rs │ │ ├── tcb_alloc.rs │ │ └── vma_allocator.rs │ └── lib.rs ├── x86_64-kwast.json └── Cargo.toml ├── userspace ├── .cargo │ └── config ├── Cargo.lock ├── Cargo.toml └── wasm-test │ ├── Cargo.toml │ └── src │ └── main.rs ├── docs └── screenshot.png ├── toolchain ├── SHA256SUMS ├── setup_cross_binutils.sh └── disassemble_hexdump.py ├── .gitignore ├── lib └── wasm-call │ ├── Cargo.toml │ ├── Cargo.lock │ └── src │ └── lib.rs ├── .travis.yml ├── run_tests ├── LICENSE ├── Makefile ├── bochsrc └── README.md /kernel/rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly 2 | -------------------------------------------------------------------------------- /userspace/.cargo/config: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "wasm32-wasi" 3 | -------------------------------------------------------------------------------- /docs/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kwast-os/kwast/HEAD/docs/screenshot.png -------------------------------------------------------------------------------- /kernel/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [unstable] 2 | build-std = ["core", "compiler_builtins", "alloc"] 3 | 4 | -------------------------------------------------------------------------------- /toolchain/SHA256SUMS: -------------------------------------------------------------------------------- 1 | ab66fc2d1c3ec0359b8e08843c9f33b63e8707efdff5e4cc5c200eae24722cbf *binutils-2.33.1.tar.xz 2 | -------------------------------------------------------------------------------- /kernel/src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cond_var_single; 2 | pub mod spinlock; 3 | pub mod thread_block_guard; 4 | pub mod wait_queue; 5 | -------------------------------------------------------------------------------- /kernel/src/tasking/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod file; 2 | pub mod protection_domain; 3 | pub mod scheduler; 4 | pub 
mod scheme; 5 | pub mod scheme_container; 6 | pub mod thread; 7 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/grub.cfg: -------------------------------------------------------------------------------- 1 | set default=0 2 | set timeout=0 3 | 4 | menuentry "default" { 5 | multiboot2 /boot/kernel 6 | module2 /boot/initrd.tar 7 | boot 8 | } 9 | -------------------------------------------------------------------------------- /kernel/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod boot_module; 2 | pub mod tar; 3 | pub mod unchecked; 4 | #[macro_use] 5 | pub mod macros; 6 | mod font; 7 | pub mod lfb_text; 8 | pub mod mem_funcs; 9 | -------------------------------------------------------------------------------- /userspace/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | [[package]] 4 | name = "wasm-test" 5 | version = "0.1.0" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /**/.idea/ 2 | 3 | /build 4 | /**/target/ 5 | **/*.rs.bk 6 | 7 | *.tar.xz 8 | /toolchain/binutils-* 9 | /toolchain/build-* 10 | /toolchain/opt 11 | /toolchain/*.cookie 12 | -------------------------------------------------------------------------------- /userspace/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "wasm-test", 4 | ] 5 | 6 | [profile.dev] 7 | opt-level = "z" 8 | 9 | [profile.release] 10 | #lto = true 11 | opt-level = 3 12 | -------------------------------------------------------------------------------- /kernel/src/tests/interval_tree_test.rs: -------------------------------------------------------------------------------- 1 | /// Interval tree assigner tests. 2 | #[cfg(feature = "test-interval-tree-tests")] 3 | pub fn test_main() { 4 | crate::mm::avl_interval_tree::test_main(); 5 | } 6 | -------------------------------------------------------------------------------- /kernel/src/util/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! unwrap_or_return { 2 | ($e:expr) => { 3 | match $e { 4 | Some(value) => value, 5 | None => return, 6 | } 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /kernel/src/arch/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_arch = "x86_64")] 2 | pub use x86_64::*; 3 | 4 | #[cfg(target_arch = "x86_64")] 5 | #[macro_use] 6 | mod x86_64; 7 | 8 | mod acpi; 9 | 10 | pub mod asid; 11 | pub mod cpu_data; 12 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/qemu.rs: -------------------------------------------------------------------------------- 1 | use super::port::write_port32; 2 | 3 | /// Will make QEMU exit with status (status << 1) | 1. 4 | #[allow(dead_code)] 5 | pub unsafe fn qemu_exit(status: u32) -> ! { 6 | write_port32(0xf4, status); 7 | loop {} 8 | } 9 | -------------------------------------------------------------------------------- /kernel/src/wasm/mod.rs: -------------------------------------------------------------------------------- 1 | //! WebAssembly runtime 2 | //! 
Used https://github.com/bytecodealliance/wasmtime/tree/master/crates/jit/src as a reference. 3 | 4 | mod func_env; 5 | pub mod main; 6 | mod module_env; 7 | mod reloc_sink; 8 | mod runtime; 9 | mod table; 10 | pub mod vmctx; 11 | pub mod wasi; 12 | -------------------------------------------------------------------------------- /userspace/wasm-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "wasm-test" 3 | version = "0.1.0" 4 | authors = ["nielsdos <7771979+nielsdos@users.noreply.github.com>"] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/thread_trampoline.s: -------------------------------------------------------------------------------- 1 | .section .text 2 | 3 | .global thread_trampoline 4 | .type thread_trampoline, @function 5 | 6 | thread_trampoline: 7 | // The stack is page aligned right now, so also 16 byte like it should for the System V ABI. 8 | movq %rbp, %rdi 9 | callq *%rbx 10 | 11 | // Should not get here. Applications should call exit. 12 | ud2 13 | -------------------------------------------------------------------------------- /lib/wasm-call/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "wasm-call" 3 | version = "0.1.0" 4 | authors = ["nielsdos <7771979+nielsdos@users.noreply.github.com>"] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [lib] 10 | proc-macro = true 11 | 12 | [dependencies] 13 | proc-macro2 = "1.0" 14 | syn = "1.0" 15 | quote = "1.0" 16 | proc-macro-error = "1.0" 17 | -------------------------------------------------------------------------------- /kernel/src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | use core::panic::PanicInfo; 2 | 3 | pub use buddy_test::*; 4 | pub use heap_test::*; 5 | pub use interval_tree_test::*; 6 | pub use vmm_test::*; 7 | 8 | use crate::arch::qemu; 9 | 10 | mod buddy_test; 11 | mod heap_test; 12 | mod interval_tree_test; 13 | mod vmm_test; 14 | 15 | #[panic_handler] 16 | fn panic(info: &PanicInfo) -> ! { 17 | println!("{:#?}", info); 18 | unsafe { qemu::qemu_exit(1) } 19 | } 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | dist: xenial 3 | rust: 4 | - nightly 5 | addons: 6 | apt: 7 | packages: 8 | - qemu 9 | - xorriso 10 | cache: 11 | - apt 12 | - directories: 13 | - toolchain 14 | before_install: 15 | - rustup component add rust-src 16 | - rustup target add wasm32-wasi 17 | - cargo install cargo-xbuild 18 | install: 19 | - cd toolchain && ./setup_cross_binutils.sh && cd .. 
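# Note: the toolchain directory is cached (see `cache:` above), and setup_cross_binutils.sh drops a cookie file after a successful build, so repeated CI runs restore the cache and skip rebuilding binutils.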
20 | script: 21 | - ./run_tests 22 | -------------------------------------------------------------------------------- /kernel/x86_64-kwast.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "code-model": "small", 4 | "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", 5 | "arch": "x86_64", 6 | "target-endian": "little", 7 | "target-pointer-width": "64", 8 | "target-c-int-width": "32", 9 | "os": "none", 10 | "linker-flavor": "gcc", 11 | "panic-strategy": "abort", 12 | "disable-redzone": true, 13 | "features": "-mmx,-sse,+soft-float" 14 | } 15 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/link.ld: -------------------------------------------------------------------------------- 1 | ENTRY(start) 2 | 3 | SECTIONS { 4 | . = 1M; 5 | 6 | .rodata BLOCK(4K) : ALIGN(4K) { 7 | KEEP(*(.mboot)) 8 | *(.rodata .rodata.*) 9 | } 10 | 11 | .text BLOCK(4K) : ALIGN(4K) { 12 | *(.text .text.*) 13 | } 14 | 15 | .data BLOCK(4K) : ALIGN(4K) { 16 | *(.data .data.*) 17 | } 18 | 19 | .bss BLOCK(4K) : ALIGN(4K) { 20 | *(.bss .bss.*) 21 | } 22 | 23 | KERNEL_END_PTR = .; 24 | } 25 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/mem_funcs.s: -------------------------------------------------------------------------------- 1 | .section .text 2 | 3 | .global page_clear 4 | .type page_clear, @function 5 | // page_clear(destination) 6 | page_clear: 7 | sub $16, %rsp 8 | movdqu %xmm0, (%rsp) 9 | pxor %xmm0, %xmm0 10 | mov $4096, %ecx 11 | 1: 12 | movdqa %xmm0, 0(%rdi) 13 | movdqa %xmm0, 16(%rdi) 14 | movdqa %xmm0, 32(%rdi) 15 | movdqa %xmm0, 48(%rdi) 16 | add $64, %rdi 17 | sub $64, %ecx 18 | jnz 1b 19 | movdqu (%rsp), %xmm0 20 | add $16, %rsp 21 | ret 22 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! println { 3 | () => ($crate::print!("\n")); 4 | ($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*))); 5 | } 6 | 7 | #[macro_export] 8 | macro_rules! print { 9 | ($($arg:tt)*) => { 10 | #[cfg(not(feature = "integration-test"))] 11 | $crate::util::lfb_text::_print(format_args!($($arg)*)); 12 | #[cfg(feature = "integration-test")] 13 | $crate::arch::serial::_print(format_args!($($arg)*)); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /kernel/src/util/unchecked.rs: -------------------------------------------------------------------------------- 1 | pub trait UncheckedUnwrap { 2 | /// Unwraps a type without the cost of the branch: no safety check will be performed. 3 | /// If you're in debug mode, will assert. 4 | unsafe fn unchecked_unwrap(self) -> T; 5 | } 6 | 7 | impl UncheckedUnwrap for Option { 8 | unsafe fn unchecked_unwrap(self) -> T { 9 | debug_assert!(self.is_some()); 10 | 11 | if let Some(inner) = self { 12 | inner 13 | } else { 14 | core::hint::unreachable_unchecked(); 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /kernel/src/util/boot_module.rs: -------------------------------------------------------------------------------- 1 | //! Boot module adapter interface. 2 | 3 | use crate::arch::address::VirtAddr; 4 | 5 | /// Range. 
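Describes a block of virtual memory by a start address (`start`) and a length (`len`), presumably in bytes.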
6 | #[derive(Debug, Copy, Clone)] 7 | pub struct Range { 8 | pub start: VirtAddr, 9 | pub len: usize, 10 | } 11 | 12 | /// A boot module. 13 | #[derive(Copy, Clone, Debug)] 14 | pub struct BootModule { 15 | pub range: Range, 16 | } 17 | 18 | /// Trait for providing the boot modules. 19 | pub trait BootModuleProvider: Iterator { 20 | /// Gives the address range where the modules are. 21 | fn range(&self) -> Option; 22 | } 23 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/tasking.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::tasking::thread::Stack; 3 | 4 | impl Stack { 5 | /// Prepares the stack to execute the trampoline and go to `entry`. 6 | pub unsafe fn prepare_trampoline(&mut self, entry: VirtAddr, first_arg: usize) { 7 | extern "C" { 8 | fn thread_trampoline(); 9 | } 10 | 11 | let rflags: u64 = (1 << 9) | (1 << 1); 12 | self.push(thread_trampoline as usize); 13 | self.push(rflags); 14 | self.push(entry.as_u64()); // rbx 15 | self.push(first_arg); // rbp 16 | self.push(0u64); // r12 17 | self.push(0u64); // r13 18 | self.push(0u64); // r14 19 | self.push(0u64); // r15 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /run_tests: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | run_test() { 4 | printf "\033[1;33mRunning test $1\033[0m\n" 5 | make iso KERNEL_CARGOFLAGS="--features integration-test,$1" 6 | 7 | if [ $? -ne 0 ]; then 8 | printf "\033[1;31mCompile failed\033[0m\n" 9 | exit 1 10 | fi 11 | 12 | qemu-system-x86_64 -m 128 -device isa-debug-exit,iobase=0xf4,iosize=0x04 -cdrom build/img.iso --serial mon:stdio --display none 13 | 14 | if [ $? 
-ne 1 ]; then 15 | printf "\033[1;31mTest $1 failed\033[0m\n" 16 | exit 1 17 | else 18 | printf "\033[1;32mTest $1 succeeded\033[0m\n" 19 | fi 20 | } 21 | 22 | run_test 'test-vmm' 23 | run_test 'test-buddy' 24 | run_test 'test-heap-one-alloc' 25 | run_test 'test-heap-big-alloc' 26 | run_test 'test-heap-realloc' 27 | run_test 'test-heap-pointers' 28 | run_test 'test-interval-tree' 29 | run_test 'test-interval-tree-fragments' 30 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/port.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | pub unsafe fn read_port8(port: u16) -> u8 { 4 | let ret: u8; 5 | llvm_asm!("inb $1, $0" : "={al}" (ret) : "{dx}N" (port) :: "volatile"); 6 | ret 7 | } 8 | 9 | pub unsafe fn write_port8(port: u16, val: u8) { 10 | llvm_asm!("outb $1, $0" :: "{dx}N" (port), "{al}" (val) :: "volatile"); 11 | } 12 | 13 | pub unsafe fn read_port16(port: u16) -> u16 { 14 | let ret: u16; 15 | llvm_asm!("inw $1, $0" : "={ax}" (ret) : "{dx}N" (port) :: "volatile"); 16 | ret 17 | } 18 | 19 | pub unsafe fn write_port16(port: u16, val: u16) { 20 | llvm_asm!("outw $1, $0" :: "{dx}N" (port), "{ax}" (val) :: "volatile"); 21 | } 22 | 23 | pub unsafe fn read_port32(port: u16) -> u32 { 24 | let ret: u32; 25 | llvm_asm!("inl $1, $0" : "={eax}" (ret) : "{dx}N" (port) :: "volatile"); 26 | ret 27 | } 28 | 29 | pub unsafe fn write_port32(port: u16, val: u32) { 30 | llvm_asm!("outl $1, $0" :: "{dx}N" (port), "{eax}" (val) :: "volatile"); 31 | } 32 | -------------------------------------------------------------------------------- /kernel/src/wasm/table.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::wasm::vmctx::{VmTable, VmTableElement}; 3 | use alloc::vec::Vec; 4 | use cranelift_wasm::TableElementType; 5 | 6 | /// A table, manages table data for the runtime. 7 | pub struct Table { 8 | vec: Vec, 9 | } 10 | 11 | impl Table { 12 | /// Creates a new table. 13 | pub fn new(table: &cranelift_wasm::Table) -> Self { 14 | let vec = match table.ty { 15 | TableElementType::Func => vec![VmTableElement::null(); table.minimum as usize], 16 | TableElementType::Val(_) => unimplemented!("other type than anyfunc"), 17 | }; 18 | 19 | Self { vec } 20 | } 21 | 22 | /// Sets a table element. 
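Writes directly into the backing `Vec`, so the call panics if `offset` is outside the table's current bounds.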
23 | pub fn set(&mut self, offset: usize, value: VmTableElement) { 24 | self.vec[offset] = value; 25 | } 26 | 27 | /// Gets the VmContext representation 28 | pub fn as_vm_table(&self) -> VmTable { 29 | VmTable { 30 | base_address: VirtAddr::new(self.vec.as_ptr() as usize), 31 | amount_items: self.vec.len() as u32, 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019-2020 The Kwast contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /kernel/src/sync/spinlock.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::interrupts::{irq_restore, irq_save_and_stop, IrqState}; 2 | use crate::arch::{check_should_schedule, preempt_disable, preempt_enable}; 3 | use spin::{self, SchedulerInfluence}; 4 | 5 | pub struct PreemptCounterInfluence {} 6 | 7 | pub struct IrqInfluence { 8 | state: IrqState, 9 | } 10 | 11 | impl SchedulerInfluence for PreemptCounterInfluence { 12 | #[inline(always)] 13 | fn activate() -> Self { 14 | preempt_disable(); 15 | Self {} 16 | } 17 | } 18 | 19 | impl Drop for PreemptCounterInfluence { 20 | #[inline] 21 | fn drop(&mut self) { 22 | preempt_enable(); 23 | check_should_schedule(); 24 | } 25 | } 26 | 27 | impl SchedulerInfluence for IrqInfluence { 28 | fn activate() -> Self { 29 | Self { 30 | state: irq_save_and_stop(), 31 | } 32 | } 33 | } 34 | 35 | impl Drop for IrqInfluence { 36 | #[inline] 37 | fn drop(&mut self) { 38 | irq_restore(self.state); 39 | } 40 | } 41 | 42 | // TODO: apply Hardware Lock Elision if supported 43 | 44 | pub type Spinlock = spin::Mutex; 45 | pub type RwLock = spin::RwLock; 46 | pub type IrqSpinlock = spin::Mutex; 47 | -------------------------------------------------------------------------------- /kernel/src/tests/vmm_test.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::arch::paging::{ActiveMapping, EntryFlags}; 3 | use crate::mm::mapper::MemoryMapper; 4 | 5 | /// Memory test. 
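Maps a few single pages, frees them again, and checks that a freed frame gets recycled: after unmapping `va1`, mapping `va3` should hand back the same physical frame.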
6 | #[cfg(feature = "test-vmm")] 7 | pub fn test_main() { 8 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 9 | 10 | // Note: `va1` and `va3` are in the same P2 11 | let va1 = VirtAddr::new(0x400_000); 12 | let va2 = VirtAddr::new(0xdeadb000); 13 | let va3 = VirtAddr::new(0x600_000); 14 | 15 | mapping 16 | .get_and_map_single(va1, EntryFlags::PRESENT | EntryFlags::WRITABLE) 17 | .expect("could not map page #1"); 18 | mapping 19 | .get_and_map_single(va2, EntryFlags::PRESENT | EntryFlags::WRITABLE) 20 | .expect("could not map page #2"); 21 | 22 | mapping.free_and_unmap_single(va2); 23 | 24 | // Should not PF 25 | let ptr: *mut i32 = va1.as_mut(); 26 | unsafe { 27 | ptr.write(42); 28 | } 29 | 30 | let phys = mapping.translate(va1); 31 | mapping.free_and_unmap_single(va1); 32 | 33 | mapping 34 | .get_and_map_single(va3, EntryFlags::PRESENT) 35 | .expect("could not map page #3"); 36 | assert_eq!(mapping.translate(va3), phys); 37 | mapping.free_and_unmap_single(va3); 38 | } 39 | -------------------------------------------------------------------------------- /kernel/src/sync/thread_block_guard.rs: -------------------------------------------------------------------------------- 1 | use crate::tasking::scheduler; 2 | use crate::tasking::scheduler::with_current_thread; 3 | use crate::tasking::thread::ThreadStatus; 4 | use core::intrinsics::likely; 5 | 6 | /// Guard that marks the thread as blocked. 7 | /// The thread will be yielded and woken up later on drop if the resource hasn't become available 8 | /// already. 9 | /// If it's become available already, no yield will happen and the thread can continue immediately. 10 | pub struct ThreadBlockGuard {} 11 | 12 | impl ThreadBlockGuard { 13 | /// Activates the block guard. 14 | pub fn activate() -> Self { 15 | // Mark the thread as blocked. 16 | // Next context switch the thread will block. 17 | with_current_thread(|thread| thread.set_status(ThreadStatus::Blocked)); 18 | Self {} 19 | } 20 | } 21 | 22 | impl Drop for ThreadBlockGuard { 23 | fn drop(&mut self) { 24 | // It is possible (although very unlikely) that we don't have to block anymore 25 | // because what we block on has become available already. 26 | // The scheduler will have marked the thread as `Runnable` again in that case. 27 | // We don't have to yield in that case. 28 | // TL;DR: if it's still blocked: yield. 29 | if likely(with_current_thread(|thread| thread.status()) == ThreadStatus::Blocked) { 30 | scheduler::thread_yield(); 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/serial.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use lazy_static::lazy_static; 4 | 5 | use super::port::{read_port8, write_port8}; 6 | use crate::sync::spinlock::IrqSpinlock; 7 | 8 | struct SerialPort { 9 | port: u16, 10 | } 11 | 12 | lazy_static! { 13 | static ref PORT: IrqSpinlock = IrqSpinlock::new(SerialPort::new(0x3F8)); 14 | } 15 | 16 | #[allow(dead_code)] 17 | impl SerialPort { 18 | /// Inits and creates a serial port. 19 | fn new(port: u16) -> Self { 20 | unsafe { 21 | write_port8(port + 1, 0x00); 22 | write_port8(port + 3, 0x80); 23 | write_port8(port, 0x01); 24 | write_port8(port + 1, 0x00); 25 | write_port8(port + 3, 0x03); 26 | write_port8(port + 2, 0xc7); 27 | write_port8(port + 4, 0x0b); 28 | write_port8(port + 1, 0x01); 29 | } 30 | 31 | Self { port } 32 | } 33 | 34 | /// Sends a byte. 
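Busy-waits until the line status register at `port + 5` reports the transmit buffer empty (bit 5), then writes the byte to the data register.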
35 | fn send(&mut self, byte: u8) { 36 | unsafe { 37 | while (read_port8(self.port + 0x05) & 0x20) == 0 {} 38 | write_port8(self.port, byte); 39 | } 40 | } 41 | } 42 | 43 | impl fmt::Write for SerialPort { 44 | fn write_str(&mut self, s: &str) -> fmt::Result { 45 | for byte in s.bytes() { 46 | self.send(byte); 47 | } 48 | 49 | Ok(()) 50 | } 51 | } 52 | 53 | pub fn _print(args: fmt::Arguments) { 54 | use core::fmt::Write; 55 | PORT.lock().write_fmt(args).unwrap(); 56 | } 57 | -------------------------------------------------------------------------------- /kernel/src/tests/buddy_test.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::arch::paging::{ActiveMapping, EntryFlags}; 3 | use crate::mm::buddy::{Tree, MAX_LEVEL}; 4 | use crate::mm::mapper::MemoryMapper; 5 | use core::mem::size_of; 6 | 7 | /// Buddy test. 8 | #[cfg(feature = "test-buddy")] 9 | pub fn test_main() { 10 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 11 | let addr = VirtAddr::new(0xFFF00000); 12 | mapping 13 | .map_range( 14 | addr, 15 | size_of::(), 16 | EntryFlags::PRESENT | EntryFlags::WRITABLE, 17 | ) 18 | .unwrap(); 19 | let tree = unsafe { Tree::from(addr) }; 20 | 21 | assert_eq!(tree.alloc(3), Some(0)); 22 | assert_eq!(tree.alloc(2), Some(8)); 23 | assert_eq!(tree.alloc(3), Some(16)); 24 | assert_eq!(tree.alloc(4), Some(32)); 25 | assert_eq!(tree.alloc(2), Some(12)); 26 | assert_eq!(tree.alloc(3), Some(24)); 27 | assert_eq!(tree.alloc(6), Some(64)); 28 | assert_eq!(tree.alloc(7), Some(128)); 29 | assert_eq!(tree.alloc(MAX_LEVEL), None); 30 | 31 | assert_eq!(tree.alloc(3), Some(48)); 32 | tree.dealloc(3, 0); 33 | assert_eq!(tree.alloc(3), Some(0)); 34 | tree.dealloc(3, 48); 35 | assert_eq!(tree.alloc(3), Some(48)); 36 | tree.dealloc(4, 32); 37 | assert_eq!(tree.alloc(2), Some(32)); 38 | assert_eq!(tree.alloc(2), Some(36)); 39 | assert_eq!(tree.alloc(4), Some(256)); 40 | assert_eq!(tree.alloc(2), Some(40)); 41 | assert_eq!(tree.alloc(2), Some(44)); 42 | } 43 | -------------------------------------------------------------------------------- /toolchain/setup_cross_binutils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cookie='built-2020-01-29.cookie' 4 | cross_rel_path=./opt/cross 5 | 6 | if [ ! -d $cross_rel_path ]; then 7 | mkdir -p $cross_rel_path 8 | fi 9 | 10 | cross_path=$(realpath $cross_rel_path) 11 | 12 | binutils_ver=2.33.1 13 | binutils="https://ftp.gnu.org/gnu/binutils/binutils-$binutils_ver.tar.xz" 14 | binutils_file=$(basename $binutils) 15 | 16 | # This will check if a toolchain was already built. 17 | # If it wasn't, then it'll build. Otherwise it won't. 18 | 19 | if [ -f "$cookie" ]; then 20 | echo Toolchain was already built. 21 | exit 0 22 | else 23 | # Cleanup old toolchain 24 | rm -rf opt build-binutils binutils-* 25 | fi 26 | 27 | if [ ! -f $binutils_file ]; then 28 | if hash curl 2>/dev/null; then 29 | downloader="curl --output" 30 | elif hash wget 2>/dev/null; then 31 | downloader="wget -O" 32 | else 33 | echo "Neither curl or wget is available on your system. Download binutils manually from $binutils" >&2 34 | exit 1 35 | fi 36 | 37 | $downloader $binutils_file $binutils 38 | fi 39 | 40 | sha256sum -c SHA256SUMS 41 | if [ $? 
-ne 0 ]; then 42 | echo "SHA256 does not match" >&2 43 | exit 2 44 | fi 45 | 46 | tar xf $binutils_file 47 | 48 | mkdir build-binutils 49 | cd build-binutils 50 | ../binutils-$binutils_ver/configure --target=x86_64-elf --prefix="$cross_path" \ 51 | --disable-nls --disable-werror \ 52 | --disable-gdb --disable-libdecnumber --disable-readline --disable-sim 53 | 54 | make -j2 || exit 1 55 | make install || exit 1 56 | cd .. 57 | 58 | touch "$cookie" 59 | -------------------------------------------------------------------------------- /toolchain/disassemble_hexdump.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import struct 4 | import os 5 | import tempfile 6 | 7 | list = [0x55, 0x48, 0x89, 0xe5, 0xb8, 0x1, 0x0, 0x0, 0x0, 0x5d, 0xc3, 0x55, 0x48, 0x89, 0xe5, 0xb8, 0x2, 0x0, 0x0, 0x0, 0x5d, 0xc3, 0x55, 0x48, 0x89, 0xe5, 0xb8, 0x3, 0x0, 0x0, 0x0, 0x5d, 0xc3, 0x55, 0x48, 0x89, 0xe5, 0xb8, 0x4, 0x0, 0x0, 0x0, 0x5d, 0xc3, 0x55, 0x48, 0x89, 0xe5, 0xb8, 0x5, 0x0, 0x0, 0x0, 0x5d, 0xc3, 0x55, 0x48, 0x89, 0xe5, 0x40, 0x89, 0xf0, 0x83, 0xc0, 0xfd, 0x83, 0xf8, 0x7, 0x77, 0x1e, 0x83, 0xf8, 0x8, 0x73, 0x12, 0x89, 0xc0, 0x48, 0x8d, 0xd, 0x2c, 0x0, 0x0, 0x0, 0x48, 0x63, 0x4, 0x81, 0x48, 0x1, 0xc1, 0xff, 0xe1, 0xe8, 0x9e, 0xff, 0xff, 0xff, 0x5d, 0xc3, 0xe8, 0xc3, 0xff, 0xff, 0xff, 0x5d, 0xc3, 0xe8, 0x9b, 0xff, 0xff, 0xff, 0x5d, 0xc3, 0xe8, 0x9f, 0xff, 0xff, 0xff, 0x5d, 0xc3, 0xe8, 0xa3, 0xff, 0xff, 0xff, 0x5d, 0xc3, 0xdd, 0xff, 0xff, 0xff, 0xe4, 0xff, 0xff, 0xff, 0xe4, 0xff, 0xff, 0xff, 0xe4, 0xff, 0xff, 0xff, 0xe4, 0xff, 0xff, 0xff, 0xeb, 0xff, 0xff, 0xff, 0xf2, 0xff, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xff, 0x55, 0x48, 0x89, 0xe5, 0x41, 0x57, 0x48, 0x83, 0xec, 0x8, 0x48, 0x89, 0xbc, 0x24, 0x0, 0x0, 0x0, 0x0, 0xb8, 0x3, 0x0, 0x0, 0x0, 0x49, 0x89, 0xff, 0x4c, 0x89, 0xff, 0x40, 0x89, 0xc6, 0xe8, 0x72, 0xff, 0xff, 0xff, 0x4c, 0x8b, 0xbc, 0x24, 0x0, 0x0, 0x0, 0x0, 0x49, 0x8b, 0x4f, 0x8, 0x4c, 0x89, 0xff, 0x40, 0x89, 0xc6, 0xff, 0xd1, 0x48, 0x83, 0xc4, 0x8, 0x41, 0x5f, 0x5d, 0xc3] 8 | list = [0x5d, 0x41, 0x5c, 0x5b, 0x5d, 0xc3, 0xf, 0xb, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x4c, 0x89, 0xe7, 0xe8, 0x14, 0x2b, 0x2a, 0x0, 0xb9, 0x10, 0x0, 0x0, 0x0, 0x49, 0x89, 0xc4, 0x41, 0xd3, 0xe4] 9 | 10 | out = tempfile.NamedTemporaryFile(mode = 'wb') 11 | out.write(struct.pack(f'{len(list)}B', *list)) 12 | out.flush() 13 | os.system(f'objdump -b binary -D -m i386:x86-64 {out.name} --adjust-vma 0x7ffffffb0097') 14 | out.close() 15 | -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kernel" 3 | version = "0.1.0" 4 | authors = ["nielsdos <7771979+nielsdos@users.noreply.github.com>"] 5 | edition = "2018" 6 | 7 | [lib] 8 | crate-type = ["staticlib"] 9 | 10 | [features] 11 | integration-test = [] 12 | test-vmm = [] 13 | test-buddy = [] 14 | test-heap-one-alloc = [] 15 | test-heap-big-alloc = [] 16 | test-heap-realloc = [] 17 | test-heap-pointers = [] 18 | test-interval-tree-tests = [] 19 | test-interval-tree = ["test-interval-tree-tests"] 20 | test-interval-tree-fragments = ["test-interval-tree-tests"] 21 | 22 | [profile.dev] 23 | opt-level = "z" 24 | panic = "abort" 25 | 26 | [profile.release] 27 | lto = true 28 | opt-level = 2 29 | codegen-units = 1 30 | panic = "abort" 31 | 32 | [dependencies] 33 | static_assertions = "1.1.0" 34 | atomic = { version = "0.4", features = ["nightly"] } # 0.5 seems to have a bug where it 
doesn't detect the atomic types properly 35 | wasm-call = { path = "../lib/wasm-call" } 36 | raw-cpuid = "^7.0" 37 | bitflags = "^1.2.1" 38 | multiboot2 = "^0.8.1" 39 | spin = "0.5" 40 | lazy_static = { version = "^1.4.0", features = ["spin_no_std"] } 41 | hashbrown = "^0.8.0" 42 | memoffset = "0.5" 43 | cranelift-wasm = { git = "https://github.com/kwast-os/wasmtime", package="cranelift-wasm", branch = "main", default-features = false, features = ["core"] } 44 | cranelift-codegen = { git = "https://github.com/kwast-os/wasmtime", package="cranelift-codegen", branch = "main", default-features = false, features = ["core"] } 45 | cranelift-native = { git = "https://github.com/kwast-os/wasmtime", package="cranelift-native", branch = "main", default-features = false, features = ["core"] } 46 | 47 | [replace] 48 | "wasmparser:0.59.0" = { git = "https://github.com/kwast-os/wasm-tools", "branch" = "0.59.0" } 49 | "spin:0.5.2" = { git = "https://github.com/kwast-os/spin-rs", rev = "434b4f9" } 50 | -------------------------------------------------------------------------------- /kernel/src/sync/cond_var_single.rs: -------------------------------------------------------------------------------- 1 | use crate::mm::tcb_alloc::with_thread; 2 | use crate::sync::spinlock::PreemptCounterInfluence; 3 | use crate::sync::thread_block_guard::ThreadBlockGuard; 4 | use crate::tasking::scheduler::with_current_thread; 5 | use crate::tasking::thread::ThreadId; 6 | use atomic::{Atomic, Ordering}; 7 | use spin::MutexGuard; 8 | 9 | /// Simple version of a condition variable: single waiter, multiple notifiers. 10 | /// There's no spurious wakeups. 11 | pub struct CondVarSingle { 12 | waiter: Atomic, 13 | } 14 | 15 | impl CondVarSingle { 16 | /// Creates a new `CondVarSingle`. 17 | pub fn new() -> Self { 18 | Self { 19 | waiter: Atomic::new(ThreadId::zero()), 20 | } 21 | } 22 | 23 | /// Notifies the waiter if there is one. 24 | pub fn notify(&self) { 25 | let tid = self.waiter.swap(ThreadId::zero(), Ordering::Acquire); 26 | if tid != ThreadId::zero() { 27 | // We shouldn't do wakeup + yield here. 28 | // It implies the caller wants to yield, but it might for example be still preparing it's blocked state. 29 | with_thread(tid, |t| t.wakeup()); 30 | } 31 | } 32 | 33 | /// Wait until notified. 34 | pub fn wait(&self, guard: MutexGuard) { 35 | let _block_guard = ThreadBlockGuard::activate(); 36 | with_current_thread(|thread| loop { 37 | match self.waiter.compare_exchange_weak( 38 | ThreadId::zero(), 39 | thread.id, 40 | Ordering::Acquire, 41 | Ordering::Relaxed, 42 | ) { 43 | Ok(_) => break, 44 | Err(_) => continue, 45 | }; 46 | }); 47 | drop(guard); 48 | } 49 | } 50 | 51 | impl Drop for CondVarSingle { 52 | fn drop(&mut self) { 53 | self.notify(); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /kernel/src/arch/cpu_data.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::asid::AsidManager; 2 | use core::cell::{Cell, RefCell}; 3 | 4 | /// Per-CPU data. 5 | #[repr(C, align(128))] // 128 = false sharing threshold 6 | pub struct CpuData { 7 | /// Self reference. 8 | reference: usize, 9 | /// Preemption disable count. Zero means enabled. 10 | preempt_count: u32, 11 | /// Should schedule flag. 12 | should_schedule: u32, 13 | /// Address Space Identifier stuff. 14 | asid_enable: Cell, 15 | asid_manager: RefCell, 16 | } 17 | 18 | impl CpuData { 19 | /// Creates a new empty per-CPU data. 
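The self `reference` is left at 0 here; `prepare_to_set` fills it in once the structure's final address is known.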
20 | pub const fn new() -> Self { 21 | Self { 22 | // Need to fill in once we know the address. 23 | reference: 0, 24 | preempt_count: 0, 25 | should_schedule: 0, 26 | asid_enable: Cell::new(false), 27 | asid_manager: RefCell::new(AsidManager::new()), 28 | } 29 | } 30 | 31 | /// Offset of field `preempt_count`. 32 | pub const fn preempt_count_offset() -> usize { 33 | 8 34 | } 35 | 36 | /// Gets the `preempt_count`. 37 | pub fn preempt_count(&self) -> u32 { 38 | self.preempt_count 39 | } 40 | 41 | /// Prepare to set the per-CPU data. 42 | pub fn prepare_to_set(&mut self, asid_enable: bool) { 43 | // Assembly code also trusts on this. 44 | assert_eq!( 45 | offset_of!(CpuData, preempt_count), 46 | Self::preempt_count_offset() 47 | ); 48 | assert_eq!(offset_of!(CpuData, should_schedule), 12); 49 | assert_eq!(self.reference, 0); 50 | self.reference = self as *mut _ as usize; 51 | self.asid_enable.set(asid_enable); 52 | } 53 | 54 | /// Gets a mutable reference to the asid manager. 55 | pub fn asid_manager(&self) -> Option<&RefCell> { 56 | self.asid_enable.get().then_some(&self.asid_manager) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /userspace/wasm-test/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::fs::File; 3 | use std::io::Write; 4 | use std::io::Read; 5 | 6 | #[derive(Debug, Copy, Clone)] 7 | #[repr(transparent)] 8 | pub struct FileHandle(u64); 9 | 10 | #[derive(Debug, Copy, Clone)] 11 | #[repr(C)] 12 | pub enum CommandData { 13 | Open(i32), 14 | Read(u64), 15 | } 16 | 17 | #[derive(Debug, Copy, Clone)] 18 | #[repr(C)] 19 | pub struct Command { 20 | sender: u64, 21 | payload: CommandData, 22 | } 23 | 24 | #[derive(Copy, Clone)] 25 | #[repr(C)] 26 | pub struct ReplyPayload { 27 | status: u16, 28 | value: u64, 29 | } 30 | 31 | #[derive(Copy, Clone)] 32 | #[repr(C)] 33 | pub struct Reply { 34 | to: u64, 35 | payload: ReplyPayload, 36 | } 37 | 38 | fn main() { 39 | // File::create("myfile").expect("lol"); 40 | 41 | /*println!("abc"); 42 | 43 | for (k, v) in env::vars() { 44 | println!("{}: {}", k, v); 45 | } 46 | 47 | println!("-----");*/ 48 | 49 | println!("Hello"); 50 | 51 | let mut file = File::open(".").expect("open test"); 52 | let mut buffer = [0u8; 64]; 53 | for i in 0..10000 { 54 | let res = file.read(&mut buffer[..]).expect("read test"); 55 | //println!("{}", res); 56 | let test = unsafe { 57 | std::slice::from_raw_parts(&buffer[..] as *const _ as *const Command, 1) // TODO 58 | }; 59 | 60 | let command = test[0]; 61 | //println!("read one: {:?}", command); 62 | //assert_eq!(command.sender, 1); 63 | 64 | let mut test = unsafe { 65 | std::slice::from_raw_parts_mut(&mut buffer[..] 
as *mut _ as *mut Reply, 1) // TODO 66 | }; 67 | test[0] = Reply { 68 | to: command.sender, 69 | payload: ReplyPayload { 70 | status: 0, 71 | value: 12, 72 | }, 73 | }; 74 | 75 | let res = file.write(&mut buffer[..24]).expect("write test"); 76 | //println!("{} {} w", res, i-1); 77 | } 78 | println!("end"); 79 | } 80 | -------------------------------------------------------------------------------- /kernel/src/mm/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::arch; 2 | use crate::arch::address::VirtAddr; 3 | use crate::arch::paging::{get_cpu_page_mapping, ActiveMapping}; 4 | use crate::mm::mapper::MemoryMapper; 5 | use crate::mm::tcb_alloc::pagefault_tcb_alloc; 6 | use crate::tasking::scheduler::{self, with_current_thread}; 7 | use core::intrinsics::unlikely; 8 | 9 | mod alloc; 10 | pub mod avl_interval_tree; 11 | pub mod buddy; 12 | pub mod mapper; 13 | pub mod pmm; 14 | pub mod tcb_alloc; 15 | pub mod vma_allocator; 16 | 17 | /// Inits memory allocator. May only be called once. 18 | pub unsafe fn init(reserved_end: VirtAddr) { 19 | alloc::init(reserved_end); 20 | } 21 | 22 | /// Page fault handler. 23 | pub fn page_fault(fault_addr: VirtAddr, ip: VirtAddr, write: bool) { 24 | if fault_addr.as_usize() >= arch::TCB_START 25 | && fault_addr.as_usize() < arch::TCB_START + arch::TCB_LEN 26 | { 27 | // TCB fault. 28 | pagefault_tcb_alloc(fault_addr, write); 29 | return; 30 | } 31 | 32 | let failed = !with_current_thread(|thread| thread.page_fault(fault_addr)); 33 | 34 | if unlikely(failed) { 35 | if fault_addr.as_usize() < arch::USER_START || ip.as_usize() < arch::USER_START { 36 | // Kernel fault. 37 | // TODO: show cause (recognize stack overflow for example) 38 | panic!( 39 | "Pagefault in kernel, faulting address: {:?} -> {:?}, IP: {:?}, PAGEMAP: {:?}", 40 | fault_addr, 41 | // We're crashing anyway, so no concurrent things will be happening. 42 | unsafe { ActiveMapping::get_unlocked().translate(fault_addr) }, 43 | ip, 44 | get_cpu_page_mapping() 45 | ); 46 | } else { 47 | // Kill the thread. 48 | println!("Pagefault in thread, faulting address: {:?}", fault_addr); 49 | //println!("{:?}", unsafe { 50 | // ActiveMapping::get_unlocked().translate(fault_addr) 51 | //}); 52 | scheduler::thread_exit(u32::MAX); // TODO: exit code 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /kernel/src/wasm/runtime.rs: -------------------------------------------------------------------------------- 1 | use crate::tasking::scheduler::with_current_thread; 2 | use crate::wasm::main::{WASM_CALL_CONV, WASM_VMCTX_TYPE}; 3 | use crate::wasm::vmctx::{VmContext, WASM_PAGE_SIZE}; 4 | use cranelift_codegen::ir::{types, AbiParam, ArgumentPurpose, Signature}; 5 | use lazy_static::lazy_static; 6 | 7 | /// Runtime namespace for `ExternalName`. 8 | pub const RUNTIME_NAMESPACE: u32 = 1; 9 | pub const RUNTIME_MEMORY_GROW_IDX: u32 = 0; 10 | pub const RUNTIME_MEMORY_SIZE_IDX: u32 = 1; 11 | 12 | /// Runtime function data. 13 | pub struct RuntimeFunctionData { 14 | pub index: u32, 15 | pub signature: Signature, 16 | } 17 | 18 | lazy_static! 
{ 19 | pub static ref RUNTIME_MEMORY_GROW_DATA: RuntimeFunctionData = RuntimeFunctionData { 20 | index: RUNTIME_MEMORY_GROW_IDX, 21 | signature: Signature { 22 | params: vec![ 23 | AbiParam::special(WASM_VMCTX_TYPE, ArgumentPurpose::VMContext), 24 | AbiParam::new(types::I32), // Memory index 25 | AbiParam::new(types::I32), // Pages 26 | ], 27 | returns: vec![AbiParam::new(types::I32)], 28 | call_conv: WASM_CALL_CONV, 29 | }, 30 | }; 31 | 32 | pub static ref RUNTIME_MEMORY_SIZE_DATA: RuntimeFunctionData = RuntimeFunctionData { 33 | index: RUNTIME_MEMORY_SIZE_IDX, 34 | signature: Signature { 35 | params: vec![ 36 | AbiParam::special(WASM_VMCTX_TYPE, ArgumentPurpose::VMContext), 37 | AbiParam::new(types::I32), // Memory index 38 | ], 39 | returns: vec![AbiParam::new(types::I32)], 40 | call_conv: WASM_CALL_CONV, 41 | }, 42 | }; 43 | } 44 | 45 | /// memory.size 46 | pub extern "C" fn runtime_memory_size(_vmctx: &VmContext, idx: u32) -> u32 { 47 | assert_eq!(idx, 0); 48 | let heap_size = with_current_thread(|thread| thread.heap_size()); 49 | (heap_size / WASM_PAGE_SIZE) as u32 50 | } 51 | 52 | /// memory.grow 53 | pub extern "C" fn runtime_memory_grow(_vmctx: &VmContext, idx: u32, wasm_pages: u32) -> u32 { 54 | assert_eq!(idx, 0); 55 | with_current_thread(|thread| thread.heap_grow(wasm_pages)) 56 | } 57 | -------------------------------------------------------------------------------- /kernel/src/sync/wait_queue.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::cond_var_single::CondVarSingle; 2 | use crate::sync::spinlock::Spinlock; 3 | use alloc::collections::VecDeque; 4 | use core::intrinsics::unlikely; 5 | 6 | /// A queue with one waiter and multiple producers. 7 | pub struct WaitQueue { 8 | queue: Spinlock>, 9 | cond_var: CondVarSingle, 10 | } 11 | 12 | impl WaitQueue { 13 | /// Creates a new `WaitQueue`. 14 | pub fn new() -> Self { 15 | Self { 16 | queue: Spinlock::new(VecDeque::new()), 17 | cond_var: CondVarSingle::new(), 18 | } 19 | } 20 | 21 | /// Appends an element to the back. 22 | /// Notifies the waiter. 23 | pub fn push_back(&self, t: T) { 24 | self.queue.lock().push_back(t); 25 | self.cond_var.notify(); 26 | } 27 | 28 | /// Pops an element from the front. 29 | /// Waits if no elements are available. 30 | pub fn pop_front(&self) -> T { 31 | loop { 32 | let mut guard = self.queue.lock(); 33 | if let Some(t) = guard.pop_front() { 34 | return t; 35 | } else { 36 | self.cond_var.wait(guard); 37 | } 38 | } 39 | } 40 | 41 | /// If there are no elements available: block. 42 | /// Otherwise: pops as many elements as possible without going to block. 43 | pub fn pop_front_many(&self, buffer: &mut [T]) -> usize { 44 | if unlikely(buffer.is_empty()) { 45 | return 0; 46 | } 47 | 48 | loop { 49 | let mut guard = self.queue.lock(); 50 | if let Some(t) = guard.pop_front() { 51 | buffer[0] = t; 52 | 53 | let mut count = 1usize; 54 | while count < buffer.len() { 55 | if let Some(t) = guard.pop_front() { 56 | buffer[count] = t; 57 | count += 1; 58 | } else { 59 | break; 60 | } 61 | } 62 | 63 | return count; 64 | } else { 65 | self.cond_var.wait(guard); 66 | } 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /lib/wasm-call/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | [[package]] 4 | name = "proc-macro-error" 5 | version = "1.0.4" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" 8 | dependencies = [ 9 | "proc-macro-error-attr", 10 | "proc-macro2", 11 | "quote", 12 | "syn", 13 | "version_check", 14 | ] 15 | 16 | [[package]] 17 | name = "proc-macro-error-attr" 18 | version = "1.0.4" 19 | source = "registry+https://github.com/rust-lang/crates.io-index" 20 | checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" 21 | dependencies = [ 22 | "proc-macro2", 23 | "quote", 24 | "version_check", 25 | ] 26 | 27 | [[package]] 28 | name = "proc-macro2" 29 | version = "1.0.24" 30 | source = "registry+https://github.com/rust-lang/crates.io-index" 31 | checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" 32 | dependencies = [ 33 | "unicode-xid", 34 | ] 35 | 36 | [[package]] 37 | name = "quote" 38 | version = "1.0.8" 39 | source = "registry+https://github.com/rust-lang/crates.io-index" 40 | checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" 41 | dependencies = [ 42 | "proc-macro2", 43 | ] 44 | 45 | [[package]] 46 | name = "syn" 47 | version = "1.0.58" 48 | source = "registry+https://github.com/rust-lang/crates.io-index" 49 | checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" 50 | dependencies = [ 51 | "proc-macro2", 52 | "quote", 53 | "unicode-xid", 54 | ] 55 | 56 | [[package]] 57 | name = "unicode-xid" 58 | version = "0.2.1" 59 | source = "registry+https://github.com/rust-lang/crates.io-index" 60 | checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" 61 | 62 | [[package]] 63 | name = "version_check" 64 | version = "0.9.2" 65 | source = "registry+https://github.com/rust-lang/crates.io-index" 66 | checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" 67 | 68 | [[package]] 69 | name = "wasm-call" 70 | version = "0.1.0" 71 | dependencies = [ 72 | "proc-macro-error", 73 | "proc-macro2", 74 | "quote", 75 | "syn", 76 | ] 77 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export PATH := $(PATH):$(shell realpath ./toolchain/opt/cross/bin) 2 | 3 | ARCH ?= x86_64 4 | BUILD ?= debug 5 | KERNEL_CARGOFLAGS ?= 6 | QEMUFLAGS ?= 7 | 8 | RUST_OBJECT = kernel/target/$(ARCH)-kwast/$(BUILD)/libkernel.a 9 | LD_SCRIPT = kernel/src/arch/$(ARCH)/link.ld 10 | KERNEL = build/kernel-$(ARCH) 11 | ISO_FILES = build/iso 12 | ISO_IMAGE = build/img.iso 13 | ASM_SOURCES = $(wildcard kernel/src/arch/$(ARCH)/*.s) 14 | ASM_OBJECTS = $(patsubst kernel/src/arch/$(ARCH)/%.s, build/arch/$(ARCH)/%.o, $(ASM_SOURCES)) 15 | 16 | LDFLAGS = -n -T $(LD_SCRIPT) -s --gc-sections 17 | LD = $(ARCH)-elf-ld 18 | AS = $(ARCH)-elf-as 19 | 20 | QEMUFLAGS += -m 512 --enable-kvm -cpu max --serial mon:stdio -device isa-debug-exit,iobase=0xf4,iosize=0x04 21 | 22 | USER_CARGOFLAGS = 23 | ifeq ($(BUILD), release) 24 | KERNEL_CARGOFLAGS += --release 25 | USER_CARGOFLAGS += --release 26 | endif 27 | 28 | .PHONY: all clean run rust check iso initrd dirs 29 | 30 | all: $(KERNEL) 31 | 32 | clean: 33 | @rm -r build/ 34 | 35 | dirs: 36 | @mkdir -p $(ISO_FILES)/boot/grub 37 | 38 | iso: dirs initrd $(KERNEL) 39 | @cp kernel/src/arch/$(ARCH)/grub.cfg $(ISO_FILES)/boot/grub 40 | @cp $(KERNEL) $(ISO_FILES)/boot/kernel 41 | @grub-mkrescue -o $(ISO_IMAGE) 
$(ISO_FILES) 2> /dev/null || (echo "grub-mkrescue failed, do you have the necessary dependencies?" && exit 1) 42 | 43 | initrd: dirs 44 | @cd userspace; cargo build $(USER_CARGOFLAGS) 45 | @cd userspace/target/wasm32-wasi/$(BUILD); (for file in *.wasm; do (wasm-strip "$$file" 2> /dev/null || echo "wasm-strip is not installed. This is not a fatal error. Installing wasm-strip will result in smaller binary files."); done); tar -cf ../../../../$(ISO_FILES)/boot/initrd.tar *.wasm 46 | 47 | run: iso 48 | @qemu-system-$(ARCH) -cdrom $(ISO_IMAGE) $(QEMUFLAGS) 49 | 50 | rust: 51 | @cd kernel; RUST_TARGET_PATH=$(shell pwd) cargo build --target $(ARCH)-kwast.json $(KERNEL_CARGOFLAGS) 52 | 53 | check: 54 | @cd kernel; cargo c --target $(ARCH)-kwast.json $(KERNEL_CARGOFLAGS) 55 | 56 | $(KERNEL): rust $(RUST_OBJECT) $(ASM_OBJECTS) $(LD_SCRIPT) 57 | @$(LD) $(LDFLAGS) -o $(KERNEL) $(ASM_OBJECTS) $(RUST_OBJECT) 58 | 59 | build/arch/$(ARCH)/%.o: kernel/src/arch/$(ARCH)/%.s 60 | @mkdir -p build/arch/$(ARCH) 61 | @$(AS) -o $@ $< 62 | -------------------------------------------------------------------------------- /bochsrc: -------------------------------------------------------------------------------- 1 | # configuration file generated by Bochs 2 | plugin_ctrl: unmapped=1, biosdev=1, speaker=1, extfpuirq=1, parallel=1, serial=1, iodebug=1 3 | config_interface: textconfig 4 | display_library: x 5 | memory: host=128, guest=128 6 | romimage: file="/usr/share/bochs/BIOS-bochs-latest", address=0x0, options=none 7 | vgaromimage: file="/usr/share/bochs/VGABIOS-lgpl-latest" 8 | boot: cdrom 9 | floppy_bootsig_check: disabled=0 10 | # no floppya 11 | # no floppyb 12 | ata0: enabled=1, ioaddr1=0x1f0, ioaddr2=0x3f0, irq=14 13 | ata0-master: type=cdrom, path="build/img.iso", status=inserted, model="Generic 1234", biosdetect=auto 14 | ata0-slave: type=none 15 | ata1: enabled=1, ioaddr1=0x170, ioaddr2=0x370, irq=15 16 | ata1-master: type=none 17 | ata1-slave: type=none 18 | ata2: enabled=0 19 | ata3: enabled=0 20 | optromimage1: file=none 21 | optromimage2: file=none 22 | optromimage3: file=none 23 | optromimage4: file=none 24 | optramimage1: file=none 25 | optramimage2: file=none 26 | optramimage3: file=none 27 | optramimage4: file=none 28 | pci: enabled=1, chipset=i440fx 29 | vga: extension=vbe, update_freq=5, realtime=1 30 | cpu: count=1:1:1, ips=4000000, quantum=16, model=bx_generic, reset_on_triple_fault=1, cpuid_limit_winnt=0, ignore_bad_msrs=1, mwait_is_nop=0 31 | cpuid: level=6, stepping=3, model=3, family=6, vendor_string="AuthenticAMD", brand_string="AMD Athlon(tm) processor" 32 | cpuid: mmx=1, apic=xapic, simd=sse2, sse4a=0, misaligned_sse=0, sep=1, movbe=0, adx=0 33 | cpuid: aes=0, sha=0, xsave=0, xsaveopt=0, avx_f16c=0, avx_fma=0, bmi=0, xop=0, fma4=0 34 | cpuid: tbm=0, x86_64=1, 1g_pages=0, pcid=0, fsgsbase=0, smep=0, smap=0, mwait=1 35 | print_timestamps: enabled=0 36 | debugger_log: - 37 | magic_break: enabled=0 38 | port_e9_hack: enabled=0 39 | private_colormap: enabled=0 40 | clock: sync=none, time0=local, rtc_sync=0 41 | # no cmosimage 42 | # no loader 43 | log: - 44 | logprefix: %t%e%d 45 | debug: action=ignore 46 | info: action=report 47 | error: action=report 48 | panic: action=ask 49 | keyboard: type=mf, serial_delay=250, paste_delay=100000, user_shortcut=none 50 | mouse: type=ps2, enabled=0, toggle=ctrl+mbutton 51 | speaker: enabled=1, mode=system 52 | parport1: enabled=1, file=none 53 | parport2: enabled=0 54 | com1: enabled=1, mode=null 55 | com2: enabled=0 56 | com3: enabled=0 57 | com4: 
enabled=0 58 | -------------------------------------------------------------------------------- /kernel/src/mm/mapper.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::{PhysAddr, VirtAddr}; 2 | use crate::arch::paging::{CpuPageMapping, EntryFlags}; 3 | 4 | /// Trait for memory mapper: maps physical addresses to a virtual addresses. 5 | pub trait MemoryMapper { 6 | /// Gets the active paging mapping, without locking. 7 | unsafe fn get_unlocked() -> Self; 8 | 9 | /// Gets a new paging mapping. 10 | fn get_new() -> Result; 11 | 12 | /// Translate a virtual address to a physical address (if mapped). 13 | fn translate(&self, addr: VirtAddr) -> Option; 14 | 15 | /// Gets a single physical page and maps it to a given virtual address. 16 | fn get_and_map_single(&mut self, vaddr: VirtAddr, flags: EntryFlags) -> MemoryResult; 17 | 18 | /// Unmaps a single page and frees the corresponding physical frame. 19 | fn free_and_unmap_single(&mut self, vaddr: VirtAddr); 20 | 21 | /// Maps a single page. 22 | fn map_single(&mut self, vaddr: VirtAddr, paddr: PhysAddr, flags: EntryFlags) -> MemoryResult; 23 | 24 | /// Unmaps a single page. 25 | fn unmap_single(&mut self, vaddr: VirtAddr); 26 | 27 | /// Maps a range of pages to a range of physical frames. 28 | fn map_range_physical( 29 | &mut self, 30 | vaddr: VirtAddr, 31 | paddr: PhysAddr, 32 | size: usize, 33 | flags: EntryFlags, 34 | ) -> MemoryResult; 35 | 36 | /// Maps a range. 37 | fn map_range(&mut self, vaddr: VirtAddr, size: usize, flags: EntryFlags) -> MemoryResult; 38 | 39 | /// Unmaps a range. 40 | fn unmap_range(&mut self, vaddr: VirtAddr, size: usize); 41 | 42 | /// Unmaps a range and frees the corresponding physical frames. 43 | fn free_and_unmap_range(&mut self, vaddr: VirtAddr, size: usize); 44 | 45 | /// Changes the flags in a range. 46 | fn change_flags_range( 47 | &mut self, 48 | vaddr: VirtAddr, 49 | size: usize, 50 | flags: EntryFlags, 51 | ) -> MemoryResult; 52 | } 53 | 54 | /// Memory request result. 55 | pub type MemoryResult = Result<(), MemoryError>; 56 | 57 | /// Error during memory request. 58 | #[derive(Debug)] 59 | pub enum MemoryError { 60 | /// Out of physical memory. 61 | OOM, 62 | /// Out of virtual memory (no more virtual memory areas). 63 | NoMoreVMA, 64 | /// Invalid memory range (for example partial mapping a Vma out of bounds). 65 | InvalidRange, 66 | } 67 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/context_switch.s: -------------------------------------------------------------------------------- 1 | .section .text 2 | 3 | .extern next_thread_state 4 | .type next_thread_state, @function 5 | 6 | // AMD64 ABI tells us that only rbx, rbp, r12 - r15 need to be preserved by the callee. 7 | // _switch_to_next() 8 | .global _switch_to_next 9 | .type _switch_to_next, @function 10 | _switch_to_next: 11 | pushfq 12 | pushq %rbx 13 | pushq %rbp 14 | pushq %r12 15 | pushq %r13 16 | pushq %r14 17 | pushq %r15 18 | 19 | // Protect the scheduler from nesting. 20 | // The interrupt flag will be restored because of the popfq later. 
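// For a freshly created thread, the stack returned by next_thread_state was laid out by Stack::prepare_trampoline, so the pops below restore r15-r12 as zero, rbp as the first argument, rbx as the entry point, then rflags, and the final ret lands in thread_trampoline.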
21 | cli 22 | 23 | movq %rsp, %rdi 24 | call next_thread_state 25 | movq %rax, %rsp 26 | testq %rdx, %rdx 27 | jz 1f 28 | movq %rdx, %cr3 29 | 1: 30 | popq %r15 31 | popq %r14 32 | popq %r13 33 | popq %r12 34 | popq %rbp 35 | popq %rbx 36 | popfq 37 | 38 | ret 39 | 40 | .global irq0 41 | .type irq0, @function 42 | irq0: 43 | cmpl $0, %gs:8 // Check if preempt_count != 0 44 | jnz .flag 45 | pushq %rax 46 | pushq %rdi 47 | pushq %rsi 48 | pushq %rdx 49 | pushq %rcx 50 | pushq %r8 51 | pushq %r9 52 | pushq %r10 53 | pushq %r11 54 | 55 | // EOI, do this here because we might not end up at the bottom part if the other didn't come from an irq0. 56 | movb $32, %al 57 | outb %al, $32 58 | 59 | call _switch_to_next 60 | 61 | popq %r11 62 | popq %r10 63 | popq %r9 64 | popq %r8 65 | popq %rcx 66 | popq %rdx 67 | popq %rsi 68 | popq %rdi 69 | popq %rax 70 | 71 | iretq 72 | .flag: 73 | pushq %rax 74 | // EOI 75 | movb $32, %al 76 | outb %al, $32 77 | popq %rax 78 | iretq 79 | 80 | .global _thread_exit 81 | .type _thread_exit, @function 82 | _thread_exit: 83 | // We want to free the memory areas of this thread. This includes the stack. 84 | // We can use the "interrupt stack" temporarily, because it's per-core and we are guaranteed to leave it alone 85 | // when the next thread is selected. An NMI does not use this IST. 86 | cli 87 | .extern INTERRUPT_STACK_TOP 88 | movq $INTERRUPT_STACK_TOP, %rsp 89 | 90 | call _switch_to_next 91 | 92 | // Should not get here 93 | ud2 94 | 95 | .global _check_should_schedule 96 | .type _check_should_schedule, @function 97 | _check_should_schedule: 98 | cmpb $0, %gs:12 99 | jnz 1f 100 | ret 101 | 1: 102 | jmp _switch_to_next 103 | -------------------------------------------------------------------------------- /kernel/src/wasm/reloc_sink.rs: -------------------------------------------------------------------------------- 1 | //! Based on https://github.com/bytecodealliance/wasmtime/tree/master/crates/jit/src 2 | 3 | use crate::wasm::runtime::RUNTIME_NAMESPACE; 4 | use alloc::vec::Vec; 5 | use cranelift_codegen::binemit::{self, Addend, CodeOffset, Reloc}; 6 | use cranelift_codegen::ir::{ExternalName, JumpTable, LibCall, SourceLoc}; 7 | use cranelift_wasm::FuncIndex; 8 | 9 | /// Relocation target. 10 | #[derive(Debug)] 11 | pub enum RelocationTarget { 12 | /// Relocation is for a user-defined function. 13 | UserFunction(FuncIndex), 14 | /// Runtime function. 15 | RuntimeFunction(u32), 16 | /// Relocation is for a lib-defined function. 17 | LibCall(LibCall), 18 | } 19 | 20 | /// A relocation entry for the function. 21 | #[derive(Debug)] 22 | pub struct Relocation { 23 | pub code_offset: CodeOffset, 24 | pub reloc: Reloc, 25 | pub target: RelocationTarget, 26 | pub addend: Addend, 27 | } 28 | 29 | /// Relocation sink, stores relocations for code. 
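Implements Cranelift's `binemit::RelocSink`, recording each external relocation (user function, runtime function, or libcall) together with its code offset and addend so they can be resolved later.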
30 | pub struct RelocSink { 31 | pub relocations: Vec, 32 | } 33 | 34 | impl RelocSink { 35 | pub fn new() -> Self { 36 | Self { 37 | relocations: Vec::new(), 38 | } 39 | } 40 | } 41 | 42 | impl binemit::RelocSink for RelocSink { 43 | fn reloc_block(&mut self, _: u32, _: Reloc, _: u32) { 44 | unimplemented!() 45 | } 46 | 47 | fn reloc_external( 48 | &mut self, 49 | code_offset: CodeOffset, 50 | _source_loc: SourceLoc, 51 | reloc: Reloc, 52 | name: &ExternalName, 53 | addend: Addend, 54 | ) { 55 | let reloc_type = match *name { 56 | ExternalName::User { 57 | namespace: 0, 58 | index, 59 | } => RelocationTarget::UserFunction(FuncIndex::from_u32(index)), 60 | ExternalName::User { 61 | namespace: RUNTIME_NAMESPACE, 62 | index, 63 | } => RelocationTarget::RuntimeFunction(index), 64 | ExternalName::LibCall(libcall) => RelocationTarget::LibCall(libcall), 65 | _ => unreachable!(), 66 | }; 67 | 68 | self.relocations.push(Relocation { 69 | code_offset, 70 | reloc, 71 | target: reloc_type, 72 | addend, 73 | }); 74 | } 75 | 76 | #[inline] 77 | fn reloc_constant(&mut self, _: u32, _: Reloc, _: u32) { 78 | // Not necessary atm because our code and rodata is not split. 79 | } 80 | 81 | #[inline] 82 | fn reloc_jt(&mut self, _code_offset: u32, _reloc: Reloc, _jt: JumpTable) { 83 | // Not necessary atm because our code and rodata is not split. 84 | //self.relocations.push(Relocation { 85 | // code_offset, 86 | // reloc, 87 | // target: RelocationTarget::JumpTable(jt), 88 | // addend: 0, 89 | //}); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /kernel/src/mm/pmm.rs: -------------------------------------------------------------------------------- 1 | use multiboot2::BootInformation; 2 | 3 | use crate::arch::address::{PhysAddr, VirtAddr}; 4 | use crate::mm::mapper::{MemoryError, MemoryResult}; 5 | use crate::sync::spinlock::Spinlock; 6 | use core::intrinsics::unlikely; 7 | 8 | /// The default frame allocator. 9 | /// 10 | /// How does this allocator work? 11 | /// Instead of having a fixed area in the memory to keep the stack, 12 | /// we let each free frame contain a pointer to the next free frame on the stack. 13 | /// This limits the amount of virtual memory we need to reserve. 14 | /// 15 | /// When we allocate a frame, we map it to the virtual memory and read the pointer. 16 | /// Then we move the head. There is no unnecessary mapping happening here. 17 | /// There is no additional mapping compared to the classical stack approach: 18 | /// * When a page is being allocated it'll need to be mapped anyway. 19 | /// * When a page is being freed it was already mapped. 20 | /// 21 | /// It is likely that, for an allocation, the data will be accessed anyway after the mapping. 22 | /// For a free, it is likely that the data was already accessed. 23 | /// So that means there is likely no extra TLB miss or cache miss. 24 | pub struct FrameAllocator { 25 | pub top: PhysAddr, 26 | } 27 | 28 | impl FrameAllocator { 29 | /// Initializes the allocator. 30 | pub fn init(&mut self, mboot_struct: &BootInformation, reserved_end: PhysAddr) { 31 | let reserved_end = reserved_end.align_up(); 32 | 33 | self.apply_mmap( 34 | mboot_struct 35 | .memory_map_tag() 36 | .expect("Memory map is required"), 37 | reserved_end, 38 | ); 39 | } 40 | 41 | /// Empty, uninitialized allocator. 42 | const fn empty() -> Self { 43 | FrameAllocator { 44 | top: PhysAddr::null(), 45 | } 46 | } 47 | 48 | /// Pops the top and moves the current top pointer. 
This function is used internally for memory management by paging. 49 | pub fn pop_top(&mut self, f: F) -> MemoryResult 50 | where 51 | F: FnOnce(PhysAddr) -> VirtAddr, 52 | { 53 | if unlikely(self.top.is_null()) { 54 | return Err(MemoryError::OOM); 55 | } 56 | 57 | // Read and set the next top address. 58 | let ptr = f(self.top).as_const(); 59 | self.top = PhysAddr::new(unsafe { *ptr }); 60 | Ok(()) 61 | } 62 | 63 | /// Similar to `pop_top`. 64 | /// This pushes a new top on the stack and links it to the previous top. 65 | pub fn push_top(&mut self, vaddr: VirtAddr, paddr: PhysAddr) { 66 | let ptr: *mut usize = vaddr.as_mut(); 67 | unsafe { 68 | ptr.write(self.top.as_usize()); 69 | } 70 | self.top = paddr; 71 | } 72 | } 73 | 74 | static PMM: Spinlock = Spinlock::new(FrameAllocator::empty()); 75 | 76 | /// Execute something using the PMM. 77 | #[inline] 78 | pub fn with_pmm(f: F) -> T 79 | where 80 | F: FnOnce(&mut FrameAllocator) -> T, 81 | { 82 | f(&mut PMM.lock()) 83 | } 84 | -------------------------------------------------------------------------------- /kernel/src/arch/acpi/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::acpi::hpet::{parse_hpet, HpetData, HpetTable}; 2 | use crate::arch::acpi::sdt::{SdtFixedMapping, SdtHeader}; 3 | use crate::arch::address::{PhysAddr, VirtAddr}; 4 | use crate::arch::paging::ActiveMapping; 5 | use crate::mm::mapper::MemoryMapper; 6 | use core::convert::TryFrom; 7 | use core::mem::size_of; 8 | 9 | pub mod hpet; 10 | mod sdt; 11 | 12 | #[derive(Debug)] 13 | pub struct ParsedData { 14 | pub hpet: Option, 15 | } 16 | 17 | pub enum RootSdt { 18 | Rsdt(PhysAddr), 19 | Xsdt(PhysAddr), 20 | } 21 | 22 | #[allow(dead_code)] 23 | #[repr(u8)] 24 | pub enum AddressSpace { 25 | SystemMemory = 0, 26 | SystemIO = 1, 27 | } 28 | 29 | #[repr(C, packed)] 30 | pub struct AcpiAddress { 31 | pub address_space: AddressSpace, 32 | pub reg_bit_width: u8, 33 | pub reg_bit_offset: u8, 34 | _reserved: u8, 35 | pub address: u64, 36 | } 37 | 38 | /// Parses the tables using the root sdt. 39 | /// `vaddr` refers to a free place which is big enough to hold the table information temporarily. 40 | pub fn parse_tables(root_sdt: RootSdt, vaddr: VirtAddr) -> ParsedData { 41 | let (root_sdt, entry_size) = match root_sdt { 42 | RootSdt::Rsdt(r) => (r, 4), 43 | RootSdt::Xsdt(r) => (r, 8), 44 | }; 45 | 46 | let mut result = ParsedData { hpet: None }; 47 | 48 | // Safety: 49 | // We are the only running process right now. 50 | // This is before the scheduler is setup, so it's not even possible to use `thread.domain()`. 
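    // Overall flow: temporarily map the root SDT at `vaddr`, walk its list of physical
    // SDT pointers (4-byte entries for an RSDT, 8-byte for an XSDT), map each referenced
    // table just behind the root mapping, parse the ones we recognise (currently only
    // "HPET"), and unmap everything again before returning.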
51 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 52 | let root_sdt_mapping = 53 | SdtFixedMapping::from(&mut mapping, root_sdt, vaddr).expect("root sdt should be mappable"); 54 | let root_sdt = root_sdt_mapping.sdt; 55 | 56 | let sdt_map_addr = vaddr + root_sdt_mapping.size; 57 | let entries = (root_sdt.length as usize - size_of::()) / entry_size; 58 | //println!("{} entries", entries); 59 | 60 | for i in 0..entries { 61 | let sdt_ptr_addr = root_sdt as *const _ as usize + size_of::() + i * entry_size; 62 | let sdt_addr = match entry_size { 63 | 4 => PhysAddr::new(unsafe { *(sdt_ptr_addr as *const u32) } as usize), 64 | 8 => PhysAddr::new( 65 | usize::try_from(unsafe { *(sdt_ptr_addr as *const u64) }) 66 | .expect("sdt pointer does not fit"), 67 | ), 68 | _ => unreachable!("invalid entry size"), 69 | }; 70 | 71 | if let Some(sdt_mapping) = SdtFixedMapping::from(&mut mapping, sdt_addr, sdt_map_addr) { 72 | let sdt = sdt_mapping.sdt; 73 | 74 | if sdt.name == *b"HPET" { 75 | // Safety: we know it's HPET. 76 | result.hpet = Some(parse_hpet(unsafe { 77 | &*(sdt as *const _ as *const HpetTable) 78 | })); 79 | } 80 | 81 | sdt_mapping.unmap(&mut mapping); 82 | } 83 | } 84 | 85 | root_sdt_mapping.unmap(&mut mapping); 86 | 87 | result 88 | } 89 | -------------------------------------------------------------------------------- /kernel/src/util/mem_funcs.rs: -------------------------------------------------------------------------------- 1 | use core::mem::size_of; 2 | 3 | extern "C" { 4 | pub fn page_clear(dst: *mut u8); 5 | } 6 | 7 | const SIZE: usize = size_of::(); 8 | 9 | fn is_unaligned(ptr: *const u8) -> bool { 10 | ptr as usize & (SIZE - 1) > 0 11 | } 12 | 13 | #[no_mangle] 14 | pub unsafe extern "C" fn memcpy(dst: *mut u8, src: *const u8, n: usize) -> *mut u8 { 15 | let mut i = 0usize; 16 | 17 | // First try to align the destination and do byte copies. 18 | while i < n && is_unaligned(dst.add(i)) { 19 | *dst.add(i) = *src.add(i); 20 | i += 1; 21 | } 22 | 23 | // If we end up with an aligned source now, we can do full block copies. 24 | if !is_unaligned(src.add(i)) { 25 | while i + SIZE < n { 26 | let src = src.add(i) as *mut usize; 27 | let dst = dst.add(i) as *mut usize; 28 | *dst = *src; 29 | i += SIZE; 30 | } 31 | } 32 | 33 | // Copy the left over parts. 34 | while i < n { 35 | *dst.add(i) = *src.add(i); 36 | i += 1; 37 | } 38 | 39 | dst 40 | } 41 | 42 | #[no_mangle] 43 | pub unsafe extern "C" fn memmove(dst: *mut u8, src: *const u8, mut n: usize) -> *mut u8 { 44 | if src as usize + n <= dst as usize { 45 | return memcpy(dst, src, n); 46 | } 47 | 48 | if src < dst { 49 | while n > 0 { 50 | n -= 1; 51 | *dst.add(n) = *src.add(n); 52 | } 53 | } else { 54 | let mut i = 0usize; 55 | while i < n { 56 | *dst.add(i) = *src.add(i); 57 | i += 1; 58 | } 59 | } 60 | 61 | dst 62 | } 63 | 64 | #[no_mangle] 65 | pub unsafe extern "C" fn memset(dst: *mut u8, data: i32, n: usize) -> *mut u8 { 66 | let mut i = 0usize; 67 | let data = data as u8; 68 | 69 | // First try aligning and do byte writes. 70 | while i < n && is_unaligned(dst.add(i)) { 71 | *dst.add(i) = data; 72 | i += 1; 73 | } 74 | 75 | // If we end up with an aligned source now, we can do full block copies. 
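    // Once aligned, broadcast the fill byte into every byte of a usize (e.g. 0xAB becomes
    // 0xABAB_ABAB_ABAB_ABAB on x86_64) so that whole words can be stored per iteration.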
76 | if !is_unaligned(dst.add(i)) { 77 | let data = data as u8 as usize; 78 | let data = data << 8 | data; 79 | let data = data << 16 | data; 80 | let data = data << 32 | data; 81 | 82 | while i + SIZE < n { 83 | let dst = dst.add(i) as *mut usize; 84 | *dst = data; 85 | i += SIZE; 86 | } 87 | } 88 | 89 | // Copy the left over parts. 90 | while i < n { 91 | *dst.add(i) = data; 92 | i += 1; 93 | } 94 | 95 | dst 96 | } 97 | 98 | #[no_mangle] 99 | pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 { 100 | let mut i = 0usize; 101 | while i < n { 102 | let a = *s1.add(i); 103 | let b = *s2.add(i); 104 | if a != b { 105 | return a as i32 - b as i32; 106 | } 107 | i += 1; 108 | } 109 | 110 | 0 111 | } 112 | -------------------------------------------------------------------------------- /kernel/src/tasking/scheme_container.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::spinlock::RwLock; 2 | use crate::tasking::file::{FileDescriptor, FileHandle}; 3 | use crate::tasking::scheme::{Scheme, SchemePtr}; 4 | use crate::wasm::wasi::Errno; 5 | use alloc::boxed::Box; 6 | use alloc::collections::btree_map::Entry; 7 | use alloc::collections::BTreeMap; 8 | use alloc::sync::Arc; 9 | use spin::Once; 10 | 11 | /// Scheme identifier. 12 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 13 | #[repr(transparent)] 14 | pub struct SchemeId(usize); 15 | 16 | impl SchemeId { 17 | /// Sentinel. 18 | pub const fn sentinel() -> Self { 19 | Self(0) 20 | } 21 | } 22 | 23 | /// Error that can occur when inserting a new scheme. 24 | #[derive(Debug)] 25 | pub enum SchemeInsertionError { 26 | /// The scheme name is already taken. 27 | NameAlreadyTaken, 28 | } 29 | 30 | pub struct SchemeContainer { 31 | /// Maps a name to an id. 32 | /// It also stores a `SchemePtr` because creating it using `Arc::downgrade` is more expensive 33 | /// than cloning an already existing one. 34 | name_scheme_map: BTreeMap, (Arc, SchemePtr)>, 35 | /// Next scheme id. 36 | next_scheme_id: usize, 37 | } 38 | 39 | impl SchemeContainer { 40 | /// Creates a new scheme container. 41 | fn new() -> Self { 42 | Self { 43 | name_scheme_map: BTreeMap::new(), 44 | next_scheme_id: 1, 45 | } 46 | } 47 | 48 | /// Inserts a new scheme. 49 | pub fn insert(&mut self, name: Box<[u8]>) -> Result<(), SchemeInsertionError> { 50 | match self.name_scheme_map.entry(name) { 51 | Entry::Occupied(_) => Err(SchemeInsertionError::NameAlreadyTaken), 52 | Entry::Vacant(v) => { 53 | let scheme = Scheme::new(SchemeId(self.next_scheme_id)); 54 | self.next_scheme_id += 1; 55 | let scheme = Arc::new(scheme); 56 | let weak = Arc::downgrade(&scheme); 57 | v.insert((scheme, weak)); 58 | Ok(()) 59 | } 60 | } 61 | } 62 | 63 | /// Gets a scheme by name. 64 | //pub fn get(&self, name: Box<[u8]>) -> Option> { 65 | // self.name_scheme_map.get(&name).map(|(a, _)| a).cloned() 66 | //} 67 | 68 | pub fn open_self(&self, name: Box<[u8]>) -> Result { 69 | let (_, w) = self.name_scheme_map.get(&name).ok_or(Errno::NoDev)?; 70 | Ok(FileDescriptor::from(w.clone(), FileHandle::Own)) 71 | } 72 | 73 | pub fn open(&self, name: Box<[u8]>, i: i32) -> Result { 74 | let (a, w) = self.name_scheme_map.get(&name).ok_or(Errno::NoDev)?; 75 | // TODO: filename arg 76 | a.open(i) 77 | .map(|handle| FileDescriptor::from(w.clone(), handle)) 78 | } 79 | } 80 | 81 | static SCHEMES: Once> = Once::new(); 82 | 83 | /// Gets the schemes. 
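/// The container is created lazily on first use; that first call also registers the
/// nameless "self" scheme (the empty name), so it is always present.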
84 | pub fn schemes() -> &'static RwLock { 85 | SCHEMES.call_once(|| { 86 | let mut container = SchemeContainer::new(); 87 | 88 | container.insert(Box::new([])).expect("add self"); 89 | 90 | RwLock::new(container) 91 | }) 92 | } 93 | -------------------------------------------------------------------------------- /kernel/src/tasking/file.rs: -------------------------------------------------------------------------------- 1 | use crate::tasking::scheme::{Scheme, SchemePtr}; 2 | use crate::wasm::wasi::Errno; 3 | use alloc::boxed::Box; 4 | use alloc::sync::Arc; 5 | use alloc::vec::Vec; 6 | 7 | /// Maximum amount of files a single table can have opened. 8 | const MAX_FILES: usize = 32; 9 | 10 | /// File index in file descriptor table. 11 | pub type FileIdx = usize; 12 | 13 | /// This should be handled by the service. 14 | #[derive(Copy, Clone)] 15 | #[repr(transparent)] 16 | pub struct InnerFileHandle(pub(crate) u64); 17 | 18 | /// File handle used in a scheme (per-scheme). 19 | #[derive(Copy, Clone)] 20 | pub enum FileHandle { 21 | /// A handle to a file in the scheme. 22 | Inner(InnerFileHandle), 23 | /// A handle to the scheme itself. 24 | Own, 25 | } 26 | 27 | pub struct FileDescriptor { 28 | scheme: SchemePtr, 29 | handle: FileHandle, 30 | /// Files can be pre-opened and even mapped to a different name. 31 | /// Keep track of this because WASI needs it. 32 | pre_open_path: Option>, 33 | } 34 | 35 | pub struct FileDescriptorTable { 36 | /// File descriptor table. 37 | /// Note: there can be holes, which is why we need Option. 38 | files: Vec>, 39 | } 40 | 41 | impl FileDescriptor { 42 | /// Creates a file descriptor from scheme data. 43 | pub fn from(scheme: SchemePtr, handle: FileHandle) -> Self { 44 | Self { 45 | scheme, 46 | handle, 47 | pre_open_path: None, 48 | } 49 | } 50 | 51 | /// Pre open path. 52 | pub fn pre_open_path(&self) -> Option<&[u8]> { 53 | self.pre_open_path.as_ref().map(|path| &path[..]) 54 | } 55 | 56 | /// Sets the pre open path. 57 | pub fn set_pre_open_path(&mut self, path: Box<[u8]>) { 58 | self.pre_open_path = Some(path); 59 | } 60 | 61 | /// Execute with scheme and handle. 62 | pub fn scheme_and_handle(&self) -> Result<(Arc, FileHandle), Errno> { 63 | let scheme = self.scheme.upgrade().ok_or(Errno::NoDev)?; 64 | Ok((scheme, self.handle)) 65 | } 66 | } 67 | 68 | impl FileDescriptorTable { 69 | /// Creates a new file descriptor table. 70 | pub fn new() -> Self { 71 | //Self { files: Vec::new() } 72 | // TODO 73 | Self { 74 | files: vec![None, None, None], 75 | } 76 | } 77 | 78 | /// Insert file into lowest available index. 79 | pub fn insert_lowest(&mut self, fd: FileDescriptor) -> Option { 80 | for (idx, file) in self.files.iter_mut().enumerate() { 81 | // TODO: debug 82 | if idx < 3 { 83 | continue; 84 | } 85 | 86 | if file.is_none() { 87 | *file = Some(fd); 88 | return Some(idx); 89 | } 90 | } 91 | 92 | if self.files.len() < MAX_FILES { 93 | self.files.push(Some(fd)); 94 | Some(self.files.len() - 1) 95 | } else { 96 | None 97 | } 98 | } 99 | 100 | /// Gets a file descriptor. 
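    /// Returns `None` both for indices beyond the end of the table and for holes left by
    /// closed files.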
101 | pub fn get(&self, idx: FileIdx) -> Option<&FileDescriptor> { 102 | self.files.get(idx).unwrap_or(&None).as_ref() 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /kernel/src/arch/acpi/sdt.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::{PhysAddr, VirtAddr}; 2 | use crate::arch::paging::{ActiveMapping, EntryFlags, PAGE_SIZE}; 3 | use crate::mm::mapper::MemoryMapper; 4 | use core::num::Wrapping; 5 | use core::slice; 6 | 7 | #[repr(C, packed)] 8 | pub struct SdtHeader { 9 | pub name: [u8; 4], 10 | pub length: u32, 11 | revision: u8, 12 | checksum: u8, 13 | oem_id: [u8; 6], 14 | oem_table_id: [u8; 8], 15 | oem_revision: u32, 16 | creator_id: u32, 17 | creator_revision: u32, 18 | } 19 | 20 | #[must_use = "Fixed mapping must be released"] 21 | pub struct SdtFixedMapping<'a> { 22 | pub sdt: &'a SdtHeader, 23 | pub size: usize, 24 | } 25 | 26 | impl<'a> SdtFixedMapping<'a> { 27 | /// Maps an sdt. 28 | pub fn from(mapping: &mut ActiveMapping, paddr: PhysAddr, vaddr: VirtAddr) -> Option { 29 | let flags = EntryFlags::PRESENT | EntryFlags::NX; 30 | 31 | // Map two pages, read length, and map more if needed. 32 | // We need at least two pages because we're not sure if the `length` is aligned 33 | // inside a single page and the offset inside the header fits in a single page. 34 | let aligned_down = paddr.align_down(); 35 | let offset = paddr.as_usize() - aligned_down.as_usize(); 36 | mapping 37 | .map_range_physical(vaddr, aligned_down, 2 * PAGE_SIZE, flags) 38 | .expect("early sdt mapping should succeed"); 39 | // Safety: we just mapped this. 40 | let sdt = unsafe { &*(vaddr + offset).as_const::() }; 41 | let required_mapped_size = 42 | (paddr + sdt.length as usize).align_up().as_usize() - aligned_down.as_usize(); 43 | 44 | // Safety limit 45 | if required_mapped_size > 4096 * 1024 { 46 | return None; 47 | } 48 | 49 | // Map more if necessary. 50 | if required_mapped_size > 2 * PAGE_SIZE { 51 | mapping 52 | .map_range_physical( 53 | vaddr + 2 * PAGE_SIZE, 54 | aligned_down + 2 * PAGE_SIZE, 55 | required_mapped_size - 2 * PAGE_SIZE, 56 | flags, 57 | ) 58 | .expect("early sdt mapping extend should succeed"); 59 | } 60 | 61 | let result = Self { 62 | sdt, 63 | size: required_mapped_size, 64 | }; 65 | 66 | // Validation 67 | // Safety: fully mapped and exists during the function call. 68 | if unsafe { Self::validate_sdt(sdt) } { 69 | Some(result) 70 | } else { 71 | result.unmap(mapping); 72 | None 73 | } 74 | } 75 | 76 | /// Unmaps this sdt. 77 | pub fn unmap(self, mapping: &'a mut ActiveMapping) { 78 | mapping.unmap_range( 79 | VirtAddr::new(self.sdt as *const _ as usize).align_down(), 80 | self.size, 81 | ); 82 | } 83 | 84 | /// Validates the SDT using its checksum. 85 | /// 86 | /// # Safety 87 | /// 88 | /// This is only safe if the `SdtHeader` comes from the ACPI tables. 89 | /// We have no way to verify whether the length is legit. 90 | /// 91 | unsafe fn validate_sdt(sdt: &SdtHeader) -> bool { 92 | let slice = slice::from_raw_parts(sdt as *const _ as *const u8, sdt.length as usize); 93 | slice.iter().map(|x| Wrapping(*x)).sum::>() == Wrapping(0) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/entry.rs: -------------------------------------------------------------------------------- 1 | use bitflags::bitflags; 2 | 3 | use crate::arch::x86_64::address::PhysAddr; 4 | 5 | bitflags! 
{ 6 | pub struct EntryFlags: u64 { 7 | const PRESENT = 1; 8 | const WRITABLE = 1 << 1; 9 | const HUGE_PAGE = 1 << 7; 10 | const GLOBAL = 1 << 8; 11 | /// No execute. 12 | const NX = 1 << 63; 13 | // Cache types (see PAT in boot assembly) 14 | const CACHE_WB = 0; 15 | const CACHE_WT = 1 << 3; 16 | const UNCACHED = 1 << 4; 17 | const UNCACHABLE = (1 << 3) | (1 << 4); 18 | const CACHE_WC = 1 << 7; 19 | const CACHE_WP = (1 << 3) | (1 << 7); 20 | } 21 | } 22 | 23 | /// Page table entry. 24 | pub struct Entry(u64); 25 | 26 | // See Intel Volume 3: bits 62:52 are ignored 27 | const USED_COUNT_MASK: u64 = 0x3ff0_0000_0000_0000; 28 | 29 | #[allow(dead_code)] 30 | impl Entry { 31 | /// Gets the used count part of this entry. 32 | /// We keep the used count in the first entry available bits. 33 | pub fn used_count(&self) -> u64 { 34 | (self.0 & USED_COUNT_MASK) >> 52 35 | } 36 | 37 | /// Raw used count part of this entry. 38 | pub fn used_count_raw(&self) -> u64 { 39 | self.0 & USED_COUNT_MASK 40 | } 41 | 42 | /// Sets the used count part of this entry. 43 | pub fn set_used_count(&mut self, count: u64) { 44 | debug_assert!(count <= 512); 45 | self.0 = (self.0 & !USED_COUNT_MASK) | (count << 52); 46 | } 47 | 48 | /// Sets the raw value. 49 | pub unsafe fn set_raw(&mut self, value: u64) { 50 | self.0 = value; 51 | } 52 | 53 | /// Gets the raw value. 54 | pub fn get_raw(&self) -> u64 { 55 | self.0 56 | } 57 | 58 | /// Returns true if this entry is unused. 59 | pub fn is_unused(&self) -> bool { 60 | self.phys_addr_unchecked().is_null() 61 | } 62 | 63 | /// Clears the entry. 64 | #[inline] 65 | pub fn clear(&mut self) { 66 | self.0 = self.used_count_raw(); 67 | } 68 | 69 | /// Sets the physical address of this entry, keeps flags. 70 | #[inline] 71 | pub fn set_phys_addr(&mut self, addr: PhysAddr) { 72 | self.0 = self.used_count_raw() | self.flags().bits() | addr.as_u64(); 73 | } 74 | 75 | /// Sets the flags, keeps the physical address. 76 | #[inline] 77 | pub fn set_flags(&mut self, flags: EntryFlags) { 78 | self.0 = self.used_count_raw() | flags.bits() | self.phys_addr_unchecked().as_u64(); 79 | } 80 | 81 | /// Sets the entry to the given address and flags. 82 | #[inline] 83 | pub fn set(&mut self, addr: PhysAddr, flags: EntryFlags) { 84 | self.0 = self.used_count_raw() | flags.bits() | addr.as_u64(); 85 | } 86 | 87 | /// Gets the flags of this entry. 88 | #[inline] 89 | pub fn flags(&self) -> EntryFlags { 90 | EntryFlags::from_bits_truncate(self.0) 91 | } 92 | 93 | /// Gets the physical address from page entry. 94 | pub fn phys_addr(&self) -> Option { 95 | if self.flags().contains(EntryFlags::PRESENT) { 96 | Some(self.phys_addr_unchecked()) 97 | } else { 98 | None 99 | } 100 | } 101 | 102 | /// Gets the physical address from page entry (unchecked). 
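    /// Only the address bits (51:12) are kept, e.g. a raw entry of 0x8000_0000_dead_f06b
    /// yields 0xdead_f000; the caller must check the PRESENT flag separately.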
103 | pub fn phys_addr_unchecked(&self) -> PhysAddr { 104 | PhysAddr::new((self.0 & 0x000f_ffff_ffff_f000) as usize) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /kernel/src/arch/acpi/hpet.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::acpi::sdt::SdtHeader; 2 | use crate::arch::acpi::AcpiAddress; 3 | use crate::arch::address::PhysAddr; 4 | use crate::arch::address::VirtAddr; 5 | use crate::arch::paging::ActiveMapping; 6 | use crate::arch::paging::EntryFlags; 7 | use crate::mm::mapper::MemoryMapper; 8 | use core::convert::TryInto; 9 | 10 | #[derive(Debug)] 11 | pub struct HpetData { 12 | address: PhysAddr, 13 | } 14 | 15 | #[must_use = "Hpet should be unmapped after use"] 16 | pub struct Hpet { 17 | address: VirtAddr, 18 | clock_period: u64, 19 | } 20 | 21 | #[repr(C, packed)] 22 | pub struct HpetTable { 23 | sdt_header: SdtHeader, 24 | event_timer_block_id: u32, 25 | address: AcpiAddress, 26 | hpet_nr: u8, 27 | min_clock_tick_in_periodic_mode: u16, 28 | attributes: u8, 29 | } 30 | 31 | /// Parses a Hpet table. 32 | pub fn parse_hpet(table: &HpetTable) -> HpetData { 33 | let address = PhysAddr::new( 34 | table 35 | .address 36 | .address 37 | .try_into() 38 | .expect("address should fit"), 39 | ); 40 | 41 | HpetData { address } 42 | } 43 | 44 | impl Hpet { 45 | /// Creates a mapped `Hpet` from `HpetData`. 46 | /// 47 | /// # Safety 48 | /// 49 | /// This can cause issues if the `HpetData` or virtual address is invalid. 50 | /// 51 | pub unsafe fn from(mapping: &mut ActiveMapping, vaddr: VirtAddr, hpet_data: HpetData) -> Self { 52 | mapping 53 | .map_single( 54 | vaddr, 55 | hpet_data.address, 56 | EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NX | EntryFlags::UNCACHED, 57 | ) 58 | .expect("hpet mapping should succeed"); 59 | 60 | let mut hpet = Self { 61 | address: vaddr, 62 | clock_period: 0, 63 | }; 64 | 65 | // Initialize. 66 | { 67 | let capability = hpet.read(0x0); 68 | let clock_period = capability >> 32; // in femptoseconds 69 | let num_timers = ((capability >> 8) & 31) as usize + 1; 70 | println!("{} hpet timers", num_timers); 71 | for i in 0..num_timers { 72 | // Disable interrupts on this timer. 73 | let t0_cfg_cap = hpet.read(0x100 + i * 0x20); 74 | //println!("{:b}", t0_cfg_cap); 75 | let t0_cfg_cap = t0_cfg_cap & !(1 << 2); 76 | hpet.write(0x100 + i * 0x20, t0_cfg_cap); 77 | } 78 | let enable = hpet.read(0x10) | (1 << 0); 79 | hpet.write(0x10, enable); 80 | hpet.clock_period = clock_period; 81 | } 82 | 83 | hpet 84 | } 85 | 86 | /// Reads from a 64 bit register at `offset`. 87 | /// 88 | /// # Safety 89 | /// 90 | /// This could cause exceptions if the offset is invalid. 91 | /// 92 | unsafe fn read(&self, offset: usize) -> u64 { 93 | self.address 94 | .as_const::() 95 | .add(offset / 8) 96 | .read_volatile() 97 | } 98 | 99 | /// Writes to a 64 bit register at `offset`. 100 | /// 101 | /// # Safety 102 | /// 103 | /// This could cause exceptions if the offset is invalid. 104 | /// 105 | unsafe fn write(&self, offset: usize, val: u64) { 106 | self.address 107 | .as_mut::() 108 | .add(offset / 8) 109 | .write_volatile(val); 110 | } 111 | 112 | /// Reads the current counter. 113 | pub fn counter(&self) -> u64 { 114 | // Safety: correct offset and in mapped memory 115 | unsafe { self.read(0xf0) } 116 | } 117 | 118 | /// Convert a counter value to nanoseconds. 
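    /// The clock period is in femtoseconds, so nanoseconds = ticks * period / 1_000_000.
    /// Dividing both factors by 1000 first avoids overflowing the u64 intermediate;
    /// e.g. with a 10 MHz timer (period 10^8 fs), 1_000 ticks map to 100_000 ns.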
119 | pub fn counter_to_ns(&self, val: u64) -> u64 { 120 | (val / 1000) * (self.clock_period / 1000) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /kernel/src/util/tar.rs: -------------------------------------------------------------------------------- 1 | //! Basic, read-only, in-memory tar support. 2 | 3 | use core::iter::repeat; 4 | use core::marker::PhantomData; 5 | use core::mem::size_of; 6 | use core::slice; 7 | 8 | /// Tar standard Posix header. 9 | #[repr(C, align(512))] 10 | struct PosixHeader { 11 | name: [u8; 100], 12 | mode: [u8; 8], 13 | uid: [u8; 8], 14 | gid: [u8; 8], 15 | size: [u8; 12], 16 | mktime: [u8; 12], 17 | chksum: [u8; 8], 18 | typeflag: u8, 19 | linkname: [u8; 100], 20 | magic: [u8; 6], 21 | version: [u8; 2], 22 | uname: [u8; 32], 23 | gname: [u8; 32], 24 | devmajor: [u8; 8], 25 | devminor: [u8; 8], 26 | prefix: [u8; 155], 27 | } 28 | 29 | /// Representation of a tar archive. 30 | pub struct Tar<'a> { 31 | contents: &'a [u8], 32 | } 33 | 34 | /// Representation of a file in a tar archive. 35 | #[derive(Debug)] 36 | pub struct TarFile<'a> { 37 | data: &'a [u8], 38 | } 39 | 40 | /// Iterator for the files in a tar archive. 41 | pub struct TarIterator<'a> { 42 | ptr: *const PosixHeader, 43 | end: *const PosixHeader, 44 | _phantom: PhantomData<&'a ()>, 45 | } 46 | 47 | impl<'a> Tar<'a> { 48 | /// Creates a new in-memory tar. 49 | pub unsafe fn from_slice(contents: &'a [u8]) -> Option { 50 | (contents.len() % 512 == 0).then_some(Self { contents }) 51 | } 52 | } 53 | 54 | impl<'a> TarFile<'a> { 55 | /// Gets the file contents as a slice. 56 | pub fn as_slice(&self) -> &'a [u8] { 57 | self.data 58 | } 59 | } 60 | 61 | impl<'a> TarIterator<'a> { 62 | /// Converts an octal string to a number. 63 | fn octal_string_to_number(&self, str: &'a [u8]) -> Option { 64 | let str = match str.iter().position(|x| *x == 0) { 65 | Some(i) => &str[..i], 66 | None => str, 67 | }; 68 | 69 | str.iter().try_fold(0, |sum, c| match *c { 70 | b'0'..=b'9' => Some(sum * 8 + (*c - b'0') as usize), 71 | _ => None, 72 | }) 73 | } 74 | } 75 | 76 | impl<'a> IntoIterator for Tar<'a> { 77 | type Item = TarFile<'a>; 78 | type IntoIter = TarIterator<'a>; 79 | 80 | #[allow(clippy::cast_ptr_alignment)] 81 | fn into_iter(self) -> Self::IntoIter { 82 | assert_eq!(self.contents.as_ptr() as usize % 512, 0); 83 | let ptr = self.contents.as_ptr() as *const PosixHeader; 84 | TarIterator { 85 | ptr, 86 | end: unsafe { ptr.add(self.contents.len() / 512) }, 87 | _phantom: PhantomData, 88 | } 89 | } 90 | } 91 | 92 | impl<'a> Iterator for TarIterator<'a> { 93 | type Item = TarFile<'a>; 94 | 95 | fn next(&mut self) -> Option { 96 | if self.ptr >= self.end { 97 | return None; 98 | } 99 | 100 | // Calculate checksum 101 | let chksum = { 102 | let chksum_offset = offset_of!(PosixHeader, chksum); 103 | let slice = 104 | unsafe { slice::from_raw_parts(self.ptr as *const u8, size_of::()) }; 105 | slice[0..chksum_offset] 106 | .iter() 107 | .chain(repeat(&b' ').take(8)) 108 | .chain(slice[chksum_offset + 8..].iter()) 109 | .map(|x| *x as u32) 110 | .sum::() 111 | }; 112 | 113 | let header = unsafe { &*self.ptr }; 114 | 115 | if self.octal_string_to_number(&header.chksum)? 
!= chksum as usize { 116 | return None; 117 | } 118 | 119 | if header.name[0] == 0 { 120 | return None; 121 | } 122 | 123 | let size = self.octal_string_to_number(&header.size)?; 124 | let data_ptr = unsafe { self.ptr.offset(1) }; 125 | 126 | self.ptr = unsafe { data_ptr.add((size + 512 - 1) / 512) }; 127 | 128 | if self.ptr >= self.end { 129 | return None; 130 | } 131 | 132 | Some(TarFile { 133 | data: unsafe { slice::from_raw_parts(data_ptr as *const u8, size) }, 134 | }) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kwast 2 | 3 | [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) [![Build Status](https://travis-ci.com/kwast-os/kwast.svg?branch=main)](https://travis-ci.com/kwast-os/kwast) 4 | 5 | **Kwast** (will be) an operating system, written in Rust, running WebAssembly. It uses a microkernel architecture for flexibility. 6 | 7 | Since WebAssembly was designed to be a safe language, we can run it without having to use hardware usermode and multiple address spaces (optionally). 8 | Processes can be run in the same address space (multiple SIPs in a single hardware protection domain), or in seperate address spaces. 9 | This enables low-cost context switches, low-cost syscalls, and a microkernel design without a big performance hit. 10 | Another interesting thing is that it means the software is cross-platform and that the compiler could enable platform-specific optimisations. 11 | 12 | For notes on Spectre, Meltdown and other related issues, see [#10](https://github.com/nielsdos/kwast/issues/10). 13 | An additional idea is to allow to use optional hardware protection domains in the future if requested. 14 | 15 | ## Contents 16 | 17 | * [Current status](#current_status) 18 | * [Getting started](#getting_started) 19 | * [Short-term goals](#short_term_goals) 20 | * [Built with](#built_with) 21 | * [Similar projects](#similar_projects) 22 | 23 | ## Current status 24 | 25 | Currently, it runs basic WebAssembly code in a basic multitasked environment. 26 | The heap uses a slab allocator design, and the virtual memory areas are managed by an AVL tree. 27 | For the ABI, I started with implementing [WASI](https://github.com/WebAssembly/WASI). 28 | 29 | Here's a screenshot of a WASI compiled Rust program ([userspace/wasm-test](userspace/wasm-test)). 30 | ![Screenshot](docs/screenshot.png "A simple Rust program") 31 | 32 | ## Getting started 33 | 34 | These instructions help you get started with building the source and getting it to run. 35 | 36 | ### Requirements 37 | 38 | * make 39 | * grub-mkrescue (you might also need to install xorriso) 40 | * qemu-system-x86_64 41 | * Rust and Cargo 42 | * Optionally wasm-strip (from [wabt](https://github.com/WebAssembly/wabt)), which reduces the binary size 43 | 44 | ### Setting up a toolchain 45 | 46 | You can setup your toolchain using the following steps: 47 | ```bash 48 | # (Inside the project root folder.) 49 | # You'll need to get the rust nightly and install the wasi toolchain: 50 | rustup component add rust-src 51 | rustup target add wasm32-wasi 52 | 53 | # You'll also need a cross-compile binutils, I wrote a bash script that builds this for you. 54 | cd toolchain 55 | ./setup_cross_binutils.sh 56 | ``` 57 | Now you're ready to build and run the project! 58 | 59 | ### Building & Running 60 | 61 | There's currently a Makefile in the `kernel` folder. 
The **Makefile** there provides some rules: 62 | 63 | ```bash 64 | # (Inside the project root folder.) 65 | make run # Builds iso and start a QEMU virtual machine 66 | 67 | # If you don't want to run, but only build a bootable ISO: 68 | make iso 69 | 70 | # You can make a release build using: 71 | make iso BUILD=release # (or run) 72 | 73 | # You can run tests using 74 | ./run_tests 75 | ``` 76 | 77 | ## Short-term goals 78 | 79 | * Simple PS/2 server & similar small servers 80 | * Run basic programs 81 | * SMP 82 | 83 | ## Built with 84 | 85 | * [Cranelift](https://github.com/bytecodealliance/wasmtime/tree/main/cranelift) - Code generator used to parse & run WebAssembly. Kwast uses a fork of Cranelift to let it work in a no_std environment. 86 | 87 | * To integrate Cranelift, [wasmtime](https://github.com/bytecodealliance/wasmtime/) has been used as a reference implementation, which is licensed under the [Apache License 2.0](https://github.com/bytecodealliance/wasmtime/blob/main/LICENSE). 88 | 89 | * [Spleen font](https://github.com/fcambus/spleen) - Spleen is a monospaced bitmap font licensed under the [BSD 2-Clause "Simplified" License](https://github.com/fcambus/spleen/blob/master/LICENSE). 90 | 91 | ## Similar projects 92 | * [Nebulet](https://github.com/nebulet/nebulet) - A microkernel that implements a WebAssembly "usermode" that runs in Ring 0 93 | * [wasmjit](https://github.com/kenny-ngo/wasmjit) - Small Embeddable WebAssembly Runtime 94 | * [cervus](https://github.com/cervus-v/cervus) - A WebAssembly subsystem for Linux 95 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/frame.rs: -------------------------------------------------------------------------------- 1 | use multiboot2::MemoryMapTag; 2 | 3 | use super::{invalidate_page, ActiveMapping, EntryFlags, PhysAddr, VirtAddr}; 4 | use crate::mm::mapper::MemoryMapper; 5 | use crate::mm::pmm::FrameAllocator; 6 | 7 | impl FrameAllocator { 8 | /// Applies the memory map. 9 | pub fn apply_mmap(&mut self, tag: &MemoryMapTag, reserved_end: PhysAddr) { 10 | // Will be the last entry of the PML2 (PML2 exists) 11 | const P2_IDX: usize = 511; 12 | let tmp_2m_map_addr = VirtAddr::new(P2_IDX * 0x200_000); 13 | 14 | fn current_to_prev_entry_addr(current: usize) -> *mut usize { 15 | ((P2_IDX * 0x200_000) | (current & 0x1ff_fff)) as *mut _ 16 | } 17 | 18 | // Mapping flags 19 | let map_flags = 20 | EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NX | EntryFlags::HUGE_PAGE; 21 | 22 | // Safety: we are the only running thread right now, so no locking is required. 23 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 24 | let mut e = mapping.get_2m_entry(tmp_2m_map_addr).unwrap(); 25 | 26 | // Previous entry address 27 | let mut top: usize = 0; 28 | let mut prev_entry_addr: *mut usize = &mut top as *mut _; 29 | 30 | //let mut count: usize = 0; 31 | 32 | for x in tag.memory_areas() { 33 | // There is actually no guarantee about the sanitization of the data. 34 | // While it is rare that the addresses won't be page aligned, there's apparently been 35 | // cases before where it wasn't page aligned. 
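            // Round the start up and the end down to page boundaries so that only frames
            // lying completely inside the usable area end up on the free stack.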
36 | let mut start = PhysAddr::new(x.start_address() as usize).align_up(); 37 | let end = PhysAddr::new(x.end_address() as usize).align_down(); 38 | 39 | // Adjust for reserved area 40 | if start < reserved_end { 41 | start = reserved_end; 42 | if start >= end { 43 | continue; 44 | } 45 | } 46 | 47 | let mut current = start.as_usize(); 48 | let end = end.as_usize(); 49 | 50 | // Initial write for this area is a little bit special because we still 51 | // need to write to the previous mapping. Otherwise the stack wouldn't be linked. 52 | // Can't fail. 53 | unsafe { 54 | prev_entry_addr.write(current); 55 | } 56 | 57 | e.set(PhysAddr::new(current & !0x1ff_fff), map_flags); 58 | prev_entry_addr = current_to_prev_entry_addr(current); 59 | 60 | while current < end { 61 | unsafe { 62 | prev_entry_addr.write(current); 63 | } 64 | 65 | // When we reach a new 2 MiB part, map that to our temporary mapping. 66 | if current & 0x1ff_fff == 0 { 67 | e.set(PhysAddr::new(current & !0x1ff_fff), map_flags); 68 | } 69 | 70 | prev_entry_addr = current_to_prev_entry_addr(current); 71 | current += 0x1000; 72 | //count += 1; 73 | } 74 | } 75 | 76 | // End 77 | unsafe { 78 | prev_entry_addr.write(0); 79 | } 80 | self.top = PhysAddr::new(top); 81 | 82 | // Unmap 83 | { 84 | // Somewhat ugly, but better than complicating other code probably (for now)... 85 | let p2 = mapping 86 | .p4 87 | .next_table_mut(0) 88 | .unwrap() 89 | .next_table_mut(0) 90 | .unwrap(); 91 | 92 | p2.entries[P2_IDX].clear(); 93 | p2.decrease_used_count(); 94 | unsafe { 95 | invalidate_page(tmp_2m_map_addr.as_u64()); 96 | } 97 | } 98 | 99 | // self.debug_print_frames(); 100 | } 101 | 102 | /// Debug print all frames. 103 | #[allow(dead_code)] 104 | fn debug_print_frames(&mut self) { 105 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 106 | 107 | while !self.top.is_null() { 108 | self.pop_top(|top| { 109 | println!("{:x}", top.as_usize()); 110 | let vaddr = VirtAddr::new(0x1000); 111 | mapping.map_single(vaddr, top, EntryFlags::PRESENT).unwrap(); 112 | vaddr 113 | }) 114 | .unwrap(); 115 | } 116 | 117 | println!(); 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/table.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | 3 | use crate::arch::x86_64::address::VirtAddr; 4 | 5 | use super::entry::*; 6 | use super::MemoryError; 7 | use crate::mm::pmm::with_pmm; 8 | 9 | // We use the clever solution for static type safety as described by [Philipp Oppermann's blog](https://os.phil-opp.com/) 10 | 11 | pub trait Level {} 12 | 13 | pub trait HierarchicalLevel: Level { 14 | type NextLevel: Level; 15 | } 16 | 17 | pub enum Level4 {} 18 | 19 | pub enum Level3 {} 20 | 21 | pub enum Level2 {} 22 | 23 | pub enum Level1 {} 24 | 25 | impl Level for Level4 {} 26 | 27 | impl Level for Level3 {} 28 | 29 | impl Level for Level2 {} 30 | 31 | impl Level for Level1 {} 32 | 33 | impl HierarchicalLevel for Level4 { 34 | type NextLevel = Level3; 35 | } 36 | 37 | impl HierarchicalLevel for Level3 { 38 | type NextLevel = Level2; 39 | } 40 | 41 | impl HierarchicalLevel for Level2 { 42 | type NextLevel = Level1; 43 | } 44 | 45 | #[repr(transparent)] 46 | pub struct Table { 47 | pub entries: [Entry; 512], 48 | // Rust doesn't allow unused type parameters. 49 | _phantom: PhantomData, 50 | } 51 | 52 | impl Table 53 | where 54 | L: Level, 55 | { 56 | /// Clears the table entries. 
(internal use only) 57 | pub fn clear(&mut self) { 58 | for e in self.entries.iter_mut() { 59 | e.clear(); 60 | } 61 | } 62 | 63 | /// Gets the used count. 64 | pub fn used_count(&self) -> u64 { 65 | self.entries[0].used_count() 66 | } 67 | 68 | /// Sets the used count. 69 | pub fn set_used_count(&mut self, count: u64) { 70 | self.entries[0].set_used_count(count); 71 | } 72 | 73 | /// Increases the used count. 74 | pub fn increase_used_count(&mut self) { 75 | self.set_used_count(self.used_count() + 1); 76 | } 77 | 78 | /// Decreases the used count. 79 | pub fn decrease_used_count(&mut self) { 80 | debug_assert!(self.used_count() > 0); 81 | self.set_used_count(self.used_count() - 1); 82 | } 83 | } 84 | 85 | impl Table 86 | where 87 | L: HierarchicalLevel, 88 | { 89 | /// Gets the next table address (unchecked). (internal use only). 90 | fn next_table_address_unchecked(&self, index: usize) -> usize { 91 | let addr = self as *const _ as usize; 92 | (addr << 9) | (index << 12) 93 | } 94 | 95 | /// Gets the next table address. 96 | fn next_table_address(&self, index: usize) -> Option { 97 | let flags = self.entries[index].flags(); 98 | 99 | // Would be invalid if we refer to a huge page 100 | debug_assert!(!flags.contains(EntryFlags::HUGE_PAGE)); 101 | 102 | if flags.contains(EntryFlags::PRESENT) { 103 | Some(self.next_table_address_unchecked(index)) 104 | } else { 105 | None 106 | } 107 | } 108 | 109 | /// Gets the next table level. 110 | pub fn next_table(&self, index: usize) -> Option<&Table> { 111 | self.next_table_address(index) 112 | .map(|x| unsafe { &*(x as *const _) }) 113 | } 114 | 115 | /// Gets the next table level (mutable). 116 | pub fn next_table_mut(&self, index: usize) -> Option<&mut Table> { 117 | self.next_table_address(index) 118 | .map(|x| unsafe { &mut *(x as *mut _) }) 119 | } 120 | 121 | /// Gets the next table (mutable), creates it if it doesn't exist yet. 122 | pub fn next_table_may_create( 123 | &mut self, 124 | index: usize, 125 | ) -> Result<&mut Table, MemoryError> { 126 | let flags = self.entries[index].flags(); 127 | debug_assert!(!flags.contains(EntryFlags::HUGE_PAGE)); 128 | 129 | let addr = self.next_table_address_unchecked(index); 130 | let table = unsafe { &mut *(addr as *mut Table) }; 131 | 132 | // Need to create a table. 133 | if !flags.contains(EntryFlags::PRESENT) { 134 | // We could call the page mapping functions here, but it would be slower than 135 | // manipulating the pmm ourselves. 136 | with_pmm(|pmm| { 137 | pmm.pop_top(|top| { 138 | // We don't need to invalidate because it wasn't present. 139 | self.entries[index].set(top, EntryFlags::PRESENT | EntryFlags::WRITABLE); 140 | 141 | VirtAddr::new(addr) 142 | }) 143 | })?; 144 | 145 | self.increase_used_count(); 146 | 147 | table.clear(); 148 | } 149 | 150 | Ok(table) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /kernel/src/tests/heap_test.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::arch::paging::PAGE_SIZE; 3 | use alloc::boxed::Box; 4 | use alloc::vec::Vec; 5 | use core::mem::size_of; 6 | 7 | /// Test one allocation. 8 | #[cfg(feature = "test-heap-one-alloc")] 9 | pub fn test_main() { 10 | Box::new(42); 11 | } 12 | 13 | /// Test big allocation and freeing. 
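/// Reserves several 8193-byte vectors, checks that live allocations never alias, and checks
/// that the address of a dropped allocation is handed out again for the next one.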
14 | #[cfg(feature = "test-heap-big-alloc")] 15 | pub fn test_main() { 16 | let mut vec1: Vec = Vec::new(); 17 | vec1.reserve(8193); 18 | let mut vec2: Vec = Vec::new(); 19 | vec2.reserve(8193); 20 | assert_ne!(vec1.as_ptr(), vec2.as_ptr()); 21 | 22 | let test; 23 | { 24 | let mut vec3: Vec = Vec::new(); 25 | vec3.reserve(8193); 26 | assert_ne!(vec1.as_ptr(), vec3.as_ptr()); 27 | assert_ne!(vec2.as_ptr(), vec3.as_ptr()); 28 | test = vec3.as_ptr(); 29 | } 30 | 31 | let mut vec4: Vec = Vec::new(); 32 | vec4.reserve(8193); 33 | assert_ne!(vec1.as_ptr(), vec4.as_ptr()); 34 | assert_ne!(vec2.as_ptr(), vec4.as_ptr()); 35 | assert_eq!(test, vec4.as_ptr()); 36 | } 37 | 38 | /// Test heap by inspecting the pointers. 39 | #[cfg(feature = "test-heap-realloc")] 40 | pub fn test_main() { 41 | // Regular realloc. 42 | let n = 1000; 43 | let mut vec = Vec::new(); 44 | for i in 0..n { 45 | vec.push(i); 46 | } 47 | 48 | assert_eq!(vec.iter().sum::(), (n - 1) * n / 2); 49 | 50 | // Big realloc. 51 | let items = 32768 / size_of::(); 52 | let mut vec = Vec::::new(); 53 | vec.reserve_exact(items); 54 | let mut vec2 = Vec::::new(); 55 | vec2.reserve_exact(items); 56 | for i in 0..items { 57 | vec.push(i); 58 | vec2.push(i * 2); 59 | } 60 | 61 | vec.reserve_exact(items * 2); 62 | for i in 0..items { 63 | assert_eq!(vec[i], i); 64 | assert_eq!(vec2[i], i * 2); 65 | } 66 | } 67 | 68 | /// Test heap by inspecting the pointers. 69 | #[cfg(feature = "test-heap-pointers")] 70 | pub fn test_main() { 71 | let mut a: Vec = Vec::new(); 72 | a.reserve(12); 73 | let mut b: Vec = Vec::new(); 74 | b.reserve(12); 75 | let mut c: Vec = Vec::new(); 76 | c.reserve(12); 77 | let mut d: Vec = Vec::new(); 78 | d.reserve(126); 79 | 80 | // Test offset inside slab 81 | assert_eq!(a.as_ptr(), unsafe { b.as_ptr().offset(-32) }); 82 | assert_eq!(b.as_ptr(), unsafe { c.as_ptr().offset(-32) }); 83 | assert_ne!(d.as_ptr(), a.as_ptr()); 84 | assert_ne!(d.as_ptr(), b.as_ptr()); 85 | assert_ne!(d.as_ptr(), c.as_ptr()); 86 | 87 | // Test reallocating 88 | drop(b); 89 | let mut b: Vec = Vec::new(); 90 | b.reserve(20); 91 | assert_eq!(b.as_ptr(), unsafe { c.as_ptr().offset(-32) }); 92 | 93 | // Test partial & free: exhaust the 512-byte cache, 94 | // then start a new slab, then check what the heap picks. 95 | drop(a); 96 | drop(c); 97 | let mut a: Vec = Vec::new(); 98 | a.reserve(512); 99 | let mut b: Vec = Vec::new(); 100 | b.reserve(512); 101 | let mut c: Vec = Vec::new(); 102 | c.reserve(512); 103 | let mut d: Vec = Vec::new(); 104 | d.reserve(512); 105 | let mut e: Vec = Vec::new(); 106 | e.reserve(512); 107 | let mut f: Vec = Vec::new(); 108 | f.reserve(512); 109 | let mut g: Vec = Vec::new(); 110 | g.reserve(512); 111 | let mut h: Vec = Vec::new(); 112 | h.reserve(512); 113 | assert_ne!(a.as_ptr(), b.as_ptr()); 114 | assert_ne!(b.as_ptr(), c.as_ptr()); 115 | assert_ne!(c.as_ptr(), d.as_ptr()); 116 | assert_ne!(d.as_ptr(), e.as_ptr()); 117 | assert_ne!(e.as_ptr(), f.as_ptr()); 118 | assert_ne!(f.as_ptr(), g.as_ptr()); 119 | assert_ne!(g.as_ptr(), h.as_ptr()); 120 | let mut i: Vec = Vec::new(); 121 | i.reserve(512); 122 | 123 | fn page_of(x: *const i8) -> usize { 124 | x as usize & !(PAGE_SIZE - 1) 125 | } 126 | 127 | assert_eq!(page_of(f.as_ptr()), page_of(g.as_ptr())); 128 | assert!(page_of(h.as_ptr()) - page_of(a.as_ptr()) >= PAGE_SIZE); 129 | 130 | // Drop h & i so that we have a free slab. 131 | let i_ptr = i.as_ptr(); 132 | drop(h); 133 | drop(i); 134 | // Also drop a & b, so we can see if it prefers the partial or the free. 
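    // Expected behaviour: the allocator keeps serving from the partially used slab first
    // (b's slot, then a's), and only falls back to the completely free slab (i's old slot)
    // once the partial one has filled up again.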
135 | let a_ptr = a.as_ptr(); 136 | let b_ptr = b.as_ptr(); 137 | drop(a); 138 | drop(b); 139 | let mut x: Vec = Vec::new(); 140 | x.reserve(512); 141 | let mut y: Vec = Vec::new(); 142 | y.reserve(512); 143 | assert_eq!(x.as_ptr(), b_ptr); 144 | assert_eq!(y.as_ptr(), a_ptr); 145 | // The partial is now full, should get from the free slab now. 146 | let mut z: Vec = Vec::new(); 147 | z.reserve(512); 148 | assert_eq!(z.as_ptr(), i_ptr); 149 | } 150 | -------------------------------------------------------------------------------- /kernel/src/util/lfb_text.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::{PhysAddr, VirtAddr}; 2 | use crate::arch::paging::{ActiveMapping, EntryFlags}; 3 | use crate::mm::mapper::MemoryMapper; 4 | use crate::sync::spinlock::IrqSpinlock; 5 | use crate::util::font::{FONT_8X16, FONT_HEIGHT, FONT_WIDTH}; 6 | use core::fmt::{self, Write}; 7 | use spin::Once; 8 | 9 | pub struct LfbParameters { 10 | pub address: PhysAddr, 11 | pub width: u32, 12 | pub height: u32, 13 | pub pitch: u32, 14 | /// Bits per pixel. 15 | pub bpp: u8, 16 | } 17 | 18 | struct LfbText { 19 | address: VirtAddr, 20 | x: u32, 21 | y: u32, 22 | width: u32, 23 | height: u32, 24 | pitch: u32, 25 | } 26 | 27 | static LFB_TEXT: Once> = Once::new(); 28 | 29 | /// Initializes the LFB text output. 30 | pub fn init( 31 | params: LfbParameters, 32 | mapping: &mut ActiveMapping, 33 | start: VirtAddr, 34 | ) -> Option { 35 | if params.bpp != 32 { 36 | return None; 37 | } 38 | 39 | let size = params.pitch * params.height * (params.bpp as u32); 40 | let flags = EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NX | EntryFlags::CACHE_WC; 41 | 42 | mapping 43 | .map_range_physical(start, params.address, size as _, flags) 44 | .ok()?; 45 | 46 | LFB_TEXT.call_once(|| { 47 | IrqSpinlock::new(LfbText { 48 | address: start, 49 | x: 0, 50 | y: 0, 51 | width: params.width, 52 | height: params.height, 53 | pitch: params.pitch / 4, 54 | }) 55 | }); 56 | 57 | Some((start + size as _).align_up()) 58 | } 59 | 60 | impl LfbText { 61 | /// Sets a pixel. 62 | fn set_pixel(&mut self, x: u32, y: u32, color: u32) { 63 | if x < self.width && y < self.height { 64 | // Safety: in bounds and aligned. 65 | unsafe { 66 | *self.address.as_mut::().add((y * self.pitch + x) as _) = color; 67 | } 68 | } 69 | } 70 | 71 | /// Gets a pixel. 72 | fn get_pixel(&mut self, x: u32, y: u32) -> u32 { 73 | if x < self.width && y < self.height { 74 | // Safety: in bounds and aligned. 75 | unsafe { 76 | *self 77 | .address 78 | .as_const::() 79 | .add((y * self.pitch + x) as _) 80 | } 81 | } else { 82 | 0 83 | } 84 | } 85 | 86 | /// Sets a character at a position in a color. 87 | fn set_character(&mut self, x: u32, y: u32, color: u32, mut c: u8) { 88 | // Handle out of range characters as spaces. 89 | if c < 32 || c > 127 { 90 | c = 32; 91 | } 92 | 93 | let c = &FONT_8X16[c as usize - 32]; 94 | 95 | for (yo, yp) in (y * FONT_HEIGHT..y * FONT_HEIGHT + FONT_HEIGHT).enumerate() { 96 | for (xo, xp) in (x * FONT_WIDTH..x * FONT_WIDTH + FONT_WIDTH).enumerate() { 97 | let color = if c[yo] & (1 << (7 - xo)) > 0 { 98 | color 99 | } else { 100 | 0 101 | }; 102 | self.set_pixel(xp, yp, color); 103 | } 104 | } 105 | } 106 | 107 | /// Goes to a new line and shifts the text up if required. 108 | fn new_line(&mut self) { 109 | self.x = 0; 110 | self.y += 1; 111 | 112 | if self.y >= self.height / FONT_HEIGHT { 113 | // This is very slow, it would be better to have a buffer with the characters. 
114 | // But this is only a debug output after all... 115 | for y in 0..self.height - FONT_HEIGHT { 116 | for x in 0..self.width { 117 | let color = self.get_pixel(x, y + FONT_HEIGHT); 118 | self.set_pixel(x, y, color); 119 | } 120 | } 121 | for y in self.height - FONT_HEIGHT..self.height { 122 | for x in 0..self.width { 123 | self.set_pixel(x, y, 0); 124 | } 125 | } 126 | 127 | self.y -= 1; 128 | } 129 | } 130 | 131 | /// Writes a single character. 132 | fn write_character(&mut self, c: u8) { 133 | match c { 134 | b'\n' => self.new_line(), 135 | b'\r' => self.x = 0, 136 | c => { 137 | self.set_character(self.x, self.y, 0xffcccccc, c); 138 | 139 | self.x += 1; 140 | if self.x >= self.width / FONT_WIDTH { 141 | self.new_line(); 142 | } 143 | } 144 | } 145 | } 146 | } 147 | 148 | impl Write for LfbText { 149 | fn write_str(&mut self, s: &str) -> fmt::Result { 150 | for c in s.bytes() { 151 | self.write_character(c); 152 | } 153 | Ok(()) 154 | } 155 | } 156 | 157 | /// Prints a formatted string. 158 | pub fn _print(args: fmt::Arguments) { 159 | if let Some(lfb_text) = LFB_TEXT.try_get() { 160 | lfb_text.lock().write_fmt(args).unwrap(); 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /kernel/src/arch/asid.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::paging::invalidate_asid; 2 | use atomic::Atomic; 3 | 4 | pub type AsidGeneration = u32; 5 | 6 | /// Address Space Identifier 7 | #[derive(Debug, Copy, Clone)] 8 | #[repr(C, align(8))] 9 | pub struct Asid { 10 | generation: AsidGeneration, 11 | number: u16, 12 | } 13 | 14 | const_assert!(Atomic::::is_lock_free()); 15 | 16 | impl Asid { 17 | /// Gets the 64-bit representation of the asid. 18 | #[inline] 19 | pub fn as_u64(self) -> u64 { 20 | self.number as u64 21 | } 22 | 23 | /// Null asid. Must be valid in both asid and non-asid systems. 24 | pub const fn null() -> Self { 25 | Self { 26 | generation: 0, 27 | number: 0, 28 | } 29 | } 30 | } 31 | 32 | #[derive(Copy, Clone)] 33 | struct Entry { 34 | /// The bitsets for free/used (1 = free, 0 = used). 35 | used_free: u64, 36 | /// Bitmap which indicate that entry has not been used yet in this generation. 37 | /// 1 = never used, 0 = used at least once 38 | used_in_this_generation: u64, 39 | } 40 | 41 | pub struct AsidManager { 42 | /// 1 = at least one available in the bitset corresponding to this bit. 43 | /// 0 = all used 44 | global_mask: u64, 45 | /// Generation counter, used in the case of a roll-over. 46 | generation: AsidGeneration, 47 | /// Bitmasks. 48 | entries: [Entry; 64], 49 | } 50 | 51 | impl Entry { 52 | pub const fn new() -> Self { 53 | Self { 54 | used_free: core::u64::MAX, 55 | used_in_this_generation: core::u64::MAX, 56 | } 57 | } 58 | } 59 | 60 | impl AsidManager { 61 | /// Creates a new Address Space Identifier Manager. 62 | pub const fn new() -> Self { 63 | Self { 64 | global_mask: core::u64::MAX, 65 | generation: 0, 66 | entries: [Entry::new(); 64], 67 | } 68 | } 69 | 70 | /// Check if the asid is still valid. 71 | #[inline] 72 | pub fn is_valid(&self, asid: Asid) -> bool { 73 | asid.generation == self.generation 74 | } 75 | 76 | /// Converts an asid to entry number and bit. 77 | fn asid_to_entry_and_bit(asid: Asid) -> (usize, u64) { 78 | unsafe { 79 | core::intrinsics::assume(asid.number < 4096); 80 | } 81 | ((asid.number >> 6) as usize, asid.number as u64 & 63) 82 | } 83 | 84 | /// Allocates a new Asid. 
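    /// If every ASID is taken, the generation counter is bumped and all bitmaps are reset
    /// (a roll-over). If `old` comes from the previous generation and nobody reused it
    /// since, it is handed back without invalidation; otherwise a free ASID is located
    /// through the two-level bitmap and invalidated before being returned.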
85 | pub fn alloc(&mut self, old: Asid) -> Asid { 86 | // Roll-over if needed. 87 | if self.global_mask == 0 { 88 | self.global_mask = core::u64::MAX; 89 | for i in 0..64 { 90 | self.entries[i] = Entry::new(); 91 | } 92 | 93 | self.generation = self.generation.wrapping_add(1); 94 | } 95 | 96 | // Try to reuse the old asid. 97 | // Only possible if it was used in the previous generation 98 | // and no other domain has used this already. 99 | let (global_free, free) = if old.generation == self.generation.wrapping_sub(1) && { 100 | let (index, bit) = Self::asid_to_entry_and_bit(old); 101 | self.entries[index].used_in_this_generation & (1u64 << bit) > 0 102 | } { 103 | // No need to invalidate the asid since it was not used since the previous 104 | // generation and the TLB entries are thus still from the same domain. 105 | let (index, bit) = Self::asid_to_entry_and_bit(old); 106 | self.entries[index].used_in_this_generation ^= 1u64 << bit; 107 | (index, bit) 108 | } else { 109 | // Search in the global mask for an entry with free asids. 110 | let global_free = self.global_mask.trailing_zeros(); 111 | unsafe { 112 | core::intrinsics::assume(global_free < 64); 113 | } 114 | 115 | // Find a free asid and mark it as used. 116 | let free = self.entries[global_free as usize] 117 | .used_free 118 | .trailing_zeros() as u64; 119 | invalidate_asid(free); 120 | (global_free as usize, free) 121 | }; 122 | 123 | unsafe { 124 | core::intrinsics::assume(free < 64); 125 | } 126 | 127 | self.entries[global_free].used_free ^= 1 << free; 128 | 129 | // Need to update global mask if there are no asids left in this entry now. 130 | if self.entries[global_free].used_free == 0 { 131 | self.global_mask ^= 1 << global_free; 132 | } 133 | 134 | Asid { 135 | generation: self.generation, 136 | number: ((global_free << 6) | free as usize) as u16, 137 | } 138 | } 139 | 140 | /// Frees an old asid. 141 | pub fn free(&mut self, which: Asid) { 142 | if which.generation == self.generation { 143 | let which = which.number; 144 | unsafe { 145 | core::intrinsics::assume(which < 4096); 146 | } 147 | 148 | let global_entry = (which >> 6) as usize; 149 | self.entries[global_entry].used_free ^= 1 << (which & 63) as u64; 150 | if self.entries[global_entry].used_free != 0 { 151 | self.global_mask |= (1 << global_entry) as u64; 152 | } 153 | } 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /kernel/src/mm/buddy.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use core::cmp; 3 | use core::intrinsics::unlikely; 4 | use core::mem::MaybeUninit; 5 | 6 | /// Amount of top nodes. 7 | pub const MAX_LEVEL: usize = 18; // TODO: choose a good size? 8 | 9 | /// Amount of nodes. 10 | pub const NODE_COUNT: usize = (1 << MAX_LEVEL) - 1; 11 | 12 | /// Amount of bytes needed. 13 | const NODE_BYTES_NEEDED: usize = NODE_COUNT; 14 | 15 | /// Max offset. 16 | pub const MAX_OFFSET: usize = (1 << (MAX_LEVEL - 1)) - 1; 17 | 18 | /// Tree entries. 19 | type Entries = [u8; NODE_BYTES_NEEDED]; 20 | type MaybeUninitEntries = [MaybeUninit; NODE_BYTES_NEEDED]; 21 | 22 | /// The buddy tree. 23 | #[repr(transparent)] 24 | pub struct Tree(Entries); 25 | 26 | impl Tree { 27 | /// Initializes the tree. 28 | /// This is unsafe as we can't verify that the `tree_location` is valid 29 | /// and will live long enough. 30 | pub(crate) unsafe fn from(tree_location: VirtAddr) -> &'static mut Self { 31 | // Is power of 2? 
We don't care about the case of x == 0 here. 32 | fn is_pow2(x: usize) -> bool { 33 | x & (x - 1) == 0 34 | } 35 | 36 | // Limit scope of unsafety, this procedure is safe. 37 | fn fill_nodes(entries: &mut MaybeUninitEntries) { 38 | let mut size = (MAX_LEVEL + 1) as u8; 39 | for (i, entry) in entries.iter_mut().enumerate() { 40 | if is_pow2(i + 1) { 41 | size -= 1; 42 | } 43 | 44 | *entry = MaybeUninit::new(size); 45 | } 46 | } 47 | 48 | // Safety: 49 | // Assumptions caller must guarantee are in the method docs. 50 | let array = &mut *(tree_location.as_mut::()); 51 | // This is safe. 52 | fill_nodes(array); 53 | // Safety: 54 | // `MaybeUninit` and `u8` have the same ABI, size & alignment. 55 | // Thus, `Entries` and `MaybeUninitEntries` may be transmuted to each other. 56 | // `Tree` and `Entries` have the same ABI, size & alignment due to the repr(transparent). 57 | &mut *(array as *mut _ as *mut Tree) 58 | } 59 | 60 | /// Left index of a node. 61 | #[inline] 62 | fn left_index(&self, index: usize) -> usize { 63 | (index << 1) | 1 64 | } 65 | 66 | /// Right index of a node. 67 | #[inline] 68 | fn right_index(&self, index: usize) -> usize { 69 | self.left_index(index) + 1 70 | } 71 | 72 | /// Parent index of a node. 73 | #[inline] 74 | fn parent_index(&self, index: usize) -> usize { 75 | ((index + 1) >> 1) - 1 76 | } 77 | 78 | /// Allocate in tree. 79 | pub fn alloc(&mut self, order: usize) -> Option { 80 | if unlikely(self.0[0] < 1 + order as u8) { 81 | return None; 82 | } 83 | 84 | // Find node with smallest size large enough to hold the requested size 85 | let wanted_level = MAX_LEVEL - 1 - order; 86 | let mut index = 0; 87 | for _ in 0..wanted_level { 88 | let left_index = self.left_index(index); 89 | let right_index = self.right_index(index); 90 | 91 | // Because of the check at the beginning, we know one of these two is big enough 92 | index = if self.0[left_index] > order as u8 { 93 | left_index 94 | } else { 95 | debug_assert!(self.0[right_index] > order as u8); 96 | right_index 97 | }; 98 | } 99 | 100 | // Calculate offset from the index 101 | let first_index_in_this_level = (1 << wanted_level) - 1; 102 | let index_in_this_level = index - first_index_in_this_level; 103 | let offset = index_in_this_level << order; 104 | 105 | // Update the values in the tree so that each node still contains the largest available 106 | // power of two size in their subtree. 107 | self.0[index] = 0; 108 | while index > 0 { 109 | index = self.parent_index(index); 110 | let left_index = self.left_index(index); 111 | let right_index = self.right_index(index); 112 | let max = cmp::max(self.0[left_index], self.0[right_index]); 113 | self.0[index] = max; 114 | } 115 | 116 | Some(offset) 117 | } 118 | 119 | // Deallocate in tree. 120 | pub fn dealloc(&mut self, order: usize, offset: usize) { 121 | // Calculate the index at which this allocation happened. 122 | let mut size = (order + 1) as u8; 123 | let wanted_level = MAX_LEVEL - size as usize; 124 | let index_in_this_level = offset >> order; 125 | let first_index_in_this_level = (1 << wanted_level) - 1; 126 | let mut index = index_in_this_level + first_index_in_this_level; 127 | 128 | // Update value in the tree to undo the allocation. 129 | debug_assert_eq!(self.0[index], 0); 130 | self.0[index] = size; 131 | 132 | // Update all parents in the tree. 
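        // Example: freeing order 0 at offset 5 starts at the leaf for offset 5. On the way
        // up, a parent becomes a whole free block again only when both children are whole;
        // otherwise it just keeps the larger of its two children.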
133 | while index > 0 { 134 | index = self.parent_index(index); 135 | size += 1; 136 | 137 | let left_index = self.left_index(index); 138 | let right_index = self.right_index(index); 139 | 140 | // This node becomes a complete node again if both the children are complete nodes. 141 | self.0[index] = 142 | if self.0[left_index] == self.0[right_index] && self.0[left_index] == size - 1 { 143 | size 144 | } else { 145 | cmp::max(self.0[left_index], self.0[right_index]) 146 | }; 147 | } 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/simd.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::x86_64::address::VirtAddr; 2 | use crate::arch::x86_64::{cr4_read, cr4_write, xsetbv}; 3 | use alloc::alloc::{alloc, dealloc, handle_alloc_error}; 4 | use core::alloc::Layout; 5 | use core::ptr::{copy_nonoverlapping, null, write_bytes}; 6 | use raw_cpuid::CpuId; 7 | 8 | /// SIMD save routine. 9 | static mut SIMD_SAVE_ROUTINE: unsafe fn(region: *mut u8) -> () = simd_invalid_routine; 10 | 11 | /// SIMD restore routine. 12 | static mut SIMD_RESTORE_ROUTINE: unsafe fn(region: *mut u8) -> () = simd_invalid_routine; 13 | 14 | /// SIMD save region size. 15 | static mut SIMD_SAVE_SIZE: u32 = 0; 16 | 17 | /// SIMD save region alignment. 18 | static mut SIMD_SAVE_ALIGN: u32 = 64; 19 | 20 | /// SIMD initial state. 21 | static mut SIMD_INIT: *const u8 = null(); 22 | 23 | /// SIMD state 24 | #[repr(transparent)] 25 | pub struct SimdState { 26 | ptr: VirtAddr, 27 | } 28 | 29 | impl SimdState { 30 | /// Create SIMD save region. 31 | pub fn new() -> Self { 32 | let ptr = alloc_simd_save_region(); 33 | unsafe { 34 | copy_nonoverlapping(SIMD_INIT, ptr, SIMD_SAVE_SIZE as usize); 35 | Self { 36 | ptr: VirtAddr::from(ptr), 37 | } 38 | } 39 | } 40 | 41 | /// Gets the raw pointer. 42 | #[inline] 43 | fn raw_ptr(&self) -> *mut u8 { 44 | self.ptr.as_mut::() 45 | } 46 | 47 | /// Save SIMD region. 48 | #[inline] 49 | pub fn save(&self) { 50 | unsafe { SIMD_SAVE_ROUTINE(self.raw_ptr()) } 51 | } 52 | 53 | /// Restore SIMD region. 54 | #[inline] 55 | pub fn restore(&self) { 56 | unsafe { SIMD_RESTORE_ROUTINE(self.raw_ptr()) } 57 | } 58 | } 59 | 60 | impl Drop for SimdState { 61 | fn drop(&mut self) { 62 | unsafe { 63 | dealloc(self.raw_ptr(), simd_layout()); 64 | } 65 | } 66 | } 67 | 68 | /// Sets up SIMD. 69 | pub fn setup_simd() { 70 | let cpuid = CpuId::new(); 71 | 72 | // Set OSFXSR and OSXMMEXCPT bits, at least SSE2 is available. 73 | let mut cr4 = cr4_read(); 74 | cr4 |= (1 << 9) | (1 << 10); 75 | 76 | // Check for XSAVE support etc. 77 | if cpuid.get_feature_info().unwrap().has_xsave() { 78 | let state = cpuid.get_extended_state_info().unwrap(); 79 | 80 | // Enable XSAVE 81 | cr4 |= 1 << 18; 82 | 83 | // XCR0 will have x87 and SSE states for sure. 
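        // XCR0 layout: bit 0 = x87 state (must always be set), bit 1 = SSE/XMM state,
        // bit 2 = AVX/YMM state. Hardware requires bit 1 to be set whenever bit 2 is.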
84 | #[allow(clippy::identity_op)] 85 | let mut xcr0 = (1 << 0) | (1 << 1); 86 | 87 | if state.xcr0_supports_avx_256() { 88 | xcr0 |= 1 << 2; 89 | } 90 | 91 | unsafe { 92 | cr4_write(cr4); 93 | xsetbv(0, xcr0); 94 | SIMD_SAVE_SIZE = cpuid 95 | .get_extended_state_info() 96 | .unwrap() 97 | .xsave_area_size_enabled_features(); 98 | assert!(SIMD_SAVE_SIZE > 0); 99 | if state.has_xsaves_xrstors() { 100 | SIMD_SAVE_ROUTINE = simd_routine_xsaves; 101 | SIMD_RESTORE_ROUTINE = simd_routine_xrstors; 102 | } else if state.has_xsaveopt() { 103 | SIMD_SAVE_ROUTINE = simd_routine_xsaveopt; 104 | SIMD_RESTORE_ROUTINE = simd_routine_xrstor; 105 | } else { 106 | SIMD_SAVE_ROUTINE = simd_routine_xsave; 107 | SIMD_RESTORE_ROUTINE = simd_routine_xrstor; 108 | }; 109 | } 110 | } else { 111 | unsafe { 112 | cr4_write(cr4); 113 | SIMD_SAVE_SIZE = 512; 114 | SIMD_SAVE_ALIGN = 16; 115 | SIMD_SAVE_ROUTINE = simd_routine_fxsave; 116 | SIMD_RESTORE_ROUTINE = simd_routine_fxrstor; 117 | } 118 | } 119 | 120 | // Setup initial state 121 | unsafe { 122 | let region = alloc_simd_save_region(); 123 | write_bytes(region, 0, SIMD_SAVE_SIZE as usize); 124 | SIMD_SAVE_ROUTINE(region); 125 | SIMD_INIT = region; 126 | } 127 | } 128 | 129 | /// Gets the SIMD layout. 130 | fn simd_layout() -> Layout { 131 | unsafe { Layout::from_size_align(SIMD_SAVE_SIZE as usize, SIMD_SAVE_ALIGN as usize).unwrap() } 132 | } 133 | 134 | /// Allocate SIMD save region. 135 | pub fn alloc_simd_save_region() -> *mut u8 { 136 | let layout = simd_layout(); 137 | unsafe { 138 | let ptr = alloc(layout); 139 | if ptr.is_null() { 140 | handle_alloc_error(layout); 141 | } 142 | ptr 143 | } 144 | } 145 | 146 | /// EDX:EAX works as a mask for XCR0. 147 | /// We don't need to store & restore the x87 state because we never ever use it. 148 | const ROUTINE_EAX: u32 = 0b110; 149 | const ROUTINE_EDX: u32 = 0; 150 | 151 | /// Invalid SIMD save routine. 152 | fn simd_invalid_routine(_region: *mut u8) { 153 | unreachable!("simd routine should be selected"); 154 | } 155 | 156 | /// SIMD save routine using FXSAVE. 157 | unsafe fn simd_routine_fxsave(region: *mut u8) { 158 | llvm_asm!("fxsave ($0)" :: "r" (region) : "memory"); 159 | } 160 | 161 | /// SIMD save routine using XSAVE. 162 | unsafe fn simd_routine_xsave(region: *mut u8) { 163 | llvm_asm!("xsave ($0)" :: "r" (region), "{eax}" (ROUTINE_EAX), "{edx}" (ROUTINE_EDX) : "memory"); 164 | } 165 | 166 | /// SIMD save routine using XSAVEOPT. 167 | unsafe fn simd_routine_xsaveopt(region: *mut u8) { 168 | llvm_asm!("xsaveopt ($0)" :: "r" (region), "{eax}" (ROUTINE_EAX), "{edx}" (ROUTINE_EDX) : "memory"); 169 | } 170 | 171 | /// SIMD save routine using XSAVES. 172 | unsafe fn simd_routine_xsaves(region: *mut u8) { 173 | llvm_asm!("xsaves ($0)" :: "r" (region), "{eax}" (ROUTINE_EAX), "{edx}" (ROUTINE_EDX) : "memory"); 174 | } 175 | 176 | /// SIMD save routine using FXRSTOR. 177 | unsafe fn simd_routine_fxrstor(region: *mut u8) { 178 | llvm_asm!("fxrstor ($0)" :: "r" (region) : "memory"); 179 | } 180 | 181 | /// SIMD save routine using XRSTOR. 182 | unsafe fn simd_routine_xrstor(region: *mut u8) { 183 | llvm_asm!("xrstor ($0)" :: "r" (region), "{eax}" (ROUTINE_EAX), "{edx}" (ROUTINE_EDX) : "memory"); 184 | } 185 | 186 | /// SIMD save routine using XRSTORS. 
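/// Restores state saved in the compacted XSAVES format (counterpart of `simd_routine_xsaves`).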
187 | unsafe fn simd_routine_xrstors(region: *mut u8) { 188 | llvm_asm!("xrstors ($0)" :: "r" (region), "{eax}" (ROUTINE_EAX), "{edx}" (ROUTINE_EDX) : "memory"); 189 | } 190 | -------------------------------------------------------------------------------- /kernel/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature( 3 | llvm_asm, 4 | abi_x86_interrupt, 5 | core_intrinsics, 6 | ptr_internals, 7 | alloc_error_handler, 8 | lang_items, 9 | atomic_mut_ptr, 10 | const_in_array_repeat_expressions, 11 | bool_to_option, 12 | maybe_uninit_extra, 13 | maybe_uninit_ref 14 | )] 15 | #![cfg_attr(feature = "integration-test", allow(unused_imports), allow(dead_code))] 16 | #![allow(clippy::verbose_bit_mask)] 17 | #![allow(clippy::new_without_default)] 18 | 19 | #[macro_use] 20 | extern crate static_assertions; 21 | 22 | #[macro_use] 23 | extern crate alloc; 24 | 25 | #[macro_use] 26 | extern crate memoffset; 27 | 28 | #[macro_use] 29 | extern crate wasm_call; 30 | 31 | use core::panic::PanicInfo; 32 | use core::slice; 33 | 34 | use arch::interrupts; 35 | 36 | use crate::arch::address::{PhysAddr, VirtAddr}; 37 | use crate::arch::hpet; 38 | use crate::arch::paging::{ActiveMapping, EntryFlags}; 39 | use crate::mm::mapper::MemoryMapper; 40 | use crate::mm::tcb_alloc::with_thread; 41 | use crate::tasking::scheduler::{self, thread_exit, with_core_scheduler, with_current_thread}; 42 | use crate::tasking::scheme_container::schemes; 43 | use crate::tasking::thread::Thread; 44 | use crate::util::boot_module::{BootModule, BootModuleProvider}; 45 | use crate::util::tar::Tar; 46 | use alloc::boxed::Box; 47 | 48 | #[macro_use] 49 | mod util; 50 | #[macro_use] 51 | mod arch; 52 | mod mm; 53 | mod sync; 54 | mod tasking; 55 | #[cfg(feature = "integration-test")] 56 | mod tests; 57 | mod wasm; 58 | 59 | #[panic_handler] 60 | #[cfg(not(feature = "integration-test"))] 61 | fn panic(info: &PanicInfo) -> ! { 62 | // TODO: notify other processors/cores 63 | interrupts::disable(); 64 | println!("{:#?}", info); 65 | loop { 66 | arch::halt(); 67 | } 68 | } 69 | 70 | /// Run. 71 | pub fn kernel_run(reserved_end: VirtAddr, _boot_modules: impl BootModuleProvider) { 72 | unsafe { 73 | // May only be called once. 74 | mm::init(reserved_end); 75 | } 76 | arch::late_init(); 77 | tasking::scheduler::init(); 78 | 79 | #[cfg(not(feature = "integration-test"))] 80 | kernel_main(_boot_modules); 81 | #[cfg(feature = "integration-test")] 82 | { 83 | use crate::arch::qemu; 84 | kernel_test_main(); 85 | unsafe { 86 | qemu::qemu_exit(0); 87 | } 88 | } 89 | } 90 | 91 | /// Handle module. 92 | fn handle_module(module: BootModule) -> Option<()> { 93 | println!("Handle module {:?}", module); 94 | 95 | // Safety: module data is correct. 96 | let tar = unsafe { 97 | Tar::from_slice(slice::from_raw_parts( 98 | module.range.start.as_const(), 99 | module.range.len, 100 | )) 101 | }?; 102 | 103 | // For now, just try to run all files in the tar. 104 | // Might need a manifest or something alike in the future. 105 | for file in tar { 106 | // TODO: we should probably have a manifest file which describes what services should be 107 | // in the same domain. 
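        // For now every file from the initrd tar is started in the domain of the current
        // thread rather than in a fresh ProtectionDomain (see the commented-out line below),
        // so the spawned services currently share a single address space.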
108 | //let domain = ProtectionDomain::new().expect("domain"); 109 | let domain = with_current_thread(|t| t.domain().clone()); 110 | wasm::main::run(file.as_slice(), domain).unwrap_or_else(|e| { 111 | println!("Could not start: {:?}", e); 112 | }); 113 | } 114 | 115 | Some(()) 116 | } 117 | 118 | /// Kernel main, called after initialization is done. 119 | #[cfg(not(feature = "integration-test"))] 120 | fn kernel_main(boot_modules: impl BootModuleProvider) { 121 | // Make sure the boot modules are mapped. 122 | if let Some(range) = boot_modules.range() { 123 | // Safety: we are the only running thread right now, so no locking is required. 124 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 125 | mapping 126 | .map_range_physical( 127 | range.start, 128 | PhysAddr::new(range.start.as_usize()), 129 | range.len, 130 | EntryFlags::PRESENT, 131 | ) 132 | .expect("mapping modules"); 133 | } 134 | 135 | interrupts::enable(); 136 | interrupts::setup_timer(); 137 | scheduler::thread_yield(); 138 | 139 | { 140 | let hpet = hpet().unwrap(); 141 | let start = hpet.counter(); 142 | for i in 0..10000000 { 143 | scheduler::thread_yield(); 144 | } 145 | let t = hpet.counter() - start; 146 | println!("{}ns", hpet.counter_to_ns(t) / 10000000); 147 | } 148 | 149 | // TODO: debug code 150 | unsafe { 151 | let entry = VirtAddr::new(thread_test as usize); 152 | //let domain = ProtectionDomain::new().unwrap(); 153 | let domain = with_current_thread(|t| t.domain().clone()); 154 | let t = Thread::create(domain, entry, 1234).unwrap(); 155 | scheduler::add_and_schedule_thread(t); 156 | }; 157 | 158 | // Handle boot modules. 159 | for module in boot_modules { 160 | handle_module(module).unwrap_or_else(|| { 161 | println!("Failed to handle module {:?}", module); 162 | }); 163 | } 164 | 165 | loop { 166 | arch::halt(); 167 | } 168 | } 169 | 170 | extern "C" fn thread_test(_arg: u64) { 171 | let hpet = hpet().unwrap(); 172 | let self_scheme = schemes().read().open_self(Box::new([])).unwrap(); 173 | let (scheme, _handle) = self_scheme.scheme_and_handle().unwrap(); 174 | //scheme.open(-1).unwrap(); 175 | let a = hpet.counter(); 176 | for i in 0..(10000 - 1) { 177 | //scheme.open(i).unwrap(); 178 | } 179 | let b = hpet.counter(); 180 | println!("open: {}ns", hpet.counter_to_ns(b - a) / (10000 - 1)); 181 | println!(); 182 | println!(); 183 | let x = with_core_scheduler(|s| s.current_thread_id()); 184 | let a = hpet.counter(); 185 | for _ in 0..10000 { 186 | with_thread(x, |t| assert_eq!(t.id, x)); 187 | } 188 | let b = hpet.counter(); 189 | println!("with_thread: {}ns", hpet.counter_to_ns(b - a) / 10000); 190 | println!(); 191 | println!(); 192 | 193 | thread_exit(123); 194 | } 195 | 196 | /// Kernel test main, called after arch init is done. 197 | #[cfg(feature = "integration-test")] 198 | fn kernel_test_main() { 199 | tests::test_main(); 200 | } 201 | 202 | #[lang = "eh_personality"] 203 | extern "C" fn eh_personality() {} 204 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/address.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::x86_64::paging::PAGE_SIZE; 2 | use core::fmt::{Debug, Error, Formatter}; 3 | use core::ops::Add; 4 | use core::ops::{AddAssign, Sub, SubAssign}; 5 | 6 | /// A 64-bit physical address. 7 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 8 | #[repr(transparent)] 9 | pub struct PhysAddr(usize); 10 | 11 | /// A canonical form, 64-bit virtual address. 
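/// "Canonical" means bits 63..48 are copies of bit 47: `addr >> 47` must be all zeroes
/// or all ones, which is exactly what `VirtAddr::new` asserts in debug builds.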
12 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 13 | #[repr(transparent)] 14 | pub struct VirtAddr(usize); 15 | 16 | #[allow(dead_code)] 17 | impl PhysAddr { 18 | /// Creates a new physical address. 19 | #[inline] 20 | pub fn new(addr: usize) -> Self { 21 | // Defined limit by the architecture spec. 22 | debug_assert_eq!( 23 | addr >> 52, 24 | 0, 25 | "physical address cannot be more than 52-bits: {:#x}", 26 | addr 27 | ); 28 | Self(addr) 29 | } 30 | 31 | /// Creates a new physical address that points to null. 32 | pub const fn null() -> Self { 33 | Self(0) 34 | } 35 | 36 | /// Checks if the address is null. 37 | pub fn is_null(self) -> bool { 38 | self.0 == 0 39 | } 40 | 41 | /// Checks if the address is page aligned. 42 | pub fn is_page_aligned(self) -> bool { 43 | self.0 & (PAGE_SIZE - 1) == 0 44 | } 45 | 46 | /// Checks if the address is 2M aligned. 47 | pub fn is_2m_aligned(self) -> bool { 48 | self.0 & 0x1ff_fff == 0 49 | } 50 | 51 | /// Converts the physical address to a usize. 52 | #[inline] 53 | pub fn as_usize(self) -> usize { 54 | self.0 55 | } 56 | 57 | /// Converts the physical address to a u64. 58 | #[inline] 59 | pub fn as_u64(self) -> u64 { 60 | self.0 as u64 61 | } 62 | 63 | /// Aligns a memory address down. 64 | pub fn align_down(self) -> Self { 65 | PhysAddr(align_down(self.0)) 66 | } 67 | 68 | /// Aligns a memory address up. 69 | pub fn align_up(self) -> Self { 70 | PhysAddr(align_up(self.0)) 71 | } 72 | } 73 | 74 | impl Add for PhysAddr { 75 | type Output = Self; 76 | 77 | fn add(self, rhs: usize) -> Self::Output { 78 | PhysAddr::new(self.0 + rhs) 79 | } 80 | } 81 | 82 | impl AddAssign for PhysAddr { 83 | fn add_assign(&mut self, rhs: usize) { 84 | self.0 += rhs; 85 | } 86 | } 87 | 88 | impl Debug for PhysAddr { 89 | fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { 90 | write!(f, "PhysAddr({:#x})", self.0) 91 | } 92 | } 93 | 94 | #[allow(dead_code)] 95 | impl VirtAddr { 96 | /// Creates a canonical form, virtual address. 97 | #[inline] 98 | pub fn new(addr: usize) -> Self { 99 | debug_assert!( 100 | { 101 | let x = addr >> 47; 102 | x == 0 || x == 0x1ffff 103 | }, 104 | "Virtual address is not in canonical form: {:#x}", 105 | addr 106 | ); 107 | Self(addr) 108 | } 109 | 110 | /// Creates a new virtual address from a raw pointer. 111 | pub fn from(ptr: *mut T) -> Self { 112 | Self::new(ptr as usize) 113 | } 114 | 115 | /// Creates a new virtual address that points to null. 116 | pub const fn null() -> Self { 117 | Self(0) 118 | } 119 | 120 | /// Checks if the address is null. 121 | pub fn is_null(self) -> bool { 122 | self.0 == 0 123 | } 124 | 125 | /// Converts the virtual address to a usize. 126 | #[inline] 127 | pub fn as_usize(self) -> usize { 128 | self.0 129 | } 130 | 131 | /// Converts the virtual address to a u64. 132 | #[inline] 133 | pub fn as_u64(self) -> u64 { 134 | self.0 as u64 135 | } 136 | 137 | /// Converts the virtual address to a mutable pointer. 138 | #[inline] 139 | pub fn as_mut(self) -> *mut T { 140 | self.0 as *mut T 141 | } 142 | 143 | /// Converts the virtual address to a const pointer. 144 | #[inline] 145 | pub fn as_const(self) -> *const T { 146 | self.0 as *const T 147 | } 148 | 149 | /// Checks if the address is page aligned. 150 | pub fn is_page_aligned(self) -> bool { 151 | self.0 & (PAGE_SIZE - 1) == 0 152 | } 153 | 154 | /// Checks if the address is 2M aligned. 155 | pub fn is_2m_aligned(self) -> bool { 156 | self.0 & 0x1ff_fff == 0 157 | } 158 | 159 | /// Gets the level 4 index for paging. 
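/// A virtual address splits into 9-bit table indices plus a 12-bit page offset:
/// bits 47..39 = P4, 38..30 = P3, 29..21 = P2, 20..12 = P1, 11..0 = offset.
/// Example: 0xffff_8000_1234_5000 has P4 = 256, P3 = 0, P2 = 145, P1 = 325.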
160 | pub fn p4_index(self) -> usize { 161 | (self.0 >> 39) & 511 162 | } 163 | 164 | /// Gets the level 3 index for paging. 165 | pub fn p3_index(self) -> usize { 166 | (self.0 >> 30) & 511 167 | } 168 | 169 | /// Gets the level 2 index for paging. 170 | pub fn p2_index(self) -> usize { 171 | (self.0 >> 21) & 511 172 | } 173 | 174 | /// Gets the level 1 index for paging. 175 | pub fn p1_index(self) -> usize { 176 | (self.0 >> 12) & 511 177 | } 178 | 179 | /// Aligns a memory address down. 180 | pub fn align_down(self) -> Self { 181 | VirtAddr(align_down(self.0)) 182 | } 183 | 184 | /// Aligns a memory address up. 185 | pub fn align_up(self) -> Self { 186 | VirtAddr(align_up(self.0)) 187 | } 188 | } 189 | 190 | /// Align the value down to page size multiple. 191 | pub fn align_down(value: usize) -> usize { 192 | value & !(PAGE_SIZE - 1) 193 | } 194 | 195 | /// Align the value up to page size multiple. 196 | pub fn align_up(value: usize) -> usize { 197 | (value + PAGE_SIZE - 1) & !(PAGE_SIZE - 1) 198 | } 199 | 200 | impl Debug for VirtAddr { 201 | fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { 202 | write!(f, "VirtAddr({:#x})", self.0) 203 | } 204 | } 205 | 206 | impl Add for VirtAddr { 207 | type Output = Self; 208 | 209 | fn add(self, rhs: usize) -> Self::Output { 210 | VirtAddr::new(self.0 + rhs) 211 | } 212 | } 213 | 214 | impl Sub for VirtAddr { 215 | type Output = Self; 216 | 217 | fn sub(self, rhs: usize) -> Self::Output { 218 | VirtAddr::new(self.0 - rhs) 219 | } 220 | } 221 | 222 | impl AddAssign for VirtAddr { 223 | fn add_assign(&mut self, rhs: usize) { 224 | self.0 += rhs; 225 | } 226 | } 227 | 228 | impl SubAssign for VirtAddr { 229 | fn sub_assign(&mut self, rhs: usize) { 230 | self.0 -= rhs; 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /kernel/src/mm/tcb_alloc.rs: -------------------------------------------------------------------------------- 1 | //! TCB allocation at a fixed space. 2 | //! The goal is to provide a way to quickly map an id to a TCB reference. 3 | 4 | use crate::arch::address::VirtAddr; 5 | use crate::arch::paging::{ActiveMapping, EntryFlags, PAGE_SIZE}; 6 | use crate::arch::{TCB_LEN, TCB_START}; 7 | use crate::mm::mapper::MemoryMapper; 8 | use crate::sync::spinlock::Spinlock; 9 | use crate::tasking::thread::{Thread, ThreadId}; 10 | use crate::util::mem_funcs::page_clear; 11 | use core::mem::{align_of, size_of, MaybeUninit}; 12 | use core::sync::atomic::{AtomicU16, Ordering}; 13 | 14 | static TCB_PAGE_LOCK: Spinlock<()> = Spinlock::new(()); 15 | 16 | struct Metadata { 17 | free: AtomicU16, 18 | } 19 | 20 | /// A TCB may be uninitialised. 21 | /// We also want to align them on a cache line to minimise cache ping-pong. 22 | /// The extra field is to reserve bytes for meta data. 23 | #[repr(align(64))] 24 | struct ThreadBlock(MaybeUninit, Metadata); 25 | 26 | /// How many TCBs fit inside on `TcbPage`? 27 | const TCB_COUNT: usize = PAGE_SIZE / size_of::(); 28 | 29 | struct TcbPage { 30 | threads: [ThreadBlock; TCB_COUNT], 31 | } 32 | 33 | impl TcbPage { 34 | /// Gets the metadata for this page. 35 | pub fn meta_data(&self) -> &Metadata { 36 | &self.threads[0].1 37 | } 38 | } 39 | 40 | const_assert!(size_of::() <= PAGE_SIZE); 41 | const_assert!(align_of::() <= PAGE_SIZE); 42 | const_assert!(TCB_COUNT <= 16); 43 | const_assert_eq!(TCB_COUNT & (TCB_COUNT - 1), 0); // TCB_COUNT must be a power of two for efficiency 44 | 45 | /// Pagefault TCB allocation handling. 
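/// The TCB region is only reserved, not backed by frames up-front: the first write to a
/// page in it faults, and this handler maps a zeroed, writable, global, non-executable
/// page and marks all of its TCB slots as free.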
46 | pub fn pagefault_tcb_alloc(fault_addr: VirtAddr, write: bool) { 47 | assert!(write, "Attempting read to not existing thread"); 48 | 49 | let _guard = TCB_PAGE_LOCK.lock(); 50 | let fault_addr = fault_addr.align_down(); 51 | 52 | // Safety: 53 | // No concurrent access on the shared page tables because these are unique for the TCB, 54 | // and we're locking. 55 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 56 | 57 | // Once we get this lock, the page might've been already mapped by another CPU. 58 | if mapping.translate(fault_addr).is_some() { 59 | return; 60 | } 61 | 62 | if mapping 63 | .get_and_map_single( 64 | fault_addr, 65 | EntryFlags::PRESENT | EntryFlags::NX | EntryFlags::WRITABLE | EntryFlags::GLOBAL, 66 | ) 67 | .is_ok() 68 | { 69 | let ptr = fault_addr.as_const::(); 70 | // Clear out data 71 | unsafe { 72 | page_clear(ptr as *mut _); 73 | } 74 | // Safety: same as before. 75 | let tcb_page = unsafe { &*ptr }; 76 | // Relaxed is fine since we're the ones initialising it, and others had to wait behind the lock. 77 | tcb_page.meta_data().free.store(u16::MAX, Ordering::Relaxed); 78 | } else { 79 | // TODO: OOM handling? 80 | unimplemented!(); 81 | } 82 | } 83 | 84 | /// Converts a thread id to an address. 85 | #[inline] 86 | fn tid_to_addr(tid: ThreadId) -> (usize, usize) { 87 | let page_nr = (tid.as_u32() as usize) / TCB_COUNT; 88 | ( 89 | TCB_START + page_nr * PAGE_SIZE, 90 | tid.as_u32() as usize % TCB_COUNT, 91 | ) 92 | } 93 | 94 | /// Allocates a tcb. 95 | pub fn tcb_alloc(thread: Thread) { 96 | let tid = thread.id; 97 | let (page_addr, offset) = tid_to_addr(tid); 98 | assert!(page_addr < TCB_START + TCB_LEN); 99 | // Safety: 100 | // The thread id is unique. 101 | // We only access the thread slot that is our own slot. 102 | // The issue is that we can't borrow the whole `TcbPage` or array as mutable. 103 | // So we have to borrow the slot as mutable using pointers. 104 | let page = unsafe { 105 | // Only non-mutable references are ever made to `TcbPage`. 106 | let page = &*(page_addr as *const TcbPage); 107 | let ptr = (&page.threads[offset].0) as *const _ as *mut MaybeUninit; 108 | core::ptr::write(ptr, MaybeUninit::new(thread)); 109 | page 110 | }; 111 | page.meta_data() 112 | .free 113 | .fetch_xor((1 << offset) as _, Ordering::AcqRel); 114 | } 115 | 116 | /// Deallocates a tcb. 117 | pub fn tcb_dealloc(tid: ThreadId) { 118 | // We want the deallocation to be under a lock to prevent racing in the pagefault handler. 119 | let _guard = TCB_PAGE_LOCK.lock(); 120 | 121 | let (page_addr, offset) = tid_to_addr(tid); 122 | let page = unsafe { &*(page_addr as *const TcbPage) }; 123 | let old_free = page 124 | .meta_data() 125 | .free 126 | .fetch_or((1 << offset) as _, Ordering::AcqRel); 127 | if old_free & (1 << offset) == 0 { 128 | // Safety: it was initialised and will be dropped only once. 129 | unsafe { 130 | // This will also invalidate the thread id. 131 | drop(page.threads[offset].0.assume_init_read()); 132 | } 133 | if old_free | (1 << offset) == u16::MAX { 134 | // Safety: 135 | // No concurrent access on the shared page tables because these are unique for the TCB, 136 | // and we're locking. 137 | let mut mapping = unsafe { ActiveMapping::get_unlocked() }; 138 | mapping.free_and_unmap_single(VirtAddr::new(page_addr)); 139 | } 140 | } 141 | } 142 | 143 | /// Executes something in context of a thread. 144 | /// 145 | /// # Panic 146 | /// 147 | /// Panics if the thread is not valid anymore. 
148 | /// Callers should've been notified when a thread they're interested in ceases to exist. 149 | pub fn with_thread(tid: ThreadId, f: F) -> T 150 | where 151 | F: FnOnce(&Thread) -> T, 152 | { 153 | let (page_addr, offset) = tid_to_addr(tid); 154 | // Safety: 155 | // Only non-mutable references are ever made to `TcbPage`. 156 | let page = unsafe { &*(page_addr as *const TcbPage) }; 157 | let block = &page.threads[offset]; 158 | unsafe { 159 | // Safety: 160 | // We want to verify the thread id to be of the same generation. 161 | // We can't use `get_ref` on an uninitialized thread because that's undefined behaviour. 162 | // That means we have to read the field without calling `get_ref` first. 163 | // If there is no thread here, its id and generation will be zero. 164 | let tid_ptr = 165 | (block.0.as_ptr() as *const u8).add(offset_of!(Thread, id)) as *const ThreadId; 166 | assert_eq!(*tid_ptr, tid, "thread generation mismatch"); 167 | 168 | // We now know that the thread was initialized, otherwise the assert would've failed. 169 | f(block.0.assume_init_ref()) 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/mboot.s: -------------------------------------------------------------------------------- 1 | .set MB_MAGIC, 0xE85250D6 // Multiboot magic 2 | .set MB_ARCH, 0 // i386 protected mode 3 | .set TAG_REQUIRED, 0 // Required tag 4 | .set TAG_OPTIONAL, 1 // Optional tag 5 | 6 | // Multiboot header 7 | .section .mboot 8 | .align 8 9 | mboot_hdr_start: 10 | .long MB_MAGIC 11 | .long MB_ARCH 12 | .long mboot_hdr_end - mboot_hdr_start 13 | .long -(MB_MAGIC + MB_ARCH + (mboot_hdr_end - mboot_hdr_start)) 14 | 15 | // Information request tag 16 | .align 8 17 | info_req_start: 18 | .word 1 // Type 19 | .word TAG_REQUIRED // Flags 20 | .long info_req_end - info_req_start // Size of this tag 21 | .long 1 // Request: command line 22 | .long 6 // Request: memory map 23 | .long 15 // Request: ACPI 24 | info_req_end: 25 | 26 | // Framebuffer tag 27 | .align 8 28 | lfb_start: 29 | .word 5 // Type 30 | .word TAG_REQUIRED // Flags 31 | .long lfb_end - lfb_start // Size of this tag 32 | .long 1024 // Width (can be overridden in grub.cfg) 33 | .long 768 // Height (can be overridden in grub.cfg) 34 | .long 32 // Depth (can be overridden in grub.cfg) 35 | lfb_end: 36 | 37 | // End tag 38 | .align 8 39 | end_tag_start: 40 | .word 0 // Type 41 | .word 0 // Flags 42 | .long end_tag_end - end_tag_start // Size of this tag 43 | end_tag_end: 44 | mboot_hdr_end: 45 | 46 | // Preallocate for paging 47 | .section .bss, "aw", @nobits 48 | .align 0x1000 49 | boot_pml4: 50 | .skip 0x1000 51 | boot_pml3: 52 | .skip 0x1000 53 | boot_pml2: 54 | .skip 0x1000 55 | boot_pml1_1: 56 | .skip 0x1000 57 | boot_pml1_2: 58 | .skip 0x1000 59 | 60 | .section .text 61 | .code32 62 | 63 | .global start 64 | .type start, @function 65 | start: 66 | // Warning: keep the value of ebx, because that's the register that points to the multiboot struct. 67 | 68 | // Note: I don't bother with checking if Long Mode is supported. 69 | // The cpu will just reset when it tries to go to Long Mode if it's not supported. 
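    // 0x36d76289 is the value a Multiboot2-compliant loader leaves in EAX; anything
    // else means we were booted by something unsupported, so we just halt.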
70 | 71 | // Check magic 72 | cmp $0x36d76289, %eax 73 | jne halt 74 | 75 | cld 76 | mov $STACK_TOP, %esp 77 | 78 | // Map kernel code & data PMLs 79 | movl $(boot_pml3 + 0x3), boot_pml4 + 0 * 8 80 | movl $(2 << (52 - 32)), boot_pml4 + 0 * 8 + 4 // Used entry count 81 | movl $(boot_pml2 + 0x3), boot_pml3 + 0 * 8 82 | movl $(1 << (52 - 32)), boot_pml3 + 0 * 8 + 4 // Used entry count 83 | movl $(boot_pml1_1 + 0x3), boot_pml2 + 0 * 8 84 | movl $(boot_pml1_2 + 0x3), boot_pml2 + 1 * 8 85 | movl $(2 << (52 - 32)), boot_pml2 + 0 * 8 + 4 // Used entry count 86 | movl $(511 << (52 - 32)), boot_pml1_1 + 0 * 8 + 4 // Used entry count 87 | movl $(512 << (52 - 32)), boot_pml1_2 + 0 * 8 + 4 // Used entry count 88 | 89 | // Recursive map 90 | movl $(boot_pml4 + 0x3), boot_pml4 + 511 * 8 91 | movl $(1 << (63 - 32)), boot_pml4 + 511 * 8 + 4 // NX-bit 92 | 93 | // Identity map the first 4MiB (except page 0) 94 | mov $0x1003, %esi 95 | mov $(boot_pml1_1 + 8 * 1), %edi // Continues to boot_pml1_2 96 | mov $(511 + 512), %ecx 97 | 1: 98 | mov %esi, (%edi) 99 | add $0x1000, %esi 100 | add $8, %edi 101 | loop 1b 102 | 103 | /** 104 | * Setup PAT 105 | * Keep the lower half the same as the startup defaults, but modify the higher half 106 | * PATs in order (lower): WB, WT, UC-, UC (same as defaults) 107 | * (higher): WC, WP, *reserved*, *reserved* 108 | */ 109 | mov $(0x06 << 0 | 0x04 << 8 | 0x07 << 16 | 0x00 << 24), %eax 110 | mov $(0x01 << 0 | 0x05 << 8 | 0x00 << 16 | 0x00 << 24), %edx 111 | mov $0x0277, %ecx 112 | wrmsr 113 | 114 | // Enable the FPU 115 | mov %cr0, %eax 116 | and $(~(1 << 2)), %ax 117 | or $(1 << 1), %ax 118 | mov %eax, %cr0 119 | fninit 120 | 121 | // Enable: PSE, PAE, PGE 122 | mov %cr4, %eax 123 | orl $(1 << 4 | 1 << 5 | 1 << 7), %eax 124 | mov %eax, %cr4 125 | 126 | // Enable: long mode and NX bit 127 | mov $0xC0000080, %ecx 128 | rdmsr 129 | orl $(1 << 8 | 1 << 11), %eax 130 | wrmsr 131 | 132 | // Enable paging 133 | mov $boot_pml4, %eax 134 | mov %eax, %cr3 135 | mov %cr0, %eax 136 | // Enable: PG and WP bit 137 | orl $(1 << 31 | 1 << 16), %eax 138 | mov %eax, %cr0 139 | 140 | // Setup rest of TSS descriptor 141 | movl $tss, %eax 142 | movw %ax, tss_base0 143 | shr $16, %eax 144 | movb %al, tss_base1 145 | movb %ah, tss_base2 146 | 147 | // Switch to long mode 148 | lgdt gdt_descriptor 149 | jmp $0x8, $1f 150 | 151 | .code64 152 | 1: 153 | // The upper 32 bits are undefined when switching from 32-bit to 64-bit or vice versa. 154 | // Clear the top bits of the stack to prevent issues. 155 | // ebx contains our multiboot ptr, also clear ebx upper bits by already moving it to the argument register. 
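    // 32-bit moves zero-extend into the full 64-bit register here, so "mov %esp, %esp"
    // is not a no-op: it clears bits 63:32 of RSP, and "mov %ebx, %edi" both clears the
    // upper half and places the multiboot pointer in the first argument register (RDI).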
156 | mov %esp, %esp 157 | mov %ebx, %edi 158 | 159 | // Switch segments 160 | xor %ax, %ax 161 | movw %ax, %ds 162 | movw %ax, %es 163 | movw %ax, %ss 164 | movw $16, %ax 165 | ltr %ax 166 | 167 | .extern entry 168 | call entry 169 | 170 | halt: 171 | hlt 172 | jmp halt 173 | 174 | .section .rodata 175 | gdt: 176 | // NULL segment 177 | .quad 0 178 | // Kernel code segment, only one segment really needed 179 | .quad (1 << 43) | (1 << 44) | (1 << 47) | (1 << 53) 180 | // TSS 181 | .word tss_end - tss - 1 182 | tss_base0: .word 0 // base 15:00 183 | tss_base1: .byte 0 // base 23:16 184 | .byte 0b10001001 185 | .byte 0 186 | tss_base2: .byte 0 // base 31:24 187 | .long 0 // base 63:32, will be zero because lower memory 188 | .long 0 189 | gdt_descriptor: 190 | .word gdt_descriptor - gdt - 1 191 | .quad gdt 192 | tss: 193 | .long 0 // reserved0 194 | .quad 0 // rsp0 195 | .quad 0 // rsp1 196 | .quad 0 // rsp2 197 | .quad 0 // reserved1 198 | .quad INTERRUPT_STACK_TOP // ist1 199 | .quad 0 // ist2 200 | .quad 0 // ist3 201 | .quad 0 // ist4 202 | .quad 0 // ist5 203 | .quad 0 // ist6 204 | .quad 0 // ist7 205 | .quad 0 // reserved2 206 | .word 0 // reserved3 207 | .word 104 // IO map base address offset. 208 | tss_end: 209 | 210 | .section .bss, "aw", @nobits 211 | .global STACK_BOTTOM 212 | STACK_BOTTOM: 213 | .skip 32768*4 214 | STACK_TOP: 215 | .global INTERRUPT_STACK_BOTTOM 216 | INTERRUPT_STACK_BOTTOM: 217 | .skip 32768 218 | .global INTERRUPT_STACK_TOP 219 | INTERRUPT_STACK_TOP: 220 | -------------------------------------------------------------------------------- /kernel/src/tasking/scheme.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::{preempt_disable, preempt_enable}; 2 | use crate::mm::tcb_alloc::with_thread; 3 | use crate::sync::thread_block_guard::ThreadBlockGuard; 4 | use crate::sync::wait_queue::WaitQueue; 5 | use crate::tasking::file::{FileHandle, InnerFileHandle}; 6 | use crate::tasking::scheduler::{self, with_current_thread}; 7 | use crate::tasking::scheme_container::SchemeId; 8 | use crate::tasking::thread::ThreadId; 9 | use crate::wasm::wasi::Errno; 10 | use alloc::sync::Weak; 11 | use atomic::Atomic; 12 | use core::mem::size_of; 13 | use core::slice; 14 | use core::sync::atomic::{AtomicU64, Ordering}; 15 | 16 | /// Reply payload. 17 | /// We only wait at most for one reply. The reply data is very simple, it's just a status + data pair. 18 | /// In the case we have a non-blocking send, we don't have reply data. 19 | #[derive(Copy, Clone)] 20 | #[repr(C)] 21 | pub struct ReplyPayload { 22 | status: Errno, 23 | value: u64, 24 | } 25 | 26 | /// Reply payload inside the Tcb. 27 | pub struct ReplyPayloadTcb { 28 | status: Atomic, 29 | value: AtomicU64, 30 | } 31 | 32 | /// Reply from userspace. 33 | #[derive(Copy, Clone)] 34 | #[repr(C)] 35 | pub struct Reply { 36 | to: ThreadId, 37 | payload: ReplyPayload, 38 | } 39 | 40 | #[repr(C)] 41 | pub enum CommandData { 42 | Open(i32), // TODO: test 43 | Read(InnerFileHandle), 44 | } 45 | 46 | #[repr(C)] 47 | pub struct Command { 48 | thread_id: ThreadId, 49 | payload: CommandData, 50 | } 51 | 52 | pub type SchemePtr = Weak; 53 | 54 | // TODO: capability instead of thread sender 55 | pub struct Scheme { 56 | /// Identifier: needed for `blocked_on` in tcb. 57 | id: SchemeId, 58 | /// Command queue. 59 | command_queue: WaitQueue, 60 | } 61 | 62 | impl ReplyPayload { 63 | /// Creates `ReplyData` from `ReplyDataTcb`. 
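/// The Acquire load of `status` pairs with the Release store in `ReplyPayloadTcb::store`,
/// so once the new status is visible the matching `value` is visible too.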
64 | pub fn from(reply_data_tcb: &ReplyPayloadTcb) -> Self { 65 | let status = reply_data_tcb.status.load(Ordering::Acquire); 66 | let value = reply_data_tcb.value.load(Ordering::Relaxed); 67 | Self { status, value } 68 | } 69 | } 70 | 71 | impl ReplyPayloadTcb { 72 | /// Creates a new `ReplyDataTcb`. 73 | pub fn new() -> Self { 74 | Self { 75 | status: Atomic::new(Errno::Success), 76 | value: AtomicU64::new(0), 77 | } 78 | } 79 | 80 | /// Stores a new reply. 81 | pub fn store(&self, data: ReplyPayload) { 82 | self.value.store(data.value, Ordering::Relaxed); 83 | self.status.store(data.status, Ordering::Release); 84 | } 85 | } 86 | 87 | impl Scheme { 88 | /// Creates a new scheme. 89 | pub(crate) fn new(id: SchemeId) -> Self { 90 | Self { 91 | id, 92 | command_queue: WaitQueue::new(), 93 | } 94 | } 95 | 96 | /// Sends a blocking IPC message to the scheme. 97 | pub fn send_command_blocking(&self, payload: CommandData) -> ReplyPayload { 98 | with_current_thread(|t| { 99 | // Blocks the thread, sends the command and notifies the receiving thread. 100 | { 101 | preempt_disable(); 102 | let _block_guard = ThreadBlockGuard::activate(); 103 | //self.a.lock().insert(t.id, t.clone()); 104 | t.set_ipc_blocked_on(self.id); 105 | self.command_queue.push_back(Command { 106 | payload, 107 | thread_id: t.id, 108 | }); 109 | preempt_enable(); 110 | } 111 | 112 | t.set_ipc_blocked_on(SchemeId::sentinel()); 113 | 114 | // Response to sender comes here. 115 | ReplyPayload::from(&t.reply) 116 | }) 117 | } 118 | 119 | pub fn send_replies(&self, buffer: &[u8]) -> Result { 120 | // TODO: document 121 | let buffer = unsafe { 122 | slice::from_raw_parts( 123 | buffer as *const _ as *const Reply, 124 | buffer.len() / size_of::(), 125 | ) 126 | }; 127 | 128 | for reply in buffer { 129 | self.send_reply(*reply); 130 | } 131 | 132 | Ok(buffer.len() * size_of::()) 133 | } 134 | 135 | /// Opens a file inside the scheme. 136 | pub(crate) fn open(&self, lol: i32) -> Result { 137 | let response = self.send_command_blocking(CommandData::Open(lol)); 138 | match response.status { 139 | Errno::Success => Ok(FileHandle::Inner(InnerFileHandle(response.value))), 140 | e => Err(e), 141 | } 142 | } 143 | 144 | pub fn write(&self, handle: FileHandle, buffer: &[u8]) -> Result { 145 | // TODO: needs grants 146 | match handle { 147 | FileHandle::Own => self.send_replies(buffer), 148 | FileHandle::Inner(handle) => self.regular_write(handle, buffer), 149 | } 150 | } 151 | 152 | pub fn read(&self, handle: FileHandle, buffer: &mut [u8]) -> Result { 153 | match handle { 154 | FileHandle::Own => self.receive_commands_blocking(buffer), 155 | FileHandle::Inner(handle) => self.regular_read(handle, buffer), 156 | } 157 | } 158 | 159 | pub fn send_reply(&self, reply: Reply) { 160 | let success = with_thread(reply.to, |receiver| { 161 | if receiver.ipc_blocked_on() != self.id { 162 | false 163 | } else { 164 | receiver.reply.store(reply.payload); 165 | true 166 | } 167 | }); 168 | /*let success = if let Some(receiver) = self.a.lock().remove(&reply.to) { 169 | receiver.reply.store(reply.payload); 170 | true 171 | } else { 172 | false 173 | };*/ 174 | 175 | // This needs to be outside the lock. 
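        // `wakeup_and_yield` ends up in `thread_yield`, which asserts that no spinlock /
        // preemption guard is held, so the wakeup really has to happen out here.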
176 | if success { 177 | scheduler::wakeup_and_yield(reply.to); 178 | } 179 | } 180 | 181 | pub fn receive_commands_blocking(&self, buffer: &mut [u8]) -> Result { 182 | // TODO: document 183 | let buffer = unsafe { 184 | slice::from_raw_parts_mut( 185 | buffer as *mut _ as *mut Command, 186 | buffer.len() / size_of::(), 187 | ) 188 | }; 189 | 190 | let x = self.command_queue.pop_front_many(buffer); 191 | Ok(x * size_of::()) 192 | // TODO: map memory if required? 193 | } 194 | 195 | pub fn regular_read(&self, handle: InnerFileHandle, buffer: &mut [u8]) -> Result { 196 | // TODO: share memory or smth 197 | let reply = self.send_command_blocking(CommandData::Read(handle)); 198 | // TODO: ?? 199 | 200 | Ok(reply.value as usize) 201 | } 202 | 203 | pub fn regular_write(&self, handle: InnerFileHandle, buffer: &[u8]) -> Result { 204 | // TODO 205 | Ok(0) 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /kernel/src/tasking/protection_domain.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::asid::Asid; 2 | use crate::arch::paging::{ 3 | cpu_page_mapping_switch_to, get_cpu_page_mapping, ActiveMapping, CpuPageMapping, EntryFlags, 4 | PAGE_SIZE, 5 | }; 6 | use crate::arch::{get_per_cpu_data, preempt_disable, preempt_enable}; 7 | use crate::mm::mapper::{MemoryError, MemoryMapper}; 8 | use crate::mm::vma_allocator::VmaAllocator; 9 | use crate::sync::spinlock::Spinlock; 10 | use crate::tasking::scheduler::with_current_thread; 11 | use alloc::sync::Arc; 12 | use atomic::Atomic; 13 | use core::ops::DerefMut; 14 | 15 | /// Hardware memory protection domain. 16 | /// Responsible for safely getting both an active mapping & getting an address allocator. 17 | pub struct ProtectionDomain(Arc); 18 | 19 | /// Inner structure of a ProtectionDomain. 20 | struct ProtectionDomainInner { 21 | vma_allocator: Spinlock, 22 | mapping: CpuPageMapping, 23 | asid: bool, 24 | // TODO: expand on multi-core systems 25 | current_asid: Atomic, 26 | } 27 | 28 | /// Temporary switch guard. Returns to old page mapping when dropped. 29 | pub struct SwitchGuard(CpuPageMapping); 30 | 31 | impl SwitchGuard { 32 | /// Creates a new switch guard for a new mapping. 33 | unsafe fn new(new_mapping: CpuPageMapping) -> Self { 34 | preempt_disable(); 35 | let result = Self(get_cpu_page_mapping()); 36 | cpu_page_mapping_switch_to(new_mapping); 37 | result 38 | } 39 | } 40 | 41 | impl Drop for SwitchGuard { 42 | fn drop(&mut self) { 43 | unsafe { 44 | cpu_page_mapping_switch_to(self.0); 45 | } 46 | preempt_enable(); 47 | } 48 | } 49 | 50 | impl ProtectionDomain { 51 | /// Creates a new protection domain. 52 | pub fn new() -> Result { 53 | Ok(unsafe { Self::from_existing_mapping(ActiveMapping::get_new()?) }) 54 | } 55 | 56 | /// Creates a domain from an existing cpu page mapping. 
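/// If this CPU exposes an ASID manager (presumably backed by PCIDs on x86_64) an ASID is
/// allocated immediately; otherwise the domain keeps `Asid::null()` and the `asid` flag
/// stays false, so `cpu_page_mapping()` never tags the mapping with one.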
57 | pub unsafe fn from_existing_mapping(mapping: CpuPageMapping) -> Self { 58 | let (current_asid, asid) = if let Some(asid_manager) = get_per_cpu_data().asid_manager() { 59 | (asid_manager.borrow_mut().alloc(Asid::null()), true) 60 | } else { 61 | (Asid::null(), false) 62 | }; 63 | 64 | Self(Arc::new(ProtectionDomainInner { 65 | vma_allocator: Spinlock::new(VmaAllocator::new()), 66 | mapping, 67 | asid, 68 | current_asid: Atomic::new(current_asid), 69 | })) 70 | } 71 | 72 | /// Assign asid if necessary 73 | pub fn assign_asid_if_necessary(&self) { 74 | if let Some(asid_manager) = get_per_cpu_data().asid_manager() { 75 | let mut asid_manager = asid_manager.borrow_mut(); 76 | let old = self.0.current_asid.load(atomic::Ordering::Acquire); 77 | if !asid_manager.is_valid(old) { 78 | self.0 79 | .current_asid 80 | .store(asid_manager.alloc(old), atomic::Ordering::Release); 81 | } 82 | } 83 | } 84 | 85 | /// Temporarily switch to this mapping. 86 | pub unsafe fn temporarily_switch(&self) -> SwitchGuard { 87 | SwitchGuard::new(self.0.mapping) 88 | } 89 | 90 | /// Gets the cpu page mapping 91 | #[inline] 92 | pub fn cpu_page_mapping(&self) -> CpuPageMapping { 93 | if self.0.asid { 94 | self.0 95 | .mapping 96 | .with_asid(self.0.current_asid.load(atomic::Ordering::Acquire)) 97 | } else { 98 | self.0.mapping 99 | } 100 | } 101 | 102 | /// Checks if we can avoid locks for this domain. 103 | #[inline] 104 | fn can_avoid_locks(&self) -> bool { 105 | // We can avoid locks if we have only one thread containing this domain. 106 | // That's because to clone this domain, you need to have access to a thread which 107 | // has access to this domain. 108 | // Since this code is also executing from a thread containing this domain, 109 | // we know that this is the only executing code that has access to this domain. 110 | // That means we can avoid locking because this thread is the only accessor. 111 | Arc::strong_count(&self.0) == 1 112 | } 113 | 114 | /// Clones this domain reference. 115 | pub fn clone(&self) -> Self { 116 | Self(self.0.clone()) 117 | } 118 | 119 | /// Execute action with both the Vma allocator and active mapping. 120 | #[inline] 121 | pub fn with(&self, f: F) -> T 122 | where 123 | F: FnOnce(&mut VmaAllocator, &mut ActiveMapping) -> T, 124 | { 125 | debug_assert_eq!( 126 | self.0.mapping.as_phys_addr(), 127 | get_cpu_page_mapping().as_phys_addr() 128 | ); 129 | 130 | if self.can_avoid_locks() { 131 | let inner = unsafe { &mut *self.0.vma_allocator.get_cell().get() }; 132 | // Safety: only we can access the active mapping, since we're the only thread. 133 | // Otherwise can_avoid_locks would've returned false. 134 | f(inner, &mut unsafe { ActiveMapping::get_unlocked() }) 135 | } else { 136 | let mut inner = self.0.vma_allocator.lock(); 137 | let inner = inner.deref_mut(); 138 | // Safety: inner lock also covers the active mapping. 139 | f(inner, &mut unsafe { ActiveMapping::get_unlocked() }) 140 | } 141 | } 142 | } 143 | 144 | impl Drop for ProtectionDomainInner { 145 | fn drop(&mut self) { 146 | debug_assert_ne!(self.mapping, get_cpu_page_mapping()); 147 | 148 | // Free the old asid. 149 | if let Some(asid_manager) = get_per_cpu_data().asid_manager() { 150 | asid_manager 151 | .borrow_mut() 152 | .free(self.current_asid.load(atomic::Ordering::Acquire)); 153 | } 154 | 155 | // The PMM expects a virtual address because it needs to update the list. 156 | // We can use the mapping system to map a page without allocating a frame, 157 | // and then unmapping _with_ freeing the frame. 
158 | with_current_thread(|thread| { 159 | thread.domain().with(|vma, mapping| { 160 | let paddr = self.mapping.as_phys_addr(); 161 | 162 | let _ = vma 163 | .alloc_region(PAGE_SIZE) 164 | .ok_or(MemoryError::NoMoreVMA) 165 | .and_then(|vaddr| { 166 | mapping.map_range_physical( 167 | vaddr, 168 | paddr, 169 | PAGE_SIZE, 170 | EntryFlags::PRESENT | EntryFlags::NX | EntryFlags::WRITABLE, 171 | )?; 172 | Ok(vaddr) 173 | }) 174 | .map(|vaddr| mapping.unmap_single(vaddr)); 175 | }); 176 | }); 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /lib/wasm-call/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | 3 | use proc_macro::TokenStream; 4 | use proc_macro_error::*; 5 | use quote::{format_ident, quote}; 6 | use syn::parse::{Parse, ParseStream, Result}; 7 | use syn::punctuated::Punctuated; 8 | use syn::{parenthesized, token, Ident, Token}; 9 | use syn::{parse_macro_input, Type}; 10 | use quote::ToTokens; 11 | 12 | struct AbiFunctions { 13 | functions: Punctuated, 14 | } 15 | 16 | struct AbiFunction { 17 | name: Ident, 18 | _colon_token: Token![:], 19 | _paren_token: token::Paren, 20 | params: Punctuated, 21 | _rarrow_token: token::RArrow, 22 | return_type: Type, 23 | } 24 | 25 | struct AbiFunctionParam { 26 | name: Ident, 27 | _colon_token: Token![:], 28 | ty: Type, 29 | } 30 | 31 | impl Parse for AbiFunctions { 32 | fn parse(input: ParseStream) -> Result { 33 | Ok(Self { 34 | functions: input.parse_terminated(AbiFunction::parse)?, 35 | }) 36 | } 37 | } 38 | 39 | impl Parse for AbiFunction { 40 | fn parse(input: ParseStream) -> Result { 41 | let content; 42 | let name = input.parse()?; 43 | let _colon_token = input.parse()?; 44 | let _paren_token = parenthesized!(content in input); 45 | let params = content.parse_terminated(AbiFunctionParam::parse)?; 46 | let _rarrow_token = input.parse()?; 47 | let return_type = input.parse()?; 48 | Ok(Self { 49 | name, 50 | _colon_token, 51 | _paren_token, 52 | params, 53 | _rarrow_token, 54 | return_type, 55 | }) 56 | } 57 | } 58 | 59 | impl Parse for AbiFunctionParam { 60 | fn parse(input: ParseStream) -> Result { 61 | Ok(Self { 62 | name: input.parse()?, 63 | _colon_token: input.parse()?, 64 | ty: input.parse()?, 65 | }) 66 | } 67 | } 68 | 69 | impl ToTokens for AbiFunctionParam { 70 | fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { 71 | let name = &self.name; 72 | let ty = &self.ty; 73 | (quote! 
{ 74 | #name: #ty 75 | }) 76 | .to_tokens(tokens); 77 | } 78 | } 79 | 80 | #[proc_macro] 81 | #[proc_macro_error] 82 | pub fn abi_functions(input: TokenStream) -> TokenStream { 83 | let input = parse_macro_input!(input as AbiFunctions); 84 | 85 | let mut trait_funcs = Vec::new(); 86 | let mut glue_functions = Vec::new(); 87 | let mut map_entries = Vec::new(); 88 | 89 | for func in input.functions { 90 | let name = func.name.to_token_stream(); 91 | let return_type = func.return_type; 92 | let params = func.params.iter(); 93 | let mut translated_types = Vec::new(); 94 | 95 | let errno_return = match &return_type { 96 | Type::Path(p) => { 97 | assert_eq!(p.path.segments.len(), 1); 98 | match p.path.segments[0].ident.to_string().as_str() { 99 | "Errno" => true, 100 | _ => { 101 | emit_error!(return_type, "unexpected return type"); 102 | false 103 | } 104 | } 105 | } 106 | Type::Tuple(t) => { 107 | if !t.elems.is_empty() { 108 | emit_error!(t, "unexpected tuple"); 109 | } 110 | false 111 | } 112 | _ => { 113 | emit_error!( 114 | return_type, 115 | "unexpected return type {:?}", 116 | return_type.to_token_stream() 117 | ); 118 | false 119 | } 120 | }; 121 | 122 | let trait_return_type = if errno_return { 123 | quote! { WasmStatus } 124 | } else { 125 | quote! { () } 126 | }; 127 | 128 | trait_funcs.push(quote! { 129 | #name(&self, #(#params),*) -> #trait_return_type 130 | }); 131 | 132 | for param in &func.params { 133 | translated_types.push(match ¶m.ty { 134 | Type::Slice(_) | Type::Reference(_) | Type::Ptr(_) => { 135 | emit_error!(param, "use the wasm pointer type"); 136 | quote! {} // Just here for the compiler 137 | } 138 | Type::Path(p) => { 139 | assert_eq!(p.path.segments.len(), 1); 140 | match p.path.segments[0].ident.to_string().as_str() { 141 | "i64" | "u64" | "Rights" => quote! { types::I64 }, 142 | "u32" | "i32" | "Fd" | "ExitCode" | "WasmPtr" | "Size" | "LookupFlags" | "OFlags" | "FdFlags" => quote! { types::I32 }, 143 | "i16" | "u16" => quote! { types::I16 }, 144 | "i8" | "u8" => quote! { types::I8 }, 145 | _ => unimplemented!("{:?}", p.path.to_token_stream()), 146 | } 147 | } 148 | _ => unimplemented!(), 149 | }); 150 | } 151 | 152 | // Glue code generation 153 | let glue_name = format_ident!("__abi_{}", func.name); 154 | let member_func_name = &func.name; 155 | let param_names = func.params.iter().map(|p| &p.name); 156 | let params = func.params.iter(); 157 | 158 | let function_body_call = quote! { 159 | vmctx.#member_func_name(#(#param_names),*) 160 | }; 161 | 162 | let function_body = if errno_return { 163 | quote! { 164 | if let Err(e) = #function_body_call { 165 | e 166 | } else { 167 | Errno::Success 168 | } 169 | } 170 | } else { 171 | function_body_call 172 | }; 173 | 174 | let sig_returns = if errno_return { 175 | quote! { AbiParam::new(types::I32) } 176 | } else { 177 | quote! {} 178 | }; 179 | 180 | glue_functions.push(quote! { 181 | extern "C" fn #glue_name(vmctx: &VmContext, #(#params),*) -> #return_type { 182 | #function_body 183 | } 184 | }); 185 | 186 | // Map code generation 187 | let key = syn::LitStr::new( 188 | &member_func_name.to_token_stream().to_string(), 189 | member_func_name.span(), 190 | ); 191 | map_entries.push(quote! { 192 | map.insert(#key, (VirtAddr::new(#glue_name as usize), Signature { 193 | params: vec![AbiParam::special(WASM_VMCTX_TYPE, ArgumentPurpose::VMContext), #(AbiParam::new(#translated_types)),*], 194 | returns: vec![#sig_returns], 195 | call_conv: WASM_CALL_CONV, 196 | })); 197 | }); 198 | } 199 | 200 | let result = quote! 
{ 201 | trait AbiFunctions { 202 | #(fn #trait_funcs;)* 203 | } 204 | 205 | #(#glue_functions)* 206 | 207 | lazy_static! { 208 | static ref ABI_MAP: BTreeMap<&'static str, (VirtAddr, Signature)> = { 209 | let mut map = BTreeMap::new(); 210 | #(#map_entries)* 211 | map 212 | }; 213 | } 214 | }; 215 | 216 | TokenStream::from(result) 217 | } 218 | -------------------------------------------------------------------------------- /kernel/src/mm/vma_allocator.rs: -------------------------------------------------------------------------------- 1 | //! Allocator used to split an address space domain into virtual memory areas. 2 | 3 | use crate::arch; 4 | use crate::arch::address::VirtAddr; 5 | use crate::arch::paging::{ActiveMapping, EntryFlags, PAGE_SIZE}; 6 | use crate::mm::avl_interval_tree::AVLIntervalTree; 7 | use crate::mm::mapper::{MemoryError, MemoryMapper}; 8 | use crate::util::mem_funcs::page_clear; 9 | use core::intrinsics::{likely, unlikely}; 10 | 11 | /// Virtual memory allocator. 12 | pub struct VmaAllocator { 13 | tree: AVLIntervalTree, 14 | } 15 | 16 | /// Virtual memory area. 17 | #[derive(Debug, Eq, PartialEq)] 18 | pub struct Vma { 19 | start: VirtAddr, 20 | size: usize, 21 | } 22 | 23 | pub trait MappableVma { 24 | /// Gets the starting address. 25 | fn address(&self) -> VirtAddr; 26 | 27 | /// Gets the size. 28 | fn size(&self) -> usize; 29 | 30 | /// Checks if the address is contained within the area. 31 | fn is_contained(&self, addr: VirtAddr) -> bool { 32 | self.address().as_usize() <= addr.as_usize() 33 | && (self.address() + self.size()).as_usize() > addr.as_usize() 34 | } 35 | 36 | /// Unmaps the mapped memory. 37 | fn unmap(&self, mapping: &mut ActiveMapping); 38 | } 39 | 40 | /// Mapped of a Vma (may be partially). 41 | #[derive(Debug, Eq, PartialEq)] 42 | pub struct MappedVma { 43 | vma: Vma, 44 | } 45 | 46 | /// Lazily mapped Vma, mapped on access. 47 | #[derive(Debug)] 48 | pub struct LazilyMappedVma { 49 | vma: Vma, 50 | /// The flags to use when mapping the memory. 51 | flags: EntryFlags, 52 | /// The size of the real mapped part. 53 | allocated_size: usize, 54 | } 55 | 56 | impl Vma { 57 | /// Dummy Vma. 58 | pub const fn dummy() -> Self { 59 | Self { 60 | start: VirtAddr::null(), 61 | size: 0, 62 | } 63 | } 64 | 65 | /// Convert to mapped Vma. 66 | pub fn map( 67 | self, 68 | mapping: &mut ActiveMapping, 69 | map_off: usize, 70 | map_size: usize, 71 | flags: EntryFlags, 72 | ) -> Result { 73 | debug_assert!(map_off % PAGE_SIZE == 0); 74 | debug_assert!(map_size % PAGE_SIZE == 0); 75 | 76 | if unlikely(map_off >= self.size || map_off + map_size > self.size) { 77 | Err(MemoryError::InvalidRange) 78 | } else { 79 | mapping.map_range(self.start + map_off, map_size, flags)?; 80 | Ok(MappedVma { vma: self }) 81 | } 82 | } 83 | 84 | /// Convert to a lazily mapped Vma. 85 | pub fn map_lazily( 86 | self, 87 | mapping: &mut ActiveMapping, 88 | allocated_size: usize, 89 | flags: EntryFlags, 90 | ) -> Result { 91 | debug_assert!(allocated_size % PAGE_SIZE == 0); 92 | 93 | if allocated_size > self.size { 94 | Err(MemoryError::InvalidRange) 95 | } else { 96 | mapping.map_range(self.start, allocated_size, flags)?; 97 | 98 | Ok(LazilyMappedVma { 99 | vma: self, 100 | flags, 101 | allocated_size, 102 | }) 103 | } 104 | } 105 | 106 | /// Gets the starting address. 107 | #[inline] 108 | pub fn address(&self) -> VirtAddr { 109 | self.start 110 | } 111 | 112 | /// Gets the length. 
113 | #[inline] 114 | pub fn size(&self) -> usize { 115 | self.size 116 | } 117 | } 118 | 119 | impl MappedVma { 120 | /// Dummy Vma. 121 | pub const fn dummy() -> Self { 122 | Self { vma: Vma::dummy() } 123 | } 124 | 125 | /// Is dummy? 126 | pub fn is_dummy(&self) -> bool { 127 | *self == Self::dummy() 128 | } 129 | 130 | /// Forget mapping. Unsafe because if you don't manually free it you leak memory. 131 | pub unsafe fn forget_mapping(&mut self) { 132 | self.vma.size = 0; 133 | } 134 | } 135 | 136 | impl MappableVma for MappedVma { 137 | #[inline] 138 | fn address(&self) -> VirtAddr { 139 | self.vma.address() 140 | } 141 | 142 | #[inline] 143 | fn size(&self) -> usize { 144 | self.vma.size() 145 | } 146 | 147 | fn unmap(&self, mapping: &mut ActiveMapping) { 148 | drop_mapping(mapping, self.address(), self.size()); 149 | } 150 | } 151 | 152 | impl LazilyMappedVma { 153 | /// Dummy mapped Vma. 154 | pub const fn dummy() -> Self { 155 | Self { 156 | vma: Vma::dummy(), 157 | flags: EntryFlags::empty(), 158 | allocated_size: 0, 159 | } 160 | } 161 | 162 | /// Expands the allocated size. 163 | /// Returns the old size on success, an error on failure. 164 | pub fn expand(&mut self, amount: usize) -> Result { 165 | let old_size = self.allocated_size; 166 | let new_size = old_size 167 | .checked_add(amount) 168 | .ok_or(MemoryError::InvalidRange)?; 169 | 170 | if new_size > self.vma.size { 171 | Err(MemoryError::InvalidRange) 172 | } else { 173 | self.allocated_size = new_size; 174 | Ok(old_size) 175 | } 176 | } 177 | 178 | /// Try handle a page fault. 179 | pub fn try_handle_page_fault( 180 | &mut self, 181 | mapping: &mut ActiveMapping, 182 | fault_addr: VirtAddr, 183 | ) -> bool { 184 | if likely(self.is_contained(fault_addr)) { 185 | let flags = self.flags(); 186 | let map_addr = fault_addr.align_down(); 187 | 188 | // After the mapping is successful, we need to clear the memory to avoid information leaks. 189 | if mapping.get_and_map_single(map_addr, flags).is_ok() { 190 | let ptr: *mut u8 = map_addr.as_mut(); 191 | // Safe because valid pointer and valid size. 192 | unsafe { 193 | page_clear(ptr); 194 | } 195 | 196 | return true; 197 | } 198 | } 199 | 200 | false 201 | } 202 | 203 | /// Gets the flags to use when mapping the memory. 204 | #[inline] 205 | pub fn flags(&self) -> EntryFlags { 206 | self.flags 207 | } 208 | } 209 | 210 | impl MappableVma for LazilyMappedVma { 211 | #[inline] 212 | fn address(&self) -> VirtAddr { 213 | self.vma.address() 214 | } 215 | 216 | #[inline] 217 | fn size(&self) -> usize { 218 | self.allocated_size 219 | } 220 | 221 | fn unmap(&self, mapping: &mut ActiveMapping) { 222 | drop_mapping(mapping, self.address(), self.size()); 223 | } 224 | } 225 | 226 | fn drop_mapping(mapping: &mut ActiveMapping, start: VirtAddr, size: usize) { 227 | // We don't need to tell the exact mapped range, we own all of this. 228 | // For an empty mapping, the size will be zero, so we don't have to check that. 229 | mapping.free_and_unmap_range(start, size); 230 | } 231 | 232 | impl VmaAllocator { 233 | /// Creates a new VMA allocator. 234 | pub fn new() -> Self { 235 | let mut tree = AVLIntervalTree::new(); 236 | tree.insert(arch::USER_START, arch::USER_LEN); 237 | 238 | Self { tree } 239 | } 240 | 241 | /// Inserts a region. 242 | pub fn insert_region(&mut self, addr: VirtAddr, len: usize) { 243 | debug_assert!(addr.is_page_aligned()); 244 | debug_assert!(len % PAGE_SIZE == 0); 245 | self.tree.return_interval(addr.as_usize(), len); 246 | } 247 | 248 | /// Allocates a region. 
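/// This only carves a page-aligned range out of the free-interval tree; nothing is mapped
/// yet, so the caller still has to map the returned address range (e.g. via `Vma::map`).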
249 | pub fn alloc_region(&mut self, len: usize) -> Option { 250 | debug_assert!(len % PAGE_SIZE == 0); 251 | self.tree.find_len(len).map(VirtAddr::new) 252 | } 253 | 254 | /// Creates a new Vma of the requested size. 255 | pub fn create_vma(&mut self, size: usize) -> Result { 256 | debug_assert!(size % PAGE_SIZE == 0); 257 | 258 | self.alloc_region(size) 259 | .map(|start| Vma { start, size }) 260 | .ok_or(MemoryError::NoMoreVMA) 261 | } 262 | 263 | /// Destroy a Vma. 264 | pub fn destroy_vma(&mut self, mapping: &mut ActiveMapping, vma: &M) { 265 | self.insert_region(vma.address(), vma.size()); 266 | vma.unmap(mapping); 267 | } 268 | } 269 | -------------------------------------------------------------------------------- /kernel/src/tasking/scheduler.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::arch::get_per_cpu_data; 3 | use crate::arch::paging::{get_cpu_page_mapping, CpuPageMapping}; 4 | use crate::mm::tcb_alloc::{tcb_alloc, tcb_dealloc, with_thread}; 5 | use crate::mm::vma_allocator::MappedVma; 6 | use crate::sync::spinlock::Spinlock; 7 | use crate::tasking::protection_domain::ProtectionDomain; 8 | use crate::tasking::thread::{Stack, Thread, ThreadId, ThreadStatus}; 9 | use alloc::collections::VecDeque; 10 | use atomic::Atomic; 11 | use core::intrinsics::{likely, unlikely}; 12 | use core::sync::atomic::Ordering; 13 | use spin::Once; 14 | 15 | /// Per-core queues. 16 | struct Queues { 17 | run_queue: VecDeque, 18 | } 19 | 20 | /// Per-core scheduler. 21 | pub struct Scheduler { 22 | queues: Spinlock, 23 | garbage: Atomic, 24 | current_thread_id: Atomic, 25 | idle_thread_id: ThreadId, 26 | } 27 | 28 | impl Scheduler { 29 | /// New scheduler. 30 | fn new(idle_protection_domain: ProtectionDomain) -> Self { 31 | // This state will be overwritten on the first context switch with data from the current running code. 32 | let idle_thread = Thread::new(Stack::new(MappedVma::dummy()), idle_protection_domain); 33 | let idle_thread_id = idle_thread.id; 34 | tcb_alloc(idle_thread); 35 | 36 | Self { 37 | queues: Spinlock::new(Queues { 38 | run_queue: VecDeque::new(), 39 | }), 40 | garbage: Atomic::new(ThreadId::zero()), 41 | current_thread_id: Atomic::new(idle_thread_id), 42 | idle_thread_id, 43 | } 44 | } 45 | 46 | /// Adds a thread to the runqueue. 47 | pub fn queue_thread(&self, thread: ThreadId) { 48 | self.queues.lock().run_queue.push_back(thread); 49 | } 50 | 51 | /// Gets the next thread to run. 52 | #[inline] 53 | fn next_thread(&self, queues: &mut Queues) -> ThreadId { 54 | if let Some(thread) = queues.run_queue.pop_front() { 55 | thread 56 | } else { 57 | self.idle_thread_id 58 | } 59 | } 60 | 61 | /// Execute something with the current thread reference. 62 | pub fn with_current_thread(&self, f: F) -> T 63 | where 64 | F: FnOnce(&Thread) -> T, 65 | { 66 | with_thread(self.current_thread_id.load(Ordering::Acquire), f) 67 | } 68 | 69 | /// Gets the current thread id. 70 | #[inline] 71 | pub fn current_thread_id(&self) -> ThreadId { 72 | self.idle_thread_id 73 | } 74 | 75 | /// Moves a thread from the blocked queue to the runqueue if it was in the blocked queue. 76 | /// Returns true if it was in the blocked queue. 77 | pub(crate) fn move_wakeup(&self, thread_id: ThreadId) { 78 | let mut queues = self.queues.lock(); 79 | queues.run_queue.push_front(thread_id); 80 | } 81 | 82 | /// Sets the scheduler up for switching to the next thread and gets the next thread stack address. 
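/// Runs on the context-switch path: it frees at most one garbage TCB, saves the old
/// thread's SIMD state and stack pointer (unless it exited), requeues it or marks it as
/// garbage, picks the next runnable thread (falling back to the idle thread) and returns
/// that thread's saved stack pointer plus its page mapping, or a sentinel mapping when
/// the address space does not change.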
83 | fn next_thread_state(&self, old_stack: VirtAddr) -> NextThreadState { 84 | // Cleanup old thread. 85 | // Relaxed ordering is fine because this is only for this core. 86 | let garbage = self.garbage.load(Ordering::Relaxed); 87 | if unlikely(garbage != ThreadId::zero()) { 88 | tcb_dealloc(garbage); 89 | self.garbage.store(ThreadId::zero(), Ordering::Relaxed); 90 | } 91 | 92 | let mut queues = self.queues.lock(); 93 | 94 | let old_thread_id = self.current_thread_id.load(Ordering::Acquire); 95 | 96 | let (old_mapping, old_thread_status) = with_thread(old_thread_id, |old_thread| { 97 | let old_thread_status = old_thread.status(); 98 | 99 | if likely(!matches!(old_thread_status, ThreadStatus::Exit(_))) { 100 | old_thread.save_simd(); 101 | old_thread.stack.set_current_location(old_stack); 102 | } 103 | 104 | (old_thread.domain().cpu_page_mapping(), old_thread_status) 105 | }); 106 | 107 | match old_thread_status { 108 | ThreadStatus::Runnable => { 109 | if likely(old_thread_id != self.idle_thread_id) { 110 | queues.run_queue.push_back(old_thread_id); 111 | } 112 | } 113 | 114 | ThreadStatus::Blocked => {} 115 | 116 | ThreadStatus::Exit(_) => { 117 | debug_assert_eq!(self.garbage.load(Ordering::Relaxed), ThreadId::zero()); 118 | // Safety: We call this from an uninterrupted place and we are not referencing thread memory here. 119 | unsafe { 120 | with_thread(old_thread_id, |old_thread| { 121 | old_thread.unmap_memory(); 122 | }); 123 | } 124 | self.garbage.store(old_thread_id, Ordering::Relaxed); 125 | } 126 | }; 127 | 128 | /*print!("runqueue: "); 129 | for x in &queues.run_queue { 130 | print!("{:?} ", x.id); 131 | } 132 | println!(); 133 | print!("blocked: "); 134 | for x in &queues.blocked_threads { 135 | print!("{:?} ", x.id); 136 | } 137 | println!();*/ 138 | 139 | let next_thread_id = self.next_thread(&mut queues); 140 | debug_assert_eq!( 141 | { with_thread(next_thread_id, |next_thread| next_thread.status(),) }, 142 | ThreadStatus::Runnable 143 | ); 144 | 145 | self.current_thread_id 146 | .store(next_thread_id, Ordering::Release); 147 | 148 | self.with_current_thread(|current_thread| { 149 | current_thread.restore_simd(); 150 | let domain = current_thread.domain(); 151 | domain.assign_asid_if_necessary(); 152 | NextThreadState(current_thread.stack.get_current_location(), { 153 | let new_mapping = domain.cpu_page_mapping(); 154 | if old_mapping == new_mapping { 155 | CpuPageMapping::sentinel() 156 | } else { 157 | new_mapping 158 | } 159 | }) 160 | }) 161 | } 162 | } 163 | 164 | /// Switches to the next thread. 165 | #[inline] 166 | fn switch_to_next() { 167 | extern "C" { 168 | fn _switch_to_next(); 169 | } 170 | 171 | unsafe { 172 | _switch_to_next(); 173 | } 174 | } 175 | 176 | /// Yield the current thread. 177 | #[inline] 178 | pub fn thread_yield() { 179 | // If we manually switch and the `preempt_count` isn't zero, that indicates an issue in the code. 180 | debug_assert_eq!( 181 | get_per_cpu_data().preempt_count(), 182 | 0, 183 | "trying to preempt while holding a spinlock" 184 | ); 185 | 186 | switch_to_next(); 187 | } 188 | 189 | /// Wakeup and yield. 190 | pub fn wakeup_and_yield(tid: ThreadId) { 191 | with_thread(tid, |t| t.wakeup()); 192 | thread_yield(); 193 | } 194 | 195 | /// Exit the thread. 196 | pub fn thread_exit(exit_code: u32) -> ! 
{ 197 | extern "C" { 198 | fn _thread_exit() -> !; 199 | } 200 | 201 | with_core_scheduler(|s| { 202 | s.with_current_thread(|thread| { 203 | assert_ne!( 204 | thread.id, s.idle_thread_id, 205 | "Attempting to kill the idle thread" 206 | ); 207 | thread.set_status(ThreadStatus::Exit(exit_code)) 208 | }) 209 | }); 210 | println!("thread exit: {}", exit_code); 211 | 212 | unsafe { 213 | _thread_exit(); 214 | } 215 | } 216 | 217 | #[repr(C)] 218 | struct NextThreadState(VirtAddr, CpuPageMapping); 219 | 220 | /// Saves the old state and gets the next state. 221 | #[no_mangle] 222 | extern "C" fn next_thread_state(old_stack: VirtAddr) -> NextThreadState { 223 | with_core_scheduler(|scheduler| scheduler.next_thread_state(old_stack)) 224 | } 225 | 226 | // TODO: make this per core once we go multicore 227 | static SCHEDULER: Once<Scheduler> = Once::new(); 228 | 229 | /// Adds and schedules a thread. 230 | pub fn add_and_schedule_thread(thread: Thread) { 231 | let tid = thread.id; 232 | tcb_alloc(thread); 233 | with_core_scheduler(|scheduler| scheduler.queue_thread(tid)); 234 | } 235 | 236 | /// Execute something using this core-local scheduler. 237 | pub fn with_core_scheduler<F, T>(f: F) -> T 238 | where 239 | F: FnOnce(&Scheduler) -> T, 240 | { 241 | f(&SCHEDULER.try_get().expect("core scheduler")) 242 | } 243 | 244 | /// Execute something using the current thread reference. 245 | pub fn with_current_thread<F, T>(f: F) -> T 246 | where 247 | F: FnOnce(&Thread) -> T, 248 | { 249 | with_core_scheduler(|s| s.with_current_thread(f)) 250 | } 251 | 252 | /// Initializes the scheduler. 253 | pub fn init() { 254 | SCHEDULER.call_once(|| { 255 | let idle_protection_domain = 256 | unsafe { ProtectionDomain::from_existing_mapping(get_cpu_page_mapping()) }; 257 | Scheduler::new(idle_protection_domain) 258 | }); 259 | } 260 | -------------------------------------------------------------------------------- /kernel/src/wasm/vmctx.rs: -------------------------------------------------------------------------------- 1 | use crate::arch::address::VirtAddr; 2 | use crate::arch::paging::PAGE_SIZE; 3 | use crate::wasm::table::Table; 4 | use alloc::alloc::{alloc, dealloc, handle_alloc_error}; 5 | use alloc::vec::Vec; 6 | use core::alloc::Layout; 7 | use core::mem::{align_of, size_of}; 8 | use core::slice; 9 | use cranelift_wasm::{Global, GlobalInit, SignatureIndex, TableIndex}; 10 | 11 | pub const WASM_PAGE_SIZE: usize = 64 * 1024; 12 | 13 | pub const HEAP_SIZE: u64 = 4 * 1024 * 1024 * 1024; // 4 GiB 14 | 15 | pub const HEAP_GUARD_SIZE: u64 = PAGE_SIZE as u64; 16 | 17 | /// Table representation as it is for the VmContext. 18 | #[repr(C)] 19 | #[derive(Debug)] 20 | pub struct VmTable { 21 | /// Base address to the function pointers. 22 | pub base_address: VirtAddr, 23 | /// Number of items currently in the table. 24 | pub amount_items: u32, 25 | } 26 | 27 | /// A single element in the table representation for a VmContext. 28 | #[repr(C)] 29 | #[derive(Debug, Copy, Clone)] 30 | pub struct VmTableElement { 31 | pub address: VirtAddr, 32 | pub sig_idx: u64, 33 | } 34 | 35 | #[repr(C)] 36 | pub struct VmFunctionImportEntry { 37 | pub address: VirtAddr, 38 | } 39 | 40 | /// Context for a Wasm execution.
41 | /// Layout of the VmContext: 42 | /// 43 | /// ----------------------------- 44 | /// | Heap pointer | 45 | /// ----------------------------- 46 | /// | all globals | 47 | /// ----------------------------- 48 | /// | all VmFunctionImportEntry | 49 | /// ----------------------------- 50 | /// | all VmTable | 51 | /// ----------------------------- 52 | /// 53 | #[repr(C, align(16))] 54 | pub struct VmContext { 55 | // Note: Variable size struct, heap pointer provided for convenience. 56 | pub heap_ptr: VirtAddr, 57 | } 58 | 59 | // All globals have the same size right now. 60 | // TODO: support globals that take a different amount of bytes 61 | type VmGlobal = [u8; 8]; 62 | 63 | pub struct VmContextContainer { 64 | ptr: VirtAddr, 65 | num_imported_funcs: u32, 66 | num_globals: u32, 67 | tables: Vec<Table>, 68 | } 69 | 70 | impl VmTableElement { 71 | /// Offset of the field `address`. 72 | #[inline] 73 | pub fn address_offset() -> i32 { 74 | offset_of!(Self, address) as i32 75 | } 76 | 77 | /// Offset of the field `sig_idx`. 78 | #[inline] 79 | pub fn sig_idx_offset() -> i32 { 80 | offset_of!(Self, sig_idx) as i32 81 | } 82 | } 83 | 84 | impl VmTable { 85 | /// Offset of the field `base_address`. 86 | #[inline] 87 | pub fn base_address_offset() -> i32 { 88 | offset_of!(Self, base_address) as i32 89 | } 90 | 91 | /// Offset of the field `amount_items`. 92 | #[inline] 93 | pub fn amount_items_offset() -> i32 { 94 | offset_of!(Self, amount_items) as i32 95 | } 96 | } 97 | 98 | impl VmTableElement { 99 | /// Null. 100 | pub fn null() -> Self { 101 | Self { 102 | address: VirtAddr::null(), 103 | sig_idx: core::u64::MAX, // Important: check func_env 104 | } 105 | } 106 | 107 | /// Creates a new table element. 108 | pub fn new(address: VirtAddr, sig_idx: SignatureIndex) -> Self { 109 | Self { 110 | address, 111 | sig_idx: sig_idx.as_u32() as u64, 112 | } 113 | } 114 | } 115 | 116 | impl VmContext { 117 | /// Heap offset in the context. 118 | pub fn heap_offset() -> i32 { 119 | offset_of!(VmContext, heap_ptr) as i32 120 | } 121 | 122 | /// Heap offset field size. 123 | pub const fn heap_offset_field_size() -> i32 { 124 | size_of::<VirtAddr>() as i32 125 | } 126 | 127 | /// Offset of the globals. 128 | pub fn globals_offset() -> i32 { 129 | Self::heap_offset() + Self::heap_offset_field_size() 130 | } 131 | 132 | /// Offset of a global entry. 133 | pub fn global_entry_offset(index: u32) -> isize { 134 | Self::globals_offset() as isize + (size_of::<VmGlobal>() * index as usize) as isize 135 | } 136 | 137 | /// Offset of imported functions. 138 | pub fn imported_funcs_offset(num_globals: u32) -> isize { 139 | Self::global_entry_offset(num_globals) 140 | } 141 | 142 | /// Offset of an imported function entry. 143 | pub fn imported_func_entry_offset(num_globals: u32, index: u32) -> isize { 144 | Self::imported_funcs_offset(num_globals) as isize 145 | + (size_of::<VmFunctionImportEntry>() * index as usize) as isize 146 | } 147 | 148 | /// Offset of the tables. 149 | pub fn tables_offset(num_globals: u32, num_imported_funcs: u32) -> isize { 150 | Self::imported_func_entry_offset(num_globals, num_imported_funcs) 151 | } 152 | 153 | /// Offset of a table. 154 | pub fn table_entry_offset(num_globals: u32, num_imported_funcs: u32, index: u32) -> isize { 155 | Self::tables_offset(num_globals, num_imported_funcs) 156 | + (index as usize * size_of::<VmTable>()) as isize 157 | } 158 | 159 | /// Calculates the size of the context.
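/// /// Everything is laid out back to back, so the total size is simply the offset one past the last table. As a rough worked example (assuming 8-byte pointers and the current 8-byte `VmGlobal`): a context with 2 globals, 1 imported function and 1 table takes 8 (heap_ptr) + 2 * 8 (globals) + 1 * 8 (import entry) + 1 * size_of::<VmTable>() bytes.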
160 | pub fn size(num_globals: u32, num_imported_funcs: u32, num_tables: u32) -> usize { 161 | Self::table_entry_offset(num_globals, num_imported_funcs, num_tables) as usize 162 | } 163 | } 164 | 165 | #[allow(clippy::cast_ptr_alignment)] 166 | impl VmContextContainer { 167 | /// Creates a new container for a VmContext. 168 | pub unsafe fn new( 169 | heap: VirtAddr, 170 | num_globals: u32, 171 | num_imported_funcs: u32, 172 | tables: Vec<Table>
, 173 | ) -> Self { 174 | // Allocate the memory for the VmContext. 175 | let layout = Self::layout(num_globals, num_imported_funcs, tables.len() as u32); 176 | let ptr = alloc(layout); 177 | if ptr.is_null() { 178 | handle_alloc_error(layout); 179 | } 180 | 181 | // Set the heap pointer here already. 182 | let heap_ptr = ptr.offset(VmContext::heap_offset() as isize) as *mut VirtAddr; 183 | *heap_ptr = heap; 184 | 185 | Self { 186 | ptr: VirtAddr::from(ptr), 187 | num_imported_funcs, 188 | num_globals, 189 | tables, 190 | } 191 | } 192 | 193 | /// Gets the pointer to the context. 194 | pub fn ptr(&self) -> *const VmContext { 195 | self.ptr.as_const::() 196 | } 197 | 198 | /// Gets the raw u8 mutable pointer to the context. 199 | pub fn ptr_mut_u8(&mut self) -> *mut u8 { 200 | self.ptr.as_mut::() 201 | } 202 | 203 | /// Gets the function imports as a slice. 204 | /// Unsafe because you might be able to get multiple mutable references. 205 | pub unsafe fn function_imports_as_mut_slice(&mut self) -> &mut [VmFunctionImportEntry] { 206 | // Safety: we allocated the memory correctly and the bounds are correct at this point. 207 | let ptr = self 208 | .ptr_mut_u8() 209 | .offset(VmContext::imported_funcs_offset(self.num_globals) as isize) 210 | as *mut VmFunctionImportEntry; 211 | slice::from_raw_parts_mut(ptr, self.num_imported_funcs as usize) 212 | } 213 | 214 | /// Gets a mut slice to the tables. 215 | pub fn get_table(&mut self, idx: TableIndex) -> &mut Table { 216 | &mut self.tables[idx.as_u32() as usize] 217 | } 218 | 219 | /// Write the table data to the VmContext. 220 | pub fn write_tables_to_vmctx(&mut self) { 221 | // Safety: we allocated the memory correctly and the bounds are correct at this point. 222 | let vm_tables = unsafe { 223 | let ptr = self.ptr_mut_u8().offset(VmContext::tables_offset( 224 | self.num_globals, 225 | self.num_imported_funcs, 226 | )) as *mut VmTable; 227 | slice::from_raw_parts_mut(ptr, self.tables.len()) 228 | }; 229 | 230 | for (table, vm_table) in self.tables.iter().zip(vm_tables.iter_mut()) { 231 | *vm_table = table.as_vm_table(); 232 | } 233 | } 234 | 235 | /// Sets a global. 236 | /// Unsafe because index might be outside bounds. 237 | pub unsafe fn set_global(&mut self, idx: u32, global: &Global) { 238 | debug_assert!(idx < self.num_globals); 239 | let ptr = self 240 | .ptr_mut_u8() 241 | .offset(VmContext::global_entry_offset(idx)); 242 | 243 | match global.initializer { 244 | GlobalInit::I32Const(v) => (ptr as *mut i32).write(v), 245 | _ => unimplemented!(), 246 | } 247 | } 248 | 249 | /// Calculates the allocation layout of the VmContext. 250 | fn layout(num_globals: u32, num_imported_funcs: u32, num_tables: u32) -> Layout { 251 | let size = VmContext::size(num_globals, num_imported_funcs, num_tables); 252 | let align = align_of::(); 253 | Layout::from_size_align(size, align).unwrap() 254 | } 255 | } 256 | 257 | impl Drop for VmContextContainer { 258 | fn drop(&mut self) { 259 | unsafe { 260 | dealloc( 261 | self.ptr_mut_u8(), 262 | Self::layout( 263 | self.num_globals, 264 | self.num_imported_funcs, 265 | self.tables.len() as u32, 266 | ), 267 | ); 268 | } 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /kernel/src/wasm/wasi/mod.rs: -------------------------------------------------------------------------------- 1 | //! Wasi implementation 2 | //! 
See https://github.com/WebAssembly/WASI/blob/master/phases/snapshot/docs.md 3 | 4 | #![allow(clippy::too_many_arguments)] 5 | #![allow(clippy::identity_op)] 6 | 7 | mod definitions; 8 | 9 | pub use definitions::*; 10 | 11 | use crate::arch::address::VirtAddr; 12 | use crate::tasking::file::{FileDescriptor, FileHandle, FileIdx}; 13 | use crate::tasking::scheduler::{self, with_current_thread}; 14 | use crate::tasking::scheme::Scheme; 15 | use crate::tasking::scheme_container::schemes; 16 | use crate::wasm::main::{WASM_CALL_CONV, WASM_VMCTX_TYPE}; 17 | use crate::wasm::vmctx::VmContext; 18 | use alloc::boxed::Box; 19 | use alloc::collections::BTreeMap; 20 | use alloc::sync::Arc; 21 | use core::convert::{TryFrom, TryInto}; 22 | use core::slice; 23 | use cranelift_codegen::ir::{types, AbiParam, ArgumentPurpose, Signature}; 24 | use lazy_static::lazy_static; 25 | 26 | abi_functions! { 27 | environ_sizes_get: (environc: WasmPtr<Size>, environ_buf_size: WasmPtr<Size>) -> Errno, 28 | environ_get: (environ: WasmPtr<WasmPtr<u8>>, environ_buf: WasmPtr<u8>) -> Errno, 29 | fd_close: (fd: Fd) -> Errno, 30 | fd_read: (fd: Fd, iovs: WasmPtr<CioVec>, iovs_len: Size, nread: WasmPtr<Size>) -> Errno, 31 | fd_write: (fd: Fd, iovs: WasmPtr<CioVec>, iovs_len: Size, nwritten: WasmPtr<Size>) -> Errno, 32 | fd_prestat_get: (fd: Fd, prestat: WasmPtr<PreStat>) -> Errno, 33 | fd_prestat_dir_name: (fd: Fd, path: WasmPtr<u8>, path_len: Size) -> Errno, 34 | path_open: (dir_fd: Fd, dir_flags: LookupFlags, path: WasmPtr<u8>, path_len: Size, o_flags: OFlags, fs_rights_base: Rights, fs_rights_inheriting: Rights, fd_flags: FdFlags, fd: WasmPtr<Fd>) -> Errno, 35 | proc_exit: (exit_code: ExitCode) -> (), 36 | } 37 | 38 | // TODO: capabilities 39 | impl AbiFunctions for VmContext { 40 | fn environ_sizes_get( 41 | &self, 42 | environc: WasmPtr<Size>, 43 | environ_buf_size: WasmPtr<Size>, 44 | ) -> WasmStatus { 45 | environc.cell(self)?.set(1); 46 | // This is the sum of the string lengths in bytes (including \0 terminators) 47 | let abcdefg = "RUST_BACKTRACE=1"; 48 | environ_buf_size 49 | .cell(self)? 50 | .set(1 + abcdefg.bytes().len() as u32 /* TODO: make safe */); 51 | //environc.cell(self)?.set(0); 52 | //environ_buf_size.cell(self)?.set(0); 53 | Ok(()) 54 | } 55 | 56 | fn environ_get(&self, environ: WasmPtr<WasmPtr<u8>>, environ_buf: WasmPtr<u8>) -> WasmStatus { 57 | // The bytes should all be laid out consecutively, one entry after the other, in `environ_buf`. 58 | let abcdefg = "RUST_BACKTRACE=1"; 59 | let slice = environ_buf.slice( 60 | &self, 61 | (1 + abcdefg.bytes().len()) as u32, /* TODO: make safe */ 62 | )?; 63 | for (byte, cell) in abcdefg.bytes().zip(slice.iter()) { 64 | cell.set(byte); 65 | } 66 | slice[slice.len() - 1].set(0); 67 | 68 | // Write pointers to the environment variables in the buffer.
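// Since there is only the single hard-coded `RUST_BACKTRACE=1` entry (see `environ_sizes_get` above), environ[0] simply points at the start of `environ_buf`.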
69 | let slice = environ.slice(&self, 1)?; 70 | slice[0].set(WasmPtr::from(environ_buf.offset())); 71 | 72 | Ok(()) 73 | } 74 | 75 | fn fd_close(&self, fd: Fd) -> WasmStatus { 76 | println!("fd_close: {}", fd); 77 | Ok(()) 78 | } 79 | 80 | fn fd_read( 81 | &self, 82 | fd: Fd, 83 | iovs: WasmPtr, 84 | iovs_len: u32, 85 | nread: WasmPtr, 86 | ) -> WasmStatus { 87 | self.with_fd_handle(fd, |scheme, handle| { 88 | let mut read = 0usize; 89 | let iovs = iovs.slice(self, iovs_len)?; 90 | for iov in iovs { 91 | let iov = iov.get(); 92 | let buf = iov.buf.slice(self, iov.buf_len)?; 93 | 94 | // TODO: safety 95 | let buf = 96 | unsafe { slice::from_raw_parts_mut(buf as *const _ as *mut u8, buf.len()) }; 97 | let read_now = scheme.read(handle, buf)?; 98 | read = read.saturating_add(read_now); 99 | } 100 | 101 | nread.cell(self)?.set(read.try_into().unwrap_or(u32::MAX)); 102 | 103 | Ok(()) 104 | }) 105 | } 106 | 107 | fn fd_write( 108 | &self, 109 | fd: Fd, 110 | iovs: WasmPtr, 111 | iovs_len: u32, 112 | nwritten: WasmPtr, 113 | ) -> WasmStatus { 114 | //println!("fd_write {} iovs_len={}", fd, iovs_len); 115 | 116 | // TODO: debug 117 | if fd < 3 { 118 | let iovs = iovs.slice(self, iovs_len)?; 119 | 120 | // TODO: overflow? 121 | let mut written = 0; 122 | 123 | for iov in iovs { 124 | let iov = iov.get(); 125 | 126 | let buf = iov.buf.slice(self, iov.buf_len)?; 127 | 128 | // TODO: just prints to stdout for now 129 | for b in buf { 130 | print!("{}", b.get() as char); 131 | } 132 | 133 | written += iov.buf_len; 134 | } 135 | 136 | nwritten.cell(&self)?.set(written); 137 | return Ok(()); 138 | } 139 | 140 | self.with_fd_handle(fd, |scheme, handle| { 141 | let mut written = 0usize; 142 | let iovs = iovs.slice(self, iovs_len)?; 143 | for iov in iovs { 144 | let iov = iov.get(); 145 | let buf = iov.buf.slice(self, iov.buf_len)?; 146 | 147 | // TODO: safety 148 | let buf = unsafe { slice::from_raw_parts(buf as *const _ as *const u8, buf.len()) }; 149 | let written_now = scheme.write(handle, buf)?; 150 | written = written.saturating_add(written_now); 151 | } 152 | 153 | nwritten 154 | .cell(self)? 
155 | .set(written.try_into().unwrap_or(u32::MAX)); 156 | 157 | Ok(()) 158 | }) 159 | } 160 | 161 | fn fd_prestat_get(&self, fd: Fd, prestat: WasmPtr<PreStat>) -> WasmStatus { 162 | self.with_fd(fd, |fd| { 163 | // TODO: check if it's a directory, if it's not: return ENOTDIR 164 | let pre_open_path = fd.pre_open_path().ok_or(Errno::NotSup)?; 165 | if let Ok(pr_name_len) = u32::try_from(pre_open_path.len() + 1) { 166 | prestat.cell(self)?.set(PreStat { 167 | tag: 0, 168 | inner: PreStatInner { 169 | dir: PreStatDir { pr_name_len }, 170 | }, 171 | }); 172 | //println!("fd_prestat_get: write {}", pr_name_len); 173 | Ok(()) 174 | } else { 175 | Err(Errno::NameTooLong) 176 | } 177 | }) 178 | } 179 | 180 | fn fd_prestat_dir_name(&self, fd: Fd, path: WasmPtr<u8>, path_len: Size) -> WasmStatus { 181 | self.with_fd(fd, |fd| { 182 | // TODO: check if it's a directory, if it's not: return ENOTDIR 183 | let pre_open_path = fd.pre_open_path().ok_or(Errno::NotSup)?; 184 | if pre_open_path.len() + 1 > path_len as usize { 185 | Err(Errno::NameTooLong) 186 | } else { 187 | //println!("fd_prestat_dir_name: {:?}", pre_open_path); 188 | path.write_from_slice_with_null(self, path_len, pre_open_path) 189 | } 190 | }) 191 | } 192 | 193 | fn path_open( 194 | &self, 195 | dir_fd: Fd, 196 | dir_flags: LookupFlags, 197 | path: WasmPtr<u8>, 198 | path_len: Size, 199 | o_flags: OFlags, 200 | fs_rights_base: Rights, 201 | fs_rights_inheriting: Rights, 202 | fd_flags: FdFlags, 203 | fd: WasmPtr<Fd>, 204 | ) -> WasmStatus { 205 | // TODO: handle the flags and rights 206 | println!("path_open: {} {}", dir_fd, path.str(self, path_len)?); 207 | 208 | /*self.with_fd(dir_fd, |dir_fd| { 209 | 210 | // TODO 211 | fd.cell(&self)?.set(3); // TODO: hack 212 | Ok(()) 213 | })*/ 214 | let idx = with_current_thread(|t| { 215 | t.file_descriptor_table().insert_lowest({ 216 | schemes() 217 | .read() 218 | .open_self(Box::new([])) 219 | .expect("self scheme") 220 | }) 221 | }) 222 | .unwrap(); // TODO 223 | 224 | fd.cell(self)?.set(idx as u32); 225 | 226 | Ok(()) 227 | } 228 | 229 | fn proc_exit(&self, exit_code: ExitCode) { 230 | scheduler::thread_exit(exit_code); 231 | } 232 | } 233 | 234 | impl VmContext { 235 | /// Execute with fd handle context. 236 | fn with_fd_handle<F, T>(&self, fd: Fd, f: F) -> WasmResult<T> 237 | where 238 | F: FnOnce(Arc<Scheme>, FileHandle) -> WasmResult<T>, 239 | { 240 | with_current_thread(|thread| { 241 | let tbl = thread.file_descriptor_table(); 242 | let fd = tbl.get(fd as FileIdx).ok_or(Errno::BadF)?; 243 | let (scheme, handle) = fd.scheme_and_handle()?; 244 | drop(tbl); 245 | f(scheme, handle) 246 | }) 247 | } 248 | 249 | /// Execute with full fd context. 250 | fn with_fd<F, T>(&self, fd: Fd, f: F) -> WasmResult<T> 251 | where 252 | F: FnOnce(&FileDescriptor) -> WasmResult<T>, 253 | { 254 | with_current_thread(|thread| { 255 | let tbl = thread.file_descriptor_table(); 256 | f(tbl.get(fd as FileIdx).ok_or(Errno::BadF)?) 257 | }) 258 | } 259 | } 260 | 261 | /// Gets the address for a WASI syscall and validates its signature.
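/// /// The wasm runtime is expected to call this while resolving a module's imports; a sketch of such a caller (hypothetical, the real lookup lives in the runtime code): /// ```ignore /// let addr = get_address_for_wasi_and_validate_sig("fd_write", &import_sig) /// .ok_or("unknown WASI import or signature mismatch")?; /// ```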
262 | pub fn get_address_for_wasi_and_validate_sig(name: &str, sig: &Signature) -> Option { 263 | let (addr, reference_sig) = ABI_MAP.get(name)?; 264 | 265 | if reference_sig != sig { 266 | None 267 | } else { 268 | Some(*addr) 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /kernel/src/wasm/wasi/definitions.rs: -------------------------------------------------------------------------------- 1 | use crate::tasking::scheduler::with_current_thread; 2 | use crate::wasm::vmctx::VmContext; 3 | use bitflags::bitflags; 4 | use core::cell::Cell; 5 | use core::marker::PhantomData; 6 | use core::mem::{align_of, size_of}; 7 | use core::{iter, slice}; 8 | 9 | #[repr(u32)] // 32-bit for Cranelift 10 | #[allow(dead_code)] 11 | #[derive(Debug, Copy, Clone)] 12 | pub enum Errno { 13 | /// No error occurred. 14 | Success, 15 | /// Argument list too long. 16 | ArgListTooBig, 17 | /// Permission denied. 18 | Access, 19 | /// Address in use. 20 | AddrInUse, 21 | /// Address not available. 22 | AddrNotAvail, 23 | /// Address family not supported. 24 | AfNoSupport, 25 | /// Resource unavailable, or operation would block. 26 | Again, 27 | /// Connection already in progress. 28 | Already, 29 | /// Bad file descriptor. 30 | BadF, 31 | /// Bad message. 32 | BadMsg, 33 | /// Device or resource busy. 34 | Busy, 35 | /// Operation canceled. 36 | Canceled, 37 | /// No child process. 38 | Child, 39 | /// Connection aborted. 40 | ConnAborted, 41 | /// Connection refused. 42 | ConnRefused, 43 | /// Connection reset. 44 | ConnReset, 45 | /// Resource deadlock would occur. 46 | DeadLk, 47 | /// Destination address required. 48 | DestAddrReq, 49 | /// Mathematics argument out of domain of function. 50 | Dom, 51 | /// Reserved. 52 | Dquot, 53 | /// File exists. 54 | Exist, 55 | /// Bad address. 56 | Fault, 57 | /// File too large. 58 | FBig, 59 | /// Host is unreachable. 60 | HostUnreach, 61 | /// Identifier removed. 62 | Idrm, 63 | /// Illegal byte sequence. 64 | Ilseq, 65 | /// Operation in progress. 66 | Inprogress, 67 | /// Interrupted function. 68 | Intr, 69 | /// Invalid argument. 70 | Inval, 71 | /// I/O error. 72 | Io, 73 | /// Socket is connected. 74 | IsConn, 75 | /// Is a directory. 76 | Isdir, 77 | /// Too many levels of symbolic links. 78 | Loop, 79 | /// File descriptor value too large. 80 | MFile, 81 | /// Too many links. 82 | Mlink, 83 | /// Message too large. 84 | MsgSize, 85 | /// Reserved. 86 | Multihop, 87 | /// Filename too long. 88 | NameTooLong, 89 | /// Network is down. 90 | NetDown, 91 | /// Connection aborted by network. 92 | NetReset, 93 | /// Network unreachable. 94 | NetUnreach, 95 | /// Too many files open in system. 96 | NFile, 97 | /// No buffer space available. 98 | NoBufs, 99 | /// No such device. 100 | NoDev, 101 | /// No such file or directory. 102 | NoEnt, 103 | /// Executable file format error. 104 | NoExec, 105 | /// No locks available. 106 | NoLck, 107 | /// Reserved. 108 | NoLink, 109 | /// Not enough space. 110 | NoMem, 111 | /// No message of the desired type. 112 | NoMsg, 113 | /// Protocol not available. 114 | NoProtoopt, 115 | /// No space left on device. 116 | NoSpc, 117 | /// Function not supported. 118 | NoSys, 119 | /// The socket is not connected. 120 | NotConn, 121 | /// Not a directory or a symbolic link to a directory. 122 | NotDir, 123 | /// Directory not empty. 124 | NotEmpty, 125 | /// State not recoverable. 126 | NotRecoverable, 127 | /// Not a socket. 
128 | NotSock, 129 | /// Not supported, or operation not supported on socket. 130 | NotSup, 131 | /// Inappropriate I/O control operation. 132 | NoTty, 133 | /// No such device or address. 134 | Nxio, 135 | /// Value too large to be stored in data type. 136 | Overflow, 137 | /// Previous owner died. 138 | Ownerdead, 139 | /// Operation not permitted. 140 | Perm, 141 | /// Broken pipe. 142 | Pipe, 143 | /// Protocol error. 144 | Proto, 145 | /// Protocol not supported. 146 | Protonosupport, 147 | /// Protocol wrong type for socket. 148 | Prototype, 149 | /// Result too large. 150 | Range, 151 | /// Read-only file system. 152 | Rofs, 153 | /// Invalid seek. 154 | Spipe, 155 | /// No such process. 156 | Srch, 157 | /// Reserved. 158 | Stale, 159 | /// Connection timed out. 160 | TimedOut, 161 | /// Text file busy. 162 | Txtbsy, 163 | /// Cross-device link. 164 | Xdev, 165 | /// Extension: Capabilities insufficient. 166 | NotCapable, 167 | } 168 | 169 | /// WebAssembly pointer type to use in ABI functions. 170 | #[repr(transparent)] 171 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 172 | pub struct WasmPtr<T> { 173 | offset: u32, 174 | _phantom: PhantomData<T>, 175 | } 176 | 177 | impl<T> WasmPtr<T> { 178 | /// Creates a WasmPtr from an offset. 179 | pub fn from(offset: u32) -> Self { 180 | Self { 181 | offset, 182 | _phantom: PhantomData, 183 | } 184 | } 185 | 186 | /// Gets the offset from the base memory address. 187 | #[inline] 188 | pub fn offset(&self) -> u32 { 189 | self.offset 190 | } 191 | 192 | /// Internal helper function to get a real pointer or an error from a WasmPtr. 193 | fn get_ptr_and_verify(&self, ctx: &VmContext, size: usize) -> WasmResult<*const u8> { 194 | let alignment = align_of::<T>() as u32; 195 | if self.offset % alignment != 0 196 | || self.offset as usize + size > with_current_thread(|thread| thread.heap_size()) 197 | { 198 | Err(Errno::Fault) 199 | } else { 200 | // Safety: pointer is correctly aligned and points to real data. 201 | unsafe { Ok(ctx.heap_ptr.as_const::<u8>().add(self.offset as usize)) } 202 | } 203 | } 204 | 205 | /// Gets a cell from a Wasm pointer, does checks for alignment and bounds. 206 | /// Returns Ok(Cell) on success and Err(Errno) on fail. 207 | pub fn cell<'c>(&self, ctx: &VmContext) -> WasmResult<&'c Cell<T>> { 208 | // Safety: pointer is correctly aligned and points to real data. 209 | self.get_ptr_and_verify(ctx, size_of::<T>()) 210 | .map(|p| unsafe { &*(p as *const Cell<T>) }) 211 | } 212 | 213 | /// Gets a slice of cells from a Wasm pointer, does checks for alignment and bounds. 214 | /// Returns Ok(slice) on success and Err(Errno) on fail. 215 | pub fn slice<'s>(&self, ctx: &VmContext, len: Size) -> WasmResult<&'s [Cell<T>]> { 216 | let len = len as usize; 217 | 218 | // Safety: pointer is correctly aligned and points to real data. 219 | self.get_ptr_and_verify( 220 | ctx, 221 | (size_of::<T>() + (size_of::<T>() % align_of::<T>())) * len, 222 | ) 223 | .map(|p| unsafe { slice::from_raw_parts(p as *const Cell<T>, len) }) 224 | } 225 | 226 | /// Gets a string from a Wasm pointer, does checks for valid UTF-8 string. 227 | /// Returns Ok(str) on success and Err(Errno) on fail. 228 | pub fn str<'s>(&self, ctx: &VmContext, len: Size) -> WasmResult<&'s str> { 229 | let len = len as usize; 230 | self.get_ptr_and_verify(ctx, len) 231 | .map(|p| unsafe { slice::from_raw_parts(p, len) }) 232 | .and_then(|p| core::str::from_utf8(p).map_err(|_| Errno::Inval)) 233 | } 234 | } 235 | 236 | impl WasmPtr<u8> { 237 | /// Write from source slice and add a null byte.
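/// /// The destination must be able to hold `src.len() + 1` bytes; the `zip` below silently truncates when `len` is smaller, which is why callers such as `fd_prestat_dir_name` check the length first. A sketch of such a call (mirroring that caller): /// ```ignore /// path.write_from_slice_with_null(ctx, path_len, pre_open_path)?; // copies the bytes and appends '\0' /// ```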
238 | pub fn write_from_slice_with_null( 239 | &self, 240 | ctx: &VmContext, 241 | len: Size, 242 | src: &[u8], 243 | ) -> WasmResult<()> { 244 | let slice = self.slice(ctx, len)?; 245 | for (dst, src) in slice.iter().zip(src.iter().chain(iter::once(&0))) { 246 | dst.set(*src); 247 | } 248 | Ok(()) 249 | } 250 | } 251 | 252 | /// Size type. 253 | pub type Size = u32; 254 | 255 | /// File descriptor. 256 | pub type Fd = u32; 257 | 258 | /// Exit code for process. 259 | pub type ExitCode = u32; 260 | 261 | pub type WasmResult = Result; 262 | pub type WasmStatus = WasmResult<()>; 263 | 264 | bitflags! { 265 | #[repr(C)] 266 | pub struct LookupFlags: u32 { 267 | const SYMLINK_FOLLOW = 1 << 0; 268 | } 269 | } 270 | 271 | bitflags! { 272 | #[repr(C)] 273 | pub struct OFlags: u16 { 274 | const CREAT = 1 << 0; 275 | const DIRECTORY = 1 << 1; 276 | const EXCL = 1 << 2; 277 | const TRUNC = 1 << 3; 278 | } 279 | } 280 | 281 | bitflags! { 282 | #[repr(C)] 283 | pub struct FdFlags: u16 { 284 | const APPEND = 1 << 0; 285 | const DSYNC = 1 << 1; 286 | const NONBLOCK = 1 << 2; 287 | const RSYNC = 1 << 3; 288 | const SYNC = 1 << 4; 289 | } 290 | } 291 | 292 | bitflags! { 293 | #[repr(C)] 294 | pub struct Rights: u64 { 295 | const FD_DATASYNC = 1 << 0; 296 | const FD_READ = 1 << 1; 297 | const FD_SEEK = 1 << 2; 298 | const FD_FDSTAT_SET_FLAGS = 1 << 3; 299 | const FD_SYNC = 1 << 4; 300 | const FD_TELL = 1 << 5; 301 | const FD_WRITE = 1 << 6; 302 | const FD_ADVISE = 1 << 7; 303 | const FD_ALLOCATE = 1 << 8; 304 | const PATH_CREATE_DIRECTORY = 1 << 9; 305 | const PATH_CREATE_FILE = 1 << 10; 306 | const PATH_LINK_SOURCE = 1 << 11; 307 | const PATH_LINK_TARGET = 1 << 12; 308 | const PATH_OPEN = 1 << 13; 309 | const FD_READDIR = 1 << 14; 310 | const PATH_READLINK = 1 << 15; 311 | const PATH_RENAME_SOURCE = 1 << 16; 312 | const PATH_RENAME_TARGET = 1 << 17; 313 | const PATH_FILESTAT_GET = 1 << 18; 314 | const PATH_FILESTAT_SET_SIZE = 1 << 19; 315 | const PATH_FILESTAT_SET_TIMES = 1 << 20; 316 | const FD_FILESTAT_GET = 1 << 21; 317 | const FD_FILESTAT_SET_SIZE = 1 << 22; 318 | const FD_FILESTAT_SET_TIMES = 1 << 23; 319 | const PATH_SYMLINK = 1 << 24; 320 | const PATH_REMOVE_DIRECTORY = 1 << 25; 321 | const PATH_UNLINK_FILE = 1 << 26; 322 | const POLL_FD_READWRITE = 1 << 27; 323 | const SOCK_SHUTDOWN = 1 << 28; 324 | } 325 | } 326 | 327 | #[repr(C)] 328 | #[derive(Debug, Copy, Clone)] 329 | pub struct CioVec { 330 | pub buf: WasmPtr, 331 | pub buf_len: u32, 332 | } 333 | 334 | #[repr(C)] 335 | #[derive(Clone, Copy)] 336 | pub struct PreStatDir { 337 | pub pr_name_len: Size, 338 | } 339 | 340 | #[repr(C)] 341 | pub union PreStatInner { 342 | pub dir: PreStatDir, 343 | } 344 | 345 | #[repr(C)] 346 | pub struct PreStat { 347 | pub tag: u8, 348 | pub inner: PreStatInner, 349 | } 350 | --------------------------------------------------------------------------------