├── .gitignore ├── .gitlab-ci.yml ├── .gitmodules ├── .helix ├── config.toml └── languages.toml ├── ARM-AARCH64-PORT-OUTLINE.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── build.rs ├── clippy.sh ├── config.toml.example ├── linkers ├── aarch64.ld ├── i686.ld ├── riscv64.ld └── x86_64.ld ├── res └── unifont.font ├── rustfmt.toml ├── src ├── acpi │ ├── gtdt.rs │ ├── hpet.rs │ ├── madt │ │ ├── arch │ │ │ ├── aarch64.rs │ │ │ ├── other.rs │ │ │ └── x86.rs │ │ └── mod.rs │ ├── mod.rs │ ├── rsdp.rs │ ├── rsdt.rs │ ├── rxsdt.rs │ ├── sdt.rs │ ├── spcr.rs │ └── xsdt.rs ├── allocator │ ├── linked_list.rs │ ├── mod.rs │ └── slab.rs ├── arch │ ├── aarch64 │ │ ├── consts.rs │ │ ├── debug.rs │ │ ├── device │ │ │ ├── cpu │ │ │ │ ├── mod.rs │ │ │ │ └── registers │ │ │ │ │ ├── control_regs.rs │ │ │ │ │ └── mod.rs │ │ │ ├── generic_timer.rs │ │ │ ├── irqchip │ │ │ │ ├── gic.rs │ │ │ │ ├── gicv3.rs │ │ │ │ ├── irq_bcm2835.rs │ │ │ │ ├── irq_bcm2836.rs │ │ │ │ ├── mod.rs │ │ │ │ └── null.rs │ │ │ ├── mod.rs │ │ │ ├── rtc.rs │ │ │ ├── serial.rs │ │ │ └── uart_pl011.rs │ │ ├── interrupt │ │ │ ├── exception.rs │ │ │ ├── handler.rs │ │ │ ├── irq.rs │ │ │ ├── mod.rs │ │ │ ├── syscall.rs │ │ │ └── trace.rs │ │ ├── ipi.rs │ │ ├── macros.rs │ │ ├── misc.rs │ │ ├── mod.rs │ │ ├── paging │ │ │ ├── entry.rs │ │ │ ├── mapper.rs │ │ │ └── mod.rs │ │ ├── rmm.rs │ │ ├── start.rs │ │ ├── stop.rs │ │ ├── time.rs │ │ └── vectors.rs │ ├── mod.rs │ ├── riscv64 │ │ ├── consts.rs │ │ ├── debug.rs │ │ ├── device │ │ │ ├── cpu │ │ │ │ └── mod.rs │ │ │ ├── irqchip │ │ │ │ ├── clint.rs │ │ │ │ ├── clint_sbi.rs │ │ │ │ ├── hlic.rs │ │ │ │ ├── mod.rs │ │ │ │ └── plic.rs │ │ │ ├── mod.rs │ │ │ └── serial.rs │ │ ├── interrupt │ │ │ ├── exception.rs │ │ │ ├── handler.rs │ │ │ ├── mod.rs │ │ │ ├── syscall.rs │ │ │ └── trace.rs │ │ ├── ipi.rs │ │ ├── macros.rs │ │ ├── misc.rs │ │ ├── mod.rs │ │ ├── paging │ │ │ ├── entry.rs │ │ │ ├── mapper.rs │ │ │ └── mod.rs │ │ ├── rmm.rs │ │ ├── 
start.rs │ │ ├── stop.rs │ │ └── time.rs │ ├── x86 │ │ ├── consts.rs │ │ ├── gdt.rs │ │ ├── interrupt │ │ │ ├── exception.rs │ │ │ ├── handler.rs │ │ │ ├── irq.rs │ │ │ ├── mod.rs │ │ │ └── syscall.rs │ │ ├── macros.rs │ │ ├── mod.rs │ │ ├── paging │ │ │ ├── mapper.rs │ │ │ └── mod.rs │ │ ├── rmm.rs │ │ └── start.rs │ ├── x86_64 │ │ ├── alternative.rs │ │ ├── consts.rs │ │ ├── cpuid.rs │ │ ├── gdt.rs │ │ ├── interrupt │ │ │ ├── exception.rs │ │ │ ├── handler.rs │ │ │ ├── irq.rs │ │ │ ├── mod.rs │ │ │ └── syscall.rs │ │ ├── macros.rs │ │ ├── misc.rs │ │ ├── mod.rs │ │ ├── paging │ │ │ ├── mapper.rs │ │ │ └── mod.rs │ │ ├── rmm.rs │ │ └── start.rs │ └── x86_shared │ │ ├── cpuid.rs │ │ ├── debug.rs │ │ ├── device │ │ ├── cpu.rs │ │ ├── hpet.rs │ │ ├── ioapic.rs │ │ ├── local_apic.rs │ │ ├── mod.rs │ │ ├── pic.rs │ │ ├── pit.rs │ │ ├── rtc.rs │ │ ├── serial.rs │ │ ├── system76_ec.rs │ │ └── tsc.rs │ │ ├── idt.rs │ │ ├── interrupt │ │ ├── ipi.rs │ │ ├── mod.rs │ │ └── trace.rs │ │ ├── ipi.rs │ │ ├── mod.rs │ │ ├── pti.rs │ │ ├── stop.rs │ │ └── time.rs ├── asm │ ├── x86 │ │ └── trampoline.asm │ └── x86_64 │ │ └── trampoline.asm ├── common │ ├── aligned_box.rs │ ├── int_like.rs │ ├── mod.rs │ └── unique.rs ├── context │ ├── arch │ │ ├── aarch64.rs │ │ ├── riscv64.rs │ │ ├── x86.rs │ │ └── x86_64.rs │ ├── context.rs │ ├── file.rs │ ├── memory.rs │ ├── mod.rs │ ├── signal.rs │ ├── switch.rs │ └── timeout.rs ├── cpu_set.rs ├── cpu_stats.rs ├── debugger.rs ├── devices │ ├── graphical_debug │ │ ├── debug.rs │ │ ├── display.rs │ │ └── mod.rs │ ├── mod.rs │ └── uart_16550.rs ├── dtb │ ├── irqchip.rs │ └── mod.rs ├── elf.rs ├── event.rs ├── externs.rs ├── log.rs ├── main.rs ├── memory │ ├── kernel_mapper.rs │ └── mod.rs ├── panic.rs ├── percpu.rs ├── profiling.rs ├── ptrace.rs ├── scheme │ ├── acpi.rs │ ├── debug.rs │ ├── dtb.rs │ ├── event.rs │ ├── irq.rs │ ├── memory.rs │ ├── mod.rs │ ├── pipe.rs │ ├── proc.rs │ ├── root.rs │ ├── serio.rs │ ├── sys │ │ ├── block.rs │ │ ├── 
context.rs │ │ ├── cpu.rs │ │ ├── exe.rs │ │ ├── fdstat.rs │ │ ├── iostat.rs │ │ ├── irq.rs │ │ ├── log.rs │ │ ├── mod.rs │ │ ├── scheme.rs │ │ ├── scheme_num.rs │ │ ├── stat.rs │ │ ├── syscall.rs │ │ └── uname.rs │ ├── time.rs │ └── user.rs ├── startup │ ├── memory.rs │ └── mod.rs ├── sync │ ├── mod.rs │ ├── wait_condition.rs │ └── wait_queue.rs ├── syscall │ ├── debug.rs │ ├── fs.rs │ ├── futex.rs │ ├── mod.rs │ ├── privilege.rs │ ├── process.rs │ ├── time.rs │ └── usercopy.rs └── time.rs └── targets ├── aarch64-unknown-kernel.json ├── i686-unknown-kernel.json ├── riscv64-unknown-kernel.json └── x86_64-unknown-kernel.json /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | /config.toml 3 | .gitlab-ci-local/ 4 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: "redoxos/redoxer:latest" 2 | 3 | variables: 4 | GIT_SUBMODULE_STRATEGY: recursive 5 | 6 | stages: 7 | - host 8 | - build 9 | - test 10 | # TODO: benchmarks and profiling (maybe manually enabled for relevant MRs)? 
11 | 12 | build: 13 | stage: build 14 | script: 15 | - mkdir -p target/${ARCH} 16 | - TARGET=${ARCH}-unknown-redox redoxer env make BUILD=target/${ARCH} 17 | parallel: 18 | matrix: 19 | - ARCH: [x86_64, i686, aarch64, riscv64gc] 20 | 21 | fmt: 22 | stage: host 23 | script: 24 | - rustup component add rustfmt-preview 25 | - cargo fmt -- --check 26 | 27 | unit_test: 28 | stage: test 29 | script: 30 | - TARGET=x86_64-unknown-redox redoxer test 31 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "slab_allocator"] 2 | path = slab_allocator 3 | url = https://gitlab.redox-os.org/redox-os/slab_allocator 4 | [submodule "rmm"] 5 | path = rmm 6 | url = https://gitlab.redox-os.org/redox-os/rmm.git 7 | branch = master 8 | [submodule "redox-path"] 9 | path = redox-path 10 | url = https://gitlab.redox-os.org/redox-os/redox-path.git 11 | branch = main 12 | -------------------------------------------------------------------------------- /.helix/config.toml: -------------------------------------------------------------------------------- 1 | [editor] 2 | auto-format = false 3 | -------------------------------------------------------------------------------- /.helix/languages.toml: -------------------------------------------------------------------------------- 1 | [[language]] 2 | name = "rust" 3 | 4 | [[language-server.rust-analyzer.config.cargo]] 5 | extraEnv = ["RUST_TARGET_PATH=targets"] 6 | # Select one of targets to make lsp work for your confguration 7 | # Do not commit this change 8 | # TODO: find a better way to do this 9 | # target = "aarch64-unknown-kernel" 10 | 11 | [[language-server.rust-analyzer.config.check]] 12 | targets = ["x86_64-unknown-kernel", "i686-unknown-kernel", "aarch64-unknown-kernel"] 13 | 14 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "kernel" 3 | version = "0.5.12" 4 | build = "build.rs" 5 | edition = "2021" 6 | 7 | [build-dependencies] 8 | cc = "1.0" 9 | rustc-cfg = "0.5" 10 | toml = "0.8" 11 | 12 | [dependencies] 13 | bitflags = "2" 14 | bitfield = "0.13.2" 15 | hashbrown = { version = "0.14.3", default-features = false, features = ["ahash", "inline-more"] } 16 | linked_list_allocator = "0.9.0" 17 | log = "0.4" 18 | redox-path = "0.2.0" 19 | redox_syscall = { git = "https://gitlab.redox-os.org/redox-os/syscall.git", branch = "master", default-features = false } 20 | slab_allocator = { path = "slab_allocator", optional = true } 21 | spin = "0.9.8" 22 | spinning_top = { version = "0.3", features = ["arc_lock"] } 23 | rmm = { path = "rmm", default-features = false } 24 | arrayvec = { version = "0.7.4", default-features = false } 25 | slab = { version = "0.4", default-features = false } 26 | # TODO: Remove 27 | indexmap = { version = "2.5.0", default-features = false } 28 | 29 | [dependencies.goblin] 30 | version = "0.2.1" 31 | default-features = false 32 | features = ["elf32", "elf64"] 33 | 34 | [dependencies.rustc-demangle] 35 | version = "0.1.16" 36 | default-features = false 37 | 38 | [target.'cfg(any(target_arch = "aarch64", target_arch = "riscv64"))'.dependencies] 39 | byteorder = { version = "1", default-features = false } 40 | fdt = { git = "https://github.com/repnop/fdt.git", rev = "2fb1409edd1877c714a0aa36b6a7c5351004be54" } 41 | 42 | [target.'cfg(any(target_arch = "x86", target_arch = "x86_64"))'.dependencies] 43 | raw-cpuid = "10.2.0" 44 | x86 = { version = "0.47.0", default-features = false } 45 | 46 | [target.'cfg(any(target_arch = "riscv64", target_arch = "riscv32"))'.dependencies] 47 | sbi-rt = "0.0.3" 48 | 49 | [features] 50 | default = [ 51 | "acpi", 52 | #TODO: issues with Alder Lake and newer CPUs: "multi_core", 53 | "graphical_debug", 54 | "serial_debug", 55 | 
"self_modifying", 56 | "x86_kvm_pv", 57 | ] 58 | 59 | # Activates some limited code-overwriting optimizations, based on CPU features. 60 | self_modifying = [] 61 | 62 | acpi = [] 63 | graphical_debug = [] 64 | lpss_debug = [] 65 | multi_core = ["acpi"] 66 | profiling = [] 67 | #TODO: remove when threading issues are fixed 68 | pti = [] 69 | qemu_debug = [] 70 | serial_debug = [] 71 | system76_ec_debug = [] 72 | slab = ["slab_allocator"] 73 | sys_stat = [] 74 | x86_kvm_pv = [] 75 | 76 | debugger = ["syscall_debug"] 77 | syscall_debug = [] 78 | 79 | sys_fdstat = [] 80 | 81 | [profile.dev] 82 | # Avoids having to define the eh_personality lang item and reduces kernel size 83 | panic = "abort" 84 | 85 | [profile.release] 86 | # Avoids having to define the eh_personality lang item and reduces kernel size 87 | panic = "abort" 88 | 89 | lto = true 90 | 91 | debug = "full" 92 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Jeremy Soller 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SOURCE:=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) 2 | BUILD?=$(CURDIR) 3 | export RUST_TARGET_PATH=$(SOURCE)/targets 4 | 5 | ifeq ($(TARGET),) 6 | ARCH?=$(shell uname -m) 7 | else 8 | ARCH?=$(shell echo "$(TARGET)" | cut -d - -f1) 9 | endif 10 | 11 | ifeq ($(ARCH),riscv64gc) 12 | override ARCH:=riscv64 13 | endif 14 | GNU_TARGET=$(ARCH)-unknown-redox 15 | 16 | 17 | all: $(BUILD)/kernel $(BUILD)/kernel.sym 18 | 19 | LD_SCRIPT=$(SOURCE)/linkers/$(ARCH).ld 20 | TARGET_SPEC=$(RUST_TARGET_PATH)/$(ARCH)-unknown-kernel.json 21 | 22 | $(BUILD)/kernel.all: $(LD_SCRIPT) $(TARGET_SPEC) $(shell find $(SOURCE) -name "*.rs" -type f) 23 | cargo rustc \ 24 | --bin kernel \ 25 | --manifest-path "$(SOURCE)/Cargo.toml" \ 26 | --target "$(TARGET_SPEC)" \ 27 | --release \ 28 | -Z build-std=core,alloc \ 29 | -- \ 30 | -C link-arg=-T -Clink-arg="$(LD_SCRIPT)" \ 31 | -C link-arg=-z -Clink-arg=max-page-size=0x1000 \ 32 | --emit link="$(BUILD)/kernel.all" 33 | 34 | $(BUILD)/kernel.sym: $(BUILD)/kernel.all 35 | $(GNU_TARGET)-objcopy \ 36 | --only-keep-debug \ 37 | "$(BUILD)/kernel.all" \ 38 | "$(BUILD)/kernel.sym" 39 | 40 | $(BUILD)/kernel: $(BUILD)/kernel.all 41 | $(GNU_TARGET)-objcopy \ 42 | --strip-debug \ 43 | "$(BUILD)/kernel.all" \ 44 | "$(BUILD)/kernel" 45 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kernel 2 | 3 | Redox OS Microkernel 4 | 5 | 
[![docs](https://img.shields.io/badge/docs-master-blue.svg)](https://docs.rs/redox_syscall/latest/syscall/) 6 | [![SLOCs counter](https://tokei.rs/b1/github/redox-os/kernel?category=code)](https://github.com/XAMPPRocky/tokei) 7 | [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) 8 | 9 | ## Requirements 10 | 11 | * [`nasm`](https://nasm.us/) needs to be available on the PATH at build time. 12 | 13 | ## Building The Documentation 14 | 15 | Use this command: 16 | 17 | ```sh 18 | cargo doc --open --target x86_64-unknown-none 19 | ``` 20 | 21 | ## Debugging 22 | 23 | ### QEMU 24 | 25 | Running [QEMU](https://www.qemu.org) with the `-s` flag will set up QEMU to listen on port `1234` for a GDB client to connect to it. To debug the redox kernel run. 26 | 27 | ```sh 28 | make qemu gdb=yes 29 | ``` 30 | 31 | This will start a virtual machine with and listen on port `1234` for a GDB or LLDB client. 32 | 33 | ### GDB 34 | 35 | If you are going to use [GDB](https://www.gnu.org/software/gdb/), run these commands to load debug symbols and connect to your running kernel: 36 | 37 | ``` 38 | (gdb) symbol-file build/kernel.sym 39 | (gdb) target remote localhost:1234 40 | ``` 41 | 42 | ### LLDB 43 | 44 | If you are going to use [LLDB](https://lldb.llvm.org/), run these commands to start debugging: 45 | 46 | ``` 47 | (lldb) target create -s build/kernel.sym build/kernel 48 | (lldb) gdb-remote localhost:1234 49 | ``` 50 | 51 | After connecting to your kernel you can set some interesting breakpoints and `continue` 52 | the process. See your debuggers man page for more information on useful commands to run. 53 | 54 | ## Notes 55 | 56 | - Always use `foo.get(n)` instead of `foo[n]` and try to cover for the possibility of `Option::None`. Doing the regular way may work fine for applications, but never in the kernel. No possible panics should ever exist in kernel space, because then the whole OS would just stop working. 
57 | 58 | - If you receive a kernel panic in QEMU, use `pkill qemu-system` to kill the frozen QEMU process. 59 | 60 | ## How To Contribute 61 | 62 | To learn how to contribute to this system component you need to read the following document: 63 | 64 | - [CONTRIBUTING.md](https://gitlab.redox-os.org/redox-os/redox/-/blob/master/CONTRIBUTING.md) 65 | 66 | ## Development 67 | 68 | To learn how to do development with this system component inside the Redox build system you need to read the [Build System](https://doc.redox-os.org/book/build-system-reference.html) and [Coding and Building](https://doc.redox-os.org/book/coding-and-building.html) pages. 69 | 70 | ### How To Build 71 | 72 | To build this system component you need to download the Redox build system, you can learn how to do it on the [Building Redox](https://doc.redox-os.org/book/podman-build.html) page. 73 | 74 | This is necessary because they only work with cross-compilation to a Redox virtual machine, but you can do some testing from Linux. 75 | 76 | ## Funding - _Unix-style Signals and Process Management_ 77 | 78 | This project is funded through [NGI Zero Core](https://nlnet.nl/core), a fund established by [NLnet](https://nlnet.nl) with financial support from the European Commission's [Next Generation Internet](https://ngi.eu) program. Learn more at the [NLnet project page](https://nlnet.nl/project/RedoxOS-Signals). 
79 | 80 | [NLnet foundation logo](https://nlnet.nl) 81 | [NGI Zero Logo](https://nlnet.nl/core) 82 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use rustc_cfg::Cfg; 2 | use std::{env, path::Path, process::Command}; 3 | use toml::Table; 4 | 5 | fn parse_kconfig(arch: &str) -> Option<()> { 6 | println!("cargo:rerun-if-changed=config.toml"); 7 | 8 | assert!(Path::new("config.toml.example").try_exists().unwrap()); 9 | if !Path::new("config.toml").try_exists().unwrap() { 10 | std::fs::copy("config.toml.example", "config.toml").unwrap(); 11 | } 12 | let config_str = std::fs::read_to_string("config.toml").unwrap(); 13 | let root: Table = toml::from_str(&config_str).unwrap(); 14 | 15 | let altfeatures = root 16 | .get("arch")? 17 | .as_table() 18 | .unwrap() 19 | .get(arch)? 20 | .as_table() 21 | .unwrap() 22 | .get("features")? 23 | .as_table() 24 | .unwrap(); 25 | 26 | let self_modifying = env::var("CARGO_FEATURE_SELF_MODIFYING").is_ok(); 27 | 28 | for (name, value) in altfeatures { 29 | let mut choice = value.as_str().unwrap(); 30 | assert!(matches!(choice, "always" | "never" | "auto")); 31 | 32 | if !self_modifying && choice == "auto" { 33 | choice = "never"; 34 | } 35 | 36 | println!("cargo:rustc-cfg=cpu_feature_{choice}=\"{name}\""); 37 | } 38 | 39 | Some(()) 40 | } 41 | 42 | fn main() { 43 | println!("cargo:rustc-env=TARGET={}", env::var("TARGET").unwrap()); 44 | 45 | let out_dir = env::var("OUT_DIR").unwrap(); 46 | let cfg = Cfg::of(env::var("TARGET").unwrap().as_str()).unwrap(); 47 | let arch_str = cfg.target_arch.as_str(); 48 | 49 | match arch_str { 50 | "aarch64" => { 51 | println!("cargo:rustc-cfg=dtb"); 52 | } 53 | "x86" => { 54 | println!("cargo:rerun-if-changed=src/asm/x86/trampoline.asm"); 55 | 56 | let status = Command::new("nasm") 57 | .arg("-f") 58 | .arg("bin") 59 | .arg("-o") 60 | .arg(format!("{}/trampoline", out_dir)) 
61 | .arg("src/asm/x86/trampoline.asm") 62 | .status() 63 | .expect("failed to run nasm"); 64 | if !status.success() { 65 | panic!("nasm failed with exit status {}", status); 66 | } 67 | } 68 | "x86_64" => { 69 | println!("cargo:rerun-if-changed=src/asm/x86_64/trampoline.asm"); 70 | 71 | let status = Command::new("nasm") 72 | .arg("-f") 73 | .arg("bin") 74 | .arg("-o") 75 | .arg(format!("{}/trampoline", out_dir)) 76 | .arg("src/asm/x86_64/trampoline.asm") 77 | .status() 78 | .expect("failed to run nasm"); 79 | if !status.success() { 80 | panic!("nasm failed with exit status {}", status); 81 | } 82 | } 83 | "riscv64" => { 84 | println!("cargo:rustc-cfg=dtb"); 85 | } 86 | _ => (), 87 | } 88 | 89 | let _ = parse_kconfig(arch_str); 90 | } 91 | -------------------------------------------------------------------------------- /clippy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export RUST_TARGET_PATH="${PWD}/targets" 6 | export RUSTFLAGS="-C soft-float -C debuginfo=2" 7 | cargo clippy --lib --release --target x86_64-unknown-none "$@" 8 | -------------------------------------------------------------------------------- /config.toml.example: -------------------------------------------------------------------------------- 1 | [arch.x86_64.features] 2 | smap = "auto" 3 | fsgsbase = "auto" 4 | xsave = "auto" 5 | xsaveopt = "auto" 6 | 7 | # vim: ft=toml 8 | -------------------------------------------------------------------------------- /linkers/aarch64.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kstart) 2 | OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64") 3 | 4 | KERNEL_OFFSET = 0xFFFFFF0000000000; 5 | 6 | SECTIONS { 7 | . = KERNEL_OFFSET; 8 | 9 | . += SIZEOF_HEADERS; 10 | 11 | /* Force the zero page to be part of a segment by creating a 12 | * dummy section in the zero page. 
13 | * Limine will map the segment with the lowest vaddr value at 14 | * 0xFFFFFFFF80000000 even if the segment has a higher vaddr. 15 | * As such without the zero page being part of a segment, the 16 | * kernel would be loaded at an offset from the expected 17 | * location. As the redox kernel is not currently relocatable, 18 | * this would result in a crash. A similar issue likely exists 19 | * with multiboot/multiboot2 and the paddr of the segment. 20 | */ 21 | .dummy ALIGN(8) : AT(ADDR(.dummy) - KERNEL_OFFSET) {} 22 | 23 | . = ALIGN(4096); 24 | 25 | .text : AT(ADDR(.text) - KERNEL_OFFSET) { 26 | __text_start = .; 27 | *(.text*) 28 | __usercopy_start = .; 29 | *(.usercopy-fns) 30 | __usercopy_end = .; 31 | . = ALIGN(4096); 32 | __text_end = .; 33 | } 34 | 35 | .rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) { 36 | __rodata_start = .; 37 | *(.rodata*) 38 | . = ALIGN(4096); 39 | __rodata_end = .; 40 | } 41 | 42 | .data : AT(ADDR(.data) - KERNEL_OFFSET) { 43 | __data_start = .; 44 | *(.data*) 45 | . = ALIGN(4096); 46 | __data_end = .; 47 | __bss_start = .; 48 | *(.bss*) 49 | . = ALIGN(4096); 50 | __bss_end = .; 51 | } 52 | 53 | __end = .; 54 | 55 | /DISCARD/ : { 56 | *(.comment*) 57 | *(.eh_frame*) 58 | *(.gcc_except_table*) 59 | *(.note*) 60 | *(.rel.eh_frame*) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /linkers/i686.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kstart) 2 | OUTPUT_FORMAT(elf32-i386) 3 | 4 | KERNEL_OFFSET = 0xC0000000; 5 | 6 | SECTIONS { 7 | . = KERNEL_OFFSET; 8 | 9 | . += SIZEOF_HEADERS; 10 | 11 | /* Force the zero page to be part of a segment by creating a 12 | * dummy section in the zero page. 13 | * Limine will map the segment with the lowest vaddr value at 14 | * 0xFFFFFFFF80000000 even if the segment has a higher vaddr. 
15 | * As such without the zero page being part of a segment, the 16 | * kernel would be loaded at an offset from the expected 17 | * location. As the redox kernel is not currently relocatable, 18 | * this would result in a crash. A similar issue likely exists 19 | * with multiboot/multiboot2 and the paddr of the segment. 20 | */ 21 | .dummy : AT(ADDR(.dummy) - KERNEL_OFFSET) {} 22 | 23 | .text ALIGN(4K) : AT(ADDR(.text) - KERNEL_OFFSET) { 24 | __text_start = .; 25 | *(.text*) 26 | __usercopy_start = .; 27 | *(.usercopy-fns) 28 | __usercopy_end = .; 29 | } 30 | 31 | .rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET) { 32 | __text_end = .; 33 | __rodata_start = .; 34 | *(.rodata*) 35 | } 36 | 37 | .data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET) { 38 | __rodata_end = .; 39 | __data_start = .; 40 | *(.data*) 41 | . = ALIGN(4K); 42 | __data_end = .; 43 | __bss_start = .; 44 | *(.bss*) 45 | . = ALIGN(4K); 46 | } 47 | 48 | __end = .; 49 | 50 | /DISCARD/ : { 51 | *(.comment*) 52 | *(.eh_frame*) 53 | *(.gcc_except_table*) 54 | *(.note*) 55 | *(.rel.eh_frame*) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /linkers/riscv64.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kstart) 2 | OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv", "elf64-littleriscv" ) 3 | 4 | KERNEL_OFFSET = 0xFFFFFF0000000000; 5 | 6 | SECTIONS { 7 | . = KERNEL_OFFSET; 8 | 9 | . += SIZEOF_HEADERS; 10 | 11 | /* Force the zero page to be part of a segment by creating a 12 | * dummy section in the zero page. 13 | * Linker will map the segment with the lowest vaddr value at 14 | * 0xFFFFFF0000000000 even if the segment has a higher vaddr. 15 | * As such without the zero page being part of a segment, the 16 | * kernel would be loaded at an offset from the expected 17 | * location. As the redox kernel is not currently relocatable, 18 | * this would result in a crash. 
A similar issue likely exists 19 | * with multiboot/multiboot2 and the paddr of the segment. 20 | */ 21 | .dummy ALIGN(8) : AT(ADDR(.dummy) - KERNEL_OFFSET) {} 22 | 23 | . = ALIGN(4096); 24 | 25 | .text : AT(ADDR(.text) - KERNEL_OFFSET) { 26 | __text_start = .; 27 | *(.early_init.text*) 28 | . = ALIGN(4096); 29 | *(.text*) 30 | __usercopy_start = .; 31 | *(.usercopy-fns) 32 | __usercopy_end = .; 33 | . = ALIGN(4096); 34 | __text_end = .; 35 | } 36 | 37 | .rodata : AT(ADDR(.rodata) - KERNEL_OFFSET) { 38 | __rodata_start = .; 39 | *(.rodata*) 40 | . = ALIGN(4096); 41 | __rodata_end = .; 42 | } 43 | 44 | .data : AT(ADDR(.data) - KERNEL_OFFSET) { 45 | __data_start = .; 46 | *(.data*) 47 | *(.sdata*) 48 | . = ALIGN(4096); 49 | __data_end = .; 50 | *(.got*) 51 | . = ALIGN(4096); 52 | __bss_start = .; 53 | *(.bss*) 54 | *(.sbss*) 55 | . = ALIGN(4096); 56 | __bss_end = .; 57 | } 58 | 59 | __end = .; 60 | 61 | /DISCARD/ : { 62 | *(.comment*) 63 | *(.eh_frame*) 64 | *(.gcc_except_table*) 65 | *(.note*) 66 | *(.rel.eh_frame*) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /linkers/x86_64.ld: -------------------------------------------------------------------------------- 1 | ENTRY(kstart) 2 | OUTPUT_FORMAT(elf64-x86-64) 3 | 4 | KERNEL_OFFSET = 0xFFFFFFFF80000000; 5 | 6 | SECTIONS { 7 | . = KERNEL_OFFSET; 8 | 9 | . += SIZEOF_HEADERS; 10 | 11 | /* Force the zero page to be part of a segment by creating a 12 | * dummy section in the zero page. 13 | * Limine will map the segment with the lowest vaddr value at 14 | * 0xFFFFFFFF80000000 even if the segment has a higher vaddr. 15 | * As such without the zero page being part of a segment, the 16 | * kernel would be loaded at an offset from the expected 17 | * location. As the redox kernel is not currently relocatable, 18 | * this would result in a crash. A similar issue likely exists 19 | * with multiboot/multiboot2 and the paddr of the segment. 
20 | */ 21 | .dummy : AT(ADDR(.dummy) - KERNEL_OFFSET) {} 22 | 23 | .text ALIGN(4K) : AT(ADDR(.text) - KERNEL_OFFSET) { 24 | __text_start = .; 25 | *(.text*) 26 | __usercopy_start = .; 27 | *(.usercopy-fns) 28 | __usercopy_end = .; 29 | } 30 | 31 | .rodata ALIGN(4K) : AT(ADDR(.rodata) - KERNEL_OFFSET) { 32 | __text_end = .; 33 | __rodata_start = .; 34 | *(.rodata*) 35 | __altcode_start = .; 36 | KEEP(*(.altcode*)) 37 | __altcode_end = .; 38 | . = ALIGN(8); 39 | __altrelocs_start = .; 40 | KEEP(*(.altrelocs*)) 41 | __altrelocs_end = .; 42 | __altfeatures_start = .; 43 | KEEP(*(.altfeatures*)) 44 | __altfeatures_end = .; 45 | } 46 | 47 | .data ALIGN(4K) : AT(ADDR(.data) - KERNEL_OFFSET) { 48 | __rodata_end = .; 49 | __data_start = .; 50 | *(.data*) 51 | . = ALIGN(4K); 52 | __data_end = .; 53 | __bss_start = .; 54 | *(.bss*) 55 | } 56 | 57 | __end = .; 58 | 59 | /DISCARD/ : { 60 | *(.comment*) 61 | *(.eh_frame*) 62 | *(.gcc_except_table*) 63 | *(.note*) 64 | *(.rel.eh_frame*) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /res/unifont.font: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redox-os/kernel/e67cca7bce085190e538e2141784125bb99ddc2f/res/unifont.font -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | blank_lines_lower_bound = 0 # default 2 | blank_lines_upper_bound = 1 # default 3 | brace_style = "SameLineWhere" # default 4 | disable_all_formatting = false # default 5 | edition = "2021" 6 | empty_item_single_line = true # default 7 | fn_single_line = false # default 8 | force_explicit_abi = true # default 9 | format_strings = false # default 10 | hard_tabs = false # default 11 | show_parse_errors = true # default 12 | imports_granularity = "Crate" # default = Preserve 13 | imports_indent = "Block" # default 14 | 
imports_layout = "Mixed" # default 15 | indent_style = "Block" # default 16 | max_width = 100 # default 17 | newline_style = "Unix" # default = Auto 18 | skip_children = false # default 19 | tab_spaces = 4 # default 20 | trailing_comma = "Vertical" # default 21 | where_single_line = false # default 22 | -------------------------------------------------------------------------------- /src/acpi/gtdt.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use core::mem; 3 | 4 | use super::{find_sdt, sdt::Sdt}; 5 | use crate::{ 6 | device::generic_timer::GenericTimer, 7 | dtb::irqchip::{register_irq, IRQ_CHIP}, 8 | }; 9 | 10 | #[derive(Clone, Copy, Debug)] 11 | #[repr(C, packed)] 12 | pub struct Gtdt { 13 | pub header: Sdt, 14 | pub cnt_control_base: u64, 15 | _reserved: u32, 16 | pub secure_el1_timer_gsiv: u32, 17 | pub secure_el1_timer_flags: u32, 18 | pub non_secure_el1_timer_gsiv: u32, 19 | pub non_secure_el1_timer_flags: u32, 20 | pub virtual_el1_timer_gsiv: u32, 21 | pub virtual_el1_timer_flags: u32, 22 | pub el2_timer_gsiv: u32, 23 | pub el2_timer_flags: u32, 24 | pub cnt_read_base: u64, 25 | pub platform_timer_count: u32, 26 | pub platform_timer_offset: u32, 27 | /*TODO: we don't need these yet, and they cause short tables to fail parsing 28 | pub virtual_el2_timer_gsiv: u32, 29 | pub virtual_el2_timer_flags: u32, 30 | */ 31 | //TODO: platform timer structure (at platform timer offset, with platform timer count) 32 | } 33 | 34 | impl Gtdt { 35 | pub fn init() { 36 | let gtdt_sdt = find_sdt("GTDT"); 37 | let gtdt = if gtdt_sdt.len() == 1 { 38 | match Gtdt::new(gtdt_sdt[0]) { 39 | Some(gtdt) => gtdt, 40 | None => { 41 | log::warn!("Failed to parse GTDT"); 42 | return; 43 | } 44 | } 45 | } else { 46 | log::warn!("Unable to find GTDT"); 47 | return; 48 | }; 49 | 50 | let gsiv = gtdt.non_secure_el1_timer_gsiv; 51 | log::info!("generic_timer gsiv = {}", gsiv); 52 | let mut timer = GenericTimer { 53 | 
clk_freq: 0, 54 | reload_count: 0, 55 | }; 56 | timer.init(); 57 | register_irq(gsiv, Box::new(timer)); 58 | unsafe { IRQ_CHIP.irq_enable(gsiv as u32) }; 59 | } 60 | 61 | pub fn new(sdt: &'static Sdt) -> Option<&'static Gtdt> { 62 | if &sdt.signature == b"GTDT" && sdt.length as usize >= mem::size_of::() { 63 | Some(unsafe { &*((sdt as *const Sdt) as *const Gtdt) }) 64 | } else { 65 | None 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/acpi/madt/arch/aarch64.rs: -------------------------------------------------------------------------------- 1 | use alloc::{boxed::Box, vec::Vec}; 2 | 3 | use super::{Madt, MadtEntry}; 4 | use crate::{ 5 | device::irqchip::{ 6 | gic::{GenericInterruptController, GicCpuIf, GicDistIf}, 7 | gicv3::{GicV3, GicV3CpuIf}, 8 | }, 9 | dtb::irqchip::{IrqChipItem, IRQ_CHIP}, 10 | memory::{map_device_memory, PhysicalAddress, PAGE_SIZE}, 11 | }; 12 | 13 | pub(super) fn init(madt: Madt) { 14 | let mut gicd_opt = None; 15 | let mut giccs = Vec::new(); 16 | for madt_entry in madt.iter() { 17 | println!(" {:#x?}", madt_entry); 18 | match madt_entry { 19 | MadtEntry::Gicc(gicc) => { 20 | giccs.push(gicc); 21 | } 22 | MadtEntry::Gicd(gicd) => { 23 | if gicd_opt.is_some() { 24 | log::warn!("Only one GICD should be present on a system, ignoring this one"); 25 | } else { 26 | gicd_opt = Some(gicd); 27 | } 28 | } 29 | _ => {} 30 | } 31 | } 32 | let Some(gicd) = gicd_opt else { 33 | log::warn!("No GICD found"); 34 | return; 35 | }; 36 | let mut gic_dist_if = GicDistIf::default(); 37 | unsafe { 38 | let phys = PhysicalAddress::new(gicd.physical_base_address as usize); 39 | let virt = map_device_memory(phys, PAGE_SIZE); 40 | gic_dist_if.init(virt.data()); 41 | }; 42 | log::info!("{:#x?}", gic_dist_if); 43 | match gicd.gic_version { 44 | 1 | 2 => { 45 | for gicc in giccs { 46 | let mut gic_cpu_if = GicCpuIf::default(); 47 | unsafe { 48 | let phys = PhysicalAddress::new(gicc.physical_base_address 
as usize); 49 | let virt = map_device_memory(phys, PAGE_SIZE); 50 | gic_cpu_if.init(virt.data()) 51 | }; 52 | log::info!("{:#x?}", gic_cpu_if); 53 | let gic = GenericInterruptController { 54 | gic_dist_if, 55 | gic_cpu_if, 56 | irq_range: (0, 0), 57 | }; 58 | let chip = IrqChipItem { 59 | phandle: 0, 60 | parents: Vec::new(), 61 | children: Vec::new(), 62 | ic: Box::new(gic), 63 | }; 64 | unsafe { IRQ_CHIP.irq_chip_list.chips.push(chip) }; 65 | //TODO: support more GICCs 66 | break; 67 | } 68 | } 69 | 3 => { 70 | for gicc in giccs { 71 | let mut gic_cpu_if = GicV3CpuIf; 72 | unsafe { gic_cpu_if.init() }; 73 | log::info!("{:#x?}", gic_cpu_if); 74 | let gic = GicV3 { 75 | gic_dist_if, 76 | gic_cpu_if, 77 | //TODO: get GICRs 78 | gicrs: Vec::new(), 79 | irq_range: (0, 0), 80 | }; 81 | let chip = IrqChipItem { 82 | phandle: 0, 83 | parents: Vec::new(), 84 | children: Vec::new(), 85 | ic: Box::new(gic), 86 | }; 87 | unsafe { IRQ_CHIP.irq_chip_list.chips.push(chip) }; 88 | //TODO: support more GICCs 89 | break; 90 | } 91 | } 92 | _ => { 93 | log::warn!("unsupported GIC version {}", gicd.gic_version); 94 | } 95 | } 96 | unsafe { IRQ_CHIP.init(None) }; 97 | } 98 | -------------------------------------------------------------------------------- /src/acpi/madt/arch/other.rs: -------------------------------------------------------------------------------- 1 | use super::Madt; 2 | 3 | pub(super) fn init(madt: Madt) { 4 | for madt_entry in madt.iter() { 5 | println!(" {:#x?}", madt_entry); 6 | } 7 | 8 | log::warn!("MADT not yet handled on this platform"); 9 | } 10 | -------------------------------------------------------------------------------- /src/acpi/rsdp.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | memory::{Frame, KernelMapper}, 3 | paging::{Page, PageFlags, PhysicalAddress, VirtualAddress}, 4 | }; 5 | 6 | /// RSDP 7 | #[derive(Copy, Clone, Debug)] 8 | #[repr(C, packed)] 9 | pub struct RSDP { 10 | signature: 
[u8; 8], 11 | _checksum: u8, 12 | _oemid: [u8; 6], 13 | revision: u8, 14 | rsdt_address: u32, 15 | _length: u32, 16 | xsdt_address: u64, 17 | _extended_checksum: u8, 18 | _reserved: [u8; 3], 19 | } 20 | 21 | impl RSDP { 22 | fn get_already_supplied_rsdp(rsdp_ptr: *const u8) -> RSDP { 23 | // TODO: Validate 24 | unsafe { *(rsdp_ptr as *const RSDP) } 25 | } 26 | pub fn get_rsdp( 27 | mapper: &mut KernelMapper, 28 | already_supplied_rsdp: Option<*const u8>, 29 | ) -> Option { 30 | if let Some(rsdp_ptr) = already_supplied_rsdp { 31 | Some(Self::get_already_supplied_rsdp(rsdp_ptr)) 32 | } else { 33 | Self::get_rsdp_by_searching(mapper) 34 | } 35 | } 36 | /// Search for the RSDP 37 | pub fn get_rsdp_by_searching(mapper: &mut KernelMapper) -> Option { 38 | let start_addr = 0xE_0000; 39 | let end_addr = 0xF_FFFF; 40 | 41 | // Map all of the ACPI RSDP space 42 | { 43 | let start_frame = Frame::containing(PhysicalAddress::new(start_addr)); 44 | let end_frame = Frame::containing(PhysicalAddress::new(end_addr)); 45 | for frame in Frame::range_inclusive(start_frame, end_frame) { 46 | let page = Page::containing_address(VirtualAddress::new(frame.base().data())); 47 | let result = unsafe { 48 | mapper 49 | .get_mut() 50 | .expect("KernelMapper locked re-entrant while locating RSDPs") 51 | .map_phys(page.start_address(), frame.base(), PageFlags::new()) 52 | .expect("failed to map page while searching for RSDP") 53 | }; 54 | result.flush(); 55 | } 56 | } 57 | 58 | RSDP::search(start_addr, end_addr) 59 | } 60 | 61 | fn search(start_addr: usize, end_addr: usize) -> Option { 62 | for i in 0..(end_addr + 1 - start_addr) / 16 { 63 | let rsdp = unsafe { &*((start_addr + i * 16) as *const RSDP) }; 64 | if &rsdp.signature == b"RSD PTR " { 65 | return Some(*rsdp); 66 | } 67 | } 68 | None 69 | } 70 | 71 | /// Get the RSDT or XSDT address 72 | pub fn sdt_address(&self) -> usize { 73 | if self.revision >= 2 { 74 | self.xsdt_address as usize 75 | } else { 76 | self.rsdt_address as usize 77 | } 
78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/acpi/rsdt.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use core::{convert::TryFrom, mem}; 3 | 4 | use super::{rxsdt::Rxsdt, sdt::Sdt}; 5 | 6 | #[derive(Debug)] 7 | pub struct Rsdt(&'static Sdt); 8 | 9 | impl Rsdt { 10 | pub fn new(sdt: &'static Sdt) -> Option { 11 | if &sdt.signature == b"RSDT" { 12 | Some(Rsdt(sdt)) 13 | } else { 14 | None 15 | } 16 | } 17 | pub fn as_slice(&self) -> &[u8] { 18 | let length = 19 | usize::try_from(self.0.length).expect("expected 32-bit length to fit within usize"); 20 | 21 | unsafe { core::slice::from_raw_parts(self.0 as *const _ as *const u8, length) } 22 | } 23 | } 24 | 25 | impl Rxsdt for Rsdt { 26 | fn iter(&self) -> Box> { 27 | Box::new(RsdtIter { sdt: self.0, i: 0 }) 28 | } 29 | } 30 | 31 | pub struct RsdtIter { 32 | sdt: &'static Sdt, 33 | i: usize, 34 | } 35 | 36 | impl Iterator for RsdtIter { 37 | type Item = usize; 38 | fn next(&mut self) -> Option { 39 | if self.i < self.sdt.data_len() / mem::size_of::() { 40 | let item = unsafe { 41 | (self.sdt.data_address() as *const u32) 42 | .add(self.i) 43 | .read_unaligned() 44 | }; 45 | self.i += 1; 46 | Some(item as usize) 47 | } else { 48 | None 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/acpi/rxsdt.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | 3 | pub trait Rxsdt { 4 | fn iter(&self) -> Box>; 5 | } 6 | -------------------------------------------------------------------------------- /src/acpi/sdt.rs: -------------------------------------------------------------------------------- 1 | use core::mem; 2 | 3 | #[derive(Copy, Clone, Debug)] 4 | #[repr(C, packed)] 5 | pub struct Sdt { 6 | pub signature: [u8; 4], 7 | pub length: u32, 8 | pub revision: u8, 9 | pub checksum: u8, 
10 | pub oem_id: [u8; 6], 11 | pub oem_table_id: [u8; 8], 12 | pub oem_revision: u32, 13 | pub creator_id: u32, 14 | pub creator_revision: u32, 15 | } 16 | 17 | impl Sdt { 18 | /// Get the address of this tables data 19 | pub fn data_address(&self) -> usize { 20 | self as *const _ as usize + mem::size_of::() 21 | } 22 | 23 | /// Get the length of this tables data 24 | pub fn data_len(&self) -> usize { 25 | let total_size = self.length as usize; 26 | let header_size = mem::size_of::(); 27 | if total_size >= header_size { 28 | total_size - header_size 29 | } else { 30 | 0 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/acpi/xsdt.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use core::{convert::TryFrom, mem}; 3 | 4 | use super::{rxsdt::Rxsdt, sdt::Sdt}; 5 | 6 | #[derive(Debug)] 7 | pub struct Xsdt(&'static Sdt); 8 | 9 | impl Xsdt { 10 | pub fn new(sdt: &'static Sdt) -> Option { 11 | if &sdt.signature == b"XSDT" { 12 | Some(Xsdt(sdt)) 13 | } else { 14 | None 15 | } 16 | } 17 | pub fn as_slice(&self) -> &[u8] { 18 | let length = 19 | usize::try_from(self.0.length).expect("expected 32-bit length to fit within usize"); 20 | 21 | unsafe { core::slice::from_raw_parts(self.0 as *const _ as *const u8, length) } 22 | } 23 | } 24 | 25 | impl Rxsdt for Xsdt { 26 | fn iter(&self) -> Box> { 27 | Box::new(XsdtIter { sdt: self.0, i: 0 }) 28 | } 29 | } 30 | 31 | pub struct XsdtIter { 32 | sdt: &'static Sdt, 33 | i: usize, 34 | } 35 | 36 | impl Iterator for XsdtIter { 37 | type Item = usize; 38 | fn next(&mut self) -> Option { 39 | if self.i < self.sdt.data_len() / mem::size_of::() { 40 | let item = unsafe { 41 | core::ptr::read_unaligned((self.sdt.data_address() as *const u64).add(self.i)) 42 | }; 43 | self.i += 1; 44 | Some(item as usize) 45 | } else { 46 | None 47 | } 48 | } 49 | } 50 | 
-------------------------------------------------------------------------------- /src/allocator/linked_list.rs: -------------------------------------------------------------------------------- 1 | use crate::memory::KernelMapper; 2 | use core::{ 3 | alloc::{GlobalAlloc, Layout}, 4 | ptr::{self, NonNull}, 5 | }; 6 | use linked_list_allocator::Heap; 7 | use spin::Mutex; 8 | 9 | static HEAP: Mutex> = Mutex::new(None); 10 | 11 | pub struct Allocator; 12 | 13 | impl Allocator { 14 | pub unsafe fn init(offset: usize, size: usize) { 15 | *HEAP.lock() = Some(Heap::new(offset, size)); 16 | } 17 | } 18 | 19 | unsafe impl GlobalAlloc for Allocator { 20 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 21 | while let Some(ref mut heap) = *HEAP.lock() { 22 | match heap.allocate_first_fit(layout) { 23 | Err(()) => { 24 | let size = heap.size(); 25 | super::map_heap( 26 | &mut KernelMapper::lock(), 27 | crate::KERNEL_HEAP_OFFSET + size, 28 | crate::KERNEL_HEAP_SIZE, 29 | ); 30 | heap.extend(crate::KERNEL_HEAP_SIZE); 31 | } 32 | other => { 33 | return other 34 | .ok() 35 | .map_or(ptr::null_mut(), |allocation| allocation.as_ptr()) 36 | } 37 | } 38 | } 39 | panic!("__rust_allocate: heap not initialized"); 40 | } 41 | 42 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 43 | if let Some(ref mut heap) = *HEAP.lock() { 44 | heap.deallocate(NonNull::new_unchecked(ptr), layout) 45 | } else { 46 | panic!("__rust_deallocate: heap not initialized"); 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/allocator/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | memory::KernelMapper, 3 | paging::{mapper::PageFlushAll, Page, PageFlags, VirtualAddress}, 4 | }; 5 | use rmm::Flusher; 6 | 7 | #[cfg(not(feature = "slab"))] 8 | pub use self::linked_list::Allocator; 9 | 10 | #[cfg(feature = "slab")] 11 | pub use self::slab::Allocator; 12 | 13 | #[cfg(not(feature = 
"slab"))] 14 | mod linked_list; 15 | 16 | #[cfg(feature = "slab")] 17 | mod slab; 18 | 19 | unsafe fn map_heap(mapper: &mut KernelMapper, offset: usize, size: usize) { 20 | let mapper = mapper 21 | .get_mut() 22 | .expect("failed to obtain exclusive access to KernelMapper while extending heap"); 23 | let mut flush_all = PageFlushAll::new(); 24 | 25 | let heap_start_page = Page::containing_address(VirtualAddress::new(offset)); 26 | let heap_end_page = Page::containing_address(VirtualAddress::new(offset + size - 1)); 27 | for page in Page::range_inclusive(heap_start_page, heap_end_page) { 28 | let result = mapper 29 | .map( 30 | page.start_address(), 31 | PageFlags::new() 32 | .write(true) 33 | .global(cfg!(not(feature = "pti"))), 34 | ) 35 | .expect("failed to map kernel heap"); 36 | flush_all.consume(result); 37 | } 38 | 39 | flush_all.flush(); 40 | } 41 | 42 | pub unsafe fn init() { 43 | let offset = crate::KERNEL_HEAP_OFFSET; 44 | let size = crate::KERNEL_HEAP_SIZE; 45 | 46 | // Map heap pages 47 | map_heap(&mut KernelMapper::lock(), offset, size); 48 | 49 | // Initialize global heap 50 | Allocator::init(offset, size); 51 | } 52 | -------------------------------------------------------------------------------- /src/allocator/slab.rs: -------------------------------------------------------------------------------- 1 | use core::alloc::{Alloc, AllocErr, Layout}; 2 | use slab_allocator::Heap; 3 | use spin::Mutex; 4 | 5 | static HEAP: Mutex> = Mutex::new(None); 6 | 7 | pub struct Allocator; 8 | 9 | impl Allocator { 10 | pub unsafe fn init(offset: usize, size: usize) { 11 | *HEAP.lock() = Some(Heap::new(offset, size)); 12 | } 13 | } 14 | 15 | unsafe impl<'a> Alloc for &'a Allocator { 16 | unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { 17 | if let Some(ref mut heap) = *HEAP.lock() { 18 | heap.allocate(layout) 19 | } else { 20 | panic!("__rust_allocate: heap not initialized"); 21 | } 22 | } 23 | 24 | unsafe fn dealloc(&mut self, ptr: *mut u8, 
layout: Layout) { 25 | if let Some(ref mut heap) = *HEAP.lock() { 26 | heap.deallocate(ptr, layout) 27 | } else { 28 | panic!("__rust_deallocate: heap not initialized"); 29 | } 30 | } 31 | 32 | fn oom(&mut self, error: AllocErr) -> ! { 33 | panic!("Out of memory: {:?}", error); 34 | } 35 | 36 | fn usable_size(&self, layout: &Layout) -> (usize, usize) { 37 | if let Some(ref mut heap) = *HEAP.lock() { 38 | heap.usable_size(layout) 39 | } else { 40 | panic!("__rust_usable_size: heap not initialized"); 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/arch/aarch64/consts.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | // Because the memory map is so important to not be aliased, it is defined here, in one place 4 | // The lower 256 PML4 entries are reserved for userspace 5 | // Each PML4 entry references up to 512 GB of memory 6 | // The second from the top (510) PML4 is reserved for the kernel 7 | /// The size of a single PML4 8 | pub const PML4_SIZE: usize = 0x0000_0080_0000_0000; 9 | pub const PML4_MASK: usize = 0x0000_ff80_0000_0000; 10 | 11 | /// Offset of recursive paging (deprecated, but still reserved) 12 | pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; 13 | pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE; 14 | 15 | /// Offset of kernel 16 | pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE; 17 | pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE; 18 | 19 | /// Offset to kernel heap 20 | pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE; 21 | pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; 22 | /// Size of kernel heap 23 | pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB 24 | 25 | /// Offset of temporary mapping for misc kernel bring-up actions 26 | pub const 
KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; 27 | 28 | /// Offset to kernel percpu variables 29 | pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE; 30 | pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK) / PML4_SIZE; 31 | /// Size of kernel percpu variables 32 | pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB 33 | pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT; 34 | 35 | /// Offset of physmap 36 | // This needs to match RMM's PHYS_OFFSET 37 | pub const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000; 38 | pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK) / PML4_SIZE; 39 | 40 | /// End offset of the user image, i.e. kernel start 41 | pub const USER_END_OFFSET: usize = 256 * PML4_SIZE; 42 | -------------------------------------------------------------------------------- /src/arch/aarch64/debug.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use spin::MutexGuard; 3 | 4 | use crate::log::{Log, LOG}; 5 | 6 | #[cfg(feature = "serial_debug")] 7 | use super::device::serial::{SerialKind, COM1}; 8 | #[cfg(feature = "graphical_debug")] 9 | use crate::devices::graphical_debug::{DebugDisplay, DEBUG_DISPLAY}; 10 | 11 | pub struct Writer<'a> { 12 | log: MutexGuard<'a, Option>, 13 | #[cfg(feature = "graphical_debug")] 14 | display: MutexGuard<'a, Option>, 15 | #[cfg(feature = "serial_debug")] 16 | serial: MutexGuard<'a, Option>, 17 | } 18 | 19 | impl<'a> Writer<'a> { 20 | pub fn new() -> Writer<'a> { 21 | Writer { 22 | log: LOG.lock(), 23 | #[cfg(feature = "graphical_debug")] 24 | display: DEBUG_DISPLAY.lock(), 25 | #[cfg(feature = "serial_debug")] 26 | serial: COM1.lock(), 27 | } 28 | } 29 | 30 | pub fn write(&mut self, buf: &[u8], preserve: bool) { 31 | if preserve { 32 | if let Some(ref mut log) = *self.log { 33 | log.write(buf); 34 | } 35 | } 36 | 37 | #[cfg(feature = "graphical_debug")] 38 | { 39 | if let Some(ref mut 
display) = *self.display { 40 | let _ = display.write(buf); 41 | } 42 | } 43 | 44 | #[cfg(feature = "serial_debug")] 45 | { 46 | if let Some(ref mut serial) = *self.serial { 47 | serial.write(buf); 48 | } 49 | } 50 | } 51 | } 52 | 53 | impl<'a> fmt::Write for Writer<'a> { 54 | fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { 55 | self.write(s.as_bytes(), true); 56 | Ok(()) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/cpu/registers/control_regs.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | //! Functions to read and write control registers. 4 | 5 | use core::arch::asm; 6 | 7 | bitflags! { 8 | pub struct MairEl1: u64 { 9 | const DEVICE_MEMORY = 0x00 << 16; 10 | const NORMAL_UNCACHED_MEMORY = 0x44 << 8; 11 | const NORMAL_WRITEBACK_MEMORY = 0xff; 12 | } 13 | } 14 | 15 | pub unsafe fn ttbr0_el1() -> u64 { 16 | let ret: u64; 17 | asm!("mrs {}, ttbr0_el1", out(reg) ret); 18 | ret 19 | } 20 | 21 | pub unsafe fn ttbr0_el1_write(val: u64) { 22 | asm!("msr ttbr0_el1, {}", in(reg) val); 23 | } 24 | 25 | pub unsafe fn ttbr1_el1() -> u64 { 26 | let ret: u64; 27 | asm!("mrs {}, ttbr1_el1", out(reg) ret); 28 | ret 29 | } 30 | 31 | pub unsafe fn ttbr1_el1_write(val: u64) { 32 | asm!("msr ttbr1_el1, {}", in(reg) val); 33 | } 34 | 35 | pub unsafe fn mair_el1() -> MairEl1 { 36 | let ret: u64; 37 | asm!("mrs {}, mair_el1", out(reg) ret); 38 | MairEl1::from_bits_truncate(ret) 39 | } 40 | 41 | pub unsafe fn mair_el1_write(val: MairEl1) { 42 | asm!("msr mair_el1, {}", in(reg) val.bits()); 43 | } 44 | 45 | pub unsafe fn tpidr_el0() -> u64 { 46 | let ret: u64; 47 | asm!("mrs {}, tpidr_el0", out(reg) ret); 48 | ret 49 | } 50 | 51 | pub unsafe fn tpidr_el0_write(val: u64) { 52 | asm!("msr tpidr_el0, {}", in(reg) val); 53 | } 54 | 55 | pub unsafe fn tpidr_el1() -> u64 { 56 | let ret: u64; 57 | asm!("mrs {}, tpidr_el1", out(reg) ret); 58 
| ret 59 | } 60 | 61 | pub unsafe fn tpidr_el1_write(val: u64) { 62 | asm!("msr tpidr_el1, {}", in(reg) val); 63 | } 64 | 65 | pub unsafe fn tpidrro_el0() -> u64 { 66 | let ret: u64; 67 | asm!("mrs {}, tpidrro_el0", out(reg) ret); 68 | ret 69 | } 70 | 71 | pub unsafe fn tpidrro_el0_write(val: u64) { 72 | asm!("msr tpidrro_el0, {}", in(reg) val); 73 | } 74 | 75 | pub unsafe fn esr_el1() -> u32 { 76 | let ret: u32; 77 | asm!("mrs {0:w}, esr_el1", out(reg) ret); 78 | ret 79 | } 80 | 81 | pub unsafe fn cntfreq_el0() -> u32 { 82 | let ret: usize; 83 | asm!("mrs {}, cntfrq_el0", out(reg) ret); 84 | ret as u32 85 | } 86 | 87 | pub unsafe fn tmr_ctrl() -> u32 { 88 | let ret: usize; 89 | asm!("mrs {}, cntp_ctl_el0", out(reg) ret); 90 | ret as u32 91 | } 92 | 93 | pub unsafe fn tmr_ctrl_write(val: u32) { 94 | asm!("msr cntp_ctl_el0, {}", in(reg) val as usize); 95 | } 96 | 97 | pub unsafe fn tmr_tval() -> u32 { 98 | let ret: usize; 99 | asm!("mrs {0}, cntp_tval_el0", out(reg) ret); 100 | ret as u32 101 | } 102 | 103 | pub unsafe fn tmr_tval_write(val: u32) { 104 | asm!("msr cntp_tval_el0, {}", in(reg) val as usize); 105 | } 106 | 107 | pub unsafe fn midr() -> u32 { 108 | let ret: usize; 109 | asm!("mrs {}, midr_el1", out(reg) ret); 110 | ret as u32 111 | } 112 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/cpu/registers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod control_regs; 2 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/generic_timer.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use log::{debug, error, info}; 3 | 4 | use super::ic_for_chip; 5 | use crate::{ 6 | context, 7 | context::timeout, 8 | device::cpu::registers::control_regs, 9 | dtb::{ 10 | get_interrupt, 11 | irqchip::{register_irq, InterruptHandler, IRQ_CHIP}, 12 | 
}, 13 | interrupt::irq::trigger, 14 | time, 15 | }; 16 | use fdt::Fdt; 17 | 18 | bitflags! { 19 | struct TimerCtrlFlags: u32 { 20 | const ENABLE = 1 << 0; 21 | const IMASK = 1 << 1; 22 | const ISTATUS = 1 << 2; 23 | } 24 | } 25 | 26 | pub unsafe fn init(fdt: &Fdt) { 27 | let mut timer = GenericTimer { 28 | clk_freq: 0, 29 | reload_count: 0, 30 | }; 31 | timer.init(); 32 | if let Some(node) = fdt.find_compatible(&["arm,armv7-timer"]) { 33 | let irq = get_interrupt(fdt, &node, 1).unwrap(); 34 | debug!("irq = {:?}", irq); 35 | if let Some(ic_idx) = ic_for_chip(&fdt, &node) { 36 | //PHYS_NONSECURE_PPI only 37 | let virq = IRQ_CHIP.irq_chip_list.chips[ic_idx] 38 | .ic 39 | .irq_xlate(irq) 40 | .unwrap(); 41 | info!("generic_timer virq = {}", virq); 42 | register_irq(virq as u32, Box::new(timer)); 43 | IRQ_CHIP.irq_enable(virq as u32); 44 | } else { 45 | error!("Failed to find irq parent for generic timer"); 46 | } 47 | } 48 | } 49 | 50 | pub struct GenericTimer { 51 | pub clk_freq: u32, 52 | pub reload_count: u32, 53 | } 54 | 55 | impl GenericTimer { 56 | pub fn init(&mut self) { 57 | let clk_freq = unsafe { control_regs::cntfreq_el0() }; 58 | self.clk_freq = clk_freq; 59 | self.reload_count = clk_freq / 100; 60 | 61 | unsafe { control_regs::tmr_tval_write(self.reload_count) }; 62 | 63 | let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); 64 | ctrl.insert(TimerCtrlFlags::ENABLE); 65 | ctrl.remove(TimerCtrlFlags::IMASK); 66 | unsafe { 67 | control_regs::tmr_ctrl_write(ctrl.bits()); 68 | } 69 | } 70 | 71 | #[allow(unused)] 72 | fn disable() { 73 | let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); 74 | ctrl.remove(TimerCtrlFlags::ENABLE); 75 | unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; 76 | } 77 | 78 | #[allow(unused)] 79 | pub fn set_irq(&mut self) { 80 | let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); 81 | ctrl.remove(TimerCtrlFlags::IMASK); 82 | 
unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; 83 | } 84 | 85 | pub fn clear_irq(&mut self) { 86 | let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); 87 | 88 | if ctrl.contains(TimerCtrlFlags::ISTATUS) { 89 | ctrl.insert(TimerCtrlFlags::IMASK); 90 | unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; 91 | } 92 | } 93 | 94 | pub fn reload_count(&mut self) { 95 | let mut ctrl = TimerCtrlFlags::from_bits_truncate(unsafe { control_regs::tmr_ctrl() }); 96 | ctrl.insert(TimerCtrlFlags::ENABLE); 97 | ctrl.remove(TimerCtrlFlags::IMASK); 98 | unsafe { control_regs::tmr_tval_write(self.reload_count) }; 99 | unsafe { control_regs::tmr_ctrl_write(ctrl.bits()) }; 100 | } 101 | } 102 | 103 | impl InterruptHandler for GenericTimer { 104 | fn irq_handler(&mut self, irq: u32) { 105 | 106 | self.clear_irq(); 107 | { 108 | *time::OFFSET.lock() += self.clk_freq as u128; 109 | } 110 | 111 | timeout::trigger(); 112 | 113 | context::switch::tick(); 114 | 115 | unsafe { 116 | trigger(irq); 117 | } 118 | self.reload_count(); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/irqchip/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::dtb::irqchip::{InterruptController, IRQ_CHIP}; 2 | use alloc::boxed::Box; 3 | use fdt::{node::FdtNode, Fdt}; 4 | 5 | pub(crate) mod gic; 6 | pub(crate) mod gicv3; 7 | mod irq_bcm2835; 8 | mod irq_bcm2836; 9 | mod null; 10 | 11 | pub(crate) fn new_irqchip(ic_str: &str) -> Option> { 12 | if ic_str.contains("arm,gic-v3") { 13 | Some(Box::new(gicv3::GicV3::new())) 14 | } else if ic_str.contains("arm,cortex-a15-gic") || ic_str.contains("arm,gic-400") { 15 | Some(Box::new(gic::GenericInterruptController::new())) 16 | } else if ic_str.contains("brcm,bcm2836-l1-intc") { 17 | Some(Box::new(irq_bcm2836::Bcm2836ArmInterruptController::new())) 18 | } else if ic_str.contains("brcm,bcm2836-armctrl-ic") 
{ 19 | Some(Box::new(irq_bcm2835::Bcm2835ArmInterruptController::new())) 20 | } else { 21 | log::warn!("no driver for interrupt controller {:?}", ic_str); 22 | //TODO: return None and handle it properly 23 | Some(Box::new(null::Null)) 24 | } 25 | } 26 | 27 | pub(crate) fn ic_for_chip(fdt: &Fdt, node: &FdtNode) -> Option { 28 | if let Some(_) = node.property("interrupts-extended") { 29 | log::error!("multi-parented device not supported"); 30 | None 31 | } else if let Some(irqc_phandle) = node 32 | .property("interrupt-parent") 33 | .or(fdt.root().property("interrupt-parent")) 34 | .and_then(|f| f.as_usize()) 35 | { 36 | unsafe { IRQ_CHIP.phandle_to_ic_idx(irqc_phandle as u32) } 37 | } else { 38 | log::error!("no irq parent found"); 39 | None 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/irqchip/null.rs: -------------------------------------------------------------------------------- 1 | use fdt::Fdt; 2 | use syscall::{ 3 | error::{Error, EINVAL}, 4 | Result, 5 | }; 6 | 7 | use super::InterruptController; 8 | use crate::dtb::irqchip::{InterruptHandler, IrqCell, IrqDesc}; 9 | 10 | pub struct Null; 11 | 12 | impl InterruptHandler for Null { 13 | fn irq_handler(&mut self, _irq: u32) {} 14 | } 15 | 16 | impl InterruptController for Null { 17 | fn irq_init( 18 | &mut self, 19 | _fdt_opt: Option<&Fdt>, 20 | _irq_desc: &mut [IrqDesc; 1024], 21 | _ic_idx: usize, 22 | _irq_idx: &mut usize, 23 | ) -> Result<()> { 24 | Ok(()) 25 | } 26 | fn irq_ack(&mut self) -> u32 { 27 | unimplemented!() 28 | } 29 | fn irq_eoi(&mut self, _irq_num: u32) {} 30 | fn irq_enable(&mut self, _irq_num: u32) {} 31 | fn irq_disable(&mut self, _irq_num: u32) {} 32 | fn irq_xlate(&self, _irq_data: IrqCell) -> Result { 33 | Err(Error::new(EINVAL)) 34 | } 35 | fn irq_to_virq(&self, _hwirq: u32) -> Option { 36 | None 37 | } 38 | } 39 | -------------------------------------------------------------------------------- 
/src/arch/aarch64/device/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::info; 2 | use core::sync::atomic::{AtomicUsize, Ordering}; 3 | use fdt::Fdt; 4 | 5 | pub mod cpu; 6 | pub mod generic_timer; 7 | pub mod irqchip; 8 | pub mod rtc; 9 | pub mod serial; 10 | pub mod uart_pl011; 11 | 12 | use crate::dtb::irqchip::IRQ_CHIP; 13 | use irqchip::ic_for_chip; 14 | 15 | pub static ROOT_IC_IDX: AtomicUsize = AtomicUsize::new(0); 16 | pub static ROOT_IC_IDX_IS_SET: AtomicUsize = AtomicUsize::new(0); 17 | 18 | unsafe fn init_root_ic(fdt: &Fdt) { 19 | 20 | let is_set = ROOT_IC_IDX_IS_SET.load(Ordering::Relaxed); 21 | if is_set != 0 { 22 | let ic_idx = ROOT_IC_IDX.load(Ordering::Relaxed); 23 | info!("Already selected {} as root ic", ic_idx); 24 | return ; 25 | } 26 | 27 | let root_irqc_phandle = fdt 28 | .root() 29 | .property("interrupt-parent") 30 | .unwrap() 31 | .as_usize() 32 | .unwrap(); 33 | let ic_idx = IRQ_CHIP 34 | .phandle_to_ic_idx(root_irqc_phandle as u32) 35 | .unwrap(); 36 | info!("select {} as root ic", ic_idx); 37 | ROOT_IC_IDX.store(ic_idx, Ordering::Relaxed); 38 | } 39 | 40 | pub unsafe fn init_devicetree(fdt: &Fdt) { 41 | info!("IRQCHIP INIT"); 42 | crate::dtb::irqchip::init(&fdt); 43 | init_root_ic(&fdt); 44 | info!("GIT INIT"); 45 | generic_timer::init(fdt); 46 | info!("SERIAL INIT"); 47 | serial::init(fdt); 48 | info!("RTC INIT"); 49 | rtc::init(fdt); 50 | } 51 | 52 | #[derive(Default)] 53 | pub struct ArchPercpuMisc; 54 | -------------------------------------------------------------------------------- /src/arch/aarch64/device/rtc.rs: -------------------------------------------------------------------------------- 1 | use crate::{dtb::get_mmio_address, time}; 2 | use core::ptr::read_volatile; 3 | 4 | static RTC_DR: usize = 0x000; 5 | 6 | pub unsafe fn init(fdt: &fdt::Fdt) { 7 | if let Some(node) = fdt.find_compatible(&["arm,pl031"]) { 8 | match node 9 | .reg() 10 | .and_then(|mut iter| iter.next()) 
11 | .and_then(|region| get_mmio_address(fdt, &node, ®ion)) 12 | { 13 | Some(phys) => { 14 | let mut rtc = Pl031rtc { phys }; 15 | log::info!("PL031 RTC at {:#x}", rtc.phys); 16 | *time::START.lock() = (rtc.time() as u128) * time::NANOS_PER_SEC; 17 | } 18 | None => { 19 | log::warn!("No PL031 RTC registers"); 20 | } 21 | } 22 | } else { 23 | log::warn!("No PL031 RTC found"); 24 | } 25 | } 26 | 27 | struct Pl031rtc { 28 | pub phys: usize, 29 | } 30 | 31 | impl Pl031rtc { 32 | unsafe fn read(&self, reg: usize) -> u32 { 33 | read_volatile((crate::PHYS_OFFSET + self.phys + reg) as *const u32) 34 | } 35 | 36 | pub fn time(&mut self) -> u64 { 37 | let seconds = unsafe { self.read(RTC_DR) } as u64; 38 | seconds 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/arch/aarch64/interrupt/irq.rs: -------------------------------------------------------------------------------- 1 | use crate::{arch::device::ROOT_IC_IDX, dtb::irqchip::IRQ_CHIP}; 2 | use core::sync::atomic::Ordering; 3 | 4 | #[cfg(feature = "sys_stat")] 5 | use crate::percpu::PercpuBlock; 6 | 7 | unsafe fn irq_ack() -> (u32, Option) { 8 | let ic = &mut IRQ_CHIP.irq_chip_list.chips[ROOT_IC_IDX.load(Ordering::Relaxed)].ic; 9 | let irq = ic.irq_ack(); 10 | (irq, ic.irq_to_virq(irq)) 11 | } 12 | 13 | exception_stack!(irq_at_el0, |_stack| { 14 | let (irq, virq) = irq_ack(); 15 | if let Some(virq) = virq 16 | && virq < 1024 17 | { 18 | IRQ_CHIP.trigger_virq(virq as u32); 19 | } else { 20 | println!("unexpected irq num {}", irq); 21 | } 22 | }); 23 | 24 | exception_stack!(irq_at_el1, |_stack| { 25 | let (irq, virq) = irq_ack(); 26 | if let Some(virq) = virq 27 | && virq < 1024 28 | { 29 | IRQ_CHIP.trigger_virq(virq as u32); 30 | } else { 31 | println!("unexpected irq num {}", irq); 32 | } 33 | }); 34 | 35 | //TODO 36 | pub unsafe fn trigger(irq: u32) { 37 | #[cfg(feature = "sys_stat")] 38 | PercpuBlock::current().stats.add_irq(irq); 39 | 40 | extern "C" { 41 | fn 
irq_trigger(irq: u32); 42 | } 43 | 44 | irq_trigger(irq); 45 | IRQ_CHIP.irq_eoi(irq); 46 | } 47 | 48 | /* 49 | pub unsafe fn irq_handler_gentimer(irq: u32) { 50 | GENTIMER.clear_irq(); 51 | { 52 | *time::OFFSET.lock() += GENTIMER.clk_freq as u128; 53 | } 54 | 55 | timeout::trigger(); 56 | 57 | context::switch::tick(); 58 | 59 | trigger(irq); 60 | GENTIMER.reload_count(); 61 | } 62 | */ 63 | -------------------------------------------------------------------------------- /src/arch/aarch64/interrupt/mod.rs: -------------------------------------------------------------------------------- 1 | //! Interrupt instructions 2 | 3 | use core::arch::asm; 4 | 5 | #[macro_use] 6 | pub mod handler; 7 | 8 | pub mod exception; 9 | pub mod irq; 10 | pub mod syscall; 11 | pub mod trace; 12 | 13 | pub use self::handler::InterruptStack; 14 | 15 | /// Clear interrupts 16 | #[inline(always)] 17 | pub unsafe fn disable() { 18 | asm!("msr daifset, #2"); 19 | } 20 | 21 | /// Set interrupts and halt 22 | /// This will atomically wait for the next interrupt 23 | /// Performing enable followed by halt is not guaranteed to be atomic, use this instead! 24 | #[inline(always)] 25 | pub unsafe fn enable_and_halt() { 26 | asm!("msr daifclr, #2"); 27 | asm!("wfi"); 28 | } 29 | 30 | /// Set interrupts and nop 31 | /// This will enable interrupts and allow the IF flag to be processed 32 | /// Simply enabling interrupts does not gurantee that they will trigger, use this instead! 
33 | #[inline(always)] 34 | pub unsafe fn enable_and_nop() { 35 | asm!("msr daifclr, #2"); 36 | asm!("nop"); 37 | } 38 | 39 | /// Halt instruction 40 | #[inline(always)] 41 | pub unsafe fn halt() { 42 | asm!("wfi"); 43 | } 44 | 45 | /// Pause instruction 46 | /// Safe because it is similar to a NOP, and has no memory effects 47 | #[inline(always)] 48 | pub fn pause() { 49 | unsafe { asm!("nop") }; 50 | } 51 | -------------------------------------------------------------------------------- /src/arch/aarch64/interrupt/syscall.rs: -------------------------------------------------------------------------------- 1 | #[no_mangle] 2 | pub unsafe extern "C" fn do_exception_unhandled() {} 3 | 4 | #[no_mangle] 5 | pub unsafe extern "C" fn do_exception_synchronous() {} 6 | 7 | #[allow(dead_code)] 8 | #[repr(C, packed)] 9 | pub struct SyscallStack { 10 | pub elr_el1: usize, 11 | pub padding: usize, 12 | pub tpidr: usize, 13 | pub tpidrro: usize, 14 | pub rflags: usize, 15 | pub esr: usize, 16 | pub sp: usize, 17 | pub lr: usize, 18 | pub fp: usize, 19 | pub x28: usize, 20 | pub x27: usize, 21 | pub x26: usize, 22 | pub x25: usize, 23 | pub x24: usize, 24 | pub x23: usize, 25 | pub x22: usize, 26 | pub x21: usize, 27 | pub x20: usize, 28 | pub x19: usize, 29 | pub x18: usize, 30 | pub x17: usize, 31 | pub x16: usize, 32 | pub x15: usize, 33 | pub x14: usize, 34 | pub x13: usize, 35 | pub x12: usize, 36 | pub x11: usize, 37 | pub x10: usize, 38 | pub x9: usize, 39 | pub x8: usize, 40 | pub x7: usize, 41 | pub x6: usize, 42 | pub x5: usize, 43 | pub x4: usize, 44 | pub x3: usize, 45 | pub x2: usize, 46 | pub x1: usize, 47 | pub x0: usize, 48 | } 49 | pub use super::handler::enter_usermode; 50 | -------------------------------------------------------------------------------- /src/arch/aarch64/interrupt/trace.rs: -------------------------------------------------------------------------------- 1 | use core::{arch::asm, mem}; 2 | 3 | pub struct StackTrace { 4 | pub fp: usize, 5 | pub 
pc_ptr: *const usize, 6 | } 7 | 8 | impl StackTrace { 9 | #[inline(always)] 10 | pub unsafe fn start() -> Option { 11 | let fp: usize; 12 | asm!("mov {}, fp", out(reg) fp); 13 | let pc_ptr = fp.checked_add(mem::size_of::())?; 14 | Some(StackTrace { 15 | fp, 16 | pc_ptr: pc_ptr as *const usize, 17 | }) 18 | } 19 | 20 | pub unsafe fn next(self) -> Option { 21 | let fp = *(self.fp as *const usize); 22 | let pc_ptr = fp.checked_add(mem::size_of::())?; 23 | Some(StackTrace { 24 | fp: fp, 25 | pc_ptr: pc_ptr as *const usize, 26 | }) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/arch/aarch64/ipi.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Copy, Debug)] 2 | #[repr(u8)] 3 | pub enum IpiKind { 4 | Wakeup = 0x40, 5 | Tlb = 0x41, 6 | } 7 | 8 | #[derive(Clone, Copy, Debug)] 9 | #[repr(u8)] 10 | pub enum IpiTarget { 11 | Other = 3, 12 | } 13 | 14 | #[cfg(not(feature = "multi_core"))] 15 | #[inline(always)] 16 | pub fn ipi(_kind: IpiKind, _target: IpiTarget) {} 17 | 18 | #[cfg(feature = "multi_core")] 19 | #[inline(always)] 20 | pub fn ipi(_kind: IpiKind, _target: IpiTarget) {} 21 | 22 | #[cfg(not(feature = "multi_core"))] 23 | #[inline(always)] 24 | pub fn ipi_single(_kind: IpiKind, _target: crate::cpu_set::LogicalCpuId) {} 25 | 26 | #[cfg(feature = "multi_core")] 27 | #[inline(always)] 28 | pub fn ipi_single(_kind: IpiKind, _target: crate::cpu_set::LogicalCpuId) {} 29 | -------------------------------------------------------------------------------- /src/arch/aarch64/macros.rs: -------------------------------------------------------------------------------- 1 | /// Print to console 2 | #[macro_export] 3 | macro_rules! print { 4 | ($($arg:tt)*) => ({ 5 | use core::fmt::Write; 6 | let _ = write!($crate::arch::debug::Writer::new(), $($arg)*); 7 | }); 8 | } 9 | 10 | /// Print with new line to console 11 | #[macro_export] 12 | macro_rules! 
println { 13 | () => (print!("\n")); 14 | ($fmt:expr) => (print!(concat!($fmt, "\n"))); 15 | ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); 16 | } 17 | -------------------------------------------------------------------------------- /src/arch/aarch64/misc.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | cpu_set::LogicalCpuId, 3 | paging::{RmmA, RmmArch}, 4 | percpu::PercpuBlock, 5 | }; 6 | 7 | impl PercpuBlock { 8 | pub fn current() -> &'static Self { 9 | unsafe { &*(crate::device::cpu::registers::control_regs::tpidr_el1() as *const Self) } 10 | } 11 | } 12 | 13 | #[cold] 14 | pub unsafe fn init(cpu_id: LogicalCpuId) { 15 | let frame = crate::memory::allocate_frame().expect("failed to allocate percpu memory"); 16 | let virt = RmmA::phys_to_virt(frame.base()).data() as *mut PercpuBlock; 17 | 18 | virt.write(PercpuBlock::init(cpu_id)); 19 | 20 | crate::device::cpu::registers::control_regs::tpidr_el1_write(virt as u64); 21 | } 22 | -------------------------------------------------------------------------------- /src/arch/aarch64/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod macros; 3 | 4 | /// Constants like memory locations 5 | pub mod consts; 6 | 7 | /// Debugging support 8 | pub mod debug; 9 | 10 | /// Devices 11 | pub mod device; 12 | 13 | /// Interrupt instructions 14 | pub mod interrupt; 15 | 16 | /// Inter-processor interrupts 17 | pub mod ipi; 18 | 19 | /// Miscellaneous 20 | pub mod misc; 21 | 22 | /// Paging 23 | pub mod paging; 24 | 25 | pub mod rmm; 26 | 27 | /// Initialization and start function 28 | pub mod start; 29 | 30 | /// Stop function 31 | pub mod stop; 32 | 33 | // Interrupt vectors 34 | pub mod vectors; 35 | 36 | pub mod time; 37 | 38 | pub use ::rmm::AArch64Arch as CurrentRmmArch; 39 | 40 | pub use arch_copy_to_user as arch_copy_from_user; 41 | 42 | #[naked] 43 | #[link_section = ".usercopy-fns"] 
44 | pub unsafe extern "C" fn arch_copy_to_user(dst: usize, src: usize, len: usize) -> u8 { 45 | // x0, x1, x2 46 | core::arch::naked_asm!( 47 | " 48 | mov x4, x0 49 | mov x0, 0 50 | 2: 51 | cmp x2, 0 52 | b.eq 3f 53 | 54 | ldrb w3, [x1] 55 | strb w3, [x4] 56 | 57 | add x4, x4, 1 58 | add x1, x1, 1 59 | sub x2, x2, 1 60 | 61 | b 2b 62 | 3: 63 | ret 64 | " 65 | ); 66 | } 67 | 68 | pub const KFX_SIZE: usize = 1024; 69 | 70 | // This function exists as the KFX size is dynamic on x86_64. 71 | pub fn kfx_size() -> usize { 72 | KFX_SIZE 73 | } 74 | -------------------------------------------------------------------------------- /src/arch/aarch64/paging/entry.rs: -------------------------------------------------------------------------------- 1 | //! # Page table entry 2 | //! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html) 3 | 4 | bitflags! { 5 | pub struct EntryFlags: usize { 6 | const NO_CACHE = 1 << 2; 7 | const DEV_MEM = 2 << 2; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/arch/aarch64/paging/mapper.rs: -------------------------------------------------------------------------------- 1 | use crate::ipi::{ipi, IpiKind, IpiTarget}; 2 | 3 | use super::RmmA; 4 | 5 | pub use rmm::{Flusher, PageFlush, PageFlushAll}; 6 | 7 | pub struct InactiveFlusher { 8 | _inner: (), 9 | } 10 | impl Flusher for InactiveFlusher { 11 | fn consume(&mut self, flush: PageFlush) { 12 | // TODO: Push to TLB "mailbox" or tell it to reload CR3 if there are too many entries. 13 | unsafe { 14 | flush.ignore(); 15 | } 16 | } 17 | } 18 | impl Drop for InactiveFlusher { 19 | fn drop(&mut self) { 20 | ipi(IpiKind::Tlb, IpiTarget::Other); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/arch/aarch64/paging/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Paging 2 | //! 
Some code was borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html) 3 | 4 | use crate::device::cpu::registers::control_regs; 5 | 6 | pub use super::CurrentRmmArch as RmmA; 7 | pub use rmm::{Arch as RmmArch, PageFlags, PhysicalAddress, TableKind, VirtualAddress}; 8 | 9 | pub type PageMapper = rmm::PageMapper; 10 | 11 | pub mod entry; 12 | pub mod mapper; 13 | 14 | /// Size of pages 15 | pub const PAGE_SIZE: usize = RmmA::PAGE_SIZE; 16 | pub const PAGE_MASK: usize = RmmA::PAGE_OFFSET_MASK; 17 | 18 | /// Setup Memory Access Indirection Register 19 | #[cold] 20 | unsafe fn init_mair() { 21 | let mut val: control_regs::MairEl1 = control_regs::mair_el1(); 22 | 23 | val.insert(control_regs::MairEl1::DEVICE_MEMORY); 24 | val.insert(control_regs::MairEl1::NORMAL_UNCACHED_MEMORY); 25 | val.insert(control_regs::MairEl1::NORMAL_WRITEBACK_MEMORY); 26 | 27 | control_regs::mair_el1_write(val); 28 | } 29 | 30 | /// Initialize MAIR 31 | #[cold] 32 | pub unsafe fn init() { 33 | init_mair(); 34 | } 35 | 36 | /// Page 37 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 38 | pub struct Page { 39 | number: usize, 40 | } 41 | 42 | impl Page { 43 | pub fn start_address(self) -> VirtualAddress { 44 | VirtualAddress::new(self.number * PAGE_SIZE) 45 | } 46 | 47 | pub fn containing_address(address: VirtualAddress) -> Page { 48 | //TODO assert!(address.data() < 0x0000_8000_0000_0000 || address.data() >= 0xffff_8000_0000_0000, 49 | // "invalid address: 0x{:x}", address.data()); 50 | Page { 51 | number: address.data() / PAGE_SIZE, 52 | } 53 | } 54 | 55 | pub fn range_inclusive(start: Page, r#final: Page) -> PageIter { 56 | PageIter { 57 | start, 58 | end: r#final.next(), 59 | } 60 | } 61 | pub fn next(self) -> Page { 62 | self.next_by(1) 63 | } 64 | pub fn next_by(self, n: usize) -> Page { 65 | Self { 66 | number: self.number + n, 67 | } 68 | } 69 | pub fn offset_from(self, other: Self) -> usize { 70 | self.number - other.number 71 | } 72 | } 73 | 74 | 
pub struct PageIter { 75 | start: Page, 76 | end: Page, 77 | } 78 | 79 | impl Iterator for PageIter { 80 | type Item = Page; 81 | 82 | fn next(&mut self) -> Option { 83 | if self.start < self.end { 84 | let page = self.start; 85 | self.start = self.start.next(); 86 | Some(page) 87 | } else { 88 | None 89 | } 90 | } 91 | } 92 | 93 | /// Round down to the nearest multiple of page size 94 | pub fn round_down_pages(number: usize) -> usize { 95 | number - number % PAGE_SIZE 96 | } 97 | /// Round up to the nearest multiple of page size 98 | pub fn round_up_pages(number: usize) -> usize { 99 | round_down_pages(number + PAGE_SIZE - 1) 100 | } 101 | -------------------------------------------------------------------------------- /src/arch/aarch64/rmm.rs: -------------------------------------------------------------------------------- 1 | use rmm::{Arch, PageFlags, VirtualAddress}; 2 | 3 | pub unsafe fn page_flags(virt: VirtualAddress) -> PageFlags { 4 | use crate::kernel_executable_offsets::*; 5 | let virt_addr = virt.data(); 6 | 7 | if virt_addr >= __text_start() && virt_addr < __text_end() { 8 | // Remap text read-only, execute 9 | PageFlags::new().execute(true) 10 | } else if virt_addr >= __rodata_start() && virt_addr < __rodata_end() { 11 | // Remap rodata read-only, no execute 12 | PageFlags::new() 13 | } else { 14 | // Remap everything else read-write, no execute 15 | PageFlags::new().write(true) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/arch/aarch64/stop.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | pub unsafe fn kreset() -> ! { 4 | println!("kreset"); 5 | 6 | asm!("hvc #0", 7 | in("x0") 0x8400_0009_usize, 8 | options(noreturn), 9 | ) 10 | } 11 | 12 | pub unsafe fn emergency_reset() -> ! { 13 | asm!("hvc #0", 14 | in("x0") 0x8400_0009_usize, 15 | options(noreturn), 16 | ) 17 | } 18 | 19 | pub unsafe fn kstop() -> ! 
{ 20 | println!("kstop"); 21 | 22 | asm!("hvc #0", 23 | in("x0") 0x8400_0008_usize, 24 | options(noreturn), 25 | ) 26 | } 27 | -------------------------------------------------------------------------------- /src/arch/aarch64/time.rs: -------------------------------------------------------------------------------- 1 | use crate::time::NANOS_PER_SEC; 2 | 3 | pub fn monotonic_absolute() -> u128 { 4 | //TODO: aarch64 generic timer counter 5 | let ticks: usize; 6 | unsafe { core::arch::asm!("mrs {}, cntpct_el0", out(reg) ticks) }; 7 | let freq: usize; 8 | unsafe { core::arch::asm!("mrs {}, cntfrq_el0", out(reg) freq) }; 9 | 10 | ticks as u128 * NANOS_PER_SEC / freq as u128 11 | } 12 | -------------------------------------------------------------------------------- /src/arch/aarch64/vectors.rs: -------------------------------------------------------------------------------- 1 | core::arch::global_asm!( 2 | " 3 | // Exception vector stubs 4 | // 5 | // Unhandled exceptions spin in a wfi loop for the moment 6 | // This can be macro-ified 7 | 8 | .globl exception_vector_base 9 | 10 | .align 11 11 | exception_vector_base: 12 | 13 | // Synchronous 14 | .align 7 15 | __vec_00: 16 | b synchronous_exception_at_el1_with_sp0 17 | b __vec_00 18 | 19 | // IRQ 20 | .align 7 21 | __vec_01: 22 | b irq_at_el1 23 | b __vec_01 24 | 25 | // FIQ 26 | .align 7 27 | __vec_02: 28 | b unhandled_exception 29 | b __vec_02 30 | 31 | // SError 32 | .align 7 33 | __vec_03: 34 | b unhandled_exception 35 | b __vec_03 36 | 37 | // Synchronous 38 | .align 7 39 | __vec_04: 40 | b synchronous_exception_at_el1_with_spx 41 | b __vec_04 42 | 43 | // IRQ 44 | .align 7 45 | __vec_05: 46 | b irq_at_el1 47 | b __vec_05 48 | 49 | // FIQ 50 | .align 7 51 | __vec_06: 52 | b unhandled_exception 53 | b __vec_06 54 | 55 | // SError 56 | .align 7 57 | __vec_07: 58 | b unhandled_exception 59 | b __vec_07 60 | 61 | // Synchronous 62 | .align 7 63 | __vec_08: 64 | b synchronous_exception_at_el0 65 | b __vec_08 66 | 67 | 
// IRQ 68 | .align 7 69 | __vec_09: 70 | b irq_at_el0 71 | b __vec_09 72 | 73 | // FIQ 74 | .align 7 75 | __vec_10: 76 | b unhandled_exception 77 | b __vec_10 78 | 79 | // SError 80 | .align 7 81 | __vec_11: 82 | b unhandled_exception 83 | b __vec_11 84 | 85 | // Synchronous 86 | .align 7 87 | __vec_12: 88 | b unhandled_exception 89 | b __vec_12 90 | 91 | // IRQ 92 | .align 7 93 | __vec_13: 94 | b unhandled_exception 95 | b __vec_13 96 | 97 | // FIQ 98 | .align 7 99 | __vec_14: 100 | b unhandled_exception 101 | b __vec_14 102 | 103 | // SError 104 | .align 7 105 | __vec_15: 106 | b unhandled_exception 107 | b __vec_15 108 | 109 | .align 7 110 | exception_vector_end: 111 | " 112 | ); 113 | -------------------------------------------------------------------------------- /src/arch/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_arch = "aarch64")] 2 | #[macro_use] 3 | pub mod aarch64; 4 | #[cfg(target_arch = "aarch64")] 5 | pub use self::aarch64::*; 6 | 7 | #[cfg(target_arch = "x86")] 8 | #[macro_use] 9 | pub mod x86; 10 | #[cfg(target_arch = "x86")] 11 | pub use self::x86::*; 12 | 13 | #[cfg(target_arch = "x86_64")] 14 | #[macro_use] 15 | pub mod x86_64; 16 | #[cfg(target_arch = "x86_64")] 17 | pub use self::x86_64::*; 18 | 19 | #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] 20 | #[macro_use] 21 | mod x86_shared; 22 | 23 | #[cfg(target_arch = "riscv64")] 24 | #[macro_use] 25 | pub mod riscv64; 26 | #[cfg(target_arch = "riscv64")] 27 | pub use self::riscv64::*; 28 | -------------------------------------------------------------------------------- /src/arch/riscv64/consts.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | use super::CurrentRmmArch; 4 | use rmm::Arch; 5 | 6 | const PML4_SHIFT: usize = (CurrentRmmArch::PAGE_LEVELS - 1) * CurrentRmmArch::PAGE_ENTRY_SHIFT 7 | + CurrentRmmArch::PAGE_SHIFT; 8 | /// The size of a single PML4 9 | 
pub const PML4_SIZE: usize = 1_usize << PML4_SHIFT; 10 | pub const PML4_MASK: usize = CurrentRmmArch::PAGE_ENTRY_MASK << PML4_SHIFT; 11 | 12 | /// Offset of recursive paging (deprecated, but still reserved) 13 | pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; 14 | pub const RECURSIVE_PAGE_PTE3: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE; 15 | 16 | /// Offset of kernel 17 | pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE; 18 | pub const KERNEL_PTE3: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE; 19 | 20 | /// Offset to kernel heap 21 | pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE; 22 | pub const KERNEL_HEAP_PTE3: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; 23 | /// Size of kernel heap 24 | pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB 25 | 26 | /// Offset of temporary mapping for misc kernel bring-up actions 27 | pub const KERNEL_TMP_MISC_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; 28 | 29 | /// Offset to kernel percpu variables 30 | pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_TMP_MISC_OFFSET - PML4_SIZE; 31 | pub const KERNEL_PERCPU_PML4: usize = (KERNEL_PERCPU_OFFSET & PML4_MASK) / PML4_SIZE; 32 | /// Size of kernel percpu variables 33 | pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB 34 | pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT; 35 | 36 | /// Offset of physmap 37 | // This needs to match RMM's PHYS_OFFSET 38 | pub const PHYS_OFFSET: usize = (-1_isize << (CurrentRmmArch::PAGE_ADDRESS_SHIFT - 1)) as usize; 39 | pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK) / PML4_SIZE; 40 | 41 | /// End offset of the user image, i.e. 
kernel start 42 | pub const USER_END_OFFSET: usize = 1_usize << (CurrentRmmArch::PAGE_ADDRESS_SHIFT - 1); 43 | -------------------------------------------------------------------------------- /src/arch/riscv64/debug.rs: -------------------------------------------------------------------------------- 1 | use crate::log::{Log, LOG}; 2 | use core::fmt; 3 | use spin::MutexGuard; 4 | 5 | #[cfg(feature = "serial_debug")] 6 | use super::device::serial::{SerialPort, COM1}; 7 | 8 | #[cfg(feature = "graphical_debug")] 9 | use crate::devices::graphical_debug::{DebugDisplay, DEBUG_DISPLAY}; 10 | 11 | pub struct Writer<'a> { 12 | log: MutexGuard<'a, Option>, 13 | #[cfg(feature = "serial_debug")] 14 | serial: MutexGuard<'a, Option>, 15 | #[cfg(feature = "graphical_debug")] 16 | display: MutexGuard<'a, Option>, 17 | } 18 | 19 | impl<'a> Writer<'a> { 20 | pub fn new() -> Writer<'a> { 21 | Writer { 22 | log: LOG.lock(), 23 | #[cfg(feature = "graphical_debug")] 24 | display: DEBUG_DISPLAY.lock(), 25 | #[cfg(feature = "serial_debug")] 26 | serial: COM1.lock(), 27 | } 28 | } 29 | 30 | pub fn write(&mut self, buf: &[u8], preserve: bool) { 31 | if preserve { 32 | if let Some(ref mut log) = *self.log { 33 | log.write(buf); 34 | } 35 | } 36 | 37 | #[cfg(feature = "graphical_debug")] 38 | { 39 | if let Some(ref mut display) = *self.display { 40 | let _ = display.write(buf); 41 | } 42 | } 43 | 44 | #[cfg(feature = "serial_debug")] 45 | { 46 | if let Some(ref mut serial) = *self.serial { 47 | serial.write(buf); 48 | } 49 | } 50 | } 51 | } 52 | 53 | impl<'a> fmt::Write for Writer<'a> { 54 | fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { 55 | self.write(s.as_bytes(), true); 56 | Ok(()) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/arch/riscv64/device/cpu/mod.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::{Result, Write}; 2 | 3 | pub fn cpu_info(_w: &mut W) -> Result 
{ 4 | unimplemented!() 5 | } 6 | -------------------------------------------------------------------------------- /src/arch/riscv64/device/irqchip/clint.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | use syscall::{Io, Mmio}; 3 | use crate::context::switch::tick; 4 | 5 | #[repr(packed(4))] 6 | #[repr(C)] 7 | struct ClintRegs { 8 | /// per-hart MSIP registers 9 | /// bit 0: trigger IPI for the hart 10 | msip: [Mmio; 4095], // +0000 -- 3fff 11 | _rsrv1: u32, 12 | /// per-hart MTIMECMP registers 13 | /// timer interrupt trigger threshold 14 | mtimecmp: [Mmio; 4095], // +4000 - bff7 15 | mtime: Mmio // current time 16 | } 17 | 18 | pub struct Clint { 19 | regs: &'static mut ClintRegs, 20 | freq: u64 21 | } 22 | 23 | pub static CLINT: Mutex> = Mutex::new(None); 24 | 25 | impl Clint { 26 | pub fn new(addr: *mut u8, size: usize, freq: usize) -> Self { 27 | assert!(size >= core::mem::size_of::()); 28 | Self { 29 | regs: unsafe { (addr as *mut ClintRegs).as_mut().unwrap() }, 30 | freq: freq as u64 31 | } 32 | } 33 | 34 | pub fn init(self: &mut Self) { 35 | (*self.regs).mtimecmp[0].write((*self.regs).mtime.read() + self.freq / 100); 36 | } 37 | 38 | pub fn timer_irq(self: &mut Self, hart: usize) { 39 | (*self.regs).mtimecmp[hart].write((*self.regs).mtimecmp[hart].read() + self.freq / 100); 40 | tick(); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/arch/riscv64/device/irqchip/mod.rs: -------------------------------------------------------------------------------- 1 | use self::clint::Clint; 2 | use crate::dtb::irqchip::InterruptController; 3 | use alloc::boxed::Box; 4 | use fdt::Fdt; 5 | 6 | pub(crate) mod hlic; 7 | mod plic; 8 | 9 | #[path = "clint_sbi.rs"] 10 | mod clint; 11 | 12 | // pub mod clint; // actual clint.rs off limits if SBI is present 13 | 14 | pub fn new_irqchip(ic_str: &str) -> Option> { 15 | if ic_str.contains("riscv,cpu-intc") { 16 | 
Some(Box::new(hlic::Hlic::new())) 17 | } else if ic_str.contains("riscv,plic0") || ic_str.contains("sifive,plic-1.0.0") { 18 | Some(Box::new(plic::Plic::new())) 19 | } else { 20 | log::warn!("no driver for interrupt controller {:?}", ic_str); 21 | None 22 | } 23 | } 24 | 25 | pub unsafe fn init_clint(fdt: &Fdt) { 26 | let cpus = fdt.find_node("/cpus").unwrap(); 27 | let clock_freq = cpus 28 | .property("timebase-frequency") 29 | .unwrap() 30 | .as_usize() 31 | .unwrap(); 32 | 33 | let clint_node = fdt.find_node("/soc/clint").unwrap(); 34 | assert!(clint_node 35 | .compatible() 36 | .unwrap() 37 | .all() 38 | .find(|x| ((*x).eq("riscv,clint0"))) 39 | .is_some()); 40 | 41 | let clint = Clint::new(clock_freq, &clint_node); 42 | *clint::CLINT.lock() = Some(clint); 43 | clint::CLINT.lock().as_mut().unwrap().init(0); 44 | } 45 | -------------------------------------------------------------------------------- /src/arch/riscv64/device/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | arch::{device::irqchip::hlic, time}, 3 | dtb::DTB_BINARY, 4 | }; 5 | use fdt::{ 6 | node::{FdtNode, NodeProperty}, 7 | Fdt, 8 | }; 9 | 10 | pub mod cpu; 11 | pub(crate) mod irqchip; 12 | pub mod serial; 13 | 14 | use crate::arch::device::irqchip::init_clint; 15 | 16 | fn string_property(name: &str) -> bool { 17 | name == "compatible" 18 | || name == "model" 19 | || name == "device_type" 20 | || name == "status" 21 | || name == "riscv,isa-base" 22 | || name == "riscv,isa" 23 | || name == "mmu-type" 24 | || name == "stdout-path" 25 | } 26 | 27 | fn print_property(prop: &NodeProperty, n_spaces: usize) { 28 | (0..n_spaces).for_each(|_| print!(" ")); 29 | print!("{} =", prop.name); 30 | if string_property(prop.name) 31 | && let Some(str) = prop.as_str() 32 | { 33 | println!(" \"{}\"", str); 34 | } else if let Some(value) = prop.as_usize() { 35 | println!(" 0x{:08x}", value); 36 | } else { 37 | for v in prop.value { 38 | print!(" {:02x}", 
v); 39 | } 40 | println!(); 41 | } 42 | } 43 | fn print_node(node: &FdtNode<'_, '_>, n_spaces: usize) { 44 | (0..n_spaces).for_each(|_| print!(" ")); 45 | println!("{}/", node.name); 46 | for prop in node.properties() { 47 | print_property(&prop, n_spaces + 4); 48 | } 49 | 50 | for child in node.children() { 51 | print_node(&child, n_spaces + 4); 52 | } 53 | } 54 | 55 | pub(crate) fn dump_fdt(fdt: &Fdt) { 56 | if let Some(root) = fdt.find_node("/") { 57 | print_node(&root, 0); 58 | } 59 | } 60 | 61 | unsafe fn init_intc(cpu: &FdtNode) { 62 | let intc_node = cpu 63 | .children() 64 | .find(|x| x.name == "interrupt-controller") 65 | .unwrap(); 66 | assert_eq!(intc_node.compatible().unwrap().first(), "riscv,cpu-intc"); 67 | // This controller is hardwired into interrupt handler code and has no Mmios 68 | hlic::init(); // enable interrupts at HLIC level 69 | } 70 | 71 | pub unsafe fn init() { 72 | let data = DTB_BINARY.get().unwrap(); 73 | let fdt = Fdt::new(data).unwrap(); 74 | 75 | crate::dtb::irqchip::init(&fdt); 76 | 77 | let cpu = fdt.find_node(format!("/cpus/cpu@{}", 0).as_str()).unwrap(); 78 | init_intc(&cpu); 79 | init_time(&fdt); 80 | } 81 | 82 | fn init_time(fdt: &Fdt) { 83 | let cpus = fdt.find_node("/cpus").unwrap(); 84 | let clock_freq = cpus 85 | .property("timebase-frequency") 86 | .unwrap() 87 | .as_usize() 88 | .unwrap(); 89 | time::init(clock_freq); 90 | } 91 | 92 | pub unsafe fn init_noncore() { 93 | let data = DTB_BINARY.get().unwrap(); 94 | let fdt = Fdt::new(data).unwrap(); 95 | 96 | init_clint(&fdt); 97 | serial::init(&fdt); 98 | } 99 | 100 | #[derive(Default)] 101 | pub struct ArchPercpuMisc; 102 | -------------------------------------------------------------------------------- /src/arch/riscv64/device/serial.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use fdt::Fdt; 3 | use log::info; 4 | use spin::Mutex; 5 | use syscall::Mmio; 6 | 7 | use crate::{ 8 | devices::uart_16550, 9 
| dtb::{ 10 | diag_uart_range, get_interrupt, interrupt_parent, 11 | irqchip::{register_irq, InterruptHandler, IRQ_CHIP}, 12 | }, 13 | scheme::{ 14 | debug::{debug_input, debug_notify}, 15 | irq::irq_trigger, 16 | }, 17 | }; 18 | 19 | pub struct SerialPort { 20 | inner: &'static mut uart_16550::SerialPort>, 21 | } 22 | impl SerialPort { 23 | pub fn write(&mut self, buf: &[u8]) { 24 | self.inner.write(buf) 25 | } 26 | pub fn receive(&mut self) { 27 | while let Some(c) = self.inner.receive() { 28 | debug_input(c); 29 | } 30 | debug_notify(); 31 | } 32 | } 33 | 34 | pub static COM1: Mutex> = Mutex::new(None); 35 | 36 | pub struct Com1Irq {} 37 | 38 | impl InterruptHandler for Com1Irq { 39 | fn irq_handler(&mut self, irq: u32) { 40 | if let Some(ref mut serial_port) = *COM1.lock() { 41 | serial_port.receive(); 42 | }; 43 | unsafe { 44 | irq_trigger(irq as u8); 45 | IRQ_CHIP.irq_eoi(irq); 46 | } 47 | } 48 | } 49 | 50 | pub unsafe fn init_early(dtb: &Fdt) { 51 | if COM1.lock().is_some() { 52 | // Hardcoded UART 53 | return; 54 | } 55 | 56 | if let Some((phys, _, _, _, compatible)) = diag_uart_range(dtb) { 57 | let virt = crate::PHYS_OFFSET + phys; 58 | let port = if compatible == "ns16550a" { 59 | let serial_port = uart_16550::SerialPort::>::new(virt); 60 | serial_port.init(); 61 | Some(SerialPort { inner: serial_port }) 62 | } else { 63 | None 64 | }; 65 | match port { 66 | Some(port) => { 67 | *COM1.lock() = Some(port); 68 | } 69 | None => {} 70 | } 71 | } 72 | } 73 | 74 | pub unsafe fn init(fdt: &Fdt) -> Option<()> { 75 | if let Some(node) = fdt.find_compatible(&["ns16550a"]) { 76 | let intr = get_interrupt(fdt, &node, 0).unwrap(); 77 | let interrupt_parent = interrupt_parent(fdt, &node)?; 78 | let phandle = interrupt_parent.property("phandle")?.as_usize()? 
as u32; 79 | let ic_idx = IRQ_CHIP.phandle_to_ic_idx(phandle)?; 80 | 81 | let virq = IRQ_CHIP.irq_chip_list.chips[ic_idx] 82 | .ic 83 | .irq_xlate(intr) 84 | .unwrap(); 85 | info!("serial_port virq = {}", virq); 86 | register_irq(virq as u32, Box::new(Com1Irq {})); 87 | IRQ_CHIP.irq_enable(virq as u32); 88 | } 89 | if let Some(ref mut _serial_port) = *COM1.lock() { 90 | // serial_port.enable_irq(); // FIXME receive int is enabled by default in 16550. Disable by default? 91 | } 92 | Some(()) 93 | } 94 | -------------------------------------------------------------------------------- /src/arch/riscv64/interrupt/mod.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | #[macro_use] 4 | mod handler; 5 | 6 | mod exception; 7 | pub mod syscall; 8 | pub mod trace; 9 | 10 | pub use handler::InterruptStack; 11 | 12 | /// Clear interrupts 13 | #[inline(always)] 14 | pub unsafe fn disable() { 15 | asm!("csrci sstatus, 1 << 1") 16 | } 17 | 18 | /// Set interrupts 19 | #[inline(always)] 20 | pub unsafe fn enable() { 21 | asm!("csrsi sstatus, 1 << 1") 22 | } 23 | 24 | /// Set interrupts and halt 25 | /// This will atomically wait for the next interrupt 26 | /// Performing enable followed by halt is not guaranteed to be atomic, use this instead! 27 | #[inline(always)] 28 | pub unsafe fn enable_and_halt() { 29 | asm!("csrsi sstatus, 1 << 1", "wfi") 30 | } 31 | 32 | /// Set interrupts and nop 33 | /// This will enable interrupts and allow the IF flag to be processed 34 | /// Simply enabling interrupts does not gurantee that they will trigger, use this instead! 
/// One frame of a stack walk: the saved frame pointer and a pointer to the
/// saved return address.
pub struct StackTrace {
    pub fp: usize,
    pub pc_ptr: *const usize,
}

impl StackTrace {
    /// Capture a stack trace starting at the caller's frame.
    ///
    /// With the RISC-V frame layout used here, the return address is stored
    /// at `fp - 8` and the caller's frame pointer at `fp - 16`, hence the
    /// two `checked_sub` steps; `None` is returned on underflow.
    ///
    /// # Safety
    /// Requires an intact frame-pointer chain (frame pointers enabled).
    #[inline(always)]
    pub unsafe fn start() -> Option<StackTrace> {
        let fp: usize;
        asm!("mv {}, fp", out(reg) fp);

        let pc_ptr = fp.checked_sub(mem::size_of::<usize>())?;
        let fp = pc_ptr.checked_sub(mem::size_of::<usize>())?;
        Some(StackTrace {
            fp,
            pc_ptr: pc_ptr as *const usize,
        })
    }

    /// Step to the previous (caller's) frame by dereferencing the saved
    /// frame pointer, returning `None` on underflow.
    ///
    /// # Safety
    /// `self.fp` must point at a valid, mapped saved frame pointer.
    pub unsafe fn next(self) -> Option<StackTrace> {
        let fp = *(self.fp as *const usize);
        let pc_ptr = fp.checked_sub(mem::size_of::<usize>())?;
        let fp = pc_ptr.checked_sub(mem::size_of::<usize>())?;
        Some(StackTrace {
            fp,
            pc_ptr: pc_ptr as *const usize,
        })
    }
}
/// Kinds of inter-processor interrupt, with their interrupt-vector numbers.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum IpiKind {
    // Wake a sleeping CPU.
    Wakeup = 0x40,
    // Request a TLB shootdown.
    Tlb = 0x41,
    // Request a context switch.
    Switch = 0x42,
    // Forwarded timer tick.
    Pit = 0x43,
}

/// Which set of CPUs an IPI is sent to.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum IpiTarget {
    // Only the current CPU.
    Current = 1,
    // Every CPU, including the sender.
    All = 2,
    // Every CPU except the sender.
    Other = 3,
}

/// Send an IPI of `kind` to `target`. No-op without the `multi_core` feature.
#[cfg(not(feature = "multi_core"))]
#[inline(always)]
pub fn ipi(_kind: IpiKind, _target: IpiTarget) {}

/// Send an IPI of `kind` to `target`.
/// NOTE(review): currently still a stub even with `multi_core` enabled.
#[cfg(feature = "multi_core")]
#[inline(always)]
pub fn ipi(_kind: IpiKind, _target: IpiTarget) {}

/// Send an IPI of `kind` to one specific CPU. No-op without `multi_core`.
#[cfg(not(feature = "multi_core"))]
#[inline(always)]
pub fn ipi_single(_kind: IpiKind, _target: crate::cpu_set::LogicalCpuId) {}

/// Send an IPI of `kind` to one specific CPU.
/// NOTE(review): currently still a stub even with `multi_core` enabled.
#[cfg(feature = "multi_core")]
#[inline(always)]
pub fn ipi_single(_kind: IpiKind, _target: crate::cpu_set::LogicalCpuId) {}
println { 13 | () => (print!("\n")); 14 | ($fmt:expr) => (print!(concat!($fmt, "\n"))); 15 | ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); 16 | } 17 | -------------------------------------------------------------------------------- /src/arch/riscv64/misc.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | use crate::{ 4 | cpu_set::LogicalCpuId, 5 | paging::{RmmA, RmmArch}, 6 | percpu::PercpuBlock, 7 | }; 8 | 9 | #[repr(C)] 10 | pub struct ArchPercpu { 11 | // These fields must be kept first and in this order. Assembly in exception.rs depends on it 12 | pub tmp: usize, 13 | pub s_sp: usize, 14 | 15 | pub percpu: PercpuBlock, 16 | } 17 | 18 | impl PercpuBlock { 19 | pub fn current() -> &'static Self { 20 | unsafe { 21 | let tp: *const ArchPercpu; 22 | asm!( "mv t0, tp", out("t0") tp ); 23 | let arch_percpu = &*tp; 24 | &arch_percpu.percpu 25 | } 26 | } 27 | } 28 | 29 | #[cold] 30 | pub unsafe fn init(cpu_id: LogicalCpuId) { 31 | let frame = crate::memory::allocate_frame().expect("failed to allocate percpu memory"); 32 | let virt = RmmA::phys_to_virt(frame.base()).data() as *mut ArchPercpu; 33 | 34 | virt.write(ArchPercpu { 35 | tmp: 0, 36 | s_sp: 0, 37 | percpu: PercpuBlock::init(cpu_id), 38 | }); 39 | 40 | asm!( 41 | "mv tp, {}", 42 | "csrw sscratch, tp", 43 | in(reg) virt as usize 44 | ); 45 | } 46 | -------------------------------------------------------------------------------- /src/arch/riscv64/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod macros; 3 | 4 | pub mod consts; 5 | pub mod debug; 6 | pub mod device; 7 | pub mod interrupt; 8 | pub mod ipi; 9 | pub mod misc; 10 | pub mod paging; 11 | pub mod rmm; 12 | pub mod start; 13 | pub mod stop; 14 | pub mod time; 15 | 16 | pub use ::rmm::RiscV64Sv48Arch as CurrentRmmArch; 17 | use core::arch::naked_asm; 18 | 19 | pub use arch_copy_to_user as 
// The same routine serves both directions; only the SUM window matters.
pub use arch_copy_to_user as arch_copy_from_user;

/// Copies `len` bytes from `src` to `dst` with user-memory access enabled.
///
/// Sets the SUM bit (sstatus bit 18) around the copy so supervisor code may
/// touch user pages, and clears it again afterwards. If both pointers are
/// 8-byte aligned, copies in 8-byte words first, then finishes the remainder
/// (or the whole buffer, when unaligned) byte by byte.
///
/// The visible code path always returns 0 (`a0 = x0`) — presumably a fault
/// during the copy is intercepted elsewhere and reported as nonzero; the
/// `.usercopy-fns` section placement suggests the trap handler special-cases
/// faults inside this function. TODO confirm against exception.rs.
#[link_section = ".usercopy-fns"]
#[naked]
pub unsafe extern "C" fn arch_copy_to_user(dst: usize, src: usize, len: usize) -> u8 {
    naked_asm!(
        "
        addi sp, sp, -16
        sd fp, 0(sp)
        sd ra, 8(sp)
        addi fp, sp, 16
        li t1, 1 << 18 // SUM
        csrs sstatus, t1
        jal 2f
        csrc sstatus, t1
        ld ra, -8(fp)
        ld fp, -16(fp)
        addi sp, sp, 16
        ret

    2:  or t0, a0, a1
        andi t0, t0, 7
        bne t0, x0, 4f
        srli t2, a2, 3
        andi a2, a2, 7
        beq t2, x0, 4f
    3:  ld t0, 0(a1)
        sd t0, 0(a0)
        addi a0, a0, 8
        addi a1, a1, 8
        addi t2, t2, -1
        bne t2, x0, 3b

    4:  beq a2, x0, 5f
        lb t0, 0(a1)
        sb t0, 0(a0)
        addi a0, a0, 1
        addi a1, a1, 1
        addi a2, a2, -1
        bne a2, x0, 4b
    5:  mv a0, x0
        ret
    "
    )
}

/// Size of the per-context FP/extension state save area on riscv64.
pub const KFX_SIZE: usize = 1024;

// This function exists as the KFX size is dynamic on x86_64.
pub fn kfx_size() -> usize {
    KFX_SIZE
}
{ 6 | pub struct EntryFlags: usize { 7 | const NO_CACHE = 1 << 4; 8 | const DEV_MEM = 0; 9 | const WRITE_COMBINING = 0; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/arch/riscv64/paging/mapper.rs: -------------------------------------------------------------------------------- 1 | use crate::ipi::{ipi, IpiKind, IpiTarget}; 2 | 3 | use super::RmmA; 4 | 5 | pub use rmm::{Flusher, PageFlush, PageFlushAll}; 6 | 7 | pub struct InactiveFlusher { 8 | _inner: (), 9 | } 10 | impl InactiveFlusher { 11 | // TODO: cpu id 12 | pub fn new() -> Self { 13 | Self { _inner: () } 14 | } 15 | } 16 | 17 | impl Flusher for InactiveFlusher { 18 | fn consume(&mut self, flush: PageFlush) { 19 | // TODO: Push to TLB "mailbox" or tell it to reload CR3 if there are too many entries. 20 | unsafe { 21 | flush.ignore(); 22 | } 23 | } 24 | } 25 | impl Drop for InactiveFlusher { 26 | fn drop(&mut self) { 27 | ipi(IpiKind::Tlb, IpiTarget::Other); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/arch/riscv64/paging/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | pub use super::CurrentRmmArch as RmmA; 4 | pub use rmm::{Arch as RmmArch, PageFlags, PhysicalAddress, TableKind, VirtualAddress}; 5 | 6 | pub type PageMapper = rmm::PageMapper; 7 | pub use crate::rmm::KernelMapper; 8 | 9 | pub mod entry; 10 | pub mod mapper; 11 | 12 | /// Number of entries per page table 13 | pub const ENTRY_COUNT: usize = RmmA::PAGE_ENTRIES; 14 | 15 | /// Size of pages 16 | pub const PAGE_SIZE: usize = RmmA::PAGE_SIZE; 17 | pub const PAGE_MASK: usize = RmmA::PAGE_OFFSET_MASK; 18 | 19 | #[cold] 20 | pub unsafe fn init() { 21 | // Assuming SBI already set up PMAs correctly for us 22 | // TODO: detect Svpbmt present/enabled and override device memory with PBMT=IO 23 | } 24 | 25 | /// Page 26 | #[derive(Debug, Clone, Copy, PartialEq, Eq, 
PartialOrd, Ord)] 27 | pub struct Page { 28 | number: usize, 29 | } 30 | 31 | impl Page { 32 | pub fn start_address(self) -> VirtualAddress { 33 | VirtualAddress::new(self.number * PAGE_SIZE) 34 | } 35 | 36 | pub fn containing_address(address: VirtualAddress) -> Page { 37 | Page { 38 | number: address.data() / PAGE_SIZE, 39 | } 40 | } 41 | 42 | pub fn range_inclusive(start: Page, r#final: Page) -> PageIter { 43 | PageIter { 44 | start, 45 | end: r#final.next(), 46 | } 47 | } 48 | 49 | pub fn next(self) -> Page { 50 | self.next_by(1) 51 | } 52 | pub fn next_by(self, n: usize) -> Page { 53 | Self { 54 | number: self.number + n, 55 | } 56 | } 57 | 58 | pub fn offset_from(self, other: Self) -> usize { 59 | self.number - other.number 60 | } 61 | } 62 | 63 | pub struct PageIter { 64 | start: Page, 65 | end: Page, 66 | } 67 | 68 | impl Iterator for PageIter { 69 | type Item = Page; 70 | 71 | fn next(&mut self) -> Option { 72 | if self.start < self.end { 73 | let page = self.start; 74 | self.start = self.start.next(); 75 | Some(page) 76 | } else { 77 | None 78 | } 79 | } 80 | } 81 | 82 | /// Round down to the nearest multiple of page size 83 | pub fn round_down_pages(number: usize) -> usize { 84 | number - number % PAGE_SIZE 85 | } 86 | /// Round up to the nearest multiple of page size 87 | pub fn round_up_pages(number: usize) -> usize { 88 | round_down_pages(number + PAGE_SIZE - 1) 89 | } 90 | -------------------------------------------------------------------------------- /src/arch/riscv64/rmm.rs: -------------------------------------------------------------------------------- 1 | use rmm::{Arch, PageFlags, VirtualAddress}; 2 | 3 | pub struct KernelMapper { 4 | mapper: crate::paging::PageMapper, 5 | ro: bool, 6 | } 7 | impl KernelMapper { 8 | pub fn lock() -> Self { 9 | unimplemented!() 10 | } 11 | pub fn get_mut(&mut self) -> Option<&mut crate::paging::PageMapper> { 12 | unimplemented!() 13 | } 14 | } 15 | 16 | pub unsafe fn page_flags(virt: VirtualAddress) -> PageFlags 
{ 17 | use crate::kernel_executable_offsets::*; 18 | let virt_addr = virt.data(); 19 | 20 | if virt_addr >= __text_start() && virt_addr < __text_end() { 21 | // Remap text read-only, execute 22 | PageFlags::new().execute(true) 23 | } else if virt_addr >= __rodata_start() && virt_addr < __rodata_end() { 24 | // Remap rodata read-only, no execute 25 | PageFlags::new() 26 | } else { 27 | // Remap everything else read-write, no execute 28 | PageFlags::new().write(true) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/arch/riscv64/stop.rs: -------------------------------------------------------------------------------- 1 | pub unsafe fn kreset() -> ! { 2 | println!("kreset"); 3 | unimplemented!() 4 | } 5 | 6 | pub unsafe fn emergency_reset() -> ! { 7 | unimplemented!() 8 | } 9 | 10 | pub unsafe fn kstop() -> ! { 11 | println!("kstop"); 12 | unimplemented!() 13 | } 14 | -------------------------------------------------------------------------------- /src/arch/riscv64/time.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | arch::asm, 3 | sync::atomic::{AtomicUsize, Ordering}, 4 | }; 5 | 6 | static MTIME_FREQ_HZ: AtomicUsize = AtomicUsize::new(0); 7 | 8 | pub fn init(freq_hz: usize) { 9 | MTIME_FREQ_HZ.store(freq_hz, Ordering::Relaxed); 10 | } 11 | 12 | pub fn monotonic_absolute() -> u128 { 13 | let freq_hz = MTIME_FREQ_HZ.load(Ordering::Relaxed); 14 | if freq_hz > 0 { 15 | let counter: usize; 16 | unsafe { 17 | asm!( 18 | "rdtime t0", 19 | lateout("t0") counter 20 | ); 21 | }; 22 | counter as u128 * 1_000_000_000u128 / freq_hz as u128 23 | } else { 24 | 0 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/arch/x86/consts.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | // Because the memory map is so important to not be aliased, it is defined here, 
// Because the memory map is so important to not be aliased, it is defined here, in one place.
// On 32-bit x86 the whole kernel lives in the top gigabyte of the 4 GiB
// virtual space; everything below USER_END_OFFSET belongs to userspace.

/// Offset of kernel (256 MiB max)
pub const KERNEL_OFFSET: usize = 0xC000_0000;

// Framebuffer mapped by bootloader to 0xD000_0000 (128 MiB max)

// Offset to APIC mappings (optional) — one page each, laid out back to back.
pub const LAPIC_OFFSET: usize = 0xD800_0000;
pub const IOAPIC_OFFSET: usize = LAPIC_OFFSET + 4096;
pub const HPET_OFFSET: usize = IOAPIC_OFFSET + 4096;

/// Offset to kernel heap (256 MiB max)
pub const KERNEL_HEAP_OFFSET: usize = 0xE000_0000;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = rmm::MEGABYTE;

/// Offset to kernel percpu variables (256 MiB max)
pub const KERNEL_PERCPU_OFFSET: usize = 0xF000_0000;
/// Size of kernel percpu variables, expressed as a shift so per-CPU slots can
/// be addressed by `cpu_id << KERNEL_PERCPU_SHIFT`.
pub const KERNEL_PERCPU_SHIFT: u8 = 16; // 2^16 = 64 KiB
pub const KERNEL_PERCPU_SIZE: usize = 1_usize << KERNEL_PERCPU_SHIFT;

/// Offset of physmap (1 GiB max)
// This needs to match RMM's PHYS_OFFSET
pub const PHYS_OFFSET: usize = 0x8000_0000;

/// End offset of the user image, i.e. kernel start
pub const USER_END_OFFSET: usize = 0x8000_0000;
use crate::{
    ptrace, syscall,
    syscall::flag::{PTRACE_FLAG_IGNORE, PTRACE_STOP_POST_SYSCALL, PTRACE_STOP_PRE_SYSCALL},
};

/// No syscall-specific MSR/IDT setup is needed on 32-bit x86; the entry is
/// installed with the rest of the IDT.
pub unsafe fn init() {}

/// Wraps a syscall body with ptrace pre-/post-syscall breakpoints.
///
/// The pre-syscall stop may tell us to skip the syscall entirely
/// (PTRACE_FLAG_IGNORE); when no tracer is attached, `allowed` is `None` and
/// the syscall runs unconditionally.
macro_rules! with_interrupt_stack {
    (|$stack:ident| $code:block) => {{
        let allowed = ptrace::breakpoint_callback(PTRACE_STOP_PRE_SYSCALL, None)
            .and_then(|_| ptrace::next_breakpoint().map(|f| !f.contains(PTRACE_FLAG_IGNORE)));

        if allowed.unwrap_or(true) {
            // If the syscall is `clone`, the clone won't return here. Instead,
            // it'll return early and leave any undropped values. This is
            // actually GOOD, because any references are at that point UB
            // anyway, because they are based on the wrong stack.
            let $stack = &mut *$stack;
            $code
        }

        ptrace::breakpoint_callback(PTRACE_STOP_POST_SYSCALL, None);
    }};
}

// Syscall entry point: unpacks the i386 syscall ABI from the saved register
// frame (eax = number, then ebx/ecx/edx/esi/edi as arguments) and writes the
// return value back into eax.
interrupt_stack!(syscall, |stack| {
    with_interrupt_stack!(|stack| {
        let scratch = &stack.scratch;
        let preserved = &stack.preserved;
        let ret = syscall::syscall(
            scratch.eax,
            preserved.ebx,
            scratch.ecx,
            scratch.edx,
            preserved.esi,
            preserved.edi,
        );
        stack.scratch.eax = ret;
    })
});

pub use super::handler::enter_usermode;
/// Flusher for modifications to a page table that is not currently active on
/// this CPU.
///
/// Individual flushes are discarded (a local `invlpg` would be pointless);
/// dropping the flusher sends other CPUs a TLB-shootdown IPI in case one of
/// them has the table loaded.
pub struct InactiveFlusher {
    _inner: (),
}
impl Flusher for InactiveFlusher {
    fn consume(&mut self, flush: PageFlush) {
        // TODO: Push to TLB "mailbox" or tell it to reload CR3 if there are too many entries.
        unsafe {
            flush.ignore();
        }
    }
}
impl Drop for InactiveFlusher {
    fn drop(&mut self) {
        ipi(IpiKind::Tlb, IpiTarget::Other);
    }
}
/// Setup page attribute table
///
/// Programs IA32_PAT so the eight PAT indices decode to:
///   0 = write-back, 1 = write-through, 2 = UC- (uncached), 3 = UC,
///   4 = write-combining, 5..7 mirror 1..3.
/// The upper half mirrors the lower except index 4, so setting the PAT bit in
/// a PTE (index +4) selects write-combining while the other combinations keep
/// their usual meaning.
#[cold]
unsafe fn init_pat() {
    // IA32_PAT memory-type encodings (Intel SDM).
    let uncacheable = 0;
    let write_combining = 1;
    let write_through = 4;
    //let write_protected = 5;
    let write_back = 6;
    let uncached = 7;

    let pat0 = write_back;
    let pat1 = write_through;
    let pat2 = uncached;
    let pat3 = uncacheable;

    let pat4 = write_combining;
    let pat5 = pat1;
    let pat6 = pat2;
    let pat7 = pat3;

    // Each PAT entry occupies one byte of the 64-bit MSR.
    msr::wrmsr(
        msr::IA32_PAT,
        pat7 << 56
            | pat6 << 48
            | pat5 << 40
            | pat4 << 32
            | pat3 << 24
            | pat2 << 16
            | pat1 << 8
            | pat0,
    );
}

/// One-time paging initialization for this CPU.
#[cold]
pub unsafe fn init() {
    init_pat();
}
self.number - other.number 101 | } 102 | } 103 | 104 | pub struct PageIter { 105 | start: Page, 106 | end: Page, 107 | } 108 | 109 | impl Iterator for PageIter { 110 | type Item = Page; 111 | 112 | fn next(&mut self) -> Option { 113 | if self.start < self.end { 114 | let page = self.start; 115 | self.start = self.start.next(); 116 | Some(page) 117 | } else { 118 | None 119 | } 120 | } 121 | } 122 | 123 | /// Round down to the nearest multiple of page size 124 | pub fn round_down_pages(number: usize) -> usize { 125 | number - number % PAGE_SIZE 126 | } 127 | /// Round up to the nearest multiple of page size 128 | pub fn round_up_pages(number: usize) -> usize { 129 | round_down_pages(number + PAGE_SIZE - 1) 130 | } 131 | -------------------------------------------------------------------------------- /src/arch/x86/rmm.rs: -------------------------------------------------------------------------------- 1 | use rmm::{Arch, PageFlags, VirtualAddress}; 2 | 3 | pub unsafe fn page_flags(virt: VirtualAddress) -> PageFlags { 4 | use crate::kernel_executable_offsets::*; 5 | let virt_addr = virt.data(); 6 | 7 | if virt_addr >= __text_start() && virt_addr < __text_end() { 8 | // Remap text read-only, execute 9 | PageFlags::new().execute(true) 10 | } else if virt_addr >= __rodata_start() && virt_addr < __rodata_end() { 11 | // Remap rodata read-only, no execute 12 | PageFlags::new() 13 | } else { 14 | // Remap everything else read-write, no execute 15 | PageFlags::new().write(true) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/arch/x86_64/consts.rs: -------------------------------------------------------------------------------- 1 | // Because the memory map is so important to not be aliased, it is defined here, in one place. 2 | // 3 | // - The lower half (256 PML4 entries; 128 TiB) is reserved for userspace. 
// Because the memory map is so important to not be aliased, it is defined here, in one place.
//
// - The lower half (256 PML4 entries; 128 TiB) is reserved for userspace. These mappings are
//   associated with _address spaces_, and change when context switching, unless the address spaces
//   match.
// - The upper half is reserved for the kernel. Kernel mappings are preserved across context
//   switches.
//
// Each PML4 entry references 512 GiB of virtual memory.

/// The size of a single PML4
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
/// Mask selecting the PML4-index bits (bits 39..=47) of a canonical address.
pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;

/// Offset of kernel
pub const KERNEL_MAX_SIZE: usize = 1_usize << 31;
// The kernel occupies the last 2 GiB of virtual memory (0xFFFF_FFFF_8000_0000),
// matching the "kernel" x86_64 code model.
pub const KERNEL_OFFSET: usize = KERNEL_MAX_SIZE.wrapping_neg();
// = 511, the topmost PML4 entry.
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE;

/// Offset to kernel heap — the PML4 entry just below the kernel's (510).
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
/// Size of kernel heap
pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB

/// Offset of physmap — the start of the upper half (PML4 entry 256).
// This needs to match RMM's PHYS_OFFSET
pub const PHYS_OFFSET: usize = 0xFFFF_8000_0000_0000;
pub const PHYS_PML4: usize = (PHYS_OFFSET & PML4_MASK) / PML4_SIZE;

/// End offset of the user image, i.e. kernel start
// TODO: Make this offset at least PAGE_SIZE less? There are known hardware bugs on some arches,
// for example on x86 if instructions execute near the 48-bit canonical address boundary.
pub const USER_END_OFFSET: usize = 256 * PML4_SIZE;
34 | pub const USER_END_OFFSET: usize = 256 * PML4_SIZE; 35 | -------------------------------------------------------------------------------- /src/arch/x86_64/cpuid.rs: -------------------------------------------------------------------------------- 1 | use raw_cpuid::{ExtendedFeatures, FeatureInfo}; 2 | 3 | pub use crate::arch::x86_shared::cpuid::*; 4 | 5 | pub fn feature_info() -> FeatureInfo { 6 | cpuid() 7 | .get_feature_info() 8 | .expect("x86_64 requires CPUID leaf=0x01 to be present") 9 | } 10 | 11 | pub fn has_ext_feat(feat: impl FnOnce(ExtendedFeatures) -> bool) -> bool { 12 | cpuid().get_extended_feature_info().map_or(false, feat) 13 | } 14 | -------------------------------------------------------------------------------- /src/arch/x86_64/interrupt/mod.rs: -------------------------------------------------------------------------------- 1 | //! Interrupt instructions 2 | 3 | pub use crate::arch::x86_shared::interrupt::*; 4 | 5 | #[macro_use] 6 | pub mod handler; 7 | 8 | pub mod exception; 9 | pub mod irq; 10 | pub mod syscall; 11 | 12 | pub use self::handler::InterruptStack; 13 | -------------------------------------------------------------------------------- /src/arch/x86_64/macros.rs: -------------------------------------------------------------------------------- 1 | /// Print to console 2 | #[macro_export] 3 | macro_rules! print { 4 | ($($arg:tt)*) => ({ 5 | use core::fmt::Write; 6 | let _ = write!($crate::arch::debug::Writer::new(), $($arg)*); 7 | }); 8 | } 9 | 10 | /// Print with new line to console 11 | #[macro_export] 12 | macro_rules! println { 13 | () => (print!("\n")); 14 | ($fmt:expr) => (print!(concat!($fmt, "\n"))); 15 | ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); 16 | } 17 | 18 | macro_rules! expand_bool( 19 | ($value:expr) => { 20 | concat!($value) 21 | } 22 | ); 23 | 24 | macro_rules! 
// Single-feature convenience form: expands to alternative2! with an empty
// (never-matching) second feature.
macro_rules! alternative(
    (feature: $feature:literal, then: [$($then:expr),*], default: [$($default:expr),*]) => {
        alternative2!(feature1: $feature, then1: [$($then),*], feature2: "", then2: [""], default: [$($default),*])
    }
);
// Emits an *assembler-time* saturating subtraction: the expression is
// ((lhs > rhs)) * ((lhs) - (rhs)), i.e. 0 whenever lhs <= rhs.
macro_rules! saturating_sub(
    ($lhs:literal, $rhs:literal) => { concat!(
        "((", $lhs, ")>(", $rhs, "))*((", $lhs, ")-(", $rhs, "))",
    ) }
);
// Use feature1 if present, otherwise try using feature2, otherwise use default.
//
// cpu_feature_always simply means it is always enabled. Thus, if feature2, which has lower
// priority, is "always" but feature1 is "auto", feature2 will still be checked for, and feature2
// will become the fallback code.
//
// An empty string as feature is equivalent with "never".
//
// Mechanics: labels 40/42/41 delimit the patchable region. "always" features
// are selected at assembly time with .if/.elseif; "auto" features instead pad
// the region with NOP (0x90) so it is large enough for the biggest candidate,
// whose bytes live in .altcode sections emitted by alternative_auto! below and
// are patched in at boot — presumably by src/arch/x86_64/alternative.rs
// (TODO confirm).
macro_rules! alternative2(
    (feature1: $feature1:literal, then1: [$($then1:expr),*], feature2: $feature2:literal, then2: [$($then2:expr),*], default: [$($default:expr),*]) => {
        concat!("
            .set true, 1
            .set false, 0
            40:
            .if ", expand_bool!(cfg!(cpu_feature_always = $feature1)), "
            ", $($then1,)* "
            .elseif ", expand_bool!(cfg!(cpu_feature_always = $feature2)), "
            ", $($then2,)* "
            .else
            ", $($default,)* "
            .endif
            42:
            .if ", expand_bool!(cfg!(cpu_feature_auto = $feature1)), "
            .skip -", saturating_sub!("51f - 50f", "42b - 40b"), ", 0x90
            .endif
            .if ", expand_bool!(cfg!(cpu_feature_auto = $feature2)), "
            .skip -", saturating_sub!("61f - 60f", "42b - 40b"), ", 0x90
            .endif
            41:
            ",
            // FIXME: The assembler apparently complains "invalid number of bytes" despite it being
            // quite obvious what saturating_sub does.

            // Declare them in reverse order. Last relocation wins!
            alternative_auto!("6", $feature2, [$($then2),*]),
            alternative_auto!("5", $feature1, [$($then1),*]),
        )
    };
);
// Emits the alternative body for an "auto" feature into out-of-line sections:
// the replacement bytes into .altcode.<feature>, the feature name into
// .altfeatures.<feature>, and a fixed-layout record (name ptr/len, patch site
// start/len/end, replacement start/len) into .altrelocs.<feature> for the
// boot-time patcher to consume.
macro_rules! alternative_auto(
    ($first_digit:literal, $feature:literal, [$($then:expr),*]) => { concat!(
        ".if ", expand_bool!(cfg!(cpu_feature_auto = $feature)), "
        .pushsection .altcode.", $feature, ",\"a\"
        ", $first_digit, "0:
        ", $($then,)* "
        ", $first_digit, "1:
        .popsection
        .pushsection .altfeatures.", $feature, ",\"a\"
        70: .ascii \"", $feature, "\"
        71:
        .popsection
        .pushsection .altrelocs.", $feature, ",\"a\"
        .quad 70b
        .quad 71b - 70b
        .quad 40b
        .quad 42b - 40b
        .quad 41b - 40b
        .quad 0
        .quad ", $first_digit, "0b
        .quad ", $first_digit, "1b - ", $first_digit, "0b
        .popsection
        .endif
        ",
    ) }
);
/// Enables miscellaneous CPU hardening/bookkeeping features on this CPU, when
/// the hardware supports them: UMIP, SMEP, and the TSC_AUX CPU-id tag.
///
/// # Safety
/// Writes CR4 and MSRs; must run on the CPU being initialized, during early
/// per-CPU bring-up.
pub unsafe fn init(cpu_id: LogicalCpuId) {
    if has_ext_feat(|feat| feat.has_umip()) {
        // UMIP (UserMode Instruction Prevention) forbids userspace from calling SGDT, SIDT, SLDT,
        // SMSW and STR. KASLR is currently not implemented, but this protects against leaking
        // addresses.
        x86::controlregs::cr4_write(x86::controlregs::cr4() | Cr4::CR4_ENABLE_UMIP);
    }
    if has_ext_feat(|feat| feat.has_smep()) {
        // SMEP (Supervisor-Mode Execution Prevention) forbids the kernel from executing
        // instruction on any page marked "userspace-accessible". This improves security for
        // obvious reasons.
        x86::controlregs::cr4_write(x86::controlregs::cr4() | Cr4::CR4_ENABLE_SMEP);
    }

    if let Some(feats) = cpuid().get_extended_processor_and_feature_identifiers()
        && feats.has_rdtscp()
    {
        // Tag this CPU's IA32_TSC_AUX with its logical id, so RDTSCP/RDPID
        // can identify the current CPU — presumably consumed by percpu or
        // profiling code; TODO confirm the reader.
        x86::msr::wrmsr(x86::msr::IA32_TSC_AUX, cpu_id.get().into());
    }
}
/// Copies `len` (rdx) bytes from `src` (rsi) to `dst` (rdi) via `rep movsb`;
/// returns 0 in `al`.
///
/// When the "smap" CPU feature is patched in (see the alternative! machinery
/// in macros.rs), user accesses are bracketed with STAC/CLAC so Supervisor
/// Mode Access Prevention stays armed everywhere else.
///
/// The visible code always returns 0 — presumably a fault during the copy is
/// intercepted by the page-fault handler, which recognizes `.usercopy-fns`
/// addresses; TODO confirm.
#[naked]
#[link_section = ".usercopy-fns"]
pub unsafe extern "C" fn arch_copy_to_user(dst: usize, src: usize, len: usize) -> u8 {
    // TODO: spectre_v1

    core::arch::naked_asm!(alternative!(
        feature: "smap",
        then: ["
            xor eax, eax
            mov rcx, rdx
            stac
            rep movsb
            clac
            ret
        "],
        default: ["
            xor eax, eax
            mov rcx, rdx
            rep movsb
            ret
        "]
    ));
}
// The same routine serves both copy directions.
pub use arch_copy_to_user as arch_copy_from_user;

pub use alternative::kfx_size;
13 | unsafe { 14 | flush.ignore(); 15 | } 16 | } 17 | } 18 | impl Drop for InactiveFlusher { 19 | fn drop(&mut self) { 20 | ipi(IpiKind::Tlb, IpiTarget::Other); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /src/arch/x86_64/rmm.rs: -------------------------------------------------------------------------------- 1 | use rmm::{Arch, PageFlags, VirtualAddress}; 2 | 3 | pub unsafe fn page_flags(virt: VirtualAddress) -> PageFlags { 4 | use crate::kernel_executable_offsets::*; 5 | let virt_addr = virt.data(); 6 | 7 | (if virt_addr >= __text_start() && virt_addr < __text_end() { 8 | // Remap text read-only, execute 9 | PageFlags::new().execute(true) 10 | } else if virt_addr >= __rodata_start() && virt_addr < __rodata_end() { 11 | // Remap rodata read-only, no execute 12 | PageFlags::new() 13 | } else { 14 | // Remap everything else read-write, no execute 15 | PageFlags::new().write(true) 16 | }) 17 | .global(cfg!(not(feature = "pti"))) 18 | } 19 | -------------------------------------------------------------------------------- /src/arch/x86_shared/cpuid.rs: -------------------------------------------------------------------------------- 1 | use raw_cpuid::{CpuId, CpuIdResult}; 2 | 3 | pub fn cpuid() -> CpuId { 4 | // FIXME check for cpuid availability during early boot and error out if it doesn't exist. 
/// Constructs a `CpuId` wrapper backed by the real CPUID instruction of
/// whichever x86 variant this kernel is built for.
pub fn cpuid() -> CpuId {
    // FIXME check for cpuid availability during early boot and error out if it doesn't exist.
    CpuId::with_cpuid_fn(|a, c| {
        // Select the intrinsic matching the compilation target; both return
        // the same register quadruple for leaf `a`, subleaf `c`.
        #[cfg(target_arch = "x86")]
        let result = unsafe { core::arch::x86::__cpuid_count(a, c) };
        #[cfg(target_arch = "x86_64")]
        let result = unsafe { core::arch::x86_64::__cpuid_count(a, c) };
        CpuIdResult {
            eax: result.eax,
            ebx: result.ebx,
            ecx: result.ecx,
            edx: result.edx,
        }
    })
}
impl<'a> Writer<'a> {
    /// Acquires every compiled-in debug sink (log buffer plus any of:
    /// graphical display, LPSS/COM serial, QEMU debug port, System76 EC).
    /// Holding all the locks for the lifetime of the Writer keeps one
    /// message's bytes contiguous on every sink.
    pub fn new() -> Writer<'a> {
        Writer {
            log: LOG.lock(),
            #[cfg(feature = "graphical_debug")]
            display: DEBUG_DISPLAY.lock(),
            #[cfg(feature = "lpss_debug")]
            lpss: LPSS.lock(),
            #[cfg(feature = "qemu_debug")]
            qemu: QEMU.lock(),
            #[cfg(feature = "serial_debug")]
            serial: COM1.lock(),
            #[cfg(feature = "system76_ec_debug")]
            system76_ec: SYSTEM76_EC.lock(),
        }
    }

    /// Fans `buf` out to every available sink.
    ///
    /// `preserve` controls only whether the bytes are also appended to the
    /// in-memory log; the hardware sinks always receive them.
    pub fn write(&mut self, buf: &[u8], preserve: bool) {
        if preserve {
            if let Some(ref mut log) = *self.log {
                log.write(buf);
            }
        }

        #[cfg(feature = "graphical_debug")]
        {
            if let Some(ref mut display) = *self.display {
                display.write(buf);
            }
        }

        #[cfg(feature = "lpss_debug")]
        {
            if let Some(ref mut lpss) = *self.lpss {
                lpss.write(buf);
            }
        }

        #[cfg(feature = "qemu_debug")]
        {
            // The QEMU debug port takes one byte at a time.
            for &b in buf {
                self.qemu.write(b);
            }
        }

        #[cfg(feature = "serial_debug")]
        {
            self.serial.write(buf);
        }

        #[cfg(feature = "system76_ec_debug")]
        {
            if let Some(ref mut system76_ec) = *self.system76_ec {
                system76_ec.print_slice(buf);
            }
        }
    }
}

impl<'a> fmt::Write for Writer<'a> {
    /// `core::fmt` entry point used by print!/println!; always preserves the
    /// output in the log.
    fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
        self.write(s.as_bytes(), true);
        Ok(())
    }
}
/// Puts the HPET into legacy-replacement periodic mode, with timer 0 firing at
/// the kernel's PIT-compatible tick rate.
///
/// Returns `false` (leaving the HPET disabled) when the hardware lacks
/// legacy-replacement routing or timer 0 cannot run periodically.
///
/// # Safety
/// Performs MMIO on the HPET register block; caller must ensure exclusive
/// access during init.
pub unsafe fn init(hpet: &mut Hpet) -> bool {
    println!("HPET Before Init");
    debug(hpet, true);

    // Disable HPET while reprogramming it.
    {
        let mut config_word = hpet.read_u64(GENERAL_CONFIG_OFFSET);
        config_word &= !(LEG_RT_CNF | ENABLE_CNF);
        hpet.write_u64(GENERAL_CONFIG_OFFSET, config_word);
    }

    let capability = hpet.read_u64(CAPABILITY_OFFSET);
    if capability & LEG_RT_CAP == 0 {
        log::warn!("HPET missing capability LEG_RT_CAP");
        return false;
    }

    // Upper 32 bits of the capability register give the counter period in
    // femtoseconds; convert the PIT rate into a tick count per interrupt.
    let period_fs = capability >> 32;
    let divisor = (pit::RATE as u64 * 1_000_000) / period_fs;

    let t0_capabilities = hpet.read_u64(T0_CONFIG_CAPABILITY_OFFSET);
    if t0_capabilities & PER_INT_CAP == 0 {
        log::warn!("HPET T0 missing capability PER_INT_CAP");
        return false;
    }

    let counter = hpet.read_u64(MAIN_COUNTER_OFFSET);

    let t0_config_word: u64 = TN_VAL_SET_CNF | TN_TYPE_CNF | TN_INT_ENB_CNF;
    hpet.write_u64(T0_CONFIG_CAPABILITY_OFFSET, t0_config_word);
    // With TN_VAL_SET_CNF armed, the comparator register is written twice:
    // set accumulator value
    hpet.write_u64(T0_COMPARATOR_OFFSET, counter + divisor);
    // set interval
    hpet.write_u64(T0_COMPARATOR_OFFSET, divisor);

    // Enable interrupts from the HPET
    {
        let mut config_word: u64 = hpet.read_u64(GENERAL_CONFIG_OFFSET);
        config_word |= LEG_RT_CNF | ENABLE_CNF;
        hpet.write_u64(GENERAL_CONFIG_OFFSET, config_word);
    }

    println!("HPET After Init");
    debug(hpet, false);

    true
}
}); 72 | 73 | if print_caps { 74 | let capability = hpet.read_u64(CAPABILITY_OFFSET); 75 | println!(" caps: {:#x}", capability); 76 | println!(" clock period: {}", (capability >> 32) as u32); 77 | println!(" ID: {:#x}", (capability >> 16) as u16); 78 | println!(" LEG_RT_CAP: {}", capability & (1 << 15) == (1 << 15)); 79 | println!( 80 | " COUNT_SIZE_CAP: {}", 81 | capability & (1 << 13) == (1 << 13) 82 | ); 83 | println!(" timers: {}", (capability >> 8) as u8 & 0x1F); 84 | println!(" revision: {}", capability as u8); 85 | } 86 | 87 | let config_word = hpet.read_u64(GENERAL_CONFIG_OFFSET); 88 | println!(" config: {:#x}", config_word); 89 | 90 | let interrupt_status = hpet.read_u64(GENERAL_INTERRUPT_OFFSET); 91 | println!(" interrupt status: {:#x}", interrupt_status); 92 | 93 | let counter = hpet.read_u64(MAIN_COUNTER_OFFSET); 94 | println!(" counter: {:#x}", counter); 95 | 96 | let t0_capabilities = hpet.read_u64(T0_CONFIG_CAPABILITY_OFFSET); 97 | println!(" T0 caps: {:#x}", t0_capabilities); 98 | println!( 99 | " interrupt routing: {:#x}", 100 | (t0_capabilities >> 32) as u32 101 | ); 102 | println!(" flags: {:#x}", t0_capabilities as u16); 103 | 104 | let t0_comparator = hpet.read_u64(T0_COMPARATOR_OFFSET); 105 | println!(" T0 comparator: {:#x}", t0_comparator); 106 | } 107 | -------------------------------------------------------------------------------- /src/arch/x86_shared/device/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::memory::KernelMapper; 2 | 3 | pub mod cpu; 4 | #[cfg(feature = "acpi")] 5 | pub mod hpet; 6 | pub mod ioapic; 7 | pub mod local_apic; 8 | pub mod pic; 9 | pub mod pit; 10 | pub mod serial; 11 | #[cfg(feature = "system76_ec_debug")] 12 | pub mod system76_ec; 13 | 14 | #[cfg(feature = "x86_kvm_pv")] 15 | pub mod tsc; 16 | 17 | pub unsafe fn init() { 18 | pic::init(); 19 | local_apic::init(&mut KernelMapper::lock()); 20 | 21 | // Run here for the side-effect of printing if KVM was used 
to avoid interleaved logs. 22 | #[cfg(feature = "x86_kvm_pv")] tsc::get_kvm_support(); 23 | } 24 | pub unsafe fn init_after_acpi() { 25 | // this will disable the IOAPIC if needed. 26 | //ioapic::init(mapper); 27 | } 28 | 29 | #[cfg(feature = "acpi")] 30 | unsafe fn init_hpet() -> bool { 31 | use crate::acpi::ACPI_TABLE; 32 | if let Some(ref mut hpet) = *ACPI_TABLE.hpet.write() { 33 | if cfg!(target_arch = "x86") { 34 | //TODO: fix HPET on i686 35 | log::warn!("HPET found but not implemented on i686"); 36 | return false; 37 | } 38 | hpet::init(hpet) 39 | } else { 40 | false 41 | } 42 | } 43 | 44 | #[cfg(not(feature = "acpi"))] 45 | unsafe fn init_hpet() -> bool { 46 | false 47 | } 48 | 49 | pub unsafe fn init_noncore() { 50 | log::info!("Initializing system timer"); 51 | 52 | #[cfg(feature = "x86_kvm_pv")] 53 | if tsc::init() { 54 | log::info!("TSC used as system clock source"); 55 | } 56 | 57 | if init_hpet() { 58 | log::info!("HPET used as system timer"); 59 | } else { 60 | pit::init(); 61 | log::info!("PIT used as system timer"); 62 | } 63 | 64 | log::info!("Initializing serial"); 65 | serial::init(); 66 | log::info!("Finished initializing devices"); 67 | } 68 | 69 | pub unsafe fn init_ap() { 70 | local_apic::init_ap(); 71 | 72 | #[cfg(feature = "x86_kvm_pv")] 73 | tsc::init(); 74 | } 75 | 76 | #[derive(Default)] 77 | pub struct ArchPercpuMisc { 78 | #[cfg(feature = "x86_kvm_pv")] 79 | pub tsc_info: tsc::TscPercpu, 80 | } 81 | -------------------------------------------------------------------------------- /src/arch/x86_shared/device/pic.rs: -------------------------------------------------------------------------------- 1 | use core::cell::SyncUnsafeCell; 2 | 3 | use crate::{ 4 | arch::interrupt::irq, 5 | syscall::io::{Io, Pio}, 6 | }; 7 | 8 | static MASTER: SyncUnsafeCell = SyncUnsafeCell::new(Pic::new(0x20)); 9 | static SLAVE: SyncUnsafeCell = SyncUnsafeCell::new(Pic::new(0xA0)); 10 | 11 | // SAFETY: must be main thread 12 | pub unsafe fn master<'a>() -> &'a mut Pic { 13 | &mut *MASTER.get()
14 | } 15 | // SAFETY: must be main thread 16 | pub unsafe fn slave<'a>() -> &'a mut Pic { 17 | &mut *SLAVE.get() 18 | } 19 | 20 | pub unsafe fn init() { 21 | let master = master(); 22 | let slave = slave(); 23 | 24 | // Start initialization 25 | master.cmd.write(0x11); 26 | slave.cmd.write(0x11); 27 | 28 | // Set offsets 29 | master.data.write(0x20); 30 | slave.data.write(0x28); 31 | 32 | // Set up cascade 33 | master.data.write(4); 34 | slave.data.write(2); 35 | 36 | // Set up interrupt mode (1 is 8086/88 mode, 2 is auto EOI) 37 | master.data.write(1); 38 | slave.data.write(1); 39 | 40 | // Unmask interrupts 41 | master.data.write(0); 42 | slave.data.write(0); 43 | 44 | // Ack remaining interrupts 45 | master.ack(); 46 | slave.ack(); 47 | 48 | // probably already set to PIC, but double-check 49 | irq::set_irq_method(irq::IrqMethod::Pic); 50 | } 51 | 52 | pub unsafe fn disable() { 53 | master().data.write(0xFF); 54 | slave().data.write(0xFF); 55 | } 56 | 57 | pub struct Pic { 58 | cmd: Pio, 59 | data: Pio, 60 | } 61 | 62 | impl Pic { 63 | pub const fn new(port: u16) -> Pic { 64 | Pic { 65 | cmd: Pio::new(port), 66 | data: Pio::new(port + 1), 67 | } 68 | } 69 | 70 | pub fn ack(&mut self) { 71 | self.cmd.write(0x20); 72 | } 73 | 74 | pub fn mask_set(&mut self, irq: u8) { 75 | assert!(irq < 8); 76 | 77 | let mut mask = self.data.read(); 78 | mask |= 1 << irq; 79 | self.data.write(mask); 80 | } 81 | 82 | pub fn mask_clear(&mut self, irq: u8) { 83 | assert!(irq < 8); 84 | 85 | let mut mask = self.data.read(); 86 | mask &= !(1 << irq); 87 | self.data.write(mask); 88 | } 89 | /// A bitmap of all currently servicing IRQs. 
Spurious IRQs will not have this bit set 90 | pub fn isr(&mut self) -> u8 { 91 | self.cmd.write(0x0A); 92 | self.cmd.read() // note that cmd is read, rather than data 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/arch/x86_shared/device/pit.rs: -------------------------------------------------------------------------------- 1 | use core::cell::SyncUnsafeCell; 2 | 3 | use crate::syscall::io::{Io, Pio}; 4 | 5 | static CHAN0: SyncUnsafeCell> = SyncUnsafeCell::new(Pio::new(0x40)); 6 | //pub static mut CHAN1: Pio = Pio::new(0x41); 7 | //pub static mut CHAN2: Pio = Pio::new(0x42); 8 | static COMMAND: SyncUnsafeCell> = SyncUnsafeCell::new(Pio::new(0x43)); 9 | 10 | // SAFETY: must be externally syncd 11 | pub unsafe fn chan0<'a>() -> &'a mut Pio { 12 | &mut *CHAN0.get() 13 | } 14 | // SAFETY: must be externally syncd 15 | pub unsafe fn command<'a>() -> &'a mut Pio { 16 | &mut *COMMAND.get() 17 | } 18 | 19 | const SELECT_CHAN0: u8 = 0b00 << 6; 20 | const ACCESS_LATCH: u8 = 0b00 << 4; 21 | const ACCESS_LOHI: u8 = 0b11 << 4; 22 | const MODE_2: u8 = 0b010 << 1; 23 | 24 | // 1 / (1.193182 MHz) = 838,095,110 femtoseconds ~= 838.095 ns 25 | pub const PERIOD_FS: u128 = 838_095_110; 26 | 27 | // 4847 / (1.193182 MHz) = 4,062,247 ns ~= 4.1 ms or 246 Hz 28 | pub const CHAN0_DIVISOR: u16 = 4847; 29 | 30 | // Calculated interrupt period in nanoseconds based on divisor and period 31 | pub const RATE: u128 = (CHAN0_DIVISOR as u128 * PERIOD_FS) / 1_000_000; 32 | 33 | pub unsafe fn init() { 34 | command().write(SELECT_CHAN0 | ACCESS_LOHI | MODE_2); 35 | chan0().write(CHAN0_DIVISOR as u8); 36 | chan0().write((CHAN0_DIVISOR >> 8) as u8); 37 | } 38 | 39 | pub unsafe fn read() -> u16 { 40 | command().write(SELECT_CHAN0 | ACCESS_LATCH); 41 | let low = chan0().read(); 42 | let high = chan0().read(); 43 | let counter = ((high as u16) << 8) | (low as u16); 44 | // Counter is inverted, subtract from CHAN0_DIVISOR 45 | 
CHAN0_DIVISOR.saturating_sub(counter) 46 | } 47 | -------------------------------------------------------------------------------- /src/arch/x86_shared/device/rtc.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redox-os/kernel/e67cca7bce085190e538e2141784125bb99ddc2f/src/arch/x86_shared/device/rtc.rs -------------------------------------------------------------------------------- /src/arch/x86_shared/device/serial.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "lpss_debug")] 2 | use crate::syscall::io::Mmio; 3 | use crate::{devices::uart_16550::SerialPort, syscall::io::Pio}; 4 | use spin::Mutex; 5 | 6 | pub static COM1: Mutex>> = Mutex::new(SerialPort::>::new(0x3F8)); 7 | pub static COM2: Mutex>> = Mutex::new(SerialPort::>::new(0x2F8)); 8 | // pub static COM3: Mutex>> = Mutex::new(SerialPort::>::new(0x3E8)); 9 | // pub static COM4: Mutex>> = Mutex::new(SerialPort::>::new(0x2E8)); 10 | 11 | #[cfg(feature = "lpss_debug")] 12 | pub static LPSS: Mutex>>> = Mutex::new(None); 13 | 14 | pub unsafe fn init() { 15 | COM1.lock().init(); 16 | COM2.lock().init(); 17 | 18 | #[cfg(feature = "lpss_debug")] 19 | { 20 | // TODO: Make this configurable 21 | let address = crate::PHYS_OFFSET + 0xFE032000; 22 | 23 | { 24 | use crate::{ 25 | memory::{Frame, PhysicalAddress}, 26 | paging::{entry::EntryFlags, ActivePageTable, Page, VirtualAddress}, 27 | }; 28 | 29 | let mut active_table = ActivePageTable::new(); 30 | let page = Page::containing_address(VirtualAddress::new(address)); 31 | let frame = Frame::containing(PhysicalAddress::new(address - crate::PHYS_OFFSET)); 32 | let result = active_table.map_to( 33 | page, 34 | frame, 35 | EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, 36 | ); 37 | result.flush(&mut active_table); 38 | } 39 | 40 | let lpss = SerialPort::>::new(crate::PHYS_OFFSET + 0xFE032000); 41 | lpss.init(); 42 | 43 | 
*LPSS.lock() = Some(lpss); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/arch/x86_shared/device/system76_ec.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | use syscall::io::{Io, Pio}; 3 | 4 | pub static SYSTEM76_EC: Mutex> = Mutex::new(None); 5 | 6 | pub fn init() { 7 | *SYSTEM76_EC.lock() = System76Ec::new(); 8 | } 9 | 10 | pub struct System76Ec { 11 | base: u16, 12 | } 13 | 14 | impl System76Ec { 15 | pub fn new() -> Option { 16 | let mut system76_ec = Self { base: 0x0E00 }; 17 | if system76_ec.probe() { 18 | Some(system76_ec) 19 | } else { 20 | None 21 | } 22 | } 23 | 24 | #[inline(always)] 25 | pub fn read(&mut self, addr: u8) -> u8 { 26 | Pio::::new(self.base + addr as u16).read() 27 | } 28 | 29 | #[inline(always)] 30 | pub fn write(&mut self, addr: u8, data: u8) { 31 | Pio::::new(self.base + addr as u16).write(data) 32 | } 33 | 34 | pub fn probe(&mut self) -> bool { 35 | // Send probe command 36 | self.write(0, 1); 37 | 38 | // Wait for response 39 | let mut timeout = 1_000_000; 40 | while timeout > 0 { 41 | if self.read(0) == 0 { 42 | break; 43 | } 44 | timeout -= 1; 45 | } 46 | if timeout == 0 { 47 | return false; 48 | } 49 | 50 | // Return false on command error 51 | if self.read(1) != 0 { 52 | return false; 53 | } 54 | 55 | // Must receive 0x76, 0xEC as signature 56 | self.read(2) == 0x76 && self.read(3) == 0xEC 57 | } 58 | 59 | pub fn flush(&mut self) { 60 | // Send command 61 | self.write(0, 4); 62 | 63 | // TODO: timeout 64 | while self.read(0) != 0 {} 65 | 66 | // Clear length 67 | self.write(3, 0); 68 | } 69 | 70 | pub fn print(&mut self, byte: u8) { 71 | // Read length 72 | let len = self.read(3); 73 | // Write data at offset 74 | self.write(len + 4, byte); 75 | // Update length 76 | self.write(3, len + 1); 77 | 78 | // If we hit the end of the buffer, or were given a newline, flush 79 | if byte == b'\n' || len >= 128 { 80 | 
self.flush(); 81 | } 82 | } 83 | 84 | pub fn print_slice(&mut self, bytes: &[u8]) { 85 | for &byte in bytes { 86 | self.print(byte); 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/arch/x86_shared/interrupt/ipi.rs: -------------------------------------------------------------------------------- 1 | use crate::{context, device::local_apic::the_local_apic, percpu::PercpuBlock}; 2 | 3 | interrupt!(wakeup, || { 4 | the_local_apic().eoi(); 5 | }); 6 | 7 | interrupt!(tlb, || { 8 | PercpuBlock::current().maybe_handle_tlb_shootdown(); 9 | 10 | the_local_apic().eoi(); 11 | }); 12 | 13 | interrupt!(switch, || { 14 | the_local_apic().eoi(); 15 | 16 | let _ = context::switch(); 17 | }); 18 | 19 | interrupt!(pit, || { 20 | the_local_apic().eoi(); 21 | 22 | // Switch after a sufficient amount of time since the last switch. 23 | context::switch::tick(); 24 | }); 25 | -------------------------------------------------------------------------------- /src/arch/x86_shared/interrupt/mod.rs: -------------------------------------------------------------------------------- 1 | //! Interrupt instructions 2 | 3 | pub mod ipi; 4 | pub mod trace; 5 | 6 | pub use super::idt::{available_irqs_iter, is_reserved, set_reserved}; 7 | 8 | /// Clear interrupts 9 | #[inline(always)] 10 | pub unsafe fn disable() { 11 | core::arch::asm!("cli", options(nomem, nostack)); 12 | } 13 | 14 | /// Set interrupts and halt 15 | /// This will atomically wait for the next interrupt 16 | /// Performing enable followed by halt is not guaranteed to be atomic, use this instead! 17 | #[inline(always)] 18 | pub unsafe fn enable_and_halt() { 19 | core::arch::asm!("sti; hlt", options(nomem, nostack)); 20 | } 21 | 22 | /// Set interrupts and nop 23 | /// This will enable interrupts and allow the IF flag to be processed 24 | /// Simply enabling interrupts does not guarantee that they will trigger, use this instead!
25 | #[inline(always)] 26 | pub unsafe fn enable_and_nop() { 27 | core::arch::asm!("sti; nop", options(nomem, nostack)); 28 | } 29 | 30 | /// Halt instruction 31 | #[inline(always)] 32 | pub unsafe fn halt() { 33 | core::arch::asm!("hlt", options(nomem, nostack)); 34 | } 35 | 36 | /// Pause instruction 37 | /// Safe because it is similar to a NOP, and has no memory effects 38 | #[inline(always)] 39 | pub fn pause() { 40 | unsafe { 41 | core::arch::asm!("pause", options(nomem, nostack)); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/arch/x86_shared/interrupt/trace.rs: -------------------------------------------------------------------------------- 1 | use core::mem; 2 | 3 | pub struct StackTrace { 4 | pub fp: usize, 5 | pub pc_ptr: *const usize, 6 | } 7 | 8 | impl StackTrace { 9 | #[inline(always)] 10 | pub unsafe fn start() -> Option { 11 | let mut fp: usize; 12 | #[cfg(target_arch = "x86")] 13 | core::arch::asm!("mov {}, ebp", out(reg) fp); 14 | #[cfg(target_arch = "x86_64")] 15 | core::arch::asm!("mov {}, rbp", out(reg) fp); 16 | let pc_ptr = fp.checked_add(mem::size_of::())?; 17 | Some(Self { 18 | fp, 19 | pc_ptr: pc_ptr as *const usize, 20 | }) 21 | } 22 | 23 | pub unsafe fn next(self) -> Option { 24 | let fp = *(self.fp as *const usize); 25 | let pc_ptr = fp.checked_add(mem::size_of::())?; 26 | Some(Self { 27 | fp: fp, 28 | pc_ptr: pc_ptr as *const usize, 29 | }) 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/arch/x86_shared/ipi.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Copy, Debug)] 2 | #[repr(u8)] 3 | pub enum IpiKind { 4 | Wakeup = 0x40, 5 | Tlb = 0x41, 6 | Switch = 0x42, 7 | Pit = 0x43, 8 | 9 | #[cfg(feature = "profiling")] 10 | Profile = 0x44, 11 | } 12 | 13 | #[derive(Clone, Copy, Debug)] 14 | #[repr(u8)] 15 | pub enum IpiTarget { 16 | Current = 1, 17 | All = 2, 18 | Other = 3, 
19 | } 20 | 21 | #[cfg(not(feature = "multi_core"))] 22 | #[inline(always)] 23 | pub fn ipi(_kind: IpiKind, _target: IpiTarget) {} 24 | 25 | #[cfg(feature = "multi_core")] 26 | #[inline(always)] 27 | pub fn ipi(kind: IpiKind, target: IpiTarget) { 28 | use crate::device::local_apic::the_local_apic; 29 | 30 | #[cfg(feature = "profiling")] 31 | if matches!(kind, IpiKind::Profile) { 32 | let icr = (target as u64) << 18 | 1 << 14 | 0b100 << 8; 33 | unsafe { the_local_apic().set_icr(icr) }; 34 | return; 35 | } 36 | 37 | let icr = (target as u64) << 18 | 1 << 14 | (kind as u64); 38 | unsafe { the_local_apic().set_icr(icr) }; 39 | } 40 | use crate::cpu_set::LogicalCpuId; 41 | 42 | #[cfg(feature = "multi_core")] 43 | #[inline(always)] 44 | pub fn ipi_single(kind: IpiKind, target: LogicalCpuId) { 45 | use crate::device::local_apic::the_local_apic; 46 | 47 | unsafe { 48 | // TODO: Distinguish between logical and physical CPU IDs 49 | the_local_apic().ipi(target.get(), kind); 50 | } 51 | } 52 | 53 | #[cfg(not(feature = "multi_core"))] 54 | #[inline(always)] 55 | pub fn ipi_single(_kind: IpiKind, _target: LogicalCpuId) {} 56 | -------------------------------------------------------------------------------- /src/arch/x86_shared/mod.rs: -------------------------------------------------------------------------------- 1 | /// CPUID wrapper 2 | pub mod cpuid; 3 | 4 | /// Debugging support 5 | pub mod debug; 6 | 7 | /// Devices 8 | pub mod device; 9 | 10 | /// Interrupt descriptor table 11 | pub mod idt; 12 | 13 | /// Interrupt instructions 14 | #[macro_use] 15 | pub mod interrupt; 16 | 17 | /// Inter-processor interrupts 18 | pub mod ipi; 19 | 20 | /// Page table isolation 21 | pub mod pti; 22 | 23 | /// Stop function 24 | pub mod stop; 25 | 26 | pub mod time; 27 | -------------------------------------------------------------------------------- /src/arch/x86_shared/pti.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "pti")] 2 
| use core::ptr; 3 | 4 | #[cfg(feature = "pti")] 5 | use crate::memory::Frame; 6 | #[cfg(feature = "pti")] 7 | use crate::paging::entry::EntryFlags; 8 | #[cfg(feature = "pti")] 9 | use crate::paging::ActivePageTable; 10 | 11 | #[cfg(feature = "pti")] 12 | #[thread_local] 13 | pub static mut PTI_CPU_STACK: [u8; 256] = [0; 256]; 14 | 15 | #[cfg(feature = "pti")] 16 | #[thread_local] 17 | pub static mut PTI_CONTEXT_STACK: usize = 0; 18 | 19 | #[cfg(feature = "pti")] 20 | #[inline(always)] 21 | unsafe fn switch_stack(old: usize, new: usize) { 22 | let old_rsp: usize; 23 | asm!("", out("rsp") old_rsp); 24 | 25 | let offset_rsp = old - old_rsp; 26 | 27 | let new_rsp = new - offset_rsp; 28 | 29 | ptr::copy_nonoverlapping(old_rsp as *const u8, new_rsp as *mut u8, offset_rsp); 30 | 31 | asm!("", out("rsp") new_rsp); 32 | } 33 | 34 | #[cfg(feature = "pti")] 35 | #[inline(always)] 36 | pub unsafe fn map() { 37 | // { 38 | // let mut active_table = unsafe { ActivePageTable::new() }; 39 | // 40 | // // Map kernel heap 41 | // let address = active_table.p4()[::KERNEL_HEAP_PML4].address(); 42 | // let frame = Frame::containing(address); 43 | // let mut flags = active_table.p4()[::KERNEL_HEAP_PML4].flags(); 44 | // flags.remove(EntryFlags::PRESENT); 45 | // active_table.p4_mut()[::KERNEL_HEAP_PML4].set(frame, flags); 46 | // 47 | // // Reload page tables 48 | // active_table.flush_all(); 49 | // } 50 | 51 | // Switch to per-context stack 52 | switch_stack( 53 | PTI_CPU_STACK.as_ptr() as usize + PTI_CPU_STACK.len(), 54 | PTI_CONTEXT_STACK, 55 | ); 56 | } 57 | 58 | #[cfg(feature = "pti")] 59 | #[inline(always)] 60 | pub unsafe extern "C" fn unmap() { 61 | // Switch to per-CPU stack 62 | switch_stack( 63 | PTI_CONTEXT_STACK, 64 | PTI_CPU_STACK.as_ptr() as usize + PTI_CPU_STACK.len(), 65 | ); 66 | 67 | // { 68 | // let mut active_table = unsafe { ActivePageTable::new() }; 69 | // 70 | // // Unmap kernel heap 71 | // let address = active_table.p4()[::KERNEL_HEAP_PML4].address(); 72 | 
// let frame = Frame::containing(address); 73 | // let mut flags = active_table.p4()[::KERNEL_HEAP_PML4].flags(); 74 | // flags.insert(EntryFlags::PRESENT); 75 | // active_table.p4_mut()[::KERNEL_HEAP_PML4].set(frame, flags); 76 | // 77 | // // Reload page tables 78 | // active_table.flush_all(); 79 | // } 80 | } 81 | 82 | #[cfg(not(feature = "pti"))] 83 | #[inline(always)] 84 | pub unsafe fn map() {} 85 | 86 | #[cfg(not(feature = "pti"))] 87 | #[inline(always)] 88 | pub unsafe extern "C" fn unmap() {} 89 | -------------------------------------------------------------------------------- /src/arch/x86_shared/stop.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "acpi")] 2 | use crate::{context, scheme::acpi, time}; 3 | 4 | use crate::syscall::io::{Io, Pio}; 5 | 6 | pub unsafe fn kreset() -> ! { 7 | log::info!("kreset"); 8 | 9 | // 8042 reset 10 | { 11 | println!("Reset with 8042"); 12 | let mut port = Pio::::new(0x64); 13 | while port.readf(2) {} 14 | port.write(0xFE); 15 | } 16 | 17 | emergency_reset(); 18 | } 19 | 20 | #[cfg(target_arch = "x86")] 21 | pub unsafe fn emergency_reset() -> ! { 22 | // Use triple fault to guarantee reset 23 | core::arch::asm!( 24 | " 25 | cli 26 | sidt [esp+16] 27 | // set IDT limit to zero 28 | mov word ptr [esp+16], 0 29 | lidt [esp+16] 30 | int $3 31 | ", 32 | options(noreturn) 33 | ); 34 | } 35 | 36 | #[cfg(target_arch = "x86_64")] 37 | pub unsafe fn emergency_reset() -> ! { 38 | // Use triple fault to guarantee reset 39 | core::arch::asm!( 40 | " 41 | cli 42 | sidt [rsp+16] 43 | // set IDT limit to zero 44 | mov word ptr [rsp+16], 0 45 | lidt [rsp+16] 46 | int $3 47 | ", 48 | options(noreturn) 49 | ); 50 | } 51 | 52 | #[cfg(feature = "acpi")] 53 | fn userspace_acpi_shutdown() { 54 | log::info!("Notifying any potential ACPI driver"); 55 | // Tell whatever driver that handles ACPI, that it should enter the S5 state (i.e. 56 | // shutdown). 
57 | if !acpi::register_kstop() { 58 | // There was no context to switch to. 59 | log::info!("No ACPI driver was alive to handle shutdown."); 60 | return; 61 | } 62 | log::info!("Waiting one second for ACPI driver to run the shutdown sequence."); 63 | let initial = time::monotonic(); 64 | 65 | // Since this driver is a userspace process, and we do not use any magic like directly 66 | // context switching, we have to wait for the userspace driver to complete, with a timeout. 67 | // 68 | // We switch context, and wait for one second. 69 | loop { 70 | // TODO: Switch directly to whichever process is handling the kstop pipe. We would add an 71 | // event flag like EVENT_DIRECT, which has already been suggested for IRQs. 72 | // TODO: Waitpid with timeout? Because, what if the ACPI driver would crash? 73 | let _ = context::switch(); 74 | 75 | let current = time::monotonic(); 76 | if current - initial > time::NANOS_PER_SEC { 77 | log::info!("Timeout reached, thus falling back to other shutdown methods."); 78 | return; 79 | } 80 | } 81 | } 82 | 83 | pub unsafe fn kstop() -> ! { 84 | log::info!("Running kstop()"); 85 | 86 | #[cfg(feature = "acpi")] 87 | userspace_acpi_shutdown(); 88 | 89 | // Magic shutdown code for bochs and qemu (older versions). 90 | for c in "Shutdown".bytes() { 91 | let port = 0x8900; 92 | println!("Shutdown with outb(0x{:X}, '{}')", port, c as char); 93 | Pio::::new(port).write(c); 94 | } 95 | 96 | // Magic shutdown using qemu default ACPI method 97 | { 98 | let port = 0x604; 99 | let data = 0x2000; 100 | println!("Shutdown with outb(0x{:X}, 0x{:X})", port, data); 101 | Pio::::new(port).write(data); 102 | } 103 | 104 | // Magic code for VMWare. Also a hard lock. 
105 | println!("Shutdown with cli hlt"); 106 | loop { 107 | core::arch::asm!("cli; hlt"); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/arch/x86_shared/time.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "acpi")] 2 | use super::device::hpet; 3 | use super::device::pit; 4 | 5 | pub fn monotonic_absolute() -> u128 { 6 | // The paravirtualized TSC is already guaranteed to be monotonic, and thus doesn't need to be 7 | // readjusted. 8 | #[cfg(feature = "x86_kvm_pv")] 9 | if let Some(ns) = super::device::tsc::monotonic_absolute() { 10 | return ns; 11 | } 12 | 13 | *crate::time::OFFSET.lock() + hpet_or_pit() 14 | } 15 | fn hpet_or_pit() -> u128 { 16 | #[cfg(feature = "acpi")] 17 | if let Some(ref hpet) = *crate::acpi::ACPI_TABLE.hpet.read() { 18 | //TODO: handle rollover? 19 | //TODO: improve performance 20 | 21 | // Current count 22 | let counter = unsafe { hpet.read_u64(hpet::MAIN_COUNTER_OFFSET) }; 23 | // Comparator holds next interrupt count 24 | let comparator = unsafe { hpet.read_u64(hpet::T0_COMPARATOR_OFFSET) }; 25 | // Get period in femtoseconds 26 | let capability = unsafe { hpet.read_u64(hpet::CAPABILITY_OFFSET) }; 27 | 28 | // There seems to be a bug in qemu on macos that causes the calculation to produce 0 for 29 | // period_fs and hence a divide by zero calculating the divisor - workaround it while we 30 | // try and get a fix from qemu: https://gitlab.com/qemu-project/qemu/-/issues/1570 31 | let mut period_fs = capability >> 32; 32 | if period_fs == 0 { 33 | period_fs = 10_000_000; 34 | } 35 | 36 | // Calculate divisor 37 | let divisor = (pit::RATE as u64 * 1_000_000) / period_fs; 38 | // Calculate last interrupt 39 | let last_interrupt = comparator.saturating_sub(divisor); 40 | // Calculate ticks since last interrupt 41 | let elapsed = counter.saturating_sub(last_interrupt); 42 | // Calculate nanoseconds since last interrupt 43 | return 
(elapsed as u128 * period_fs as u128) / 1_000_000; 44 | } 45 | // Read ticks since last interrupt 46 | let elapsed = unsafe { pit::read() }; 47 | // Calculate nanoseconds since last interrupt 48 | (elapsed as u128 * pit::PERIOD_FS) / 1_000_000 49 | } 50 | -------------------------------------------------------------------------------- /src/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod aligned_box; 2 | #[macro_use] 3 | pub mod int_like; 4 | pub mod unique; 5 | 6 | /// Debug macro, lifted from the std 7 | #[macro_export] 8 | macro_rules! dbg { 9 | () => { 10 | $crate::println!("[{}:{}]", file!(), line!()); 11 | }; 12 | ($val:expr) => { 13 | // Use of `match` here is intentional because it affects the lifetimes 14 | // of temporaries - https://stackoverflow.com/a/48732525/1063961 15 | match $val { 16 | tmp => { 17 | $crate::println!("[{}:{}] {} = {:#?}", 18 | file!(), line!(), stringify!($val), &tmp); 19 | tmp 20 | } 21 | } 22 | }; 23 | // Trailing comma with single argument is ignored 24 | ($val:expr,) => { $crate::dbg!($val) }; 25 | ($($val:expr),+ $(,)?) => { 26 | ($($crate::dbg!($val)),+,) 27 | }; 28 | } 29 | -------------------------------------------------------------------------------- /src/common/unique.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, ptr::NonNull}; 2 | 3 | /// A small wrapper around NonNull that is Send + Sync, which is 4 | /// only correct if the pointer is never accessed from multiple 5 | /// locations across threads. Which is always, if the pointer is 6 | /// unique. 
7 | pub struct Unique(NonNull); 8 | 9 | impl Copy for Unique {} 10 | impl Clone for Unique { 11 | fn clone(&self) -> Self { 12 | *self 13 | } 14 | } 15 | unsafe impl Send for Unique {} 16 | unsafe impl Sync for Unique {} 17 | 18 | impl Unique { 19 | pub unsafe fn new_unchecked(ptr: *mut T) -> Self { 20 | Self(NonNull::new_unchecked(ptr)) 21 | } 22 | pub fn as_ptr(self) -> *mut T { 23 | self.0.as_ptr() 24 | } 25 | } 26 | impl fmt::Debug for Unique { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | write!(f, "{:?}", self.0) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/context/file.rs: -------------------------------------------------------------------------------- 1 | //! File structs 2 | 3 | use crate::{ 4 | event, 5 | scheme::{self, SchemeId}, 6 | syscall::error::{Error, Result, EBADF}, 7 | }; 8 | use alloc::sync::Arc; 9 | use spin::RwLock; 10 | use syscall::{schemev2::NewFdFlags, RwFlags, O_APPEND, O_NONBLOCK}; 11 | 12 | /// A file description 13 | #[derive(Clone, Copy, Debug)] 14 | pub struct FileDescription { 15 | /// The current file offset (seek) 16 | pub offset: u64, 17 | /// The scheme that this file refers to 18 | pub scheme: SchemeId, 19 | /// The number the scheme uses to refer to this file 20 | pub number: usize, 21 | /// The flags passed to open or fcntl(SETFL) 22 | pub flags: u32, 23 | pub internal_flags: InternalFlags, 24 | } 25 | bitflags! 
{ 26 | #[derive(Clone, Copy, Debug)] 27 | pub struct InternalFlags: u32 { 28 | const POSITIONED = 1; 29 | } 30 | } 31 | impl FileDescription { 32 | pub fn rw_flags(&self, rw: RwFlags) -> u32 { 33 | let mut ret = self.flags & !(O_NONBLOCK | O_APPEND) as u32; 34 | if rw.contains(RwFlags::APPEND) { 35 | ret |= O_APPEND as u32; 36 | } 37 | if rw.contains(RwFlags::NONBLOCK) { 38 | ret |= O_NONBLOCK as u32; 39 | } 40 | ret 41 | } 42 | } 43 | impl InternalFlags { 44 | pub fn from_extra0(fl: u8) -> Option { 45 | Some( 46 | NewFdFlags::from_bits(fl)? 47 | .iter() 48 | .map(|fd| { 49 | if fd == NewFdFlags::POSITIONED { 50 | Self::POSITIONED 51 | } else { 52 | Self::empty() 53 | } 54 | }) 55 | .collect(), 56 | ) 57 | } 58 | } 59 | 60 | /// A file descriptor 61 | #[derive(Clone, Debug)] 62 | #[must_use = "File descriptors must be closed"] 63 | pub struct FileDescriptor { 64 | /// Corresponding file description 65 | pub description: Arc>, 66 | /// Cloexec flag 67 | pub cloexec: bool, 68 | } 69 | 70 | impl FileDescription { 71 | /// Try closing a file, although at this point the description will be destroyed anyway, if 72 | /// doing so fails. 73 | pub fn try_close(self) -> Result<()> { 74 | event::unregister_file(self.scheme, self.number); 75 | 76 | let scheme = scheme::schemes() 77 | .get(self.scheme) 78 | .ok_or(Error::new(EBADF))? 
79 | .clone(); 80 | 81 | scheme.on_close(self.number) 82 | } 83 | } 84 | 85 | impl FileDescriptor { 86 | pub fn close(self) -> Result<()> { 87 | if let Ok(file) = Arc::try_unwrap(self.description).map(RwLock::into_inner) { 88 | file.try_close()?; 89 | } 90 | Ok(()) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/context/signal.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic::Ordering; 2 | 3 | use crate::{context, syscall::flag::SigcontrolFlags}; 4 | 5 | pub fn signal_handler() { 6 | let context_lock = context::current(); 7 | let mut context_guard = context_lock.write(); 8 | let context = &mut *context_guard; 9 | 10 | let being_sigkilled = context.being_sigkilled; 11 | 12 | if being_sigkilled { 13 | drop(context_guard); 14 | drop(context_lock); 15 | crate::syscall::process::exit_this_context(None); 16 | } 17 | 18 | /*let thumbs_down = ptrace::breakpoint_callback( 19 | PTRACE_STOP_SIGNAL, 20 | Some(ptrace_event!(PTRACE_STOP_SIGNAL)), 21 | ) 22 | .and_then(|_| ptrace::next_breakpoint().map(|f| f.contains(PTRACE_FLAG_IGNORE)));*/ 23 | 24 | // TODO: thumbs_down 25 | let Some((thread_ctl, proc_ctl, st)) = context.sigcontrol() else { 26 | // Discard signal if sigcontrol is unset. 27 | log::trace!("no sigcontrol, returning"); 28 | return; 29 | }; 30 | if thread_ctl.currently_pending_unblocked(proc_ctl) == 0 { 31 | // The context is currently Runnable. When transitioning into Blocked, it will check for 32 | // signals (with the context lock held, which is required when sending signals). After 33 | // that, any detection of pending unblocked signals by the sender, will result in the 34 | // context being unblocked, and signals sent. 
35 | 36 | // TODO: prioritize signals over regular program execution 37 | return; 38 | } 39 | let control_flags = 40 | SigcontrolFlags::from_bits_retain(thread_ctl.control_flags.load(Ordering::Acquire)); 41 | 42 | if control_flags.contains(SigcontrolFlags::INHIBIT_DELIVERY) { 43 | // Signals are inhibited to protect critical sections inside libc, but this code will run 44 | // every time the context is switched to. 45 | log::trace!("Inhibiting delivery, returning"); 46 | return; 47 | } 48 | 49 | let sigh_instr_ptr = st.user_handler.get(); 50 | 51 | let Some(regs) = context.regs_mut() else { 52 | // TODO: is this even reachable? 53 | log::trace!("No registers, returning"); 54 | return; 55 | }; 56 | 57 | let ip = regs.instr_pointer(); 58 | let archdep_reg = regs.sig_archdep_reg(); 59 | 60 | regs.set_instr_pointer(sigh_instr_ptr); 61 | 62 | let (thread_ctl, _, _) = context 63 | .sigcontrol() 64 | .expect("cannot have been unset while holding the lock"); 65 | 66 | thread_ctl.saved_ip.set(ip); 67 | thread_ctl.saved_archdep_reg.set(archdep_reg); 68 | 69 | thread_ctl.control_flags.store( 70 | (control_flags | SigcontrolFlags::INHIBIT_DELIVERY).bits(), 71 | Ordering::Release, 72 | ); 73 | } 74 | pub fn excp_handler(excp: syscall::Exception) { 75 | let current = context::current(); 76 | 77 | let mut context = current.write(); 78 | 79 | let Some(eh) = context.sig.as_ref().and_then(|s| s.excp_handler) else { 80 | // TODO: Let procmgr print this? 81 | log::info!( 82 | "UNHANDLED EXCEPTION, CPU {}, PID {}, NAME {}, CONTEXT {current:p}", 83 | crate::cpu_id(), 84 | context.pid, 85 | context.name 86 | ); 87 | drop(context); 88 | // TODO: Allow exceptions to be caught by tracer etc, without necessarily exiting the 89 | // context (closing files, dropping AddrSpace, etc) 90 | crate::syscall::process::exit_this_context(Some(excp)); 91 | }; 92 | // TODO 93 | /* 94 | let Some(regs) = context.regs_mut() else { 95 | // TODO: unhandled exception in this case too? 
96 | return; 97 | }; 98 | let old_ip = regs.instr_pointer(); 99 | let old_archdep_reg = regs.ar 100 | let (tctl, pctl, sigst) = context.sigcontrol().expect("already checked"); 101 | tctl.saved_ip.set(excp.rsp); 102 | tctl.saved_archdep_reg*/ 103 | } 104 | -------------------------------------------------------------------------------- /src/context/timeout.rs: -------------------------------------------------------------------------------- 1 | use alloc::collections::VecDeque; 2 | use spin::{Mutex, MutexGuard, Once}; 3 | 4 | use crate::{ 5 | event, 6 | scheme::SchemeId, 7 | syscall::{ 8 | data::TimeSpec, 9 | flag::{CLOCK_MONOTONIC, CLOCK_REALTIME, EVENT_READ}, 10 | }, 11 | time, 12 | }; 13 | 14 | #[derive(Debug)] 15 | struct Timeout { 16 | pub scheme_id: SchemeId, 17 | pub event_id: usize, 18 | pub clock: usize, 19 | pub time: u128, 20 | } 21 | 22 | type Registry = VecDeque; 23 | 24 | static REGISTRY: Once> = Once::new(); 25 | 26 | /// Initialize registry, called if needed 27 | fn init_registry() -> Mutex { 28 | Mutex::new(Registry::new()) 29 | } 30 | 31 | /// Get the global timeouts list 32 | fn registry() -> MutexGuard<'static, Registry> { 33 | REGISTRY.call_once(init_registry).lock() 34 | } 35 | 36 | pub fn register(scheme_id: SchemeId, event_id: usize, clock: usize, time: TimeSpec) { 37 | let mut registry = registry(); 38 | registry.push_back(Timeout { 39 | scheme_id, 40 | event_id, 41 | clock, 42 | time: (time.tv_sec as u128 * time::NANOS_PER_SEC) + (time.tv_nsec as u128), 43 | }); 44 | } 45 | 46 | pub fn trigger() { 47 | let mut registry = registry(); 48 | 49 | let mono = time::monotonic(); 50 | let real = time::realtime(); 51 | 52 | let mut i = 0; 53 | while i < registry.len() { 54 | let trigger = match registry[i].clock { 55 | CLOCK_MONOTONIC => { 56 | let time = registry[i].time; 57 | mono >= time 58 | } 59 | CLOCK_REALTIME => { 60 | let time = registry[i].time; 61 | real >= time 62 | } 63 | clock => { 64 | println!("timeout::trigger: unknown clock {}", 
clock); 65 | true 66 | } 67 | }; 68 | 69 | if trigger { 70 | let timeout = registry.remove(i).unwrap(); 71 | event::trigger(timeout.scheme_id, timeout.event_id, EVENT_READ); 72 | } else { 73 | i += 1; 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/devices/graphical_debug/debug.rs: -------------------------------------------------------------------------------- 1 | use core::{cmp, ptr}; 2 | 3 | use super::Display; 4 | 5 | static FONT: &[u8] = include_bytes!("../../../res/unifont.font"); 6 | 7 | pub struct DebugDisplay { 8 | pub(super) display: Display, 9 | x: usize, 10 | y: usize, 11 | w: usize, 12 | h: usize, 13 | } 14 | 15 | impl DebugDisplay { 16 | pub(super) fn new(display: Display) -> DebugDisplay { 17 | let w = display.width / 8; 18 | let h = display.height / 16; 19 | DebugDisplay { 20 | display, 21 | x: 0, 22 | y: 0, 23 | w, 24 | h, 25 | } 26 | } 27 | 28 | fn write_char(&mut self, c: char) { 29 | if self.x >= self.w || c == '\n' { 30 | self.x = 0; 31 | self.y += 1; 32 | } 33 | 34 | if self.y >= self.h { 35 | let new_y = self.h - 1; 36 | let d_y = self.y - new_y; 37 | 38 | self.scroll(d_y * 16); 39 | 40 | unsafe { 41 | self.display 42 | .sync(0, 0, self.display.width, self.display.height); 43 | } 44 | 45 | self.y = new_y; 46 | } 47 | 48 | if c != '\n' { 49 | self.char(self.x * 8, self.y * 16, c, 0xFFFFFF); 50 | 51 | unsafe { 52 | self.display.sync(self.x * 8, self.y * 16, 8, 16); 53 | } 54 | 55 | self.x += 1; 56 | } 57 | } 58 | 59 | pub fn write(&mut self, buf: &[u8]) { 60 | for &b in buf { 61 | self.write_char(b as char); 62 | } 63 | } 64 | 65 | /// Draw a character 66 | fn char(&mut self, x: usize, y: usize, character: char, color: u32) { 67 | if x + 8 <= self.display.width && y + 16 <= self.display.height { 68 | let mut dst = unsafe { self.display.data_mut().add(y * self.display.stride + x) }; 69 | 70 | let font_i = 16 * (character as usize); 71 | if font_i + 16 <= FONT.len() { 72 | for row in 
0..16 { 73 | let row_data = FONT[font_i + row]; 74 | for col in 0..8 { 75 | if (row_data >> (7 - col)) & 1 == 1 { 76 | unsafe { 77 | *dst.add(col) = color; 78 | } 79 | } 80 | } 81 | dst = unsafe { dst.add(self.display.stride) }; 82 | } 83 | } 84 | } 85 | } 86 | 87 | /// Scroll the screen 88 | fn scroll(&mut self, lines: usize) { 89 | let offset = cmp::min(self.display.height, lines) * self.display.stride; 90 | let size = (self.display.stride * self.display.height) - offset; 91 | unsafe { 92 | let ptr = self.display.data_mut(); 93 | ptr::copy(ptr.add(offset), ptr, size); 94 | ptr::write_bytes(ptr.add(size), 0, offset); 95 | } 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/devices/graphical_debug/display.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use core::{ptr, slice}; 3 | 4 | /// A display 5 | pub(super) struct Display { 6 | pub(super) width: usize, 7 | pub(super) height: usize, 8 | pub(super) stride: usize, 9 | onscreen_ptr: *mut u32, 10 | offscreen: Option>, 11 | } 12 | 13 | unsafe impl Send for Display {} 14 | 15 | impl Display { 16 | pub(super) fn new( 17 | width: usize, 18 | height: usize, 19 | stride: usize, 20 | onscreen_ptr: *mut u32, 21 | ) -> Display { 22 | unsafe { 23 | ptr::write_bytes(onscreen_ptr, 0, stride * height); 24 | } 25 | Display { 26 | width, 27 | height, 28 | stride, 29 | onscreen_ptr, 30 | offscreen: None, 31 | } 32 | } 33 | 34 | pub(super) fn heap_init(&mut self) { 35 | let onscreen = 36 | unsafe { slice::from_raw_parts(self.onscreen_ptr, self.stride * self.height) }; 37 | self.offscreen = Some(onscreen.to_vec().into_boxed_slice()); 38 | } 39 | 40 | pub(super) fn data_mut(&mut self) -> *mut u32 { 41 | match &mut self.offscreen { 42 | Some(offscreen) => offscreen.as_mut_ptr(), 43 | None => self.onscreen_ptr, 44 | } 45 | } 46 | 47 | /// Sync from offscreen to onscreen, unsafe because it trusts provided x, y, w, h 
48 | pub(super) unsafe fn sync(&mut self, x: usize, y: usize, w: usize, mut h: usize) { 49 | if let Some(offscreen) = &self.offscreen { 50 | let mut offset = y * self.stride + x; 51 | while h > 0 { 52 | ptr::copy( 53 | offscreen.as_ptr().add(offset), 54 | self.onscreen_ptr.add(offset), 55 | w, 56 | ); 57 | offset += self.stride; 58 | h -= 1; 59 | } 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/devices/graphical_debug/mod.rs: -------------------------------------------------------------------------------- 1 | use core::str; 2 | use spin::Mutex; 3 | 4 | pub use self::debug::DebugDisplay; 5 | use self::display::Display; 6 | 7 | pub mod debug; 8 | pub mod display; 9 | 10 | pub static DEBUG_DISPLAY: Mutex> = Mutex::new(None); 11 | 12 | pub static FRAMEBUFFER: Mutex<(usize, usize, usize)> = Mutex::new((0, 0, 0)); 13 | 14 | #[allow(unused)] 15 | pub fn init(env: &[u8]) { 16 | println!("Starting graphical debug"); 17 | 18 | let mut phys = 0; 19 | let mut virt = 0; 20 | let mut width = 0; 21 | let mut height = 0; 22 | let mut stride = 0; 23 | 24 | //TODO: should errors be reported? 
25 | for line in str::from_utf8(env).unwrap_or("").lines() { 26 | let mut parts = line.splitn(2, '='); 27 | let name = parts.next().unwrap_or(""); 28 | let value = parts.next().unwrap_or(""); 29 | 30 | if name == "FRAMEBUFFER_ADDR" { 31 | phys = usize::from_str_radix(value, 16).unwrap_or(0); 32 | } 33 | 34 | if name == "FRAMEBUFFER_VIRT" { 35 | virt = usize::from_str_radix(value, 16).unwrap_or(0); 36 | } 37 | 38 | if name == "FRAMEBUFFER_WIDTH" { 39 | width = usize::from_str_radix(value, 16).unwrap_or(0); 40 | } 41 | 42 | if name == "FRAMEBUFFER_HEIGHT" { 43 | height = usize::from_str_radix(value, 16).unwrap_or(0); 44 | } 45 | 46 | if name == "FRAMEBUFFER_STRIDE" { 47 | stride = usize::from_str_radix(value, 16).unwrap_or(0); 48 | } 49 | } 50 | 51 | *FRAMEBUFFER.lock() = (phys, virt, stride * height * 4); 52 | 53 | if phys == 0 || virt == 0 || width == 0 || height == 0 || stride == 0 { 54 | println!("Framebuffer not found"); 55 | return; 56 | } 57 | 58 | println!( 59 | "Framebuffer {}x{} stride {} at {:X} mapped to {:X}", 60 | width, height, stride, phys, virt 61 | ); 62 | 63 | { 64 | let display = Display::new(width, height, stride, virt as *mut u32); 65 | let debug_display = DebugDisplay::new(display); 66 | *DEBUG_DISPLAY.lock() = Some(debug_display); 67 | } 68 | } 69 | 70 | #[allow(unused)] 71 | pub fn init_heap() { 72 | if let Some(debug_display) = &mut *DEBUG_DISPLAY.lock() { 73 | debug_display.display.heap_init(); 74 | } 75 | } 76 | 77 | #[allow(unused)] 78 | pub fn fini() { 79 | DEBUG_DISPLAY.lock().take(); 80 | 81 | println!("Finished graphical debug"); 82 | } 83 | -------------------------------------------------------------------------------- /src/devices/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "graphical_debug")] 2 | pub mod graphical_debug; 3 | pub mod uart_16550; 4 | -------------------------------------------------------------------------------- /src/log.rs: 
--------------------------------------------------------------------------------
use alloc::collections::VecDeque;
use spin::{Mutex, Once};

/// Global kernel log ring buffer; `None` until [`init`] runs (heap required).
pub static LOG: Mutex<Option<Log>> = Mutex::new(None);

/// Allocate the global log with a 1 MiB capacity.
pub fn init() {
    *LOG.lock() = Some(Log::new(1024 * 1024));
}

/// Fixed-capacity ring buffer of raw log bytes; oldest bytes are discarded
/// once `size` is reached.
pub struct Log {
    // Buffered bytes, oldest at the front.
    data: VecDeque<u8>,
    // Maximum number of bytes retained.
    size: usize,
}

impl Log {
    /// Create a log retaining at most `size` bytes.
    pub fn new(size: usize) -> Log {
        Log {
            data: VecDeque::with_capacity(size),
            size,
        }
    }

    /// Borrow the buffered bytes as the deque's two contiguous slices,
    /// oldest data first.
    pub fn read(&self) -> (&[u8], &[u8]) {
        self.data.as_slices()
    }

    /// Append `buf`, evicting the oldest bytes when full.
    pub fn write(&mut self, buf: &[u8]) {
        for &b in buf {
            // Evict until there is room for the byte about to be pushed, so
            // the buffer can hold exactly `size` bytes. (The previous
            // `len() + 1 >= size` test was off by one and capped the buffer
            // at `size - 1` bytes.)
            while self.data.len() >= self.size {
                self.data.pop_front();
            }
            self.data.push_back(b);
        }
    }
}

/// Adapter forwarding `log` crate records to a kernel-provided sink.
struct RedoxLogger {
    log_func: fn(&log::Record),
}

impl ::log::Log for RedoxLogger {
    fn enabled(&self, _: &log::Metadata<'_>) -> bool {
        false
    }
    fn log(&self, record: &log::Record<'_>) {
        (self.log_func)(record)
    }
    fn flush(&self) {}
}

/// Install `log_func` as the global logger sink. Safe to call more than
/// once: subsequent calls log an error and keep the first sink.
pub fn init_logger(log_func: fn(&log::Record)) {
    let mut called = false;
    let logger = LOGGER.call_once(|| {
        ::log::set_max_level(::log::LevelFilter::Info);
        // `called` is only set inside the Once closure, so it distinguishes
        // first-time initialization from repeat calls.
        called = true;

        RedoxLogger { log_func }
    });
    if !called {
        log::error!("Tried to reinitialize the logger, which is not possible. Ignoring.")
    }
    match ::log::set_logger(logger) {
        Ok(_) => log::info!("Logger initialized."),
        Err(e) => println!("Logger setup failed! error: {}", e),
    }
}

static LOGGER: Once<RedoxLogger> = Once::new();
--------------------------------------------------------------------------------
/src/memory/kernel_mapper.rs:
--------------------------------------------------------------------------------
use crate::cpu_set::LogicalCpuId;
use core::sync::{
    atomic,
    atomic::{AtomicUsize, Ordering},
};
use rmm::{PageMapper, TableKind};

const NO_PROCESSOR: usize = !0;
static LOCK_OWNER: AtomicUsize = AtomicUsize::new(NO_PROCESSOR);
static LOCK_COUNT: AtomicUsize = AtomicUsize::new(0);

// TODO: Support, perhaps via const generics, embedding address checking in PageMapper, thereby
// statically enforcing that the kernel mapper can only map things in the kernel half, and vice
// versa.
/// A guard to the global lock protecting the upper 128 TiB of kernel address space.
///
/// NOTE: Use this with great care! Since heap allocations may also require this lock when the heap
/// needs to be expanded, it must not be held while memory allocations are done!
// TODO: Make the lock finer-grained so that e.g. the heap part can be independent from e.g.
// PHYS_PML4?
pub struct KernelMapper {
    // The underlying kernel page mapper; dereferencing a KernelMapper yields it.
    mapper: crate::paging::PageMapper,
    // True when the lock was already held by this CPU at acquisition time;
    // such a reentrant guard is read-only (see `get_mut`).
    ro: bool,
}
impl KernelMapper {
    /// Spin until this CPU owns the global kernel-mapper lock, then bump the
    /// reentrancy count. Returns true if the lock was already held (the
    /// caller must then treat the mapper as read-only).
    fn lock_inner(current_processor: usize) -> bool {
        loop {
            match LOCK_OWNER.compare_exchange_weak(
                NO_PROCESSOR,
                current_processor,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                // already owned by this hardware thread
                Err(id) if id == current_processor => break,
                // either CAS failed, or some other hardware thread holds the lock
                Err(_) => core::hint::spin_loop(),
            }
        }

        // Count nested acquisitions on this CPU; a prior count > 0 means
        // this guard is reentrant and therefore read-only.
        let prev_count = LOCK_COUNT.fetch_add(1, Ordering::Relaxed);
        atomic::compiler_fence(Ordering::Acquire);

        prev_count > 0
    }
    /// Acquire the lock around a caller-supplied mapper.
    ///
    /// SAFETY: the caller must ensure `mapper` is a valid kernel-table
    /// mapper for this CPU; this function only provides the locking.
    pub unsafe fn lock_for_manual_mapper(
        current_processor: LogicalCpuId,
        mapper: crate::paging::PageMapper,
    ) -> Self {
        let ro = Self::lock_inner(current_processor.get() as usize);
        Self { mapper, ro }
    }
    /// Acquire the lock using the currently-active kernel page table.
    pub fn lock_manually(current_processor: LogicalCpuId) -> Self {
        unsafe {
            Self::lock_for_manual_mapper(
                current_processor,
                PageMapper::current(TableKind::Kernel, crate::memory::TheFrameAllocator),
            )
        }
    }
    /// Acquire the lock on the calling CPU.
    pub fn lock() -> Self {
        Self::lock_manually(crate::cpu_id())
    }
    /// Mutable access to the mapper, or None when this guard is a reentrant
    /// (read-only) acquisition.
    pub fn get_mut(&mut self) -> Option<&mut crate::paging::PageMapper> {
        if self.ro {
            None
        } else {
            Some(&mut self.mapper)
        }
    }
}
impl core::ops::Deref for KernelMapper {
    type Target = crate::paging::PageMapper;

    fn deref(&self) -> &Self::Target {
        &self.mapper
    }
}
impl Drop for KernelMapper {
    // Release one level of reentrancy; only the outermost guard (count
    // falling from 1 to 0) clears the owner so other CPUs may acquire.
    fn drop(&mut self) {
        if LOCK_COUNT.fetch_sub(1, Ordering::Relaxed) == 1 {
            LOCK_OWNER.store(NO_PROCESSOR, Ordering::Release);
        }
        atomic::compiler_fence(Ordering::Release);
    }
}
--------------------------------------------------------------------------------
/src/scheme/event.rs:
--------------------------------------------------------------------------------
use alloc::sync::Arc;
use core::mem;
use syscall::O_NONBLOCK;

use crate::{
    context::file::InternalFlags,
    event::{next_queue_id, queues, queues_mut, EventQueue, EventQueueId},
    syscall::{
        data::Event,
        error::*,
        usercopy::{UserSliceRo, UserSliceWo},
    },
};

use super::{CallerCtx, KernelScheme, OpenResult};

/// Kernel scheme backing `event:`; each open file is an event queue.
pub struct EventScheme;

impl KernelScheme for EventScheme {
    /// Opening allocates a fresh queue keyed by a new queue id.
    fn kopen(&self, _path: &str, _flags: usize, _ctx: CallerCtx) -> Result<OpenResult> {
        let queue_id = next_queue_id();
        queues_mut().insert(queue_id, Arc::new(EventQueue::new(queue_id)));

        Ok(OpenResult::SchemeLocal(queue_id.get(), InternalFlags::empty()))
    }

    /// Closing drops the queue; EBADF if it was already gone.
    fn close(&self, id: usize) -> Result<()> {
        let queue_id = EventQueueId::from(id);
        match queues_mut().remove(&queue_id) {
            Some(_) => Ok(()),
            None => Err(Error::new(EBADF)),
        }
    }
    /// Read pending events into `buf`; blocks unless O_NONBLOCK is set.
    fn kread(&self, id: usize, buf: UserSliceWo, flags: u32, _stored_flags: u32) -> Result<usize> {
        let queue_id = EventQueueId::from(id);

        // Clone the Arc so the registry lock is not held during the read.
        let queue = queues()
            .get(&queue_id)
            .ok_or(Error::new(EBADF))?
            .clone();

        let block = flags & O_NONBLOCK as u32 == 0;
        queue.read(buf, block)
    }

    /// Write `Event` records from `buf` into the queue, stopping early when
    /// the queue accepts no more; returns the number of bytes consumed.
    fn kwrite(
        &self,
        id: usize,
        buf: UserSliceRo,
        _flags: u32,
        _stored_flags: u32,
    ) -> Result<usize> {
        let queue_id = EventQueueId::from(id);

        // Clone the Arc so the registry lock is not held during the writes.
        let queue = queues()
            .get(&queue_id)
            .ok_or(Error::new(EBADF))?
            .clone();

        let mut accepted = 0;
        for chunk in buf.in_exact_chunks(mem::size_of::<Event>()) {
            let event = unsafe { chunk.read_exact::<Event>()? };
            if queue.write(&[event])? == 0 {
                break;
            }
            accepted += 1;
        }

        Ok(accepted * mem::size_of::<Event>())
    }

    fn kfpath(&self, _id: usize, buf: UserSliceWo) -> Result<usize> {
        buf.copy_common_bytes_from_slice(b"event:")
    }
}
--------------------------------------------------------------------------------
/src/scheme/serio.rs:
--------------------------------------------------------------------------------
//! PS/2 unfortunately requires a kernel driver to prevent race conditions due
//! to how status is utilized
use core::{
    str,
    sync::atomic::{AtomicUsize, Ordering},
};

use spin::RwLock;

use crate::{
    event,
    scheme::*,
    sync::WaitQueue,
    syscall::{
        flag::{EventFlags, EVENT_READ, O_NONBLOCK},
        usercopy::UserSliceWo,
    },
};

static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

/// Input queue
static INPUT: [WaitQueue<u8>; 2] = [WaitQueue::new(), WaitQueue::new()];

#[derive(Clone, Copy)]
struct Handle {
    index: usize,
}

// Using BTreeMap as hashbrown doesn't have a const constructor.
31 | static HANDLES: RwLock> = RwLock::new(BTreeMap::new()); 32 | 33 | /// Add to the input queue 34 | pub fn serio_input(index: usize, data: u8) { 35 | #[cfg(feature = "profiling")] 36 | crate::profiling::serio_command(index, data); 37 | 38 | INPUT[index].send(data); 39 | 40 | for (id, _handle) in HANDLES.read().iter() { 41 | event::trigger(GlobalSchemes::Serio.scheme_id(), *id, EVENT_READ); 42 | } 43 | } 44 | 45 | pub struct SerioScheme; 46 | 47 | impl KernelScheme for SerioScheme { 48 | fn kopen(&self, path: &str, _flags: usize, ctx: CallerCtx) -> Result { 49 | if ctx.uid != 0 { 50 | return Err(Error::new(EPERM)); 51 | } 52 | 53 | let index = path.parse::().or(Err(Error::new(ENOENT)))?; 54 | if index >= INPUT.len() { 55 | return Err(Error::new(ENOENT)); 56 | } 57 | 58 | let id = NEXT_ID.fetch_add(1, Ordering::Relaxed); 59 | HANDLES.write().insert(id, Handle { index }); 60 | 61 | Ok(OpenResult::SchemeLocal(id, InternalFlags::empty())) 62 | } 63 | 64 | fn fevent(&self, id: usize, _flags: EventFlags) -> Result { 65 | let _handle = { 66 | let handles = HANDLES.read(); 67 | *handles.get(&id).ok_or(Error::new(EBADF))? 68 | }; 69 | 70 | Ok(EventFlags::empty()) 71 | } 72 | 73 | fn fsync(&self, id: usize) -> Result<()> { 74 | let _handle = { 75 | let handles = HANDLES.read(); 76 | *handles.get(&id).ok_or(Error::new(EBADF))? 77 | }; 78 | 79 | Ok(()) 80 | } 81 | 82 | /// Close the file `number` 83 | fn close(&self, id: usize) -> Result<()> { 84 | let _handle = { 85 | let mut handles = HANDLES.write(); 86 | handles.remove(&id).ok_or(Error::new(EBADF))? 87 | }; 88 | 89 | Ok(()) 90 | } 91 | fn kread(&self, id: usize, buf: UserSliceWo, flags: u32, _stored_flags: u32) -> Result { 92 | let handle = { 93 | let handles = HANDLES.read(); 94 | *handles.get(&id).ok_or(Error::new(EBADF))? 
95 | }; 96 | 97 | INPUT[handle.index].receive_into_user( 98 | buf, 99 | flags & O_NONBLOCK as u32 == 0, 100 | "SerioScheme::read", 101 | ) 102 | } 103 | 104 | fn kfpath(&self, id: usize, buf: UserSliceWo) -> Result { 105 | let handle = { 106 | let handles = HANDLES.read(); 107 | *handles.get(&id).ok_or(Error::new(EBADF))? 108 | }; 109 | let path = format!("serio:{}", handle.index).into_bytes(); 110 | 111 | buf.copy_common_bytes_from_slice(&path) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/scheme/sys/block.rs: -------------------------------------------------------------------------------- 1 | use alloc::{string::String, vec::Vec}; 2 | use core::fmt::Write; 3 | 4 | use crate::{context, syscall::error::Result}; 5 | 6 | pub fn resource() -> Result> { 7 | let mut string = String::new(); 8 | 9 | { 10 | let mut rows = Vec::new(); 11 | { 12 | let contexts = context::contexts(); 13 | for context_lock in contexts.iter().filter_map(|r| r.upgrade()) { 14 | let context = context_lock.read(); 15 | rows.push((context.pid, context.name.clone(), context.status_reason)); 16 | } 17 | } 18 | 19 | for row in rows.iter() { 20 | let id: usize = row.0.into(); 21 | let name = &row.1; 22 | 23 | let _ = writeln!(string, "{}: {}", id, name); 24 | 25 | if !row.2.is_empty() { 26 | let _ = writeln!(string, " {}", row.2); 27 | } 28 | } 29 | } 30 | 31 | Ok(string.into_bytes()) 32 | } 33 | -------------------------------------------------------------------------------- /src/scheme/sys/cpu.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::{ 4 | device::cpu::cpu_info, 5 | syscall::error::{Error, Result, EIO}, 6 | }; 7 | 8 | pub fn resource() -> Result> { 9 | let mut string = format!("CPUs: {}\n", crate::cpu_count()); 10 | 11 | match cpu_info(&mut string) { 12 | Ok(()) => Ok(string.into_bytes()), 13 | Err(_) => Err(Error::new(EIO)), 14 | } 15 | } 16 | 
-------------------------------------------------------------------------------- /src/scheme/sys/exe.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::{context, syscall::error::Result}; 4 | 5 | pub fn resource() -> Result> { 6 | Ok(context::current().read().name.as_bytes().to_vec()) 7 | } 8 | -------------------------------------------------------------------------------- /src/scheme/sys/fdstat.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | context, 3 | context::{file::FileDescription, memory::AddrSpaceWrapper}, 4 | scheme, 5 | syscall::error::Result, 6 | }; 7 | use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; 8 | use core::{fmt::Write, hash::Hash}; 9 | use hashbrown::HashMap; 10 | use spin::RwLock; 11 | 12 | pub fn resource() -> Result> { 13 | #[derive(Debug)] 14 | struct Ref(Arc); 15 | impl Hash for Ref { 16 | fn hash(&self, state: &mut H) { 17 | state.write_usize(Arc::as_ptr(&self.0) as usize); 18 | } 19 | } 20 | impl PartialEq for Ref { 21 | fn eq(&self, other: &Self) -> bool { 22 | Arc::as_ptr(&self.0) == Arc::as_ptr(&other.0) 23 | } 24 | } 25 | impl Eq for Ref {} 26 | #[derive(Default)] 27 | struct Descr { 28 | owners: HashMap, String>, 29 | scheme: Box, 30 | } 31 | let mut map = HashMap::>, Descr>::new(); 32 | 33 | let mut report = String::new(); 34 | 'contexts: for context in context::contexts().iter().filter_map(|c| c.upgrade()) { 35 | let context = context.read(); 36 | let files = context.files.read(); 37 | writeln!(report, "'{}' {{", context.name).unwrap(); 38 | 39 | for file in files.iter().filter_map(|f| f.clone()) { 40 | writeln!( 41 | report, 42 | "\tS{}W{}", 43 | Arc::strong_count(&file.description), 44 | Arc::weak_count(&file.description) 45 | ) 46 | .unwrap(); 47 | let fr = Ref(file.description.clone()); 48 | let Some(a) = context.addr_space.clone() else { 49 | continue 'contexts; 50 | }; 51 | let 
descr = map.entry(fr).or_default(); 52 | 53 | let scheme_id = file.description.read().scheme; 54 | let scheme = scheme::schemes() 55 | .names 56 | .iter() 57 | .flat_map(|(_, v)| v.iter()) 58 | .find_map(|(name, id)| { 59 | if *id == scheme_id { 60 | Some(name.clone()) 61 | } else { 62 | None 63 | } 64 | }); 65 | descr 66 | .owners 67 | .entry(Ref(a)) 68 | .or_insert(context.name.clone().into_owned()); 69 | descr.scheme = scheme.unwrap_or(Box::from("[unknown]")); 70 | } 71 | writeln!(report, "}}").unwrap(); 72 | } 73 | writeln!(report, "==========").unwrap(); 74 | let mut singletons = 0; 75 | for (fr, ma) in map.iter() { 76 | if ma.owners.len() == 1 { 77 | singletons += 1; 78 | } 79 | writeln!( 80 | report, 81 | "{:p}: {:?}; {}", 82 | fr.0, 83 | ma.owners.values().cloned().collect::>(), 84 | ma.scheme 85 | ) 86 | .unwrap(); 87 | } 88 | writeln!(report, "==========").unwrap(); 89 | writeln!( 90 | report, 91 | "{} singletons out of {} total", 92 | singletons, 93 | map.len() 94 | ) 95 | .unwrap(); 96 | 97 | Ok(report.into()) 98 | } 99 | -------------------------------------------------------------------------------- /src/scheme/sys/iostat.rs: -------------------------------------------------------------------------------- 1 | use crate::{context, scheme, syscall::error::Result}; 2 | use alloc::{string::String, vec::Vec}; 3 | use core::fmt::Write; 4 | 5 | pub fn resource() -> Result> { 6 | let mut string = String::new(); 7 | 8 | { 9 | let mut rows = Vec::new(); 10 | { 11 | let contexts = context::contexts(); 12 | for context_ref in contexts.iter().filter_map(|r| r.upgrade()) { 13 | let context = context_ref.read(); 14 | rows.push(( 15 | context.debug_id, 16 | context.name.clone(), 17 | context.files.read().clone(), 18 | )); 19 | } 20 | } 21 | 22 | for (id, name, fs) in rows.iter() { 23 | let _ = writeln!(string, "{}: {}", id, name); 24 | 25 | for (fd, f) in fs.iter().enumerate() { 26 | let file = match *f { 27 | None => continue, 28 | Some(ref file) => file.clone(), 29 
| }; 30 | 31 | let description = file.description.read(); 32 | 33 | let _scheme = { 34 | let schemes = scheme::schemes(); 35 | match schemes.get(description.scheme) { 36 | Some(scheme) => scheme.clone(), 37 | None => { 38 | let _ = writeln!( 39 | string, 40 | " {:>4}: {:>8} {:>8} {:>08X}: no scheme", 41 | fd, 42 | description.scheme.get(), 43 | description.number, 44 | description.flags 45 | ); 46 | continue; 47 | } 48 | } 49 | }; 50 | 51 | /* 52 | let mut fpath = [0; 4096]; 53 | match scheme.fpath(description.number, &mut fpath) { 54 | Ok(path_len) => { 55 | let fname = str::from_utf8(&fpath[..path_len]).unwrap_or("?"); 56 | let _ = writeln!(string, "{:>6}: {:>8} {:>8} {:>08X}: {}", fd, description.scheme.get(), description.number, description.flags, fname); 57 | }, 58 | Err(err) => { 59 | let _ = writeln!(string, "{:>6}: {:>8} {:>8} {:>08X}: {}", fd, description.scheme.get(), description.number, description.flags, err); 60 | } 61 | } 62 | */ 63 | } 64 | } 65 | } 66 | 67 | Ok(string.into_bytes()) 68 | } 69 | -------------------------------------------------------------------------------- /src/scheme/sys/irq.rs: -------------------------------------------------------------------------------- 1 | use alloc::{string::String, vec::Vec}; 2 | use core::fmt::Write; 3 | 4 | use crate::syscall::error::Result; 5 | 6 | pub fn resource() -> Result> { 7 | let mut string = String::new(); 8 | 9 | { 10 | let counts = crate::scheme::irq::COUNTS.lock(); 11 | for (i, count) in counts.iter().enumerate() { 12 | let _ = writeln!(string, "{}: {}", i, count); 13 | } 14 | } 15 | 16 | Ok(string.into_bytes()) 17 | } 18 | -------------------------------------------------------------------------------- /src/scheme/sys/log.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::{log::LOG, syscall::error::Result}; 4 | 5 | pub fn resource() -> Result> { 6 | let mut vec = Vec::new(); 7 | 8 | if let Some(ref log) = 
*LOG.lock() { 9 | let slices = log.read(); 10 | vec.reserve_exact(slices.0.len() + slices.1.len()); 11 | vec.extend_from_slice(slices.0); 12 | vec.extend_from_slice(slices.1); 13 | } 14 | 15 | Ok(vec) 16 | } 17 | -------------------------------------------------------------------------------- /src/scheme/sys/scheme.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::{context, scheme, syscall::error::Result}; 4 | 5 | pub fn resource() -> Result> { 6 | let scheme_ns = context::current().read().ens; 7 | 8 | let mut data = Vec::new(); 9 | 10 | let schemes = scheme::schemes(); 11 | for (name, _scheme_id) in schemes.iter_name(scheme_ns) { 12 | data.extend_from_slice(name.as_bytes()); 13 | data.push(b'\n'); 14 | } 15 | 16 | Ok(data) 17 | } 18 | -------------------------------------------------------------------------------- /src/scheme/sys/scheme_num.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::{context, scheme, syscall::error::Result}; 4 | 5 | pub fn resource() -> Result> { 6 | let scheme_ns = context::current().read().ens; 7 | 8 | let mut data = Vec::new(); 9 | 10 | let schemes = scheme::schemes(); 11 | for (name, &scheme_id) in schemes.iter_name(scheme_ns) { 12 | data.extend_from_slice(format!("{:>4}: ", scheme_id.get()).as_bytes()); 13 | data.extend_from_slice(name.as_bytes()); 14 | data.push(b'\n'); 15 | } 16 | 17 | Ok(data) 18 | } 19 | -------------------------------------------------------------------------------- /src/scheme/sys/stat.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | context::{contexts, ContextRef, Status}, 3 | cpu_stats::{get_context_switch_count, get_contexts_count, irq_counts}, 4 | percpu::get_all_stats, 5 | syscall::error::Result, 6 | time::START, 7 | }; 8 | use alloc::{string::String, vec::Vec}; 9 | 10 | /// Get the sys:stat data 
as displayed to the user. 11 | pub fn resource() -> Result> { 12 | let start_time_sec = *START.lock() / 1_000_000_000; 13 | 14 | let (contexts_running, contexts_blocked) = get_contexts_stats(); 15 | let res = format!( 16 | "{}{}\n\ 17 | boot_time: {start_time_sec}\n\ 18 | context_switches: {}\n\ 19 | contexts_created: {}\n\ 20 | contexts_running: {contexts_running}\n\ 21 | contexts_blocked: {contexts_blocked}", 22 | get_cpu_stats(), 23 | get_irq_stats(), 24 | get_context_switch_count(), 25 | get_contexts_count(), 26 | ); 27 | 28 | Ok(res.into_bytes()) 29 | } 30 | 31 | /// Formats CPU stats. 32 | fn get_cpu_stats() -> String { 33 | let mut cpu_data = String::new(); 34 | let stats = get_all_stats(); 35 | 36 | let mut total_user = 0; 37 | let mut total_nice = 0; 38 | let mut total_kernel = 0; 39 | let mut total_idle = 0; 40 | let mut total_irq = 0; 41 | for (id, stat) in stats { 42 | total_user += stat.user; 43 | total_nice += stat.nice; 44 | total_kernel += stat.kernel; 45 | total_idle += stat.idle; 46 | total_irq += stat.irq; 47 | cpu_data += &format!("{}\n", stat.to_string(id)); 48 | } 49 | format!( 50 | "cpu {total_user} {total_nice} {total_kernel} {total_idle} {total_irq}\n\ 51 | {cpu_data}" 52 | ) 53 | } 54 | 55 | /// Formats IRQ stats. 56 | fn get_irq_stats() -> String { 57 | let irq = irq_counts(); 58 | let mut irq_total = 0; 59 | let per_irq = irq 60 | .iter() 61 | .map(|c| { 62 | irq_total += *c; 63 | format!("{c}") 64 | }) 65 | .collect::>() 66 | .join(" "); 67 | format!("IRQs {irq_total} {per_irq}") 68 | } 69 | 70 | /// Format contexts stats. 
71 | fn get_contexts_stats() -> (u64, u64) { 72 | let mut running = 0; 73 | let mut blocked = 0; 74 | 75 | let statuses = contexts() 76 | .iter() 77 | .filter_map(ContextRef::upgrade) 78 | .map(|context| context.read_arc().status.clone()) 79 | .collect::>(); 80 | 81 | for status in statuses { 82 | if matches!(status, Status::Runnable) { 83 | running += 1; 84 | } else if !matches!(status, Status::Dead) { 85 | blocked += 1; 86 | } 87 | } 88 | (running, blocked) 89 | } 90 | -------------------------------------------------------------------------------- /src/scheme/sys/syscall.rs: -------------------------------------------------------------------------------- 1 | use alloc::{string::String, vec::Vec}; 2 | use core::fmt::Write; 3 | 4 | use crate::{context, syscall, syscall::error::Result}; 5 | 6 | pub fn resource() -> Result> { 7 | let mut string = String::new(); 8 | 9 | { 10 | let mut rows = Vec::new(); 11 | { 12 | let contexts = context::contexts(); 13 | for context_ref in contexts.iter().filter_map(|r| r.upgrade()) { 14 | let context = context_ref.read(); 15 | rows.push(( 16 | context.debug_id, 17 | context.name.clone(), 18 | context.current_syscall(), 19 | )); 20 | } 21 | } 22 | 23 | for &(id, ref name, sc) in rows.iter() { 24 | let _ = writeln!(string, "{}: {}", id, name); 25 | 26 | if let Some([a, b, c, d, e, f]) = sc { 27 | let _ = writeln!( 28 | string, 29 | " {}", 30 | syscall::debug::format_call(a, b, c, d, e, f) 31 | ); 32 | } 33 | } 34 | } 35 | 36 | Ok(string.into_bytes()) 37 | } 38 | -------------------------------------------------------------------------------- /src/scheme/sys/uname.rs: -------------------------------------------------------------------------------- 1 | use crate::syscall::error::Result; 2 | use alloc::vec::Vec; 3 | 4 | pub fn resource() -> Result> { 5 | Ok(format!( 6 | "Redox\n\n{}\n\n{}\n", 7 | env!("CARGO_PKG_VERSION"), 8 | env!("TARGET").split('-').next().unwrap() 9 | ) 10 | .into_bytes()) 11 | } 12 | 
-------------------------------------------------------------------------------- /src/scheme/time.rs: -------------------------------------------------------------------------------- 1 | use alloc::collections::BTreeMap; 2 | use core::{ 3 | mem, str, 4 | sync::atomic::{AtomicUsize, Ordering}, 5 | }; 6 | use spin::RwLock; 7 | 8 | use crate::{ 9 | context::{file::InternalFlags, timeout}, 10 | syscall::{ 11 | data::TimeSpec, 12 | error::*, 13 | flag::{EventFlags, CLOCK_MONOTONIC, CLOCK_REALTIME}, 14 | usercopy::{UserSliceRo, UserSliceWo}, 15 | }, 16 | time, 17 | }; 18 | 19 | use super::{CallerCtx, GlobalSchemes, KernelScheme, OpenResult}; 20 | 21 | static NEXT_ID: AtomicUsize = AtomicUsize::new(1); 22 | // Using BTreeMap as hashbrown doesn't have a const constructor. 23 | static HANDLES: RwLock> = RwLock::new(BTreeMap::new()); 24 | 25 | pub struct TimeScheme; 26 | 27 | impl KernelScheme for TimeScheme { 28 | fn kopen(&self, path: &str, _flags: usize, _ctx: CallerCtx) -> Result { 29 | let clock = path.parse::().map_err(|_| Error::new(ENOENT))?; 30 | 31 | match clock { 32 | CLOCK_REALTIME => (), 33 | CLOCK_MONOTONIC => (), 34 | _ => return Err(Error::new(ENOENT)), 35 | } 36 | 37 | let id = NEXT_ID.fetch_add(1, Ordering::Relaxed); 38 | HANDLES.write().insert(id, clock); 39 | 40 | Ok(OpenResult::SchemeLocal(id, InternalFlags::empty())) 41 | } 42 | 43 | fn fcntl(&self, _id: usize, _cmd: usize, _arg: usize) -> Result { 44 | Ok(0) 45 | } 46 | 47 | fn fevent(&self, id: usize, _flags: EventFlags) -> Result { 48 | HANDLES 49 | .read() 50 | .get(&id) 51 | .ok_or(Error::new(EBADF)) 52 | .and(Ok(EventFlags::empty())) 53 | } 54 | 55 | fn fsync(&self, id: usize) -> Result<()> { 56 | HANDLES.read().get(&id).ok_or(Error::new(EBADF))?; 57 | Ok(()) 58 | } 59 | 60 | fn close(&self, id: usize) -> Result<()> { 61 | HANDLES 62 | .write() 63 | .remove(&id) 64 | .ok_or(Error::new(EBADF)) 65 | .and(Ok(())) 66 | } 67 | fn kread(&self, id: usize, buf: UserSliceWo, _flags: u32, _stored_flags: 
u32) -> Result { 68 | let clock = *HANDLES.read().get(&id).ok_or(Error::new(EBADF))?; 69 | 70 | let mut bytes_read = 0; 71 | 72 | for current_chunk in buf.in_exact_chunks(mem::size_of::()) { 73 | let arch_time = match clock { 74 | CLOCK_REALTIME => time::realtime(), 75 | CLOCK_MONOTONIC => time::monotonic(), 76 | _ => return Err(Error::new(EINVAL)), 77 | }; 78 | let time = TimeSpec { 79 | tv_sec: (arch_time / time::NANOS_PER_SEC) as i64, 80 | tv_nsec: (arch_time % time::NANOS_PER_SEC) as i32, 81 | }; 82 | current_chunk.copy_exactly(&time)?; 83 | 84 | bytes_read += mem::size_of::(); 85 | } 86 | 87 | Ok(bytes_read) 88 | } 89 | 90 | fn kwrite( 91 | &self, 92 | id: usize, 93 | buf: UserSliceRo, 94 | _flags: u32, 95 | _stored_flags: u32, 96 | ) -> Result { 97 | let clock = *HANDLES.read().get(&id).ok_or(Error::new(EBADF))?; 98 | 99 | let mut bytes_written = 0; 100 | 101 | for current_chunk in buf.in_exact_chunks(mem::size_of::()) { 102 | let time = unsafe { current_chunk.read_exact::()? }; 103 | 104 | timeout::register(GlobalSchemes::Time.scheme_id(), id, clock, time); 105 | 106 | bytes_written += mem::size_of::(); 107 | } 108 | 109 | Ok(bytes_written) 110 | } 111 | fn kfpath(&self, id: usize, buf: UserSliceWo) -> Result { 112 | let clock = *HANDLES.read().get(&id).ok_or(Error::new(EBADF))?; 113 | 114 | let scheme_path = format!("time:{}", clock).into_bytes(); 115 | buf.copy_common_bytes_from_slice(&scheme_path) 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/startup/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod memory; 2 | -------------------------------------------------------------------------------- /src/sync/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::{wait_condition::WaitCondition, wait_queue::WaitQueue}; 2 | 3 | pub mod wait_condition; 4 | pub mod wait_queue; 5 | 
-------------------------------------------------------------------------------- /src/sync/wait_condition.rs: -------------------------------------------------------------------------------- 1 | use alloc::{ 2 | sync::{Arc, Weak}, 3 | vec::Vec, 4 | }; 5 | use spin::Mutex; 6 | use spinning_top::RwSpinlock; 7 | 8 | use crate::context::{self, Context}; 9 | 10 | #[derive(Debug)] 11 | pub struct WaitCondition { 12 | contexts: Mutex>>>, 13 | } 14 | 15 | impl WaitCondition { 16 | pub const fn new() -> WaitCondition { 17 | WaitCondition { 18 | contexts: Mutex::new(Vec::new()), 19 | } 20 | } 21 | 22 | // Notify all waiters 23 | pub fn notify(&self) -> usize { 24 | let mut contexts = self.contexts.lock(); 25 | let len = contexts.len(); 26 | while let Some(context_weak) = contexts.pop() { 27 | if let Some(context_ref) = context_weak.upgrade() { 28 | context_ref.write().unblock(); 29 | } 30 | } 31 | len 32 | } 33 | 34 | // Notify as though a signal woke the waiters 35 | pub unsafe fn notify_signal(&self) -> usize { 36 | let contexts = self.contexts.lock(); 37 | let len = contexts.len(); 38 | for context_weak in contexts.iter() { 39 | if let Some(context_ref) = context_weak.upgrade() { 40 | context_ref.write().unblock(); 41 | } 42 | } 43 | len 44 | } 45 | 46 | // Wait until notified. Unlocks guard when blocking is ready. 
Returns false if resumed by a signal or the notify_signal function 47 | pub fn wait(&self, guard: T, reason: &'static str) -> bool { 48 | let current_context_ref = context::current(); 49 | { 50 | { 51 | let mut context = current_context_ref.write(); 52 | if let Some((control, pctl, _)) = context.sigcontrol() 53 | && control.currently_pending_unblocked(pctl) != 0 54 | { 55 | return false; 56 | } 57 | context.block(reason); 58 | } 59 | 60 | self.contexts 61 | .lock() 62 | .push(Arc::downgrade(¤t_context_ref)); 63 | 64 | drop(guard); 65 | } 66 | 67 | context::switch(); 68 | 69 | let mut waited = true; 70 | 71 | { 72 | let mut contexts = self.contexts.lock(); 73 | 74 | // TODO: retain 75 | let mut i = 0; 76 | while i < contexts.len() { 77 | if Weak::as_ptr(&contexts[i]) == Arc::as_ptr(¤t_context_ref) { 78 | contexts.remove(i); 79 | waited = false; 80 | break; 81 | } else { 82 | i += 1; 83 | } 84 | } 85 | } 86 | 87 | waited 88 | } 89 | } 90 | 91 | impl Drop for WaitCondition { 92 | fn drop(&mut self) { 93 | unsafe { self.notify_signal() }; 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/sync/wait_queue.rs: -------------------------------------------------------------------------------- 1 | use alloc::collections::VecDeque; 2 | use spin::Mutex; 3 | use syscall::{EAGAIN, EINTR}; 4 | 5 | use crate::{ 6 | sync::WaitCondition, 7 | syscall::{ 8 | error::{Error, Result, EINVAL}, 9 | usercopy::UserSliceWo, 10 | }, 11 | }; 12 | 13 | #[derive(Debug)] 14 | pub struct WaitQueue { 15 | pub inner: Mutex>, 16 | pub condition: WaitCondition, 17 | } 18 | 19 | impl WaitQueue { 20 | pub const fn new() -> WaitQueue { 21 | WaitQueue { 22 | inner: Mutex::new(VecDeque::new()), 23 | condition: WaitCondition::new(), 24 | } 25 | } 26 | pub fn is_currently_empty(&self) -> bool { 27 | self.inner.lock().is_empty() 28 | } 29 | 30 | pub fn receive(&self, block: bool, reason: &'static str) -> Result { 31 | loop { 32 | let mut inner = 
self.inner.lock(); 33 | 34 | if let Some(t) = inner.pop_front() { 35 | return Ok(t); 36 | } else if block { 37 | if !self.condition.wait(inner, reason) { 38 | return Err(Error::new(EINTR)); 39 | } 40 | continue; 41 | } else { 42 | return Err(Error::new(EAGAIN)); 43 | } 44 | } 45 | } 46 | 47 | pub fn receive_into_user( 48 | &self, 49 | buf: UserSliceWo, 50 | block: bool, 51 | reason: &'static str, 52 | ) -> Result { 53 | loop { 54 | let mut inner = self.inner.lock(); 55 | 56 | if inner.is_empty() { 57 | if block { 58 | if !self.condition.wait(inner, reason) { 59 | return Err(Error::new(EINTR)); 60 | } 61 | continue; 62 | } else if buf.is_empty() { 63 | return Ok(0); 64 | } else if buf.len() < core::mem::size_of::() { 65 | return Err(Error::new(EINVAL)); 66 | } else { 67 | // TODO: EWOULDBLOCK? 68 | return Err(Error::new(EAGAIN)); 69 | } 70 | } 71 | 72 | let (s1, s2) = inner.as_slices(); 73 | let s1_bytes = unsafe { 74 | core::slice::from_raw_parts( 75 | s1.as_ptr().cast::(), 76 | s1.len() * core::mem::size_of::(), 77 | ) 78 | }; 79 | let s2_bytes = unsafe { 80 | core::slice::from_raw_parts( 81 | s2.as_ptr().cast::(), 82 | s2.len() * core::mem::size_of::(), 83 | ) 84 | }; 85 | 86 | let mut bytes_copied = buf.copy_common_bytes_from_slice(s1_bytes)?; 87 | 88 | if let Some(buf_for_s2) = buf.advance(s1_bytes.len()) { 89 | bytes_copied += buf_for_s2.copy_common_bytes_from_slice(s2_bytes)?; 90 | } 91 | 92 | let _ = inner.drain(..bytes_copied / core::mem::size_of::()); 93 | 94 | return Ok(bytes_copied); 95 | } 96 | } 97 | 98 | pub fn send(&self, value: T) -> usize { 99 | let len = { 100 | let mut inner = self.inner.lock(); 101 | inner.push_back(value); 102 | inner.len() 103 | }; 104 | self.condition.notify(); 105 | len 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/syscall/privilege.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use 
crate::{context, scheme, syscall::error::*}; 4 | 5 | use super::{ 6 | copy_path_to_buf, 7 | usercopy::{UserSlice, UserSliceRo}, 8 | }; 9 | 10 | pub fn mkns(mut user_buf: UserSliceRo) -> Result { 11 | let (uid, from) = match context::current().read() { 12 | ref cx => (cx.euid, cx.ens), 13 | }; 14 | 15 | // TODO: Lift this restriction later? 16 | if uid != 0 { 17 | return Err(Error::new(EACCES)); 18 | } 19 | 20 | let mut names = Vec::with_capacity(user_buf.len() / core::mem::size_of::<[usize; 2]>()); 21 | 22 | while let Some((current_name_ptr_buf, next_part)) = 23 | user_buf.split_at(core::mem::size_of::<[usize; 2]>()) 24 | { 25 | let mut iter = current_name_ptr_buf.usizes(); 26 | let ptr = iter.next().ok_or(Error::new(EINVAL))??; 27 | let len = iter.next().ok_or(Error::new(EINVAL))??; 28 | 29 | let raw_path = UserSlice::new(ptr, len)?; 30 | 31 | // TODO: Max scheme size limit? 32 | let max_len = 256; 33 | 34 | names.push(copy_path_to_buf(raw_path, max_len)?.into_boxed_str()); 35 | 36 | user_buf = next_part; 37 | } 38 | 39 | let to = scheme::schemes_mut().make_ns(from, names)?; 40 | Ok(to.into()) 41 | } 42 | -------------------------------------------------------------------------------- /src/syscall/time.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | context, 3 | syscall::{ 4 | data::TimeSpec, 5 | error::*, 6 | flag::{CLOCK_MONOTONIC, CLOCK_REALTIME}, 7 | }, 8 | time, 9 | }; 10 | 11 | use super::usercopy::{UserSliceRo, UserSliceWo}; 12 | 13 | pub fn clock_gettime(clock: usize, buf: UserSliceWo) -> Result<()> { 14 | let arch_time = match clock { 15 | CLOCK_REALTIME => time::realtime(), 16 | CLOCK_MONOTONIC => time::monotonic(), 17 | _ => return Err(Error::new(EINVAL)), 18 | }; 19 | 20 | buf.copy_exactly(&TimeSpec { 21 | tv_sec: (arch_time / time::NANOS_PER_SEC) as i64, 22 | tv_nsec: (arch_time % time::NANOS_PER_SEC) as i32, 23 | }) 24 | } 25 | 26 | /// Nanosleep will sleep by switching the current context 27 
| pub fn nanosleep(req_buf: UserSliceRo, rem_buf_opt: Option) -> Result<()> { 28 | let req = unsafe { req_buf.read_exact::()? }; 29 | 30 | let start = time::monotonic(); 31 | let end = start + (req.tv_sec as u128 * time::NANOS_PER_SEC) + (req.tv_nsec as u128); 32 | 33 | let current_context = context::current(); 34 | { 35 | let mut context = current_context.write(); 36 | 37 | if let Some((tctl, pctl, _)) = context.sigcontrol() { 38 | if tctl.currently_pending_unblocked(pctl) != 0 { 39 | return Err(Error::new(EINTR)); 40 | } 41 | } 42 | 43 | context.wake = Some(end); 44 | context.block("nanosleep"); 45 | } 46 | 47 | // TODO: The previous wakeup reason was most likely signals, but is there any other possible 48 | // reason? 49 | context::switch(); 50 | 51 | let was_interrupted = current_context.write().wake.take().is_some(); 52 | 53 | if let Some(rem_buf) = rem_buf_opt { 54 | let current = time::monotonic(); 55 | 56 | rem_buf.copy_exactly(&if current < end { 57 | let diff = end - current; 58 | TimeSpec { 59 | tv_sec: (diff / time::NANOS_PER_SEC) as i64, 60 | tv_nsec: (diff % time::NANOS_PER_SEC) as i32, 61 | } 62 | } else { 63 | TimeSpec { 64 | tv_sec: 0, 65 | tv_nsec: 0, 66 | } 67 | })?; 68 | } 69 | 70 | if was_interrupted { 71 | Err(Error::new(EINTR)) 72 | } else { 73 | Ok(()) 74 | } 75 | } 76 | 77 | pub fn sched_yield() -> Result<()> { 78 | context::switch(); 79 | // TODO: Do this check in userspace 80 | context::signal::signal_handler(); 81 | Ok(()) 82 | } 83 | -------------------------------------------------------------------------------- /src/time.rs: -------------------------------------------------------------------------------- 1 | use spin::Mutex; 2 | 3 | use crate::syscall::error::{Error, Result, EINVAL}; 4 | 5 | pub const NANOS_PER_SEC: u128 = 1_000_000_000; 6 | 7 | // TODO: seqlock? 
8 | /// Kernel start time, measured in nanoseconds since Unix epoch 9 | pub static START: Mutex = Mutex::new(0); 10 | /// Kernel up time, measured in nanoseconds since `START_TIME` 11 | pub static OFFSET: Mutex = Mutex::new(0); 12 | 13 | pub fn monotonic() -> u128 { 14 | crate::arch::time::monotonic_absolute() 15 | } 16 | 17 | pub fn realtime() -> u128 { 18 | *START.lock() + monotonic() 19 | } 20 | 21 | pub fn sys_update_time_offset(buf: &[u8]) -> Result { 22 | let start = <[u8; 16]>::try_from(buf).map_err(|_| Error::new(EINVAL))?; 23 | *START.lock() = u128::from_ne_bytes(start); 24 | Ok(16) 25 | } 26 | -------------------------------------------------------------------------------- /targets/aarch64-unknown-kernel.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "aarch64-unknown-none", 3 | "abi": "softfloat", 4 | "target-endian": "little", 5 | "target-pointer-width": "64", 6 | "target-c-int-width": "32", 7 | "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", 8 | "arch": "aarch64", 9 | "os": "none", 10 | "env": "", 11 | "vendor": "unknown", 12 | "linker": "rust-lld", 13 | "linker-flavor": "gnu-lld", 14 | "features": "+strict-align,-neon,-fp-armv8,+tpidr-el1", 15 | "dynamic-linking": false, 16 | "executables": true, 17 | "relocation-model": "pic", 18 | "disable-redzone": true, 19 | "frame-pointer": "always", 20 | "exe-suffix": "", 21 | "has-rpath": false, 22 | "no-default-libraries": true, 23 | "position-independent-executables": false 24 | } 25 | -------------------------------------------------------------------------------- /targets/i686-unknown-kernel.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "i686-unknown-none", 3 | "target-endian": "little", 4 | "target-pointer-width": "32", 5 | "target-c-int-width": "32", 6 | "data-layout": 
"e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-f64:32:64-f80:32-n8:16:32-S128", 7 | "arch": "x86", 8 | "os": "none", 9 | "env": "", 10 | "vendor": "unknown", 11 | "linker": "rust-lld", 12 | "linker-flavor": "gnu-lld", 13 | "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float", 14 | "dynamic-linking": false, 15 | "executables": true, 16 | "relocation-model": "static", 17 | "code-model": "kernel", 18 | "disable-redzone": true, 19 | "frame-pointer": "always", 20 | "exe-suffix": "", 21 | "has-rpath": false, 22 | "no-default-libraries": true, 23 | "position-independent-executables": false 24 | } 25 | -------------------------------------------------------------------------------- /targets/riscv64-unknown-kernel.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "riscv64-unknown-none", 3 | "llvm-abiname": "lp64", 4 | "target-endian": "little", 5 | "target-pointer-width": "64", 6 | "target-c-int-width": "32", 7 | "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", 8 | "arch": "riscv64", 9 | "os": "none", 10 | "env": "", 11 | "vendor": "unknown", 12 | "linker": "rust-lld", 13 | "linker-flavor": "gnu-lld", 14 | "features": "+m,+a,+c,+zihintpause", 15 | "dynamic-linking": false, 16 | "executables": true, 17 | "relocation-model": "pic", 18 | "disable-redzone": true, 19 | "frame-pointer": "always", 20 | "exe-suffix": "", 21 | "has-rpath": false, 22 | "no-default-libraries": true, 23 | "position-independent-executables": false 24 | } 25 | -------------------------------------------------------------------------------- /targets/x86_64-unknown-kernel.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "target-endian": "little", 4 | "target-pointer-width": "64", 5 | "target-c-int-width": "32", 6 | "data-layout": 
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", 7 | "arch": "x86_64", 8 | "os": "none", 9 | "env": "", 10 | "vendor": "unknown", 11 | "linker": "rust-lld", 12 | "linker-flavor": "gnu-lld", 13 | "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,+soft-float", 14 | "dynamic-linking": false, 15 | "executables": true, 16 | "relocation-model": "static", 17 | "code-model": "kernel", 18 | "disable-redzone": true, 19 | "frame-pointer": "always", 20 | "exe-suffix": "", 21 | "has-rpath": false, 22 | "no-default-libraries": true, 23 | "position-independent-executables": false 24 | } 25 | --------------------------------------------------------------------------------