├── ors-kernel ├── src │ ├── fs.rs │ ├── devices.rs │ ├── sync.rs │ ├── console │ │ ├── Tamzen7x14b.ttf │ │ ├── Tamzen7x14r.ttf │ │ ├── theme.rs │ │ ├── kbd.rs │ │ └── screen.rs │ ├── devices │ │ ├── virtio.rs │ │ ├── qemu.rs │ │ ├── serial.rs │ │ ├── virtio │ │ │ ├── configuration.rs │ │ │ ├── block.rs │ │ │ └── queue.rs │ │ └── pci.rs │ ├── logger.rs │ ├── graphics │ │ ├── color.rs │ │ ├── rect.rs │ │ ├── font.rs │ │ ├── frame_buffer.rs │ │ └── text_buffer.rs │ ├── fs │ │ ├── volume │ │ │ └── virtio.rs │ │ ├── fat │ │ │ ├── fat_entry.rs │ │ │ ├── boot_sector.rs │ │ │ └── low_level.rs │ │ └── volume.rs │ ├── sync │ │ ├── lazy.rs │ │ ├── once.rs │ │ ├── spin.rs │ │ ├── mutex.rs │ │ └── queue.rs │ ├── print.rs │ ├── segmentation.rs │ ├── acpi.rs │ ├── main.rs │ ├── paging.rs │ ├── graphics.rs │ ├── context.rs │ ├── console.rs │ ├── cpu.rs │ ├── x64.rs │ ├── allocator.rs │ ├── phys_memory.rs │ ├── interrupts.rs │ └── task.rs ├── .cargo │ └── config.toml ├── Cargo.toml ├── build.rs ├── x86_64-unknown-none-ors.json └── asm.s ├── .gitignore ├── qemu ├── OVMF_CODE.fd ├── OVMF_VARS.fd ├── make_and_run_image.sh ├── run_image.sh └── make_image.sh ├── Cargo.toml ├── docs ├── screenshots │ └── 2022-01-30.png └── screenshots.md ├── ors-common ├── Cargo.toml └── src │ ├── lib.rs │ ├── frame_buffer.rs │ ├── memory_map.rs │ └── non_contiguous.rs ├── ors-loader ├── .cargo │ └── config.toml ├── Cargo.toml └── src │ ├── fs.rs │ └── main.rs ├── Makefile ├── LICENSE └── README.md /ors-kernel/src/fs.rs: -------------------------------------------------------------------------------- 1 | pub mod fat; 2 | pub mod volume; 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | .vscode/ 4 | 5 | mnt 6 | disk.img 7 | -------------------------------------------------------------------------------- /qemu/OVMF_CODE.fd: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yubrot/ors/HEAD/qemu/OVMF_CODE.fd -------------------------------------------------------------------------------- /qemu/OVMF_VARS.fd: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yubrot/ors/HEAD/qemu/OVMF_VARS.fd -------------------------------------------------------------------------------- /ors-kernel/src/devices.rs: -------------------------------------------------------------------------------- 1 | pub mod pci; 2 | pub mod qemu; 3 | pub mod serial; 4 | pub mod virtio; 5 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "ors-common", 4 | "ors-loader", 5 | "ors-kernel", 6 | ] 7 | -------------------------------------------------------------------------------- /docs/screenshots/2022-01-30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yubrot/ors/HEAD/docs/screenshots/2022-01-30.png -------------------------------------------------------------------------------- /docs/screenshots.md: -------------------------------------------------------------------------------- 1 | # 2022/01/30 2 | 3 |

4 | 5 |

6 | -------------------------------------------------------------------------------- /ors-kernel/src/sync.rs: -------------------------------------------------------------------------------- 1 | pub mod lazy; 2 | pub mod mutex; 3 | pub mod once; 4 | pub mod queue; 5 | pub mod spin; 6 | -------------------------------------------------------------------------------- /ors-kernel/src/console/Tamzen7x14b.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yubrot/ors/HEAD/ors-kernel/src/console/Tamzen7x14b.ttf -------------------------------------------------------------------------------- /ors-kernel/src/console/Tamzen7x14r.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yubrot/ors/HEAD/ors-kernel/src/console/Tamzen7x14r.ttf -------------------------------------------------------------------------------- /ors-common/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "ors-common" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | -------------------------------------------------------------------------------- /ors-loader/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "x86_64-unknown-uefi" 3 | 4 | [unstable] 5 | build-std = ["core", "compiler_builtins", "alloc"] 6 | build-std-features = ["compiler-builtins-mem"] 7 | -------------------------------------------------------------------------------- /ors-common/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(maybe_uninit_uninit_array)] // for non_contiguous 2 | #![feature(maybe_uninit_array_assume_init)] // for non_contiguous 3 | #![no_std] 4 | 5 | #[cfg(test)] 6 | extern crate alloc; 7 | 8 | pub mod frame_buffer; 9 | pub mod memory_map; 10 | pub mod non_contiguous; 
11 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: ors-loader.efi ors-kernel.elf 2 | 3 | ors-loader.efi: 4 | cd ors-loader && cargo build 5 | 6 | ors-kernel.elf: 7 | cd ors-kernel && cargo build 8 | 9 | qemu: ors-loader.efi ors-kernel.elf 10 | cd ors-kernel && cargo run 11 | 12 | rerun: 13 | cd ors-kernel && ../qemu/run_image.sh ./disk.img 14 | -------------------------------------------------------------------------------- /ors-kernel/src/devices/virtio.rs: -------------------------------------------------------------------------------- 1 | //! VirtIO Drivers 2 | //! 3 | //! ors implements VirtIO Legacy Driver: 4 | //! https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.pdf 5 | 6 | pub mod block; 7 | mod configuration; 8 | mod queue; 9 | 10 | pub use configuration::Configuration; 11 | pub use queue::{Buffer, VirtQueue}; 12 | -------------------------------------------------------------------------------- /ors-kernel/src/devices/qemu.rs: -------------------------------------------------------------------------------- 1 | use x86_64::instructions::port::Port; 2 | 3 | static mut QEMU_DEBUG_EXIT: Port = Port::new(0xf4); 4 | 5 | #[derive(PartialEq, Eq, Debug, Clone, Copy)] 6 | #[repr(u32)] 7 | pub enum ExitCode { 8 | Success = 0x10, 9 | Failure = 0x11, 10 | } 11 | 12 | pub fn exit(exit_code: ExitCode) { 13 | unsafe { QEMU_DEBUG_EXIT.write(exit_code as u32) } 14 | } 15 | -------------------------------------------------------------------------------- /qemu/make_and_run_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | if [ $# -lt 1 ]; then 4 | echo "Usage: $0 []" 5 | exit 1 6 | fi 7 | 8 | DEVENV_DIR=$(dirname "$0") 9 | DISK_IMG=./disk.img 10 | MOUNT_POINT=./mnt 11 | BOOTLOADER_EFI=$1 12 | KERNEL_ELF=$2 13 | 14 | $DEVENV_DIR/make_image.sh $DISK_IMG $MOUNT_POINT 
$BOOTLOADER_EFI $KERNEL_ELF 15 | $DEVENV_DIR/run_image.sh $DISK_IMG 16 | -------------------------------------------------------------------------------- /ors-kernel/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "./x86_64-unknown-none-ors.json" 3 | 4 | [unstable] 5 | build-std = ["core", "compiler_builtins", "alloc"] 6 | build-std-features = ["compiler-builtins-mem"] 7 | 8 | [target.'cfg(target_os = "none")'] 9 | # FIXME: This assumes that the ors-loader.efi has already been built 10 | runner = ['../qemu/make_and_run_image.sh', '../target/x86_64-unknown-uefi/debug/ors-loader.efi'] 11 | -------------------------------------------------------------------------------- /ors-common/src/frame_buffer.rs: -------------------------------------------------------------------------------- 1 | #[repr(C)] 2 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] 3 | pub enum PixelFormat { 4 | Rgb, 5 | Bgr, 6 | } 7 | 8 | #[repr(C)] 9 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] 10 | pub struct FrameBuffer { 11 | pub frame_buffer: *mut u8, 12 | pub stride: u32, 13 | pub resolution: (u32, u32), // (horizontal, vertical) 14 | pub format: PixelFormat, 15 | } 16 | -------------------------------------------------------------------------------- /ors-loader/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["yubrot "] 3 | edition = "2021" 4 | name = "ors-loader" 5 | version = "0.1.0" 6 | 7 | [dependencies] 8 | goblin = {version = "0.4", features = ["elf32", "elf64", "endian_fd"], default-features = false} 9 | log = {version = "0.4", default-features = false} 10 | ors-common = {path = "../ors-common"} 11 | uefi = {version = "0.14", features = ["alloc", "logger", "exts"]} 12 | uefi-services = "0.11" 13 | x86_64 = "0.14" 14 | -------------------------------------------------------------------------------- 
/ors-kernel/src/logger.rs: -------------------------------------------------------------------------------- 1 | pub fn register() { 2 | log::set_logger(&KernelLogger).unwrap(); 3 | log::set_max_level(log::LevelFilter::Info); 4 | } 5 | 6 | struct KernelLogger; 7 | 8 | impl log::Log for KernelLogger { 9 | fn enabled(&self, _metadata: &log::Metadata) -> bool { 10 | true 11 | } 12 | 13 | fn log(&self, record: &log::Record) { 14 | sprintln!("{}: {}", record.level(), record.args()); 15 | } 16 | 17 | fn flush(&self) {} 18 | } 19 | -------------------------------------------------------------------------------- /ors-kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "ors-kernel" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | ab_glyph = {version = "0.2", default-features = false, features = ["libm"]} 8 | acpi = "4" 9 | bit_field = "0.10" 10 | derive-new = {version = "0.5", default-features = false} 11 | heapless = "0.7" 12 | libm = "0.2" 13 | log = {version = "0.4", default-features = false} 14 | ors-common = {path = "../ors-common"} 15 | pc-keyboard = "0.5" 16 | spin = "0.9" 17 | static_assertions = "1" 18 | uart_16550 = "0.2" 19 | x86_64 = "0.14" 20 | -------------------------------------------------------------------------------- /ors-kernel/src/devices/serial.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::spin::{Spin, SpinGuard}; 2 | pub use uart_16550::SerialPort as Port; 3 | 4 | const DEFAULT_PORT_ADDRESS: u16 = 0x3f8; 5 | 6 | static DEFAULT_PORT: Spin = Spin::new(unsafe { Port::new(DEFAULT_PORT_ADDRESS) }); 7 | 8 | pub fn default_port() -> SpinGuard<'static, Port> { 9 | DEFAULT_PORT.lock() 10 | } 11 | 12 | /// Default port with no locking mechanism. 13 | /// Used for debugging output in interrupt handlers and panic handlers. 
14 | pub fn raw_default_port() -> Port { 15 | unsafe { Port::new(DEFAULT_PORT_ADDRESS) } 16 | } 17 | -------------------------------------------------------------------------------- /ors-common/src/memory_map.rs: -------------------------------------------------------------------------------- 1 | use core::slice; 2 | 3 | #[repr(C)] 4 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] 5 | pub struct MemoryMap { 6 | pub descriptors: *const Descriptor, 7 | pub descriptors_len: u64, 8 | } 9 | 10 | impl MemoryMap { 11 | pub fn descriptors(&self) -> &[Descriptor] { 12 | unsafe { slice::from_raw_parts(self.descriptors, self.descriptors_len as usize) } 13 | } 14 | } 15 | 16 | #[repr(C)] 17 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] 18 | pub struct Descriptor { 19 | pub phys_start: u64, 20 | pub phys_end: u64, 21 | } 22 | -------------------------------------------------------------------------------- /qemu/run_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | if [ $# -lt 1 ]; then 4 | echo "Usage: $0 " 5 | exit 1 6 | fi 7 | 8 | DEVENV_DIR=$(dirname "$0") 9 | DISK_IMG=$1 10 | 11 | if [ ! -f $DISK_IMG ]; then 12 | echo "No such file: $DISK_IMG" 13 | exit 1 14 | fi 15 | 16 | set +e 17 | qemu-system-x86_64 \ 18 | -smp 4 \ 19 | -m 1G \ 20 | -drive if=pflash,format=raw,readonly=on,file=$DEVENV_DIR/OVMF_CODE.fd \ 21 | -drive if=pflash,format=raw,file=$DEVENV_DIR/OVMF_VARS.fd \ 22 | -drive if=none,id=drive0,format=raw,file=$DISK_IMG \ 23 | -device isa-debug-exit,iobase=0xf4,iosize=0x04 \ 24 | -device virtio-blk-pci,drive=drive0 \ 25 | -serial mon:stdio \ 26 | $QEMU_OPTS 27 | [ $? -eq 33 -o $? 
-eq 0 ] 28 | -------------------------------------------------------------------------------- /ors-kernel/src/graphics/color.rs: -------------------------------------------------------------------------------- 1 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 2 | pub struct Color { 3 | pub r: u8, 4 | pub g: u8, 5 | pub b: u8, 6 | } 7 | 8 | impl Color { 9 | pub const fn new(r: u8, g: u8, b: u8) -> Self { 10 | Self { r, g, b } 11 | } 12 | 13 | pub fn mix(self, other: Self, f: f32) -> Self { 14 | let r = self.r as f32 * (1.0 - f) + other.r as f32 * f; 15 | let g = self.g as f32 * (1.0 - f) + other.g as f32 * f; 16 | let b = self.b as f32 * (1.0 - f) + other.b as f32 * f; 17 | Self::new(r as u8, g as u8, b as u8) 18 | } 19 | } 20 | 21 | impl From<(u8, u8, u8)> for Color { 22 | fn from((r, g, b): (u8, u8, u8)) -> Self { 23 | Self::new(r, g, b) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /ors-kernel/build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::path::PathBuf; 3 | use std::process::Command; 4 | 5 | fn main() { 6 | let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); 7 | println!("cargo:rustc-link-search={}", out_dir.display()); 8 | 9 | // asm.s -> asm.o -> libasm.a 10 | let out_asm = { 11 | let mut path = out_dir.clone(); 12 | path.push("asm.o"); 13 | path 14 | }; 15 | Command::new("nasm") 16 | .args(&["-f", "elf64", "-o", out_asm.to_str().unwrap(), "asm.s"]) 17 | .status() 18 | .unwrap(); 19 | Command::new("ar") 20 | .args(&["crus", "libasm.a", "asm.o"]) 21 | .current_dir(&out_dir) 22 | .status() 23 | .unwrap(); 24 | println!("cargo:rustc-link-lib=static=asm"); 25 | } 26 | -------------------------------------------------------------------------------- /ors-kernel/x86_64-unknown-none-ors.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "x86_64", 3 | 
"code-model": "kernel", 4 | "cpu": "x86-64", 5 | "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128", 6 | "disable-redzone": true, 7 | "executables": true, 8 | "exe-suffix": ".elf", 9 | "features": "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float", 10 | "linker-flavor": "ld.lld", 11 | "llvm-target": "x86_64-unknown-none-elf", 12 | "max-atomic-width": 64, 13 | "os": "none", 14 | "panic-strategy": "abort", 15 | "relro-level": "off", 16 | "position-independent-executables": false, 17 | "post-link-args": { 18 | "ld.lld": [ 19 | "--entry=kernel_main", 20 | "--image-base=0x100000", 21 | "--static" 22 | ] 23 | }, 24 | "target-pointer-width": "64" 25 | } 26 | -------------------------------------------------------------------------------- /qemu/make_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | if [ $# -lt 3 ]; then 4 | echo "Usage: $0 []" 5 | exit 1 6 | fi 7 | 8 | DEVENV_DIR=$(dirname "$0") 9 | DISK_IMG=$1 10 | MOUNT_POINT=$2 11 | BOOTLOADER_EFI=$3 12 | KERNEL_ELF=$4 13 | 14 | if [ ! 
-f $BOOTLOADER_EFI ]; then 15 | echo "No such file: $BOOTLOADER_EFI" 16 | exit 1 17 | fi 18 | 19 | # Create a disk image and format it to FAT 20 | rm -f $DISK_IMG 21 | qemu-img create -f raw $DISK_IMG 200M 22 | mkfs.fat -n 'ORS' -s 2 -f 2 -R 32 -F 32 $DISK_IMG 23 | 24 | # Initialize disk image 25 | mkdir -p $MOUNT_POINT 26 | sudo mount -o loop $DISK_IMG $MOUNT_POINT 27 | sudo mkdir -p $MOUNT_POINT/EFI/BOOT 28 | sudo cp $BOOTLOADER_EFI $MOUNT_POINT/EFI/BOOT/BOOTX64.EFI 29 | if [ "$KERNEL_ELF" != "" ]; then 30 | sudo cp $KERNEL_ELF $MOUNT_POINT/ors-kernel.elf 31 | fi 32 | sleep 0.5 33 | sudo umount $MOUNT_POINT 34 | 35 | -------------------------------------------------------------------------------- /ors-kernel/src/fs/volume/virtio.rs: -------------------------------------------------------------------------------- 1 | mod virtio { 2 | pub use crate::devices::virtio::block::*; 3 | } 4 | use super::{Sector, Volume, VolumeError, VolumeErrorKind}; 5 | use derive_new::new; 6 | 7 | impl From for VolumeErrorKind { 8 | fn from(e: virtio::Error) -> Self { 9 | match e { 10 | virtio::Error::Io => Self::Io, 11 | virtio::Error::OutOfRange => Self::OutOfRange, 12 | _ => Self::Unknown, 13 | } 14 | } 15 | } 16 | 17 | /// Let the entire VirtIO block as a single volume. 
18 | #[derive(Debug, Clone, Copy, new)] 19 | pub struct VirtIOBlockVolume(&'static virtio::Block); 20 | 21 | impl Volume for VirtIOBlockVolume { 22 | fn sector_count(&self) -> usize { 23 | self.0.capacity() as usize 24 | } 25 | 26 | fn sector_size(&self) -> usize { 27 | virtio::Block::SECTOR_SIZE 28 | } 29 | 30 | fn read(&self, sector: Sector, buf: &mut [u8]) -> Result<(), VolumeError> { 31 | self.0 32 | .read(sector.index() as u64, buf) 33 | .map_err(|k| VolumeError::new(sector, k.into())) 34 | } 35 | 36 | fn write(&self, sector: Sector, buf: &[u8]) -> Result<(), VolumeError> { 37 | self.0 38 | .write(sector.index() as u64, buf) 39 | .map_err(|k| VolumeError::new(sector, k.into())) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /ors-kernel/src/sync/lazy.rs: -------------------------------------------------------------------------------- 1 | use super::once::Once; 2 | use core::cell::Cell; 3 | use core::fmt; 4 | use core::ops::Deref; 5 | 6 | /// Almost same as `spin::Lazy`, except it uses `ors_kernel::sync::once::Once`. 
7 | pub struct Lazy T> { 8 | cell: Once, 9 | init: Cell>, 10 | } 11 | 12 | impl Lazy { 13 | pub const fn new(f: F) -> Self { 14 | Self { 15 | cell: Once::new(), 16 | init: Cell::new(Some(f)), 17 | } 18 | } 19 | 20 | pub fn as_mut_ptr(&self) -> *mut T { 21 | self.cell.as_mut_ptr() 22 | } 23 | } 24 | 25 | impl T> Lazy { 26 | pub fn force(this: &Self) -> &T { 27 | this.cell.call_once(|| match this.init.take() { 28 | Some(f) => f(), 29 | None => panic!("Lazy instance has previously been poisoned"), 30 | }) 31 | } 32 | } 33 | 34 | unsafe impl Sync for Lazy where Once: Sync {} 35 | 36 | impl fmt::Debug for Lazy { 37 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 38 | f.debug_struct("Lazy") 39 | .field("cell", &self.cell) 40 | .field("init", &"..") 41 | .finish() 42 | } 43 | } 44 | 45 | impl T> Deref for Lazy { 46 | type Target = T; 47 | 48 | fn deref(&self) -> &T { 49 | Self::force(self) 50 | } 51 | } 52 | 53 | impl Default for Lazy T> { 54 | fn default() -> Self { 55 | Self::new(T::default) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright yubrot (c) 2021 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of yubrot nor the names of other 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 
19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /ors-kernel/src/graphics/rect.rs: -------------------------------------------------------------------------------- 1 | use derive_new::new; 2 | 3 | #[derive(PartialEq, Eq, Debug, Clone, Copy, Hash, new)] 4 | pub struct Rect { 5 | pub x: i32, 6 | pub y: i32, 7 | pub w: u32, 8 | pub h: u32, 9 | } 10 | 11 | impl Rect { 12 | pub fn intersect(self, other: Self) -> Option { 13 | let lx = self.x.max(other.x); 14 | let ly = self.y.max(other.y); 15 | let rx = (self.x + self.w as i32).min(other.x + other.w as i32); 16 | let ry = (self.y + self.h as i32).min(other.y + other.h as i32); 17 | if rx < lx || ry < ly { 18 | return None; 19 | } 20 | Some(Self { 21 | x: lx, 22 | y: ly, 23 | w: (rx - lx) as u32, 24 | h: (ry - ly) as u32, 25 | }) 26 | } 27 | 28 | pub fn contains(self, x: i32, y: i32) -> bool { 29 | self.x <= x && x < self.x + self.w as i32 && self.y <= y && y < self.y + self.h as i32 30 | } 31 | 32 | pub fn offset(self, x: i32, y: i32) -> Self { 33 | Self::new(self.x + x, self.y + y, self.w, self.h) 34 | } 35 | } 36 | 37 | #[cfg(test)] 38 | mod tests { 39 | use super::*; 40 | use 
log::info; 41 | 42 | #[test_case] 43 | fn test_rect() { 44 | info!("TESTING graphics::rect"); 45 | assert!(Rect::new(0, 0, 100, 100).contains(50, 50)); 46 | assert!(!Rect::new(0, 0, 100, 100).contains(-5, 10)); 47 | assert_eq!( 48 | Rect::new(0, 0, 100, 100).intersect(Rect::new(15, 10, 120, 60)), 49 | Some(Rect::new(15, 10, 85, 60)) 50 | ); 51 | assert_eq!( 52 | Rect::new(30, 40, 60, 60).intersect(Rect::new(10, 10, 80, 20)), 53 | None 54 | ); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /ors-kernel/src/print.rs: -------------------------------------------------------------------------------- 1 | use crate::console; 2 | use crate::devices; 3 | use core::fmt; 4 | 5 | #[derive(Debug)] 6 | pub struct KernelWrite; 7 | 8 | impl fmt::Write for KernelWrite { 9 | fn write_str(&mut self, s: &str) -> fmt::Result { 10 | devices::serial::default_port().write_str(s)?; 11 | console::ConsoleWrite.write_str(s)?; 12 | Ok(()) 13 | } 14 | } 15 | 16 | #[allow(unused_macros)] 17 | macro_rules! kprintln { 18 | ($( $t:tt )*) => {{ 19 | use core::fmt::Write; 20 | writeln!(crate::print::KernelWrite, $( $t )*).unwrap(); 21 | }}; 22 | } 23 | 24 | #[allow(unused_macros)] 25 | macro_rules! kprint { 26 | ($( $t:tt )*) => {{ 27 | use core::fmt::Write; 28 | write!(crate::print::KernelWrite, $( $t )*).unwrap(); 29 | }}; 30 | } 31 | 32 | #[allow(unused_macros)] 33 | macro_rules! cprintln { 34 | ($( $t:tt )*) => {{ 35 | use core::fmt::Write; 36 | writeln!(crate::console::ConsoleWrite, $( $t )*).unwrap(); 37 | }}; 38 | } 39 | 40 | #[allow(unused_macros)] 41 | macro_rules! cprint { 42 | ($( $t:tt )*) => {{ 43 | use core::fmt::Write; 44 | write!(crate::console::ConsoleWrite, $( $t )*).unwrap(); 45 | }}; 46 | } 47 | 48 | /// Write to raw_default_port. Used for debugging output in interrupt handlers and panic handlers. 49 | #[allow(unused_macros)] 50 | macro_rules! 
sprintln { 51 | ($( $t:tt )*) => {{ 52 | use core::fmt::Write; 53 | writeln!(crate::devices::serial::raw_default_port(), $( $t )*).unwrap(); 54 | }}; 55 | } 56 | 57 | #[allow(unused_macros)] 58 | macro_rules! sprint { 59 | ($( $t:tt )*) => {{ 60 | use core::fmt::Write; 61 | write!(crate::devices::serial::raw_default_port(), $( $t )*).unwrap(); 62 | }}; 63 | } 64 | -------------------------------------------------------------------------------- /ors-kernel/src/console/theme.rs: -------------------------------------------------------------------------------- 1 | use super::ansi::ColorScheme; 2 | 3 | #[derive(Debug)] 4 | pub struct OneMonokai; 5 | 6 | impl ColorScheme for OneMonokai { 7 | fn foreground(&self) -> (u8, u8, u8) { 8 | (0xab, 0xb2, 0xbf) 9 | } 10 | 11 | fn background(&self) -> (u8, u8, u8) { 12 | (0x28, 0x2c, 0x34) 13 | } 14 | 15 | fn black(&self) -> (u8, u8, u8) { 16 | (0x2d, 0x31, 0x39) 17 | } 18 | fn red(&self) -> (u8, u8, u8) { 19 | (0xe0, 0x6c, 0x75) 20 | } 21 | 22 | fn green(&self) -> (u8, u8, u8) { 23 | (0x98, 0xc3, 0x79) 24 | } 25 | 26 | fn yellow(&self) -> (u8, u8, u8) { 27 | (0xe5, 0xc0, 0x7b) 28 | } 29 | 30 | fn blue(&self) -> (u8, u8, u8) { 31 | (0x52, 0x8b, 0xff) 32 | } 33 | 34 | fn magenta(&self) -> (u8, u8, u8) { 35 | (0xc6, 0x78, 0xdd) 36 | } 37 | 38 | fn cyan(&self) -> (u8, u8, u8) { 39 | (0x56, 0xb2, 0xc2) 40 | } 41 | 42 | fn white(&self) -> (u8, u8, u8) { 43 | (0xd7, 0xda, 0xe0) 44 | } 45 | 46 | fn bright_black(&self) -> (u8, u8, u8) { 47 | (0x7f, 0x84, 0x8e) 48 | } 49 | 50 | fn bright_red(&self) -> (u8, u8, u8) { 51 | (0xf4, 0x47, 0x47) 52 | } 53 | 54 | fn bright_green(&self) -> (u8, u8, u8) { 55 | (0x98, 0xc3, 0x79) 56 | } 57 | 58 | fn bright_yellow(&self) -> (u8, u8, u8) { 59 | (0xe5, 0xc0, 0x7b) 60 | } 61 | 62 | fn bright_blue(&self) -> (u8, u8, u8) { 63 | (0x52, 0x8b, 0xff) 64 | } 65 | 66 | fn bright_magenta(&self) -> (u8, u8, u8) { 67 | (0x7e, 0x00, 0x97) 68 | } 69 | 70 | fn bright_cyan(&self) -> (u8, u8, u8) { 71 | (0x56, 0xb6, 
0xc2) 72 | } 73 | 74 | fn bright_white(&self) -> (u8, u8, u8) { 75 | (0xd7, 0xda, 0xe0) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /ors-kernel/src/fs/fat/fat_entry.rs: -------------------------------------------------------------------------------- 1 | use super::Cluster; 2 | use core::fmt; 3 | 4 | /// Deserialized FAT entry. 5 | #[derive(PartialEq, Eq, Debug, Clone, Copy)] 6 | pub(super) enum FatEntry { 7 | Unused, 8 | Reserved, 9 | UsedChained(Cluster), 10 | UsedEoc, 11 | Bad, 12 | } 13 | 14 | impl FatEntry { 15 | pub(super) fn chain(self) -> Option { 16 | match self { 17 | Self::UsedChained(c) => Some(c), 18 | _ => None, 19 | } 20 | } 21 | } 22 | 23 | impl fmt::Display for FatEntry { 24 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 25 | match self { 26 | FatEntry::Unused => write!(f, "unused"), 27 | FatEntry::Reserved => write!(f, "reserved"), 28 | FatEntry::UsedChained(c) => write!(f, "used({})", c), 29 | FatEntry::UsedEoc => write!(f, "used(eoc)"), 30 | FatEntry::Bad => write!(f, "bad"), 31 | } 32 | } 33 | } 34 | 35 | impl From for FatEntry { 36 | fn from(c: Cluster) -> Self { 37 | Self::UsedChained(c) 38 | } 39 | } 40 | 41 | impl From for FatEntry { 42 | fn from(value: u32) -> Self { 43 | match value & 0x0fffffff { 44 | 0 => Self::Unused, 45 | 1 => Self::Reserved, 46 | n @ 0x00000002..=0x0ffffff6 => Self::UsedChained(Cluster::from_index(n as usize)), 47 | 0x0ffffff7 => Self::Bad, 48 | 0x0ffffff8..=0x0fffffff => Self::UsedEoc, 49 | 0x10000000..=0xffffffff => unreachable!(), 50 | } 51 | } 52 | } 53 | 54 | impl Into for FatEntry { 55 | fn into(self) -> u32 { 56 | match self { 57 | Self::Unused => 0, 58 | Self::Reserved => 1, 59 | Self::UsedChained(cluster) => cluster.index() as u32, 60 | Self::UsedEoc => 0x0fffffff, 61 | Self::Bad => 0x0ffffff7, 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /ors-kernel/src/segmentation.rs: 
-------------------------------------------------------------------------------- 1 | use crate::x64::{self, Segment}; 2 | use log::trace; 3 | use spin::Once; 4 | 5 | static mut GDT: x64::GlobalDescriptorTable = x64::GlobalDescriptorTable::new(); 6 | static mut TSS: x64::TaskStateSegment = x64::TaskStateSegment::new(); 7 | 8 | static KERNEL_CS: Once = Once::new(); 9 | static KERNEL_SS: Once = Once::new(); 10 | 11 | pub const DOUBLE_FAULT_IST_INDEX: u16 = 0; 12 | 13 | pub fn cs() -> x64::SegmentSelector { 14 | *KERNEL_CS 15 | .get() 16 | .expect("segmentation::cs is called before segmentation::initialize") 17 | } 18 | 19 | pub fn ss() -> x64::SegmentSelector { 20 | *KERNEL_SS 21 | .get() 22 | .expect("segmentation::ss is called before segmentation::initialize") 23 | } 24 | 25 | pub unsafe fn initialize() { 26 | // TODO: GDT needs to be created for each processor. 27 | trace!("INITIALIZING segmentation"); 28 | TSS.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = { 29 | const STACK_SIZE: usize = 4096 * 5; 30 | static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; 31 | let stack_start = x64::VirtAddr::from_ptr(&STACK[0]); 32 | let stack_end = stack_start + STACK_SIZE; 33 | stack_end 34 | }; 35 | let code_selector = GDT.add_entry(x64::Descriptor::kernel_code_segment()); 36 | let data_selector = GDT.add_entry(x64::Descriptor::kernel_data_segment()); 37 | let tss_selector = GDT.add_entry(x64::Descriptor::tss_segment(&TSS)); 38 | let null_ss = x64::SegmentSelector::new(0, x64::PrivilegeLevel::Ring0); 39 | GDT.load(); 40 | x64::DS::set_reg(null_ss); 41 | x64::ES::set_reg(null_ss); 42 | x64::FS::set_reg(null_ss); 43 | x64::GS::set_reg(null_ss); 44 | x64::CS::set_reg(code_selector); 45 | x64::SS::set_reg(data_selector); 46 | x64::load_tss(tss_selector); 47 | 48 | KERNEL_CS.call_once(|| code_selector); 49 | KERNEL_SS.call_once(|| data_selector); 50 | } 51 | -------------------------------------------------------------------------------- /ors-kernel/src/sync/once.rs: 
-------------------------------------------------------------------------------- 1 | use crate::interrupts::Cli; 2 | use core::fmt; 3 | 4 | /// `spin::Once` with `crate::interrupts::Cli` to avoid deadlocks. 5 | pub struct Once { 6 | inner: spin::Once, 7 | } 8 | 9 | impl Once { 10 | pub const INIT: Self = Self::new(); 11 | 12 | pub const fn new() -> Self { 13 | Self { 14 | inner: spin::Once::new(), 15 | } 16 | } 17 | 18 | pub const fn initialized(data: T) -> Self { 19 | Self { 20 | inner: spin::Once::initialized(data), 21 | } 22 | } 23 | 24 | pub fn as_mut_ptr(&self) -> *mut T { 25 | self.inner.as_mut_ptr() 26 | } 27 | 28 | pub fn get(&self) -> Option<&T> { 29 | self.inner.get() 30 | } 31 | 32 | pub unsafe fn get_unchecked(&self) -> &T { 33 | self.inner.get_unchecked() 34 | } 35 | 36 | pub fn get_mut(&mut self) -> Option<&mut T> { 37 | self.inner.get_mut() 38 | } 39 | 40 | pub fn try_into_inner(self) -> Option { 41 | self.inner.try_into_inner() 42 | } 43 | 44 | pub fn is_completed(&self) -> bool { 45 | self.inner.is_completed() 46 | } 47 | 48 | pub fn call_once T>(&self, f: F) -> &T { 49 | // We try get() at first to avoid Cli overhead 50 | match self.inner.get() { 51 | Some(data) => data, 52 | None => { 53 | let cli = Cli::new(); 54 | let data = self.inner.call_once(f); 55 | drop(cli); 56 | data 57 | } 58 | } 59 | } 60 | 61 | pub fn wait(&self) -> &T { 62 | self.inner.wait() 63 | } 64 | 65 | pub fn poll(&self) -> Option<&T> { 66 | self.inner.poll() 67 | } 68 | } 69 | 70 | impl fmt::Debug for Once { 71 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 72 | self.inner.fmt(f) 73 | } 74 | } 75 | 76 | impl From for Once { 77 | fn from(data: T) -> Self { 78 | Self::initialized(data) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /ors-kernel/src/acpi.rs: -------------------------------------------------------------------------------- 1 | use crate::x64; 2 | use acpi::platform::address::AddressSpace; 3 | use 
acpi::platform::interrupt::Apic; 4 | use acpi::platform::{PmTimer, ProcessorInfo}; 5 | use acpi::{AcpiHandler, AcpiTables, PlatformInfo}; 6 | use spin::Once; 7 | 8 | /* Process-wide ACPI platform description, populated exactly once by `initialize`. */ static PLATFORM_INFO: Once = Once::new(); 9 | 10 | /// Caller must ensure that the given rsdp is valid. 11 | /* Parses the ACPI tables reachable from `rsdp` and caches the resulting PlatformInfo; later calls are no-ops thanks to call_once. */ pub unsafe fn initialize(handler: impl AcpiHandler, rsdp: usize) { 12 | PLATFORM_INFO.call_once(|| { 13 | // https://wiki.osdev.org/MADT 14 | AcpiTables::from_rsdp(handler, rsdp) 15 | .unwrap() 16 | .platform_info() 17 | .unwrap() 18 | }); 19 | } 20 | 21 | /* All accessors below funnel through this; panics if called before `initialize`. */ fn platform_info() -> &'static PlatformInfo { 22 | PLATFORM_INFO 23 | .get() 24 | .expect("acpi::platform_info is called before acpi::initialize") 25 | } 26 | 27 | /* APIC description from the MADT; panics if the firmware reports a non-APIC interrupt model. */ pub fn apic_info() -> &'static Apic { 28 | match platform_info().interrupt_model { 29 | acpi::InterruptModel::Apic(ref apic) => apic, 30 | _ => panic!("Could not find APIC"), 31 | } 32 | } 33 | 34 | /* Per-processor topology info from the ACPI tables; panics if absent. */ pub fn processor_info() -> &'static ProcessorInfo { 35 | platform_info() 36 | .processor_info 37 | .as_ref() 38 | .expect("Could not find processor information") 39 | } 40 | 41 | /* ACPI Power Management timer description; panics if the platform lacks one. */ pub fn pm_timer() -> &'static PmTimer { 42 | platform_info() 43 | .pm_timer 44 | .as_ref() 45 | .expect("Could not find ACPI PM Timer") 46 | } 47 | 48 | /* Busy-waits `msec` milliseconds on the 3.579545 MHz ACPI PM timer via port I/O (MMIO unsupported, see assert). Wrap-around handling: on a 24-bit timer the target tick is masked to 24 bits, and when the target wrapped below `start` we first spin until the counter itself wraps past zero. */ pub fn wait_milliseconds_with_pm_timer(msec: u32) { 49 | // https://wiki.osdev.org/ACPI_Timer 50 | let pm_timer = pm_timer(); 51 | assert_eq!(pm_timer.base.address_space, AddressSpace::SystemIo); // TODO: MMIO Support 52 | assert_eq!(pm_timer.base.bit_width, 32); 53 | let mut time = x64::Port::::new(pm_timer.base.address as u16); 54 | 55 | const PM_TIMER_FREQ: usize = 3579545; 56 | let start = unsafe { time.read() }; 57 | let mut end = start.wrapping_add((PM_TIMER_FREQ * msec as usize / 1000) as u32); 58 | if !pm_timer.supports_32bit { 59 | end &= 0x00ffffff; 60 | } 61 | if end < start { 62 | while unsafe { time.read() } >= start {} 63 | } 64 | while unsafe { time.read() } < end {} 65 | } 66 | 
-------------------------------------------------------------------------------- /ors-kernel/src/sync/spin.rs: -------------------------------------------------------------------------------- 1 | use crate::interrupts::Cli; 2 | use core::fmt; 3 | use core::mem; 4 | use core::ops::{Deref, DerefMut}; 5 | 6 | /// `spin::Mutex` with `crate::interrupts::Cli` to avoid deadlocks. 7 | #[derive(Debug)] 8 | pub struct Spin { 9 | inner: spin::Mutex, 10 | } 11 | 12 | impl Spin { 13 | pub fn get_mut(&mut self) -> &mut T { 14 | self.inner.get_mut() 15 | } 16 | 17 | pub fn lock(&self) -> SpinGuard { 18 | let cli = Cli::new(); 19 | let inner = self.inner.lock(); 20 | SpinGuard { inner, cli } 21 | } 22 | 23 | pub fn try_lock(&self) -> Option> { 24 | let cli = Cli::new(); 25 | let inner = self.inner.try_lock()?; 26 | Some(SpinGuard { inner, cli }) 27 | } 28 | 29 | pub fn is_locked(&self) -> bool { 30 | self.inner.is_locked() 31 | } 32 | } 33 | 34 | impl Spin { 35 | pub const fn new(value: T) -> Self { 36 | Self { 37 | inner: spin::Mutex::new(value), 38 | } 39 | } 40 | 41 | pub fn into_inner(self) -> T { 42 | self.inner.into_inner() 43 | } 44 | } 45 | 46 | pub struct SpinGuard<'a, T: 'a + ?Sized> { 47 | inner: spin::MutexGuard<'a, T>, 48 | cli: Cli, 49 | } 50 | 51 | impl<'a, T: 'a + ?Sized> SpinGuard<'a, T> { 52 | pub fn leak(this: Self) -> &'a mut T { 53 | let inner = spin::MutexGuard::leak(this.inner); 54 | mem::forget(this.cli); 55 | inner 56 | } 57 | } 58 | 59 | impl<'a, T: 'a + ?Sized> Deref for SpinGuard<'a, T> { 60 | type Target = T; 61 | 62 | fn deref(&self) -> &Self::Target { 63 | &*self.inner 64 | } 65 | } 66 | 67 | impl<'a, T: 'a + ?Sized> DerefMut for SpinGuard<'a, T> { 68 | fn deref_mut(&mut self) -> &mut Self::Target { 69 | &mut *self.inner 70 | } 71 | } 72 | 73 | impl<'a, T: 'a + fmt::Debug + ?Sized> fmt::Debug for SpinGuard<'a, T> { 74 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 75 | fmt::Debug::fmt(&**self, f) 76 | } 77 | } 78 | 79 | impl<'a, T: 'a + 
fmt::Display + ?Sized> fmt::Display for SpinGuard<'a, T> { 80 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 81 | fmt::Display::fmt(&**self, f) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /ors-kernel/src/console/kbd.rs: -------------------------------------------------------------------------------- 1 | use super::Input; 2 | use log::trace; 3 | use pc_keyboard::layouts::Jis109Key; 4 | use pc_keyboard::{DecodedKey, HandleControl, KeyCode, KeyState, Keyboard, ScancodeSet1}; 5 | 6 | pub struct Decoder { 7 | inner: Keyboard, 8 | lctrl: bool, 9 | rctrl: bool, 10 | } 11 | 12 | impl Decoder { 13 | pub fn new() -> Self { 14 | Self { 15 | inner: Keyboard::new(Jis109Key, ScancodeSet1, HandleControl::Ignore), 16 | lctrl: false, 17 | rctrl: false, 18 | } 19 | } 20 | 21 | pub fn add(&mut self, byte: u8) -> Option { 22 | if let Ok(Some(e)) = self.inner.add_byte(byte) { 23 | if e.code == KeyCode::ControlLeft { 24 | self.lctrl = e.state == KeyState::Down; 25 | } 26 | if e.code == KeyCode::ControlRight { 27 | self.rctrl = e.state == KeyState::Down; 28 | } 29 | match self.inner.process_keyevent(e)? 
{ 30 | DecodedKey::RawKey(KeyCode::Insert) => Some(Input::Insert), 31 | DecodedKey::RawKey(KeyCode::Home) => Some(Input::Home), 32 | DecodedKey::RawKey(KeyCode::End) => Some(Input::End), 33 | DecodedKey::RawKey(KeyCode::PageUp) => Some(Input::PageUp), 34 | DecodedKey::RawKey(KeyCode::PageDown) => Some(Input::PageDown), 35 | DecodedKey::RawKey(KeyCode::ArrowUp) => Some(Input::ArrowUp), 36 | DecodedKey::RawKey(KeyCode::ArrowDown) => Some(Input::ArrowDown), 37 | DecodedKey::RawKey(KeyCode::ArrowLeft) => Some(Input::ArrowLeft), 38 | DecodedKey::RawKey(KeyCode::ArrowRight) => Some(Input::ArrowRight), 39 | DecodedKey::Unicode( 40 | // BS | HT | LF | DEL | printable characters 41 | c @ ('\x08' | '\x09' | '\x0a' | '\x7f' | ' '..='~'), 42 | ) => { 43 | if self.lctrl || self.rctrl { 44 | Some(Input::Ctrl(c)) 45 | } else { 46 | Some(Input::Char(c)) 47 | } 48 | } 49 | key => { 50 | trace!("kbd: Unhandled key: {:?}", key); 51 | None 52 | } 53 | } 54 | } else { 55 | None 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /ors-loader/src/fs.rs: -------------------------------------------------------------------------------- 1 | //! A thin wrapper of the UEFI file system protocol. 2 | 3 | use alloc::boxed::Box; 4 | use alloc::vec::Vec; 5 | use uefi::prelude::*; 6 | use uefi::proto::media::file::{ 7 | Directory, File, FileAttribute, FileInfo, FileMode, FileType, RegularFile, 8 | }; 9 | 10 | pub fn open_root_dir(image: Handle, bs: &BootServices) -> Directory { 11 | let sfs = bs.get_image_file_system(image).unwrap_success(); 12 | // NOTE: Is it safe? Internally BootServices::get_image_file_system does something similar. 
13 | unsafe { &mut *sfs.interface.get() } 14 | .open_volume() 15 | .unwrap_success() 16 | } 17 | 18 | /* Create `filename` under `dir` via FileMode::CreateReadWrite; `create_dir` selects the DIRECTORY attribute. Panics (expect_success) on failure. */ pub fn create(dir: &mut Directory, filename: &str, create_dir: bool) -> FileType { 19 | let attr = if create_dir { 20 | FileAttribute::DIRECTORY 21 | } else { 22 | FileAttribute::empty() 23 | }; 24 | dir.open(filename, FileMode::CreateReadWrite, attr) 25 | .expect_success("Failed to create file") 26 | .into_type() 27 | .unwrap_success() 28 | } 29 | 30 | /* Read-only open of an existing entry; panics if it cannot be opened. */ pub fn open(dir: &mut Directory, filename: &str) -> FileType { 31 | dir.open(filename, FileMode::Read, FileAttribute::empty()) 32 | .expect_success("Failed to open file") 33 | .into_type() 34 | .unwrap_success() 35 | } 36 | 37 | /* Like `create`, but panics unless the result is a regular file. */ pub fn create_file(dir: &mut Directory, filename: &str) -> RegularFile { 38 | match create(dir, filename, false) { 39 | FileType::Regular(file) => file, 40 | FileType::Dir(_) => panic!("Not a regular file: {}", filename), 41 | } 42 | } 43 | 44 | /* Like `open`, but panics unless the result is a regular file. */ pub fn open_file(dir: &mut Directory, filename: &str) -> RegularFile { 45 | match open(dir, filename) { 46 | FileType::Regular(file) => file, 47 | FileType::Dir(_) => panic!("Not a regular file: {}", filename), 48 | } 49 | } 50 | 51 | /* Read the whole file into memory, sized from its FileInfo. NOTE(review): assumes a single read() call fills the buffer completely — confirm against EFI_FILE_PROTOCOL.Read semantics. */ pub fn read_file_to_vec(file: &mut RegularFile) -> Vec { 52 | let size = get_file_info(file).file_size() as usize; 53 | let mut buf = vec![0; size]; 54 | file.read(&mut buf).unwrap_success(); 55 | buf 56 | } 57 | 58 | /* Fetch the FileInfo (size, attributes, ...) for any open file handle. */ pub fn get_file_info(file: &mut impl File) -> Box { 59 | file.get_boxed_info::().unwrap_success() 60 | } 61 | 62 | /* fwrite!(file, fmt, ...): format into a String and write the bytes to a UEFI file, panicking on write failure. */ macro_rules! fwrite { 63 | ($file:expr, $format:tt $( $rest:tt )*) => { 64 | $file.write(format!($format $( $rest )*).as_bytes()).unwrap_success() 65 | }; 66 | } 67 | 68 | macro_rules! 
fwriteln { 69 | ($file:expr, $format:tt $( $rest:tt )*) => { 70 | fwrite!($file, concat!($format, "\n") $( $rest )*) 71 | }; 72 | } 73 | -------------------------------------------------------------------------------- /ors-kernel/src/sync/mutex.rs: -------------------------------------------------------------------------------- 1 | use super::spin::Spin; 2 | use crate::task; 3 | use core::cell::UnsafeCell; 4 | use core::fmt; 5 | use core::ops::{Deref, DerefMut}; 6 | 7 | /// A mutex implementation based on `spin::Spin` and `task::scheduler`. 8 | #[derive(Debug)] 9 | pub struct Mutex { 10 | locked: Spin, 11 | data: UnsafeCell, 12 | } 13 | 14 | impl Mutex { 15 | fn chan(&self) -> task::WaitChannel { 16 | task::WaitChannel::from_ptr(self) 17 | } 18 | 19 | pub fn get_mut(&mut self) -> &mut T { 20 | self.data.get_mut() 21 | } 22 | 23 | pub fn lock(&self) -> MutexGuard { 24 | MutexGuard::new(self) 25 | } 26 | } 27 | 28 | impl Mutex { 29 | pub const fn new(value: T) -> Self { 30 | Self { 31 | locked: Spin::new(false), 32 | data: UnsafeCell::new(value), 33 | } 34 | } 35 | 36 | pub fn into_inner(self) -> T { 37 | self.data.into_inner() 38 | } 39 | } 40 | 41 | pub struct MutexGuard<'a, T: 'a + ?Sized> { 42 | mutex: &'a Mutex, 43 | } 44 | 45 | impl<'a, T: 'a + ?Sized> MutexGuard<'a, T> { 46 | fn new(mutex: &'a Mutex) -> Self { 47 | loop { 48 | let mut locked = mutex.locked.lock(); 49 | if !*locked { 50 | *locked = true; // acquire lock 51 | break; 52 | } 53 | task::scheduler().block(mutex.chan(), None, locked); 54 | } 55 | Self { mutex } 56 | } 57 | } 58 | 59 | impl<'a, T: 'a + ?Sized> Drop for MutexGuard<'a, T> { 60 | fn drop(&mut self) { 61 | *self.mutex.locked.lock() = false; 62 | task::scheduler().release(self.mutex.chan()); 63 | } 64 | } 65 | 66 | impl<'a, T: 'a + ?Sized> Deref for MutexGuard<'a, T> { 67 | type Target = T; 68 | 69 | fn deref(&self) -> &Self::Target { 70 | unsafe { &*self.mutex.data.get() } 71 | } 72 | } 73 | 74 | impl<'a, T: 'a + ?Sized> DerefMut for 
MutexGuard<'a, T> { 75 | fn deref_mut(&mut self) -> &mut Self::Target { 76 | unsafe { &mut *self.mutex.data.get() } 77 | } 78 | } 79 | 80 | impl<'a, T: 'a + fmt::Debug + ?Sized> fmt::Debug for MutexGuard<'a, T> { 81 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 82 | fmt::Debug::fmt(&**self, f) 83 | } 84 | } 85 | 86 | impl<'a, T: 'a + fmt::Display + ?Sized> fmt::Display for MutexGuard<'a, T> { 87 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 88 | fmt::Display::fmt(&**self, f) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /ors-kernel/asm.s: -------------------------------------------------------------------------------- 1 | ; System V AMD64 Calling Convention 2 | ; Registers: RDI, RSI, RDX, RCX, R8, R9 3 | 4 | bits 64 5 | 6 | extern kernel_main2 7 | 8 | section .bss align=16 9 | kernel_main_stack: 10 | resb 1024 * 1024 11 | 12 | section .text 13 | global kernel_main 14 | kernel_main: 15 | mov rsp, kernel_main_stack + 1024 * 1024 16 | call kernel_main2 17 | .fin: 18 | hlt 19 | jmp .fin 20 | 21 | global get_cr3 ; fn get_cr3() -> u64; 22 | get_cr3: 23 | mov rax, cr3 24 | ret 25 | 26 | global switch_context 27 | switch_context: ; fn switch_context(next_ctx: *const Context, current_ctx: *mut Context); 28 | ; Save 29 | mov [rsi + 0x40], rax 30 | mov [rsi + 0x48], rbx 31 | mov [rsi + 0x50], rcx 32 | mov [rsi + 0x58], rdx 33 | mov [rsi + 0x60], rdi 34 | mov [rsi + 0x68], rsi 35 | lea rax, [rsp + 8] ; Save RSP by removing the offset of the return address (which was pushed by call inst) 36 | mov [rsi + 0x70], rax ; -> current_ctx.rsp 37 | mov [rsi + 0x78], rbp 38 | mov [rsi + 0x80], r8 39 | mov [rsi + 0x88], r9 40 | mov [rsi + 0x90], r10 41 | mov [rsi + 0x98], r11 42 | mov [rsi + 0xa0], r12 43 | mov [rsi + 0xa8], r13 44 | mov [rsi + 0xb0], r14 45 | mov [rsi + 0xb8], r15 46 | mov rax, cr3 47 | mov [rsi + 0x00], rax 48 | mov rax, [rsp] ; Load the return address (which was pushed by call inst) 49 | mov 
[rsi + 0x08], rax ; -> current_ctx.rip 50 | pushfq 51 | pop qword [rsi + 0x10] 52 | mov ax, cs 53 | mov [rsi + 0x20], rax 54 | mov bx, ss 55 | mov [rsi + 0x28], rbx 56 | mov cx, fs 57 | mov [rsi + 0x30], rcx 58 | mov dx, gs 59 | mov [rsi + 0x38], rdx 60 | fxsave [rsi + 0xc0] 61 | ; Mark as saved 62 | mov al, 1 63 | xchg [rsi + 0x2c0], al 64 | ; Restore 65 | ; Build an stack frame for iret to switch the context 66 | push qword [rdi + 0x28] ; SS 67 | push qword [rdi + 0x70] ; RSP 68 | push qword [rdi + 0x10] ; RFLAGS 69 | push qword [rdi + 0x20] ; CS 70 | push qword [rdi + 0x08] ; RIP 71 | ; Inverse of save 72 | fxrstor [rdi + 0xc0] 73 | mov rax, [rdi + 0x00] 74 | mov cr3, rax 75 | mov rax, [rdi + 0x30] 76 | mov fs, ax 77 | mov rax, [rdi + 0x38] 78 | mov gs, ax 79 | mov rax, [rdi + 0x40] 80 | mov rbx, [rdi + 0x48] 81 | mov rcx, [rdi + 0x50] 82 | mov rdx, [rdi + 0x58] 83 | mov rsi, [rdi + 0x68] 84 | mov rbp, [rdi + 0x78] 85 | mov r8, [rdi + 0x80] 86 | mov r9, [rdi + 0x88] 87 | mov r10, [rdi + 0x90] 88 | mov r11, [rdi + 0x98] 89 | mov r12, [rdi + 0xa0] 90 | mov r13, [rdi + 0xa8] 91 | mov r14, [rdi + 0xb0] 92 | mov r15, [rdi + 0xb8] 93 | mov rdi, [rdi + 0x60] 94 | o64 iret 95 | -------------------------------------------------------------------------------- /ors-kernel/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(custom_test_frameworks)] 4 | #![feature(abi_x86_interrupt)] 5 | #![feature(alloc_error_handler)] 6 | #![feature(const_mut_refs)] 7 | #![feature(maybe_uninit_uninit_array)] 8 | #![feature(maybe_uninit_array_assume_init)] 9 | #![feature(const_fn_fn_ptr_basics)] 10 | #![test_runner(crate::test_runner)] 11 | #![reexport_test_harness_main = "test_main"] 12 | 13 | extern crate alloc; 14 | 15 | #[macro_use] 16 | pub mod print; 17 | pub mod acpi; 18 | pub mod allocator; 19 | pub mod console; 20 | pub mod context; 21 | pub mod cpu; 22 | pub mod devices; 23 | pub mod fs; 24 | 
pub mod graphics; 25 | pub mod interrupts; 26 | pub mod logger; 27 | pub mod paging; 28 | pub mod phys_memory; 29 | pub mod segmentation; 30 | mod shell; 31 | pub mod sync; 32 | pub mod task; 33 | pub mod x64; 34 | 35 | use ors_common::frame_buffer::FrameBuffer as RawFrameBuffer; 36 | use ors_common::memory_map::MemoryMap; 37 | 38 | #[no_mangle] 39 | pub extern "sysv64" fn kernel_main2(fb: &RawFrameBuffer, mm: &MemoryMap, rsdp: u64) { 40 | x64::interrupts::enable(); // To ensure that interrupts are enabled by default 41 | 42 | let cli = interrupts::Cli::new(); 43 | logger::register(); 44 | unsafe { segmentation::initialize() }; 45 | unsafe { paging::initialize() }; 46 | unsafe { phys_memory::frame_manager().initialize(mm) }; 47 | unsafe { acpi::initialize(paging::KernelAcpiHandler, rsdp as usize) }; 48 | cpu::initialize(); 49 | unsafe { interrupts::initialize() }; 50 | task::initialize_scheduler(); 51 | devices::pci::initialize_devices(); 52 | devices::virtio::block::initialize(); 53 | devices::serial::default_port().init(); 54 | console::initialize((*fb).into()); 55 | task::scheduler().add(task::Priority::L1, shell::run, 0); 56 | drop(cli); 57 | 58 | #[cfg(test)] 59 | test_main(); 60 | 61 | loop { 62 | x64::hlt() 63 | } 64 | } 65 | 66 | #[panic_handler] 67 | fn panic(info: &core::panic::PanicInfo) -> ! { 68 | sprintln!("{}", info); 69 | 70 | #[cfg(test)] 71 | devices::qemu::exit(devices::qemu::ExitCode::Failure); 72 | 73 | loop { 74 | x64::hlt() 75 | } 76 | } 77 | 78 | #[global_allocator] 79 | static ALLOCATOR: allocator::KernelAllocator = allocator::KernelAllocator::new(); 80 | 81 | #[alloc_error_handler] 82 | fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! 
{ 83 | panic!("Allocation error: {:?}", layout) 84 | } 85 | 86 | #[cfg(test)] 87 | fn test_runner(tests: &[&dyn Fn()]) { 88 | use log::info; 89 | 90 | info!("RUNNING {} tests", tests.len()); 91 | for test in tests { 92 | test(); 93 | } 94 | 95 | devices::qemu::exit(devices::qemu::ExitCode::Success); 96 | } 97 | -------------------------------------------------------------------------------- /ors-kernel/src/graphics/font.rs: -------------------------------------------------------------------------------- 1 | use super::{Color, FrameBufferExt, FrameBufferFormat, VecBuffer}; 2 | use ab_glyph::{Font, FontRef, ScaleFont}; 3 | use alloc::collections::BTreeMap; 4 | 5 | #[derive(Debug)] 6 | pub struct MonospaceFont<'a> { 7 | size: u32, 8 | normal: FontRef<'a>, // TODO: Use instead of FontRef 9 | bold: FontRef<'a>, 10 | format: FrameBufferFormat, 11 | cache: BTreeMap, 12 | } 13 | 14 | impl<'a> MonospaceFont<'a> { 15 | pub fn new(size: u32, normal: &'a [u8], bold: &'a [u8], format: FrameBufferFormat) -> Self { 16 | Self { 17 | size, 18 | normal: FontRef::try_from_slice(normal).unwrap(), 19 | bold: FontRef::try_from_slice(bold).unwrap(), 20 | format, 21 | cache: BTreeMap::new(), 22 | } 23 | } 24 | 25 | pub fn unit_width(&self) -> u32 { 26 | (self.size + 1) / 2 27 | } 28 | 29 | pub fn unit_height(&self) -> u32 { 30 | self.size 31 | } 32 | 33 | pub fn format(&self) -> FrameBufferFormat { 34 | self.format 35 | } 36 | 37 | pub fn get(&mut self, ch: char, fg: Color, bg: Color, style: FontStyle) -> &VecBuffer { 38 | let key = CacheKey { ch, fg, bg, style }; 39 | let Self { size, format, .. 
} = *self; 40 | let unit_width = self.unit_width(); 41 | let unit_height = self.unit_height(); 42 | let font = match style { 43 | FontStyle::Normal => &self.normal, 44 | FontStyle::Bold => &self.bold, 45 | } 46 | .as_scaled(size as f32); 47 | self.cache.entry(key).or_insert_with(|| { 48 | let mut glyph = font.scaled_glyph(ch); 49 | glyph.position = ab_glyph::point(0.0, font.ascent()); 50 | let mut buf = VecBuffer::new(unit_width as usize, unit_height as usize, format); 51 | buf.clear(bg); 52 | if let Some(q) = font.outline_glyph(glyph) { 53 | let min_x = q.px_bounds().min.x as i32; 54 | let min_y = q.px_bounds().min.y as i32; 55 | q.draw(|x, y, c| { 56 | buf.write_pixel(min_x + x as i32, min_y + y as i32, bg.mix(fg, c)); 57 | }); 58 | } 59 | buf 60 | }) 61 | } 62 | } 63 | 64 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 65 | struct CacheKey { 66 | ch: char, 67 | fg: Color, 68 | bg: Color, 69 | style: FontStyle, 70 | } 71 | 72 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 73 | pub enum FontStyle { 74 | Normal, 75 | Bold, 76 | } 77 | 78 | impl FontStyle { 79 | pub fn is_bold(self) -> bool { 80 | matches!(self, Self::Bold) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /ors-kernel/src/paging.rs: -------------------------------------------------------------------------------- 1 | use crate::x64::{self, PageSize}; 2 | use acpi::{AcpiHandler, PhysicalMapping}; 3 | use core::ptr::NonNull; 4 | use log::trace; 5 | use spin::Lazy; 6 | 7 | const EMPTY_PAGE_TABLE: x64::PageTable = x64::PageTable::new(); 8 | 9 | static PAGE_TABLE: Lazy = Lazy::new(|| unsafe { initialize_identity_mapping() }); 10 | static mut PML4_TABLE: x64::PageTable = x64::PageTable::new(); 11 | static mut PDP_TABLE: x64::PageTable = x64::PageTable::new(); 12 | static mut PAGE_DIRECTORY: [x64::PageTable; 64] = [EMPTY_PAGE_TABLE; 64]; // supports up to 64GiB 13 | 14 | pub unsafe fn initialize() { 15 | 
trace!("INITIALIZING paging"); 16 | x64::Cr3::write(*PAGE_TABLE, x64::Cr3Flags::empty()); 17 | } 18 | 19 | unsafe fn initialize_identity_mapping() -> x64::PhysFrame { 20 | // Initialize identity mapping (always available but user inaccessible) 21 | use x64::PageTableFlags as Flags; 22 | 23 | let flags = Flags::PRESENT | Flags::WRITABLE | Flags::GLOBAL; 24 | 25 | unsafe fn phys_frame(page_table: &'static x64::PageTable) -> x64::PhysFrame { 26 | // `&'static x64::PageTable` are frame aligned 27 | x64::PhysFrame::from_start_address( 28 | // The virtual address of the `page_table` is identical to its physical address 29 | x64::PhysAddr::new(page_table as *const x64::PageTable as u64), 30 | ) 31 | .unwrap() 32 | } 33 | 34 | // PML4_TABLE[0] -> PDP_TABLE 35 | PML4_TABLE[0].set_frame(phys_frame(&PDP_TABLE), flags); 36 | 37 | for (i, d) in PAGE_DIRECTORY.iter_mut().enumerate() { 38 | // PDP_TABLE[i] -> PAGE_DIRECTORY[i] 39 | PDP_TABLE[i].set_frame(phys_frame(d), flags); 40 | 41 | for (j, p) in PAGE_DIRECTORY[i].iter_mut().enumerate() { 42 | // PAGE_DIRECTORY[i][j] -> (identical mapping) 43 | let addr = 44 | x64::PhysAddr::new(i as u64 * x64::Size1GiB::SIZE + j as u64 * x64::Size2MiB::SIZE); 45 | p.set_addr(addr, flags | Flags::HUGE_PAGE); 46 | } 47 | } 48 | 49 | phys_frame(&PML4_TABLE) 50 | } 51 | 52 | #[allow(dead_code)] 53 | unsafe fn mapper() -> impl x64::Mapper + x64::Translate { 54 | let _ = Lazy::force(&PAGE_TABLE); 55 | // Since ors uses identity mapping, we can use OffsetPageTable with offset=0. 56 | // TODO: Replace it with manually implemented one 57 | x64::OffsetPageTable::new(&mut PML4_TABLE, x64::VirtAddr::zero()) 58 | } 59 | 60 | pub fn as_virt_addr(addr: x64::PhysAddr) -> Option { 61 | if addr.as_u64() < x64::Size1GiB::SIZE * 64 { 62 | // Physical memory areas of up to 64 GiB are identity-mapped. 
63 | Some(x64::VirtAddr::new(addr.as_u64())) 64 | } else { 65 | None 66 | } 67 | } 68 | 69 | pub fn as_phys_addr(addr: x64::VirtAddr) -> Option { 70 | if addr.as_u64() < x64::Size1GiB::SIZE * 64 { 71 | // Virtual memory areas of up to 64 GiB are identity-mapped. 72 | Some(x64::PhysAddr::new(addr.as_u64())) 73 | } else { 74 | // TODO: How this should be handled? 75 | // unsafe { mapper().translate_addr(addr) } 76 | None 77 | } 78 | } 79 | 80 | #[derive(Clone, Debug)] 81 | pub struct KernelAcpiHandler; 82 | 83 | impl AcpiHandler for KernelAcpiHandler { 84 | unsafe fn map_physical_region(&self, addr: usize, size: usize) -> PhysicalMapping { 85 | let ptr = as_virt_addr(x64::PhysAddr::new(addr as u64)) 86 | .unwrap() 87 | .as_mut_ptr(); 88 | PhysicalMapping::new(addr, NonNull::new(ptr).unwrap(), size, size, self.clone()) 89 | } 90 | 91 | fn unmap_physical_region(_region: &PhysicalMapping) {} 92 | } 93 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ors 2 | 3 | ors is an experimental x86_64 OS implementation with Rust. 4 | 5 |

6 | 7 |

8 | 9 | ## Setup 10 | 11 | ```bash 12 | # Rust nightly required at the moment 13 | rustup default nightly 14 | 15 | # Build ors-loader.efi and ors-kernel.elf 16 | make 17 | 18 | # Run on QEMU 19 | make qemu 20 | # ... is equivalent to 21 | ./qemu/make_and_run_image.sh \ 22 | target/x86_64-unknown-uefi/debug/ors-loader.efi \ 23 | target/x86_64-unknown-none-ors/debug/ors-kernel.elf 24 | ``` 25 | 26 | ## Comparison 27 | 28 | ors is based on [MikanOS](https://github.com/uchan-nos/mikanos), [blog_os (Second Edition)](https://os.phil-opp.com/), and [xv6](https://github.com/mit-pdos/xv6-public). 29 | 30 | | | ors | MikanOS | blog_os | xv6 | 31 | | ------------------- | --------------- | -------------- | ---------------- | ------------- | 32 | | Target | x86_64 | x86_64 | x86_64 | x86 [^1] | 33 | | Written in | Rust | C++ | Rust | C | 34 | | Boot by | UEFI BIOS | UEFI BIOS | Legacy BIOS [^2] | Legacy BIOS | 35 | | Screen Rendering | GOP by UEFI | GOP by UEFI | VGA Text Mode | VGA Text Mode | 36 | | Serial Port | 16550 UART | - | 16550 UART | 16550 UART | 37 | | Hardware Interrupts | APIC | APIC | 8259 PIC | APIC | 38 | | Keyboard Support | PS/2 | USB (xHCI) | PS/2 | PS/2 | 39 | | Mouse Support | - | USB (xHCI) | - | - | 40 | | Block Support | VirtIO over PCI | - [^3] | - | IDE [^4] | 41 | | Timers | APIC + ACPI PM | APIC + ACPI PM | 8259 PIC | APIC | 42 | | Multitasking | Preemptive | Preemptive | WIP [^5] | Preemptive | 43 | | File System | FAT | FAT [^6] | - | original [^7] | 44 | 45 | [^1]: Maintaining the x86 version has stopped, and switched to the [RISC-V version](https://github.com/mit-pdos/xv6-riscv) 46 | [^2]: [UEFI is planned](https://github.com/phil-opp/blog_os/issues/349) 47 | [^3]: Supports only very limited reading (by UEFI Block I/O) 48 | [^4]: [RISC-V version of xv6](https://github.com/mit-pdos/xv6-riscv) supports VirtIO over MMIO 49 | [^5]: blog_os supports [Cooperative Multitasking](https://os.phil-opp.com/async-await/) at the moment 50 | [^6]: Read-only 
support 51 | [^7]: Simpler but similar to modern UNIX file systems, including crash recovery 52 | 53 | ## Roadmap 54 | 55 | - [ ] Complete [ゼロからの OS 自作入門](https://www.amazon.co.jp/gp/product/B08Z3MNR9J) 56 | - [x] Chapter 0-3: Boot loader 57 | - [x] Chapter 4-5: Screen rendering 58 | - [x] Chapter 6, 12: User inputs 59 | - [x] Chapter 7: Interrupts 60 | - [x] Chapter 8: Physical memory management 61 | - [x] Chapter 9-10 (skipped) 62 | - [x] Chapter 11: Timers 63 | - [x] Chapter 13-14: Multitasking 64 | - [x] Chapter 15-16: Terminal and commands 65 | - [x] Chapter 17: File system 66 | - [ ] Chapter 18: User applications 67 | - [x] Chapter 19: Paging 68 | - [ ] Chapter 20: System calls 69 | - [ ] TBD 70 | - [ ] Chapter 27: Application memory management 71 | - [ ] TBD 72 | - [x] Complete [Writing an OS in Rust](https://os.phil-opp.com/) (second edition) 73 | - [x] Bare Bones 74 | - [x] Interrupts 75 | - [x] Memory Management 76 | - [x] Multitasking (Incomplete) 77 | - [ ] Compare with [xv6](https://github.com/mit-pdos/xv6-public) 78 | - [ ] Enable multicore 79 | - [ ] Try to implement TCP protocol stack 80 | 81 | ## Resources 82 | 83 | - ors uses [Tamzen font](https://github.com/sunaku/tamzen-font). 84 | - ors uses [One Monokai Theme](https://github.com/azemoh/vscode-one-monokai) as a color scheme of the terminal. 
85 | -------------------------------------------------------------------------------- /ors-kernel/src/graphics/frame_buffer.rs: -------------------------------------------------------------------------------- 1 | use super::Color; 2 | use alloc::vec; 3 | use alloc::vec::Vec; 4 | use core::slice; 5 | use ors_common::frame_buffer::{FrameBuffer as RawFrameBuffer, PixelFormat as RawPixelFormat}; 6 | 7 | #[derive(PartialEq, Eq, Debug, Clone, Copy)] 8 | pub enum FrameBufferFormat { 9 | Rgbx, // [R, G, B, _, R, G, B, _, ..; stride * height * 4] 10 | Bgrx, // [B, G, R, _, B, G, R, _, ..; stride * height * 4] 11 | } 12 | 13 | impl FrameBufferFormat { 14 | pub fn encoder(&self) -> fn(Color) -> [u8; 4] { 15 | match self { 16 | Self::Rgbx => |c| [c.r, c.g, c.b, 255], 17 | Self::Bgrx => |c| [c.b, c.g, c.r, 255], 18 | } 19 | } 20 | 21 | pub fn decoder(&self) -> fn([u8; 4]) -> Color { 22 | match self { 23 | Self::Rgbx => |a| Color::new(a[0], a[1], a[2]), 24 | Self::Bgrx => |a| Color::new(a[2], a[1], a[0]), 25 | } 26 | } 27 | } 28 | 29 | impl From for FrameBufferFormat { 30 | fn from(f: RawPixelFormat) -> Self { 31 | match f { 32 | RawPixelFormat::Rgb => Self::Rgbx, 33 | RawPixelFormat::Bgr => Self::Bgrx, 34 | } 35 | } 36 | } 37 | 38 | pub trait FrameBuffer { 39 | fn bytes(&self) -> &[u8]; 40 | fn bytes_mut(&mut self) -> &mut [u8]; 41 | fn width(&self) -> usize; 42 | fn height(&self) -> usize; 43 | fn stride(&self) -> usize; 44 | fn format(&self) -> FrameBufferFormat; 45 | } 46 | 47 | #[derive(Debug, Clone)] 48 | pub struct VecBuffer { 49 | data: Vec, 50 | width: usize, 51 | height: usize, 52 | format: FrameBufferFormat, 53 | } 54 | 55 | impl VecBuffer { 56 | pub fn new(width: usize, height: usize, format: FrameBufferFormat) -> Self { 57 | Self { 58 | data: vec![0; width * height * 4], 59 | width, 60 | height, 61 | format, 62 | } 63 | } 64 | } 65 | 66 | impl FrameBuffer for VecBuffer { 67 | fn bytes(&self) -> &[u8] { 68 | self.data.as_slice() 69 | } 70 | 71 | fn bytes_mut(&mut 
self) -> &mut [u8] { 72 | self.data.as_mut_slice() 73 | } 74 | 75 | fn width(&self) -> usize { 76 | self.width 77 | } 78 | 79 | fn height(&self) -> usize { 80 | self.height 81 | } 82 | 83 | fn stride(&self) -> usize { 84 | self.width 85 | } 86 | 87 | fn format(&self) -> FrameBufferFormat { 88 | self.format 89 | } 90 | } 91 | 92 | #[derive(Debug)] 93 | pub struct ScreenBuffer { 94 | ptr: *mut u8, 95 | stride: usize, 96 | width: usize, 97 | height: usize, 98 | format: FrameBufferFormat, 99 | } 100 | 101 | impl FrameBuffer for ScreenBuffer { 102 | fn bytes(&self) -> &[u8] { 103 | unsafe { 104 | slice::from_raw_parts( 105 | self.ptr as *const u8, 106 | (self.stride * self.height * 4) as usize, 107 | ) 108 | } 109 | } 110 | 111 | fn bytes_mut(&mut self) -> &mut [u8] { 112 | unsafe { slice::from_raw_parts_mut(self.ptr, (self.stride * self.height * 4) as usize) } 113 | } 114 | 115 | fn width(&self) -> usize { 116 | self.width 117 | } 118 | 119 | fn height(&self) -> usize { 120 | self.height 121 | } 122 | 123 | fn format(&self) -> FrameBufferFormat { 124 | self.format 125 | } 126 | 127 | fn stride(&self) -> usize { 128 | self.stride 129 | } 130 | } 131 | 132 | impl From for ScreenBuffer { 133 | fn from(fb: RawFrameBuffer) -> Self { 134 | Self { 135 | ptr: fb.frame_buffer, 136 | stride: fb.stride as usize, 137 | width: fb.resolution.0 as usize, 138 | height: fb.resolution.1 as usize, 139 | format: fb.format.into(), 140 | } 141 | } 142 | } 143 | 144 | unsafe impl Send for ScreenBuffer {} 145 | 146 | unsafe impl Sync for ScreenBuffer {} 147 | -------------------------------------------------------------------------------- /ors-kernel/src/graphics.rs: -------------------------------------------------------------------------------- 1 | mod color; 2 | mod font; 3 | mod frame_buffer; 4 | mod rect; 5 | mod text_buffer; 6 | 7 | pub use color::Color; 8 | pub use font::{FontStyle, MonospaceFont}; 9 | pub use frame_buffer::{FrameBuffer, FrameBufferFormat, ScreenBuffer, VecBuffer}; 10 
| pub use rect::Rect; 11 | pub use text_buffer::MonospaceTextBuffer; 12 | 13 | pub trait FrameBufferExt: FrameBuffer { 14 | fn rect(&self) -> Rect { 15 | Rect::new(0, 0, self.width() as u32, self.height() as u32) 16 | } 17 | 18 | fn pixel_index(&self, x: i32, y: i32) -> Option { 19 | if self.rect().contains(x, y) { 20 | Some((y as usize * self.stride() + x as usize) * 4) 21 | } else { 22 | None 23 | } 24 | } 25 | 26 | fn read_pixel(&self, x: i32, y: i32) -> Option { 27 | let i = self.pixel_index(x, y)?; 28 | let format = self.format(); 29 | let src = self.bytes(); 30 | let color = format.decoder()([src[i], src[i + 1], src[i + 2], src[i + 3]]); 31 | Some(color) 32 | } 33 | 34 | fn write_pixel(&mut self, x: i32, y: i32, color: Color) -> bool { 35 | if let Some(i) = self.pixel_index(x, y) { 36 | let format = self.format(); 37 | let dest = self.bytes_mut(); 38 | let color = format.encoder()(color); 39 | dest[i..i + 4].copy_from_slice(&color); 40 | true 41 | } else { 42 | false 43 | } 44 | } 45 | 46 | fn blit(&mut self, x: i32, y: i32, fb: &impl FrameBuffer) { 47 | if let Some(rect) = self.rect().intersect(fb.rect().offset(x, y)) { 48 | let oy = (rect.y - y) as usize; 49 | let ox = (rect.x - x) as usize; 50 | let src_stride = fb.stride(); 51 | let src = fb.bytes(); 52 | let dest_stride = self.stride(); 53 | let dest = self.bytes_mut(); 54 | let l = rect.w as usize * 4; 55 | 56 | for dy in 0..rect.h as usize { 57 | let i = ((rect.y as usize + dy) * dest_stride + rect.x as usize) * 4; 58 | let j = ((oy + dy) * src_stride + ox) * 4; 59 | dest[i..i + l].copy_from_slice(&src[j..j + l]); 60 | } 61 | } 62 | } 63 | 64 | fn fill_rect(&mut self, rect: Rect, color: Color) { 65 | if let Some(rect) = self.rect().intersect(rect) { 66 | let x = rect.x as usize; 67 | let y = rect.y as usize; 68 | let w = rect.w as usize; 69 | let h = rect.h as usize; 70 | let stride = self.stride(); 71 | let color = self.format().encoder()(color); 72 | let dest = self.bytes_mut(); 73 | for oy in 0..h { 
74 | let i = ((y + oy) * stride + x) * 4; 75 | if oy == 0 { 76 | const CHUNK: usize = 16; 77 | for ox in 0..w.min(CHUNK) { 78 | dest[i + ox * 4..i + ox * 4 + 4].copy_from_slice(&color); 79 | } 80 | if 16 < w { 81 | for ox in (1..w / CHUNK).map(|n| n * CHUNK) { 82 | let (a, b) = dest.split_at_mut(i + ox * 4); 83 | b[0..CHUNK * 4].copy_from_slice(&mut a[i..i + CHUNK * 4]); 84 | } 85 | let mw = w % CHUNK; 86 | let (a, b) = dest.split_at_mut(i + (w - mw) * 4); 87 | b[0..mw * 4].copy_from_slice(&mut a[i..i + mw * 4]); 88 | } 89 | } else { 90 | let (a, b) = dest.split_at_mut(i); 91 | b[0..w * 4].copy_from_slice(&mut a[i - stride * 4..i - (stride - w) * 4]); 92 | } 93 | } 94 | } 95 | } 96 | 97 | fn clear(&mut self, color: Color) { 98 | self.fill_rect(self.rect(), color); 99 | } 100 | } 101 | 102 | impl FrameBufferExt for T {} 103 | -------------------------------------------------------------------------------- /ors-kernel/src/sync/queue.rs: -------------------------------------------------------------------------------- 1 | use crate::task; 2 | use heapless::mpmc::MpMcQueue; 3 | 4 | /// `heapless::mpmc::MpMcQueue` with task scheduler integration. 
5 | pub struct Queue { 6 | inner: MpMcQueue, 7 | } 8 | 9 | impl Queue { 10 | pub const fn new() -> Self { 11 | Self { 12 | inner: MpMcQueue::new(), 13 | } 14 | } 15 | 16 | fn empty_chan(&self) -> task::WaitChannel { 17 | task::WaitChannel::from_ptr_index(self, 0) 18 | } 19 | 20 | fn full_chan(&self) -> task::WaitChannel { 21 | task::WaitChannel::from_ptr_index(self, 1) 22 | } 23 | 24 | pub fn enqueue(&self, mut item: T) { 25 | loop { 26 | match self.inner.enqueue(item).or_else(|item| { 27 | task::scheduler().switch( 28 | || { 29 | let ret = self.inner.enqueue(item); 30 | let switch = match ret { 31 | Ok(_) => None, 32 | Err(_) => Some(task::Switch::Blocked(self.full_chan(), None)), 33 | }; 34 | (switch, ret) 35 | }, 36 | 0, 37 | ) 38 | }) { 39 | Ok(()) => break, 40 | Err(i) => item = i, 41 | } 42 | } 43 | task::scheduler().release(self.empty_chan()); 44 | } 45 | 46 | pub fn enqueue_timeout(&self, item: T, timeout: usize) -> Result<(), T> { 47 | self.inner 48 | .enqueue(item) 49 | .or_else(|item| { 50 | task::scheduler().switch( 51 | || { 52 | let ret = self.inner.enqueue(item); 53 | let switch = match ret { 54 | Ok(_) => None, 55 | Err(_) => Some(task::Switch::Blocked(self.full_chan(), Some(timeout))), 56 | }; 57 | (switch, ret) 58 | }, 59 | 0, 60 | ) 61 | }) 62 | .or_else(|item| self.inner.enqueue(item))?; 63 | task::scheduler().release(self.empty_chan()); 64 | Ok(()) 65 | } 66 | 67 | pub fn try_enqueue(&self, item: T) -> Result<(), T> { 68 | self.inner.enqueue(item)?; 69 | task::scheduler().release(self.empty_chan()); 70 | Ok(()) 71 | } 72 | 73 | pub fn dequeue(&self) -> T { 74 | let item = loop { 75 | match self.inner.dequeue().or_else(|| { 76 | task::scheduler().switch( 77 | || { 78 | let ret = self.inner.dequeue(); 79 | let switch = match ret { 80 | Some(_) => None, 81 | None => Some(task::Switch::Blocked(self.empty_chan(), None)), 82 | }; 83 | (switch, ret) 84 | }, 85 | 0, 86 | ) 87 | }) { 88 | Some(item) => break item, 89 | None => {} 90 | } 91 | }; 92 | 
task::scheduler().release(self.full_chan()); 93 | item 94 | } 95 | 96 | pub fn dequeue_timeout(&self, timeout: usize) -> Option { 97 | let item = self 98 | .inner 99 | .dequeue() 100 | .or_else(|| { 101 | task::scheduler().switch( 102 | || { 103 | let ret = self.inner.dequeue(); 104 | let switch = match ret { 105 | Some(_) => None, 106 | None => Some(task::Switch::Blocked(self.empty_chan(), Some(timeout))), 107 | }; 108 | (switch, ret) 109 | }, 110 | 0, 111 | ) 112 | }) 113 | .or_else(|| self.inner.dequeue())?; 114 | task::scheduler().release(self.full_chan()); 115 | Some(item) 116 | } 117 | 118 | pub fn try_dequeue(&self) -> Option { 119 | let value = self.inner.dequeue()?; 120 | task::scheduler().release(self.full_chan()); 121 | Some(value) 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /ors-kernel/src/context.rs: -------------------------------------------------------------------------------- 1 | use crate::cpu::{Cpu, CpuThreadState}; 2 | use crate::segmentation; 3 | use core::mem; 4 | use core::sync::atomic::{AtomicBool, Ordering}; 5 | 6 | #[repr(C, align(16))] 7 | #[derive(Debug)] 8 | pub struct Context { 9 | pub cr3: u64, // 0x00 10 | pub rip: u64, // 0x08 11 | pub rflags: u64, // 0x10 12 | pub _reserved1: u64, // 0x18 13 | pub cs: u64, // 0x20 14 | pub ss: u64, // 0x28 15 | pub fs: u64, // 0x30 16 | pub gs: u64, // 0x38 17 | pub rax: u64, // 0x40 18 | pub rbx: u64, // 0x48 19 | pub rcx: u64, // 0x50 20 | pub rdx: u64, // 0x58 21 | pub rdi: u64, // 0x60 22 | pub rsi: u64, // 0x68 23 | pub rsp: u64, // 0x70 24 | pub rbp: u64, // 0x78 25 | pub r8: u64, // 0x80 26 | pub r9: u64, // 0x88 27 | pub r10: u64, // 0x90 28 | pub r11: u64, // 0x98 29 | pub r12: u64, // 0xa0 30 | pub r13: u64, // 0xa8 31 | pub r14: u64, // 0xb0 32 | pub r15: u64, // 0xb8 33 | pub fxsave_area: [u8; 512], // 0xc0 34 | pub saved: AtomicBool, // 0x2c0, used to confirm the end of the context saving process 35 | pub cts: CpuThreadState, 
36 | } 37 | 38 | impl Context { 39 | pub const INTERRUPT_FLAG: u64 = 0x200; // Maskable interrupt enabled 40 | 41 | pub fn new(stack_end: *mut u8, entry_point: E, args: E::Arg) -> Self { 42 | let mut ctx = Self::uninitialized(); 43 | ctx.cr3 = unsafe { get_cr3() }; 44 | ctx.rflags = Self::INTERRUPT_FLAG | 0x2; // bit=1 is always 1 in eflags 45 | ctx.cs = unsafe { mem::transmute::<_, u16>(segmentation::cs()) } as u64; 46 | ctx.ss = unsafe { mem::transmute::<_, u16>(segmentation::ss()) } as u64; 47 | ctx.rsp = stack_end as u64 & !0xf; // 16-byte aligned for sysv64 48 | ctx.rsp -= 8; // adjust to call 49 | unsafe { *(&mut ctx.fxsave_area[24] as *mut u8 as *mut u32) = 0x1f80 }; // mask all MXCSR exceptions 50 | entry_point.prepare_context(&mut ctx, args); 51 | ctx.saved.store(true, Ordering::SeqCst); 52 | ctx 53 | } 54 | 55 | /// Used to write a context that is currently running. 56 | /// Switching to an uninitialized context is undefined behavior. 57 | pub fn uninitialized() -> Self { 58 | Self { 59 | cr3: 0, 60 | rip: 0, 61 | rflags: 0, 62 | _reserved1: 0, 63 | cs: 0, 64 | ss: 0, 65 | fs: 0, 66 | gs: 0, 67 | rax: 0, 68 | rbx: 0, 69 | rcx: 0, 70 | rdx: 0, 71 | rdi: 0, 72 | rsi: 0, 73 | rsp: 0, 74 | rbp: 0, 75 | r8: 0, 76 | r9: 0, 77 | r10: 0, 78 | r11: 0, 79 | r12: 0, 80 | r13: 0, 81 | r14: 0, 82 | r15: 0, 83 | fxsave_area: [0; 512], 84 | saved: AtomicBool::new(false), 85 | cts: CpuThreadState::new(), 86 | } 87 | } 88 | 89 | /// Mark the context as not saved. 90 | pub fn mark_as_not_saved(&self) { 91 | self.saved.store(false, Ordering::SeqCst); 92 | } 93 | 94 | /// Wait until the context has been saved. 95 | pub fn wait_saved(&self) { 96 | while !self.saved.load(Ordering::Relaxed) { 97 | core::hint::spin_loop(); 98 | } 99 | } 100 | 101 | /// Perform context switching. The current context will be saved to `current_ctx`. 
102 | pub unsafe fn switch(next_ctx: *const Self, current_ctx: *mut Self) { 103 | let mut cpu = Cpu::current().state().lock(); 104 | (*current_ctx).cts = cpu.thread_state; 105 | cpu.thread_state = (*next_ctx).cts; 106 | drop(cpu); 107 | switch_context(next_ctx, current_ctx); 108 | } 109 | } 110 | 111 | extern "C" { 112 | fn get_cr3() -> u64; 113 | fn switch_context(next_ctx: *const Context, current_ctx: *mut Context); 114 | } 115 | 116 | pub trait EntryPoint { 117 | type Arg; 118 | fn prepare_context(self, ctx: &mut Context, arg: Self::Arg); 119 | } 120 | -------------------------------------------------------------------------------- /ors-kernel/src/console.rs: -------------------------------------------------------------------------------- 1 | use crate::graphics::ScreenBuffer; 2 | use crate::interrupts::{ticks, TIMER_FREQ}; 3 | use crate::sync::queue::Queue; 4 | use crate::task; 5 | use alloc::boxed::Box; 6 | use core::convert::TryInto; 7 | use core::fmt; 8 | use core::sync::atomic::{AtomicBool, Ordering}; 9 | use log::trace; 10 | 11 | mod ansi; 12 | mod kbd; 13 | mod screen; 14 | mod theme; 15 | 16 | const OUT_CHUNK_SIZE: usize = 64; 17 | 18 | static IN: Queue = Queue::new(); 19 | static OUT: Queue, 128> = Queue::new(); 20 | static OUT_READY: AtomicBool = AtomicBool::new(false); 21 | static RAW_IN: Queue = Queue::new(); 22 | 23 | pub fn initialize(buf: ScreenBuffer) { 24 | trace!("INITIALIZING console"); 25 | let buf = Box::into_raw(Box::new(buf)) as u64; 26 | task::scheduler().add(task::Priority::MAX, handle_output, buf); 27 | task::scheduler().add(task::Priority::MAX, handle_raw_input, 0); 28 | } 29 | 30 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] 31 | pub enum Input { 32 | Char(char), 33 | Ctrl(char), 34 | Insert, 35 | Home, 36 | End, 37 | PageUp, 38 | PageDown, 39 | ArrowUp, 40 | ArrowDown, 41 | ArrowLeft, 42 | ArrowRight, 43 | } 44 | 45 | pub fn input_queue() -> &'static Queue { 46 | &IN 47 | } 48 | 49 | #[derive(Debug, Clone, Copy)] 50 
| pub struct ConsoleWrite; 51 | 52 | impl fmt::Write for ConsoleWrite { 53 | fn write_str(&mut self, mut s: &str) -> fmt::Result { 54 | if OUT_READY.load(Ordering::Acquire) { 55 | while s.len() > 0 { 56 | let mut i = s.len().min(OUT_CHUNK_SIZE); 57 | while !s.is_char_boundary(i) { 58 | i -= 1; 59 | } 60 | let (chunk, next_s) = s.split_at(i); 61 | OUT.enqueue(chunk.into()); 62 | s = next_s; 63 | } 64 | } 65 | Ok(()) 66 | } 67 | } 68 | 69 | extern "C" fn handle_output(buf: u64) -> ! { 70 | const RENDER_FREQ: usize = 30; 71 | const RENDER_INTERVAL: usize = TIMER_FREQ / RENDER_FREQ; 72 | 73 | let buf = unsafe { Box::from_raw(buf as *mut ScreenBuffer) }; 74 | let mut screen = screen::Screen::new(*buf, theme::OneMonokai); 75 | let mut next_render_ticks = 0; 76 | let mut decoder = ansi::Decoder::new(); 77 | 78 | OUT_READY.store(true, Ordering::SeqCst); 79 | 80 | loop { 81 | let t = ticks(); 82 | if next_render_ticks <= t { 83 | screen.render(); 84 | next_render_ticks = ticks() + RENDER_INTERVAL; 85 | } 86 | 87 | if let Some(out) = OUT.dequeue_timeout(next_render_ticks - t) { 88 | for ch in out.chars() { 89 | match decoder.add_char(ch) { 90 | Some(ansi::DecodeResult::Just(ch)) => screen.put_char(ch), 91 | Some(ansi::DecodeResult::EscapeSequence(es)) => { 92 | screen.handle_escape_sequence(es) 93 | } 94 | None => {} 95 | } 96 | } 97 | } 98 | } 99 | } 100 | 101 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 102 | pub enum RawInput { 103 | Kbd(u8), 104 | Com1(u8), 105 | } 106 | 107 | pub fn accept_raw_input(input: RawInput) { 108 | // Normally this function is called from interrupt handlers, 109 | // so failure of enqueuing is ignored without blocking. 110 | let _ = RAW_IN.try_enqueue(input); 111 | } 112 | 113 | extern "C" fn handle_raw_input(_: u64) -> ! 
{ 114 | let mut kbd_decoder = kbd::Decoder::new(); 115 | let mut com1_decoder = ansi::Decoder::new(); 116 | 117 | loop { 118 | let input = RAW_IN.dequeue(); 119 | if let Some(input) = match input { 120 | RawInput::Kbd(input) => kbd_decoder.add(input), 121 | RawInput::Com1(0x7f) => Some(Input::Char('\x08')), // DEL -> BS 122 | RawInput::Com1(0x0d) => Some(Input::Char('\x0A')), // CR -> LF 123 | RawInput::Com1(input) if input <= 0x7e => com1_decoder 124 | .add_char(char::from(input)) 125 | .and_then(|input| input.try_into().ok()), 126 | _ => { 127 | trace!("console: Unhandled raw-input: {:?}", input); 128 | None 129 | } 130 | } { 131 | let _ = IN.try_enqueue(input); 132 | } 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /ors-kernel/src/cpu.rs: -------------------------------------------------------------------------------- 1 | //! This module works on the assumption that the processor information is initialized by 2 | //! calling `initialize` before any processor other than BSP is enabled. 
3 | 4 | use crate::acpi; 5 | use crate::task::Task; 6 | use crate::x64; 7 | use ors_common::non_contiguous::Array; 8 | use spin::{Mutex, Once}; 9 | 10 | static SYSTEM_INFO: Once = Once::new(); 11 | static BOOT_STRAP_CPU_STATE: Mutex = Mutex::new(CpuState::new()); 12 | 13 | #[derive(Debug)] 14 | struct SystemInfo { 15 | lapic: x64::LApic, 16 | boot_strap_lapic_id: u32, 17 | application_cpu_state: Array, 64>, 18 | } 19 | 20 | pub fn initialize() { 21 | SYSTEM_INFO.call_once(move || { 22 | let processor_info = acpi::processor_info(); 23 | let mut application_cpu_state = Array::new(); 24 | for ap in processor_info.application_processors.iter() { 25 | application_cpu_state.insert(ap.local_apic_id, Mutex::new(CpuState::new())); 26 | } 27 | SystemInfo { 28 | lapic: x64::LApic::new(acpi::apic_info().local_apic_address), 29 | boot_strap_lapic_id: processor_info.boot_processor.local_apic_id, 30 | application_cpu_state, 31 | } 32 | }); 33 | } 34 | 35 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 36 | pub struct Cpu(CpuKind); 37 | 38 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 39 | enum CpuKind { 40 | BootStrap( 41 | Option, // known lapic_id (== SYSTEM_INFO.boot_strap_lapic_id) 42 | ), 43 | Application( 44 | u32, // lapic_id (!= SYSTEM_INFO.boot_strap_lapic_id) 45 | ), 46 | } 47 | 48 | impl Cpu { 49 | pub fn current() -> Self { 50 | if let Some(info) = SYSTEM_INFO.get() { 51 | let id = unsafe { info.lapic.apic_id() }; 52 | if id == info.boot_strap_lapic_id { 53 | Self(CpuKind::BootStrap(Some(id))) 54 | } else { 55 | Self(CpuKind::Application(id)) 56 | } 57 | } else { 58 | Self(CpuKind::BootStrap(None)) // works under the module assumption 59 | } 60 | } 61 | 62 | pub fn boot_strap() -> Self { 63 | Self(CpuKind::BootStrap(None)) 64 | } 65 | 66 | pub fn list() -> impl Iterator { 67 | core::iter::once(CpuKind::BootStrap(None)) 68 | .chain(SYSTEM_INFO.get().into_iter().flat_map(|info| { 69 | info.application_cpu_state 70 | .iter() 
71 | .map(|(lapic_id, _)| CpuKind::Application(*lapic_id)) 72 | })) 73 | .map(|kind| Self(kind)) 74 | } 75 | 76 | pub fn lapic_id(self) -> Option { 77 | match self.0 { 78 | CpuKind::BootStrap(Some(lapic_id)) => Some(lapic_id), 79 | CpuKind::BootStrap(None) => SYSTEM_INFO.get().map(|info| info.boot_strap_lapic_id), 80 | CpuKind::Application(lapic_id) => Some(lapic_id), 81 | } 82 | } 83 | 84 | /// Get the state of this CPU. 85 | /// This Mutex does not get interrupt lock (`crate::interrupts::Cli`). Moreover, acquiring and 86 | /// releasing `crate::sync::mutex::Mutex` will lock this mutex through interrupt lock. 87 | /// We need to be careful about deadlocks when using this method. 88 | pub fn state(self) -> &'static Mutex { 89 | match self.0 { 90 | CpuKind::BootStrap(_) => &BOOT_STRAP_CPU_STATE, 91 | CpuKind::Application(lapic_id) => SYSTEM_INFO 92 | .get() 93 | .expect("Non-BSP CPU found before cpu::initialize") 94 | .application_cpu_state 95 | .get(lapic_id) 96 | .expect("Unknown CPU"), 97 | } 98 | } 99 | } 100 | 101 | #[derive(Debug)] 102 | pub struct CpuState { 103 | pub running_task: Option, 104 | pub thread_state: CpuThreadState, 105 | } 106 | 107 | impl CpuState { 108 | const fn new() -> Self { 109 | Self { 110 | running_task: None, 111 | thread_state: CpuThreadState::new(), 112 | } 113 | } 114 | } 115 | 116 | #[repr(C)] 117 | #[derive(Debug, Clone, Copy)] 118 | pub struct CpuThreadState { 119 | pub ncli: u32, // Depth of pushcli (processing with interrupts disabled) nesting 120 | pub zcli: bool, // Were interrupts disabled before pushcli? 
121 | } 122 | 123 | impl CpuThreadState { 124 | pub const fn new() -> Self { 125 | Self { 126 | ncli: 0, 127 | zcli: false, // interrupts are enabled by default 128 | } 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /ors-kernel/src/console/screen.rs: -------------------------------------------------------------------------------- 1 | use super::ansi::{Color, ColorScheme, EscapeSequence, Sgr}; 2 | use crate::graphics::{FontStyle, FrameBuffer, MonospaceFont, MonospaceTextBuffer}; 3 | 4 | const FONT_SIZE: u32 = 14; 5 | static FONT_NORMAL: &[u8] = include_bytes!("Tamzen7x14r.ttf"); 6 | static FONT_BOLD: &[u8] = include_bytes!("Tamzen7x14b.ttf"); 7 | 8 | pub struct Screen<'a, T, S> { 9 | buf: MonospaceTextBuffer<'a, T>, 10 | theme: S, 11 | fg: Color, 12 | bg: Color, 13 | font_style: FontStyle, 14 | } 15 | 16 | impl<'a, T: FrameBuffer, S: ColorScheme> Screen<'a, T, S> { 17 | pub fn new(buf: T, theme: S) -> Self { 18 | let format = buf.format(); 19 | Self { 20 | buf: MonospaceTextBuffer::new( 21 | buf, 22 | MonospaceFont::new(FONT_SIZE, FONT_NORMAL, FONT_BOLD, format), 23 | ), 24 | theme, 25 | fg: Color::Default, 26 | bg: Color::Default, 27 | font_style: FontStyle::Normal, 28 | } 29 | } 30 | 31 | pub fn render(&mut self) { 32 | self.buf.render(); 33 | } 34 | 35 | pub fn put_char(&mut self, ch: char) { 36 | self.buf.put( 37 | ch, 38 | self.theme.get_fg(self.fg).into(), 39 | self.theme.get_bg(self.bg).into(), 40 | self.font_style, 41 | ); 42 | } 43 | 44 | pub fn erase( 45 | &mut self, 46 | before_cursor_lines: bool, 47 | before_cursor_chars: bool, 48 | after_cursor_chars: bool, 49 | after_cursor_lines: bool, 50 | ) { 51 | self.buf.erase( 52 | self.theme.get_bg(self.bg).into(), 53 | before_cursor_lines, 54 | before_cursor_chars, 55 | after_cursor_chars, 56 | after_cursor_lines, 57 | ); 58 | } 59 | 60 | pub fn handle_escape_sequence(&mut self, es: EscapeSequence) { 61 | use EscapeSequence::*; 62 | 63 | match es { 64 | 
CursorUp(n) => self.buf.move_cursor(0, -(n as i32)), 65 | CursorDown(n) => self.buf.move_cursor(0, n as i32), 66 | CursorForward(n) => self.buf.move_cursor(n as i32, 0), 67 | CursorBack(n) => self.buf.move_cursor(-(n as i32), 0), 68 | CursorNextLine(n) => self.buf.move_cursor(i32::MIN, n as i32), 69 | CursorPreviousLine(n) => self.buf.move_cursor(i32::MIN, -(n as i32)), 70 | CursorHorizontalAbsolute(n) => self.buf.set_cursor(Some(n - 1), None), 71 | CursorPosition(n, m) => self.buf.set_cursor(Some(m - 1), Some(n - 1)), 72 | EraseInDisplay(0) => self.erase(false, false, true, true), 73 | EraseInDisplay(1) => self.erase(true, true, false, false), 74 | EraseInDisplay(2) => self.erase(true, true, true, true), 75 | EraseInLine(0) => self.erase(false, false, true, false), 76 | EraseInLine(1) => self.erase(false, true, false, false), 77 | EraseInLine(2) => self.erase(false, true, true, false), 78 | HorizontalVerticalPosition(n, m) => self.buf.set_cursor(Some(m - 1), Some(n - 1)), 79 | Sgr(a) => self.handle_sgr(a), 80 | Sgr2(a, b) => { 81 | self.handle_sgr(a); 82 | self.handle_sgr(b); 83 | } 84 | Sgr3(a, b, c) => { 85 | self.handle_sgr(a); 86 | self.handle_sgr(b); 87 | self.handle_sgr(c); 88 | } 89 | _ => {} 90 | } 91 | } 92 | 93 | pub fn handle_sgr(&mut self, sgr: Sgr) { 94 | use Sgr::*; 95 | 96 | match sgr { 97 | Reset => { 98 | self.fg = Color::Default; 99 | self.bg = Color::Default; 100 | self.font_style = FontStyle::Normal; 101 | } 102 | Bold => { 103 | self.font_style = FontStyle::Bold; 104 | self.fg = self.fg.brighter(); 105 | } 106 | Faint | ResetBoldFaint => { 107 | self.font_style = FontStyle::Normal; 108 | self.fg = self.fg.dimmer(); 109 | } 110 | Italic(_) => {} // Unsupported 111 | Underline(_) => {} // Unsupported 112 | Blinking(_) => {} // Unsupported 113 | Inverse(_) => {} // Unsupported 114 | Hidden(_) => {} // Unsupported 115 | Strikethrough(_) => {} // Unsupported 116 | Fg(color) => { 117 | self.fg = if self.font_style.is_bold() { 118 | color.brighter() 
119 | } else { 120 | color.dimmer() 121 | } 122 | } 123 | Bg(color) => self.bg = color, 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /ors-kernel/src/devices/virtio/configuration.rs: -------------------------------------------------------------------------------- 1 | use crate::devices::pci; 2 | use crate::x64; 3 | 4 | // const DEVICE_STATUS_FAILED: u8 = 128; // something went wrong in the guest 5 | const DEVICE_STATUS_ACKNOWLEDGE: u8 = 1; // the guest OS has found the device and recognized it 6 | const DEVICE_STATUS_DRIVER: u8 = 2; // the guest OS knows how to drive the device 7 | const DEVICE_STATUS_FEATURES_OK: u8 = 8; // the driver has acknowledged all the features it understands, and feature negotiation is complete 8 | const DEVICE_STATUS_DRIVER_OK: u8 = 4; // the driver is set up and ready to drive the device 9 | 10 | #[derive(Debug, Clone, Copy)] 11 | pub struct Configuration { 12 | addr: u16, 13 | msi_x_enabled: bool, 14 | } 15 | 16 | impl Configuration { 17 | pub fn new(addr: u16, msi_x_enabled: bool) -> Self { 18 | Self { 19 | addr, 20 | msi_x_enabled, 21 | } 22 | } 23 | 24 | pub unsafe fn from_pci_device(device: pci::Device) -> Result { 25 | assert!(device.is_virtio()); 26 | // > Legacy drivers skipped the Device Layout Detection step, 27 | // > assuming legacy device configuration space in BAR0 in I/O space unconditionally. 28 | let io_addr = device 29 | .read_bar(0) 30 | .io_port() 31 | .ok_or("BAR0 is not an I/O address")?; 32 | 33 | Ok(Self::new( 34 | io_addr, 35 | device.msi_x().map_or(false, |m| m.is_enabled()), 36 | )) 37 | } 38 | 39 | unsafe fn read(self, offset: u16) -> T { 40 | x64::Port::new(self.addr + offset).read() 41 | } 42 | 43 | unsafe fn write(self, offset: u16, value: T) { 44 | x64::Port::new(self.addr + offset).write(value) 45 | } 46 | 47 | /// Perform general driver initialization. 
48 | /// After calling this, caller must perform device-specific setup (including virtqueue setup) 49 | /// and then call `Configuration::set_driver_ok`. 50 | pub unsafe fn initialize(self, negotiate: impl FnOnce(u32) -> u32) -> Result<(), &'static str> { 51 | // 3.1.1 Driver Requirements: Device Initialization 52 | self.set_device_status(self.device_status() | DEVICE_STATUS_ACKNOWLEDGE); 53 | self.set_device_status(self.device_status() | DEVICE_STATUS_DRIVER); 54 | const RING_INDIRECT_DESC: u32 = 1 << 28; 55 | const RING_EVENT_IDX: u32 = 1 << 29; 56 | let features = self.device_features(); 57 | self.set_driver_features(negotiate(features) & !RING_INDIRECT_DESC & !RING_EVENT_IDX); 58 | self.set_device_status(self.device_status() | DEVICE_STATUS_FEATURES_OK); 59 | 60 | if (self.device_status() & DEVICE_STATUS_FEATURES_OK) == 0 { 61 | return Err("FEATURES_OK"); 62 | } 63 | 64 | Ok(()) 65 | } 66 | 67 | pub unsafe fn set_driver_ok(self) { 68 | self.set_device_status(self.device_status() | DEVICE_STATUS_DRIVER_OK); 69 | } 70 | 71 | unsafe fn device_features(self) -> u32 { 72 | self.read(0) 73 | } 74 | 75 | unsafe fn set_driver_features(self, value: u32) { 76 | self.write(0x04, value) 77 | } 78 | 79 | pub unsafe fn queue_address(self) -> u32 { 80 | self.read(0x08) 81 | } 82 | 83 | pub unsafe fn set_queue_address(self, value: u32) { 84 | self.write(0x08, value) 85 | } 86 | 87 | pub unsafe fn queue_size(self) -> u32 { 88 | self.read(0x0c) 89 | } 90 | 91 | // On Legacy Interface, 92 | // > There was no mechanism to negotiate the queue size. 
93 | 94 | pub unsafe fn queue_select(self) -> u16 { 95 | self.read(0x0e) 96 | } 97 | 98 | pub unsafe fn set_queue_select(self, value: u16) { 99 | self.write(0x0e, value) 100 | } 101 | 102 | pub unsafe fn set_queue_notify(self, value: u16) { 103 | self.write(0x10, value) 104 | } 105 | 106 | unsafe fn device_status(self) -> u8 { 107 | self.read(0x12) 108 | } 109 | 110 | unsafe fn set_device_status(self, value: u8) { 111 | self.write(0x12, value) 112 | } 113 | 114 | // 0x13: ISR status (Unused when MSI-X is enabled) 115 | 116 | pub unsafe fn set_config_msix_vector(self, value: u16) { 117 | assert!(self.msi_x_enabled); 118 | self.write(0x14, value) 119 | } 120 | 121 | pub unsafe fn set_queue_msix_vector(self, value: u16) { 122 | assert!(self.msi_x_enabled); 123 | self.write(0x16, value) 124 | } 125 | 126 | fn device_specific_offset(self) -> u16 { 127 | if self.msi_x_enabled { 128 | 0x18 129 | } else { 130 | 0x14 131 | } 132 | } 133 | 134 | pub unsafe fn read_device_specific(self, offset: u16) -> T { 135 | self.read(self.device_specific_offset() + offset) 136 | } 137 | 138 | pub unsafe fn write_device_specific(self, offset: u16, value: T) { 139 | self.write(self.device_specific_offset() + offset, value) 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /ors-kernel/src/x64.rs: -------------------------------------------------------------------------------- 1 | //! 
Re-exporting x86_64 crate items and some additional definitions 2 | 3 | pub use x86_64::instructions::hlt; 4 | pub use x86_64::instructions::interrupts; 5 | pub use x86_64::instructions::port::{Port, PortRead, PortWrite, PortWriteOnly}; 6 | pub use x86_64::instructions::segmentation::{Segment, CS, DS, ES, FS, GS, SS}; 7 | pub use x86_64::instructions::tables::load_tss; 8 | pub use x86_64::registers::control::{Cr2, Cr3, Cr3Flags}; 9 | pub use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector}; 10 | pub use x86_64::structures::idt::{ 11 | InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode, 12 | }; 13 | pub use x86_64::structures::paging::page_table::PageTableFlags; 14 | pub use x86_64::structures::paging::{ 15 | FrameAllocator, FrameDeallocator, Mapper, OffsetPageTable, PageSize, PageTable, PhysFrame, 16 | Size1GiB, Size2MiB, Size4KiB, Translate, 17 | }; 18 | pub use x86_64::structures::tss::TaskStateSegment; 19 | pub use x86_64::structures::DescriptorTablePointer; 20 | pub use x86_64::{PhysAddr, PrivilegeLevel, VirtAddr}; 21 | 22 | use core::ptr; 23 | 24 | #[derive(Debug, Clone, Copy)] 25 | pub struct LApic { 26 | ptr: *mut u32, 27 | } 28 | 29 | impl LApic { 30 | pub fn new(addr: u64) -> Self { 31 | Self { 32 | ptr: addr as *mut u32, 33 | } 34 | } 35 | 36 | pub unsafe fn read(&self, offset: usize) -> u32 { 37 | ptr::read_volatile(self.ptr.add(offset)) 38 | } 39 | 40 | pub unsafe fn write(&self, offset: usize, value: u32) { 41 | ptr::write_volatile(self.ptr.add(offset), value) 42 | } 43 | 44 | pub unsafe fn apic_id(&self) -> u32 { 45 | self.read(0x0020 / 4) >> 24 46 | } 47 | 48 | pub unsafe fn ver(&self) -> u32 { 49 | self.read(0x0030 / 4) 50 | } 51 | 52 | pub unsafe fn set_tpr(&self, value: u32) { 53 | self.write(0x0080 / 4, value) 54 | } 55 | 56 | pub unsafe fn set_eoi(&self, value: u32) { 57 | self.write(0x00B0 / 4, value) 58 | } 59 | 60 | pub unsafe fn set_svr(&self, value: u32) { 61 | self.write(0x00F0 / 4, value) 62 | } 
63 | 64 | pub unsafe fn icrlo(&self) -> u32 { 65 | self.read(0x0300 / 4) 66 | } 67 | 68 | pub unsafe fn set_icrlo(&self, value: u32) { 69 | self.write(0x0300 / 4, value) 70 | } 71 | 72 | pub unsafe fn set_icrhi(&self, value: u32) { 73 | self.write(0x0310 / 4, value) 74 | } 75 | 76 | // Local Vector Table 0 (TIMER) 77 | pub unsafe fn set_timer(&self, value: u32) { 78 | self.write(0x0320 / 4, value) 79 | } 80 | 81 | pub unsafe fn set_pcint(&self, value: u32) { 82 | self.write(0x0340 / 4, value) 83 | } 84 | 85 | // Local Vector Table 1 (LINT0) 86 | pub unsafe fn set_lint0(&self, value: u32) { 87 | self.write(0x0350 / 4, value) 88 | } 89 | 90 | // Local Vector Table 2 (LINT1) 91 | pub unsafe fn set_lint1(&self, value: u32) { 92 | self.write(0x0360 / 4, value) 93 | } 94 | 95 | // Local Vector Table 3 (ERROR) 96 | pub unsafe fn set_error(&self, value: u32) { 97 | self.write(0x0370 / 4, value) 98 | } 99 | 100 | // Timer Initial Count 101 | pub unsafe fn set_ticr(&self, value: u32) { 102 | self.write(0x0380 / 4, value) 103 | } 104 | 105 | // Timer Current Count 106 | pub unsafe fn tccr(&self) -> u32 { 107 | self.read(0x0390 / 4) 108 | } 109 | 110 | // Timer Divide Configuration 111 | pub unsafe fn set_tdcr(&self, value: u32) { 112 | self.write(0x03E0 / 4, value) 113 | } 114 | } 115 | 116 | unsafe impl Sync for LApic {} 117 | 118 | unsafe impl Send for LApic {} 119 | 120 | #[derive(Debug)] 121 | pub struct IoApic { 122 | ptr: *mut IoApicMmio, 123 | } 124 | 125 | #[repr(C)] 126 | struct IoApicMmio { 127 | reg: u32, 128 | pad: [u32; 3], 129 | data: u32, 130 | } 131 | 132 | impl IoApic { 133 | pub fn new(addr: u64) -> Self { 134 | Self { 135 | ptr: addr as *mut IoApicMmio, 136 | } 137 | } 138 | 139 | pub unsafe fn read(&self, reg: u32) -> u32 { 140 | ptr::write_volatile(&mut (*self.ptr).reg, reg); 141 | ptr::read_volatile(&mut (*self.ptr).data) 142 | } 143 | 144 | pub unsafe fn write(&self, reg: u32, value: u32) { 145 | ptr::write_volatile(&mut (*self.ptr).reg, reg); 146 | 
ptr::write_volatile(&mut (*self.ptr).data, value); 147 | } 148 | 149 | pub unsafe fn apic_id(&self) -> u8 { 150 | (self.read(0x00) >> 24) as u8 151 | } 152 | 153 | pub unsafe fn ver(&self) -> u32 { 154 | self.read(0x01) 155 | } 156 | 157 | pub unsafe fn redirection_table_at(&self, index: u32) -> u64 { 158 | // configuration bits (low) 159 | (self.read(0x10 + 2 * index) as u64) | 160 | // a bitmask telling which CPUs can serve that interrupt (high) 161 | ((self.read(0x10 + 2 * index + 1) as u64) << 32) 162 | } 163 | 164 | pub unsafe fn set_redirection_table_at(&self, index: u32, value: u64) { 165 | self.write(0x10 + 2 * index, value as u32); 166 | self.write(0x10 + 2 * index + 1, (value >> 32) as u32); 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /ors-kernel/src/allocator.rs: -------------------------------------------------------------------------------- 1 | use crate::paging::{as_phys_addr, as_virt_addr}; 2 | use crate::phys_memory::{frame_manager, Frame}; 3 | use crate::sync::spin::Spin; 4 | use crate::x64; 5 | use alloc::alloc::{GlobalAlloc, Layout}; 6 | use core::ptr; 7 | use log::trace; 8 | 9 | #[derive(Debug)] 10 | enum AllocationMode { 11 | Block(usize), 12 | Frame(usize), 13 | } 14 | 15 | impl From for AllocationMode { 16 | fn from(l: Layout) -> Self { 17 | let size = l.size().max(l.align()); 18 | match BLOCK_SIZES.iter().position(|s| *s >= size) { 19 | Some(index) => Self::Block(index), 20 | None => Self::Frame((size + Frame::SIZE - 1) / Frame::SIZE), 21 | } 22 | } 23 | } 24 | 25 | const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048]; 26 | 27 | pub struct KernelAllocator { 28 | available_blocks: Spin<[*mut u8; BLOCK_SIZES.len()]>, 29 | } 30 | 31 | impl KernelAllocator { 32 | pub const fn new() -> Self { 33 | Self { 34 | available_blocks: Spin::new([ptr::null_mut(); BLOCK_SIZES.len()]), 35 | } 36 | } 37 | } 38 | 39 | unsafe impl Sync for KernelAllocator {} 40 | 41 | unsafe impl 
GlobalAlloc for KernelAllocator { 42 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 43 | match layout.into() { 44 | AllocationMode::Block(index) => { 45 | let mut available_blocks = self.available_blocks.lock(); 46 | let mut ptr = available_blocks[index]; 47 | if ptr.is_null() { 48 | ptr = allocate_frame_for_block(index); 49 | } 50 | if !ptr.is_null() { 51 | available_blocks[index] = (ptr as *const u64).read() as *mut u8; 52 | } 53 | trace!( 54 | "allocator: allocate block (size = {}) -> {:?}", 55 | BLOCK_SIZES[index], 56 | x64::VirtAddr::from_ptr(ptr) 57 | ); 58 | ptr 59 | } 60 | AllocationMode::Frame(num) => match frame_manager().allocate(num) { 61 | Ok(frame) => { 62 | let addr = as_virt_addr(frame.phys_addr()).unwrap(); 63 | trace!("allocator: allocate frame (num = {}) -> {:?}", num, addr); 64 | addr.as_mut_ptr() 65 | } 66 | Err(_) => ptr::null_mut(), 67 | }, 68 | } 69 | } 70 | 71 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 72 | match layout.into() { 73 | AllocationMode::Block(index) => { 74 | trace!( 75 | "allocator: deallocate block (size = {}) -> {:?}", 76 | BLOCK_SIZES[index], 77 | x64::VirtAddr::from_ptr(ptr) 78 | ); 79 | let mut available_blocks = self.available_blocks.lock(); 80 | let next = available_blocks[index]; 81 | (ptr as *mut u64).write(next as u64); 82 | available_blocks[index] = ptr; 83 | } 84 | AllocationMode::Frame(num) => { 85 | let addr = x64::VirtAddr::from_ptr(ptr as *const u8); 86 | trace!("allocator: deallocate frame (num = {}) -> {:?}", num, addr); 87 | let frame = Frame::from_phys_addr(as_phys_addr(addr).unwrap()); 88 | frame_manager().free(frame, num); 89 | } 90 | } 91 | } 92 | } 93 | 94 | fn allocate_frame_for_block(index: usize) -> *mut u8 { 95 | let block_size = BLOCK_SIZES[index]; 96 | let num_blocks_per_frame = Frame::SIZE / block_size; 97 | // NOTE: Frames for AllocationMode::Block are never deallocated 98 | let ptr: *mut u8 = match frame_manager().allocate(1) { 99 | Ok(frame) => 
as_virt_addr(frame.phys_addr()).unwrap().as_mut_ptr(), 100 | Err(_) => return ptr::null_mut(), 101 | }; 102 | trace!( 103 | "allocator: allocate_frame_for_block(size = {}) -> {:?}", 104 | block_size, 105 | x64::VirtAddr::from_ptr(ptr) 106 | ); 107 | for i in 0..num_blocks_per_frame { 108 | let current = unsafe { ptr.add(i * block_size) }; 109 | let next = if i == num_blocks_per_frame - 1 { 110 | ptr::null_mut() 111 | } else { 112 | unsafe { current.add(block_size) } 113 | }; 114 | unsafe { (current as *mut u64).write(next as u64) }; 115 | } 116 | ptr 117 | } 118 | 119 | #[cfg(test)] 120 | mod tests { 121 | use alloc::boxed::Box; 122 | use log::info; 123 | 124 | #[test_case] 125 | fn test_frame() { 126 | info!("TESTING allocator::test_frame"); 127 | 128 | let a = Box::new([0u8; 4096]); 129 | let b = Box::new([0u8; 4096]); 130 | drop(a); 131 | let c = Box::new([0u8; 4096]); 132 | drop(b); 133 | drop(c); 134 | 135 | let d = Box::new([0u8; 4096 + 2048]); 136 | let e = Box::new([0u8; 4096 * 2]); 137 | let f = Box::new([0u8; 4096 * 3]); 138 | drop(d); 139 | drop(e); 140 | drop(f); 141 | } 142 | 143 | #[test_case] 144 | fn test_block1() { 145 | info!("TESTING allocator::test_block1"); 146 | 147 | let a = Box::new([0u8; 8]); 148 | let b = Box::new([0u8; 8]); 149 | drop(b); 150 | let c = Box::new([0u8; 8]); 151 | let d = Box::new([0u8; 8]); 152 | drop(d); 153 | drop(a); 154 | let e = Box::new([0u8; 8]); 155 | drop(c); 156 | drop(e); 157 | let _ = [Box::new([0u8; 8]), Box::new([0u8; 8]), Box::new([0u8; 8])]; 158 | } 159 | 160 | #[test_case] 161 | fn test_block2() { 162 | info!("TESTING allocator::test_block2"); 163 | 164 | let a = Box::new([0u8; 1024]); 165 | let b = Box::new([0u8; 1024]); 166 | let c = Box::new([0u8; 1024]); 167 | let d = Box::new([0u8; 1024]); 168 | let e = Box::new([0u8; 1024]); 169 | drop(b); 170 | drop(d); 171 | let f = Box::new([0u8; 1024]); 172 | let g = Box::new([0u8; 1024]); 173 | let h = Box::new([0u8; 1024]); 174 | drop(a); 175 | drop(c); 176 | 
drop(e); 177 | drop(g); 178 | drop(f); 179 | drop(h); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /ors-common/src/non_contiguous.rs: -------------------------------------------------------------------------------- 1 | use core::convert::TryInto; 2 | use core::fmt::Debug; 3 | use core::iter::FromIterator; 4 | use core::mem; 5 | use core::mem::MaybeUninit; 6 | 7 | /// Allocation-free fixed size array with non-contiguous indices. 8 | #[derive(Debug, Clone)] 9 | pub struct Array { 10 | len: usize, 11 | buckets: [Option<(I, V)>; N], 12 | } 13 | 14 | pub trait ArrayIndex: Eq + Copy { 15 | fn array_index(self) -> usize; 16 | } 17 | 18 | impl ArrayIndex for T 19 | where 20 | T: Eq + Copy + TryInto, 21 | >::Error: Debug, 22 | { 23 | fn array_index(self) -> usize { 24 | self.try_into().unwrap() 25 | } 26 | } 27 | 28 | impl Array { 29 | pub fn new() -> Self { 30 | // TODO: [const { None }; N] with inline const expressions 31 | let mut buckets = MaybeUninit::uninit_array(); 32 | for bucket in &mut buckets[..] 
{ 33 | bucket.write(None); 34 | } 35 | Self { 36 | len: 0, 37 | buckets: unsafe { MaybeUninit::array_assume_init(buckets) }, 38 | } 39 | } 40 | 41 | pub fn len(&self) -> usize { 42 | self.len 43 | } 44 | 45 | pub fn clear(&mut self) { 46 | for bucket in self.buckets.iter_mut() { 47 | *bucket = None; 48 | } 49 | self.len = 0; 50 | } 51 | 52 | pub fn get(&self, i: I) -> Option<&V> { 53 | match self.bucket_index(i) { 54 | Some(BucketIndex::Occupied(index)) => { 55 | let bucket = self.buckets[index].as_ref(); 56 | Some(&bucket.unwrap().1) 57 | } 58 | _ => None, 59 | } 60 | } 61 | 62 | pub fn get_mut(&mut self, i: I) -> Option<&mut V> { 63 | match self.bucket_index(i) { 64 | Some(BucketIndex::Occupied(index)) => { 65 | let bucket = self.buckets[index].as_mut(); 66 | Some(&mut bucket.unwrap().1) 67 | } 68 | _ => None, 69 | } 70 | } 71 | 72 | pub fn insert(&mut self, i: I, v: V) -> Option { 73 | match self.bucket_index(i).expect("Array is full") { 74 | BucketIndex::Vacant(index) => { 75 | self.buckets[index] = Some((i, v)); 76 | self.len += 1; 77 | None 78 | } 79 | BucketIndex::Occupied(index) => { 80 | let bucket = self.buckets[index].as_mut(); 81 | Some(mem::replace(&mut bucket.unwrap().1, v)) 82 | } 83 | } 84 | } 85 | 86 | pub fn iter(&self) -> impl Iterator { 87 | self.into_iter() 88 | } 89 | 90 | pub fn iter_mut(&mut self) -> impl Iterator { 91 | self.into_iter() 92 | } 93 | 94 | fn bucket_index(&self, i: I) -> Option { 95 | for offset in 0..N { 96 | let index = (i.array_index() + offset) % N; // open addressing 97 | match self.buckets[index] { 98 | None => return Some(BucketIndex::Vacant(index)), 99 | Some((j, _)) if i == j => return Some(BucketIndex::Occupied(index)), 100 | Some(_) => {} 101 | } 102 | } 103 | None 104 | } 105 | } 106 | 107 | #[derive(Debug)] 108 | enum BucketIndex { 109 | Vacant(usize), 110 | Occupied(usize), // TODO: Support remove operation with Robin Hood Hashing method 111 | } 112 | 113 | impl Default for Array { 114 | fn default() -> Self { 
115 | Self::new() 116 | } 117 | } 118 | 119 | impl FromIterator<(I, V)> for Array { 120 | fn from_iter>(iter: T) -> Self { 121 | let mut array = Self::new(); 122 | for (i, v) in iter { 123 | array.insert(i, v); 124 | } 125 | array 126 | } 127 | } 128 | 129 | impl IntoIterator for Array { 130 | type Item = (I, V); 131 | type IntoIter = core::iter::Flatten<<[Option<(I, V)>; N] as IntoIterator>::IntoIter>; 132 | 133 | fn into_iter(self) -> Self::IntoIter { 134 | IntoIterator::into_iter(self.buckets).flatten() 135 | } 136 | } 137 | 138 | impl<'a, I: ArrayIndex, V, const N: usize> IntoIterator for &'a Array { 139 | type Item = &'a (I, V); 140 | type IntoIter = core::iter::Flatten>>; 141 | 142 | fn into_iter(self) -> Self::IntoIter { 143 | self.buckets.iter().flatten() 144 | } 145 | } 146 | 147 | impl<'a, I: ArrayIndex, V, const N: usize> IntoIterator for &'a mut Array { 148 | type Item = &'a mut (I, V); 149 | type IntoIter = core::iter::Flatten>>; 150 | 151 | fn into_iter(self) -> Self::IntoIter { 152 | self.buckets.iter_mut().flatten() 153 | } 154 | } 155 | 156 | #[cfg(test)] 157 | mod tests { 158 | use super::*; 159 | use alloc::collections::BTreeMap; 160 | use alloc::vec; 161 | 162 | #[test] 163 | fn test_array() { 164 | let mut array: Array = Array::new(); 165 | assert_eq!(array.len(), 0); 166 | assert_eq!(array.insert(1, 1), None); 167 | assert_eq!(array.insert(2, 2), None); 168 | assert_eq!(array.insert(4, 3), None); 169 | assert_eq!(array.insert(8, 4), None); 170 | assert_eq!(array.len(), 4); 171 | 172 | assert_eq!(array.get(0), None); 173 | assert_eq!(array.get(1), Some(&1)); 174 | assert_eq!(array.get(2), Some(&2)); 175 | assert_eq!(array.get(3), None); 176 | 177 | assert_eq!(array.insert(3, 5), None); 178 | assert_eq!(array.insert(4, 6), Some(3)); 179 | assert_eq!(array.insert(5, 7), None); 180 | assert_eq!(array.len(), 6); 181 | 182 | assert_eq!(array.insert(17, 8), None); 183 | assert_eq!(array.insert(18, 9), None); 184 | 185 | assert_eq!( 186 | 
array.into_iter().collect::>(), 187 | vec![ 188 | (1, 1), 189 | (2, 2), 190 | (3, 5), 191 | (4, 6), 192 | (5, 7), 193 | (8, 4), 194 | (17, 8), 195 | (18, 9) 196 | ] 197 | .into_iter() 198 | .collect() 199 | ); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /ors-loader/src/main.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![no_main] 3 | #![feature(abi_efiapi)] 4 | #![feature(vec_into_raw_parts)] 5 | 6 | #[macro_use] 7 | extern crate alloc; 8 | 9 | #[macro_use] 10 | mod fs; 11 | 12 | use alloc::vec::Vec; 13 | use core::{mem, slice}; 14 | use goblin::elf; 15 | use log::trace; 16 | use ors_common::{frame_buffer, memory_map}; 17 | use uefi::prelude::*; 18 | use uefi::proto::console::gop::{GraphicsOutput, PixelFormat}; 19 | use uefi::table::boot::{AllocateType, MemoryDescriptor, MemoryType}; 20 | use uefi::table::cfg::ACPI_GUID; 21 | use uefi::table::Runtime; 22 | use x86_64::instructions::hlt; 23 | 24 | const UEFI_PAGE_SIZE: usize = 0x1000; 25 | 26 | #[entry] 27 | fn efi_main(image: Handle, mut st: SystemTable) -> Status { 28 | uefi_services::init(&mut st).unwrap_success(); 29 | 30 | st.stdout().reset(false).unwrap_success(); 31 | 32 | trace!("dump_memory_map"); 33 | dump_memory_map("memmap", image, &st); 34 | 35 | trace!("load_kernel"); 36 | let entry_point_addr = load_kernel("ors-kernel.elf", image, &st); 37 | 38 | trace!("entry_point_addr = 0x{:x}", entry_point_addr); 39 | let entry_point: extern "sysv64" fn(&frame_buffer::FrameBuffer, &memory_map::MemoryMap, u64) = 40 | unsafe { mem::transmute(entry_point_addr) }; 41 | 42 | trace!("get_frame_buffer"); 43 | let frame_buffer = get_frame_buffer(st.boot_services()); 44 | 45 | trace!("get_rsdp"); 46 | let rsdp = get_rsdp(&st); 47 | 48 | trace!("exit_boot_services"); 49 | let (_st, memory_map) = exit_boot_services(image, st); 50 | 51 | entry_point(&frame_buffer, &memory_map, rsdp); 52 | 53 | loop { 54 | 
hlt() 55 | } 56 | } 57 | 58 | fn get_rsdp(st: &SystemTable) -> u64 { 59 | st.config_table() 60 | .iter() 61 | .find(|config| config.guid == ACPI_GUID) 62 | .map(|config| config.address as u64) 63 | .expect("Could not find RSDP") 64 | } 65 | 66 | fn dump_memory_map(path: &str, image: Handle, st: &SystemTable) { 67 | let enough_mmap_size = 68 | st.boot_services().memory_map_size().map_size + 8 * mem::size_of::(); 69 | let mut mmap_buf = vec![0; enough_mmap_size]; 70 | let (_, descriptors) = st 71 | .boot_services() 72 | .memory_map(&mut mmap_buf) 73 | .unwrap_success(); 74 | 75 | let mut root_dir = fs::open_root_dir(image, st.boot_services()); 76 | let mut file = fs::create_file(&mut root_dir, path); 77 | fwriteln!( 78 | file, 79 | "Index, Type, Type(name), PhysicalStart, NumberOfPages, Attribute" 80 | ); 81 | for (i, d) in descriptors.enumerate() { 82 | fwriteln!( 83 | file, 84 | "{}, {:x}, {:?}, {:08x}, {:x}, {:x}", 85 | i, 86 | d.ty.0, 87 | d.ty, 88 | d.phys_start, 89 | d.page_count, 90 | d.att.bits() & 0xfffff 91 | ); 92 | } 93 | } 94 | 95 | fn load_kernel(path: &str, image: Handle, st: &SystemTable) -> usize { 96 | let mut root_dir = fs::open_root_dir(image, st.boot_services()); 97 | let mut file = fs::open_file(&mut root_dir, path); 98 | let buf = fs::read_file_to_vec(&mut file); 99 | load_elf(&buf, st) 100 | } 101 | 102 | fn load_elf(src: &[u8], st: &SystemTable) -> usize { 103 | let elf = elf::Elf::parse(&src).expect("Failed to parse ELF"); 104 | 105 | let mut dest_start = usize::MAX; 106 | let mut dest_end = 0; 107 | for ph in elf.program_headers.iter() { 108 | if ph.p_type != elf::program_header::PT_LOAD { 109 | continue; 110 | } 111 | dest_start = dest_start.min(ph.p_vaddr as usize); 112 | dest_end = dest_end.max((ph.p_vaddr + ph.p_memsz) as usize); 113 | } 114 | 115 | st.boot_services() 116 | .allocate_pages( 117 | AllocateType::Address(dest_start), 118 | MemoryType::LOADER_DATA, 119 | (dest_end - dest_start + UEFI_PAGE_SIZE - 1) / UEFI_PAGE_SIZE, 120 | ) 
121 | .expect_success("Failed to allocate pages for kernel"); 122 | 123 | for ph in elf.program_headers.iter() { 124 | if ph.p_type != elf::program_header::PT_LOAD { 125 | continue; 126 | } 127 | let ofs = ph.p_offset as usize; 128 | let fsize = ph.p_filesz as usize; 129 | let msize = ph.p_memsz as usize; 130 | let dest = unsafe { slice::from_raw_parts_mut(ph.p_vaddr as *mut u8, msize) }; 131 | dest[..fsize].copy_from_slice(&src[ofs..ofs + fsize]); 132 | dest[fsize..].fill(0); 133 | } 134 | 135 | elf.entry as usize 136 | } 137 | 138 | fn get_frame_buffer(bs: &BootServices) -> frame_buffer::FrameBuffer { 139 | let gop = bs.locate_protocol::().unwrap_success(); 140 | let gop = unsafe { &mut *gop.get() }; 141 | frame_buffer::FrameBuffer { 142 | frame_buffer: gop.frame_buffer().as_mut_ptr(), 143 | stride: gop.current_mode_info().stride() as u32, 144 | resolution: ( 145 | gop.current_mode_info().resolution().0 as u32, 146 | gop.current_mode_info().resolution().1 as u32, 147 | ), 148 | format: match gop.current_mode_info().pixel_format() { 149 | PixelFormat::Rgb => frame_buffer::PixelFormat::Rgb, 150 | PixelFormat::Bgr => frame_buffer::PixelFormat::Bgr, 151 | f => panic!("Unsupported pixel format: {:?}", f), 152 | }, 153 | } 154 | } 155 | 156 | fn exit_boot_services( 157 | image: Handle, 158 | st: SystemTable, 159 | ) -> (SystemTable, memory_map::MemoryMap) { 160 | let enough_mmap_size = 161 | st.boot_services().memory_map_size().map_size + 8 * mem::size_of::(); 162 | let mmap_buf = vec![0; enough_mmap_size].leak(); 163 | let mut descriptors = Vec::with_capacity(enough_mmap_size); 164 | let (st, raw_descriptors) = st 165 | .exit_boot_services(image, mmap_buf) 166 | .expect_success("Failed to exit boot services"); 167 | 168 | // uefi::MemoryDescriptor -> memory_map::Descriptor 169 | for d in raw_descriptors { 170 | if is_available_after_exit_boot_services(d.ty) { 171 | descriptors.push(memory_map::Descriptor { 172 | phys_start: d.phys_start, 173 | phys_end: d.phys_start + 
d.page_count * UEFI_PAGE_SIZE as u64, 174 | }); 175 | } 176 | } 177 | let memory_map = { 178 | let (ptr, len, _) = descriptors.into_raw_parts(); 179 | memory_map::MemoryMap { 180 | descriptors: ptr as *const memory_map::Descriptor, 181 | descriptors_len: len as u64, 182 | } 183 | }; 184 | (st, memory_map) 185 | } 186 | 187 | fn is_available_after_exit_boot_services(ty: MemoryType) -> bool { 188 | matches!( 189 | ty, 190 | MemoryType::CONVENTIONAL | MemoryType::BOOT_SERVICES_CODE | MemoryType::BOOT_SERVICES_DATA 191 | ) 192 | } 193 | -------------------------------------------------------------------------------- /ors-kernel/src/phys_memory.rs: -------------------------------------------------------------------------------- 1 | // A frame represents a memory section on a physical address, 2 | // and does not manage the usage of linear (virtual) addresses. 3 | 4 | use crate::sync::spin::{Spin, SpinGuard}; 5 | use crate::x64; 6 | use core::mem; 7 | use log::trace; 8 | 9 | static FRAME_MANAGER: Spin = Spin::new(BitmapFrameManager::new()); 10 | 11 | pub fn frame_manager() -> SpinGuard<'static, BitmapFrameManager> { 12 | FRAME_MANAGER.lock() 13 | } 14 | 15 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 16 | pub struct Frame(usize); 17 | 18 | impl Frame { 19 | pub unsafe fn from_phys_addr(addr: x64::PhysAddr) -> Self { 20 | Self(addr.as_u64() as usize / Frame::SIZE) 21 | } 22 | 23 | pub fn phys_addr(self) -> x64::PhysAddr { 24 | x64::PhysAddr::new((self.0 * Frame::SIZE) as u64) 25 | } 26 | 27 | pub fn phys_frame(self) -> x64::PhysFrame { 28 | x64::PhysFrame::from_start_address(self.phys_addr()).unwrap() 29 | } 30 | 31 | fn offset(self, offset: usize) -> Self { 32 | Self(self.0 + offset) 33 | } 34 | 35 | const MIN: Self = Self(1); // TODO: Why 1 instead of 0? 
36 | const MAX: Self = Self(FRAME_COUNT); 37 | 38 | pub const SIZE: usize = 4096; // 4KiB (= 2 ** 12) 39 | } 40 | 41 | const MAX_PHYSICAL_MEMORY_BYTES: usize = 128 * 1024 * 1024 * 1024; // 128GiB 42 | const FRAME_COUNT: usize = MAX_PHYSICAL_MEMORY_BYTES / Frame::SIZE; 43 | 44 | type MapLine = usize; 45 | const BITS_PER_MAP_LINE: usize = 8 * mem::size_of::(); 46 | const MAP_LINE_COUNT: usize = FRAME_COUNT / BITS_PER_MAP_LINE; 47 | 48 | pub struct BitmapFrameManager { 49 | alloc_map: [MapLine; MAP_LINE_COUNT], 50 | begin: Frame, 51 | end: Frame, 52 | } 53 | 54 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] 55 | pub enum AllocateError { 56 | NotEnoughFrame, 57 | } 58 | 59 | impl BitmapFrameManager { 60 | pub const fn new() -> Self { 61 | Self { 62 | alloc_map: [0; MAP_LINE_COUNT], 63 | begin: Frame::MIN, 64 | end: Frame::MAX, 65 | } 66 | } 67 | 68 | pub fn total_frames(&self) -> usize { 69 | self.end.0 - self.begin.0 70 | } 71 | 72 | pub fn available_frames(&self) -> usize { 73 | (self.begin.0..self.end.0) 74 | .filter(|i| self.get_bit(Frame(*i))) 75 | .count() 76 | } 77 | 78 | pub fn availability_in_range(&self, a: f64, b: f64) -> f64 { 79 | assert!(0.0 <= a && a < b && b <= 1.0); 80 | let a = self.begin.0 + ((self.end.0 - self.begin.0) as f64 * a) as usize; 81 | let b = self.begin.0 + ((self.end.0 - self.begin.0) as f64 * b) as usize; 82 | let n = (a..b).filter(|i| self.get_bit(Frame(*i))).count(); 83 | n as f64 / (b - a) as f64 84 | } 85 | 86 | fn set_memory_range(&mut self, begin: Frame, end: Frame) { 87 | self.begin = begin; 88 | self.end = end; 89 | } 90 | 91 | fn get_bit(&self, frame: Frame) -> bool { 92 | let line_index = frame.0 / BITS_PER_MAP_LINE; 93 | let bit_index = frame.0 % BITS_PER_MAP_LINE; 94 | (self.alloc_map[line_index] & (1 << bit_index)) != 0 95 | } 96 | 97 | fn set_bit(&mut self, frame: Frame, allocated: bool) { 98 | let line_index = frame.0 / BITS_PER_MAP_LINE; 99 | let bit_index = frame.0 % BITS_PER_MAP_LINE; 100 | 101 | if 
allocated { 102 | self.alloc_map[line_index] |= 1 << bit_index; 103 | } else { 104 | self.alloc_map[line_index] &= !(1 << bit_index); 105 | } 106 | } 107 | 108 | fn mark_allocated_in_bytes(&mut self, start: Frame, bytes: usize) { 109 | self.mark_allocated(start, bytes / Frame::SIZE, true) 110 | } 111 | 112 | pub fn allocate(&mut self, num_frames: usize) -> Result { 113 | // Doing the first fit allocation 114 | let mut frame = self.begin; 115 | 'search: loop { 116 | for i in 0..num_frames { 117 | if frame.offset(i) >= self.end { 118 | Err(AllocateError::NotEnoughFrame)? 119 | } 120 | if self.get_bit(frame.offset(i)) { 121 | frame = frame.offset(i + 1); 122 | continue 'search; 123 | } 124 | } 125 | self.mark_allocated(frame, num_frames, false); 126 | return Ok(frame); 127 | } 128 | } 129 | 130 | fn mark_allocated(&mut self, frame: Frame, num_frames: usize, init: bool) { 131 | for i in 0..num_frames { 132 | if !init { 133 | trace!("phys_memory: allocate {:?}", frame.offset(i).phys_addr()); 134 | } 135 | self.set_bit(frame.offset(i), true); 136 | } 137 | } 138 | 139 | pub fn free(&mut self, frame: Frame, num_frames: usize) { 140 | for i in 0..num_frames { 141 | trace!("phys_memory: deallocate {:?}", frame.offset(i).phys_addr()); 142 | self.set_bit(frame.offset(i), false); 143 | } 144 | } 145 | 146 | /// Caller must ensure that the given MemoryMap is valid. 
147 | pub unsafe fn initialize(&mut self, mm: &ors_common::memory_map::MemoryMap) { 148 | trace!("INITIALIZING PhysMemoryManager"); 149 | let mut phys_available_end = 0; 150 | for d in mm.descriptors() { 151 | let phys_start = d.phys_start as usize; 152 | let phys_end = d.phys_end as usize; 153 | if phys_available_end < d.phys_start as usize { 154 | self.mark_allocated_in_bytes( 155 | Frame::from_phys_addr(x64::PhysAddr::new(phys_available_end as u64)), 156 | phys_start - phys_available_end, 157 | ); 158 | } 159 | phys_available_end = phys_end; 160 | } 161 | self.set_memory_range( 162 | Frame::MIN, 163 | Frame::from_phys_addr(x64::PhysAddr::new(phys_available_end as u64)), 164 | ); 165 | } 166 | } 167 | 168 | unsafe impl x64::FrameAllocator for BitmapFrameManager { 169 | fn allocate_frame(&mut self) -> Option> { 170 | match self.allocate(1) { 171 | Ok(frame) => Some(frame.phys_frame()), 172 | Err(_) => None, 173 | } 174 | } 175 | } 176 | 177 | impl x64::FrameDeallocator for BitmapFrameManager { 178 | unsafe fn deallocate_frame(&mut self, frame: x64::PhysFrame) { 179 | self.free(Frame::from_phys_addr(frame.start_address()), 1) 180 | } 181 | } 182 | 183 | #[cfg(test)] 184 | mod tests { 185 | use super::frame_manager; 186 | use log::info; 187 | 188 | #[test_case] 189 | fn test_frame_manager() { 190 | info!("TESTING phys_memory::test_frame_manager"); 191 | 192 | let a = frame_manager().allocate(1).unwrap(); 193 | let b = frame_manager().allocate(1).unwrap(); 194 | assert_ne!(a, b); 195 | 196 | let c = frame_manager().allocate(3).unwrap(); 197 | assert_ne!(b, c); 198 | 199 | frame_manager().free(a, 1); 200 | frame_manager().free(b, 1); 201 | frame_manager().free(c, 3); 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /ors-kernel/src/devices/virtio/block.rs: -------------------------------------------------------------------------------- 1 | use super::{Buffer, Configuration, VirtQueue}; 2 | use crate::cpu::Cpu; 3 | 
use crate::devices::pci; 4 | use crate::interrupts::virtio_block_irq; 5 | use crate::sync::spin::Spin; 6 | use crate::task; 7 | use core::mem; 8 | use core::sync::atomic::{fence, Ordering}; 9 | use derive_new::new; 10 | use heapless::Vec; 11 | use log::trace; 12 | use spin::Once; 13 | 14 | static BLOCKS: Once> = Once::new(); 15 | 16 | pub fn initialize() { 17 | BLOCKS.call_once(|| { 18 | trace!("INITIALIZING VirtIO Blocks"); 19 | unsafe { Block::scan::<8>() } 20 | }); 21 | } 22 | 23 | pub fn list() -> &'static Vec { 24 | BLOCKS 25 | .get() 26 | .expect("block::list is called before block::initialize") 27 | } 28 | 29 | #[derive(Debug)] 30 | pub struct Block { 31 | configuration: Configuration, 32 | requestq: Spin>>, 33 | } 34 | 35 | impl Block { 36 | unsafe fn scan() -> Vec { 37 | let mut blocks = Vec::new(); 38 | 39 | for device in pci::devices() { 40 | if device.is_virtio() && device.subsystem_id() == 0x02 { 41 | match Block::from_pci_device(*device, blocks.len()) { 42 | Ok(block) => match blocks.push(block) { 43 | Ok(()) => {} 44 | Err(block) => { 45 | // FIXME: To remove mem::forget, we need to reset the device 46 | mem::forget(block); 47 | trace!("virtio: More than {} blocks are unsupported", N); 48 | } 49 | }, 50 | Err(msg) => trace!("virtio: Failed to initialize block: {}", msg), 51 | } 52 | } 53 | } 54 | 55 | blocks 56 | } 57 | 58 | unsafe fn from_pci_device(device: pci::Device, index: usize) -> Result { 59 | if let Some(msi_x) = device.msi_x() { 60 | if msi_x.table().len() == 0 { 61 | return Err("MSI-X support does not have enough table entries"); 62 | } 63 | 64 | let bsp = Cpu::boot_strap().lapic_id().unwrap(); 65 | let irq = virtio_block_irq(index).ok_or("IRQ numbers exhausted")?; 66 | msi_x.table().entry(0).enable(bsp, irq); // for requestq 67 | msi_x.enable(); 68 | } else { 69 | // Interrupts other than MSI-X is not implemented 70 | return Err("MSI-X unsupported"); 71 | } 72 | 73 | let configuration = Configuration::from_pci_device(device)?; 74 | 
configuration.initialize(Self::negotiate)?; 75 | let requestq = Spin::new(VirtQueue::new(configuration, 0, Some(0))?); 76 | configuration.set_driver_ok(); 77 | 78 | Ok(Self { 79 | configuration, 80 | requestq, 81 | }) 82 | } 83 | 84 | /// Capacity of the device (expressed in `Self::SECTOR_SIZE` sectors) 85 | pub fn capacity(&self) -> u64 { 86 | let lower = unsafe { self.configuration.read_device_specific::(0x0) } as u64; 87 | let upper = unsafe { self.configuration.read_device_specific::(0x4) } as u64; 88 | lower | (upper << 32) 89 | } 90 | 91 | fn check_capacity(&self, sector: u64, len: usize) -> Result<(), Error> { 92 | let num_additional_sectors = (len.max(1) - 1) / Self::SECTOR_SIZE; 93 | if sector + (num_additional_sectors as u64) < self.capacity() { 94 | Ok(()) 95 | } else { 96 | Err(Error::OutOfRange) 97 | } 98 | } 99 | 100 | fn request( 101 | &self, 102 | header: RequestHeader, 103 | body: Buffer>, 104 | ) -> Result<(), Error> { 105 | let mut footer = RequestFooter::new(0); 106 | let complete_channel = task::WaitChannel::from_ptr(&footer); 107 | 108 | let mut buffers = [ 109 | Buffer::from_ref(&header, None).unwrap(), 110 | body, 111 | Buffer::from_ref_mut(&mut footer, Some(complete_channel)).unwrap(), 112 | ] 113 | .into_iter(); 114 | 115 | let mut requestq = self.requestq.lock(); 116 | loop { 117 | match requestq.transfer(buffers) { 118 | Ok(()) => break, 119 | Err(b) => { 120 | buffers = b; 121 | task::scheduler().block(self.queue_wait_channel(), None, requestq); 122 | requestq = self.requestq.lock(); 123 | } 124 | } 125 | } 126 | unsafe { self.configuration.set_queue_notify(0) }; 127 | 128 | task::scheduler().block(complete_channel, None, requestq); 129 | fence(Ordering::SeqCst); 130 | footer.into_result() 131 | } 132 | 133 | fn queue_wait_channel(&self) -> task::WaitChannel { 134 | task::WaitChannel::from_ptr(self) 135 | } 136 | 137 | /// Read data from this device. 
138 | pub fn read(&self, sector: u64, buf: &mut [u8]) -> Result<(), Error> { 139 | self.check_capacity(sector, buf.len())?; 140 | let header = RequestHeader::new(RequestHeader::IN, 0, sector); 141 | let body = Buffer::from_bytes_mut(buf, None).unwrap(); 142 | self.request(header, body) 143 | } 144 | 145 | /// Write data into this device. 146 | pub fn write(&self, sector: u64, buf: &[u8]) -> Result<(), Error> { 147 | self.check_capacity(sector, buf.len())?; 148 | let header = RequestHeader::new(RequestHeader::OUT, 0, sector); 149 | let body = Buffer::from_bytes(buf, None).unwrap(); 150 | self.request(header, body) 151 | } 152 | 153 | /// Collect the processed requests. 154 | /// This method is supposed to be called from Used Buffer Notification (interrupt). 155 | pub fn collect(&self) { 156 | let mut requestq = self.requestq.lock(); 157 | requestq.collect(|chan| { 158 | if let Some(chan) = chan { 159 | task::scheduler().release(chan); 160 | } 161 | }); 162 | task::scheduler().release(self.queue_wait_channel()); 163 | } 164 | 165 | fn negotiate(features: u32) -> u32 { 166 | // TODO: Understand the detailed semantics of these features 167 | // Currently we only support features that are enabled in xv6-riscv 168 | const RO: u32 = 1 << 5; 169 | const SCSI: u32 = 1 << 7; 170 | const CONFIG_WCE: u32 = 1 << 11; 171 | const MQ: u32 = 1 << 12; 172 | const ANY_LAYOUT: u32 = 1 << 27; 173 | features & !RO & !SCSI & !CONFIG_WCE & !MQ & !ANY_LAYOUT 174 | } 175 | 176 | pub const SECTOR_SIZE: usize = 512; 177 | } 178 | 179 | unsafe impl Sync for Block {} 180 | 181 | unsafe impl Send for Block {} 182 | 183 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)] 184 | #[non_exhaustive] 185 | pub enum Error { 186 | Io, 187 | Unsupported, 188 | OutOfRange, 189 | Unknown, 190 | } 191 | 192 | #[repr(C)] 193 | #[derive(Debug, new)] 194 | struct RequestHeader { 195 | ty: u32, 196 | _reserved: u32, 197 | sector: u64, 198 | } 199 | 200 | impl RequestHeader { 201 | const IN: u32 = 0; 
202 | const OUT: u32 = 1; 203 | } 204 | 205 | #[repr(C)] 206 | #[derive(Debug, new)] 207 | struct RequestFooter { 208 | status: u8, 209 | } 210 | 211 | impl RequestFooter { 212 | fn into_result(self) -> Result<(), Error> { 213 | match self.status { 214 | Self::STATUS_OK => Ok(()), 215 | Self::STATUS_IOERR => Err(Error::Io), 216 | Self::STATUS_UNSUPP => Err(Error::Unsupported), 217 | _ => Err(Error::Unknown), 218 | } 219 | } 220 | 221 | const STATUS_OK: u8 = 0; 222 | const STATUS_IOERR: u8 = 1; 223 | const STATUS_UNSUPP: u8 = 2; 224 | } 225 | -------------------------------------------------------------------------------- /ors-kernel/src/fs/volume.rs: -------------------------------------------------------------------------------- 1 | use crate::sync::mutex::{Mutex, MutexGuard}; 2 | use crate::sync::spin::Spin; 3 | use alloc::collections::VecDeque; 4 | use alloc::sync::Arc; 5 | use alloc::vec; 6 | use alloc::vec::Vec; 7 | use core::fmt; 8 | use core::mem::ManuallyDrop; 9 | use core::ops::{Deref, DerefMut}; 10 | use derive_new::new; 11 | 12 | pub mod virtio; 13 | 14 | /// A unit of volume read/write. 15 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 16 | pub struct Sector(usize); 17 | 18 | impl Sector { 19 | pub fn from_index(index: usize) -> Self { 20 | Self(index) 21 | } 22 | 23 | pub fn index(self) -> usize { 24 | self.0 25 | } 26 | 27 | pub fn offset(self, s: usize) -> Self { 28 | Self(self.0 + s) 29 | } 30 | 31 | pub const INVALID: Self = Self(usize::MAX); 32 | } 33 | 34 | impl fmt::Display for Sector { 35 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 36 | self.0.fmt(f) 37 | } 38 | } 39 | 40 | /// Storage area used by the file system. 
41 | pub trait Volume { 42 | fn sector_count(&self) -> usize; 43 | fn sector_size(&self) -> usize; 44 | fn read(&self, sector: Sector, buf: &mut [u8]) -> Result<(), VolumeError>; 45 | fn write(&self, sector: Sector, buf: &[u8]) -> Result<(), VolumeError>; 46 | } 47 | 48 | /// Error during volume operations. 49 | #[derive(PartialEq, Eq, Debug, Clone, Copy, new)] 50 | pub struct VolumeError { 51 | pub sector: Sector, 52 | pub kind: VolumeErrorKind, 53 | } 54 | 55 | impl fmt::Display for VolumeError { 56 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 57 | match self.kind { 58 | VolumeErrorKind::Io => write!(f, "I/O error")?, 59 | VolumeErrorKind::OutOfRange => write!(f, "Out of range")?, 60 | VolumeErrorKind::Unknown => write!(f, "Unknown error")?, 61 | } 62 | write!(f, " at sector={}", self.sector) 63 | } 64 | } 65 | 66 | #[derive(PartialEq, Eq, Debug, Clone, Copy)] 67 | pub enum VolumeErrorKind { 68 | Io, 69 | OutOfRange, 70 | Unknown, 71 | } 72 | 73 | /// A volume with in-memory buffering. 74 | #[derive(Debug)] 75 | pub struct BufferedVolume { 76 | volume: V, 77 | sectors: Spin, 78 | } 79 | 80 | impl BufferedVolume { 81 | const EXPECTED_CACHE_SIZE: usize = 8; 82 | 83 | pub fn new(volume: V) -> Self { 84 | Self { 85 | volume, 86 | sectors: Spin::new(BufferedSectors { 87 | lent: Vec::with_capacity(8), 88 | cached: VecDeque::with_capacity(Self::EXPECTED_CACHE_SIZE), 89 | }), 90 | } 91 | } 92 | } 93 | 94 | impl BufferedVolume { 95 | pub fn sector_count(&self) -> usize { 96 | self.volume.sector_count() 97 | } 98 | 99 | pub fn sector_size(&self) -> usize { 100 | self.volume.sector_size() 101 | } 102 | 103 | pub fn sector(&self, sector: Sector) -> Result { 104 | // NOTE: How can we optimize reading and writing of consecutive sectors? 
// NOTE(review): the `<...>` generic arguments in this chunk were stripped by the
// extraction tool (and `&sector` was mangled into an HTML entity); they are
// reconstructed below from usage — verify against upstream.

        let mut sectors = self.sectors.lock();

        // If the sector is already lent out, share the existing buffer.
        if let Some(s) = sectors.lent.iter().find(|s| s.sector() == sector) {
            let r = BufferedSectorRef::new(&self.sectors, s);
            drop(sectors);
            // This is necessary since the first initialize happens after drop(sectors) at (*1)
            r.initialize(&self.volume)?;
            return Ok(r);
        }

        let s = match sectors.cached.iter().position(|s| s.sector() == sector) {
            // Found a cached BufferedSector, use it
            Some(index) => sectors.cached.remove(index).unwrap(),
            // Recycle the least recently used BufferedSector
            None if Self::EXPECTED_CACHE_SIZE <= sectors.cached.len() => {
                let mut s = sectors.cached.pop_back().unwrap();
                // #63292: If UniqueArc is introduced, this unwrap may be removable
                Arc::get_mut(&mut s).unwrap().recycle(sector);
                s
            }
            // Create a new BufferedSector
            None => Arc::new(BufferedSector::new(sector, &self.volume)),
        };
        let r = BufferedSectorRef::new(&self.sectors, &s);
        sectors.lent.push(s);
        drop(sectors); // (*1)

        // This must happen after drop(sectors) to perform (blocking) volume reading/writing
        r.initialize(&self.volume)?;
        Ok(r)
    }

    /// Flushes every dirty cached sector to the underlying volume.
    pub fn commit(&self) -> Result<(), VolumeError> {
        let sectors = self.sectors.lock();
        // This temporary Vec is necessary since the cached sectors must be uniquely owned by BufferedVolume.
        let cached = sectors.cached.iter().map(|s| s.sector).collect::<Vec<_>>();
        drop(sectors);

        for s in cached {
            self.sector(s)?.commit(&self.volume)?;
        }
        Ok(())
    }
}

/// Bookkeeping for buffered sectors, split into a lent (shared) set and a
/// cached (uniquely owned, LRU-ordered) set.
#[derive(Debug)]
struct BufferedSectors {
    lent: Vec<Arc<BufferedSector>>,        // shared
    cached: VecDeque<Arc<BufferedSector>>, // uniquely owned
}

/// A single sector-sized buffer tied to a sector number.
#[derive(Debug)]
pub struct BufferedSector {
    sector: Sector,
    data: Mutex<BufferedSectorData>,
}

impl BufferedSector {
    fn new(sector: Sector, volume: &impl Volume) -> Self {
        Self {
            sector,
            data: Mutex::new(BufferedSectorData {
                sector: None,
                is_dirty: false,
                bytes: vec![0; volume.sector_size()],
            }),
        }
    }

    /// Reassigns this buffer to another sector; the data itself is lazily
    /// (re-)loaded by `initialize`.
    fn recycle(&mut self, sector: Sector) {
        self.sector = sector;
    }

    fn initialize(&self, volume: &impl Volume) -> Result<(), VolumeError> {
        self.data.lock().initialize(self.sector, volume)
    }

    fn commit(&self, volume: &impl Volume) -> Result<(), VolumeError> {
        self.data.lock().commit(volume)
    }

    pub fn sector(&self) -> Sector {
        self.sector
    }

    pub fn is_dirty(&self) -> bool {
        self.data.lock().is_dirty
    }

    pub fn mark_as_dirty(&self) {
        self.data.lock().is_dirty = true;
    }

    // NOTE(review): the guard's generic arguments were lost in extraction;
    // `MutexGuard<'_, BufferedSectorData>` is assumed — confirm upstream.
    pub fn bytes(&self) -> MutexGuard<'_, BufferedSectorData> {
        self.data.lock()
    }
}

#[derive(Debug)]
struct BufferedSectorData {
    sector: Option<Sector>, // which sector `bytes` currently holds, if any
    is_dirty: bool,
    bytes: Vec<u8>,
}

impl BufferedSectorData {
    /// Commits any pending write, then loads `sector` unless already loaded.
    fn initialize(&mut self, sector: Sector, volume: &impl Volume) -> Result<(), VolumeError> {
        self.commit(volume)?;
        if self.sector != Some(sector) {
            volume.read(sector, self.bytes.as_mut())?;
            self.sector = Some(sector);
        }
        Ok(())
    }

    fn commit(&mut self, volume: &impl Volume) -> Result<(), VolumeError> {
        if self.is_dirty {
            // `sector` is always Some here: is_dirty can only be set after a load.
            volume.write(self.sector.unwrap(), self.bytes.as_ref())?;
            self.is_dirty = false;
        }
        Ok(())
    }
}

impl Deref for BufferedSectorData {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.bytes.as_ref()
    }
}

impl DerefMut for BufferedSectorData {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.bytes.as_mut()
    }
}

/// A handle to a lent `BufferedSector`; on drop, the sector is moved back to
/// the cache when this was the last owner outside `sectors.lent`.
#[derive(Debug)]
pub struct BufferedSectorRef<'a> {
    sectors: &'a Spin<BufferedSectors>,
    sector: ManuallyDrop<Arc<BufferedSector>>,
}

impl<'a> BufferedSectorRef<'a> {
    fn new(sectors: &'a Spin<BufferedSectors>, sector: &Arc<BufferedSector>) -> Self {
        Self {
            sectors,
            sector: ManuallyDrop::new(Arc::clone(sector)),
        }
    }
}

impl<'a> Clone for BufferedSectorRef<'a> {
    fn clone(&self) -> Self {
        Self::new(self.sectors, &self.sector)
    }
}

impl<'a> Drop for BufferedSectorRef<'a> {
    fn drop(&mut self) {
        let mut sectors = self.sectors.lock();
        // SAFETY: `self.sector` is never accessed again; Drop runs at most once.
        let sector = unsafe { ManuallyDrop::take(&mut self.sector) };

        // This is the last owner except sectors.lent
        if Arc::strong_count(&sector) == 2 {
            let index = sectors
                .lent
                .iter()
                .position(|s| s.sector() == sector.sector())
                .unwrap();
            drop(sector); // 2 -> 1

            // Move this BufferedSector from sectors.lent to the front of sectors.cached
            let sector = sectors.lent.swap_remove(index);
            sectors.cached.push_front(sector);
        }
    }
}

impl<'a> Deref for BufferedSectorRef<'a> {
    type Target = BufferedSector;

    fn deref(&self) -> &Self::Target {
        &self.sector
    }
}
-------------------------------------------------------------------------------- /ors-kernel/src/graphics/text_buffer.rs: --------------------------------------------------------------------------------
use super::{Color, FontStyle, FrameBuffer,
FrameBufferExt, MonospaceFont, VecBuffer};
use alloc::collections::VecDeque;
use alloc::vec;
use alloc::vec::Vec;

// NOTE(review): the `<...>` generic arguments in this file were stripped by the
// extraction tool; they are reconstructed below from usage — verify upstream.

/// A grid of character cells rendered with a monospace font onto a frame buffer.
/// Only lines recorded as dirty since the last `render` are re-drawn.
#[derive(Debug)]
pub struct MonospaceTextBuffer<'a, T> {
    lines: VecDeque<Line>,
    buf: T,
    render_diff: RenderDiff, // dirty line range [start, end) awaiting render
    font: MonospaceFont<'a>,
    cursor: (usize, usize), // (x, y) in character cells
}

impl<'a, T: FrameBuffer> MonospaceTextBuffer<'a, T> {
    pub fn new(buf: T, font: MonospaceFont<'a>) -> Self {
        assert_eq!(buf.format(), font.format());
        let height = buf.height() / font.unit_height() as usize;
        let lines = vec![Line::new(&buf, &font); height].into();
        Self {
            lines,
            buf,
            render_diff: None,
            font,
            cursor: (0, 0),
        }
    }

    /// Moves the cursor relatively, clamped to the buffer bounds.
    pub fn move_cursor(&mut self, dx: i32, dy: i32) {
        let (x, y) = self.cursor;
        let y = (y as i32 + dy).clamp(0, self.lines.len() as i32 - 1) as usize;
        let x = (x as i32 + dx).clamp(0, self.lines[y].chars.len() as i32 - 1) as usize;
        self.cursor = (x, y);
    }

    /// Moves the cursor absolutely; `None` keeps the current coordinate.
    // NOTE(review): the parameter type was lost in extraction; `u32` is assumed
    // from the `n as usize` casts — confirm against the original source.
    pub fn set_cursor(&mut self, x: Option<u32>, y: Option<u32>) {
        let y = y
            .map(|n| n as usize)
            .unwrap_or(self.cursor.1)
            .clamp(0, self.lines.len() - 1);
        let x = x
            .map(|n| n as usize)
            .unwrap_or(self.cursor.0)
            .clamp(0, self.lines[y].chars.len() - 1);
        self.cursor = (x, y);
    }

    /// Erases a region around the cursor, optionally extending to whole lines
    /// before/after it, and records the dirtied line range.
    pub fn erase(
        &mut self,
        bg: Color,
        before_cursor_lines: bool,
        before_cursor_chars: bool,
        after_cursor_chars: bool,
        after_cursor_lines: bool,
    ) {
        let (x, y) = self.cursor;
        let mut start = usize::MAX;
        let mut end = 0;
        if before_cursor_lines {
            for (i, l) in self.lines.iter_mut().enumerate().take(y) {
                if l.erase(bg, 0, usize::MAX) {
                    start = start.min(i);
                    end = end.max(i + 1);
                }
            }
        }
        {
            let a = if before_cursor_chars { 0 } else { x };
            let b = if after_cursor_chars { usize::MAX } else { x };
            if self.lines[y].erase(bg, a, b) {
                start = start.min(y);
                end = end.max(y + 1);
            }
        }
        if after_cursor_lines {
            for (i, l) in self.lines.iter_mut().enumerate().skip(y + 1) {
                if l.erase(bg, 0, usize::MAX) {
                    start = start.min(i);
                    end = end.max(i + 1);
                }
            }
        }
        if start < end {
            extend_render_diff(&mut self.render_diff, start, end);
        }
    }

    /// Advances to the next line, scrolling (recycling the first line) at the bottom.
    pub fn next_line(&mut self, bg: Color) {
        let (_, y) = self.cursor;
        if y + 1 >= self.lines.len() {
            let mut first_line = self.lines.pop_front().unwrap(); // remove the first line
            first_line.erase(bg, 0, usize::MAX);
            self.lines.push_back(first_line);
            self.render_diff = Some((0, self.lines.len())); // all lines
            self.cursor = (0, self.lines.len() - 1);
        } else {
            self.cursor = (0, y + 1);
        }
    }

    /// Puts a character at the cursor, handling newline and end-of-line wrapping.
    pub fn put(&mut self, c: char, fg: Color, bg: Color, style: FontStyle) {
        let (x, y) = self.cursor;
        match self.lines[y].put(c, fg, bg, style, x) {
            LinePutResult::LineFeed => self.next_line(bg),
            LinePutResult::Wrapping => {
                self.next_line(bg);
                self.put(c, fg, bg, style);
            }
            LinePutResult::Next(changed, x) => {
                self.cursor = (x, y);
                if changed {
                    extend_render_diff(&mut self.render_diff, y, y + 1);
                }
            }
        }
    }

    /// Blits only the lines recorded as dirty since the last render, centered
    /// inside the frame buffer.
    pub fn render(&mut self) {
        if let Some((a, b)) = self.render_diff {
            let pad_y =
                (self.buf.height() - self.lines.len() * self.font.unit_height() as usize) as i32;
            for (i, line) in self.lines.iter_mut().enumerate().skip(a).take(b - a) {
                line.render(&mut self.font);
                let pad_x =
                    (self.buf.width() - line.chars.len() * self.font.unit_width() as usize) as i32;
                let ofs_y = (i * self.font.unit_height() as usize) as i32;
                self.buf.blit(pad_x / 2, pad_y / 2 + ofs_y, &line.buf);
            }
            self.render_diff = None;
        }
    }
}

/// One row of character cells together with its pre-rendered pixel buffer.
#[derive(Debug, Clone)]
struct Line {
    chars: Vec<Char>,
    buf: VecBuffer,
    render_diff: RenderDiff,
}

impl Line {
    fn new(parent_buf: &impl FrameBuffer, font: &MonospaceFont) -> Self {
        let width = parent_buf.width() / font.unit_width() as usize;
        Self {
            chars: vec![Char::void(); width],
            buf: VecBuffer::new(
                width * font.unit_width() as usize,
                font.unit_height() as usize,
                parent_buf.format(),
            ),
            render_diff: None,
        }
    }

    /// Erases cells in [a, b] (inclusive); returns true if anything changed.
    fn erase(&mut self, bg: Color, a: usize, b: usize /* inclusive */) -> bool {
        let b = b.saturating_add(1);
        let mut start = usize::MAX;
        let mut end = 0;
        for (i, c) in self.chars.iter_mut().enumerate().take(b).skip(a) {
            if c.erase(bg) {
                start = start.min(i);
                end = end.max(i + 1);
            }
        }
        if start < end {
            extend_render_diff(&mut self.render_diff, start, end);
            true
        } else {
            false
        }
    }

    fn put(&mut self, c: char, fg: Color, bg: Color, style: FontStyle, i: usize) -> LinePutResult {
        if c == '\n' {
            LinePutResult::LineFeed
        } else if i >= self.chars.len() {
            LinePutResult::Wrapping
        } else if self.chars[i].update(c, fg, bg, style) {
            extend_render_diff(&mut self.render_diff, i, i + 1);
            LinePutResult::Next(true, i + 1)
        } else {
            LinePutResult::Next(false, i + 1)
        }
    }

    fn render(&mut self, font: &mut MonospaceFont) {
        if let Some((a, b)) = self.render_diff {
            for (i, c) in self.chars.iter().copied().enumerate().take(b).skip(a) {
                let ofs_x = (i * font.unit_width() as usize) as i32;
                c.render_to(&mut self.buf, ofs_x, 0, font);
            }
            self.render_diff = None;
        }
    }
}

#[derive(Debug, Clone, Copy)]
enum LinePutResult {
    LineFeed,          // the input was a newline
    Wrapping,          // the line is full; retry on the next line
    Next(bool, usize), // (cell changed?, next cursor x)
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
struct Char {
    // Since MonospaceFont caches the rendered glyphs, Char does not hold a VecBuffer.
    value: char,
    fg: Color,
    bg: Color,
    font_style: FontStyle,
}

impl Char {
    const fn new(value: char, fg: Color, bg: Color, font_style: FontStyle) -> Self {
        Self {
            value,
            fg,
            bg,
            font_style,
        }
    }

    const fn void() -> Self {
        Self::new(
            '\0',
            Color::new(255, 255, 255),
            Color::new(0, 0, 0),
            FontStyle::Normal,
        )
    }

    fn erase(&mut self, bg: Color) -> bool {
        self.update(' ', self.fg, bg, self.font_style)
    }

    /// Replaces the cell; returns true if the cell actually changed.
    fn update(&mut self, c: char, fg: Color, bg: Color, style: FontStyle) -> bool {
        let new_self = Self::new(c, fg, bg, style);
        if *self != new_self {
            *self = new_self;
            true
        } else {
            false
        }
    }

    fn render_to(&self, buf: &mut impl FrameBuffer, x: i32, y: i32, font: &mut MonospaceFont) {
        buf.blit(
            x,
            y,
            font.get(self.value, self.fg, self.bg, self.font_style),
        );
    }
}

/// Dirty range `Some((start, end))` with exclusive end; `None` means clean.
type RenderDiff = Option<(usize, usize)>;

fn extend_render_diff(a: &mut RenderDiff, start: usize, end: usize) {
    *a = match *a {
        None => Some((start, end)),
        Some((a, b)) => Some((a.min(start), b.max(end))),
    };
}

// Workaround for linker error

#[no_mangle]
#[doc(hidden)]
pub extern "C" fn fminf(x: f32, y: f32) -> f32 {
    libm::fminf(x, y)
}

#[no_mangle]
#[doc(hidden)]
pub extern "C" fn fmaxf(x: f32, y: f32) -> f32 {
    libm::fmaxf(x, y)
}
-------------------------------------------------------------------------------- /ors-kernel/src/interrupts.rs: --------------------------------------------------------------------------------
use crate::acpi;
use crate::console;
use crate::cpu::Cpu;
use
crate::segmentation::DOUBLE_FAULT_IST_INDEX; 5 | use crate::task; 6 | use crate::x64; 7 | use core::ops::Range; 8 | use core::sync::atomic::{AtomicUsize, Ordering}; 9 | use spin::Lazy; 10 | 11 | pub const TIMER_FREQ: usize = 250; 12 | 13 | static TICKS: AtomicUsize = AtomicUsize::new(0); 14 | 15 | pub fn ticks() -> usize { 16 | TICKS.load(Ordering::SeqCst) 17 | } 18 | 19 | /// Clear Interrupt Flag. Interrupts are disabled while this value is alive. 20 | #[derive(Debug)] 21 | pub struct Cli; 22 | 23 | impl Cli { 24 | pub fn new() -> Self { 25 | let cli = !x64::interrupts::are_enabled(); 26 | x64::interrupts::disable(); 27 | let mut cpu = Cpu::current().state().lock(); 28 | if cpu.thread_state.ncli == 0 { 29 | cpu.thread_state.zcli = cli; 30 | } 31 | cpu.thread_state.ncli += 1; 32 | Self 33 | } 34 | } 35 | 36 | impl Drop for Cli { 37 | fn drop(&mut self) { 38 | assert!( 39 | !x64::interrupts::are_enabled(), 40 | "Inconsistent interrupt flag" 41 | ); 42 | let mut cpu = Cpu::current().state().lock(); 43 | cpu.thread_state.ncli -= 1; 44 | let sti = cpu.thread_state.ncli == 0 && !cpu.thread_state.zcli; 45 | drop(cpu); 46 | if sti { 47 | x64::interrupts::enable(); 48 | } 49 | } 50 | } 51 | 52 | pub unsafe fn initialize() { 53 | IDT.load(); 54 | disable_pic_8259(); 55 | initialize_local_apic(); 56 | initialize_io_apic(); 57 | } 58 | 59 | const PIC_8259_IRQ_OFFSET: u32 = 32; // first 32 entries are reserved by CPU 60 | const IRQ_TIMER: u32 = PIC_8259_IRQ_OFFSET + 0; 61 | const IRQ_KBD: u32 = PIC_8259_IRQ_OFFSET + 1; // Keyboard on PS/2 port 62 | const IRQ_COM1: u32 = PIC_8259_IRQ_OFFSET + 4; // First serial port 63 | 64 | const VIRTIO_BLOCK_IRQ_OFFSET: u32 = PIC_8259_IRQ_OFFSET + 16; // next 16 entries are for 8259 PIC interrupts 65 | const IRQ_VIRTIO_BLOCK: Range = VIRTIO_BLOCK_IRQ_OFFSET..VIRTIO_BLOCK_IRQ_OFFSET + 8; 66 | 67 | static IDT: Lazy = Lazy::new(|| unsafe { prepare_idt() }); 68 | 69 | unsafe fn prepare_idt() -> x64::InterruptDescriptorTable { 70 | let mut idt = 
x64::InterruptDescriptorTable::new(); 71 | idt.breakpoint 72 | .set_handler_fn(breakpoint_handler) 73 | .disable_interrupts(true); 74 | idt.page_fault 75 | .set_handler_fn(page_fault_handler) 76 | .disable_interrupts(true); 77 | idt.double_fault 78 | .set_handler_fn(double_fault_handler) 79 | .set_stack_index(DOUBLE_FAULT_IST_INDEX) 80 | .disable_interrupts(true); 81 | idt[IRQ_TIMER as usize] 82 | .set_handler_fn(timer_handler) 83 | .disable_interrupts(true); 84 | idt[IRQ_KBD as usize] 85 | .set_handler_fn(kbd_handler) 86 | .disable_interrupts(true); 87 | idt[IRQ_COM1 as usize] 88 | .set_handler_fn(com1_handler) 89 | .disable_interrupts(true); 90 | 91 | for (i, irq) in IRQ_VIRTIO_BLOCK.enumerate() { 92 | idt[irq as usize] 93 | .set_handler_fn(get_virtio_block_handler(i)) 94 | .disable_interrupts(true); 95 | } 96 | 97 | idt 98 | } 99 | 100 | unsafe fn disable_pic_8259() { 101 | x64::Port::new(0xa1).write(0xffu8); 102 | x64::Port::new(0x21).write(0xffu8); 103 | } 104 | 105 | static LAPIC: Lazy = 106 | Lazy::new(|| x64::LApic::new(acpi::apic_info().local_apic_address)); 107 | 108 | unsafe fn initialize_local_apic() { 109 | // TODO: Understand the detailed semantics of these setup processes 110 | // https://wiki.osdev.org/APIC 111 | // https://github.com/mit-pdos/xv6-public/blob/master/lapic.c#L55 112 | const ENABLE: u32 = 0x100; 113 | const X1: u32 = 0b1011; // divide by 1 (Divide Configuration Register) 114 | const PERIODIC: u32 = 0x20000; // vs ONE_SHOT 115 | const MASKED: u32 = 0x10000; 116 | const BCAST: u32 = 0x80000; 117 | const INIT: u32 = 0x00500; 118 | const LEVEL: u32 = 0x08000; 119 | const DELIVS: u32 = 0x01000; 120 | 121 | // Enable the Local APIC to receive interrupts by configuring the Spurious Interrupt Vector Register. 
122 | LAPIC.set_svr(ENABLE | 0xFF); 123 | 124 | // Measure the frequency of the Local APIC Timer 125 | LAPIC.set_tdcr(X1); 126 | LAPIC.set_timer(MASKED); 127 | LAPIC.set_ticr(u32::MAX); // start 128 | acpi::wait_milliseconds_with_pm_timer(100); 129 | let measured_lapic_timer_freq = (u32::MAX - LAPIC.tccr()) * 10; 130 | LAPIC.set_ticr(0); // stop 131 | 132 | // Enable timer interrupts 133 | LAPIC.set_tdcr(X1); 134 | LAPIC.set_timer(PERIODIC | IRQ_TIMER); 135 | LAPIC.set_ticr(measured_lapic_timer_freq / TIMER_FREQ as u32); 136 | 137 | // Disable logical interrupt lines 138 | LAPIC.set_lint0(MASKED); 139 | LAPIC.set_lint1(MASKED); 140 | 141 | // Disable performance counter overflow interrupts on machines that provide that interrupt entry. 142 | if (LAPIC.ver() >> 16) & 0xFF >= 4 { 143 | LAPIC.set_pcint(MASKED); 144 | } 145 | 146 | // TODO: Error interrupt? 147 | 148 | // Ack any outstanding interrupts 149 | LAPIC.set_eoi(0); 150 | 151 | // Send an Init Level De-Assert to synchronise arbitration ID's. 152 | LAPIC.set_icrhi(0); 153 | LAPIC.set_icrlo(BCAST | INIT | LEVEL); 154 | while (LAPIC.icrlo() & DELIVS) != 0 {} 155 | 156 | // Enable interrupts on the APIC (but not on the processor) 157 | LAPIC.set_tpr(0); 158 | } 159 | 160 | unsafe fn initialize_io_apic() { 161 | let ioapic = x64::IoApic::new(acpi::apic_info().io_apics.first().unwrap().address as u64); 162 | 163 | // https://wiki.osdev.org/APIC 164 | // https://github.com/mit-pdos/xv6-public/blob/master/ioapic.c#L49 165 | 166 | // const ACTIVELOW: u64 = 0x00002000; // Active low (vs high) 167 | // const LOGICAL: u64 = 0x00000800; // Destination is CPU id (vs APIC ID) 168 | const LEVEL: u64 = 0x00008000; // Level-triggered (vs edge-) 169 | const DISABLED: u64 = 0x00010000; // Interrupt disabled 170 | 171 | let max_intr = ioapic.ver() >> 16 & 0xFF; 172 | 173 | // Mark all interrupts edge-triggered, active high, disabled, and not routed to any CPUs. 
174 | for i in 0..max_intr { 175 | ioapic.set_redirection_table_at(i, DISABLED | (PIC_8259_IRQ_OFFSET + i) as u64); 176 | } 177 | 178 | let bsp = (Cpu::boot_strap().lapic_id().unwrap() as u64) << (24 + 32); 179 | ioapic.set_redirection_table_at(IRQ_KBD - PIC_8259_IRQ_OFFSET, IRQ_KBD as u64 | bsp | LEVEL); 180 | ioapic.set_redirection_table_at( 181 | IRQ_COM1 - PIC_8259_IRQ_OFFSET, 182 | IRQ_COM1 as u64 | bsp | LEVEL, 183 | ); 184 | } 185 | 186 | // Be careful to avoid deadlocks: 187 | // https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html 188 | 189 | extern "x86-interrupt" fn breakpoint_handler(stack_frame: x64::InterruptStackFrame) { 190 | sprintln!("EXCEPTION: BREAKPOINT"); 191 | sprintln!("{:#?}", stack_frame); 192 | } 193 | 194 | extern "x86-interrupt" fn page_fault_handler( 195 | stack_frame: x64::InterruptStackFrame, 196 | error_code: x64::PageFaultErrorCode, 197 | ) { 198 | sprintln!("EXCEPTION: PAGE FAULT"); 199 | sprintln!("Address: {:?}", x64::Cr2::read()); 200 | sprintln!("Error Code: {:?}", error_code); 201 | sprintln!("{:#?}", stack_frame); 202 | 203 | loop { 204 | x64::hlt() 205 | } 206 | } 207 | 208 | extern "x86-interrupt" fn double_fault_handler( 209 | stack_frame: x64::InterruptStackFrame, 210 | _error_code: u64, 211 | ) -> ! 
{ 212 | sprintln!("EXCEPTION: DOUBLE FAULT"); 213 | sprintln!("{:#?}", stack_frame); 214 | 215 | loop { 216 | x64::hlt() 217 | } 218 | } 219 | 220 | extern "x86-interrupt" fn timer_handler(_stack_frame: x64::InterruptStackFrame) { 221 | TICKS.fetch_add(1, Ordering::SeqCst); 222 | task::scheduler().elapse(); 223 | unsafe { LAPIC.set_eoi(0) }; 224 | task::scheduler().r#yield(); 225 | } 226 | 227 | extern "x86-interrupt" fn kbd_handler(_stack_frame: x64::InterruptStackFrame) { 228 | let v = unsafe { x64::Port::new(0x60).read() }; 229 | console::accept_raw_input(console::RawInput::Kbd(v)); 230 | unsafe { LAPIC.set_eoi(0) }; 231 | } 232 | 233 | extern "x86-interrupt" fn com1_handler(_stack_frame: x64::InterruptStackFrame) { 234 | use crate::devices::serial::default_port; 235 | 236 | let v = default_port().receive(); 237 | console::accept_raw_input(console::RawInput::Com1(v)); 238 | unsafe { LAPIC.set_eoi(0) }; 239 | } 240 | 241 | extern "x86-interrupt" fn virtio_block_handler( 242 | _stack_frame: x64::InterruptStackFrame, 243 | ) { 244 | use crate::devices::virtio::block; 245 | 246 | block::list()[N].collect(); 247 | unsafe { LAPIC.set_eoi(0) }; 248 | } 249 | 250 | fn get_virtio_block_handler(index: usize) -> extern "x86-interrupt" fn(x64::InterruptStackFrame) { 251 | match index { 252 | 0 => virtio_block_handler::<0>, 253 | 1 => virtio_block_handler::<1>, 254 | 2 => virtio_block_handler::<2>, 255 | 3 => virtio_block_handler::<3>, 256 | 4 => virtio_block_handler::<4>, 257 | 5 => virtio_block_handler::<5>, 258 | 6 => virtio_block_handler::<6>, 259 | 7 => virtio_block_handler::<7>, 260 | _ => panic!("Unsupported index"), 261 | } 262 | } 263 | 264 | pub fn virtio_block_irq(index: usize) -> Option { 265 | if index < IRQ_VIRTIO_BLOCK.len() { 266 | Some(IRQ_VIRTIO_BLOCK.start + index as u32) 267 | } else { 268 | None 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /ors-kernel/src/fs/fat/boot_sector.rs: 
-------------------------------------------------------------------------------- 1 | use super::{Cluster, Sector, SliceExt}; 2 | use core::fmt; 3 | 4 | /// Error while reading boot sector. 5 | #[derive(PartialEq, Eq, Debug, Clone, Copy, Hash)] 6 | pub enum Error { 7 | SignatureMismatch, 8 | Broken(&'static str), 9 | Unsupported(&'static str), 10 | } 11 | 12 | impl fmt::Display for Error { 13 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 14 | match self { 15 | Error::SignatureMismatch => write!(f, "Boot signature mismatch"), 16 | Error::Broken(s) => write!(f, "Broken boot sector: {}", s), 17 | Error::Unsupported(s) => write!(f, "Unsupported feature: {}", s), 18 | } 19 | } 20 | } 21 | 22 | /// Deserialized boot sector structure. 23 | #[derive(PartialEq, Eq, Debug, Clone, Copy)] 24 | pub struct BootSector { 25 | // `bpb_` means that it is part of the BIOS parameter block. 26 | /// Jump instruction to the bootstrap code. usually 0xEB??90 | 0xE9???? 27 | _jmp_boot: [u8; 3], 28 | /// Formatter's name. usually MSWIN4.1 29 | _oem_name: [u8; 8], 30 | /// Sector size in bytes. It must be same as the volume sector size. 512 | 1024 | 2048 | 4096 31 | bpb_byts_per_sec: u16, 32 | /// Cluster size in sectors. It must be power of two. Cluster is an allocation unit of FAT data and consists of contiguous sectors. 33 | bpb_sec_per_clus: u8, 34 | /// Number of sectors of reserved area. It must not be 0 since it includes this boot sector. 35 | bpb_rsvd_sec_cnt: u16, 36 | /// Number of FAT copies. It should be 2. 37 | bpb_num_fats: u8, 38 | /// Number of directory entries in the root directory. unused for FAT32. 39 | _bpb_root_ent_cnt: u16, 40 | /// Total number of sectors for FAT12/16. 0 for FAT32. 41 | _bpb_tot_sec_16: u16, 42 | /// Media type. ignored 43 | _bpb_media: u8, 44 | /// FAT size in sectors for FAT12/FAT16. 0 for FAT32. 45 | _bpb_fat_sz_16: u16, 46 | /// Track size in sectors. ignored 47 | _bpb_sec_per_trk: u16, 48 | /// Number of heads. 
ignored 49 | _bpb_num_heads: u16, 50 | /// Number of hidden sectors before this volume. 51 | _bpb_hidd_sec: u32, 52 | /// Total number of sectors for FAT32. 0 for FAT12/16. 53 | bpb_tot_sec_32: u32, 54 | // FAT32 only fields ------ 55 | /// FAT size in sectors for FAT32. 56 | bpb_fat_sz_32: u32, 57 | _bpb_ext_flags: u16, 58 | /// File system version. It must be 0x0000. 59 | _bpb_fs_ver: u16, 60 | /// Cluster number of the root directory. 61 | bpb_root_clus: u32, 62 | /// Sector number of the FSINFO. It must be 1. 63 | _bpb_fs_info: u16, 64 | /// Sector number where the boot sector backup is placed. 6 is recommended 65 | _bpb_bk_boot_sec: u16, 66 | _bpb_reserved: [u8; 12], 67 | // ------ 68 | /// Drive Number. ignored 69 | _drv_num: u8, 70 | _reserved: u8, 71 | /// Extended boot signature. It must be 0x29. 72 | _boot_sig: u8, 73 | /// Volume ID. usually format datetime. 74 | vol_id: u32, 75 | /// Volume Label. 76 | vol_lab: [u8; 11], 77 | /// FS type name. This field is not used to determine the FAT type. 78 | _fil_sys_type: [u8; 8], 79 | // boot_code 80 | // boot_sign 81 | } 82 | 83 | impl BootSector { 84 | pub fn volume_id(&self) -> u32 { 85 | self.vol_id 86 | } 87 | 88 | pub fn volume_label(&self) -> [u8; 11] { 89 | self.vol_lab 90 | } 91 | 92 | /// Sector size in bytes. 93 | pub fn sector_size(&self) -> usize { 94 | self.bpb_byts_per_sec as usize 95 | } 96 | 97 | /// Total number of sectors. 98 | pub fn total_sector_count(&self) -> usize { 99 | debug_assert_eq!(self._bpb_tot_sec_16, 0); 100 | self.bpb_tot_sec_32 as usize 101 | } 102 | 103 | /// FAT size in sectors. 104 | pub fn fat_size(&self) -> usize { 105 | debug_assert_eq!(self._bpb_fat_sz_16, 0); 106 | self.bpb_fat_sz_32 as usize 107 | } 108 | 109 | // A FAT volume consists of 110 | // Reserved area | FAT area | Root dir area (for FAT12/16) | Data area 111 | 112 | /// Fat area start sector. 
113 | pub fn fat_area_start(&self) -> Sector { 114 | Sector::from_index(self.bpb_rsvd_sec_cnt as usize) 115 | } 116 | 117 | /// FAT area size in sectors. 118 | pub fn fat_area_size(&self) -> usize { 119 | self.fat_size() * self.bpb_num_fats as usize 120 | } 121 | 122 | /// Root dir area start sector. 123 | pub fn root_dir_area_start(&self) -> Sector { 124 | self.fat_area_start().offset(self.fat_area_size()) 125 | } 126 | 127 | /// Root dir area size in sectors. 128 | pub fn root_dir_area_size(&self) -> usize { 129 | debug_assert_eq!(self._bpb_root_ent_cnt, 0); 130 | 0 131 | // use super::dir_entry::DirEntry; 132 | // let sector_size = self.sector_size(); 133 | // (DirEntry::SIZE * self._bpb_root_ent_cnt as usize + sector_size - 1) / sector_size 134 | } 135 | 136 | /// Data area start sector. 137 | pub fn data_area_start(&self) -> Sector { 138 | self.root_dir_area_start().offset(self.root_dir_area_size()) 139 | } 140 | 141 | /// Data area size in sectors. 142 | pub fn data_area_size(&self) -> usize { 143 | self.total_sector_count() - self.data_area_start().index() 144 | } 145 | 146 | /// Cluster size in sectors. 147 | pub fn cluster_size(&self) -> usize { 148 | self.bpb_sec_per_clus as usize 149 | } 150 | 151 | /// Number of available clusters. 152 | pub fn cluster_count(&self) -> usize { 153 | self.data_area_size() / self.cluster_size() 154 | } 155 | 156 | pub(super) fn is_cluster_available(&self, n: Cluster) -> bool { 157 | // Cluster numbers start at 2, thus the maximum cluster number is `cluster_count() + 1`. 158 | 2 <= n.index() && n.index() <= self.cluster_count() + 1 159 | } 160 | 161 | /// Get the location of the FAT entry corresponding to the given cluster number. 162 | /// 163 | /// In FAT32, FAT (File Allocation Table) is an array of 32-bit FAT entries. 164 | /// Each FAT entry has a 1:1 correspondence with each cluster, 165 | /// and the value of the FAT entry indicates the status of the corresponding cluster. 
166 | /// Notice that FAT[0] and FAT[1] are reserved, and correspondingly, cluster numbers also start at 2. 167 | /// It should also be noted that in FAT32, the upper 4 bits of the FAT entry are reserved. 168 | pub(super) fn fat_entry_location(&self, n: Cluster) -> (Sector, usize) { 169 | debug_assert!(self.is_cluster_available(n)); 170 | let bytes_offset = n.index() * 4; // 32-bit -> 4bytes 171 | let sector = self 172 | .fat_area_start() 173 | .offset(bytes_offset / self.sector_size()); 174 | let offset = bytes_offset % self.sector_size(); 175 | (sector, offset) 176 | } 177 | 178 | /// Get the location of the data corresponding to the given cluster number. 179 | pub(super) fn cluster_location(&self, n: Cluster) -> Sector { 180 | debug_assert!(self.is_cluster_available(n)); 181 | self.data_area_start() 182 | .offset((n.index() - 2) * self.cluster_size()) 183 | } 184 | 185 | pub(super) fn root_dir_cluster(&self) -> Cluster { 186 | Cluster::from_index(self.bpb_root_clus as usize) 187 | } 188 | } 189 | 190 | impl TryFrom<&'_ [u8]> for BootSector { 191 | type Error = Error; 192 | 193 | fn try_from(buf: &'_ [u8]) -> Result { 194 | if buf.len() < 512 || !matches!(buf[510..512], [0x55, 0xaa]) { 195 | Err(Error::SignatureMismatch)?; 196 | } 197 | 198 | let _jmp_boot = buf.array::<3>(0); 199 | let _oem_name = buf.array::<8>(3); 200 | let bpb_byts_per_sec = u16::from_le_bytes(buf.array::<2>(11)); 201 | let bpb_sec_per_clus = buf[13]; 202 | let bpb_rsvd_sec_cnt = u16::from_le_bytes(buf.array::<2>(14)); 203 | let bpb_num_fats = buf[16]; 204 | let _bpb_root_ent_cnt = u16::from_le_bytes(buf.array::<2>(17)); 205 | let _bpb_tot_sec_16 = u16::from_le_bytes(buf.array::<2>(19)); 206 | let _bpb_media = buf[21]; 207 | let _bpb_fat_sz_16 = u16::from_le_bytes(buf.array::<2>(22)); 208 | let _bpb_sec_per_trk = u16::from_le_bytes(buf.array::<2>(24)); 209 | let _bpb_num_heads = u16::from_le_bytes(buf.array::<2>(26)); 210 | let _bpb_hidd_sec = u32::from_le_bytes(buf.array::<4>(28)); 211 | let 
bpb_tot_sec_32 = u32::from_le_bytes(buf.array::<4>(32)); 212 | 213 | if !matches!(_jmp_boot, [0xeb, _, 0x90] | [0xe9, _, _]) { 214 | Err(Error::Broken("JmpBoot"))?; 215 | } 216 | if !matches!(bpb_byts_per_sec, 512 | 1024 | 2048 | 4096) { 217 | Err(Error::Broken("BytsPerSec"))?; 218 | } 219 | if !bpb_sec_per_clus.is_power_of_two() { 220 | Err(Error::Broken("SecPerClus"))?; 221 | } 222 | if bpb_rsvd_sec_cnt == 0 { 223 | Err(Error::Broken("RsvdSecCnt"))?; 224 | } 225 | if _bpb_root_ent_cnt != 0 || _bpb_tot_sec_16 != 0 || _bpb_fat_sz_16 != 0 { 226 | Err(Error::Unsupported("FAT12/16"))?; 227 | } 228 | 229 | let bpb_fat_sz_32 = u32::from_le_bytes(buf.array::<4>(36)); 230 | let _bpb_ext_flags = u16::from_le_bytes(buf.array::<2>(40)); 231 | let _bpb_fs_ver = u16::from_le_bytes(buf.array::<2>(42)); 232 | let bpb_root_clus = u32::from_le_bytes(buf.array::<4>(44)); 233 | let _bpb_fs_info = u16::from_le_bytes(buf.array::<2>(48)); 234 | let _bpb_bk_boot_sec = u16::from_le_bytes(buf.array::<2>(50)); 235 | let _bpb_reserved = buf.array::<12>(52); 236 | let _drv_num = buf[64]; 237 | let _reserved = buf[65]; 238 | let _boot_sig = buf[66]; 239 | let vol_id = u32::from_le_bytes(buf.array::<4>(67)); 240 | let vol_lab = buf.array::<11>(71); 241 | let _fil_sys_type = buf.array::<8>(82); 242 | 243 | if _bpb_fs_ver != 0x0000 { 244 | Err(Error::Unsupported("FSVer"))?; 245 | } 246 | if _bpb_fs_info != 1 { 247 | Err(Error::Broken("FSInfo"))?; 248 | } 249 | if _boot_sig != 0x29 { 250 | Err(Error::Broken("BootSig"))?; 251 | } 252 | 253 | Ok(Self { 254 | _jmp_boot, 255 | _oem_name, 256 | bpb_byts_per_sec, 257 | bpb_sec_per_clus, 258 | bpb_rsvd_sec_cnt, 259 | bpb_num_fats, 260 | _bpb_root_ent_cnt, 261 | _bpb_tot_sec_16, 262 | _bpb_media, 263 | _bpb_fat_sz_16, 264 | _bpb_sec_per_trk, 265 | _bpb_num_heads, 266 | _bpb_hidd_sec, 267 | bpb_tot_sec_32, 268 | bpb_fat_sz_32, 269 | _bpb_ext_flags, 270 | _bpb_fs_ver, 271 | bpb_root_clus, 272 | _bpb_fs_info, 273 | _bpb_bk_boot_sec, 274 | _bpb_reserved, 275 
| _drv_num, 276 | _reserved, 277 | _boot_sig, 278 | vol_id, 279 | vol_lab, 280 | _fil_sys_type, 281 | }) 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /ors-kernel/src/fs/fat/low_level.rs: -------------------------------------------------------------------------------- 1 | use super::{BootSector, BootSectorError, DirEntry, Error, FatEntry, Sector, SliceExt, Volume}; 2 | use crate::fs::volume::{BufferedSectorRef, BufferedVolume}; 3 | use alloc::vec; 4 | use core::fmt; 5 | use log::trace; 6 | 7 | #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)] 8 | pub(super) struct Cluster(usize); 9 | 10 | impl Cluster { 11 | pub(super) fn from_index(index: usize) -> Self { 12 | Self(index) 13 | } 14 | 15 | pub(super) fn index(self) -> usize { 16 | self.0 17 | } 18 | 19 | pub(super) fn offset(self, s: usize) -> Self { 20 | Self(self.0 + s) 21 | } 22 | } 23 | 24 | impl fmt::Display for Cluster { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 26 | self.0.fmt(f) 27 | } 28 | } 29 | 30 | #[derive(Debug)] 31 | pub(super) struct Root { 32 | volume: BufferedVolume, 33 | bs: BootSector, 34 | } 35 | 36 | impl Root { 37 | pub(super) fn new(volume: V) -> Result { 38 | let sector_size = volume.sector_size(); 39 | let mut buf = vec![0; sector_size]; 40 | 41 | volume.read(Sector::from_index(0), buf.as_mut())?; 42 | let bs = BootSector::try_from(buf.as_ref())?; 43 | 44 | if bs.sector_size() != sector_size { 45 | Err(BootSectorError::Broken("BytsPerSec (mismatch)"))?; 46 | } 47 | if volume.sector_count() < bs.total_sector_count() { 48 | Err(BootSectorError::Broken("TotSec (mismatch)"))?; 49 | } 50 | 51 | let volume = BufferedVolume::new(volume); 52 | Ok(Self { volume, bs }) 53 | } 54 | 55 | pub(super) fn commit(&self) -> Result<(), Error> { 56 | Ok(self.volume.commit()?) 
57 | } 58 | 59 | pub(super) fn boot_sector(&self) -> &BootSector { 60 | &self.bs 61 | } 62 | 63 | pub(super) fn fat(&self) -> BufferedFat { 64 | BufferedFat { 65 | root: self, 66 | last: None, 67 | } 68 | } 69 | 70 | pub(super) fn cluster(&self, cluster: Cluster) -> BufferedCluster { 71 | let first_sector = self.bs.cluster_location(cluster); 72 | BufferedCluster { 73 | cluster, 74 | volume: &self.volume, 75 | first_sector, 76 | sector_count: self.bs.cluster_size(), 77 | sector_size: self.bs.sector_size(), 78 | last: None, 79 | } 80 | } 81 | 82 | pub(super) fn chained_cluster(&self, cluster: Cluster) -> ChainedCluster { 83 | ChainedCluster { 84 | root: self, 85 | src: cluster, 86 | } 87 | } 88 | 89 | pub(super) fn dir_entries(&self, cluster: Cluster) -> DirEntries { 90 | DirEntries { 91 | root: self, 92 | cursor: Some((self.cluster(cluster), 0)), 93 | } 94 | } 95 | } 96 | 97 | #[derive(Debug)] 98 | pub(super) struct BufferedFat<'a, V> { 99 | root: &'a Root, 100 | last: Option>, // cached to reduce sector search 101 | } 102 | 103 | impl<'a, V: Volume> BufferedFat<'a, V> { 104 | pub(super) fn entries<'f>(&'f mut self) -> FatEntries<'f, 'a, V> { 105 | FatEntries { 106 | fat: self, 107 | cursor: Some(Cluster(2)), 108 | } 109 | } 110 | 111 | fn entry(&mut self, cluster: Cluster) -> Result<(&BufferedSectorRef<'a>, usize), Error> { 112 | let (sector, offset) = self.root.bs.fat_entry_location(cluster); 113 | if !matches!(self.last, Some(ref r) if r.sector() == sector) { 114 | self.last = Some(self.root.volume.sector(sector)?); 115 | } 116 | Ok((self.last.as_ref().unwrap(), offset)) 117 | } 118 | 119 | pub(super) fn allocate(&mut self) -> Result { 120 | // FIXME: This implementation is too slow since it always searches from the start 121 | for (c, entry) in self.entries() { 122 | if matches!(entry, FatEntry::Unused) { 123 | self.write(c, FatEntry::UsedEoc)?; 124 | return Ok(c); 125 | } 126 | } 127 | Err(Error::Full) 128 | } 129 | 130 | pub(super) fn release(&mut self, c: 
/// Iterator over the FAT entries of available clusters, starting at
/// cluster 2 (the first data cluster in FAT).
#[derive(Debug)]
pub(super) struct FatEntries<'f, 'a, V> {
    fat: &'f mut BufferedFat<'a, V>,
    cursor: Option<Cluster>, // next cluster to visit; None once exhausted
}

impl<'f, 'a, V: Volume> Iterator for FatEntries<'f, 'a, V> {
    type Item = (Cluster, FatEntry);

    fn next(&mut self) -> Option<Self::Item> {
        // Take the cursor so a read error below leaves it as None,
        // permanently ending the iteration.
        let n = core::mem::take(&mut self.cursor)?;
        if self.fat.root.bs.is_cluster_available(n) {
            // Read errors are logged (trace_err) and terminate the iterator.
            let entry = self.fat.read(n).trace_err()?;
            self.cursor = Some(n.offset(1));
            Some((n, entry))
        } else {
            // Past the last available cluster of the volume.
            None
        }
    }
}
self.last = Some(self.volume.sector(sector)?); 195 | } 196 | Ok(self.last.as_ref().unwrap()) 197 | } 198 | 199 | fn sector_range( 200 | &self, 201 | start: usize, 202 | end: usize, 203 | ) -> impl Iterator { 204 | debug_assert!(start <= end && end <= self.size()); 205 | let ss = start / self.sector_size; 206 | let es = end / self.sector_size; 207 | let so = start % self.sector_size; 208 | let eo = end % self.sector_size; 209 | let s = self.sector_size; 210 | (ss..=es).filter_map(move |sector| { 211 | let i = if sector == ss { so } else { 0 }; 212 | let j = if sector == es { eo } else { s }; 213 | (i < j).then(|| (sector, i, j)) 214 | }) 215 | } 216 | 217 | pub(super) fn cluster(&self) -> Cluster { 218 | self.cluster 219 | } 220 | 221 | pub(super) fn size(&self) -> usize { 222 | self.sector_size * self.sector_count 223 | } 224 | 225 | pub(super) fn read(&mut self, offset: usize, mut buf: &mut [u8]) -> Result<(), Error> { 226 | for (sector, i, j) in self.sector_range(offset, offset + buf.len()) { 227 | let s = self.sector(sector)?; 228 | buf[0..j - i].copy_from_slice(&s.bytes()[i..j]); 229 | buf = &mut buf[j - i..]; 230 | } 231 | Ok(()) 232 | } 233 | 234 | pub(super) fn write(&mut self, offset: usize, mut buf: &[u8]) -> Result<(), Error> { 235 | for (sector, i, j) in self.sector_range(offset, offset + buf.len()) { 236 | let s = self.sector(sector)?; 237 | s.bytes()[i..j].copy_from_slice(&buf[0..j - i]); 238 | s.mark_as_dirty(); 239 | buf = &buf[j - i..]; 240 | } 241 | Ok(()) 242 | } 243 | 244 | // for directory 245 | 246 | pub(super) fn dir_entries_count(&self) -> usize { 247 | self.size() / DirEntry::SIZE 248 | } 249 | 250 | pub(super) fn read_dir_entry(&mut self, index: usize) -> Result { 251 | let mut buf = [0; DirEntry::SIZE]; 252 | self.read(index * DirEntry::SIZE, buf.as_mut())?; 253 | Ok(buf.into()) 254 | } 255 | 256 | pub(super) fn write_dir_entry(&mut self, index: usize, entry: DirEntry) -> Result<(), Error> { 257 | let buf: [u8; 32] = entry.into(); 258 | 
/// A view of the cluster chained *after* a source cluster `src`,
/// as recorded in `src`'s FAT entry.
#[derive(Debug)]
pub(super) struct ChainedCluster<'a, V> {
    root: &'a Root<V>,
    src: Cluster,
}

impl<'a, V: Volume> ChainedCluster<'a, V> {
    /// Reads `src`'s FAT entry and returns the cluster it chains to, if any.
    fn read(&self) -> Result<Option<Cluster>, Error> {
        Ok(self.root.fat().read(self.src)?.chain())
    }

    /// Returns the cluster chained after `src`, or `None` at end of chain.
    pub(super) fn get(self) -> Result<Option<BufferedCluster<'a, V>>, Error> {
        Ok(self.read()?.map(|c| self.root.cluster(c)))
    }

    /// Returns the cluster chained after `src`; when the chain ends at `src`,
    /// allocates a fresh cluster and links it into `src`'s FAT entry.
    pub(super) fn prepare(self) -> Result<BufferedCluster<'a, V>, Error> {
        match self.read()? {
            Some(c) => Ok(self.root.cluster(c)),
            None => {
                let c = self.root.fat().allocate()?;
                self.root.fat().write(self.src, c.into())?;
                Ok(self.root.cluster(c))
            }
        }
    }

    /// Releases every cluster chained after `src`.
    ///
    /// NOTE(review): `src`'s own entry is overwritten with `FatEntry::Unused`
    /// (marking `src` itself free) rather than `FatEntry::UsedEoc`; if the
    /// intent is to truncate the chain while keeping `src` allocated, this
    /// looks suspicious — confirm against the callers.
    pub(super) fn release(self) -> Result<(), Error> {
        if let Some(c) = self.read()? {
            self.root.fat().write(self.src, FatEntry::Unused)?;
            self.root.fat().release(c)?;
        }
        Ok(())
    }
}
trace_err(self) -> Self::Result { 332 | match self { 333 | Ok(r) => Some(r), 334 | Err(e) => { 335 | trace!("{}", e); 336 | None 337 | } 338 | } 339 | } 340 | } 341 | -------------------------------------------------------------------------------- /ors-kernel/src/task.rs: -------------------------------------------------------------------------------- 1 | use crate::context::{Context, EntryPoint}; 2 | use crate::cpu::Cpu; 3 | use crate::interrupts::{ticks, Cli}; 4 | use crate::sync::spin::{Spin, SpinGuard}; 5 | use alloc::boxed::Box; 6 | use alloc::collections::{BTreeMap, BinaryHeap, VecDeque}; 7 | use alloc::vec; 8 | use alloc::vec::Vec; 9 | use core::cell::UnsafeCell; 10 | use core::cmp::Reverse; 11 | use core::mem::MaybeUninit; 12 | use core::sync::atomic::{AtomicI64, AtomicU64, Ordering}; 13 | use log::trace; 14 | use spin::Once; 15 | 16 | const DEFAULT_STACK_SIZE: usize = 4096 * 256; // 1MiB 17 | 18 | static SCHEDULER: Once = Once::new(); 19 | 20 | pub fn initialize_scheduler() { 21 | SCHEDULER.call_once(|| { 22 | trace!("INITIALIZING Task Scheduler"); 23 | TaskScheduler::new() 24 | }); 25 | } 26 | 27 | pub fn scheduler() -> &'static TaskScheduler { 28 | SCHEDULER 29 | .get() 30 | .expect("task::scheduler is called before task::initialize_scheduler") 31 | } 32 | 33 | #[derive(Debug)] 34 | pub struct TaskScheduler { 35 | queue: Spin, 36 | task_id_gen: AtomicU64, 37 | wait_channel_gen: AtomicI64, 38 | } 39 | 40 | impl TaskScheduler { 41 | pub fn new() -> Self { 42 | Self { 43 | queue: Spin::new(TaskQueue::new()), 44 | task_id_gen: AtomicU64::new(0), 45 | wait_channel_gen: AtomicI64::new(-1), 46 | } 47 | } 48 | 49 | fn issue_task_id(&self) -> TaskId { 50 | TaskId(self.task_id_gen.fetch_add(1, Ordering::SeqCst)) 51 | } 52 | 53 | pub fn issue_wait_channel(&self) -> WaitChannel { 54 | WaitChannel(self.wait_channel_gen.fetch_sub(1, Ordering::SeqCst)) 55 | } 56 | 57 | pub fn add( 58 | &self, 59 | priority: Priority, 60 | entry_point: extern "C" fn(u64) -> !, 61 | 
    /// Runs `scheduling_op` with the task queue locked and, depending on its
    /// result, switches execution to another task.
    ///
    /// `scheduling_op` returns `(Some(switch), ret)` to give up the CPU in
    /// the given `Switch` mode, or `(None, ret)` to cancel the switch; `ret`
    /// is handed back to the caller once this context runs again.
    /// `other_cli` is the number of `Cli` guards the caller already holds
    /// (e.g. 1 when called from `block` while holding a spin-lock guard).
    pub fn switch<T>(
        &self,
        scheduling_op: impl FnOnce() -> (Option<Switch>, T),
        other_cli: u32,
    ) -> T {
        let cli = Cli::new(); // (*1)

        let cpu_state = Cpu::current().state();
        assert_eq!(cpu_state.lock().thread_state.ncli, 1 + other_cli); // To ensure that this context does not hold locks (*1)

        let cpu_task = {
            // This assignment is necessary to avoid deadlocks
            let task = cpu_state.lock().running_task.take();
            // A context that was never scheduled before is wrapped on the fly.
            task.unwrap_or_else(|| Task::new_current(self.issue_task_id(), Priority::MIN))
        };
        // FIXME: This implicitly relies on the fact that cpu_task is retained (not dropped) by self.queue
        let current_ctx = cpu_task.ctx().get();

        let (cpu_task, ret) = {
            let mut queue_lock = self.queue.lock();
            // scheduling_op is called while self.queue is locked
            let (switch, ret) = scheduling_op();
            let task = match switch {
                Some(switch) => queue_lock.dequeue(cpu_task, switch),
                // Task switching is cancelled, but we need to restore cpu_state.running_task
                None => cpu_task,
            };
            (task, ret)
        };
        let next_ctx = cpu_task.ctx().get();
        // running_task must still be empty: we took it above.
        assert!(cpu_state.lock().running_task.replace(cpu_task).is_none());

        if current_ctx != next_ctx {
            // SAFETY-relevant: saves the current context and resumes next_ctx;
            // execution continues here when this task is scheduled again.
            unsafe { Context::switch(next_ctx, current_ctx) };
        }

        drop(cli);
        ret
    }
115 | pub fn block(&self, chan: WaitChannel, timeout: Option, guard: SpinGuard<'_, T>) { 116 | self.switch( 117 | move || { 118 | drop(guard); 119 | (Some(Switch::Blocked(chan, timeout)), ()) 120 | }, 121 | 1, 122 | ) 123 | } 124 | 125 | pub fn sleep(&self, ticks: usize) { 126 | self.switch(|| (Some(Switch::Sleep(ticks)), ()), 0) 127 | } 128 | 129 | pub fn release(&self, chan: WaitChannel) { 130 | self.queue.lock().release(chan); 131 | } 132 | 133 | pub fn elapse(&self) { 134 | self.queue.lock().elapse(); 135 | } 136 | } 137 | 138 | #[derive(Debug, Clone, Copy)] 139 | pub enum Switch { 140 | Blocked(WaitChannel, Option), 141 | Sleep(usize), 142 | Yield, 143 | } 144 | 145 | #[derive(Debug)] 146 | struct TaskQueue { 147 | pending_id_gen: u64, 148 | runnable_tasks: [VecDeque; Priority::SIZE], 149 | pending_tasks: BTreeMap, 150 | blocks: BTreeMap>, 151 | timeouts: BinaryHeap)>>, 152 | } 153 | 154 | impl TaskQueue { 155 | fn new() -> Self { 156 | let mut runnable_tasks = MaybeUninit::uninit_array(); 157 | for tasks in &mut runnable_tasks[..] { 158 | tasks.write(VecDeque::new()); 159 | } 160 | Self { 161 | pending_id_gen: 0, 162 | runnable_tasks: unsafe { MaybeUninit::array_assume_init(runnable_tasks) }, 163 | pending_tasks: BTreeMap::new(), 164 | blocks: BTreeMap::new(), 165 | timeouts: BinaryHeap::new(), 166 | } 167 | } 168 | 169 | fn issue_pending_id(&mut self) -> PendingId { 170 | let id = PendingId(self.pending_id_gen); 171 | self.pending_id_gen += 1; 172 | id 173 | } 174 | 175 | fn enqueue(&mut self, task: Task) { 176 | self.runnable_tasks[task.priority().index()].push_back(task); 177 | } 178 | 179 | /// Dequeuing requires a task that is currently running. 
    /// Picks the next task to run and files `current_task` according to
    /// `current_switch`. Returns `current_task` unchanged when no other
    /// runnable task qualifies.
    fn dequeue(&mut self, current_task: Task, current_switch: Switch) -> Task {
        // On Yield the current task stays runnable, so only switch to tasks
        // of equal or higher priority; when blocking/sleeping any priority do.
        let minimum_level_index = match current_switch {
            Switch::Yield => current_task.priority().index(), // current_task is still runnable
            _ => 0,
        };

        // next_task is runnable, has the highest priority, and is at the front of the queue
        if let Some(next_task) = self
            .runnable_tasks
            .iter_mut()
            .enumerate()
            .rev()
            .take_while(|(i, _)| minimum_level_index <= *i)
            .find_map(|(_, queue)| queue.pop_front())
        {
            // current_task.ctx will be saved "after" dequeuing:
            // TaskScheduler::switch -> Context::switch -> switch_context (asm.s)
            unsafe { &*current_task.ctx().get() }.mark_as_not_saved();

            match current_switch {
                Switch::Blocked(chan, timeout) => {
                    // Park the task on `chan`; an optional timeout makes it
                    // runnable again via elapse() even without release().
                    let id = self.issue_pending_id();
                    self.pending_tasks.insert(id, current_task);
                    self.blocks.entry(chan).or_default().push(id);
                    if let Some(t) = timeout {
                        self.timeouts.push(Reverse((ticks() + t, id, Some(chan))));
                    }
                }
                Switch::Sleep(t) => {
                    // Park the task until `ticks() + t` with no wait channel.
                    let id = self.issue_pending_id();
                    self.pending_tasks.insert(id, current_task);
                    self.timeouts.push(Reverse((ticks() + t, id, None)));
                }
                Switch::Yield => {
                    self.runnable_tasks[current_task.priority().index()].push_back(current_task);
                }
            }

            // Wait until next_task's context has actually been saved by the
            // CPU that last ran it before resuming it here.
            unsafe { &*next_task.ctx().get() }.wait_saved();
            next_task
        } else {
            current_task // There are no tasks to switch
        }
    }
/// Identifier of a condition that blocked tasks wait on.
///
/// Channels issued by `TaskScheduler::issue_wait_channel` are negative;
/// pointer-derived channels (below) are masked non-negative, so the two
/// namespaces never collide.
#[repr(transparent)]
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
pub struct WaitChannel(i64);

impl WaitChannel {
    /// Create `WaitChannel` from a pointer.
    /// The uniqueness of the resulting `WaitChannel` depends on the uniqueness of the pointer.
    pub fn from_ptr<T>(ptr: *const T) -> Self {
        Self::from_ptr_index(ptr, 0)
    }

    /// Like [`WaitChannel::from_ptr`], but derives up to `u32::MAX` distinct
    /// channels from a single pointer via `index`.
    pub fn from_ptr_index<T>(ptr: *const T, index: u32) -> Self {
        // wrapping_add avoids a debug-build overflow panic for pointer
        // values near i64::MAX; the i64::MAX mask clears the sign bit so
        // the value stays non-negative.
        Self((ptr as *const () as i64).wrapping_add(index as i64) & i64::MAX)
    }
}
/// Entry function of a task, wrapped to adapt it to [`EntryPoint`].
#[derive(Debug)]
struct TaskEntryPoint(extern "C" fn(u64) -> !);

impl EntryPoint for TaskEntryPoint {
    type Arg = (TaskId, u64);

    fn prepare_context(self, ctx: &mut Context, arg: Self::Arg) {
        // The fresh context starts executing at task_init; rdi/rsi/rdx carry
        // its three arguments (first integer-argument registers of the
        // System V AMD64 convention — assumed by switch_context in asm.s;
        // confirm there), matching task_init(f, TaskId, task_arg).
        ctx.rip = task_init as u64;
        ctx.rdi = self.0 as u64; // f: the actual task body
        ctx.rsi = arg.0 .0; // TaskId as raw u64
        ctx.rdx = arg.1; // task_arg forwarded to f
    }
}
/// Scheduling priority of a task; `L0` is the lowest, `L3` the highest
/// (`Ord` follows declaration order).
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy, Hash)]
pub enum Priority {
    L0,
    L1,
    L2,
    L3,
}

impl Priority {
    /// Index of this priority level, in `0..Priority::SIZE`.
    pub fn index(self) -> usize {
        // The variants carry no explicit discriminants, so the fieldless-enum
        // cast yields 0 for L0 through 3 for L3.
        self as usize
    }

    pub const MIN: Self = Self::L0;
    pub const MAX: Self = Self::L3;
    pub const SIZE: usize = 4;
}
    /// Allocates physically-contiguous frames for the queue's descriptor
    /// table / available ring / used ring, registers them with the device,
    /// and links all descriptors into one initial free chain.
    ///
    /// # Safety
    /// `configuration` must be a valid legacy virtio configuration for a
    /// device that owns the `queue_index`-th queue.
    pub unsafe fn new(
        configuration: Configuration,
        queue_index: u16,
        msi_x_vector: Option<u16>,
    ) -> Result<Self, &'static str> {
        configuration.set_queue_select(queue_index);
        // A queue size of zero means the selected queue does not exist.
        let queue_size = configuration.queue_size() as usize;
        if queue_size == 0 {
            return Err("Queue is unavailable");
        }

        let layout = Self::compute_layout(queue_size);
        let frame = frame_manager()
            .allocate(layout.num_frames)
            .map_err(|_| "Cannot allocate frame for this queue")?;

        let base_ptr: *mut u8 = as_virt_addr(frame.phys_addr()).unwrap().as_mut_ptr();
        ptr::write_bytes(base_ptr, 0, Frame::SIZE * layout.num_frames); // zeroing

        // The device takes the queue address as a frame (page) number.
        configuration.set_queue_address((frame.phys_addr().as_u64() / Frame::SIZE as u64) as u32);

        if let Some(vector) = msi_x_vector {
            configuration.set_queue_msix_vector(vector);
        }

        let descriptor_table = base_ptr.add(layout.descriptor_table_offset) as *mut Descriptor;
        let available_ring = base_ptr.add(layout.available_ring_offset) as *mut AvailableRing;
        let used_ring = base_ptr.add(layout.used_ring_offset) as *mut UsedRing;

        // Build an initial descriptor-chain: every descriptor points at the
        // next one; the last keeps next = None (memory was zeroed above).
        for i in 0..queue_size - 1 {
            let descriptor = &mut *descriptor_table.add(i);
            descriptor.set_next(Some((i + 1) as u16));
        }

        let mut buffer_associated_data = Vec::new();
        buffer_associated_data.resize_with(queue_size, || None);

        Ok(Self {
            queue_size,
            frame,
            descriptor_table,
            available_ring,
            used_ring,

            last_used_idx: 0,
            first_free_descriptor: 0,
            num_free_descriptors: queue_size,
            buffer_associated_data,
        })
    }
transport; henceforth referred to as Queue Align) and consists 83 | // > of three parts: 84 | // > | Descriptor Table | Available Ring (..padding..) | Used Ring | 85 | fn align(x: usize) -> usize { 86 | (x + Frame::SIZE - 1) & !(Frame::SIZE - 1) 87 | } 88 | 89 | let descriptor_table_size = 16 * queue_size; 90 | let available_ring_size = 6 + 2 * queue_size; 91 | let used_ring_size = 6 + 8 * queue_size; 92 | let a = align(descriptor_table_size + available_ring_size); 93 | let b = align(used_ring_size); 94 | VirtQueueLayout { 95 | num_frames: (a + b) / Frame::SIZE, 96 | descriptor_table_offset: 0, 97 | available_ring_offset: descriptor_table_size, 98 | used_ring_offset: a, 99 | } 100 | } 101 | 102 | fn descriptor_at(&self, i: u16) -> *mut Descriptor { 103 | self.descriptor_table.wrapping_add(i as usize) 104 | } 105 | 106 | fn available_ring_idx(&self) -> *mut u16 { 107 | unsafe { &mut (*self.available_ring).idx } 108 | } 109 | 110 | fn available_ring_at(&self, i: u16) -> *mut u16 { 111 | unsafe { 112 | (*self.available_ring) 113 | .ring 114 | .as_mut_ptr() 115 | .wrapping_add(i as usize % self.queue_size) 116 | } 117 | } 118 | 119 | fn used_ring_idx(&self) -> *mut u16 { 120 | unsafe { &mut (*self.used_ring).idx } 121 | } 122 | 123 | fn used_ring_at(&self, i: u16) -> *mut u32 { 124 | &mut unsafe { 125 | (*(*self.used_ring) 126 | .ring 127 | .as_mut_ptr() 128 | .wrapping_add(i as usize % self.queue_size)) 129 | .idx 130 | } 131 | } 132 | 133 | /// Transfer the buffers to the device by allocating descriptors and put them to the available ring. 134 | /// This method does not send an Available Buffer Notification. 
    /// Transfers the buffers to the device as one descriptor chain and
    /// publishes its head on the available ring. Fails (returning the
    /// untouched iterator) when fewer than `buffers.len()` descriptors are
    /// free. Does not send an Available Buffer Notification.
    pub fn transfer<I: ExactSizeIterator<Item = Buffer<T>>>(
        &mut self,
        buffers: I,
    ) -> Result<(), I> {
        if self.num_free_descriptors < buffers.len() {
            // not enough descriptors at the moment
            return Err(buffers);
        }

        let first = self.first_free_descriptor;
        let mut last = None;

        for buffer in buffers {
            let i = self.first_free_descriptor;
            last = Some(self.first_free_descriptor);

            // buffers[0] <-> first
            // buffers[1] <-> descriptor[first].next()
            // buffers[2] <-> descriptor[descriptor[first].next()].next()
            // ...
            unsafe { (*self.descriptor_at(i)).refer(buffer.addr, buffer.len, buffer.write) };
            // Each in-flight descriptor owns exactly one associated_data slot.
            assert!(self.buffer_associated_data[i as usize]
                .replace(buffer.associated_data)
                .is_none());

            // Advance the free list; its shape guards against iterators whose
            // len() disagrees with the actual number of items.
            match self.num_free_descriptors {
                0 => panic!("virtio: buffers.len() is different from the actual length"),
                1 => {
                    assert!(unsafe { (*self.descriptor_at(i)).next() }.is_none());
                    self.num_free_descriptors = 0;
                }
                _ => {
                    self.first_free_descriptor =
                        unsafe { (*self.descriptor_at(i)).next() }.unwrap();
                    self.num_free_descriptors -= 1;
                }
            }
        }

        if let Some(last) = last {
            // unlink descriptors-chain
            unsafe { (*self.descriptor_at(last)).set_next(None) };
            fence(Ordering::SeqCst);

            // enqueue: the chain head must be visible in the ring before idx
            // is bumped, hence the fences between the two stores.
            unsafe { *self.available_ring_at(*self.available_ring_idx()) = first };
            fence(Ordering::SeqCst);
            unsafe { *self.available_ring_idx() = (*self.available_ring_idx()).wrapping_add(1) };
            fence(Ordering::SeqCst);
        }

        Ok(())
    }
    /// Collects the processed buffers by consuming the used ring, returning
    /// each buffer's associated data through `handle` and putting its
    /// descriptors back on the free list. Supposed to be called from the
    /// Used Buffer Notification (interrupt).
    pub fn collect(&mut self, mut handle: impl FnMut(T)) {
        // Drain every element the device published since the last call.
        while self.last_used_idx != unsafe { *self.used_ring_idx() } {
            fence(Ordering::SeqCst);
            // dequeue: i is the head descriptor of a completed chain
            let mut i = unsafe { *self.used_ring_at(self.last_used_idx) } as u16;
            self.last_used_idx = self.last_used_idx.wrapping_add(1);

            // free descriptors: push each chain member onto the front of the
            // free list, handing its associated data back to the caller
            loop {
                let prev_first_free_descriptor = match self.num_free_descriptors {
                    0 => None,
                    _ => Some(self.first_free_descriptor),
                };
                self.first_free_descriptor = i;
                self.num_free_descriptors += 1;
                // Remember the chain link before overwriting `next` with the
                // free-list link.
                let chain = unsafe { (*self.descriptor_at(i)).next() };
                unsafe { (*self.descriptor_at(i)).set_next(prev_first_free_descriptor) };
                let associated_data = self.buffer_associated_data[i as usize].take();
                handle(associated_data.unwrap());

                match chain {
                    Some(next) => i = next,
                    None => break,
                }
            }
        }
    }
244 | pub associated_data: T, 245 | } 246 | 247 | impl Buffer { 248 | pub fn from_ref(d: &D, associated_data: T) -> Option { 249 | Some(Self::new( 250 | as_phys_addr(x64::VirtAddr::from_ptr(d))?, 251 | mem::size_of::(), 252 | false, 253 | associated_data, 254 | )) 255 | } 256 | 257 | pub fn from_ref_mut(d: &mut D, associated_data: T) -> Option { 258 | Some(Self::new( 259 | as_phys_addr(x64::VirtAddr::from_ptr(d))?, 260 | mem::size_of::(), 261 | true, 262 | associated_data, 263 | )) 264 | } 265 | 266 | pub fn from_bytes(bytes: &[u8], associated_data: T) -> Option { 267 | Some(Self::new( 268 | as_phys_addr(x64::VirtAddr::from_ptr(&bytes[0]))?, 269 | bytes.len(), 270 | false, 271 | associated_data, 272 | )) 273 | } 274 | 275 | pub fn from_bytes_mut(bytes: &mut [u8], associated_data: T) -> Option { 276 | Some(Self::new( 277 | as_phys_addr(x64::VirtAddr::from_ptr(&mut bytes[0]))?, 278 | bytes.len(), 279 | true, 280 | associated_data, 281 | )) 282 | } 283 | } 284 | 285 | // See VirtIO specification 286 | #[repr(C)] 287 | struct Descriptor { 288 | addr: u64, // guest-physical address 289 | len: u32, // length 290 | flags: u16, 291 | next: u16, // the buffers can be chained via `next` 292 | } 293 | 294 | impl Descriptor { 295 | fn refer(&mut self, addr: x64::PhysAddr, len: usize, write: bool) { 296 | self.addr = addr.as_u64(); 297 | self.len = len as u32; 298 | if write { 299 | self.flags |= Self::WRITE; 300 | } else { 301 | self.flags &= !Self::WRITE; 302 | } 303 | } 304 | 305 | fn next(&self) -> Option { 306 | if (self.flags & Self::NEXT) != 0 { 307 | Some(self.next) 308 | } else { 309 | None 310 | } 311 | } 312 | 313 | fn set_next(&mut self, next: Option) { 314 | match next { 315 | Some(next) => { 316 | self.flags |= Self::NEXT; 317 | self.next = next; 318 | } 319 | None => { 320 | self.flags &= !Self::NEXT; 321 | self.next = 0; 322 | } 323 | } 324 | } 325 | 326 | const NEXT: u16 = 1; // continuing via the next field 327 | const WRITE: u16 = 2; // device write-only (vs 
// Ring layouts from the VirtIO legacy interface; the zero-length `ring`
// arrays are placeholders — the real length is the queue size, and slots are
// reached through raw pointer arithmetic (see available_ring_at/used_ring_at).

// driver write-only
#[repr(C)]
struct AvailableRing {
    _flags: u16, // This can be used to suppress Used Buffer Notification (interrupt)
    idx: u16, // free-running index of the next slot the driver will fill
    ring: [u16; 0], // each element is the head index of a descriptor chain
}

// driver read-only
#[repr(C)]
struct UsedRing {
    _flags: u16, // This can be used by device to suppress Available Buffer Notification
    idx: u16, // free-running index of the next slot the device will fill
    ring: [UsedElem; 0],
    // used_event: u16,
}

// One entry of the used ring.
#[repr(C)]
struct UsedElem {
    idx: u32, // head index of the completed descriptor chain
    _len: u32, // Length of the Descriptor-chain. This value is unreliable in legacy interface.
}
/// A 32-bit value exchanged through the PCI CONFIG_DATA port (0x0CFC).
/// Which configuration register it refers to is determined by the address
/// previously written to CONFIG_ADDRESS.
#[derive(Debug, Clone, Copy)]
struct ConfigData(u32);

impl ConfigData {
    // Safety (both methods): the caller must have written a valid
    // ConfigAddress first, and port access must not race with other CPUs.
    unsafe fn read() -> Self {
        ConfigData(CONFIG_DATA.read())
    }

    unsafe fn write(self) {
        CONFIG_DATA.write(self.0)
    }
}
    /// Reads the `index`-th Base Address Register, decoding it as either an
    /// I/O port or a (possibly 64-bit) memory address.
    pub unsafe fn read_bar(self, index: u8) -> Bar {
        assert!(index < self.num_bars());

        // https://wiki.osdev.org/PCI#Base_Address_Registers
        let bar = self.read(base_address_register_address(index));
        if (bar & 0x1) != 0 {
            // Bit 0 set: I/O space BAR; bits 1:0 are not part of the address.
            let bar = (bar & !0x3) as u16;
            Bar::IoPort(bar)
        } else {
            if (bar & 0x4) != 0 {
                // Type field = 64-bit: the next BAR holds the upper half.
                // NOTE(review): index + 1 is not checked against num_bars();
                // a 64-bit BAR in the last slot would read past the BAR
                // region — confirm callers never do that.
                let bar_lower = (bar as u64) & !0xf;
                let bar_upper = self.read(base_address_register_address(index + 1));
                let bar_upper = (bar_upper as u64) << 32;
                Bar::MemoryAddress(bar_lower | bar_upper)
            } else {
                // 32-bit memory BAR; bits 3:0 carry type/prefetch flags.
                let bar = (bar as u64) & !0xf;
                Bar::MemoryAddress(bar)
            }
        }
    }
| } 170 | 171 | pub unsafe fn subsystem_id(self) -> u16 { 172 | assert_eq!(self.header_type(), 0x00); 173 | (self.read(0x2C) >> 16) as u16 174 | } 175 | 176 | pub unsafe fn capability_pointer(self) -> Option { 177 | if matches!(self.header_type(), 0x00 | 0x01) && (self.status() & 0x16) != 0 { 178 | Some(self.read(0x34) as u8) 179 | } else { 180 | None 181 | } 182 | } 183 | 184 | pub unsafe fn capabilities(self) -> Capabilities { 185 | Capabilities::new(self, 0) 186 | } 187 | 188 | pub unsafe fn msi_x(self) -> Option { 189 | self.capabilities().find_map(|c| c.msi_x()) 190 | } 191 | 192 | pub unsafe fn interrupt_line(self) -> u8 { 193 | self.read(0x3C) as u8 194 | } 195 | 196 | pub unsafe fn interrupt_pin(self) -> u8 { 197 | (self.read(0x3C) >> 8) as u8 198 | } 199 | 200 | pub unsafe fn scan() -> Result, ScanError> { 201 | let mut devices = Vec::new(); 202 | 203 | // Checks whether the host bridge (bus=0, device=0) is a multifunction device 204 | if Self::new(0, 0, 0).is_single_function() { 205 | Self::scan_bus(0, &mut devices)?; 206 | } else { 207 | // Each host bridge with function=N is responsible for bus=N 208 | for function in 0..8 { 209 | if Self::new(0, 0, function).vendor_id() != 0xffff { 210 | Self::scan_bus(function, &mut devices)?; 211 | } 212 | } 213 | } 214 | Ok(devices) 215 | } 216 | 217 | unsafe fn scan_bus(bus: u8, dest: &mut Vec) -> Result<(), ScanError> { 218 | for device in 0..32 { 219 | if Self::new(bus, device, 0).vendor_id() != 0xffff { 220 | Self::scan_device(bus, device, dest)?; 221 | } 222 | } 223 | Ok(()) 224 | } 225 | 226 | unsafe fn scan_device( 227 | bus: u8, 228 | device: u8, 229 | dest: &mut Vec, 230 | ) -> Result<(), ScanError> { 231 | Self::scan_function(bus, device, 0, dest)?; 232 | if !Self::new(bus, device, 0).is_single_function() { 233 | for function in 1..8 { 234 | if Self::new(bus, device, function).vendor_id() != 0xffff { 235 | Self::scan_function(bus, device, function, dest)?; 236 | } 237 | } 238 | } 239 | Ok(()) 240 | } 241 | 
242 | unsafe fn scan_function( 243 | bus: u8, 244 | device: u8, 245 | function: u8, 246 | dest: &mut Vec, 247 | ) -> Result<(), ScanError> { 248 | let d = Self::new(bus, device, function); 249 | dest.push(d).map_err(|_| ScanError::Full)?; 250 | 251 | if d.device_type().is_standard_pci_to_pci_bridge() { 252 | let (_, secondary_bus) = d.bus_numbers(); 253 | Self::scan_bus(secondary_bus, dest)?; 254 | } 255 | 256 | Ok(()) 257 | } 258 | } 259 | 260 | #[derive(PartialEq, Eq, Debug, Clone)] 261 | pub enum Bar { 262 | MemoryAddress(u64), 263 | IoPort(u16), 264 | } 265 | 266 | impl Bar { 267 | pub fn mmio_base(self) -> Option { 268 | match self { 269 | Bar::MemoryAddress(addr) => Some(addr as usize), 270 | Bar::IoPort(_) => None, 271 | } 272 | } 273 | 274 | pub fn io_port(self) -> Option { 275 | match self { 276 | Bar::MemoryAddress(_) => None, 277 | Bar::IoPort(port) => Some(port), 278 | } 279 | } 280 | } 281 | 282 | #[derive(Debug, Clone, Copy, new)] 283 | pub struct DeviceType { 284 | pub class_code: u8, 285 | pub subclass: u8, 286 | pub prog_interface: u8, 287 | } 288 | 289 | impl DeviceType { 290 | pub fn is_standard_pci_to_pci_bridge(self) -> bool { 291 | self.class_code == 0x06 && self.subclass == 0x04 292 | } 293 | 294 | pub fn is_xhci(self) -> bool { 295 | self.class_code == 0x0c && self.subclass == 0x03 && self.prog_interface == 0x30 296 | } 297 | } 298 | 299 | fn base_address_register_address(index: u8) -> u8 { 300 | assert!(index < 6); 301 | 0x10 + 4 * index 302 | } 303 | 304 | #[derive(Debug, Clone, Copy, new)] 305 | pub struct Capabilities { 306 | device: Device, 307 | pointer: u8, 308 | } 309 | 310 | impl Iterator for Capabilities { 311 | type Item = Capability; 312 | 313 | fn next(&mut self) -> Option { 314 | let p = if self.pointer == 0 { 315 | unsafe { self.device.capability_pointer() }? 316 | } else { 317 | unsafe { Capability::new(self.device, self.pointer).next_capability_pointer() }? 
318 | }; 319 | self.pointer = p; 320 | Some(Capability::new(self.device, p)) 321 | } 322 | } 323 | 324 | #[derive(Debug, Clone, Copy, new)] 325 | pub struct Capability { 326 | device: Device, 327 | pointer: u8, 328 | } 329 | 330 | impl Capability { 331 | pub unsafe fn id(self) -> u8 { 332 | self.device.read(self.pointer) as u8 333 | } 334 | 335 | pub unsafe fn is_msi_x(self) -> bool { 336 | self.id() == 0x11 337 | } 338 | 339 | pub unsafe fn is_vendor_specific(self) -> bool { 340 | self.id() == 0x09 341 | } 342 | 343 | pub unsafe fn msi_x(self) -> Option { 344 | if self.is_msi_x() { 345 | Some(MsiX::new(self.device, self.pointer)) 346 | } else { 347 | None 348 | } 349 | } 350 | 351 | pub unsafe fn next_capability_pointer(self) -> Option { 352 | match (self.device.read(self.pointer) >> 8) as u8 { 353 | 0 => None, 354 | p => Some(p), 355 | } 356 | } 357 | } 358 | 359 | #[derive(Debug, Clone, Copy, new)] 360 | pub struct MsiX { 361 | device: Device, 362 | pointer: u8, 363 | } 364 | 365 | impl MsiX { 366 | unsafe fn message_control(self) -> u16 { 367 | (self.device.read(self.pointer) >> 16) as u16 368 | } 369 | 370 | pub unsafe fn is_enabled(self) -> bool { 371 | (self.device.read(self.pointer) & (1 << 31)) != 0 372 | } 373 | 374 | pub unsafe fn enable(self) { 375 | let value = self.device.read(self.pointer) | (1 << 31); 376 | self.device.write(self.pointer, value) 377 | } 378 | 379 | pub unsafe fn table_size(self) -> usize { 380 | (self.message_control() & 0x7ff) as usize + 1 381 | } 382 | 383 | /// Table BAR Indicator 384 | unsafe fn table_bir(self) -> u8 { 385 | self.device.read(self.pointer + 0x04) as u8 386 | } 387 | 388 | unsafe fn table_offset(self) -> u32 { 389 | self.device.read(self.pointer + 0x04) >> 8 390 | } 391 | 392 | unsafe fn table_bar(self) -> Bar { 393 | self.device.read_bar(self.table_bir()) 394 | } 395 | 396 | pub unsafe fn table(self) -> MsiXTable { 397 | let addr = self.table_bar().mmio_base().unwrap() + self.table_offset() as usize; 398 | 
MsiXTable { 399 | ptr: addr as *mut u32, 400 | len: self.table_size(), 401 | } 402 | } 403 | 404 | /// Pending Bit Array BAR Indicator 405 | pub unsafe fn pba_bir(self) -> u8 { 406 | self.device.read(self.pointer + 0x08) as u8 407 | } 408 | 409 | pub unsafe fn pba_offset(self) -> u32 { 410 | self.device.read(self.pointer + 0x08) >> 8 411 | } 412 | } 413 | 414 | #[derive(Debug, Clone, Copy)] 415 | pub struct MsiXTable { 416 | ptr: *mut u32, 417 | len: usize, 418 | } 419 | 420 | impl MsiXTable { 421 | pub fn len(self) -> usize { 422 | self.len 423 | } 424 | 425 | pub fn entry(self, index: usize) -> MsiXTableEntry { 426 | assert!(index < self.len()); 427 | MsiXTableEntry { 428 | ptr: unsafe { self.ptr.add(4 * index) }, 429 | } 430 | } 431 | 432 | pub fn entries(self) -> impl Iterator { 433 | (0..self.len()).map(move |i| self.entry(i)) 434 | } 435 | } 436 | 437 | #[derive(Debug, Clone, Copy)] 438 | pub struct MsiXTableEntry { 439 | ptr: *mut u32, 440 | } 441 | 442 | impl MsiXTableEntry { 443 | pub unsafe fn enable(self, lapic_id: u32, vector: u32) { 444 | assert!(lapic_id < 256); 445 | assert!(32 <= vector && vector <= 254); 446 | 447 | const ADDRESS_SUFFIX: u32 = 0xfee << 20; 448 | let reserved_bits = self.message_address() & 0xff0; 449 | self.set_message_address((lapic_id << 12) | ADDRESS_SUFFIX | reserved_bits); // TODO: Redirection Hint | Destination Mode (See Intel SDM) 450 | const LEVEL: u32 = 1 << 15; // Level-triggered (vs edge-) 451 | let reserved_bits = self.message_data() & 0xffff3800; 452 | self.set_message_data(vector | LEVEL | reserved_bits); // TODO: Delivery Mode (See Intel SDM) 453 | let reserved_bits = self.vector_control() & !1; // unmask 454 | self.set_vector_control(reserved_bits); 455 | } 456 | 457 | pub unsafe fn disable(self) { 458 | let value = self.vector_control() | 1; // mask 459 | self.set_vector_control(value); 460 | } 461 | 462 | unsafe fn message_address(self) -> u32 { 463 | // NOTE: It seems upper 32bits of Message address are not used 
in x86_64 464 | ptr::read_volatile(self.ptr) 465 | } 466 | 467 | unsafe fn set_message_address(self, value: u32) { 468 | ptr::write_volatile(self.ptr, value) 469 | } 470 | 471 | unsafe fn message_data(self) -> u32 { 472 | ptr::read_volatile(self.ptr.add(2)) 473 | } 474 | 475 | unsafe fn set_message_data(self, value: u32) { 476 | ptr::write_volatile(self.ptr.add(2), value) 477 | } 478 | 479 | unsafe fn vector_control(self) -> u32 { 480 | ptr::read_volatile(self.ptr.add(3)) 481 | } 482 | 483 | unsafe fn set_vector_control(self, value: u32) { 484 | ptr::write_volatile(self.ptr.add(3), value) 485 | } 486 | } 487 | --------------------------------------------------------------------------------