├── .arcconfig ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── abi ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── src │ ├── caddr.rs │ └── lib.rs ├── default.nix ├── kernel ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Makefile ├── README.md └── src │ ├── arch │ └── x86_64 │ │ ├── addr.rs │ │ ├── cap │ │ ├── mod.rs │ │ └── paging │ │ │ ├── mod.rs │ │ │ ├── page.rs │ │ │ └── pml4.rs │ │ ├── debug.rs │ │ ├── init │ │ ├── interrupt.rs │ │ ├── mod.rs │ │ ├── multiboot.rs │ │ ├── paging.rs │ │ └── segmentation.rs │ │ ├── interrupt │ │ ├── apic.rs │ │ ├── bit_field.rs │ │ ├── dtables.rs │ │ ├── idt.rs │ │ ├── mod.rs │ │ ├── pic.rs │ │ └── switch.rs │ │ ├── linker.ld │ │ ├── mod.rs │ │ ├── paging │ │ ├── macros.rs │ │ ├── mod.rs │ │ ├── table.rs │ │ └── with.rs │ │ ├── segmentation │ │ ├── mod.rs │ │ └── tss.rs │ │ └── start.S │ ├── cap │ ├── channel.rs │ ├── cpool.rs │ ├── mod.rs │ ├── task.rs │ └── untyped.rs │ ├── common │ ├── mod.rs │ └── traits.rs │ ├── elf │ ├── loader.rs │ └── mod.rs │ ├── lib.rs │ ├── logging.rs │ ├── macros.rs │ ├── system_calls.rs │ ├── unwind.rs │ └── util │ ├── field_offset.rs │ ├── guard.rs │ ├── managed_arc │ ├── mod.rs │ ├── rwlock.rs │ └── weak_pool.rs │ ├── mod.rs │ ├── object.rs │ └── streamer.rs ├── lazy_static ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── appveyor.yml ├── src │ ├── core_lazy.rs │ ├── lazy.rs │ ├── lib.rs │ └── nightly_lazy.rs └── tests │ ├── no_std.rs │ └── test.rs ├── nix └── rust-nightly.nix ├── rinit ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Makefile └── src │ ├── lib.rs │ └── vga_buffer.rs ├── selfalloc ├── .gitignore ├── Cargo.toml └── src │ └── lib.rs ├── spin ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── examples │ └── debug.rs ├── script │ └── doc-upload.cfg └── src │ ├── lib.rs │ ├── mutex.rs │ ├── once.rs │ ├── rw_lock.rs │ └── util.rs ├── system ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── src │ ├── 
call.rs │ ├── lib.rs │ └── unwind.rs ├── tests ├── run.sh └── userspace │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── Makefile │ └── examples │ └── allocator.rs ├── userspace.mk └── x86_64.json /.arcconfig: -------------------------------------------------------------------------------- 1 | { 2 | "phabricator.uri" : "https://source.that.world/" 3 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017 Wei Tang 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | kernel := kernel/build/$(ARCH)/libkernel.bin 2 | rinit := rinit/build/$(ARCH)/librinit.bin 3 | 4 | .PHONY: all clean run run-release rinit rinit-release kernel kernel-release doc-kernel doc-kernel-deploy 5 | 6 | kernel: 7 | @make -C kernel build 8 | 9 | kernel-release: 10 | @make -C kernel version=release build 11 | 12 | rinit: 13 | @make -C rinit build 14 | 15 | rinit-release: 16 | @make -C rinit version=release build 17 | 18 | run: kernel rinit 19 | @qemu-system-$(ARCH) -kernel $(kernel) -initrd $(rinit) -serial stdio --no-reboot 20 | 21 | run-release: kernel-release rinit-release 22 | @qemu-system-$(ARCH) -kernel $(kernel) -initrd $(rinit) -serial stdio --no-reboot 23 | 24 | debug: kernel rinit 25 | @qemu-system-$(ARCH) -d int -no-reboot -s -S -kernel $(kernel) -initrd $(rinit) -serial stdio 26 | 27 | noreboot: kernel rinit 28 | @qemu-system-$(ARCH) -d int -no-reboot -kernel $(kernel) -initrd $(rinit) -serial stdio 29 | 30 | noreboot-release: kernel-release rinit-release 31 | @qemu-system-$(ARCH) -d int -no-reboot -kernel $(kernel) -initrd $(rinit) -serial stdio 32 | 33 | test: kernel-release 34 | @make -C tests/userspace version=release kernel=$(shell realpath $(kernel)) test=allocator test 35 | 36 | gdb: 37 | @gdb $(kernel) -ex "target remote :1234" 38 | 39 | clean: 40 | @make -C kernel clean 41 | @make -C rinit clean 42 | @make -C tests/userspace kernel=none test=none clean 43 | 44 | doc-kernel: 45 | @rm -rf kernel/target/doc 46 | @cargo rustdoc --manifest-path kernel/Cargo.toml -- \ 47 | --no-defaults \ 48 | 
--passes strip-hidden \ 49 | --passes collapse-docs \ 50 | --passes unindent-comments \ 51 | --passes strip-priv-imports 52 | 53 | doc-kernel-deploy: doc-kernel 54 | @rsync -vraP --delete-after kernel/target/doc/ deploy@that.world:~/~docs/rux 55 | -------------------------------------------------------------------------------- /abi/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /abi/Cargo.lock: -------------------------------------------------------------------------------- 1 | [root] 2 | name = "abi" 3 | version = "0.1.0" 4 | 5 | -------------------------------------------------------------------------------- /abi/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "abi" 3 | version = "0.1.0" 4 | authors = ["Wei Tang "] 5 | 6 | [features] 7 | default = [] 8 | kernel_debug = [] -------------------------------------------------------------------------------- /abi/src/caddr.rs: -------------------------------------------------------------------------------- 1 | use core::convert::From; 2 | use core::ops::Shl; 3 | 4 | #[derive(Debug, Clone, Copy)] 5 | pub struct CAddr(pub [u8; 8], pub usize); 6 | 7 | impl Shl for CAddr { 8 | type Output = CAddr; 9 | fn shl(self, rhs: usize) -> CAddr { 10 | assert!(rhs == 1); 11 | CAddr([self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], self.0[6], self.0[7], 0], 12 | self.1 - 1) 13 | } 14 | } 15 | 16 | impl From for CAddr { 17 | fn from(v: u8) -> CAddr { CAddr([v, 0, 0, 0, 0, 0, 0, 0], 1) } 18 | } 19 | 20 | impl From<[u8; 1]> for CAddr { 21 | fn from(v: [u8; 1]) -> CAddr { CAddr([v[0], 0, 0, 0, 0, 0, 0, 0], 1) } 22 | } 23 | 24 | impl From<[u8; 2]> for CAddr { 25 | 
fn from(v: [u8; 2]) -> CAddr { CAddr([v[0], v[1], 0, 0, 0, 0, 0, 0], 2) } 26 | } 27 | 28 | impl From<[u8; 3]> for CAddr { 29 | fn from(v: [u8; 3]) -> CAddr { CAddr([v[0], v[1], v[2], 0, 0, 0, 0, 0], 3) } 30 | } 31 | 32 | impl From<[u8; 4]> for CAddr { 33 | fn from(v: [u8; 4]) -> CAddr { CAddr([v[0], v[1], v[2], v[3], 0, 0, 0, 0], 4) } 34 | } 35 | 36 | impl From<[u8; 5]> for CAddr { 37 | fn from(v: [u8; 5]) -> CAddr { CAddr([v[0], v[1], v[2], v[3], v[4], 0, 0, 0], 5) } 38 | } 39 | 40 | impl From<[u8; 6]> for CAddr { 41 | fn from(v: [u8; 6]) -> CAddr { CAddr([v[0], v[1], v[2], v[3], v[4], v[5], 0, 0], 6) } 42 | } 43 | 44 | impl From<[u8; 7]> for CAddr { 45 | fn from(v: [u8; 7]) -> CAddr { CAddr([v[0], v[1], v[2], v[3], v[4], v[5], v[6], 0], 7) } 46 | } 47 | 48 | impl From<[u8; 8]> for CAddr { 49 | fn from(v: [u8; 8]) -> CAddr { CAddr([v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]], 8) } 50 | } 51 | -------------------------------------------------------------------------------- /abi/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(lang_items)] 2 | #![feature(asm)] 3 | #![no_std] 4 | 5 | mod caddr; 6 | 7 | pub use caddr::CAddr; 8 | 9 | /// A trait that allows setting a struct back to its default value. 10 | pub trait SetDefault { 11 | /// Set this struct back to its default value. 
12 | fn set_default(&mut self); 13 | } 14 | 15 | #[derive(Debug)] 16 | pub struct CapSystemCall<'a> { 17 | pub target: &'a [u8], 18 | pub message: CapSendMessage 19 | } 20 | 21 | #[derive(Debug, Clone, Copy)] 22 | pub enum CapSendMessage { 23 | TCBYield 24 | } 25 | 26 | #[derive(Debug, Clone)] 27 | pub enum SystemCall { 28 | #[cfg(feature="kernel_debug")] 29 | DebugCPoolList, 30 | #[cfg(feature="kernel_debug")] 31 | DebugTestSucceed, 32 | #[cfg(feature="kernel_debug")] 33 | DebugTestFail, 34 | Print { 35 | request: ([u8; 32], usize) 36 | }, 37 | RetypeRawPageFree { 38 | request: CAddr, 39 | response: Option, 40 | }, 41 | MapRawPageFree { 42 | untyped: CAddr, 43 | toplevel_table: CAddr, 44 | request: (usize, CAddr), 45 | }, 46 | RetypeCPool { 47 | request: (CAddr, CAddr), 48 | }, 49 | ChannelTake { 50 | request: CAddr, 51 | response: Option, 52 | }, 53 | ChannelPut { 54 | request: (CAddr, ChannelMessage), 55 | }, 56 | RetypeTask { 57 | request: (CAddr, CAddr), 58 | }, 59 | TaskSetInstructionPointer { 60 | request: (CAddr, u64), 61 | }, 62 | TaskSetStackPointer { 63 | request: (CAddr, u64), 64 | }, 65 | TaskSetCPool { 66 | request: (CAddr, CAddr), 67 | }, 68 | TaskSetTopPageTable { 69 | request: (CAddr, CAddr), 70 | }, 71 | TaskSetBuffer { 72 | request: (CAddr, CAddr), 73 | }, 74 | TaskSetActive { 75 | request: CAddr 76 | }, 77 | TaskSetInactive { 78 | request: CAddr 79 | }, 80 | } 81 | 82 | /// Represents a task buffer used for system calls. 
83 | pub struct TaskBuffer { 84 | pub call: Option, 85 | pub payload_length: usize, 86 | pub payload_data: [u8; 1024], 87 | } 88 | 89 | impl SetDefault for TaskBuffer { 90 | fn set_default(&mut self) { 91 | self.call = None; 92 | } 93 | } 94 | 95 | #[derive(Debug, Clone)] 96 | pub enum ChannelMessage { 97 | Raw(u64), 98 | Cap(Option), 99 | Payload, 100 | } 101 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? ( 2 | let 3 | nixpkgs = import ; 4 | pkgs_ = (nixpkgs {}); 5 | rustOverlay = (pkgs_.fetchFromGitHub { 6 | owner = "mozilla"; 7 | repo = "nixpkgs-mozilla"; 8 | rev = "d7ba4e48037c0f944d01d7902fcdc8fa0766df24"; 9 | sha256 = "033zk1pfnwh0ryrm1yzl9ybgqyhypgdxv1249a8z7cdy1rvb9zz4"; 10 | }); 11 | in (nixpkgs { 12 | overlays = [ 13 | (import (builtins.toPath "${rustOverlay}/rust-overlay.nix")) 14 | (self: super: 15 | with super; 16 | let nightly = lib.rustLib.fromManifest (lib.rustLib.manifest_v2_url { 17 | channel = "nightly"; 18 | date = "2018-05-17"; 19 | }) { 20 | inherit (self) stdenv fetchurl patchelf; 21 | }; 22 | rustc_ = nightly.rustc; 23 | cargo_ = nightly.cargo; 24 | rust-src_ = nightly.rust-src; 25 | rust_ = nightly.rust; 26 | in { 27 | rust = { 28 | rustc = rustc_; 29 | cargo = cargo_; 30 | rust-src = rust-src_; 31 | rust = rust_; 32 | }; 33 | }) 34 | ]; 35 | })) 36 | }: 37 | 38 | with pkgs; 39 | 40 | let 41 | 42 | x86_64-target-spec = stdenv.mkDerivation { 43 | name = "target-spec"; 44 | src = ./x86_64.json; 45 | phases = [ "buildPhase" ]; 46 | buildPhase = '' 47 | mkdir -p $out 48 | cp $src $out/x86_64.json 49 | ''; 50 | }; 51 | 52 | libcore = stdenv.mkDerivation { 53 | name = "libcore"; 54 | buildInputs = [ 55 | rust.rustc 56 | ]; 57 | phases = [ "buildPhase" ]; 58 | buildPhase = '' 59 | mkdir -p $out 60 | rustc --target=${x86_64-target-spec}/x86_64.json --out-dir=$out --crate-name=core --crate-type=lib 
${rust.rust-src}/lib/rustlib/src/rust/src/libcore/lib.rs 61 | ''; 62 | }; 63 | 64 | libcompiler_builtins = stdenv.mkDerivation { 65 | name = "libcompiler_builtins"; 66 | buildInputs = [ 67 | rust.rustc 68 | ]; 69 | phases = [ "buildPhase" ]; 70 | buildPhase = '' 71 | mkdir -p $out 72 | rustc -L ${libcore} --cfg 'feature="compiler-builtins"' --target=${x86_64-target-spec}/x86_64.json --out-dir=$out --crate-name=compiler_builtins --crate-type=lib ${rust.rust-src}/lib/rustlib/src/rust/src/libcompiler_builtins/src/lib.rs 73 | ''; 74 | }; 75 | 76 | liballoc = stdenv.mkDerivation { 77 | name = "liballoc"; 78 | buildInputs = [ 79 | rust.rustc 80 | ]; 81 | phases = [ "buildPhase" ]; 82 | buildPhase = '' 83 | mkdir -p $out 84 | rustc -L ${libcore} -L ${libcompiler_builtins} --target=${x86_64-target-spec}/x86_64.json --out-dir=$out --crate-name=alloc --crate-type=lib ${rust.rust-src}/lib/rustlib/src/rust/src/liballoc/lib.rs 85 | ''; 86 | }; 87 | 88 | triple = "x86_64-none-elf"; 89 | 90 | userspace-linker = stdenv.mkDerivation { 91 | name = "userspace-linker"; 92 | phases = [ "buildPhase" ]; 93 | buildPhase = '' 94 | mkdir -p $out 95 | cat < $out/linker.ld 96 | ENTRY(start) 97 | OUTPUT_FORMAT(elf64-x86-64) 98 | EOT 99 | ''; 100 | }; 101 | 102 | in stdenv.mkDerivation { 103 | name = "rux-env"; 104 | buildInputs = [ 105 | gnumake 106 | (binutils-unwrapped.override { targetPlatform = { config = triple; isiOS = false; isAarch64 = false; }; }) 107 | qemu 108 | file 109 | gdb 110 | rust.rust 111 | rust.cargo 112 | curl 113 | ]; 114 | 115 | ARCH = "x86_64"; 116 | RUST_SRC = "${rust.rust-src}"; 117 | TARGET_SPEC = "${x86_64-target-spec}/x86_64.json"; 118 | USERSPACE_LINKER = "${userspace-linker}/linker.ld"; 119 | 120 | LIBCORE = "${libcore}"; 121 | LIBCOMPILER_BUILTINS = "${libcompiler_builtins}"; 122 | LIBALLOC = "${liballoc}"; 123 | 124 | LD = "${triple}-ld"; 125 | AS = "${triple}-as"; 126 | OBJDUMP = "${triple}-objdump"; 127 | OBJCOPY = "${triple}-objcopy"; 128 | } 129 | 
-------------------------------------------------------------------------------- /kernel/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /kernel/Cargo.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "abi" 3 | version = "0.1.0" 4 | 5 | [[package]] 6 | name = "bitflags" 7 | version = "0.8.2" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | 10 | [[package]] 11 | name = "kernel" 12 | version = "0.0.1" 13 | dependencies = [ 14 | "abi 0.1.0", 15 | "bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", 16 | "lazy_static 0.2.9", 17 | "rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 18 | "spin 0.4.4", 19 | ] 20 | 21 | [[package]] 22 | name = "lazy_static" 23 | version = "0.2.9" 24 | dependencies = [ 25 | "spin 0.4.4", 26 | ] 27 | 28 | [[package]] 29 | name = "rlibc" 30 | version = "1.0.0" 31 | source = "registry+https://github.com/rust-lang/crates.io-index" 32 | 33 | [[package]] 34 | name = "spin" 35 | version = "0.4.4" 36 | 37 | [metadata] 38 | "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4" 39 | "checksum rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc874b127765f014d792f16763a81245ab80500e2ad921ed4ee9e82481ee08fe" 40 | -------------------------------------------------------------------------------- /kernel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kernel" 3 | version = "0.0.1" 4 | authors = ["Wei Tang "] 5 | 6 | [lib] 7 | crate-type = 
["staticlib"] 8 | 9 | [dependencies.abi] 10 | path = "../abi" 11 | 12 | [dependencies.lazy_static] 13 | version = "0.2" 14 | path = "../lazy_static" 15 | features = ["spin_no_std"] 16 | 17 | [dependencies.bitflags] 18 | version = "0.8" 19 | 20 | [dependencies.spin] 21 | path = "../spin" 22 | version = "0.4" 23 | 24 | [dependencies.rlibc] 25 | version = "1.0" 26 | 27 | [features] 28 | default = ["kernel_debug"] 29 | kernel_debug = ["abi/kernel_debug"] -------------------------------------------------------------------------------- /kernel/Makefile: -------------------------------------------------------------------------------- 1 | version ?= debug 2 | kernel := build/$(ARCH)/libkernel.bin 3 | rust_os := target/$(ARCH)/$(version)/libkernel.a 4 | 5 | linker_script := src/arch/$(ARCH)/linker.ld 6 | linker_flags := -T $(linker_script) 7 | linker_flags += -Map build/$(ARCH)/map.txt 8 | linker_flags += --gc-sections 9 | linker_flags += -z max-page-size=0x1000 10 | 11 | assembly_source_files := $(wildcard src/arch/$(ARCH)/*.S) 12 | assembly_object_files := $(patsubst src/arch/$(ARCH)/%.S, \ 13 | build/$(ARCH)/%.o, $(assembly_source_files)) 14 | 15 | .PHONY: all clean run cargo kernel 16 | 17 | all: $(kernel) 18 | 19 | clean: 20 | @rm -r build 21 | @rm -r target 22 | 23 | build: cargo $(rust_os) $(assembly_object_files) $(linker_script) 24 | @$(LD) $(linker_flags) -o $(kernel).elf64 $(assembly_object_files) $(rust_os) 25 | @$(OBJCOPY) $(kernel).elf64 -F elf32-i386 $(kernel) 26 | 27 | cargo: 28 | ifeq ($(version),release) 29 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBCOMPILER_BUILTINS)" cargo rustc --release --target $(TARGET_SPEC) 30 | else 31 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBCOMPILER_BUILTINS)" cargo rustc --target $(TARGET_SPEC) 32 | endif 33 | 34 | # compile assembly files 35 | build/$(ARCH)/%.o: src/arch/$(ARCH)/%.S 36 | @mkdir -p $(shell dirname $@) 37 | @$(AS) -o $@ $< 38 | -------------------------------------------------------------------------------- 
/kernel/README.md: -------------------------------------------------------------------------------- 1 | ## Improvements 2 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/addr.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::ops::{Add, AddAssign}; 3 | 4 | macro_rules! addr_common { 5 | ( $t:ty, $e:expr ) => { 6 | impl Add for $t { 7 | type Output = Self; 8 | 9 | fn add(self, _rhs: usize) -> Self { 10 | Self::from(self.into(): usize + _rhs) 11 | } 12 | } 13 | 14 | impl AddAssign for $t { 15 | fn add_assign(&mut self, _rhs: usize) { 16 | self.0 = self.0 + (_rhs as u64); 17 | } 18 | } 19 | 20 | impl fmt::Binary for $t { 21 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 22 | self.0.fmt(f) 23 | } 24 | } 25 | 26 | impl fmt::Display for $t { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | self.0.fmt(f) 29 | } 30 | } 31 | 32 | impl fmt::LowerHex for $t { 33 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 34 | self.0.fmt(f) 35 | } 36 | } 37 | 38 | impl fmt::Octal for $t { 39 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 40 | self.0.fmt(f) 41 | } 42 | } 43 | 44 | impl fmt::UpperHex for $t { 45 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 46 | self.0.fmt(f) 47 | } 48 | } 49 | 50 | impl From for $t { 51 | fn from(v: usize) -> Self { $e(v as u64) } 52 | } 53 | 54 | impl From for $t { 55 | fn from(v: u64) -> Self { $e(v as u64) } 56 | } 57 | 58 | impl From for $t { 59 | fn from(v: u32) -> Self { $e(v as u64) } 60 | } 61 | 62 | impl Into for $t { 63 | fn into(self) -> usize { self.0 as usize } 64 | } 65 | 66 | impl Into for $t { 67 | fn into(self) -> u64 { self.0 as u64 } 68 | } 69 | 70 | impl Into for $t { 71 | fn into(self) -> u32 { self.0 as u32 } 72 | } 73 | 74 | impl $t { 75 | pub const fn new(v: u64) -> $t { $e(v) } 76 | } 77 | } 78 | } 79 | 80 | /// Represent a physical memory address. 
81 | #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] 82 | pub struct PAddr(u64); 83 | 84 | addr_common!(PAddr, PAddr); 85 | 86 | /// Represent a virtual (linear) memory address. 87 | #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] 88 | pub struct VAddr(u64); 89 | 90 | addr_common!(VAddr, VAddr); 91 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/cap/mod.rs: -------------------------------------------------------------------------------- 1 | macro_rules! doto_arch_any { 2 | ($any:expr, $f:tt $(,$param:expr)*) => { 3 | if $any.is::<::arch::cap::PML4Cap>() { 4 | $f ($any.into(): ::arch::cap::PML4Cap, $($param),*) 5 | } else if $any.is::<::arch::cap::PDPTCap>() { 6 | $f ($any.into(): ::arch::cap::PDPTCap, $($param),*) 7 | } else if $any.is::<::arch::cap::PDCap>() { 8 | $f ($any.into(): ::arch::cap::PDCap, $($param),*) 9 | } else if $any.is::<::arch::cap::PTCap>() { 10 | $f ($any.into(): ::arch::cap::PTCap, $($param),*) 11 | } else { 12 | panic!(); 13 | } 14 | } 15 | } 16 | 17 | /// Paging-related arch-specific capabilities. 18 | mod paging; 19 | 20 | pub use self::paging::{PML4Descriptor, PML4Cap, 21 | PDPTDescriptor, PDPTCap, 22 | PDDescriptor, PDCap, 23 | PTDescriptor, PTCap, 24 | PageDescriptor, PageCap, 25 | PAGE_LENGTH}; 26 | 27 | /// The top-level page table capability. In `x86_64`, this is PML4. 28 | pub type TopPageTableCap = PML4Cap; 29 | 30 | use common::*; 31 | use core::any::{TypeId}; 32 | use util::managed_arc::{ManagedArc, ManagedArcAny}; 33 | 34 | /// Create a managed Arc (capability) from an address of an 35 | /// architecture-specific kernel object. The `type_id` should be a 36 | /// [TypeId](https://doc.rust-lang.org/std/any/struct.TypeId.html) of 37 | /// an architecture-specific capability. If the `type_id` is not 38 | /// recognized, `None` is returned. 
39 | /// 40 | /// # Safety 41 | /// 42 | /// `ptr` must be a physical address pointing to a valid kernel object 43 | /// of type `type_id`. 44 | pub unsafe fn upgrade_arch_any(ptr: PAddr, type_id: TypeId) -> Option { 45 | if type_id == TypeId::of::() { 46 | Some({ ManagedArc::from_ptr(ptr): PML4Cap }.into()) 47 | } else if type_id == TypeId::of::() { 48 | Some({ ManagedArc::from_ptr(ptr): PDPTCap }.into()) 49 | } else if type_id == TypeId::of::() { 50 | Some({ ManagedArc::from_ptr(ptr): PDCap }.into()) 51 | } else if type_id == TypeId::of::() { 52 | Some({ ManagedArc::from_ptr(ptr): PTCap }.into()) 53 | } else { 54 | None 55 | } 56 | } 57 | 58 | /// Drop an architecture-specific `any` capability. `ManagedArcAny` is 59 | /// not itself droppable. It must be converted to its real type before 60 | /// dropping. This function is used by `kernel::cap::drop_any`. 61 | pub fn drop_any(any: ManagedArcAny) { 62 | if any.is::() { 63 | any.into(): PML4Cap; 64 | } else if any.is::() { 65 | any.into(): PDPTCap; 66 | } else if any.is::() { 67 | any.into(): PDCap; 68 | } else if any.is::() { 69 | any.into(): PTCap; 70 | } else { 71 | panic!(); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/cap/paging/mod.rs: -------------------------------------------------------------------------------- 1 | mod page; 2 | mod pml4; 3 | 4 | use common::*; 5 | use arch::paging::{BASE_PAGE_LENGTH, 6 | PT, PTEntry, PT_P, PT_RW, PT_US, 7 | PD, PDEntry, PD_P, PD_RW, PD_US, 8 | PDPT, PDPTEntry, PDPT_P, PDPT_RW, PDPT_US}; 9 | use util::{MemoryObject, UniqueReadGuard, UniqueWriteGuard, RwLock}; 10 | use util::managed_arc::{ManagedArc, ManagedArcAny, ManagedWeakPool1Arc}; 11 | use core::marker::{PhantomData}; 12 | use core::any::{Any}; 13 | use cap::{UntypedDescriptor, SetDefault}; 14 | 15 | /// Page length used in current kernel. This is `BASE_PAGE_LENGTH` in x86_64. 
16 | pub const PAGE_LENGTH: usize = BASE_PAGE_LENGTH; 17 | 18 | /// PML4 page table descriptor. 19 | pub struct PML4Descriptor { 20 | start_paddr: PAddr, 21 | #[allow(dead_code)] 22 | next: Option, 23 | } 24 | 25 | /// PML4 page table capability. 26 | pub type PML4Cap = ManagedArc>; 27 | 28 | 29 | /// PDPT page table descriptor. 30 | pub struct PDPTDescriptor { 31 | mapped_weak_pool: ManagedWeakPool1Arc, 32 | start_paddr: PAddr, 33 | #[allow(dead_code)] 34 | next: Option, 35 | } 36 | 37 | /// PDPT page table capability. 38 | pub type PDPTCap = ManagedArc>; 39 | 40 | 41 | /// PD page table descriptor. 42 | pub struct PDDescriptor { 43 | mapped_weak_pool: ManagedWeakPool1Arc, 44 | start_paddr: PAddr, 45 | #[allow(dead_code)] 46 | next: Option, 47 | } 48 | 49 | /// PD page table capability. 50 | pub type PDCap = ManagedArc>; 51 | 52 | 53 | /// PT page table descriptor. 54 | pub struct PTDescriptor { 55 | mapped_weak_pool: ManagedWeakPool1Arc, 56 | start_paddr: PAddr, 57 | #[allow(dead_code)] 58 | next: Option, 59 | } 60 | 61 | /// PT page table capability. 62 | pub type PTCap = ManagedArc>; 63 | 64 | /// Page descriptor. 65 | pub struct PageDescriptor { 66 | mapped_weak_pool: ManagedWeakPool1Arc, 67 | start_paddr: PAddr, 68 | #[allow(dead_code)] 69 | next: Option, 70 | _marker: PhantomData 71 | } 72 | 73 | /// Page capability. 74 | pub type PageCap = ManagedArc>>; 75 | 76 | macro_rules! 
paging_cap { 77 | ( $cap:ty, $desc:tt, $paging:ty, $entry:tt, $map_fn:ident, $sub_cap:ty, $access:expr ) => ( 78 | impl $cap { 79 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 80 | let mut arc: Option = None; 81 | 82 | let start_paddr = unsafe { untyped.allocate(BASE_PAGE_LENGTH, BASE_PAGE_LENGTH) }; 83 | 84 | let mapped_weak_pool = unsafe { ManagedWeakPool1Arc::create( 85 | untyped.allocate(ManagedWeakPool1Arc::inner_length(), 86 | ManagedWeakPool1Arc::inner_alignment())) }; 87 | 88 | unsafe { 89 | untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 90 | let mut desc = $desc { 91 | mapped_weak_pool: mapped_weak_pool, 92 | start_paddr: start_paddr, 93 | next: next_child, 94 | }; 95 | 96 | for item in desc.write().iter_mut() { 97 | *item = $entry::empty(); 98 | } 99 | 100 | arc = Some( 101 | Self::new(paddr, RwLock::new(desc)) 102 | ); 103 | 104 | arc.clone().unwrap().into() 105 | }); 106 | } 107 | 108 | arc.unwrap() 109 | } 110 | 111 | pub fn $map_fn(&mut self, index: usize, sub: &$sub_cap) { 112 | let mut current_desc = self.write(); 113 | let mut current = current_desc.write(); 114 | let sub_desc = sub.read(); 115 | assert!(!current[index].is_present()); 116 | 117 | sub_desc.mapped_weak_pool.read().downgrade_at(self, 0); 118 | current[index] = $entry::new(sub_desc.start_paddr(), $access); 119 | } 120 | } 121 | 122 | impl $desc { 123 | pub fn start_paddr(&self) -> PAddr { 124 | self.start_paddr 125 | } 126 | 127 | pub fn length(&self) -> usize { 128 | BASE_PAGE_LENGTH 129 | } 130 | 131 | fn page_object(&self) -> MemoryObject<$paging> { 132 | unsafe { MemoryObject::new(self.start_paddr) } 133 | } 134 | 135 | pub fn read(&self) -> UniqueReadGuard<$paging> { 136 | unsafe { UniqueReadGuard::new(self.page_object()) } 137 | } 138 | 139 | fn write(&mut self) -> UniqueWriteGuard<$paging> { 140 | unsafe { UniqueWriteGuard::new(self.page_object()) } 141 | } 142 | } 143 | ) 144 | } 145 | 146 | paging_cap!(PDPTCap, 
PDPTDescriptor, PDPT, PDPTEntry, map_pd, PDCap, PDPT_P | PDPT_RW | PDPT_US); 147 | paging_cap!(PDCap, PDDescriptor, PD, PDEntry, map_pt, PTCap, PD_P | PD_RW | PD_US); 148 | 149 | impl PTCap { 150 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 151 | let mut arc: Option = None; 152 | 153 | let start_paddr = unsafe { untyped.allocate(BASE_PAGE_LENGTH, BASE_PAGE_LENGTH) }; 154 | 155 | let mapped_weak_pool = unsafe { ManagedWeakPool1Arc::create( 156 | untyped.allocate(ManagedWeakPool1Arc::inner_length(), 157 | ManagedWeakPool1Arc::inner_alignment())) }; 158 | 159 | unsafe { 160 | untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 161 | let mut desc = PTDescriptor { 162 | mapped_weak_pool: mapped_weak_pool, 163 | start_paddr: start_paddr, 164 | next: next_child, 165 | }; 166 | 167 | for item in desc.write().iter_mut() { 168 | *item = PTEntry::empty(); 169 | } 170 | 171 | arc = Some( 172 | Self::new(paddr, RwLock::new(desc)) 173 | ); 174 | 175 | arc.clone().unwrap().into() 176 | }); 177 | } 178 | 179 | arc.unwrap() 180 | } 181 | 182 | pub fn map_page(&mut self, index: usize, sub: &PageCap) { 183 | let mut current_desc = self.write(); 184 | let mut current = current_desc.write(); 185 | let sub_desc = sub.read(); 186 | assert!(!current[index].is_present()); 187 | 188 | sub_desc.mapped_weak_pool.read().downgrade_at(self, 0); 189 | current[index] = PTEntry::new(sub_desc.start_paddr(), PT_P | PT_RW | PT_US); 190 | } 191 | } 192 | 193 | impl PTDescriptor { 194 | pub fn start_paddr(&self) -> PAddr { 195 | self.start_paddr 196 | } 197 | 198 | pub fn length(&self) -> usize { 199 | BASE_PAGE_LENGTH 200 | } 201 | 202 | fn page_object(&self) -> MemoryObject { 203 | unsafe { MemoryObject::new(self.start_paddr) } 204 | } 205 | 206 | pub fn read(&self) -> UniqueReadGuard { 207 | unsafe { UniqueReadGuard::new(self.page_object()) } 208 | } 209 | 210 | fn write(&mut self) -> UniqueWriteGuard { 211 | unsafe { 
UniqueWriteGuard::new(self.page_object()) } 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/cap/paging/page.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use arch::paging::{BASE_PAGE_LENGTH}; 3 | use util::{MemoryObject, UniqueReadGuard, UniqueWriteGuard, RwLock}; 4 | use util::managed_arc::{ManagedWeakPool1Arc}; 5 | use core::marker::{PhantomData}; 6 | use core::any::{Any}; 7 | use core::mem; 8 | use super::{PageDescriptor, PageCap, PAGE_LENGTH}; 9 | use cap::{UntypedDescriptor, SetDefault}; 10 | 11 | impl PageCap { 12 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 13 | unsafe { Self::bootstrap(untyped.allocate(BASE_PAGE_LENGTH, BASE_PAGE_LENGTH), untyped) } 14 | } 15 | 16 | pub unsafe fn bootstrap(start_paddr: PAddr, untyped: &mut UntypedDescriptor) -> Self { 17 | assert!(mem::size_of::() <= PAGE_LENGTH); 18 | 19 | let mut arc: Option = None; 20 | 21 | let mapped_weak_pool = ManagedWeakPool1Arc::create( 22 | untyped.allocate(ManagedWeakPool1Arc::inner_length(), 23 | ManagedWeakPool1Arc::inner_alignment())); 24 | 25 | untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 26 | let mut desc = PageDescriptor:: { 27 | mapped_weak_pool: mapped_weak_pool, 28 | start_paddr: start_paddr, 29 | next: next_child, 30 | _marker: PhantomData 31 | }; 32 | 33 | desc.write().set_default(); 34 | 35 | arc = Some( 36 | Self::new(paddr, RwLock::new(desc)) 37 | ); 38 | 39 | arc.clone().unwrap().into() 40 | }); 41 | 42 | arc.unwrap() 43 | } 44 | 45 | pub const fn length() -> usize { 46 | BASE_PAGE_LENGTH 47 | } 48 | } 49 | 50 | impl PageDescriptor { 51 | pub fn start_paddr(&self) -> PAddr { 52 | self.start_paddr 53 | } 54 | 55 | pub fn length(&self) -> usize { 56 | BASE_PAGE_LENGTH 57 | } 58 | 59 | fn page_object(&self) -> MemoryObject { 60 | unsafe { MemoryObject::new(self.start_paddr) } 61 | } 62 | 
63 | pub fn read(&self) -> UniqueReadGuard { 64 | unsafe { UniqueReadGuard::new(self.page_object()) } 65 | } 66 | 67 | pub fn write(&mut self) -> UniqueWriteGuard { 68 | unsafe { UniqueWriteGuard::new(self.page_object()) } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/cap/paging/pml4.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use arch::{KERNEL_BASE}; 3 | use arch::init::{KERNEL_PDPT}; 4 | use arch::paging::{BASE_PAGE_LENGTH, PML4, PML4Entry, pml4_index}; 5 | use util::{MemoryObject, UniqueReadGuard, UniqueWriteGuard, RwLock}; 6 | use super::{PML4Descriptor, PML4Cap, PDPTCap, PDCap, PTCap, PageCap}; 7 | use cap::{self, UntypedDescriptor, CPoolDescriptor, SetDefault}; 8 | use core::any::Any; 9 | 10 | impl PML4Cap { 11 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 12 | let mut arc: Option = None; 13 | 14 | let start_paddr = unsafe { untyped.allocate(BASE_PAGE_LENGTH, BASE_PAGE_LENGTH) }; 15 | 16 | unsafe { 17 | use arch::paging::{PML4_P, PML4_RW}; 18 | 19 | untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 20 | let mut desc = PML4Descriptor { 21 | start_paddr: start_paddr, 22 | next: next_child, 23 | }; 24 | 25 | for item in desc.write().iter_mut() { 26 | *item = PML4Entry::empty(); 27 | } 28 | 29 | desc.write()[pml4_index(VAddr::from(KERNEL_BASE))] = 30 | PML4Entry::new(KERNEL_PDPT.paddr(), PML4_P | PML4_RW); 31 | 32 | arc = Some( 33 | Self::new(paddr, RwLock::new(desc)) 34 | ); 35 | 36 | arc.clone().unwrap().into() 37 | }); 38 | } 39 | 40 | arc.unwrap() 41 | } 42 | 43 | pub fn map_pdpt(&mut self, index: usize, sub: &PDPTCap) { 44 | use arch::paging::{pml4_index, PML4_P, PML4_RW, PML4_US}; 45 | 46 | let mut current_desc = self.write(); 47 | let mut current = current_desc.write(); 48 | let sub_desc = sub.read(); 49 | assert!(!(pml4_index(VAddr::from(KERNEL_BASE)) == index)); 
50 | assert!(!current[index].is_present()); 51 | 52 | sub_desc.mapped_weak_pool.read().downgrade_at(self, 0); 53 | current[index] = PML4Entry::new(sub_desc.start_paddr(), PML4_P | PML4_RW | PML4_US); 54 | } 55 | 56 | pub fn map(&mut self, vaddr: VAddr, page: &PageCap, 57 | untyped: &mut UntypedDescriptor, cpool: &mut CPoolDescriptor) { 58 | use arch::paging::{pml4_index, pdpt_index, pd_index, pt_index}; 59 | 60 | log!("PML4 mapping: 0x{:x}", vaddr); 61 | 62 | let mut pdpt_cap: PDPTCap = { 63 | let index = pml4_index(vaddr); 64 | 65 | if !{ self.read().read()[index] }.is_present() { 66 | let pdpt_cap = PDPTCap::retype_from(untyped); 67 | self.map_pdpt(index, &pdpt_cap); 68 | cpool.downgrade_free(&pdpt_cap); 69 | } 70 | 71 | let position = (0..cpool.size()).position(|i| { 72 | let any = cpool.upgrade_any(i); 73 | if let Some(any) = any { 74 | if any.is::() { 75 | let cap: PDPTCap = any.into(); 76 | let cap_desc = cap.read(); 77 | cap_desc.start_paddr() == { self.read().read()[index] }.get_address() 78 | } else { 79 | cap::drop_any(any); 80 | false 81 | } 82 | } else { 83 | false 84 | } 85 | }).unwrap(); 86 | 87 | cpool.upgrade(position) 88 | }.unwrap(); 89 | 90 | let mut pd_cap: PDCap = { 91 | let index = pdpt_index(vaddr); 92 | 93 | if !{ pdpt_cap.read().read()[index] }.is_present() { 94 | let pd_cap = PDCap::retype_from(untyped); 95 | pdpt_cap.map_pd(index, &pd_cap); 96 | cpool.downgrade_free(&pd_cap); 97 | } 98 | 99 | let position = (0..cpool.size()).position(|i| { 100 | let any = cpool.upgrade_any(i); 101 | if let Some(any) = any { 102 | if any.is::() { 103 | let cap: PDCap = any.into(); 104 | let cap_desc = cap.read(); 105 | cap_desc.start_paddr() == { pdpt_cap.read().read()[index] }.get_address() 106 | } else { 107 | cap::drop_any(any); 108 | false 109 | } 110 | } else { 111 | false 112 | } 113 | }).unwrap(); 114 | 115 | cpool.upgrade(position) 116 | }.unwrap(); 117 | 118 | let mut pt_cap: PTCap = { 119 | let index = pd_index(vaddr); 120 | 121 | if !{ 
pd_cap.read().read()[index] }.is_present() { 122 | let pt_cap = PTCap::retype_from(untyped); 123 | pd_cap.map_pt(index, &pt_cap); 124 | cpool.downgrade_free(&pt_cap); 125 | } 126 | 127 | let position = (0..cpool.size()).position(|i| { 128 | let any = cpool.upgrade_any(i); 129 | if let Some(any) = any { 130 | if any.is::() { 131 | let cap: PTCap = any.into(); 132 | let cap_desc = cap.read(); 133 | cap_desc.start_paddr() == { pd_cap.read().read()[index] }.get_address() 134 | } else { 135 | cap::drop_any(any); 136 | false 137 | } 138 | } else { 139 | false 140 | } 141 | }).unwrap(); 142 | 143 | cpool.upgrade(position) 144 | }.unwrap(); 145 | 146 | pt_cap.map_page(pt_index(vaddr), page); 147 | } 148 | } 149 | 150 | impl PML4Descriptor { 151 | pub fn start_paddr(&self) -> PAddr { 152 | self.start_paddr 153 | } 154 | 155 | pub fn length(&self) -> usize { 156 | BASE_PAGE_LENGTH 157 | } 158 | 159 | fn page_object(&self) -> MemoryObject { 160 | unsafe { MemoryObject::new(self.start_paddr) } 161 | } 162 | 163 | pub fn read(&self) -> UniqueReadGuard { 164 | unsafe { UniqueReadGuard::new(self.page_object()) } 165 | } 166 | 167 | fn write(&mut self) -> UniqueWriteGuard { 168 | unsafe { UniqueWriteGuard::new(self.page_object()) } 169 | } 170 | 171 | pub fn switch_to(&mut self) { 172 | use arch::paging; 173 | 174 | unsafe { paging::switch_to(self.start_paddr); } 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/debug.rs: -------------------------------------------------------------------------------- 1 | /// Write a string to the output channel 2 | /// 3 | /// This method is unsafe because it does port accesses without synchronisation 4 | pub unsafe fn puts(s: &str) 5 | { 6 | for b in s.bytes() 7 | { 8 | putb(b); 9 | } 10 | } 11 | 12 | /// Write a single byte to the output channel 13 | /// 14 | /// This method is unsafe because it does port accesses without synchronisation 15 | pub unsafe fn putb(b: u8) 
{
    // Poll the COM1 Line Status Register (port 0x3F8 + 5) until bit 5
    // (0x20, "transmit holding register empty") is set, i.e. the UART is
    // ready to accept another byte.
    // NOTE(review): the previous comment said "wait for the fifo to not be
    // empty" — bit 0x20 of the LSR actually signals transmitter readiness.
    while (::arch::inportb(0x3F8+5) & 0x20) == 0
    {
        // Do nothing
    }
    // Send the byte out the serial port (COM1 data register).
    ::arch::outportb(0x3F8, b);

    // Also send to the bochs 0xe9 hack
    ::arch::outportb(0xe9, b);
}
// ---------------- /kernel/src/arch/x86_64/init/interrupt.rs ----------------
use arch::interrupt::{self, IDT, IO_APIC, LOCAL_APIC, disable_pic};

/// Initialize interrupt. Disable PIC and then initialize APIC
/// together with keyboard interrupt on I/O APIC.
pub fn init() {
    unsafe { disable_pic() };
    IDT.load();

    {
        let mut local_apic = LOCAL_APIC.lock();
        let mut io_apic = IO_APIC.lock();
        let local_apic_id = local_apic.id() as u8;
        // Route IRQ 1 (PS/2 keyboard) through the I/O APIC to this CPU's
        // local APIC, delivered as KEYBOARD_INTERRUPT_CODE.
        io_apic.set_irq(0x1, local_apic_id, interrupt::KEYBOARD_INTERRUPT_CODE);

        // Write the Spurious Interrupt Vector register: 0x1FF sets the
        // APIC software-enable bit (bit 8) with spurious vector 0xFF.
        local_apic.set_siv(0x1FF);
    }
}
// ---------------- /kernel/src/arch/x86_64/init/mod.rs ----------------
/// [Multiboot](https://www.gnu.org/software/grub/manual/multiboot/multiboot.html)
/// information parser.
mod multiboot;

/// Paging initialization code.
mod paging;

/// Interrupt initialization code.
mod interrupt;

/// Segmentation initialization code.
12 | mod segmentation; 13 | 14 | pub use self::paging::{KERNEL_PML4, KERNEL_PDPT, KERNEL_PD, 15 | OBJECT_POOL_PT, OBJECT_POOL_START_VADDR, 16 | LOCAL_APIC_PAGE_VADDR, IO_APIC_PAGE_VADDR}; 17 | pub use self::segmentation::set_kernel_stack; 18 | 19 | use ::kmain; 20 | use super::{kernel_end_paddr, kernel_start_paddr, kernel_start_vaddr}; 21 | 22 | use core::mem; 23 | use core::slice::{self, Iter}; 24 | 25 | use common::{PAddr, MemoryRegion}; 26 | 27 | extern { 28 | /// Multiboot signature exposed by linker. 29 | #[allow(dead_code)] 30 | static multiboot_sig: u32; 31 | /// Multiboot pointer exposed by linker. 32 | static multiboot_ptr: u64; 33 | } 34 | 35 | // Helper functions 36 | pub fn multiboot_paddr() -> PAddr { 37 | unsafe { PAddr::from(multiboot_ptr) } 38 | } 39 | 40 | /// Iterator for `Option`. It returns `None` if the 41 | /// inner `Option` is none. Otherwise return the value unwrapped. 42 | pub struct FreeRegionsIterator<'a>(Iter<'a, Option>); 43 | 44 | impl<'a> Iterator for FreeRegionsIterator<'a> { 45 | type Item = MemoryRegion; 46 | 47 | fn next(&mut self) -> Option { 48 | let item = self.0.next(); 49 | 50 | if item.is_none() { 51 | None 52 | } else { 53 | if item.unwrap().is_none() { 54 | None 55 | } else { 56 | Some(item.unwrap().unwrap()) 57 | } 58 | } 59 | } 60 | } 61 | 62 | /// Initialization information to be passed to `kmain`. It contains 63 | /// free regions and rinit and kernel memory region information. At 64 | /// most 16 free regions are supported. 65 | #[derive(Debug)] 66 | pub struct InitInfo { 67 | free_regions_size: usize, 68 | free_regions: [Option; 16], 69 | rinit_region: MemoryRegion, 70 | kernel_region: MemoryRegion, 71 | } 72 | 73 | impl InitInfo { 74 | /// Return a `FreeRegionsIterator` that allows iterating over all 75 | /// free regions. 76 | pub fn free_regions(&self) -> FreeRegionsIterator { 77 | FreeRegionsIterator(self.free_regions.iter()) 78 | } 79 | 80 | /// The kernel memory region. 
81 | pub fn kernel_region(&self) -> MemoryRegion { 82 | self.kernel_region 83 | } 84 | 85 | /// The user-space rinit program memory region. 86 | pub fn rinit_region(&self) -> MemoryRegion { 87 | self.rinit_region 88 | } 89 | 90 | /// Create a new `InitInfo` using a kernel region and a rinit region. 91 | pub fn new(kernel_region: MemoryRegion, rinit_region: MemoryRegion) -> InitInfo { 92 | InitInfo { free_regions_size: 0, 93 | free_regions: [None; 16], 94 | kernel_region: kernel_region, 95 | rinit_region: rinit_region } 96 | } 97 | 98 | /// Append a new free region to the `InitInfo`. 99 | pub fn push_free_region(&mut self, region: MemoryRegion) { 100 | self.free_regions[self.free_regions_size] = Some(region); 101 | self.free_regions_size += 1; 102 | } 103 | } 104 | 105 | /// Read the multiboot structure. Construct an `InitInfo` with all 106 | /// free regions. A memory region that will be used for initial memory 107 | /// allocation is returned seperately. That region is always the same 108 | /// as the region of the kernel region. 
109 | fn bootstrap_archinfo() -> (InitInfo, MemoryRegion) { 110 | let bootinfo = unsafe { 111 | multiboot::Multiboot::new(multiboot_paddr(), |addr, size| { 112 | let ptr = mem::transmute(super::kernel_paddr_to_vaddr(addr).into(): usize); 113 | Some(slice::from_raw_parts(ptr, size)) 114 | }) 115 | }.unwrap(); 116 | 117 | let rinit_module = bootinfo.modules().unwrap().next().unwrap(); 118 | log!("rinit module: {:?}", rinit_module); 119 | 120 | let mut archinfo = InitInfo::new( 121 | MemoryRegion::new(kernel_start_paddr(), 122 | kernel_end_paddr().into(): usize + 1 - 123 | kernel_start_paddr().into(): usize), 124 | MemoryRegion::new(rinit_module.start, 125 | rinit_module.end.into(): usize + 1 - 126 | rinit_module.start.into(): usize)); 127 | let mut alloc_region: Option = None; 128 | 129 | for area in bootinfo.memory_regions().unwrap() { 130 | use self::multiboot::{MemoryType}; 131 | 132 | if !(area.memory_type() == MemoryType::RAM) { 133 | continue; 134 | } 135 | 136 | let mut cur_region = MemoryRegion::new(area.base_address(), area.length() as usize); 137 | 138 | if cur_region.skip_up(&archinfo.kernel_region()) { 139 | assert!(cur_region.skip_up(&archinfo.rinit_region())); 140 | alloc_region = Some(cur_region); 141 | } else { 142 | archinfo.push_free_region(cur_region); 143 | } 144 | } 145 | 146 | (archinfo, alloc_region.unwrap()) 147 | } 148 | 149 | /// Kernel entrypoint. This function calls `bootstrap_archinfo`, and 150 | /// then use the information to initialize paging, segmentation, 151 | /// interrupt, and APIC. It then jumps to `kmain`. 
152 | #[lang="start"] 153 | #[no_mangle] 154 | #[allow(private_no_mangle_fns)] 155 | pub fn kinit() { 156 | let (mut archinfo, mut alloc_region) = bootstrap_archinfo(); 157 | 158 | log!("kernel_start_vaddr: 0x{:x}", kernel_start_vaddr()); 159 | log!("archinfo: {:?}", archinfo); 160 | log!("alloc_region: {:?}", alloc_region); 161 | 162 | paging::init(&mut alloc_region); 163 | segmentation::init(); 164 | interrupt::init(); 165 | 166 | archinfo.push_free_region(alloc_region); 167 | 168 | { 169 | let local_apic = ::arch::interrupt::LOCAL_APIC.lock(); 170 | let io_apic = ::arch::interrupt::IO_APIC.lock(); 171 | log!("Local APIC id: 0x{:x}", local_apic.id()); 172 | log!("Local APIC version: 0x{:x}", local_apic.version()); 173 | log!("I/O APIC id: 0x{:x}", io_apic.id()); 174 | log!("I/O APIC version: 0x{:x}", io_apic.version()); 175 | } 176 | 177 | kmain(archinfo); 178 | } 179 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/init/segmentation.rs: -------------------------------------------------------------------------------- 1 | use arch::segmentation::{SegmentDescriptor, SegmentSelector, TaskStateSegment}; 2 | use core::mem::size_of; 3 | 4 | extern { 5 | /// GDT memory address exposed by linker. 6 | static mut GDT: [SegmentDescriptor; 9]; 7 | /// Initial stack address exposed by linker. 8 | static init_stack: u64; 9 | } 10 | 11 | /// Task State Segment static. 12 | static mut TSS: TaskStateSegment = TaskStateSegment::empty(); 13 | 14 | /// Load the task state register. 15 | pub unsafe fn load_tr(sel: SegmentSelector) { 16 | asm!("ltr $0" :: "r" (sel.bits())); 17 | } 18 | 19 | /// Set the current kernel stack. Essential for context switching. 20 | pub unsafe fn set_kernel_stack(addr: u64) { 21 | TSS.sp0 = addr; 22 | TSS.ist1 = addr; 23 | } 24 | 25 | /// Main function to initialize interrupt. 
26 | pub fn init() { 27 | unsafe { 28 | use arch::segmentation::{DESC_P, DESC_DPL3, 29 | TYPE_SYS_TSS_AVAILABLE}; 30 | let kernel_stack = &init_stack as *const _ as u64; 31 | let tss_vaddr = &TSS as *const _ as u64; 32 | 33 | set_kernel_stack(kernel_stack); 34 | GDT[7] = SegmentDescriptor::new((tss_vaddr & 0xFFFFFFFF) as u32, 35 | size_of::() as u32); 36 | GDT[7].insert(DESC_P | TYPE_SYS_TSS_AVAILABLE | DESC_DPL3); 37 | GDT[8] = SegmentDescriptor::from_raw(tss_vaddr >> 32); 38 | 39 | log!("kernel_stack = 0x{:x}", kernel_stack); 40 | // asm!("ltr ax" :: "{rax}"(&GDT[7] as *const _ as usize) 41 | // : "rax" : "intel", "volatile"); 42 | load_tr(SegmentSelector::new(7)); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/apic.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use arch::init::{LOCAL_APIC_PAGE_VADDR, IO_APIC_PAGE_VADDR}; 3 | use util::{Mutex}; 4 | use super::{InterruptVector}; 5 | 6 | /// Local APIC pointer. 7 | #[derive(Debug)] 8 | pub struct LocalAPIC { 9 | address: VAddr, 10 | } 11 | 12 | /// I/O APIC pointer. 13 | #[derive(Debug)] 14 | pub struct IOAPIC { 15 | address: VAddr, 16 | } 17 | 18 | /// The local APIC static. 19 | pub static LOCAL_APIC: Mutex = Mutex::new(LocalAPIC { 20 | address: LOCAL_APIC_PAGE_VADDR 21 | }); 22 | 23 | /// The I/O APIC static. 24 | pub static IO_APIC: Mutex = Mutex::new(IOAPIC { 25 | address: IO_APIC_PAGE_VADDR 26 | }); 27 | 28 | #[allow(dead_code)] 29 | impl LocalAPIC { 30 | /// Read a value from the local APIC. 31 | /// 32 | /// # Safety 33 | /// 34 | /// `reg` must be valid. 35 | unsafe fn read(&self, reg: u32) -> u32 { 36 | use core::intrinsics::{volatile_load}; 37 | volatile_load((self.address.into(): usize + reg as usize) as *const u32) 38 | } 39 | 40 | /// Write a value to the local APIC. 41 | /// 42 | /// # Safety 43 | /// 44 | /// `reg` must be valid. 
    unsafe fn write(&mut self, reg: u32, value: u32) {
        use core::intrinsics::{volatile_store};
        // `address` is the MMIO base of the local APIC page; registers are
        // addressed at fixed byte offsets from it.
        volatile_store((self.address.into(): usize + reg as usize) as *mut u32, value);
    }

    /// APIC id (register offset 0x20).
    pub fn id(&self) -> u32 {
        unsafe { self.read(0x20) }
    }

    /// APIC version (register offset 0x30).
    pub fn version(&self) -> u32 {
        unsafe { self.read(0x30) }
    }

    /// Spurious interrupt vector register (offset 0xF0).
    pub fn siv(&self) -> u32 {
        unsafe { self.read(0xF0) }
    }

    /// Set the spurious interrupt vector register (offset 0xF0).
    /// Bit 8 of the written value software-enables the APIC.
    pub fn set_siv(&mut self, value: u32) {
        unsafe { self.write(0xF0, value) }
    }

    /// Send End of Interrupt by writing the EOI register (offset 0xB0).
    pub fn eoi(&mut self) {
        unsafe { self.write(0xB0, 0) }
    }

    /// Enable timer with a specific value.
    pub fn enable_timer(&mut self) {
        unsafe {
            // Divide configuration register (0x3E0): 0x3 = divide by 16.
            self.write(0x3E0, 0x3);
            // Initial count register (0x380).
            self.write(0x380, 0x10000);
            // LVT timer register (0x320): bit 17 = periodic mode, low byte
            // is the delivery vector (0x40 = TIMER_INTERRUPT_CODE).
            self.write(0x320, (1<<17) | 0x40);
            log!("timer register is 0b{:b}", self.read(0x320));
        }
    }

    /// Current error status (register offset 0x280).
    pub fn error_status(&self) -> u32 {
        unsafe { self.read(0x280) }
    }
}

#[allow(dead_code)]
impl IOAPIC {
    /// Read a value from the I/O APIC.
    ///
    /// # Safety
    ///
    /// `reg` must be valid.
    unsafe fn read(&self, reg: u32) -> u32 {
        use core::intrinsics::{volatile_load, volatile_store};
        // The I/O APIC is accessed indirectly: write the register index to
        // IOREGSEL (base + 0x0), then read the value from IOWIN (base + 0x10).
        volatile_store((self.address.into(): usize + 0x0 as usize) as *mut u32, reg);
        volatile_load((self.address.into(): usize + 0x10 as usize) as *const u32)
    }

    /// Write a value to the I/O APIC.
    ///
    /// # Safety
    ///
    /// `reg` must be valid.
109 | unsafe fn write(&mut self, reg: u32, value: u32) { 110 | use core::intrinsics::volatile_store; 111 | volatile_store((self.address.into(): usize + 0x0 as usize) as *mut u32, reg); 112 | volatile_store((self.address.into(): usize + 0x10 as usize) as *mut u32, value); 113 | } 114 | 115 | /// I/O APIC id. 116 | pub fn id(&self) -> u32 { 117 | unsafe { self.read(0x0) } 118 | } 119 | 120 | /// I/O APIC version. 121 | pub fn version(&self) -> u32 { 122 | unsafe { self.read(0x1) } 123 | } 124 | 125 | /// I/O APIC arbitration id. 126 | pub fn arbitration_id(&self) -> u32 { 127 | unsafe { self.read(0x2) } 128 | } 129 | 130 | /// Set IRQ to an interrupt vector. 131 | pub fn set_irq(&mut self, irq: u8, apic_id: u8, vector: InterruptVector) { 132 | let vector = vector as u8; 133 | 134 | let low_index: u32 = 0x10 + (irq as u32) * 2; 135 | let high_index: u32 = 0x10 + (irq as u32) * 2 + 1; 136 | 137 | let mut high = unsafe { self.read(high_index) }; 138 | high &= !0xff000000; 139 | high |= (apic_id as u32) << 24; 140 | unsafe { self.write(high_index, high) }; 141 | 142 | let mut low = unsafe { self.read(low_index) }; 143 | low &= !(1<<16); 144 | low &= !(1<<11); 145 | low &= !0x700; 146 | low &= !0xff; 147 | low |= vector as u32; 148 | unsafe { self.write(low_index, low) }; 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/bit_field.rs: -------------------------------------------------------------------------------- 1 | use core::mem::size_of; 2 | 3 | /// Represent a bit field. 4 | #[derive(Debug, Clone, Copy)] 5 | pub struct BitField(T); 6 | 7 | #[allow(dead_code)] 8 | impl BitField 9 | where T: Number 10 | { 11 | /// Create a bit field using value. 12 | pub const fn new(value: T) -> BitField { 13 | BitField(value) 14 | } 15 | 16 | /// Return the underlying number of the bit field. 17 | pub fn bits(&self) -> T { 18 | self.0 19 | } 20 | 21 | /// Get a specific bit in the bit field. 
22 | pub fn get_bit(&self, bit: u8) -> bool { 23 | assert!(bit < self.length()); 24 | self.get_range(bit..(bit + 1)) == T::one() 25 | } 26 | 27 | /// Get a range of bits. 28 | pub fn get_range(&self, range: Range) -> T { 29 | assert!(range.start < self.length()); 30 | assert!(range.end <= self.length()); 31 | assert!(range.start < range.end); 32 | 33 | // shift away high bits 34 | let bits = self.0 << (self.length() - range.end) >> (self.length() - range.end); 35 | 36 | // shift away low bits 37 | bits >> range.start 38 | } 39 | 40 | /// Set a single bit. 41 | pub fn set_bit(&mut self, bit: u8, value: bool) -> &mut Self { 42 | assert!(bit < self.length()); 43 | if value { 44 | self.0 |= T::one() << bit; 45 | } else { 46 | self.0 &= !(T::one() << bit); 47 | } 48 | self 49 | } 50 | 51 | /// Set a range of bits. 52 | pub fn set_range(&mut self, range: Range, value: T) -> &mut Self { 53 | assert!(range.start < self.length()); 54 | assert!(range.end <= self.length()); 55 | assert!(range.start < range.end); 56 | assert!(value << (self.length() - (range.end - range.start)) >> 57 | (self.length() - (range.end - range.start)) == value, 58 | "value too big"); 59 | 60 | let bitmask: T = !(!T::zero() << (self.length() - range.end) >> 61 | (self.length() - range.end) >> 62 | range.start << range.start); 63 | 64 | let bits = self.0 & bitmask; 65 | // set bits 66 | self.0 = bits | (value << range.start); 67 | 68 | self 69 | } 70 | 71 | /// Get the length of the bit field. 72 | fn length(&self) -> u8 { 73 | size_of::() as u8 * 8 74 | } 75 | } 76 | 77 | use core::ops::{Range, Shl, Shr, BitAnd, BitOr, BitOrAssign, BitAndAssign, Not}; 78 | use core::fmt::Debug; 79 | 80 | /// Represent a number trait. 81 | pub trait Number: Debug + Copy + Eq + 82 | Not + Shl + Shr + 83 | BitAnd + BitOr + BitAndAssign + BitOrAssign { 84 | 85 | fn zero() -> Self; 86 | fn one() -> Self; 87 | } 88 | 89 | macro_rules! 
number_impl { 90 | ($($t:ty)*) => ($( 91 | impl Number for $t { 92 | fn zero() -> Self { 0 } 93 | fn one() -> Self { 1 } 94 | } 95 | )*) 96 | } 97 | 98 | number_impl! { u8 u16 u32 u64 usize } 99 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/dtables.rs: -------------------------------------------------------------------------------- 1 | /// A struct describing a pointer to a descriptor table (GDT / IDT). 2 | /// This is in a format suitable for giving to 'lgdt' or 'lidt'. 3 | #[repr(C, packed)] 4 | pub struct DescriptorTablePointer { 5 | /// Size of the DT. 6 | pub limit: u16, 7 | /// Pointer to the memory region containing the DT. 8 | pub base: u64, 9 | } 10 | 11 | /// Load GDT table. 12 | #[allow(dead_code)] 13 | pub unsafe fn lgdt(gdt: &DescriptorTablePointer) { 14 | asm!("lgdt ($0)" :: "r" (gdt) : "memory"); 15 | } 16 | 17 | /// Load LDT table. 18 | #[allow(dead_code)] 19 | pub unsafe fn lldt(ldt: &DescriptorTablePointer) { 20 | asm!("lldt ($0)" :: "r" (ldt) : "memory"); 21 | } 22 | 23 | /// Load IDT table. 24 | pub unsafe fn lidt(idt: &DescriptorTablePointer) { 25 | asm!("lidt ($0)" :: "r" (idt) : "memory"); 26 | } 27 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/idt.rs: -------------------------------------------------------------------------------- 1 | use arch::segmentation::{self, SegmentSelector}; 2 | use super::bit_field::BitField; 3 | use super::{HandlerFunc, InterruptVector}; 4 | 5 | /// Interrupt descriptor table. 6 | pub struct Idt([Entry; 256]); 7 | 8 | /// An entry of the interrupt descriptor table. 9 | #[derive(Debug, Clone, Copy)] 10 | #[repr(C, packed)] 11 | pub struct Entry { 12 | pointer_low: u16, 13 | gdt_selector: SegmentSelector, 14 | options: BitField, 15 | pointer_middle: u16, 16 | pointer_high: u32, 17 | reserved: u32, 18 | } 19 | 20 | /// Options in an entry of IDT. 
pub struct EntryOptions<'a>(&'a mut Entry);

// The 16-bit options field of a 64-bit IDT gate descriptor, as manipulated
// by the setters below:
//   bits 0-2   IST (interrupt stack table) index
//   bit  8     gate type low bit (set = interrupts stay enabled, i.e. trap
//              gate; clear = interrupt gate) -- see `disable_interrupts`
//   bits 9-11  'must-be-one' type bits
//   bits 13-14 DPL (descriptor privilege level)
//   bit  15    present
impl<'a> EntryOptions<'a> {
    /// Minimal settings of the entry: only the 'must-be-one' type bits set,
    /// everything else (present, DPL, IST) left zero.
    fn minimal(entry: &'a mut Entry) -> Self {
        let mut options = BitField::new(0);
        options.set_range(9..12, 0b111); // 'must-be-one' bits
        entry.options = options;
        EntryOptions(entry)
    }

    /// Create a new entry with default settings: present, interrupts
    /// disabled on entry (interrupt gate), IST stack index 1.
    fn new(entry: &'a mut Entry) -> Self {
        Self::minimal(entry)
            .set_present(true)
            .disable_interrupts(true)
            .set_stack_index(0x1)
    }

    /// Set the entry to be present (bit 15).
    pub fn set_present(self, present: bool) -> Self {
        let mut options = self.0.options;
        options.set_bit(15, present);
        self.0.options = options;
        self
    }

    /// Disable interrupts when using this entry (clears bit 8, making this
    /// an interrupt gate rather than a trap gate).
    pub fn disable_interrupts(self, disable: bool) -> Self {
        let mut options = self.0.options;
        options.set_bit(8, !disable);
        self.0.options = options;
        self
    }

    /// Set privilege level (DPL, bits 13-14) of this entry; controls the
    /// lowest CPL allowed to invoke the vector via `int`.
    pub fn set_privilege_level(self, dpl: u16) -> Self {
        let mut options = self.0.options;
        options.set_range(13..15, dpl);
        self.0.options = options;
        self
    }

    /// Set stack index (IST, bits 0-2) to use in TSS for this interrupt
    /// entry. Index 1 corresponds to `TSS.ist1`.
    pub fn set_stack_index(self, index: u16) -> Self {
        let mut options = self.0.options;
        options.set_range(0..3, index);
        self.0.options = options;
        self
    }
}

impl Idt {
    /// Create a new IDT with all 256 entries missing (not present).
    pub fn new() -> Idt {
        Idt([Entry::missing(); 256])
    }

    /// Set an interrupt vector using a handler. The entry uses the current
    /// code segment selector; the returned `EntryOptions` allows further
    /// customization (e.g. privilege level).
    pub fn set_handler(&mut self, entry: InterruptVector, handler: HandlerFunc)
        -> EntryOptions
    {
        self.0[entry as usize] = Entry::new(segmentation::cs(), handler);
        EntryOptions(&mut self.0[entry as usize])
    }

    /// Load this IDT.
88 | pub fn load(&self) { 89 | use super::dtables::{DescriptorTablePointer, lidt}; 90 | use core::mem::size_of; 91 | 92 | let ptr = DescriptorTablePointer { 93 | base: self as *const _ as u64, 94 | limit: (size_of::() - 1) as u16, 95 | }; 96 | 97 | unsafe { lidt(&ptr) }; 98 | } 99 | } 100 | 101 | impl Entry { 102 | /// Create a new entry using the handler and GDT selector. 103 | fn new(gdt_selector: SegmentSelector, handler: HandlerFunc) -> Self { 104 | let pointer = handler as u64; 105 | let mut entry = Entry { 106 | gdt_selector: gdt_selector, 107 | pointer_low: pointer as u16, 108 | pointer_middle: (pointer >> 16) as u16, 109 | pointer_high: (pointer >> 32) as u32, 110 | options: BitField::new(0), 111 | reserved: 0, 112 | }; 113 | EntryOptions::new(&mut entry); 114 | entry 115 | } 116 | 117 | /// Create a missing entry. 118 | fn missing() -> Self { 119 | let mut entry = Entry { 120 | gdt_selector: SegmentSelector::new(0), 121 | pointer_low: 0, 122 | pointer_middle: 0, 123 | pointer_high: 0, 124 | options: BitField::new(0), 125 | reserved: 0, 126 | }; 127 | EntryOptions::minimal(&mut entry); 128 | entry 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/mod.rs: -------------------------------------------------------------------------------- 1 | /// Interrupt descriptor table module. 2 | mod idt; 3 | /// Helpers for representing bit fields. 4 | mod bit_field; 5 | /// Functions and data-structures to load descriptor tables. 6 | mod dtables; 7 | /// Advanced Programmable Interrupt Controller. 8 | mod apic; 9 | /// Programmable Interrupt Controller. 10 | mod pic; 11 | 12 | /// Context switching related functionality. 
13 | #[macro_use] 14 | mod switch; 15 | 16 | use common::*; 17 | use self::switch::{last_exception_return_value, switch_to_raw}; 18 | 19 | pub use self::switch::{HandlerFunc, Registers}; 20 | pub use self::apic::{LOCAL_APIC, IO_APIC}; 21 | pub use self::pic::{disable_pic}; 22 | 23 | /// Interrupt vector type. 24 | pub type InterruptVector = u64; 25 | 26 | pub const TIMER_INTERRUPT_CODE: InterruptVector = 0x40; 27 | pub const SPURIOUS_INTERRUPT_CODE: InterruptVector = 0xFF; 28 | pub const KEYBOARD_INTERRUPT_CODE: InterruptVector = 0x21; 29 | pub const SYSTEM_CALL_INTERRUPT_CODE: InterruptVector = 0x80; 30 | pub const DEBUG_CALL_INTERRUPT_CODE: InterruptVector = 0x81; 31 | 32 | return_to_raw_fn!(timer_return_to_raw, TIMER_INTERRUPT_CODE); 33 | return_to_raw_fn!(spurious_return_to_raw, SPURIOUS_INTERRUPT_CODE); 34 | return_to_raw_fn!(keyboard_return_to_raw, KEYBOARD_INTERRUPT_CODE); 35 | return_to_raw_fn!(system_call_return_to_raw, SYSTEM_CALL_INTERRUPT_CODE); 36 | return_to_raw_fn!(debug_call_return_to_raw, DEBUG_CALL_INTERRUPT_CODE); 37 | 38 | lazy_static! { 39 | /// The interrupt descriptor table static. 40 | pub static ref IDT: idt::Idt = { 41 | let mut idt = idt::Idt::new(); 42 | 43 | idt.set_handler(SYSTEM_CALL_INTERRUPT_CODE, system_call_return_to_raw) 44 | .set_privilege_level(0x3); 45 | idt.set_handler(DEBUG_CALL_INTERRUPT_CODE, debug_call_return_to_raw) 46 | .set_privilege_level(0x3); 47 | idt.set_handler(KEYBOARD_INTERRUPT_CODE, keyboard_return_to_raw) 48 | .set_privilege_level(0x3); 49 | idt.set_handler(SPURIOUS_INTERRUPT_CODE, spurious_return_to_raw) 50 | .set_privilege_level(0x3); 51 | idt.set_handler(TIMER_INTERRUPT_CODE, timer_return_to_raw) 52 | .set_privilege_level(0x3); 53 | 54 | idt 55 | }; 56 | } 57 | 58 | /// Enum that represents exceptions. Abstracted from interrupt 59 | /// exception codes. 
60 | #[derive(Debug)] 61 | pub enum Exception { 62 | SystemCall, 63 | DebugCall, 64 | Keyboard, 65 | Spurious, 66 | Timer 67 | } 68 | 69 | impl Exception { 70 | /// Create a new Exception using an exception code and an optional 71 | /// error code. 72 | fn new(code: u64, _error: Option) -> Exception { 73 | match code { 74 | TIMER_INTERRUPT_CODE => Exception::Timer, 75 | SPURIOUS_INTERRUPT_CODE => Exception::Spurious, 76 | KEYBOARD_INTERRUPT_CODE => Exception::Keyboard, 77 | SYSTEM_CALL_INTERRUPT_CODE => Exception::SystemCall, 78 | DEBUG_CALL_INTERRUPT_CODE => Exception::DebugCall, 79 | _ => panic!(), 80 | } 81 | } 82 | 83 | /// Send End of Interrupt signal if appropriate. 84 | pub unsafe fn send_eoi(&self) { 85 | match self { 86 | &Exception::Timer => LOCAL_APIC.lock().eoi(), 87 | &Exception::Keyboard => LOCAL_APIC.lock().eoi(), 88 | _ => (), 89 | } 90 | } 91 | } 92 | 93 | /// Represents a task runtime. Used by the task capability. 94 | #[derive(Debug)] 95 | pub struct TaskRuntime { 96 | instruction_pointer: u64, 97 | cpu_flags: u64, 98 | stack_pointer: u64, 99 | registers: Registers 100 | } 101 | 102 | impl Default for TaskRuntime { 103 | fn default() -> TaskRuntime { 104 | TaskRuntime { 105 | instruction_pointer: 0x0, 106 | cpu_flags: 0b11001000000110, 107 | stack_pointer: 0x0, 108 | registers: Registers::default(), 109 | } 110 | } 111 | } 112 | 113 | impl TaskRuntime { 114 | /// Switch to a task using the task runtime. 115 | /// 116 | /// # Safety 117 | /// 118 | /// `TaskRuntime` must have all values valid. `mode_change` must 119 | /// be set according to the task capability. 
120 | pub unsafe fn switch_to(&mut self, mode_change: bool) -> Exception { 121 | let code_seg: u64 = if mode_change { 0x28 | 0x3 } else { 0x8 | 0x0 }; 122 | let data_seg: u64 = if mode_change { 0x30 | 0x3 } else { 0x10 | 0x0 }; 123 | 124 | switch::set_cur_registers(self.registers.clone()); 125 | switch_to_raw(self.stack_pointer, self.instruction_pointer, self.cpu_flags, code_seg, data_seg); 126 | self.registers = switch::cur_registers(); 127 | 128 | let exception_info = last_exception_return_value().unwrap(); 129 | 130 | self.instruction_pointer = exception_info.instruction_pointer; 131 | self.cpu_flags = exception_info.cpu_flags; 132 | self.stack_pointer = exception_info.stack_pointer; 133 | 134 | let exception = Exception::new(exception_info.exception_code, exception_info.error_code); 135 | exception.send_eoi(); 136 | 137 | return exception; 138 | } 139 | 140 | /// Set the instruction pointer of the task runtime. 141 | pub fn set_instruction_pointer(&mut self, instruction_pointer: VAddr) { 142 | self.instruction_pointer = instruction_pointer.into(); 143 | } 144 | 145 | /// Set the stack pointer of the task runtime. 146 | pub fn set_stack_pointer(&mut self, stack_pointer: VAddr) { 147 | self.stack_pointer = stack_pointer.into(); 148 | } 149 | } 150 | 151 | /// Enable interrupt. Not used. 152 | pub unsafe fn enable_interrupt() { } 153 | /// Disable interrupt. Not used. 154 | pub unsafe fn disable_interrupt() { } 155 | /// Set interrupt handler. Not used. 
156 | pub unsafe fn set_interrupt_handler() { } 157 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/interrupt/pic.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use arch::{inportb, outportb}; 4 | 5 | const PIC1_COMMAND: u16 = 0x20; 6 | const PIC1_DATA: u16 = 0x21; 7 | const PIC2_COMMAND: u16 = 0xA0; 8 | const PIC2_DATA: u16 = 0xA1; 9 | 10 | const PIC_EOI: u8 = 0x20; 11 | const ICW1_ICW4: u8 = 0x01; 12 | const ICW1_SINGLE: u8 = 0x02; 13 | const ICW1_INTERVAL4: u8 = 0x04; 14 | const ICW1_LEVEL: u8 = 0x08; 15 | const ICW1_INIT: u8 = 0x10; 16 | const ICW4_8086: u8 = 0x01; 17 | const ICW4_AUTO: u8 = 0x02; 18 | const ICW4_BUF_SLAVE: u8 = 0x08; 19 | const ICW4_BUF_MASTER: u8 = 0x0C; 20 | const ICW4_SFNM: u8 = 0x10; 21 | 22 | /// Disable Programmable Interrupt Controller. 23 | pub unsafe fn disable_pic() { 24 | // Set ICW1 25 | outportb(0x20, 0x11); 26 | outportb(0xa0, 0x11); 27 | 28 | // Set IWC2 (IRQ base offsets) 29 | outportb(0x21, 0xe0); 30 | outportb(0xa1, 0xe8); 31 | 32 | // Set ICW3 33 | outportb(0x21, 4); 34 | outportb(0xa1, 2); 35 | 36 | // Set ICW4 37 | outportb(0x21, 1); 38 | outportb(0xa1, 1); 39 | 40 | // Set OCW1 (interrupt masks) 41 | outportb(0x21, 0xff); 42 | outportb(0xa1, 0xff); 43 | } 44 | 45 | /// Send End of Interrupt for PIC. 46 | pub unsafe fn send_pic_eoi(irq: u8) { 47 | if irq >= 8 { 48 | outportb(PIC2_COMMAND, PIC_EOI); 49 | } else { 50 | outportb(PIC1_COMMAND, PIC_EOI); 51 | } 52 | } 53 | 54 | /// Enable Programmable Interrupt Controller. 
55 | pub unsafe fn enable_pic(master_offset: u8, slave_offset: u8) { 56 | let _ = inportb(PIC1_DATA); 57 | let _ = inportb(PIC2_DATA); 58 | 59 | outportb(PIC1_COMMAND, ICW1_INIT + ICW1_ICW4); 60 | outportb(PIC2_COMMAND, ICW1_INIT + ICW1_ICW4); 61 | outportb(PIC1_DATA, master_offset); 62 | outportb(PIC2_DATA, slave_offset); 63 | outportb(PIC1_DATA, 4); 64 | outportb(PIC2_DATA, 2); 65 | outportb(PIC1_DATA, ICW4_8086); 66 | outportb(PIC2_DATA, ICW4_8086); 67 | outportb(PIC1_DATA, 0x0); 68 | outportb(PIC2_DATA, 0x0); 69 | 70 | outportb(PIC1_COMMAND, 0x20); 71 | outportb(PIC2_COMMAND, 0x20); 72 | } 73 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/linker.ld: -------------------------------------------------------------------------------- 1 | ENTRY(start) 2 | OUTPUT_FORMAT(elf64-x86-64) 3 | 4 | KERNEL_BASE = 0xFFFFFFFF80000000; 5 | 6 | SECTIONS { 7 | 8 | . = 0x100000; 9 | 10 | . += SIZEOF_HEADERS; 11 | 12 | .init : AT(ADDR(.init)) { 13 | KEEP( *(.multiboot) ) 14 | *(.inittext) 15 | } 16 | 17 | . += KERNEL_BASE; 18 | 19 | .text ALIGN(0x1000) : AT(ADDR(.text) - KERNEL_BASE) { 20 | *(.text .text.*) 21 | } 22 | 23 | /* read-only data, page aligned to allow use of the no-execute feature */ 24 | .rodata ALIGN(0x1000) : AT(ADDR(.rodata) - KERNEL_BASE) { 25 | *(.rodata .rodata.*) 26 | } 27 | 28 | /* Read-write data, page aligned for the .padata section */ 29 | .data ALIGN(0x1000) : AT(ADDR(.data) - KERNEL_BASE) { 30 | *(.padata) 31 | *(.data .data.*) 32 | } 33 | 34 | /* Zero-initialised data */ 35 | .bss : AT(ADDR(.bss) - KERNEL_BASE) { 36 | *(.bss .bss.*) 37 | } 38 | 39 | kernel_end = .; 40 | 41 | /DISCARD/ : { 42 | *(.note .note.*) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/mod.rs: -------------------------------------------------------------------------------- 1 | /// Debug output channel (uses serial). 
2 | #[path = "debug.rs"] 3 | pub mod debug; 4 | 5 | /// Paging-related functionality. 6 | mod paging; 7 | 8 | /// Kernel initialization code. 9 | mod init; 10 | 11 | /// Virtual and physical address representation. 12 | mod addr; 13 | 14 | /// Interrupt-related functionality. 15 | mod interrupt; 16 | 17 | /// Segment descriptor and task state segment representation. 18 | mod segmentation; 19 | 20 | /// Architecture-specific capabilities. Re-exported also in `kernel::cap`. 21 | #[macro_use] 22 | pub mod cap; 23 | const KERNEL_BASE: u64 = 0xFFFFFFFF80000000; 24 | 25 | extern { 26 | static kernel_end: u64; 27 | } 28 | 29 | fn kernel_start_paddr() -> PAddr { 30 | PAddr::from(0x100000: usize) 31 | } 32 | 33 | fn kernel_start_vaddr() -> VAddr { 34 | unsafe { kernel_paddr_to_vaddr(kernel_start_paddr()) } 35 | } 36 | 37 | fn kernel_end_paddr() -> PAddr { 38 | unsafe { PAddr::from((&kernel_end as *const _) as u64 - KERNEL_BASE) } 39 | } 40 | 41 | #[allow(dead_code)] 42 | fn kernel_end_vaddr() -> VAddr { 43 | unsafe { kernel_paddr_to_vaddr(kernel_end_paddr()) } 44 | } 45 | 46 | unsafe fn kernel_paddr_to_vaddr(addr: PAddr) -> VAddr { 47 | VAddr::from(addr.into(): u64 + KERNEL_BASE) 48 | } 49 | 50 | 51 | #[cfg(any(target_arch = "x86_64"))] 52 | pub unsafe fn outportb(port: u16, val: u8) 53 | { 54 | asm!("outb %al, %dx" : : "{dx}"(port), "{al}"(val)); 55 | } 56 | 57 | #[cfg(any(target_arch = "x86_64"))] 58 | pub unsafe fn inportb(port: u16) -> u8 59 | { 60 | let ret: u8; 61 | asm!("inb %dx, %al" : "={ax}"(ret): "{dx}"(port)); 62 | ret 63 | } 64 | 65 | #[cfg(any(target_arch = "x86_64"))] 66 | pub unsafe fn io_wait() { 67 | outportb(0x80, 0) 68 | } 69 | 70 | pub fn enable_timer() { 71 | interrupt::LOCAL_APIC.lock().enable_timer(); 72 | } 73 | 74 | // Public interfaces 75 | pub use self::paging::{MemoryObject}; 76 | pub use self::interrupt::{enable_interrupt, disable_interrupt, set_interrupt_handler, 77 | Exception, TaskRuntime}; 78 | pub use self::init::{InitInfo}; 79 | // pub 
use self::cap::{ArchCap, PageHalf, PageFull}; 80 | pub use self::addr::{PAddr, VAddr}; 81 | 82 | // pub type TopPageTableHalf = self::cap::PML4Half; 83 | // pub type TopPageTableFull = self::cap::PML4Full; 84 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/macros.rs: -------------------------------------------------------------------------------- 1 | macro_rules! bit { 2 | ( $x:expr ) => { 3 | 1 << $x 4 | }; 5 | } 6 | 7 | macro_rules! check_flag { 8 | ($doc:meta, $fun:ident, $flag:ident) => ( 9 | #[$doc] 10 | pub fn $fun(&self) -> bool { 11 | self.contains($flag) 12 | } 13 | ) 14 | } 15 | 16 | #[allow(unused_macros)] 17 | macro_rules! is_bit_set { 18 | ($field:expr, $bit:expr) => ( 19 | $field & (1 << $bit) > 0 20 | ) 21 | } 22 | 23 | #[allow(unused_macros)] 24 | macro_rules! check_bit_fn { 25 | ($doc:meta, $fun:ident, $field:ident, $bit:expr) => ( 26 | #[$doc] 27 | pub fn $fun(&self) -> bool { 28 | is_bit_set!(self.$field, $bit) 29 | } 30 | ) 31 | } 32 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/mod.rs: -------------------------------------------------------------------------------- 1 | use common::{PAddr, VAddr}; 2 | 3 | #[macro_use] 4 | mod macros; 5 | 6 | /// Representations of page tables. 7 | mod table; 8 | 9 | /// Memory objects implementation. 10 | mod with; 11 | 12 | /// Basic page length in x86_64 (4 KiB). 13 | pub const BASE_PAGE_LENGTH: usize = 4096; // 4 KiB 14 | 15 | /// Large page length in x86_64 (2 MiB). 16 | pub const LARGE_PAGE_LENGTH: usize = 1024 * 1024 * 2; // 2 MiB 17 | 18 | /// Huge page length in x86_64 (1 GiB). 19 | #[allow(dead_code)] 20 | pub const HUGE_PAGE_LENGTH: usize = 1024 * 1024 * 1024; // 1 GiB 21 | 22 | /// Cache line length in x86_64 (64 Bytes). 
23 | #[allow(dead_code)] 24 | pub const CACHE_LINE_LENGTH: usize = 64; // 64 Bytes 25 | 26 | /// MAXPHYADDR, which is at most 52; (use CPUID for finding system value). 27 | pub const MAXPHYADDR: u64 = 52; 28 | 29 | /// Mask to find the physical address of an entry in a page-table. 30 | const ADDRESS_MASK: u64 = ((1 << MAXPHYADDR) - 1) & !0xfff; 31 | 32 | pub use self::table::*; 33 | pub use self::with::{MemoryObject}; 34 | 35 | /// Contains page-table root pointer. 36 | unsafe fn cr3() -> u64 { 37 | let ret: u64; 38 | asm!("mov %cr3, $0" : "=r" (ret)); 39 | ret 40 | } 41 | 42 | /// Switch page-table PML4 pointer. 43 | unsafe fn cr3_write(val: u64) { 44 | asm!("mov $0, %cr3" :: "r" (val) : "memory"); 45 | } 46 | 47 | /// Invalidate the given address in the TLB using the `invlpg` instruction. 48 | /// 49 | /// # Safety 50 | /// 51 | /// This function is unsafe as it causes a general protection fault (GP) if the current privilege 52 | /// level is not 0. 53 | pub unsafe fn flush(vaddr: VAddr) { 54 | asm!("invlpg ($0)" :: "r" (vaddr.into(): usize) : "memory"); 55 | } 56 | 57 | /// Invalidate the TLB completely by reloading the CR3 register. 58 | /// 59 | /// # Safety 60 | /// 61 | /// This function is unsafe as it causes a general protection fault (GP) if the current privilege 62 | /// level is not 0. 63 | pub unsafe fn flush_all() { 64 | cr3_write(cr3()) 65 | } 66 | 67 | /// Switch to a PML4 page table. 68 | /// 69 | /// # Safety 70 | /// 71 | /// The PML4 page table must have kernel mapped in 72 | /// `KERNEL_BASE`. `paddr` must point to a valid PML4 page table. 73 | pub unsafe fn switch_to(paddr: PAddr) { 74 | cr3_write(paddr.into()); 75 | } 76 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/paging/with.rs: -------------------------------------------------------------------------------- 1 | // TODO Disable interrupt before entering those. 
2 | use core::mem::{size_of}; 3 | use util::{align_down, block_count}; 4 | use super::{PTEntry, PT_P, PT_RW, flush, BASE_PAGE_LENGTH}; 5 | use arch::init::{OBJECT_POOL_PT, OBJECT_POOL_START_VADDR}; 6 | use common::PAddr; 7 | 8 | use core::ptr::NonNull; 9 | use core::marker::{PhantomData, Unsize}; 10 | use core::ops::CoerceUnsized; 11 | use core::fmt; 12 | 13 | /// Represents a memory object that converts a physical address to an 14 | /// accessible object. 15 | /// 16 | /// The struct is implemented using `ObjectPool`. When a new memory 17 | /// object is created, a new entry on the `ObjectPool` PT is created, 18 | /// which makes it addressable. The entry is deleted once the memory 19 | /// object struct is dropped. 20 | /// 21 | /// # Safety 22 | /// 23 | /// If you are going to wrap MemoryObject in any other struct, you 24 | /// must make sure that it is dropped last. However, Drop order in 25 | /// Rust is currently undefined. 26 | /// 27 | /// `MemoryObject::new` and `MemoryObject::slice` require `T: Sized`. 28 | pub struct MemoryObject<T: ?Sized> { 29 | paddr: PAddr, 30 | mapping_start_index: usize, 31 | mapping_size: usize, 32 | pointer: NonNull<T>, 33 | _marker: PhantomData<T>, 34 | } 35 | 36 | /// `MemoryObject` pointers are not `Send` because the data they reference may be aliased. 37 | impl<T: ?Sized> !Send for MemoryObject<T> { } 38 | 39 | /// `MemoryObject` pointers are not `Sync` because the data they reference may be aliased. 40 | impl<T: ?Sized> !Sync for MemoryObject<T> { } 41 | 42 | impl<T: ?Sized> MemoryObject<T> { 43 | /// Physical address of the memory object. 44 | pub fn paddr(&self) -> PAddr { 45 | self.paddr 46 | } 47 | 48 | /// Create a new memory object. 49 | /// 50 | /// # Safety 51 | /// 52 | /// PAddr must be a non-zero pointer.
53 | pub unsafe fn new(paddr: PAddr) -> Self where T: Sized { 54 | Self::slice(paddr, 1) 55 | } 56 | 57 | pub fn as_ptr(&self) -> *mut T { 58 | self.pointer.as_ptr() 59 | } 60 | 61 | pub unsafe fn as_ref(&self) -> &T { 62 | &*self.as_ptr() 63 | } 64 | 65 | pub unsafe fn as_mut(&mut self) -> &mut T { 66 | &mut *self.as_ptr() 67 | } 68 | 69 | /// Get a slice from the current memory object. 70 | pub unsafe fn slice(paddr: PAddr, size: usize) -> Self where T: Sized { 71 | let aligned = align_down(paddr, BASE_PAGE_LENGTH); 72 | let before_start = paddr.into(): usize - aligned.into(): usize; 73 | let size = size_of::<T>() * size; 74 | let required_page_size = block_count((paddr + size).into(): usize - aligned.into(): usize, 75 | BASE_PAGE_LENGTH); 76 | 77 | let mut object_pool = OBJECT_POOL_PT.lock(); 78 | let mapping_start_index: usize = { 79 | let mut mapping_start_index: Option<usize> = None; 80 | 81 | for i in 0..object_pool.len() { 82 | let mut available = true; 83 | for j in 0..required_page_size { 84 | if object_pool[i + j].is_present() { 85 | available = false; 86 | break; 87 | } 88 | } 89 | 90 | if available { 91 | mapping_start_index = Some(i); 92 | break; 93 | } 94 | } 95 | 96 | mapping_start_index 97 | }.unwrap(); 98 | 99 | 100 | for i in 0..required_page_size { 101 | object_pool[mapping_start_index + i] = PTEntry::new(aligned + (i * BASE_PAGE_LENGTH), PT_P | PT_RW); 102 | flush(OBJECT_POOL_START_VADDR + (mapping_start_index * BASE_PAGE_LENGTH) + i * BASE_PAGE_LENGTH); 103 | } 104 | 105 | let vaddr = OBJECT_POOL_START_VADDR + ((mapping_start_index * BASE_PAGE_LENGTH) + before_start); 106 | 107 | MemoryObject::<T> { 108 | paddr: paddr, 109 | mapping_start_index: mapping_start_index, 110 | mapping_size: required_page_size, 111 | pointer: NonNull::new_unchecked(vaddr.into(): usize as *mut T), 112 | _marker: PhantomData 113 | } 114 | } 115 | } 116 | 117 | impl<T: ?Sized, U: ?Sized> CoerceUnsized<MemoryObject<U>> for MemoryObject<T> where T: Unsize<U> { } 118 | 119 | impl<T: ?Sized> Drop for MemoryObject<T> { 120 | fn drop(&mut self)
{ 121 | let mut object_pool = OBJECT_POOL_PT.lock(); 122 | 123 | for i in 0..self.mapping_size { 124 | object_pool[self.mapping_start_index + i] = PTEntry::empty(); 125 | unsafe { flush(OBJECT_POOL_START_VADDR + (self.mapping_start_index * BASE_PAGE_LENGTH) + i * BASE_PAGE_LENGTH); } 126 | } 127 | } 128 | } 129 | 130 | impl fmt::Pointer for MemoryObject { 131 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 132 | fmt::Pointer::fmt(&self.pointer.as_ptr(), f) 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/segmentation/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | /// Task State Segment Representation. 4 | mod tss; 5 | 6 | pub use self::tss::{TaskStateSegment}; 7 | 8 | bitflags! { 9 | /// Specifies which element to load into a segment from 10 | /// descriptor tables (i.e., is a index to LDT or GDT table 11 | /// with some additional flags). 12 | #[repr(C)] 13 | pub flags SegmentSelector: u16 { 14 | // Requestor Privilege Level 15 | const RPL_0 = 0b00, 16 | const RPL_1 = 0b01, 17 | const RPL_2 = 0b10, 18 | const RPL_3 = 0b11, 19 | 20 | /// Table Indicator (TI) 0 means GDT is used. 21 | const TI_GDT = 0 << 3, 22 | /// Table Indicator (TI) 1 means LDT is used. 23 | const TI_LDT = 1 << 3, 24 | } 25 | } 26 | 27 | impl SegmentSelector { 28 | /// Create a new SegmentSelector. 29 | /// 30 | /// # Arguments 31 | /// * `index` index in GDT or LDT array. 32 | /// 33 | pub const fn new(index: u16) -> SegmentSelector { 34 | SegmentSelector { bits: index << 3 } 35 | } 36 | 37 | /// Create the selector from raw. 38 | pub const fn from_raw(bits: u16) -> SegmentSelector { 39 | SegmentSelector { bits: bits } 40 | } 41 | } 42 | 43 | bitflags! { 44 | /// Entry for GDT or LDT. Provides size and location of a segment. 45 | #[repr(C)] 46 | pub flags SegmentDescriptor: u64 { 47 | /// Descriptor type (0 = system; 1 = code or data). 
48 | const DESC_S = 1 << (32+12), 49 | /// Descriptor privilege level 0. 50 | const DESC_DPL0 = 0b00 << (32+13), 51 | /// Descriptor privilege level 1. 52 | const DESC_DPL1 = 0b01 << (32+13), 53 | /// Descriptor privilege level 2. 54 | const DESC_DPL2 = 0b10 << (32+13), 55 | /// Descriptor privilege level 3. 56 | const DESC_DPL3 = 0b11 << (32+13), 57 | /// Descriptor is Present. 58 | const DESC_P = 1 << (32+15), 59 | /// Available for use by system software. 60 | const DESC_AVL = 1 << (32+20), 61 | /// 64-bit code segment (IA-32e mode only). 62 | const DESC_L = 1 << (32+21), 63 | /// Default operation size (0 = 16-bit segment, 1 = 32-bit segment) 64 | const DESC_DB = 1 << (32+22), 65 | /// Granularity. 66 | const DESC_G = 1 << (32+23), 67 | 68 | // System-Segment and Gate-Descriptor Types for IA32e mode. 69 | // When the S (descriptor type) flag in a segment descriptor is clear, 70 | // the descriptor type is a system descriptor. 71 | 72 | const TYPE_SYS_LDT = 0b0010 << (32+8), 73 | const TYPE_SYS_TSS_AVAILABLE = 0b1001 << (32+8), 74 | const TYPE_SYS_TSS_BUSY = 0b1011 << (32+8), 75 | const TYPE_SYS_CALL_GATE = 0b1100 << (32+8), 76 | const TYPE_SYS_INTERRUPT_GATE = 0b1110 << (32+8), 77 | const TYPE_SYS_TRAP_GATE = 0b1111 << (32+8), 78 | 79 | // Code- and Data-Segment Descriptor Types. 80 | // When the S (descriptor type) flag in a segment descriptor is set, 81 | // the descriptor is for either a code or a data segment. 
82 | 83 | /// Data Read-Only 84 | const TYPE_D_RO = 0b0000 << (32+8), 85 | /// Data Read-Only, accessed 86 | const TYPE_D_ROA = 0b0001 << (32+8), 87 | /// Data Read/Write 88 | const TYPE_D_RW = 0b0010 << (32+8), 89 | /// Data Read/Write, accessed 90 | const TYPE_D_RWA = 0b0011 << (32+8), 91 | /// Data Read-Only, expand-down 92 | const TYPE_D_ROEXD = 0b0100 << (32+8), 93 | /// Data Read-Only, expand-down, accessed 94 | const TYPE_D_ROEXDA = 0b0101 << (32+8), 95 | /// Data Read/Write, expand-down 96 | const TYPE_D_RWEXD = 0b0110 << (32+8), 97 | /// Data Read/Write, expand-down, accessed 98 | const TYPE_D_RWEXDA = 0b0111 << (32+8), 99 | 100 | /// Code Execute-Only 101 | const TYPE_C_EO = 0b1000 << (32+8), 102 | /// Code Execute-Only, accessed 103 | const TYPE_C_EOA = 0b1001 << (32+8), 104 | /// Code Execute/Read 105 | const TYPE_C_ER = 0b1010 << (32+8), 106 | /// Code Execute/Read, accessed 107 | const TYPE_C_ERA = 0b1011 << (32+8), 108 | /// Code Execute-Only, conforming 109 | const TYPE_C_EOC = 0b1100 << (32+8), 110 | /// Code Execute-Only, conforming, accessed 111 | const TYPE_C_EOCA = 0b1101 << (32+8), 112 | /// Code Execute/Read, conforming 113 | const TYPE_C_ERC = 0b1110 << (32+8), 114 | /// Code Execute/Read, conforming, accessed 115 | const TYPE_C_ERCA = 0b1111 << (32+8), 116 | } 117 | } 118 | 119 | /// This is data-structure is a ugly mess thing so we provide some 120 | /// convenience function to program it. 
121 | impl SegmentDescriptor { 122 | pub fn new(base: u32, limit: u32) -> SegmentDescriptor { 123 | let base_low: u64 = base as u64 & 0xffffff; 124 | let base_high: u64 = (base as u64 >> 24) & 0xff; 125 | 126 | let limit_low: u64 = limit as u64 & 0xffff; 127 | let limit_high: u64 = (limit as u64 & (0b1111 << 16)) >> 16; 128 | 129 | SegmentDescriptor { 130 | bits: limit_low | base_low << 16 | limit_high << (32 + 16) | base_high << (32 + 24), 131 | } 132 | } 133 | 134 | pub fn from_raw(raw: u64) -> SegmentDescriptor { 135 | SegmentDescriptor { 136 | bits: raw, 137 | } 138 | } 139 | } 140 | 141 | /// Reload stack segment register. 142 | pub unsafe fn load_ss(sel: SegmentSelector) { 143 | asm!("movw $0, %ss " :: "r" (sel.bits()) : "memory"); 144 | } 145 | 146 | /// Reload data segment register. 147 | pub unsafe fn load_ds(sel: SegmentSelector) { 148 | asm!("movw $0, %ds " :: "r" (sel.bits()) : "memory"); 149 | } 150 | 151 | /// Reload es segment register. 152 | pub unsafe fn load_es(sel: SegmentSelector) { 153 | asm!("movw $0, %es " :: "r" (sel.bits()) : "memory"); 154 | } 155 | 156 | /// Reload fs segment register. 157 | pub unsafe fn load_fs(sel: SegmentSelector) { 158 | asm!("movw $0, %fs " :: "r" (sel.bits()) : "memory"); 159 | } 160 | 161 | /// Reload gs segment register. 162 | pub unsafe fn load_gs(sel: SegmentSelector) { 163 | asm!("movw $0, %gs " :: "r" (sel.bits()) : "memory"); 164 | } 165 | 166 | /// Reload code segment register. 167 | /// Note this is special since we can not directly move 168 | /// to %cs. Instead we push the new segment selector 169 | /// and return value on the stack and use lretq 170 | /// to reload cs and continue at 1:. 171 | pub unsafe fn load_cs(sel: SegmentSelector) { 172 | asm!("pushq $0 173 | lea 1f(%rip), %rax 174 | pushq %rax 175 | lretq 176 | 1:" :: "r" (sel.bits() as u64) : "rax" "memory"); 177 | } 178 | 179 | /// Returns the current value of the code segment register. 
180 | pub fn cs() -> SegmentSelector { 181 | let segment: u16; 182 | unsafe { asm!("mov %cs, $0" : "=r" (segment) ) }; 183 | SegmentSelector::from_raw(segment) 184 | } 185 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/segmentation/tss.rs: -------------------------------------------------------------------------------- 1 | /// Represents a Task State Segment. It holds the kernel stack 2 | /// information used by interrupts. 3 | #[repr(packed)] 4 | #[allow(dead_code)] 5 | pub struct TaskStateSegment { 6 | _reserved1: u32, 7 | pub sp0: u64, 8 | pub sp1: u64, 9 | pub sp2: u64, 10 | _reserved2: u32, 11 | _reserved3: u32, 12 | pub ist1: u64, 13 | pub ist2: u64, 14 | pub ist3: u64, 15 | pub ist4: u64, 16 | pub ist5: u64, 17 | pub ist6: u64, 18 | pub ist7: u64, 19 | _reserved4: u32, 20 | _reserved5: u32, 21 | _reserved6: u16, 22 | pub iomap_base: u16, 23 | } 24 | 25 | impl TaskStateSegment { 26 | /// Create an empty TSS. 27 | pub const fn empty() -> TaskStateSegment { 28 | TaskStateSegment { 29 | _reserved1: 0, 30 | _reserved2: 0, 31 | _reserved3: 0, 32 | _reserved4: 0, 33 | _reserved5: 0, 34 | _reserved6: 0, 35 | sp0: 0, 36 | sp1: 0, 37 | sp2: 0, 38 | ist1: 0, 39 | ist2: 0, 40 | ist3: 0, 41 | ist4: 0, 42 | ist5: 0, 43 | ist6: 0, 44 | ist7: 0, 45 | iomap_base: 0, 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /kernel/src/arch/x86_64/start.S: -------------------------------------------------------------------------------- 1 | /* 2 | * Rust BareBones OS 3 | * - By John Hodge (Mutabah/thePowersGang) 4 | * 5 | * arcm/amd64/start.S 6 | * - AMD64 Entrypoint 7 | * 8 | * == LICENCE == 9 | * This code has been put into the public domain, there are no restrictions on 10 | * its use, and the author takes no liability. 11 | */ 12 | 13 | /* The kernel is linked to run at -2GB. 
This allows efficient addressing */ 14 | KERNEL_BASE = 0xFFFFFFFF80000000 15 | 16 | /* === Multiboot Header === */ 17 | MULTIBOOT_PAGE_ALIGN = (1<<0) 18 | MULTIBOOT_MEMORY_INFO = (1<<1) 19 | /* REQVIDMODE is not used, because it is not supported by qemu */ 20 | MULTIBOOT_REQVIDMODE = (1<<2) 21 | MULTIBOOT_HEADER_MAGIC = 0x1BADB002 22 | MULTIBOOT_HEADER_FLAGS = (MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO) 23 | MULTIBOOT_CHECKSUM = -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) 24 | .section .multiboot, "a" 25 | .globl multiboot 26 | multiboot: 27 | .long MULTIBOOT_HEADER_MAGIC 28 | .long MULTIBOOT_HEADER_FLAGS 29 | .long MULTIBOOT_CHECKSUM 30 | .long multiboot 31 | /* a.out kludge (not used, the kernel is elf) */ 32 | .long 0, 0, 0, 0 /* load_addr, load_end_addr, bss_end_addr, entry_addr */ 33 | /* Video mode */ 34 | .long 0 /* Mode type (0: LFB) */ 35 | .long 0 /* Width (no preference) */ 36 | .long 0 /* Height (no preference) */ 37 | .long 32 /* Depth (32-bit preferred) */ 38 | 39 | #define DEBUG(c) mov $0x3f8, %dx ; mov $c, %al ; outb %al, %dx 40 | 41 | /* === Code === */ 42 | .section .inittext, "ax" 43 | .globl start 44 | .code32 45 | start: 46 | /* The kernel starts in protected mode (32-bit mode, we want to switch to long mode) */ 47 | 48 | /* 1. Save multiboot state */ 49 | mov %eax, multiboot_sig - KERNEL_BASE 50 | mov %ebx, multiboot_ptr - KERNEL_BASE 51 | 52 | /* 2. Ensure that the CPU supports long mode */ 53 | mov $0x80000000, %eax 54 | cpuid 55 | /* - Check if CPUID supports the field we want to query */ 56 | cmp $0x80000001, %eax 57 | jbe not64bitCapable 58 | /* - Test the IA-32e bit */ 59 | mov $0x80000001, %eax 60 | cpuid 61 | test $0x20000000, %edx /* bit 29 = LM (Long Mode available) */ 62 | jz not64bitCapable 63 | test $0x00000200, %edx /* bit 9 = APIC */ 64 | jz not64bitCapable 65 | 66 | /* 3.
Set up state for long mode */ 67 | /* Enable: 68 | PGE (Page Global Enable) 69 | + PAE (Physical Address Extension) 70 | + PSE (Page Size Extensions) 71 | */ 72 | mov %cr4, %eax 73 | or $(0x80|0x20|0x10), %eax 74 | mov %eax, %cr4 75 | 76 | /* Load PDP4 */ 77 | mov $(init_pml4 - KERNEL_BASE), %eax 78 | mov %eax, %cr3 79 | 80 | /* Enable IA-32e mode (Also enables SYSCALL and NX) */ 81 | mov $0xC0000080, %ecx 82 | rdmsr 83 | or $(1 << 11)|(1 << 8)|(1 << 0), %eax /* NXE, LME, SCE */ 84 | wrmsr 85 | 86 | /* Enable paging and enter long mode */ 87 | mov %cr0, %eax 88 | or $0x80010000, %eax /* PG & WP */ 89 | mov %eax, %cr0 90 | lgdt GDTPtr_low - KERNEL_BASE 91 | ljmp $0x08, $start64 92 | 93 | 94 | not64bitCapable: 95 | /* If the CPU isn't 64-bit capable, print a message to serial/b8000 then busy wait */ 96 | mov $0x3f8, %dx 97 | mov $'N', %al ; outb %al, %dx 98 | movw $0x100|'N', 0xb8000 99 | mov $'o', %al ; outb %al, %dx 100 | movw $0x100|'o', 0xb8002 101 | mov $'t', %al ; outb %al, %dx 102 | movw $0x100|'t', 0xb8004 103 | mov $'6', %al ; outb %al, %dx 104 | movw $0x100|'6', 0xb8006 105 | mov $'4', %al ; outb %al, %dx 106 | movw $0x100|'4', 0xb8008 107 | 108 | not64bitCapable.loop: 109 | hlt 110 | jmp not64bitCapable.loop 111 | 112 | .code64 113 | .globl start64 114 | start64: 115 | /* Running in 64-bit mode, jump to high memory */ 116 | lgdt GDTPtr 117 | mov $start64_high, %rax 118 | jmp *%rax 119 | 120 | .section .text 121 | .extern kinit 122 | .globl start64_high 123 | start64_high: 124 | /* and clear low-memory mapping */ 125 | mov $0, %rax 126 | mov %rax, init_pml4 - KERNEL_BASE + 0 127 | 128 | /* Set up segment registers */ 129 | mov $0x10, %ax 130 | mov %ax, %ss 131 | mov %ax, %ds 132 | mov %ax, %es 133 | mov %ax, %fs 134 | mov %ax, %gs 135 | 136 | /* Set up stack pointer */ 137 | mov $init_stack, %rsp 138 | 139 | /* call the rust code */ 140 | call kinit 141 | 142 | /* and if that returns (it shouldn't) loop forever */ 143 | start64.loop: 144 | hlt 145 | jmp 
start64.loop 146 | 147 | /* === Page-aligned data === */ 148 | .section .padata 149 | .globl init_pd 150 | .globl init_stack 151 | .globl kernel_stack_guard_page 152 | /* Initial paging structures, four levels */ 153 | /* The +3 for sub-pages indicates "present (1) + writable (2)" */ 154 | kernel_stack_guard_page: 155 | init_pml4: 156 | .quad low_pdpt - KERNEL_BASE + 3 /* low map for startup, will be cleared before rust code runs */ 157 | .rept 512 - 3 158 | .quad 0 159 | .endr 160 | .quad 0 /* If you so wish, this is a good place for the "Fractal" mapping */ 161 | .quad init_pdpt - KERNEL_BASE + 3 /* Final mapping */ 162 | low_pdpt: 163 | .quad init_pd - KERNEL_BASE + 3 /* early init identity map */ 164 | .rept 512 - 1 165 | .quad 0 166 | .endr 167 | init_pdpt: /* covers the top 512GB, 1GB each entry */ 168 | .rept 512 - 2 169 | .quad 0 170 | .endr 171 | .quad init_pd - KERNEL_BASE + 3 /* at -2GB, identity map the kernel image */ 172 | .quad 0 173 | init_pd: 174 | /* 0x80 = Page size extension */ 175 | .quad 0x000000 + 0x80 + 3 /* Map 2MB, enough for a 1MB kernel */ 176 | .quad 0x200000 + 0x80 + 3 /* - give it another 2MB, just in case */ 177 | .rept 512 - 2 178 | .quad 0 179 | .endr 180 | init_stack_base: 181 | .rept 0x1000 * 64 182 | .byte 0 183 | .endr 184 | init_stack: 185 | 186 | /* === General Data === */ 187 | .section .data 188 | .globl multiboot_sig 189 | .globl multiboot_ptr 190 | multiboot_sig: .long 0 191 | multiboot_ptr: .quad 0 192 | 193 | /* Global Descriptor Table */ 194 | GDTPtr_low: 195 | .word GDTEnd - GDT - 1 196 | .long GDT - KERNEL_BASE 197 | GDTPtr: 198 | .word GDTEnd - GDT - 1 199 | .quad GDT 200 | 201 | .globl GDT 202 | GDT: 203 | .long 0, 0 204 | .long 0x00000000, 0x00209A00 /* 0x08: 64-bit Code */ 205 | .long 0x00000000, 0x00009200 /* 0x10: 64-bit Data */ 206 | .long 0x00000000, 0x0040FA00 /* 0x18: 32-bit User Code */ 207 | .long 0x00000000, 0x0040F200 /* 0x20: User Data */ 208 | .long 0x00000000, 0x0020FA00 /* 0x28: 64-bit User Code */ 
209 | .long 0x00000000, 0x0000F200 /* 0x30: User Data (64 version) */ 210 | .long 0, 0, 0, 0 /* TSS (extended into 16 bytes) */ 211 | GDTEnd: 212 | -------------------------------------------------------------------------------- /kernel/src/cap/channel.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use core::convert::From; 3 | use util::RwLock; 4 | use util::managed_arc::{ManagedArc, ManagedArcAny}; 5 | use abi::ChannelMessage; 6 | use super::{UntypedDescriptor, TaskCap, TaskBufferPageCap}; 7 | 8 | #[derive(Debug)] 9 | pub enum ChannelValue { 10 | Raw(u64), 11 | Cap(ManagedArcAny), 12 | Payload(TaskBufferPageCap), 13 | } 14 | 15 | impl ChannelValue { 16 | pub fn from_message(message: ChannelMessage, source_root: TaskCap) -> Option { 17 | match message { 18 | ChannelMessage::Raw(value) => Some(ChannelValue::Raw(value)), 19 | ChannelMessage::Cap(Some(caddr)) => { 20 | let source_root = source_root.read().upgrade_cpool().unwrap(); 21 | let obj = source_root.lookup_upgrade_any(caddr); 22 | if obj.is_some() { 23 | Some(ChannelValue::Cap(obj.unwrap())) 24 | } else { 25 | None 26 | } 27 | }, 28 | ChannelMessage::Cap(None) => None, 29 | ChannelMessage::Payload => { 30 | let source_root = source_root.read().upgrade_buffer().unwrap(); 31 | Some(ChannelValue::Payload(source_root)) 32 | } 33 | } 34 | } 35 | 36 | pub fn to_message(value: ChannelValue, target_root: TaskCap) -> ChannelMessage { 37 | match value { 38 | ChannelValue::Raw(value) => ChannelMessage::Raw(value), 39 | ChannelValue::Cap(arc) => { 40 | let target_root = target_root.read().upgrade_cpool().unwrap(); 41 | let target_desc = target_root.read(); 42 | let index = target_desc.downgrade_any_free(arc); 43 | ChannelMessage::Cap(index.map(|i| { CAddr::from(i as u8) })) 44 | }, 45 | ChannelValue::Payload(buffer_cap) => { 46 | let source_buffer = buffer_cap.read().read(); 47 | let mut target_buffer_cap = target_root.read().upgrade_buffer().unwrap(); 48 | let 
mut target_buffer = target_buffer_cap.write().write(); 49 | target_buffer.payload_length = source_buffer.payload_length; 50 | for i in 0..source_buffer.payload_length { 51 | target_buffer.payload_data[i] = source_buffer.payload_data[i]; 52 | } 53 | ChannelMessage::Payload 54 | } 55 | } 56 | } 57 | } 58 | 59 | /// Channel descriptor. 60 | #[derive(Debug)] 61 | pub struct ChannelDescriptor { 62 | value: Option, 63 | next: Option, 64 | } 65 | /// Channel capability. Reference-counted smart pointer to channel 66 | /// descriptor. 67 | /// 68 | /// Channels are used for inter-process communication of different 69 | /// tasks. 70 | pub type ChannelCap = ManagedArc>; 71 | 72 | impl ChannelCap { 73 | /// Create a channel capability from an untyped capability. 74 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 75 | let mut arc: Option = None; 76 | 77 | unsafe { untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 78 | arc = Some( 79 | Self::new(paddr, RwLock::new(ChannelDescriptor { 80 | value: None, 81 | next: next_child, 82 | })) 83 | ); 84 | 85 | arc.clone().unwrap().into() 86 | }) }; 87 | 88 | arc.unwrap() 89 | } 90 | } 91 | 92 | impl ChannelDescriptor { 93 | /// Put a value to the channel. 94 | pub fn put(&mut self, value: ChannelValue) { 95 | self.value = Some(value); 96 | } 97 | 98 | /// Take a value from the channel. If there's no value in the 99 | /// channel, `None` is returned. 100 | pub fn take(&mut self) -> Option { 101 | self.value.take() 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /kernel/src/cap/cpool.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use core::any::Any; 3 | use core::ops::Deref; 4 | use util::RwLock; 5 | use util::managed_arc::{ManagedArc, ManagedArcAny, ManagedWeakPool256Arc}; 6 | 7 | use super::{UntypedDescriptor}; 8 | 9 | /// Capability pool descriptor. 
10 | #[derive(Debug)] 11 | pub struct CPoolDescriptor { 12 | weak_pool: ManagedWeakPool256Arc, 13 | next: Option, 14 | } 15 | /// Capability pool capability. Reference-counted smart pointer to 16 | /// capability pool descriptor. Capability pool itself is a 17 | /// `ManagedWeakPool` with 256 entries. 18 | /// 19 | /// Capability pool capability is used to hold multiple capabilities 20 | /// together so as to be addressable in user-space programs. 21 | pub type CPoolCap = ManagedArc>; 22 | 23 | fn downgrade_at_owning(arc: ManagedArc, index: usize, desc: &CPoolDescriptor) 24 | where ManagedArc: Any { 25 | desc.downgrade_at(&arc, index) 26 | } 27 | 28 | fn downgrade_free_owning(arc: ManagedArc, desc: &CPoolDescriptor) -> Option 29 | where ManagedArc: Any { 30 | desc.downgrade_free(&arc) 31 | } 32 | 33 | impl CPoolDescriptor { 34 | /// Create a new pointer to a capability descriptor using the 35 | /// index. If nothing is in the entry, `None` is returned. 36 | pub fn upgrade_any(&self, index: usize) -> Option { 37 | unsafe { self.weak_pool.read().upgrade_any(index, |ptr, type_id| { super::upgrade_any(ptr, type_id) }) } 38 | } 39 | 40 | /// Like `upgrade_any`, but returns a value with the specified 41 | /// type. 42 | pub fn upgrade(&self, index: usize) -> Option> 43 | where ManagedArc: Any { 44 | self.weak_pool.read().upgrade(index) 45 | } 46 | 47 | /// Downgrade a capability into the capability pool (weak pool) at 48 | /// a specified index. 49 | pub fn downgrade_at(&self, arc: &ManagedArc, index: usize) 50 | where ManagedArc: Any { 51 | self.weak_pool.read().downgrade_at(arc, index) 52 | } 53 | 54 | /// Downgrade a capability into the capability pool (weak pool) at 55 | /// a free index. 56 | pub fn downgrade_free(&self, arc: &ManagedArc) -> Option 57 | where ManagedArc: Any { 58 | self.weak_pool.read().downgrade_free(arc) 59 | } 60 | 61 | /// Downgrade a `ManagedArcAny` into the capability pool (weak 62 | /// pool) at a specified index. 
63 | pub fn downgrade_any_at(&self, arc: ManagedArcAny, index: usize) { 64 | doto_any!(arc, downgrade_at_owning, index, self) 65 | } 66 | 67 | /// Downgrade a `ManagedArcAny` into the capability pool (weak 68 | /// pool) at a free index. 69 | pub fn downgrade_any_free(&self, arc: ManagedArcAny) -> Option { 70 | doto_any!(arc, downgrade_free_owning, self) 71 | } 72 | 73 | /// Size of the capability pool. 74 | pub fn size(&self) -> usize { 75 | 256 76 | } 77 | } 78 | 79 | impl CPoolCap { 80 | /// Create a capability pool capability from an untyped 81 | /// capability. 82 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 83 | let mut arc: Option = None; 84 | 85 | let weak_pool = unsafe { ManagedWeakPool256Arc::create( 86 | untyped.allocate(ManagedWeakPool256Arc::inner_length(), 87 | ManagedWeakPool256Arc::inner_alignment())) }; 88 | 89 | unsafe { untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 90 | arc = Some( 91 | Self::new(paddr, RwLock::new(CPoolDescriptor { 92 | weak_pool: weak_pool, 93 | next: next_child, 94 | })) 95 | ); 96 | 97 | arc.clone().unwrap().into() 98 | }) }; 99 | 100 | arc.unwrap() 101 | } 102 | 103 | fn lookup) -> R>(&self, caddr: CAddr, f: F) -> R { 104 | if caddr.1 == 0 { 105 | f(None) 106 | } else if caddr.1 == 1 { 107 | let cur_lookup_index = caddr.0[0]; 108 | f(Some((self.read().deref(), cur_lookup_index as usize))) 109 | } else { 110 | let cur_lookup_index = caddr.0[0]; 111 | let next_lookup_cpool: Option = self.read().upgrade(cur_lookup_index as usize); 112 | let next_caddr = caddr << 1; 113 | 114 | if next_lookup_cpool.is_some() { 115 | let next_lookup_cpool = next_lookup_cpool.unwrap(); 116 | next_lookup_cpool.lookup::(next_caddr, f) 117 | } else { 118 | f(None) 119 | } 120 | } 121 | } 122 | 123 | /// Lookup upgrading a capability from a capability address to a `ManagedArcAny`. 
124 | pub fn lookup_upgrade_any(&self, caddr: CAddr) -> Option { 125 | self.lookup(caddr, |data| { 126 | data.map_or(None, |(cpool, index)| { 127 | cpool.upgrade_any(index) 128 | }) 129 | }) 130 | } 131 | 132 | /// Lookup upgrading a capability from a capability address. 133 | pub fn lookup_upgrade(&self, caddr: CAddr) -> Option> { 134 | self.lookup(caddr, |data| { 135 | data.map_or(None, |(cpool, index)| { 136 | cpool.upgrade(index) 137 | }) 138 | }) 139 | } 140 | 141 | /// Downgrade a capability into the capability pool at a specified capability address. 142 | pub fn lookup_downgrade_at(&self, arc: &ManagedArc, caddr: CAddr) 143 | where ManagedArc: Any { 144 | self.lookup(caddr, |data| { 145 | let (cpool, index) = data.unwrap(); 146 | cpool.downgrade_at(arc, index); 147 | }); 148 | } 149 | 150 | /// Downgrade a `ManagedArcAny` into the capability pool at a specified capability address. 151 | pub fn lookup_downgrade_any_at(&self, arc: ManagedArcAny, caddr: CAddr) { 152 | self.lookup(caddr, |data| { 153 | let (cpool, index) = data.unwrap(); 154 | cpool.downgrade_any_at(arc, index); 155 | }); 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /kernel/src/cap/mod.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
doto_any { 2 | ($any:expr, $f:tt $(,$param:expr)*) => { 3 | if $any.is::<::cap::CPoolCap>() { 4 | $f ($any.into(): ::cap::CPoolCap, $($param),*) 5 | } else if $any.is::<::cap::UntypedCap>() { 6 | $f ($any.into(): ::cap::UntypedCap, $($param),*) 7 | } else if $any.is::<::cap::TaskCap>() { 8 | $f ($any.into(): ::cap::TaskCap, $($param),*) 9 | } else if $any.is::<::cap::RawPageCap>() { 10 | $f ($any.into(): ::cap::RawPageCap, $($param),*) 11 | } else if $any.is::<::cap::TaskBufferPageCap>() { 12 | $f ($any.into(): ::cap::TaskBufferPageCap, $($param),*) 13 | } else if $any.is::<::cap::ChannelCap>() { 14 | $f ($any.into(): ::cap::ChannelCap, $($param),*) 15 | } else { 16 | doto_arch_any!($any, $f $(,$param)*) 17 | } 18 | } 19 | } 20 | 21 | /// Untyped capability implementation. 22 | mod untyped; 23 | /// Capability pool capability implementation. 24 | mod cpool; 25 | /// Task capability implementation. 26 | mod task; 27 | /// Channel capability implementation. 28 | mod channel; 29 | 30 | pub use self::untyped::{UntypedDescriptor, UntypedCap}; 31 | pub use self::cpool::{CPoolDescriptor, CPoolCap}; 32 | pub use self::task::{TaskDescriptor, TaskCap, TaskStatus, idle, task_iter}; 33 | pub use self::channel::{ChannelDescriptor, ChannelCap, ChannelValue}; 34 | 35 | pub use arch::cap::{TopPageTableCap, PageCap, PAGE_LENGTH}; 36 | 37 | use arch; 38 | use common::*; 39 | use core::any::{TypeId}; 40 | use core::mem::drop; 41 | use util::managed_arc::{ManagedArcAny, ManagedArc}; 42 | 43 | pub use abi::{SetDefault, TaskBuffer}; 44 | /// Raw page struct representing a whole page. 45 | pub struct RawPage(pub [u8; PAGE_LENGTH]); 46 | /// Raw page capability. Represents a page with no other information. 47 | pub type RawPageCap = PageCap; 48 | /// Task buffer page capability. Represents a page of task buffer. 
49 | pub type TaskBufferPageCap = PageCap; 50 | 51 | impl SetDefault for RawPage { 52 | fn set_default(&mut self) { 53 | for raw in self.0.iter_mut() { 54 | *raw = 0x0; 55 | } 56 | } 57 | } 58 | 59 | /// Create a managed Arc (capability) from an address of an kernel 60 | /// object (architecture-specific or general). The `type_id` should be 61 | /// a [TypeId](https://doc.rust-lang.org/std/any/struct.TypeId.html) 62 | /// of a capability. If the `type_id` is not recognized, `None` is 63 | /// returned. 64 | /// 65 | /// # Safety 66 | /// 67 | /// `ptr` must be a physical address pointing to a valid kernel object 68 | /// of type `type_id`. 69 | pub unsafe fn upgrade_any(ptr: PAddr, type_id: TypeId) -> Option { 70 | if type_id == TypeId::of::() { 71 | Some({ ManagedArc::from_ptr(ptr): CPoolCap }.into()) 72 | } else if type_id == TypeId::of::() { 73 | Some({ ManagedArc::from_ptr(ptr): UntypedCap }.into()) 74 | } else if type_id == TypeId::of::() { 75 | Some({ ManagedArc::from_ptr(ptr): TaskCap }.into()) 76 | } else if type_id == TypeId::of::() { 77 | Some({ ManagedArc::from_ptr(ptr): RawPageCap }.into()) 78 | } else if type_id == TypeId::of::() { 79 | Some({ ManagedArc::from_ptr(ptr): TaskBufferPageCap }.into()) 80 | } else if type_id == TypeId::of::() { 81 | Some({ ManagedArc::from_ptr(ptr): ChannelCap }.into()) 82 | } else { 83 | arch::cap::upgrade_arch_any(ptr, type_id) 84 | } 85 | } 86 | 87 | /// Drop an architecture-specific `any` capability. `ManagedArcAny` is 88 | /// not itself droppable. It must be converted to its real type before 89 | /// dropping. 
90 | pub fn drop_any(any: ManagedArcAny) { 91 | doto_any!(any, drop) 92 | } 93 | -------------------------------------------------------------------------------- /kernel/src/cap/task.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use core::iter::Iterator; 3 | use util::{RwLock, Mutex}; 4 | use util::managed_arc::{ManagedArc, ManagedArcAny, ManagedWeakPool3Arc}; 5 | use arch::{TaskRuntime, Exception}; 6 | 7 | use super::{UntypedDescriptor, TopPageTableCap, CPoolCap, TaskBufferPageCap, ChannelCap}; 8 | 9 | /// Switch to an idle task that runs in kernel-mode. This is used when 10 | /// no other tasks is runnable. Like normal context switching, this 11 | /// returns only when exceptions (interrupts) happen. 12 | pub fn idle() -> Exception { 13 | #[naked] 14 | unsafe fn idle_task() -> ! { 15 | asm!("hlt"); 16 | ::core::intrinsics::unreachable(); 17 | } 18 | 19 | let mut task_runtime = TaskRuntime::default(); 20 | task_runtime.set_instruction_pointer(VAddr::from(idle_task as *const () as u64)); 21 | 22 | unsafe { 23 | task_runtime.switch_to(false) 24 | } 25 | } 26 | 27 | /// Represent a task status. 28 | #[derive(Debug, Clone)] 29 | pub enum TaskStatus { 30 | Active, 31 | ChannelWait(ChannelCap), 32 | Inactive, 33 | } 34 | 35 | /// Task descriptor. 36 | #[derive(Debug)] 37 | pub struct TaskDescriptor { 38 | weak_pool: ManagedWeakPool3Arc, 39 | runtime: TaskRuntime, 40 | next: Option, 41 | next_task: Option, 42 | status: TaskStatus 43 | } 44 | /// Task capability. Reference-counted smart pointer to task 45 | /// descriptor. 46 | /// 47 | /// Tasks represents isolated processes running. 48 | pub type TaskCap = ManagedArc>; 49 | 50 | impl TaskCap { 51 | /// Create a task capability from an untyped capability. 
52 | pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self { 53 | let mut arc: Option = None; 54 | 55 | let weak_pool = unsafe { ManagedWeakPool3Arc::create( 56 | untyped.allocate(ManagedWeakPool3Arc::inner_length(), 57 | ManagedWeakPool3Arc::inner_alignment())) }; 58 | 59 | unsafe { untyped.derive(Self::inner_length(), Self::inner_alignment(), |paddr, next_child| { 60 | arc = Some( 61 | Self::new(paddr, RwLock::new(TaskDescriptor { 62 | weak_pool: weak_pool, 63 | runtime: TaskRuntime::default(), 64 | next: next_child, 65 | next_task: None, 66 | status: TaskStatus::Inactive, 67 | })) 68 | ); 69 | 70 | arc.clone().unwrap().into() 71 | }) }; 72 | 73 | register_task(arc.clone().unwrap()); 74 | 75 | arc.unwrap() 76 | } 77 | } 78 | 79 | impl TaskDescriptor { 80 | /// Set the task's instruction pointer. 81 | pub fn set_instruction_pointer(&mut self, instruction_pointer: VAddr) { 82 | self.runtime.set_instruction_pointer(instruction_pointer) 83 | } 84 | 85 | /// Set the task's stack pointer. 86 | pub fn set_stack_pointer(&mut self, stack_pointer: VAddr) { 87 | self.runtime.set_stack_pointer(stack_pointer) 88 | } 89 | 90 | /// Set the task's root capability pool. 91 | pub fn downgrade_cpool(&self, cpool: &CPoolCap) { 92 | self.weak_pool.read().downgrade_at(cpool, 0) 93 | } 94 | 95 | /// Read from the task's root capability pool. 96 | pub fn upgrade_cpool(&self) -> Option { 97 | self.weak_pool.read().upgrade(0) 98 | } 99 | 100 | /// Set the task's top page table. 101 | pub fn downgrade_top_page_table(&self, pml4: &TopPageTableCap) { 102 | self.weak_pool.read().downgrade_at(pml4, 1) 103 | } 104 | 105 | /// Read from the task's top page table. 106 | pub fn upgrade_top_page_table(&self) -> Option { 107 | self.weak_pool.read().upgrade(1) 108 | } 109 | 110 | /// Set the task's buffer. 111 | pub fn downgrade_buffer(&self, buffer: &TaskBufferPageCap) { 112 | self.weak_pool.read().downgrade_at(buffer, 2) 113 | } 114 | 115 | /// Read from the task's buffer. 
116 | pub fn upgrade_buffer(&self) -> Option { 117 | self.weak_pool.read().upgrade(2) 118 | } 119 | 120 | /// Current task status. 121 | pub fn status(&self) -> TaskStatus { 122 | self.status.clone() 123 | } 124 | 125 | /// Set the current task status. 126 | pub fn set_status(&mut self, status: TaskStatus) { 127 | self.status = status; 128 | } 129 | 130 | /// Switch to the task. The function is returned when exception 131 | /// happens. 132 | pub fn switch_to(&mut self) -> Exception { 133 | if let Some(pml4) = self.upgrade_top_page_table() { 134 | pml4.write().switch_to(); 135 | } 136 | unsafe { self.runtime.switch_to(true) } 137 | } 138 | } 139 | 140 | /// The first task initialized by the kernel. 141 | static FIRST_TASK: Mutex> = Mutex::new(None); 142 | 143 | /// Register a new task. Using `FIRST_TASK` static, this forms a 144 | /// linked-list that allows an iterator to iterate over all created 145 | /// tasks. 146 | fn register_task(cap: TaskCap) { 147 | let mut first_task = FIRST_TASK.lock(); 148 | if first_task.is_none() { 149 | *first_task = Some(cap); 150 | } else { 151 | let mut first = first_task.as_mut().unwrap().write(); 152 | let mut second = cap.write(); 153 | let third_task = first.next_task.take(); 154 | 155 | second.next_task = third_task; 156 | first.next_task = Some(cap.clone()); 157 | } 158 | } 159 | 160 | /// A task iterator. 161 | pub struct TaskIterator { 162 | next: Option, 163 | } 164 | 165 | impl Iterator for TaskIterator { 166 | type Item = TaskCap; 167 | 168 | fn next(&mut self) -> Option { 169 | if let Some(current) = self.next.clone() { 170 | { 171 | let current_task = current.read(); 172 | self.next = current_task.next_task.clone(); 173 | } 174 | return Some(current); 175 | } else { 176 | None 177 | } 178 | } 179 | } 180 | 181 | /// Return a task iterator using `FIRST_TASK`. 
182 | pub fn task_iter() -> TaskIterator { 183 | TaskIterator { 184 | next: FIRST_TASK.lock().clone(), 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /kernel/src/cap/untyped.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use util::{RwLock, align_up}; 3 | use util::managed_arc::{ManagedArc, ManagedArcAny}; 4 | 5 | /// Untyped descriptor. 6 | #[derive(Debug)] 7 | pub struct UntypedDescriptor { 8 | start_paddr: PAddr, 9 | length: usize, 10 | watermark: PAddr, 11 | first_child: Option 12 | } 13 | /// Untyped capability. Reference-counted smart pointer to untyped 14 | /// descriptor. 15 | /// 16 | /// Untyped capability represents free memory that can be retyped to 17 | /// different useful capabilities. 18 | pub type UntypedCap = ManagedArc>; 19 | 20 | impl UntypedCap { 21 | /// Bootstrap an untyped capability using a memory region information. 22 | /// 23 | /// # Safety 24 | /// 25 | /// Can only be used for free memory regions returned from 26 | /// `InitInfo`. 27 | pub unsafe fn bootstrap(start_paddr: PAddr, length: usize) -> Self { 28 | let des_paddr = align_up(start_paddr, UntypedCap::inner_alignment()); 29 | assert!(des_paddr + UntypedCap::inner_length() <= start_paddr + length); 30 | 31 | log!("des_paddr: {:?}", des_paddr); 32 | 33 | Self::new(des_paddr, RwLock::new(UntypedDescriptor { 34 | start_paddr: start_paddr, 35 | length: length, 36 | watermark: des_paddr + UntypedCap::inner_length(), 37 | first_child: None, 38 | })) 39 | } 40 | } 41 | 42 | impl UntypedDescriptor { 43 | /// Length of the untyped region. 44 | pub fn length(&self) -> usize { 45 | self.length 46 | } 47 | 48 | /// Start physical address of the untyped region. 49 | pub fn start_paddr(&self) -> PAddr { 50 | self.start_paddr 51 | } 52 | 53 | /// Allocate a memory region using the given length and 54 | /// alignment. 
Shift the watermark of the current descriptor 55 | /// passing over the allocated region. 56 | pub unsafe fn allocate(&mut self, length: usize, alignment: usize) -> PAddr { 57 | let paddr = align_up(self.watermark, alignment); 58 | assert!(paddr + length <= self.start_paddr + self.length); 59 | 60 | self.watermark = paddr + length; 61 | paddr 62 | } 63 | 64 | /// Derive and allocate a memory region to a capability that 65 | /// requires memory region. 66 | pub unsafe fn derive(&mut self, length: usize, alignment: usize, f: F) where F: FnOnce(PAddr, Option) -> ManagedArcAny { 67 | let paddr = self.allocate(length, alignment); 68 | self.first_child = Some(f(paddr, self.first_child.take())); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /kernel/src/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub use arch::{VAddr, PAddr}; 2 | pub use abi::{CAddr}; 3 | 4 | /// Represents a memory region with a start physical address and a 5 | /// length. 6 | #[derive(Debug, Copy, Clone)] 7 | pub struct MemoryRegion { 8 | start_paddr: PAddr, 9 | length: usize 10 | } 11 | 12 | impl MemoryRegion { 13 | /// Start address of the memory region. 14 | pub fn start_paddr(&self) -> PAddr { 15 | self.start_paddr 16 | } 17 | 18 | /// Length of the memory region. 19 | pub fn length(&self) -> usize { 20 | self.length 21 | } 22 | 23 | /// End address of the memory region. 24 | pub fn end_paddr(&self) -> PAddr { 25 | self.start_paddr + (self.length - 1) 26 | } 27 | 28 | /// Modify the current memory region so that it skip up to the 29 | /// argument `region`. 
30 | pub fn skip_up(&mut self, region: &MemoryRegion) -> bool { 31 | if self.start_paddr() <= region.start_paddr() && 32 | self.end_paddr() >= region.end_paddr() 33 | { 34 | self.move_up(region.start_paddr() + region.length()); 35 | 36 | true 37 | } else { 38 | false 39 | } 40 | } 41 | 42 | /// Modify the current memory region so that it move to the 43 | /// beginning of `npaddr`. 44 | pub fn move_up(&mut self, npaddr: PAddr) { 45 | assert!(npaddr >= self.start_paddr); 46 | assert!(self.start_paddr + self.length > npaddr); 47 | let nlength = self.start_paddr.into(): usize + self.length - npaddr.into(): usize; 48 | self.length = nlength; 49 | self.start_paddr = npaddr; 50 | } 51 | 52 | /// Create a new memory region using `start_paddr` and `length`. 53 | pub fn new(start_paddr: PAddr, length: usize) -> MemoryRegion { 54 | MemoryRegion { 55 | start_paddr: start_paddr, 56 | length: length 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kernel/src/common/traits.rs: -------------------------------------------------------------------------------- 1 | use cap::{CPoolHalf, UntypedHalf, PageHalf}; 2 | use super::*; 3 | 4 | use core::mem::{size_of}; 5 | use core::slice; 6 | use core::ptr::{Unique}; 7 | use core::ops::{Add, AddAssign}; 8 | 9 | pub trait ArchTrait where Self: Sized { 10 | type VAddr: Copy + Clone + Eq + Ord + PartialEq + PartialOrd + 11 | From + Into + From + Into + 12 | From + Into + Add + 13 | AddAssign; 14 | type PAddr: Copy + Clone + Eq + Ord + PartialEq + PartialOrd + 15 | From + Into + From + Into + 16 | From + Into + Add + 17 | AddAssign; 18 | type InitInfo: InitInfoTrait; 19 | type InterruptInfo: InterruptInfoTrait; 20 | type TopPageTableHalf: TopPageTableHalfTrait; 21 | type PageHalf: PageHalfTrait; 22 | type ArchSpecificCapability; 23 | 24 | // This may cause data race, so it is unsafe. 
25 | unsafe fn with_object_vaddr Return>(paddr: PAddr, size: usize, f: F) 26 | -> Return; 27 | 28 | unsafe fn with_object_unique) -> Return>(paddr: PAddr, f: F) 29 | -> Return { 30 | let size = size_of::(); 31 | Self::with_object_vaddr(paddr, size, |vaddr| { 32 | let unique = unsafe { 33 | Unique::new(vaddr.as_usize() as *mut T) }; 34 | f(unique) 35 | }) 36 | } 37 | 38 | unsafe fn with_slice Return>(paddr: PAddr, size: usize, f: F) 39 | -> Return { 40 | let allsize = size * (size_of::()); 41 | Self::with_object_vaddr(paddr, allsize, |vaddr| { 42 | let slice = unsafe { 43 | slice::from_raw_parts::(vaddr.as_usize() as *const _, allsize) }; 44 | f(slice) 45 | }) 46 | } 47 | 48 | unsafe fn with_slice_mut Return>(paddr: PAddr, size: usize, f: F) 49 | -> Return { 50 | let allsize = size * (size_of::()); 51 | Self::with_object_vaddr(paddr, allsize, |vaddr| { 52 | let mut slice = unsafe { 53 | slice::from_raw_parts_mut::(vaddr.as_usize() as *mut _, allsize) }; 54 | f(slice) 55 | }) 56 | } 57 | 58 | unsafe fn with_object Return>(paddr: PAddr, f: F) -> Return { 59 | Self::with_object_unique(paddr, |unique| { 60 | f(unsafe { unique.get() }) 61 | }) 62 | } 63 | 64 | unsafe fn with_object_mut Return>(paddr: PAddr, f: F) -> Return { 65 | Self::with_object_unique(paddr, |mut unique| { 66 | f(unsafe { unique.get_mut() }) 67 | }) 68 | } 69 | 70 | fn enable_interrupt(); 71 | fn disable_interrupt(); 72 | 73 | fn set_interrupt_handler(handler: fn(info: Self::InterruptInfo)); 74 | unsafe fn switch_to_user_mode(code_vaddr: VAddr, stack_vaddr: VAddr); 75 | } 76 | 77 | pub trait InitInfoTrait { 78 | fn free_regions(&self) -> Iterator; 79 | fn kernel_region(&self) -> MemoryRegion; 80 | fn rinit_region(&self) -> MemoryRegion; 81 | } 82 | 83 | pub trait InterruptInfoTrait { 84 | 85 | } 86 | 87 | pub trait TopPageTableHalfTrait { 88 | fn new(untyped: &mut UntypedHalf) -> Self; 89 | 90 | // This takes untyped, and thus should only be called once in kmain. 
91 | fn map(&mut self, vaddr: VAddr, page: &PageHalf, untyped: &mut UntypedHalf, cpool: &mut CPoolHalf); 92 | 93 | // Unsafe due to memory distortion. 94 | unsafe fn switch_to(&self); 95 | 96 | const fn length() -> usize; 97 | } 98 | 99 | pub trait PageHalfTrait { 100 | fn new(untyped: &mut UntypedHalf) -> Self; 101 | const fn length() -> usize; 102 | } 103 | -------------------------------------------------------------------------------- /kernel/src/elf/loader.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | use core::mem::{size_of}; 3 | use core::slice; 4 | use core::str; 5 | 6 | use super::{FileHeader, ProgramHeader, SectionHeader, Symbol, StrOffset}; 7 | 8 | /// Abstract representation of a loadable ELF binary. 9 | pub struct ElfBinary<'s> { 10 | name: &'s str, 11 | region: &'s [u8], 12 | header: &'s FileHeader, 13 | } 14 | 15 | impl<'s> fmt::Debug for ElfBinary<'s> { 16 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 17 | write!(f, "{} {}", self.name, self.header) 18 | } 19 | } 20 | 21 | // T must be a POD for this to be safe 22 | unsafe fn slice_pod(region: &[u8], offset: usize, count: usize) -> &[T] { 23 | assert!(region.len() - offset >= count * size_of::()); 24 | slice::from_raw_parts(region[offset..].as_ptr() as *const T, count) 25 | } 26 | 27 | #[allow(dead_code)] 28 | impl<'s> ElfBinary<'s> { 29 | /// Create a new ElfBinary. 30 | /// Makes sure that the provided region has valid ELF magic byte sequence 31 | /// and is big enough to contain at least the ELF file header 32 | /// otherwise it will return None. 
33 | pub fn new(name: &'s str, region: &'s [u8]) -> Option> { 34 | use super::{ELF_MAGIC}; 35 | 36 | if region.len() >= size_of::() && region.starts_with(ELF_MAGIC) { 37 | let header: &FileHeader = unsafe { &slice_pod(region, 0, 1)[0] }; 38 | Some(ElfBinary { name: name, region: region, header: header }) 39 | } else { 40 | None 41 | } 42 | } 43 | 44 | pub fn file_header(&self) -> &'s FileHeader { 45 | self.header 46 | } 47 | 48 | /// Create a slice of the program headers. 49 | pub fn program_headers(&self) -> &'s [ProgramHeader] { 50 | let correct_header_size = self.header.phentsize as usize == size_of::(); 51 | let pheader_region_size = self.header.phoff as usize + self.header.phnum as usize * self.header.phentsize as usize; 52 | let big_enough_region = self.region.len() >= pheader_region_size; 53 | 54 | if self.header.phoff == 0 || !correct_header_size || !big_enough_region { 55 | return &[]; 56 | } 57 | 58 | unsafe { 59 | slice_pod(self.region, self.header.phoff as usize, self.header.phnum as usize) 60 | } 61 | } 62 | 63 | // Get the string at offset str_offset in the string table strtab 64 | fn strtab_str(&self, strtab: &'s SectionHeader, str_offset: StrOffset) -> &'s str { 65 | use super::{SHT_STRTAB}; 66 | 67 | assert!(strtab.shtype == SHT_STRTAB); 68 | let data = self.section_data(strtab); 69 | let offset = str_offset.0 as usize; 70 | let mut end = offset; 71 | while data[end] != 0 { 72 | end += 1; 73 | } 74 | str::from_utf8(&data[offset..end]).unwrap() 75 | } 76 | 77 | // Get the name of the section 78 | pub fn symbol_name(&self, symbol: &'s Symbol) -> &'s str { 79 | use super::{SHT_STRTAB}; 80 | 81 | let strtab = self.section_headers().iter().find(|s| s.shtype == SHT_STRTAB && self.section_name(s) == ".strtab").unwrap(); 82 | self.strtab_str(strtab, symbol.name) 83 | } 84 | 85 | // Get the data of the section 86 | pub fn section_data(&self, section: &'s SectionHeader) -> &'s [u8] { 87 | &self.region[(section.offset as usize)..(section.offset as usize + 
section.size as usize)] 88 | } 89 | 90 | // Get the name of the section 91 | pub fn section_name(&self, section: &'s SectionHeader) -> &'s str { 92 | self.strtab_str(&self.section_headers()[self.header.shstrndx as usize], section.name) 93 | } 94 | 95 | // Get the symbols of the section 96 | fn section_symbols(&self, section: &'s SectionHeader) -> &'s [Symbol] { 97 | use super::{SHT_SYMTAB}; 98 | 99 | assert!(section.shtype == SHT_SYMTAB); 100 | unsafe { 101 | slice_pod(self.section_data(section), 0, section.size as usize / size_of::()) 102 | } 103 | } 104 | 105 | // Enumerate all the symbols in the file 106 | pub fn for_each_symbol(&self, mut func: F) { 107 | use super::{SHT_SYMTAB}; 108 | 109 | for sym in self.section_headers().iter().filter(|s| s.shtype == SHT_SYMTAB).flat_map(|s| self.section_symbols(s).iter()) { 110 | func(sym); 111 | } 112 | } 113 | 114 | /// Create a slice of the section headers. 115 | pub fn section_headers(&self) -> &'s [SectionHeader] { 116 | let correct_header_size = self.header.shentsize as usize == size_of::(); 117 | let sheader_region_size = self.header.shoff as usize + self.header.shnum as usize * self.header.shentsize as usize; 118 | let big_enough_region = self.region.len() >= sheader_region_size; 119 | 120 | if self.header.shoff == 0 || !correct_header_size || !big_enough_region { 121 | return &[]; 122 | } 123 | 124 | unsafe { 125 | slice_pod(self.region, self.header.shoff as usize, self.header.shnum as usize) 126 | } 127 | } 128 | 129 | /// Can we load the binary on our platform? 130 | // TODO Move this to platform specific. 
131 | fn can_load(&self) -> bool { 132 | use super::{ELFCLASS64, EV_CURRENT, ELFDATA2LSB, ELFOSABI_SYSV, ELFOSABI_LINUX, ET_EXEC, ET_DYN, EM_X86_64}; 133 | 134 | let correct_class = {self.header.ident.class} == ELFCLASS64; 135 | let correct_elfversion = {self.header.ident.version} == EV_CURRENT; 136 | let correct_data = {self.header.ident.data} == ELFDATA2LSB; 137 | let correct_osabi = {self.header.ident.osabi} == ELFOSABI_SYSV || {self.header.ident.osabi} == ELFOSABI_LINUX; 138 | let correct_type = {self.header.elftype} == ET_EXEC || {self.header.elftype} == ET_DYN; 139 | let correct_machine = {self.header.machine} == EM_X86_64; 140 | 141 | correct_class && correct_data && correct_elfversion && correct_machine && correct_osabi && correct_type 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /kernel/src/logging.rs: -------------------------------------------------------------------------------- 1 | use core::sync::atomic; 2 | use core::fmt; 3 | 4 | /// A formatter object 5 | pub struct Writer(bool); 6 | 7 | /// A primitive lock for the logging output 8 | /// 9 | /// This is not really a lock. Since there is no threading at the moment, all 10 | /// it does is prevent writing when a collision would occur. 11 | static LOGGING_LOCK: atomic::AtomicBool = atomic::ATOMIC_BOOL_INIT; 12 | 13 | impl Writer 14 | { 15 | /// Obtain a logger for the specified module 16 | pub fn get(module: &str) -> Writer { 17 | // This "acquires" the lock (actually just disables output if paralel writes are attempted 18 | let mut ret = Writer( ! 
LOGGING_LOCK.swap(true, atomic::Ordering::Acquire) ); 19 | 20 | // Print the module name before returning (prefixes all messages) 21 | { 22 | use core::fmt::Write; 23 | let _ = write!(&mut ret, "[{}] ", module); 24 | } 25 | 26 | ret 27 | } 28 | } 29 | 30 | impl ::core::ops::Drop for Writer 31 | { 32 | fn drop(&mut self) 33 | { 34 | // Write a terminating newline before releasing the lock 35 | { 36 | use core::fmt::Write; 37 | let _ = write!(self, "\n"); 38 | } 39 | // On drop, "release" the lock 40 | if self.0 { 41 | LOGGING_LOCK.store(false, atomic::Ordering::Release); 42 | } 43 | } 44 | } 45 | 46 | impl fmt::Write for Writer 47 | { 48 | fn write_str(&mut self, s: &str) -> fmt::Result 49 | { 50 | // If the lock is owned by this instance, then we can safely write to the output 51 | if self.0 52 | { 53 | unsafe { 54 | ::arch::debug::puts( s ); 55 | } 56 | } 57 | Ok( () ) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /kernel/src/macros.rs: -------------------------------------------------------------------------------- 1 | /// A very primitive logging macro 2 | /// 3 | /// Obtaines a logger instance (locking the log channel) with the current module name passed 4 | /// then passes the standard format! arguments to it 5 | macro_rules! log{ 6 | ( $($arg:tt)* ) => ({ 7 | // Import the Writer trait (required by write!) 8 | use core::fmt::Write; 9 | let _ = write!(&mut ::logging::Writer::get(module_path!()), $($arg)*); 10 | }) 11 | } 12 | -------------------------------------------------------------------------------- /kernel/src/unwind.rs: -------------------------------------------------------------------------------- 1 | #[lang="panic_fmt"] 2 | #[no_mangle] 3 | pub extern "C" fn rust_begin_unwind(args: ::core::fmt::Arguments, file: &str, line: usize) -> ! 4 | { 5 | // 'args' will print to the formatted string passed to panic! 
6 | log!("file='{}', line={} :: {}", file, line, args); 7 | loop {} 8 | } 9 | 10 | #[allow(non_camel_case_types)] 11 | #[repr(C)] 12 | #[derive(Clone,Copy)] 13 | pub enum _Unwind_Reason_Code 14 | { 15 | _URC_NO_REASON = 0, 16 | _URC_FOREIGN_EXCEPTION_CAUGHT = 1, 17 | _URC_FATAL_PHASE2_ERROR = 2, 18 | _URC_FATAL_PHASE1_ERROR = 3, 19 | _URC_NORMAL_STOP = 4, 20 | _URC_END_OF_STACK = 5, 21 | _URC_HANDLER_FOUND = 6, 22 | _URC_INSTALL_CONTEXT = 7, 23 | _URC_CONTINUE_UNWIND = 8, 24 | } 25 | 26 | #[allow(non_camel_case_types)] 27 | #[derive(Clone,Copy)] 28 | pub struct _Unwind_Context; 29 | 30 | #[allow(non_camel_case_types)] 31 | pub type _Unwind_Action = u32; 32 | static _UA_SEARCH_PHASE: _Unwind_Action = 1; 33 | 34 | #[allow(non_camel_case_types)] 35 | #[repr(C)] 36 | #[derive(Clone,Copy)] 37 | pub struct _Unwind_Exception 38 | { 39 | exception_class: u64, 40 | exception_cleanup: fn(_Unwind_Reason_Code,*const _Unwind_Exception), 41 | private: [u64; 2], 42 | } 43 | 44 | #[lang="eh_personality"] 45 | #[no_mangle] 46 | pub fn rust_eh_personality( 47 | _version: isize, _actions: _Unwind_Action, _exception_class: u64, 48 | _exception_object: &_Unwind_Exception, _context: &_Unwind_Context 49 | ) -> _Unwind_Reason_Code 50 | { 51 | loop{} 52 | } 53 | 54 | #[no_mangle] 55 | #[allow(non_snake_case)] 56 | pub fn _Unwind_Resume() 57 | { 58 | loop{} 59 | } 60 | -------------------------------------------------------------------------------- /kernel/src/util/field_offset.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | use core::mem; 3 | use core::ops::Add; 4 | use core::fmt; 5 | 6 | /// Represents a pointer to a field of type `U` within the type `T` 7 | pub struct FieldOffset( 8 | /// Offset in bytes of the field within the struct 9 | usize, 10 | /// A pointer-to-member can be thought of as a function from 11 | /// `&T` to `&U` with matching lifetimes 12 | PhantomData Fn(&'a T) -> &'a U> 13 | ); 14 | 15 | 
#[allow(dead_code)] 16 | impl FieldOffset { 17 | /// Construct a field offset via a lambda which returns a reference 18 | /// to the field in question. 19 | /// 20 | /// The lambda *must not* access the value passed in. 21 | pub unsafe fn new FnOnce(&'a T) -> &'a U>(f: F) -> Self { 22 | // Construct a "fake" T. It's not valid, but the lambda shouldn't 23 | // actually access it (which is why this is unsafe) 24 | let x = mem::zeroed(); 25 | let offset = { 26 | let x = &x; 27 | // Pass a reference to the zeroed T to the lambda 28 | // The lambda gives us back a reference to (what we hope is) 29 | // a field of T, of type U 30 | let y = f(x); 31 | // Compute the offset of the field via the difference between the 32 | // references `x` and `y`. Overflow is an error: in debug builds it 33 | // will be caught here, in release it will wrap around and be caught 34 | // on the next line. 35 | (y as *const U as usize) - (x as *const T as usize) 36 | }; 37 | // Don't run destructor on "fake" T 38 | mem::forget(x); 39 | // Sanity check: ensure that the field offset plus the field size 40 | // is no greater than the size of the containing struct. This is 41 | // not sufficient to make the function *safe*, but it does catch 42 | // obvious errors like returning a reference to a boxed value, 43 | // which is owned by `T` and so has the correct lifetime, but is not 44 | // actually a field. 45 | assert!(offset + mem::size_of::() <= mem::size_of::()); 46 | // Construct an instance using the offset 47 | Self::new_from_offset(offset) 48 | } 49 | /// Construct a field offset directly from a byte offset. 50 | pub unsafe fn new_from_offset(offset: usize) -> Self { 51 | FieldOffset(offset, PhantomData) 52 | } 53 | // Methods for applying the pointer to member 54 | /// Apply the field offset to a native pointer. 55 | pub fn apply_ptr<'a>(&self, x: *const T) -> *const U { 56 | ((x as usize) + self.0) as *const U 57 | } 58 | /// Apply the field offset to a native mutable pointer. 
59 | pub fn apply_ptr_mut<'a>(&self, x: *mut T) -> *mut U { 60 | ((x as usize) + self.0) as *mut U 61 | } 62 | /// Apply the field offset to a reference. 63 | pub fn apply<'a>(&self, x: &'a T) -> &'a U { 64 | unsafe { &*self.apply_ptr(x) } 65 | } 66 | /// Apply the field offset to a mutable reference. 67 | pub fn apply_mut<'a>(&self, x: &'a mut T) -> &'a mut U { 68 | unsafe { &mut *self.apply_ptr_mut(x) } 69 | } 70 | /// Get the raw byte offset for this field offset. 71 | pub fn get_byte_offset(&self) -> usize { 72 | self.0 73 | } 74 | // Methods for unapplying the pointer to member 75 | /// Unapply the field offset to a native pointer. 76 | /// 77 | /// *Warning: very unsafe!* 78 | pub unsafe fn unapply_ptr<'a>(&self, x: *const U) -> *const T { 79 | ((x as usize) - self.0) as *const T 80 | } 81 | /// Unapply the field offset to a native mutable pointer. 82 | /// 83 | /// *Warning: very unsafe!* 84 | pub unsafe fn unapply_ptr_mut<'a>(&self, x: *mut U) -> *mut T { 85 | ((x as usize) - self.0) as *mut T 86 | } 87 | /// Unapply the field offset to a reference. 88 | /// 89 | /// *Warning: very unsafe!* 90 | pub unsafe fn unapply<'a>(&self, x: &'a U) -> &'a T { 91 | &*self.unapply_ptr(x) 92 | } 93 | /// Unapply the field offset to a mutable reference. 94 | /// 95 | /// *Warning: very unsafe!* 96 | pub unsafe fn unapply_mut<'a>(&self, x: &'a mut U) -> &'a mut T { 97 | &mut *self.unapply_ptr_mut(x) 98 | } 99 | } 100 | 101 | /// Allow chaining pointer-to-members. 102 | /// 103 | /// Applying the resulting field offset is equivalent to applying the first 104 | /// field offset, then applying the second field offset. 105 | /// 106 | /// The requirements on the generic type parameters ensure this is a safe operation. 
107 | impl Add> for FieldOffset { 108 | type Output = FieldOffset; 109 | 110 | fn add(self, other: FieldOffset) -> FieldOffset { 111 | FieldOffset(self.0 + other.0, PhantomData) 112 | } 113 | } 114 | 115 | /// The debug implementation prints the byte offset of the field in hexadecimal. 116 | impl fmt::Debug for FieldOffset { 117 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 118 | write!(f, "FieldOffset({:#x})", self.0) 119 | } 120 | } 121 | 122 | impl Copy for FieldOffset { } 123 | impl Clone for FieldOffset { 124 | fn clone(&self) -> Self { *self } 125 | } 126 | 127 | /// This macro allows safe construction of a FieldOffset, 128 | /// by generating a known to be valid lambda to pass to the 129 | /// constructor. It takes a type and the identifier of a field 130 | /// within that type as input. 131 | /// 132 | /// Examples: 133 | /// 134 | /// Offset of field `Foo().bar` 135 | /// 136 | /// `offset_of!(Foo => bar)` 137 | /// 138 | /// Offset of nested field `Foo().bar.x` 139 | /// 140 | /// `offset_of!(Foo => bar: Bar => x)` 141 | #[macro_export] 142 | macro_rules! offset_of { 143 | ($t: path => $f: ident) => { 144 | unsafe { ::util::field_offset::FieldOffset::<$t, _>::new(|x| { 145 | let $t { ref $f, .. 
} = *x; 146 | $f 147 | }) } 148 | }; 149 | ($t: path => $f: ident: $($rest: tt)*) => { 150 | offset_of!($t => $f) + offset_of!($($rest)*) 151 | }; 152 | } 153 | 154 | #[cfg(test)] 155 | mod tests { 156 | // Example structs 157 | #[derive(Debug)] 158 | struct Foo { 159 | a: u32, 160 | b: f64, 161 | c: bool 162 | } 163 | 164 | #[derive(Debug)] 165 | struct Bar { 166 | x: u32, 167 | y: Foo, 168 | } 169 | 170 | #[test] 171 | fn test_simple() { 172 | // Get a pointer to `b` within `Foo` 173 | let foo_b = offset_of!(Foo => b); 174 | 175 | // Construct an example `Foo` 176 | let mut x = Foo { 177 | a: 1, 178 | b: 2.0, 179 | c: false 180 | }; 181 | 182 | // Apply the pointer to get at `b` and read it 183 | { 184 | let y = foo_b.apply(&x); 185 | assert!(*y == 2.0); 186 | } 187 | 188 | // Apply the pointer to get at `b` and mutate it 189 | { 190 | let y = foo_b.apply_mut(&mut x); 191 | *y = 42.0; 192 | } 193 | assert!(x.b == 42.0); 194 | } 195 | 196 | #[test] 197 | fn test_nested() { 198 | // Construct an example `Foo` 199 | let mut x = Bar { 200 | x: 0, 201 | y: Foo { 202 | a: 1, 203 | b: 2.0, 204 | c: false 205 | } 206 | }; 207 | 208 | // Combine the pointer-to-members 209 | let bar_y_b = offset_of!(Bar => y: Foo => b); 210 | 211 | // Apply the pointer to get at `b` and mutate it 212 | { 213 | let y = bar_y_b.apply_mut(&mut x); 214 | *y = 42.0; 215 | } 216 | assert!(x.y.b == 42.0); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /kernel/src/util/guard.rs: -------------------------------------------------------------------------------- 1 | use super::MemoryObject; 2 | use core::ops::{Deref, DerefMut}; 3 | 4 | /// Read guard using a memory object. 5 | pub struct UniqueReadGuard { 6 | object: MemoryObject 7 | } 8 | 9 | /// Write guard using a memory object. 
10 | pub struct UniqueWriteGuard { 11 | object: MemoryObject 12 | } 13 | 14 | // Implementation for UniqueReadGuard 15 | 16 | impl UniqueReadGuard { 17 | /// Create a new read guard from a memory object. 18 | pub const unsafe fn new(object: MemoryObject) -> Self { 19 | UniqueReadGuard:: { 20 | object: object, 21 | } 22 | } 23 | } 24 | 25 | unsafe impl Send for UniqueReadGuard { } 26 | unsafe impl Sync for UniqueReadGuard { } 27 | 28 | impl Deref for UniqueReadGuard { 29 | type Target = T; 30 | fn deref(&self) -> &T { 31 | unsafe { self.object.as_ref() } 32 | } 33 | } 34 | 35 | // Implementation for UniqueWriteGuard 36 | 37 | impl UniqueWriteGuard { 38 | /// Create a new write guard using a memory object. 39 | pub const unsafe fn new(object: MemoryObject) -> Self { 40 | UniqueWriteGuard:: { 41 | object: object, 42 | } 43 | } 44 | } 45 | 46 | unsafe impl Send for UniqueWriteGuard { } 47 | unsafe impl Sync for UniqueWriteGuard { } 48 | 49 | impl Deref for UniqueWriteGuard { 50 | type Target = T; 51 | fn deref(&self) -> &T { 52 | unsafe { self.object.as_ref() } 53 | } 54 | } 55 | 56 | impl DerefMut for UniqueWriteGuard { 57 | fn deref_mut(&mut self) -> &mut T { 58 | unsafe { self.object.as_mut() } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /kernel/src/util/managed_arc/mod.rs: -------------------------------------------------------------------------------- 1 | use core::any::{Any, TypeId}; 2 | use core::marker::PhantomData; 3 | use core::convert::{From, Into}; 4 | use core::fmt; 5 | use core::mem; 6 | use core::ptr; 7 | use common::*; 8 | use spin::Mutex; 9 | use util::MemoryObject; 10 | 11 | /// Read/write lock for ManagedArc. 12 | mod rwlock; 13 | /// Weak pool storing weak pointers for ManagedArc. 
14 | mod weak_pool; 15 | 16 | pub use self::rwlock::{ManagedArcRwLockReadGuard, ManagedArcRwLockWriteGuard}; 17 | pub use self::weak_pool::{ManagedWeakPool1Arc, ManagedWeakPool3Arc, ManagedWeakPool256Arc}; 18 | 19 | /// A weak node (entry of a weak pool). 20 | #[derive(Debug)] 21 | struct ManagedWeakNode { 22 | ptr: PAddr, 23 | strong_type_id: TypeId, 24 | prev: Option, 25 | next: Option 26 | } 27 | 28 | /// A weak address. 29 | #[derive(Copy, Clone, Debug)] 30 | struct ManagedWeakAddr { 31 | inner_addr: PAddr, 32 | inner_type_id: TypeId, 33 | offset: usize, 34 | } 35 | 36 | /// Inner of an Arc, containing strong pointers and weak pointers 37 | /// information. Wrap the actual data. 38 | struct ManagedArcInner { 39 | lead: Mutex, 40 | // TODO: Implement weak pool lock. 41 | first_weak: Mutex>, 42 | data: T 43 | } 44 | 45 | impl Drop for ManagedArcInner { 46 | fn drop(&mut self) { 47 | let lead = self.lead.lock(); 48 | assert!(*lead == 0); 49 | 50 | // TODO drop all weak pointers 51 | panic!(); 52 | } 53 | } 54 | 55 | /// A managed Arc, pointing to a `ManagedArcInner`. 56 | pub struct ManagedArc { 57 | ptr: PAddr, 58 | _marker: PhantomData, 59 | } 60 | 61 | impl fmt::Debug for ManagedArc { 62 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 63 | write!(f, "{}(0x{:x})", unsafe { ::core::intrinsics::type_name::() }, self.ptr) 64 | } 65 | } 66 | 67 | impl Drop for ManagedArc { 68 | fn drop(&mut self) { 69 | let mut inner_obj = self.inner_object(); 70 | let inner = unsafe { inner_obj.as_mut() }; 71 | let mut lead = inner.lead.lock(); 72 | *lead -= 1; 73 | } 74 | } 75 | 76 | impl Clone for ManagedArc { 77 | fn clone(&self) -> Self { 78 | let mut inner_obj = self.inner_object(); 79 | let inner = unsafe { inner_obj.as_mut() }; 80 | let mut lead = inner.lead.lock(); 81 | *lead += 1; 82 | 83 | ManagedArc { 84 | ptr: self.ptr, 85 | _marker: PhantomData, 86 | } 87 | } 88 | } 89 | 90 | /// Like `ManagedArc`, but use `TypeId` to represent its type. 
91 | #[derive(Debug)] 92 | pub struct ManagedArcAny { 93 | ptr: PAddr, 94 | type_id: TypeId 95 | } 96 | 97 | impl ManagedArcAny { 98 | /// Check whether this Arc is of given type. 99 | pub fn is(&self) -> bool 100 | where ManagedArc: Any { 101 | self.type_id == TypeId::of::() 102 | } 103 | } 104 | 105 | impl From for ManagedArc { 106 | fn from(any: ManagedArcAny) -> Self { 107 | assert!(any.type_id == TypeId::of::>()); 108 | let ptr = any.ptr; 109 | mem::forget(any); 110 | ManagedArc { 111 | ptr: ptr, 112 | _marker: PhantomData, 113 | } 114 | } 115 | } 116 | 117 | impl Into for ManagedArc { 118 | fn into(self) -> ManagedArcAny { 119 | let ptr = self.ptr; 120 | mem::forget(self); 121 | ManagedArcAny { 122 | ptr: ptr, 123 | type_id: TypeId::of::>(), 124 | } 125 | } 126 | } 127 | 128 | impl Drop for ManagedArcAny { 129 | fn drop(&mut self) { 130 | log!("Error: trying to drop a ManagedArcAny."); 131 | panic!(); 132 | } 133 | } 134 | 135 | impl ManagedArc { 136 | /// Get the ManagedArcInner length. 137 | pub fn inner_length() -> usize { 138 | mem::size_of::>() 139 | } 140 | 141 | /// Get the ManagedArcInner alginment. 142 | pub fn inner_alignment() -> usize { 143 | mem::align_of::>() 144 | } 145 | 146 | /// Create a managed Arc from a physical address. 147 | pub unsafe fn from_ptr(ptr: PAddr) -> Self { 148 | let arc = ManagedArc { ptr: ptr, _marker: PhantomData }; 149 | 150 | let inner_obj = arc.inner_object(); 151 | let inner = inner_obj.as_ref(); 152 | let mut lead = inner.lead.lock(); 153 | *lead += 1; 154 | 155 | arc 156 | } 157 | 158 | /// Create a managed Arc using the given data. 
159 | pub unsafe fn new(ptr: PAddr, data: T) -> Self { 160 | let arc = ManagedArc { ptr: ptr, _marker: PhantomData }; 161 | let mut inner = arc.inner_object(); 162 | ptr::write(inner.as_mut(), ManagedArcInner { 163 | lead: Mutex::new(1), 164 | first_weak: Mutex::new(None), 165 | data: data, 166 | }); 167 | 168 | arc 169 | } 170 | 171 | /// Read the inner object, wrapped in a memory object. 172 | fn inner_object(&self) -> MemoryObject> { 173 | unsafe { MemoryObject::>::new(self.ptr) } 174 | } 175 | 176 | /// Get the strong pointers count. 177 | pub fn lead_count(&self) -> usize { 178 | let inner = self.inner_object(); 179 | let lead = unsafe { inner.as_ref().lead.lock() }; 180 | *lead 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /kernel/src/util/managed_arc/rwlock.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Deref, DerefMut}; 2 | use spin::{RwLock, RwLockReadGuard, RwLockWriteGuard}; 3 | use util::MemoryObject; 4 | 5 | use super::{ManagedArcInner, ManagedArc}; 6 | 7 | /// A read guard for ManagedArc. 8 | pub struct ManagedArcRwLockReadGuard<'a, T: 'a> { 9 | lock: RwLockReadGuard<'a, T>, 10 | #[allow(dead_code)] 11 | object: MemoryObject>>, 12 | } 13 | 14 | impl<'a, T: 'a> Deref for ManagedArcRwLockReadGuard<'a, T> { 15 | type Target = T; 16 | fn deref(&self) -> &T { 17 | self.lock.deref() 18 | } 19 | } 20 | 21 | /// A write guard for ManagedArc. 
22 | pub struct ManagedArcRwLockWriteGuard<'a, T: 'a> { 23 | lock: RwLockWriteGuard<'a, T>, 24 | #[allow(dead_code)] 25 | object: MemoryObject>>, 26 | } 27 | 28 | impl<'a, T: 'a> Deref for ManagedArcRwLockWriteGuard<'a, T> { 29 | type Target = T; 30 | fn deref(&self) -> &T { 31 | self.lock.deref() 32 | } 33 | } 34 | 35 | impl<'a, T: 'a> DerefMut for ManagedArcRwLockWriteGuard<'a, T> { 36 | fn deref_mut(&mut self) -> &mut T { 37 | self.lock.deref_mut() 38 | } 39 | } 40 | 41 | impl ManagedArc> { 42 | /// Read the value from the ManagedArc. Returns the guard. 43 | pub fn read(&self) -> ManagedArcRwLockReadGuard { 44 | let inner_obj = self.inner_object(); 45 | let inner = unsafe { &*inner_obj.as_ptr() }; 46 | ManagedArcRwLockReadGuard { 47 | lock: inner.data.read(), 48 | object: inner_obj 49 | } 50 | } 51 | 52 | /// Write to the ManagedArc. Returns the guard. 53 | pub fn write(&self) -> ManagedArcRwLockWriteGuard { 54 | let inner_obj = self.inner_object(); 55 | let inner = unsafe { &*inner_obj.as_ptr() }; 56 | ManagedArcRwLockWriteGuard { 57 | lock: inner.data.write(), 58 | object: inner_obj 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /kernel/src/util/managed_arc/weak_pool.rs: -------------------------------------------------------------------------------- 1 | use core::any::{Any, TypeId}; 2 | use core::ops::Deref; 3 | use core::mem; 4 | use core::ptr; 5 | use common::*; 6 | use spin::{Mutex}; 7 | use util::{MemoryObject}; 8 | 9 | use super::{ManagedArc, ManagedArcAny, ManagedArcInner, ManagedWeakAddr, ManagedWeakNode}; 10 | 11 | /// Managed weak pool of size 1. 12 | pub struct ManagedWeakPool1([Mutex>; 1], PAddr); 13 | /// Managed weak pool of size 3. 14 | pub struct ManagedWeakPool3([Mutex>; 3], PAddr); 15 | /// Managed weak pool of size 256. 16 | pub struct ManagedWeakPool256([Mutex>; 256], PAddr); 17 | 18 | /// Managed Arc for weak pool of size 1. 
19 | pub type ManagedWeakPool1Arc = ManagedArc; 20 | /// Managed Arc for weak pool of size 3. 21 | pub type ManagedWeakPool3Arc = ManagedArc; 22 | /// Managed Arc for weak pool of size 256. 23 | pub type ManagedWeakPool256Arc = ManagedArc; 24 | 25 | /// Guard for managed weak pool. 26 | pub struct ManagedWeakPoolGuard { 27 | object: MemoryObject> 28 | } 29 | 30 | impl Deref for ManagedWeakPoolGuard { 31 | type Target = T; 32 | fn deref(&self) -> &T { 33 | unsafe { &self.object.as_ref().data } 34 | } 35 | } 36 | 37 | macro_rules! weak_pool { 38 | ( $t:ty ) => ( 39 | impl ManagedArc<$t> { 40 | /// Create a managed weak pool in the given physical address. 41 | pub unsafe fn create(ptr: PAddr) -> Self { 42 | let arc = ManagedArc::new(ptr, mem::uninitialized()); 43 | let mut inner_obj = arc.inner_object(); 44 | let inner: &mut ManagedArcInner<$t> = inner_obj.as_mut(); 45 | 46 | ptr::write(&mut inner.data.1, ptr); 47 | for element in (inner.data: $t).0.iter_mut() { 48 | ptr::write(element, Mutex::new(None)); 49 | } 50 | 51 | arc 52 | } 53 | 54 | /// Read the Arc. Returns the guard. 55 | pub fn read(&self) -> ManagedWeakPoolGuard<$t> { 56 | ManagedWeakPoolGuard { object: self.inner_object() } 57 | } 58 | } 59 | 60 | impl $t { 61 | /// Create a new strong pointer if `index` points to a 62 | /// non-none weak pointer in the weak pool. 63 | pub unsafe fn upgrade_any(&self, index: usize, f: F) -> Option where F: FnOnce(PAddr, TypeId) -> Option { 64 | let upgrading_obj = self.0[index].lock(); 65 | let upgrading_weak = upgrading_obj.as_ref(); 66 | 67 | upgrading_weak.and_then(|weak| { 68 | f(weak.ptr, weak.strong_type_id) 69 | }) 70 | } 71 | 72 | /// Like `upgrade_any`, but create the pointer using the 73 | /// given type. 
74 | pub fn upgrade(&self, index: usize) -> Option> 75 | where ManagedArc: Any { 76 | let upgrading_obj = self.0[index].lock(); 77 | let upgrading_weak = upgrading_obj.as_ref(); 78 | 79 | upgrading_weak.and_then(|weak| { 80 | if weak.strong_type_id != TypeId::of::>() { 81 | None 82 | } else { 83 | Some(unsafe { ManagedArc::::from_ptr(weak.ptr) }) 84 | } 85 | }) 86 | } 87 | 88 | /// Downgrade a strong pointer to a weak pointer and store 89 | /// it at `index` in this weak pool. 90 | pub fn downgrade_at(&self, arc: &ManagedArc, index: usize) 91 | where ManagedArc: Any { 92 | 93 | let ptr = self.1; 94 | 95 | let weak_addr = ManagedWeakAddr { 96 | inner_addr: ptr, 97 | offset: index, 98 | inner_type_id: TypeId::of::>() 99 | }; 100 | let mut weak_node = ManagedWeakNode { 101 | ptr: arc.ptr, 102 | strong_type_id: TypeId::of::>(), 103 | prev: None, 104 | next: None 105 | }; 106 | 107 | let mut weak_node_option = self.0[index].lock(); 108 | assert!(weak_node_option.is_none()); 109 | 110 | let arc_inner_obj = arc.inner_object(); 111 | let arc_inner = unsafe { arc_inner_obj.as_ref() }; 112 | 113 | let mut arc_first_weak = arc_inner.first_weak.lock(); 114 | 115 | if arc_first_weak.is_none() { 116 | // ArcInner doesn't have any weak. 117 | *arc_first_weak = Some(weak_addr); 118 | *weak_node_option = Some(weak_node); 119 | 120 | } else { 121 | // ArcInner has weak. Insert the new weak as the first child. 
122 | let arc_second_weak_addr = arc_first_weak.take().unwrap(); 123 | set_weak_node(arc_second_weak_addr, |second_weak_node| { 124 | assert!(second_weak_node.is_some()); 125 | 126 | second_weak_node.map(|mut second_weak_node| { 127 | second_weak_node.prev = Some(weak_addr); 128 | second_weak_node 129 | }) 130 | }); 131 | weak_node.next = Some(arc_second_weak_addr); 132 | 133 | *arc_first_weak = Some(weak_addr); 134 | *weak_node_option = Some(weak_node); 135 | } 136 | } 137 | 138 | /// Downgrade a strong pointer to a weak pointer, and then 139 | /// store it in a free slot in this weak pool. 140 | pub fn downgrade_free(&self, arc: &ManagedArc) -> Option 141 | where ManagedArc: Any { 142 | for (i, element) in self.0.iter().enumerate() { 143 | // TODO race conditions 144 | 145 | if { element.lock().is_none() } { 146 | self.downgrade_at(arc, i); 147 | return Some(i); 148 | } 149 | } 150 | None 151 | } 152 | } 153 | ) 154 | } 155 | 156 | weak_pool!(ManagedWeakPool1); 157 | weak_pool!(ManagedWeakPool3); 158 | weak_pool!(ManagedWeakPool256); 159 | 160 | fn set_weak_node(addr: ManagedWeakAddr, f: F) where F: FnOnce(Option) -> Option { 161 | if addr.inner_type_id == TypeId::of::>() { 162 | let inner_obj: MemoryObject> = 163 | unsafe { MemoryObject::new(addr.inner_addr) }; 164 | let inner = unsafe { inner_obj.as_ref() }; 165 | let mut weak_node = inner.data.0[addr.offset].lock(); 166 | *weak_node = f((*weak_node).take()); 167 | } else if addr.inner_type_id == TypeId::of::>() { 168 | let inner_obj: MemoryObject> = 169 | unsafe { MemoryObject::new(addr.inner_addr) }; 170 | let inner = unsafe { inner_obj.as_ref() }; 171 | let mut weak_node = inner.data.0[addr.offset].lock(); 172 | *weak_node = f((*weak_node).take()); 173 | } else if addr.inner_type_id == TypeId::of::>() { 174 | let inner_obj: MemoryObject> = 175 | unsafe { MemoryObject::new(addr.inner_addr) }; 176 | let inner = unsafe { inner_obj.as_ref() }; 177 | let mut weak_node = inner.data.0[addr.offset].lock(); 178 | 
*weak_node = f((*weak_node).take()); 179 | } else { 180 | panic!(); 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /kernel/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | /// External readonly object helpers. 2 | mod object; 3 | 4 | /// Lock guard helpers. 5 | mod guard; 6 | 7 | /// Streaming iterator 8 | mod streamer; 9 | 10 | /// Managed reference-counted pointers that erases all weak pointers 11 | /// when the last strong pointer goes out. 12 | pub mod managed_arc; 13 | 14 | /// Get the offset of a struct field. 15 | #[macro_use] 16 | pub mod field_offset; 17 | 18 | pub use self::object::{ExternMutex, ExternReadonlyObject, MutexGuard, MemoryObject}; 19 | pub use self::guard::{UniqueReadGuard, UniqueWriteGuard}; 20 | pub use self::streamer::{Streamer}; 21 | pub use spin::{Mutex, RwLock}; 22 | 23 | use common::PAddr; 24 | 25 | /// Align the physical address up using the alignment. 26 | pub fn align_up(paddr: PAddr, alignment: usize) -> PAddr { 27 | let raw = paddr.into(): usize; 28 | let aligned = if raw % alignment == 0 { 29 | raw 30 | } else { 31 | raw + (alignment - (raw % alignment)) 32 | }; 33 | PAddr::from(aligned) 34 | } 35 | 36 | /// Align the physical address down using the alignment. 37 | pub fn align_down(paddr: PAddr, alignment: usize) -> PAddr { 38 | let raw = paddr.into(): usize; 39 | let aligned = if raw % alignment == 0 { 40 | raw 41 | } else { 42 | raw - (raw % alignment) 43 | }; 44 | PAddr::from(aligned) 45 | } 46 | 47 | /// Count blocks needed for the length. 
48 | pub fn block_count(length: usize, block_length: usize) -> usize { 49 | if length % block_length == 0 { 50 | length / block_length 51 | } else { 52 | length / block_length + 1 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /kernel/src/util/object.rs: -------------------------------------------------------------------------------- 1 | use common::*; 2 | use core::ops::Deref; 3 | use core::cell::{UnsafeCell}; 4 | use core::marker::{PhantomData}; 5 | 6 | pub use spin::{ExternMutex, MutexGuard}; 7 | pub use arch::{MemoryObject}; 8 | 9 | /// Represents an external readonly object. 10 | pub struct ExternReadonlyObject { 11 | pointer: UnsafeCell>, 12 | paddr: UnsafeCell>, 13 | _marker: PhantomData 14 | } 15 | 16 | impl ExternReadonlyObject { 17 | /// Create a new object that doesn't point to anything. 18 | pub const unsafe fn new() -> Self { 19 | ExternReadonlyObject { 20 | pointer: UnsafeCell::new(None), 21 | paddr: UnsafeCell::new(None), 22 | _marker: PhantomData, 23 | } 24 | } 25 | 26 | /// Bootstrap the pointer using a physical address. 27 | pub unsafe fn bootstrap(&self, ptr: *const T, paddr: PAddr) { 28 | *self.pointer.get() = Some(ptr); 29 | *self.paddr.get() = Some(paddr); 30 | } 31 | 32 | /// Unbootstrap the pointer. Erase the address stored. 33 | #[allow(dead_code)] 34 | pub unsafe fn unbootstrap(&self) { 35 | *self.pointer.get() = None; 36 | *self.paddr.get() = None; 37 | } 38 | 39 | /// Get the physical address in the object. 
40 | pub fn paddr(&self) -> PAddr { 41 | unsafe { (*self.paddr.get()).unwrap() } 42 | } 43 | } 44 | 45 | unsafe impl Send for ExternReadonlyObject { } 46 | unsafe impl Sync for ExternReadonlyObject { } 47 | 48 | impl Deref for ExternReadonlyObject { 49 | type Target = T; 50 | fn deref(&self) -> &T { 51 | unsafe { &*((*self.pointer.get()).unwrap()) } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /kernel/src/util/streamer.rs: -------------------------------------------------------------------------------- 1 | /// Streamer describes a "streaming iterator." 2 | /// 3 | /// It provides a mechanism for writing code that is generic over streams 4 | /// produced by this crate. 5 | /// 6 | /// Note that this is strictly less useful than `Iterator` because the item 7 | /// associated type is bound to a specific lifetime. However, this does permit 8 | /// us to write *some* generic code over streams that produce values tied 9 | /// to the lifetime of the stream. 10 | /// 11 | /// Some form of stream abstraction is inherently required for this crate 12 | /// because elements in a finite state transducer are produced *by iterating* 13 | /// over the structure. The alternative would be to create a new allocation 14 | /// for each element iterated over, which would be prohibitively expensive. 15 | /// 16 | /// # Usage & motivation 17 | /// 18 | /// Streams are hard to use because they don't fit into Rust's current type 19 | /// system very well. They are so hard to use that this author loathes having a 20 | /// publically defined trait for it. Nevertheless, they do just barely provide 21 | /// a means for composing multiple stream abstractions with different concrete 22 | /// types. For example, one might want to take the union of a range query 23 | /// stream with a stream that has been filtered by a regex. These streams have 24 | /// different concrete types. 
A `Streamer` trait allows us to write code that 25 | /// is generic over these concrete types. (All of the set operations are 26 | /// implemented this way.) 27 | /// 28 | /// A problem with streams is that the trait is itself parameterized by a 29 | /// lifetime. In practice, this makes them very unergonomic because specifying 30 | /// a `Streamer` bound generally requires a higher-ranked trait bound. This is 31 | /// necessary because the lifetime can't actually be named in the enclosing 32 | /// function; instead, the lifetime is local to iteration itself. Therefore, 33 | /// one must assert that the bound is valid for *any particular* lifetime. 34 | /// This is the essence of higher-rank trait bounds. 35 | /// 36 | /// Because of this, you might expect to see lots of bounds that look like 37 | /// this: 38 | /// 39 | /// ```ignore 40 | /// fn takes_stream(s: S) 41 | /// where S: for<'a> Streamer<'a, Item=T> 42 | /// { 43 | /// } 44 | /// ``` 45 | /// 46 | /// There are *three* different problems with this declaration: 47 | /// 48 | /// 1. `S` is not bound by any particular lifetime itself, and most streams 49 | /// probably contain a reference to an underlying finite state transducer. 50 | /// 2. It is often convenient to separate the notion of "stream" with 51 | /// "stream constructor." This represents a similar split found in the 52 | /// standard library for `Iterator` and `IntoIterator`, respectively. 53 | /// 3. The `Item=T` is invalid because `Streamer`'s associated type is 54 | /// parameterized by a lifetime and there is no way to parameterize an 55 | /// arbitrary type constructor. (In this context, `T` is the type 56 | /// constructor, because it will invariably require a lifetime to become 57 | /// a concrete type.) 
58 | /// 59 | /// With that said, we must revise our possibly-workable bounds to a giant 60 | /// scary monster: 61 | /// 62 | /// ```ignore 63 | /// fn takes_stream<'f, I, S>(s: I) 64 | /// where I: for<'a> IntoStreamer<'a, Into=S, Item=(&'a [u8], Output)>, 65 | /// S: 'f + for<'a> Streamer<'a, Item=(&'a [u8], Output)> 66 | /// { 67 | /// } 68 | /// ``` 69 | /// 70 | /// We addressed the above points correspondingly: 71 | /// 72 | /// 1. `S` is now bound by `'f`, which corresponds to the lifetime (possibly 73 | /// `'static`) of the underlying stream. 74 | /// 2. The `I` type parameter has been added to refer to a type that knows how 75 | /// to build a stream. Notice that neither of the bounds for `I` or `S` 76 | /// share a lifetime parameter. This is because the higher rank trait bound 77 | /// specifies it works for *any* particular lifetime. 78 | /// 3. `T` has been replaced with specific concrete types. Note that these 79 | /// concrete types are duplicated. With iterators, we could use 80 | /// `Item=S::Item` in the bound for `I`, but one cannot access an associated 81 | /// type through a higher-ranked trait bound. Therefore, we must duplicate 82 | /// the item type. 83 | /// 84 | /// As you can see, streams offer little flexibility, little ergonomics and a 85 | /// lot of hard to read trait bounds. The situation is lamentable, but 86 | /// nevertheless, without them, we would not be able to compose streams by 87 | /// leveraging the type system. 88 | /// 89 | /// A redeemable quality is that these *same exact* trait bounds (modulo some 90 | /// tweaks in the `Item` associated type) appear in many places in this crate 91 | /// without much variation. Therefore, once you grok it, it's mostly easy to 92 | /// pattern match it with "oh I need a stream." My hope is that clear 93 | /// documentation and examples make these complex bounds easier to burden. 
94 | /// 95 | /// Stretching this abstraction further with Rust's current type system is not 96 | /// advised. 97 | pub trait Streamer<'a> { 98 | /// The type of the item emitted by this stream. 99 | type Item: 'a; 100 | 101 | /// Emits the next element in this stream, or `None` to indicate the stream 102 | /// has been exhausted. 103 | /// 104 | /// It is not specified what a stream does after `None` is emitted. In most 105 | /// cases, `None` should be emitted on every subsequent call. 106 | fn next(&'a mut self) -> Option; 107 | } 108 | 109 | /// IntoStreamer describes types that can be converted to streams. 110 | /// 111 | /// This is analogous to the `IntoIterator` trait for `Iterator` in 112 | /// `std::iter`. 113 | pub trait IntoStreamer<'a> { 114 | /// The type of the item emitted by the stream. 115 | type Item: 'a; 116 | /// The type of the stream to be constructed. 117 | type Into: Streamer<'a, Item=Self::Item>; 118 | 119 | /// Construct a stream from `Self`. 120 | fn into_stream(self) -> Self::Into; 121 | } 122 | 123 | impl<'a, S: Streamer<'a>> IntoStreamer<'a> for S { 124 | type Item = S::Item; 125 | type Into = S; 126 | 127 | fn into_stream(self) -> S { 128 | self 129 | } 130 | } 131 | 132 | impl<'a, I: Iterator> Streamer<'a> for I where ::Item: 'a { 133 | type Item = I::Item; 134 | 135 | fn next(&'a mut self) -> Option { 136 | Iterator::next(self) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /lazy_static/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | doc 3 | Cargo.lock 4 | .cargo 5 | -------------------------------------------------------------------------------- /lazy_static/.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - nightly 4 | - beta 5 | - stable 6 | before_script: 7 | - | 8 | pip install 'travis-cargo<0.2' --user && 9 | export PATH=`python -m 
site --user-base`/bin:$PATH 10 | script: 11 | - | 12 | travis-cargo build && 13 | travis-cargo test && 14 | travis-cargo bench && 15 | travis-cargo --only nightly build -- --features spin_no_std && 16 | travis-cargo --only nightly test -- --features spin_no_std && 17 | travis-cargo --only nightly bench -- --features spin_no_std && 18 | travis-cargo --only stable doc 19 | after_success: 20 | - travis-cargo --only stable doc-upload 21 | env: 22 | global: 23 | - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly 24 | - secure: YXu24LptjeYirjWYjWGsMT2m3mB7LvQATE6TVo7VEUXv8GYoy2ORIHD83PeImxC93MmZ01QeUezRzuCW51ZcK92VnNSBttlF60SvIX18VsJrV92tsAhievFstqYQ+fB8DIuQ8noU0jPz7GpI+R9dlTRSImAqWOnVIghA+Wzz7Js= 25 | os: 26 | - linux 27 | - osx 28 | -------------------------------------------------------------------------------- /lazy_static/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lazy_static" 3 | # NB: When modifying, also modify html_root_url in lib.rs 4 | version = "0.2.9" 5 | authors = ["Marvin Löbel "] 6 | license = "MIT/Apache-2.0" 7 | 8 | description = "A macro for declaring lazily evaluated statics in Rust." 
9 | readme = "README.md" 10 | documentation = "https://docs.rs/lazy_static" 11 | 12 | repository = "https://github.com/rust-lang-nursery/lazy-static.rs" 13 | keywords = ["macro", "lazy", "static"] 14 | categories = [ "no-std", "rust-patterns" ] 15 | 16 | [dependencies.spin] 17 | version = "0.4" 18 | path = "../spin" 19 | optional = true 20 | 21 | [features] 22 | nightly = [] 23 | spin_no_std = ["nightly", "spin"] 24 | 25 | [badges] 26 | appveyor = { repository = "rust-lang-nursery/lazy-static.rs" } 27 | travis-ci = { repository = "rust-lang-nursery/lazy-static.rs" } 28 | -------------------------------------------------------------------------------- /lazy_static/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /lazy_static/README.md: -------------------------------------------------------------------------------- 1 | lazy-static.rs 2 | ============== 3 | 4 | A macro for declaring lazily evaluated statics in Rust. 5 | 6 | Using this macro, it is possible to have `static`s that require code to be 7 | executed at runtime in order to be initialized. 8 | This includes anything requiring heap allocations, like vectors or hash maps, 9 | as well as anything that requires non-const function calls to be computed. 10 | 11 | [![Travis-CI Status](https://travis-ci.org/rust-lang-nursery/lazy-static.rs.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/lazy-static.rs) 12 | 13 | # Getting Started 14 | 15 | [lazy-static.rs is available on crates.io](https://crates.io/crates/lazy_static). 16 | It is recommended to look there for the newest released version, as well as links to the newest builds of the docs. 17 | 18 | At the point of the last update of this README, the latest published version could be used like this: 19 | 20 | Add the following dependency to your Cargo manifest... 21 | 22 | ```toml 23 | [dependencies] 24 | lazy_static = "0.2" 25 | ``` 26 | 27 | ...and see the [docs](https://docs.rs/lazy_static) for how to use it. 28 | 29 | # Example 30 | 31 | ```rust 32 | #[macro_use] 33 | extern crate lazy_static; 34 | 35 | use std::collections::HashMap; 36 | 37 | lazy_static! 
{ 38 | static ref HASHMAP: HashMap = { 39 | let mut m = HashMap::new(); 40 | m.insert(0, "foo"); 41 | m.insert(1, "bar"); 42 | m.insert(2, "baz"); 43 | m 44 | }; 45 | } 46 | 47 | fn main() { 48 | // First access to `HASHMAP` initializes it 49 | println!("The entry for `0` is \"{}\".", HASHMAP.get(&0).unwrap()); 50 | 51 | // Any further access to `HASHMAP` just returns the computed value 52 | println!("The entry for `1` is \"{}\".", HASHMAP.get(&1).unwrap()); 53 | } 54 | ``` 55 | 56 | ## License 57 | 58 | Licensed under either of 59 | 60 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 61 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 62 | 63 | at your option. 64 | 65 | ### Contribution 66 | 67 | Unless you explicitly state otherwise, any contribution intentionally submitted 68 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any 69 | additional terms or conditions. 
70 | -------------------------------------------------------------------------------- /lazy_static/appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | global: 3 | PROJECT_NAME: lazy_static 4 | matrix: 5 | # Stable channel 6 | - TARGET: i686-pc-windows-gnu 7 | CHANNEL: stable 8 | - TARGET: i686-pc-windows-msvc 9 | CHANNEL: stable 10 | - TARGET: x86_64-pc-windows-gnu 11 | CHANNEL: stable 12 | - TARGET: x86_64-pc-windows-msvc 13 | CHANNEL: stable 14 | # Beta channel 15 | - TARGET: i686-pc-windows-gnu 16 | CHANNEL: beta 17 | - TARGET: i686-pc-windows-msvc 18 | CHANNEL: beta 19 | - TARGET: x86_64-pc-windows-gnu 20 | CHANNEL: beta 21 | - TARGET: x86_64-pc-windows-msvc 22 | CHANNEL: beta 23 | # Nightly channel 24 | - TARGET: i686-pc-windows-gnu 25 | CHANNEL: nightly 26 | - TARGET: i686-pc-windows-msvc 27 | CHANNEL: nightly 28 | - TARGET: x86_64-pc-windows-gnu 29 | CHANNEL: nightly 30 | - TARGET: x86_64-pc-windows-msvc 31 | CHANNEL: nightly 32 | 33 | # Install Rust and Cargo 34 | # (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml) 35 | install: 36 | - ps: Start-FileDownload "https://static.rust-lang.org/dist/channel-rust-stable" 37 | - ps: $env:RUST_VERSION = Get-Content channel-rust-stable | select -first 1 | %{$_.split('-')[1]} 38 | - if NOT "%CHANNEL%" == "stable" set RUST_VERSION=%CHANNEL% 39 | - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:RUST_VERSION}-${env:TARGET}.exe" 40 | - rust-%RUST_VERSION%-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" 41 | - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin 42 | - if "%TARGET%" == "i686-pc-windows-gnu" set PATH=%PATH%;C:\msys64\mingw32\bin 43 | - if "%TARGET%" == "x86_64-pc-windows-gnu" set PATH=%PATH%;C:\msys64\mingw64\bin 44 | - rustc -V 45 | - cargo -V 46 | 47 | build: false 48 | 49 | test_script: 50 | - cargo build --verbose 51 | - cargo test 52 | 
-------------------------------------------------------------------------------- /lazy_static/src/core_lazy.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 lazy-static.rs Developers 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate spin; 9 | 10 | use self::spin::Once; 11 | 12 | pub struct Lazy(Once); 13 | 14 | impl Lazy { 15 | #[inline(always)] 16 | pub const fn new() -> Self { 17 | Lazy(Once::new()) 18 | } 19 | 20 | #[inline(always)] 21 | pub fn get(&'static self, builder: F) -> &T 22 | where F: FnOnce() -> T 23 | { 24 | self.0.call_once(builder) 25 | } 26 | } 27 | 28 | #[macro_export] 29 | #[allow_internal_unstable] 30 | #[doc(hidden)] 31 | macro_rules! __lazy_static_create { 32 | ($NAME:ident, $T:ty) => { 33 | static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /lazy_static/src/lazy.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 lazy-static.rs Developers 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate std; 9 | 10 | use self::std::prelude::v1::*; 11 | use self::std::sync::Once; 12 | 13 | pub struct Lazy(pub *const T, pub Once); 14 | 15 | impl Lazy { 16 | #[inline(always)] 17 | pub fn get(&'static mut self, f: F) -> &T 18 | where F: FnOnce() -> T 19 | { 20 | unsafe { 21 | let r = &mut self.0; 22 | self.1.call_once(|| { 23 | *r = Box::into_raw(Box::new(f())); 24 | }); 25 | 26 | &*self.0 27 | } 28 | } 29 | } 30 | 31 | unsafe impl Sync for Lazy {} 32 | 33 | #[macro_export] 34 | #[doc(hidden)] 35 | macro_rules! 
__lazy_static_create { 36 | ($NAME:ident, $T:ty) => { 37 | use std::sync::ONCE_INIT; 38 | static mut $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy(0 as *const $T, ONCE_INIT); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /lazy_static/src/nightly_lazy.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2016 lazy-static.rs Developers 2 | // 3 | // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be 6 | // copied, modified, or distributed except according to those terms. 7 | 8 | extern crate std; 9 | 10 | use self::std::prelude::v1::*; 11 | use self::std::cell::UnsafeCell; 12 | use self::std::sync::{Once, ONCE_INIT}; 13 | 14 | pub struct Lazy(UnsafeCell>, Once); 15 | 16 | impl Lazy { 17 | #[inline(always)] 18 | pub const fn new() -> Self { 19 | Lazy(UnsafeCell::new(None), ONCE_INIT) 20 | } 21 | 22 | #[inline(always)] 23 | pub fn get(&'static self, f: F) -> &T 24 | where F: FnOnce() -> T 25 | { 26 | unsafe { 27 | self.1.call_once(|| { 28 | *self.0.get() = Some(f()); 29 | }); 30 | 31 | match *self.0.get() { 32 | Some(ref x) => x, 33 | None => std::intrinsics::unreachable(), 34 | } 35 | } 36 | } 37 | } 38 | 39 | unsafe impl Sync for Lazy {} 40 | 41 | #[macro_export] 42 | #[allow_internal_unstable] 43 | #[doc(hidden)] 44 | macro_rules! __lazy_static_create { 45 | ($NAME:ident, $T:ty) => { 46 | static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /lazy_static/tests/no_std.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature="spin_no_std")] 2 | #![feature(const_fn)] 3 | 4 | #![no_std] 5 | 6 | #[macro_use] 7 | extern crate lazy_static; 8 | 9 | lazy_static! { 10 | /// Documentation! 
11 | pub static ref NUMBER: u32 = times_two(3); 12 | } 13 | 14 | fn times_two(n: u32) -> u32 { 15 | n * 2 16 | } 17 | 18 | #[test] 19 | fn test_basic() { 20 | assert_eq!(*NUMBER, 6); 21 | } 22 | -------------------------------------------------------------------------------- /lazy_static/tests/test.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(feature="nightly", feature(const_fn))] 2 | 3 | #[macro_use] 4 | extern crate lazy_static; 5 | use std::collections::HashMap; 6 | 7 | lazy_static! { 8 | /// Documentation! 9 | pub static ref NUMBER: u32 = times_two(3); 10 | 11 | static ref ARRAY_BOXES: [Box; 3] = [Box::new(1), Box::new(2), Box::new(3)]; 12 | 13 | /// More documentation! 14 | #[allow(unused_variables)] 15 | #[derive(Copy, Clone, Debug)] 16 | pub static ref STRING: String = "hello".to_string(); 17 | 18 | static ref HASHMAP: HashMap = { 19 | let mut m = HashMap::new(); 20 | m.insert(0, "abc"); 21 | m.insert(1, "def"); 22 | m.insert(2, "ghi"); 23 | m 24 | }; 25 | 26 | // This should not compile if the unsafe is removed. 27 | static ref UNSAFE: u32 = unsafe { 28 | std::mem::transmute::(-1) 29 | }; 30 | 31 | // This *should* triggger warn(dead_code) by design. 32 | static ref UNUSED: () = (); 33 | 34 | } 35 | 36 | lazy_static! { 37 | static ref S1: &'static str = "a"; 38 | static ref S2: &'static str = "b"; 39 | } 40 | lazy_static! 
{ 41 | static ref S3: String = [*S1, *S2].join(""); 42 | } 43 | 44 | #[test] 45 | fn s3() { 46 | assert_eq!(&*S3, "ab"); 47 | } 48 | 49 | fn times_two(n: u32) -> u32 { 50 | n * 2 51 | } 52 | 53 | #[test] 54 | fn test_basic() { 55 | assert_eq!(&**STRING, "hello"); 56 | assert_eq!(*NUMBER, 6); 57 | assert!(HASHMAP.get(&1).is_some()); 58 | assert!(HASHMAP.get(&3).is_none()); 59 | assert_eq!(&*ARRAY_BOXES, &[Box::new(1), Box::new(2), Box::new(3)]); 60 | assert_eq!(*UNSAFE, std::u32::MAX); 61 | } 62 | 63 | #[test] 64 | fn test_repeat() { 65 | assert_eq!(*NUMBER, 6); 66 | assert_eq!(*NUMBER, 6); 67 | assert_eq!(*NUMBER, 6); 68 | } 69 | 70 | #[test] 71 | fn test_meta() { 72 | // this would not compile if STRING were not marked #[derive(Copy, Clone)] 73 | let copy_of_string = STRING; 74 | // just to make sure it was copied 75 | assert!(&STRING as *const _ != ©_of_string as *const _); 76 | 77 | // this would not compile if STRING were not marked #[derive(Debug)] 78 | assert_eq!(format!("{:?}", STRING), "STRING { __private_field: () }".to_string()); 79 | } 80 | 81 | mod visibility { 82 | lazy_static! { 83 | pub static ref FOO: Box = Box::new(0); 84 | static ref BAR: Box = Box::new(98); 85 | } 86 | 87 | #[test] 88 | fn sub_test() { 89 | assert_eq!(**FOO, 0); 90 | assert_eq!(**BAR, 98); 91 | } 92 | } 93 | 94 | #[test] 95 | fn test_visibility() { 96 | assert_eq!(*visibility::FOO, Box::new(0)); 97 | } 98 | 99 | // This should not cause a warning about a missing Copy implementation 100 | lazy_static! { 101 | pub static ref VAR: i32 = { 0 }; 102 | } 103 | 104 | #[derive(Copy, Clone, Debug, PartialEq)] 105 | struct X; 106 | struct Once(X); 107 | const ONCE_INIT: Once = Once(X); 108 | static DATA: X = X; 109 | static ONCE: X = X; 110 | fn require_sync() -> X { X } 111 | fn transmute() -> X { X } 112 | fn __static_ref_initialize() -> X { X } 113 | fn test(_: Vec) -> X { X } 114 | 115 | // All these names should not be shadowed 116 | lazy_static! 
{ 117 | static ref ITEM_NAME_TEST: X = { 118 | test(vec![X, Once(X).0, ONCE_INIT.0, DATA, ONCE, 119 | require_sync(), transmute(), 120 | // Except this, which will sadly be shadowed by internals: 121 | // __static_ref_initialize() 122 | ]) 123 | }; 124 | } 125 | 126 | #[test] 127 | fn item_name_shadowing() { 128 | assert_eq!(*ITEM_NAME_TEST, X); 129 | } 130 | 131 | use std::sync::atomic::AtomicBool; 132 | use std::sync::atomic::ATOMIC_BOOL_INIT; 133 | use std::sync::atomic::Ordering::SeqCst; 134 | 135 | static PRE_INIT_FLAG: AtomicBool = ATOMIC_BOOL_INIT; 136 | 137 | lazy_static! { 138 | static ref PRE_INIT: () = { 139 | PRE_INIT_FLAG.store(true, SeqCst); 140 | () 141 | }; 142 | } 143 | 144 | #[test] 145 | fn pre_init() { 146 | assert_eq!(PRE_INIT_FLAG.load(SeqCst), false); 147 | lazy_static::initialize(&PRE_INIT); 148 | assert_eq!(PRE_INIT_FLAG.load(SeqCst), true); 149 | } 150 | 151 | lazy_static! { 152 | static ref LIFETIME_NAME: for<'a> fn(&'a u8) = { fn f(_: &u8) {} f }; 153 | } 154 | 155 | #[test] 156 | fn lifetime_name() { 157 | let _ = LIFETIME_NAME; 158 | } 159 | -------------------------------------------------------------------------------- /nix/rust-nightly.nix: -------------------------------------------------------------------------------- 1 | { stdenv, lib, buildEnv, makeWrapper, runCommand, fetchzip, zlib, rsync }: 2 | 3 | # rustc and cargo nightly binaries 4 | 5 | let 6 | mkTarget = system: 7 | if system == "i686-linux" then "i686-unknown-linux-gnu" 8 | else if system == "x86_64-linux" then "x86_64-unknown-linux-gnu" 9 | else if system == "i686-darwin" then "i686-apple-darwin" 10 | else if system == "x86_64-darwin" then "x86_64-apple-darwin" 11 | else abort "no snapshot to bootstrap for this platform (missing target triple)"; 12 | 13 | mkUrl = { pname, archive, date, system }: 14 | "${archive}/${date}/${pname}-nightly-${mkTarget system}.tar.gz"; 15 | 16 | generic = { pname, archive, exes }: 17 | { date, hash, system ? 
stdenv.system }: 18 | stdenv.mkDerivation rec { 19 | name = "${pname}-${version}"; 20 | version = "nightly-${date}"; 21 | # TODO meta; 22 | 23 | src = fetchzip { 24 | url = mkUrl { inherit pname archive date system; }; 25 | sha256 = hash; 26 | }; 27 | 28 | nativeBuildInputs = [ rsync ]; 29 | 30 | dontStrip = true; 31 | 32 | unpackPhase = ""; # skip it 33 | 34 | installPhase = '' 35 | rsync --chmod=u+w -r $src/*/ $out/ 36 | ''; 37 | 38 | preFixup = if stdenv.isLinux then let 39 | # it's overkill, but fixup will prune 40 | rpath = "$out/lib:" + lib.makeLibraryPath [ zlib stdenv.cc.cc.lib ]; 41 | in '' 42 | for executable in ${lib.concatStringsSep " " exes}; do 43 | patchelf \ 44 | --interpreter "$(< $NIX_CC/nix-support/dynamic-linker)" \ 45 | --set-rpath "${rpath}" \ 46 | "$out/bin/$executable" 47 | done 48 | for library in $out/lib/*.so; do 49 | patchelf --set-rpath "${rpath}" "$library" 50 | done 51 | '' else ""; 52 | }; 53 | 54 | in rec { 55 | rustc = generic { 56 | pname = "rustc"; 57 | archive = "https://static.rust-lang.org/dist"; 58 | exes = [ "rustc" "rustdoc" ]; 59 | }; 60 | 61 | rustcWithSysroots = { rustc, sysroots ? [] }: buildEnv { 62 | name = "combined-sysroots"; 63 | paths = [ rustc ] ++ sysroots; 64 | pathsToLink = [ "/lib" "/share" ]; 65 | #buildInputs = [ makeWrapper ]; 66 | # Can't use wrapper script because of https://github.com/rust-lang/rust/issues/31943 67 | postBuild = '' 68 | mkdir -p $out/bin/ 69 | cp ${rustc}/bin/* $out/bin/ 70 | ''; 71 | }; 72 | 73 | rust-std = { date, hash, system ? 
stdenv.system }: stdenv.mkDerivation rec { 74 | # Strip install.sh, etc 75 | pname = "rust-std"; 76 | version = "nightly-${date}"; 77 | name = "${pname}-${version}-${system}"; 78 | src = fetchzip { 79 | url = mkUrl { 80 | archive = "https://static.rust-lang.org/dist"; 81 | inherit pname date system; 82 | }; 83 | sha256 = hash; 84 | }; 85 | buildCommand= '' 86 | mkdir -p $out 87 | cp -r "$src"/*/* $out/ 88 | rm $out/manifest.in 89 | ''; 90 | }; 91 | 92 | cargo = generic { 93 | pname = "cargo"; 94 | archive = "https://static.rust-lang.org/cargo-dist"; 95 | exes = [ "cargo" ]; 96 | }; 97 | 98 | rust = generic { 99 | pname = "rust"; 100 | archive = "https://static.rust-lang.org/dist"; 101 | exes = [ "rustc" "rustdoc" "cargo" ]; 102 | }; 103 | } 104 | -------------------------------------------------------------------------------- /rinit/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /rinit/Cargo.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "abi" 3 | version = "0.1.0" 4 | 5 | [[package]] 6 | name = "rinit" 7 | version = "0.1.0" 8 | dependencies = [ 9 | "selfalloc 0.1.0", 10 | "spin 0.4.4", 11 | "system 0.1.0", 12 | ] 13 | 14 | [[package]] 15 | name = "rlibc" 16 | version = "1.0.0" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | 19 | [[package]] 20 | name = "selfalloc" 21 | version = "0.1.0" 22 | dependencies = [ 23 | "abi 0.1.0", 24 | "spin 0.4.4", 25 | "system 0.1.0", 26 | ] 27 | 28 | [[package]] 29 | name = "spin" 30 | version = "0.4.4" 31 | 32 | [[package]] 33 | name = "system" 34 | version = "0.1.0" 35 | dependencies = [ 36 | "abi 0.1.0", 37 | "rlibc 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", 38 | "spin 0.4.4", 39 | ] 40 | 41 | [metadata] 42 | "checksum rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc874b127765f014d792f16763a81245ab80500e2ad921ed4ee9e82481ee08fe" 43 | -------------------------------------------------------------------------------- /rinit/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rinit" 3 | version = "0.1.0" 4 | authors = ["Wei Tang "] 5 | 6 | [lib] 7 | crate-type = ["staticlib"] 8 | 9 | [dependencies.system] 10 | path = "../system" 11 | features = ["kernel_debug"] 12 | 13 | [dependencies.spin] 14 | path = "../spin" 15 | 16 | [dependencies.selfalloc] 17 | path = "../selfalloc" -------------------------------------------------------------------------------- /rinit/Makefile: -------------------------------------------------------------------------------- 1 | name := librinit 2 | 3 | include ../userspace.mk 4 | 5 | cargo: 6 | ifeq ($(version),release) 7 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBALLOC) -L $(LIBCOMPILER_BUILTINS)" cargo rustc --release --target $(TARGET_SPEC) --verbose 8 | else 9 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBALLOC) -L $(LIBCOMPILER_BUILTINS)" cargo rustc --target $(TARGET_SPEC) --verbose 10 | endif 11 | -------------------------------------------------------------------------------- /rinit/src/vga_buffer.rs: -------------------------------------------------------------------------------- 1 | use core::ptr::Unique; 2 | use spin::Mutex; 3 | 4 | #[cfg(any(target_arch = "x86_64"))] 5 | #[allow(dead_code)] 6 | pub unsafe fn outportb(port: u16, val: u8) 7 | { 8 | asm!("outb %al, %dx" : : "{dx}"(port), "{al}"(val)); 9 | } 10 | 11 | #[cfg(any(target_arch = "x86_64"))] 12 | #[allow(dead_code)] 13 | pub unsafe fn inportb(port: u16) -> u8 14 | { 15 | let ret: u8; 16 | asm!("inb %dx, %al" : "={ax}"(ret): "{dx}"(port)); 17 | ret 18 | } 19 | 20 | fn move_cursor(column: usize, row: 
usize) { 21 | let crtc_adr : u16 = 0x3D4; 22 | let offset : u16 = (column + row * 80) as u16; 23 | 24 | unsafe { 25 | outportb(crtc_adr + 0, 14); 26 | outportb(crtc_adr + 1, (offset >> 8) as u8); 27 | outportb(crtc_adr + 0, 15); 28 | outportb(crtc_adr + 1, offset as u8); 29 | } 30 | } 31 | 32 | #[allow(dead_code)] 33 | #[repr(u8)] 34 | pub enum Color { 35 | Black = 0, 36 | Blue = 1, 37 | Green = 2, 38 | Cyan = 3, 39 | Red = 4, 40 | Magenta = 5, 41 | Brown = 6, 42 | LightGray = 7, 43 | DarkGray = 8, 44 | LightBlue = 9, 45 | LightGreen = 10, 46 | LightCyan = 11, 47 | LightRed = 12, 48 | Pink = 13, 49 | Yellow = 14, 50 | White = 15, 51 | } 52 | 53 | #[derive(Clone, Copy)] 54 | struct ColorCode(u8); 55 | 56 | impl ColorCode { 57 | const fn new(foreground: Color, background: Color) -> ColorCode { 58 | ColorCode((background as u8) << 4 | (foreground as u8)) 59 | } 60 | } 61 | 62 | #[repr(C)] 63 | #[derive(Clone, Copy)] 64 | struct ScreenChar { 65 | ascii_character: u8, 66 | color_code: ColorCode, 67 | } 68 | 69 | const BUFFER_HEIGHT: usize = 25; 70 | const BUFFER_WIDTH: usize = 80; 71 | 72 | struct Buffer { 73 | chars: [[ScreenChar; BUFFER_WIDTH]; BUFFER_HEIGHT], 74 | } 75 | 76 | pub struct Writer { 77 | row_position: usize, 78 | column_position: usize, 79 | color_code: ColorCode, 80 | buffer: Unique, 81 | } 82 | 83 | #[allow(dead_code)] 84 | impl Writer { 85 | pub fn write_byte(&mut self, byte: u8) { 86 | match byte { 87 | b'\n' => self.new_line(), 88 | byte => { 89 | if self.column_position >= BUFFER_WIDTH { 90 | self.new_line(); 91 | } 92 | 93 | let row = self.row_position; 94 | let col = self.column_position; 95 | 96 | self.buffer().chars[row][col] = ScreenChar { 97 | ascii_character: byte, 98 | color_code: self.color_code, 99 | }; 100 | self.column_position += 1; 101 | 102 | if (col + 1) >= BUFFER_WIDTH { 103 | move_cursor(0, row + 1); 104 | } else { 105 | move_cursor(col + 1, row); 106 | } 107 | } 108 | } 109 | } 110 | 111 | fn buffer(&mut self) -> &mut Buffer { 
112 | unsafe { self.buffer.as_mut() } 113 | } 114 | 115 | fn new_line(&mut self) { 116 | if self.row_position == BUFFER_HEIGHT-1 { 117 | for row in 0..(BUFFER_HEIGHT-1) { 118 | let buffer = self.buffer(); 119 | buffer.chars[row] = buffer.chars[row + 1]; 120 | } 121 | } else { 122 | self.row_position += 1; 123 | } 124 | self.column_position = 0; 125 | 126 | let row = self.row_position; 127 | self.clear_row(row); 128 | 129 | move_cursor(0, row); 130 | } 131 | 132 | fn clear_row(&mut self, row: usize) { 133 | let blank = ScreenChar { 134 | ascii_character: b' ', 135 | color_code: self.color_code, 136 | }; 137 | self.buffer().chars[row] = [blank; BUFFER_WIDTH]; 138 | } 139 | 140 | fn clear_screen(&mut self) { 141 | for row in 0..(BUFFER_HEIGHT-1) { 142 | self.clear_row(row) 143 | } 144 | } 145 | } 146 | 147 | impl ::core::fmt::Write for Writer { 148 | fn write_str(&mut self, s: &str) -> ::core::fmt::Result { 149 | for byte in s.bytes() { 150 | self.write_byte(byte); 151 | } 152 | Ok(()) 153 | } 154 | } 155 | 156 | pub static WRITER: Mutex = Mutex::new(Writer { 157 | column_position: 0, 158 | row_position: 0, 159 | color_code: ColorCode::new(Color::LightGreen, Color::Black), 160 | buffer: unsafe { Unique::new_unchecked(0x90002000 as *mut _) }, 161 | }); 162 | 163 | #[allow(dead_code)] 164 | pub fn clear_screen() { 165 | WRITER.lock().clear_screen(); 166 | } 167 | 168 | #[allow(unused_macros)] 169 | macro_rules! print { 170 | ($($arg:tt)*) => ({ 171 | use core::fmt::Write; 172 | $crate::vga_buffer::WRITER.lock().write_fmt(format_args!($($arg)*)).unwrap(); 173 | }); 174 | } 175 | 176 | #[allow(unused_macros)] 177 | macro_rules! 
println { 178 | ($fmt:expr) => (print!(concat!($fmt, "\n"))); 179 | ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*)); 180 | } 181 | -------------------------------------------------------------------------------- /selfalloc/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /selfalloc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selfalloc" 3 | version = "0.1.0" 4 | authors = ["Wei Tang "] 5 | 6 | [dependencies.system] 7 | path = "../system" 8 | 9 | [dependencies.spin] 10 | path = "../spin" 11 | 12 | [dependencies.abi] 13 | path = "../abi" -------------------------------------------------------------------------------- /selfalloc/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(const_fn)] 2 | #![feature(global_allocator)] 3 | #![feature(alloc)] 4 | #![feature(allocator_api)] 5 | #![feature(lang_items)] 6 | #![no_std] 7 | 8 | #[macro_use] 9 | extern crate system; 10 | extern crate spin; 11 | extern crate alloc; 12 | extern crate abi; 13 | 14 | use spin::{Once, Mutex}; 15 | use abi::CAddr; 16 | 17 | const PAGE_LENGTH: usize = 4096; 18 | 19 | static ALLOCATOR: Once> = Once::new(); 20 | 21 | struct WatermarkAllocator { 22 | untyped_cap: CAddr, 23 | toplevel_table_cap: CAddr, 24 | page_cap: CAddr, 25 | page_start_addr: usize, 26 | watermark: usize, 27 | } 28 | 29 | pub unsafe fn setup_allocator(untyped_cap: CAddr, pt_cap: CAddr, page_start_addr: usize) { 30 | ALLOCATOR.call_once(|| Mutex::new(WatermarkAllocator::new(untyped_cap, pt_cap, page_start_addr))); 31 | } 32 | 33 | // 
http://os.phil-opp.com/kernel-heap.html#alignment 34 | 35 | /// Align downwards. Returns the greatest x with alignment `align` 36 | /// so that x <= addr. The alignment must be a power of 2. 37 | pub fn align_down(addr: usize, align: usize) -> usize { 38 | if align.is_power_of_two() { 39 | addr & !(align - 1) 40 | } else if align == 0 { 41 | addr 42 | } else { 43 | panic!("`align` must be a power of 2"); 44 | } 45 | } 46 | 47 | /// Align upwards. Returns the smallest x with alignment `align` 48 | /// so that x >= addr. The alignment must be a power of 2. 49 | pub fn align_up(addr: usize, align: usize) -> usize { 50 | align_down(addr + align - 1, align) 51 | } 52 | 53 | impl WatermarkAllocator { 54 | fn new(untyped_cap: CAddr, toplevel_table_cap: CAddr, page_start_addr: usize) -> Self { 55 | let page_cap = system::retype_raw_page_free(untyped_cap); 56 | system::map_raw_page_free(page_start_addr, untyped_cap, toplevel_table_cap, page_cap.clone()); 57 | 58 | WatermarkAllocator { 59 | untyped_cap: untyped_cap, 60 | page_cap: page_cap, 61 | toplevel_table_cap: toplevel_table_cap, 62 | page_start_addr: page_start_addr, 63 | watermark: 0, 64 | } 65 | } 66 | 67 | pub fn allocate(&mut self, size: usize, align: usize) -> *mut u8 { 68 | let alloc_start = align_up(self.watermark, align); 69 | let ret = (self.page_start_addr + alloc_start) as *mut u8; 70 | 71 | let mut alloc_end = alloc_start.saturating_add(size); 72 | 73 | while alloc_end >= PAGE_LENGTH { 74 | self.page_cap = system::retype_raw_page_free(self.untyped_cap); 75 | self.page_start_addr += PAGE_LENGTH; 76 | system::map_raw_page_free(self.page_start_addr, self.untyped_cap, self.toplevel_table_cap, self.page_cap.clone()); 77 | 78 | alloc_end -= PAGE_LENGTH; 79 | } 80 | 81 | self.watermark = alloc_end; 82 | ret 83 | } 84 | } 85 | 86 | #[global_allocator] 87 | static WATER_ALLOCATOR: WaterAlloc = WaterAlloc; 88 | 89 | use alloc::allocator::Layout; 90 | use core::alloc::{GlobalAlloc, Opaque}; 91 | 92 | struct 
WaterAlloc; 93 | 94 | unsafe impl<'a> GlobalAlloc for WaterAlloc { 95 | unsafe fn alloc(&self, layout: Layout) -> *mut Opaque { 96 | ALLOCATOR.wait().unwrap().lock().allocate(layout.size(), layout.align()) as _ 97 | } 98 | 99 | unsafe fn dealloc(&self, _pointer: *mut Opaque, _layout: Layout) { } 100 | } 101 | 102 | #[lang="oom"] 103 | #[no_mangle] 104 | pub fn rust_oom() -> ! { 105 | panic!("Out of memory"); 106 | } 107 | -------------------------------------------------------------------------------- /spin/.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | **/target/ 3 | **/Cargo.lock 4 | -------------------------------------------------------------------------------- /spin/.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | 3 | rust: 4 | - stable 5 | - beta 6 | - nightly 7 | 8 | sudo: false 9 | 10 | notifications: 11 | email: 12 | on_success: never 13 | on_failure: always 14 | 15 | before_script: 16 | - | 17 | pip install 'travis-cargo<0.2' --user && 18 | export PATH=$HOME/.local/bin:$PATH 19 | 20 | script: 21 | # Nightly tests with default features 22 | - travis-cargo --only nightly build 23 | - travis-cargo --only nightly test 24 | - travis-cargo --only nightly doc -- --no-deps 25 | # Ensure things work with features disabled 26 | - travis-cargo build -- --no-default-features 27 | - travis-cargo test -- --no-default-features 28 | - rustdoc --test README.md -L target/debug 29 | 30 | after_success: 31 | - curl https://mvdnes.github.io/rust-docs/travis-doc-upload.sh | bash 32 | 33 | env: 34 | global: 35 | # override the default `--features unstable` used by travis-cargo 36 | # since unstable is activated by default 37 | - TRAVIS_CARGO_NIGHTLY_FEATURE="" 38 | -------------------------------------------------------------------------------- /spin/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 
2 | 3 | name = "spin" 4 | version = "0.4.4" 5 | authors = [ "Mathijs van de Nes ", 6 | "John Ericson " ] 7 | license = "MIT" 8 | repository = "https://github.com/mvdnes/spin-rs.git" 9 | documentation = "https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html" 10 | keywords = ["spinlock", "mutex", "rwlock"] 11 | description = """ 12 | Synchronization primitives based on spinning. 13 | They may contain data, 14 | They are usable without `std` 15 | and static initializers are available. 16 | """ 17 | 18 | [features] 19 | asm = [] 20 | core_intrinsics = [] 21 | const_fn = [] 22 | once = ["const_fn"] 23 | unstable = ["asm", "core_intrinsics", "const_fn", "once"] 24 | default = ["unstable"] 25 | -------------------------------------------------------------------------------- /spin/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Mathijs van de Nes 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /spin/README.md: -------------------------------------------------------------------------------- 1 | spin-rs 2 | =========== 3 | 4 | [![Build Status](https://travis-ci.org/mvdnes/spin-rs.svg)](https://travis-ci.org/mvdnes/spin-rs) 5 | [![Crates.io version](https://img.shields.io/crates/v/spin.svg)](https://crates.io/crates/spin) 6 | 7 | [Documentation](https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html) 8 | 9 | This Rust library implements a simple 10 | [spinlock](https://en.wikipedia.org/wiki/Mutex). 11 | 12 | Usage 13 | ----- 14 | 15 | By default this crate only works on nightly but you can disable the default features 16 | if you want to run on stable. Nightly is more efficient than stable currently. 17 | 18 | Also, `Once` is only available on nightly as it is only useful when `const_fn` is available. 19 | 20 | Include the following code in your Cargo.toml 21 | 22 | ```toml 23 | [dependencies.spin] 24 | version = "0.3" 25 | # If you want to run on stable you will need to add the following: 26 | # default-features = false 27 | ``` 28 | 29 | Example 30 | ------- 31 | 32 | When calling `lock` on a `Mutex` you will get a reference to the data. When this 33 | reference is dropped, the lock will be unlocked. 
34 | 35 | ```rust 36 | extern crate spin; 37 | 38 | fn main() 39 | { 40 | let mutex = spin::Mutex::new(0); 41 | let rw_lock = spin::RwLock::new(0); 42 | 43 | // Modify the data 44 | { 45 | let mut data = mutex.lock(); 46 | *data = 2; 47 | let mut data = rw_lock.write(); 48 | *data = 3; 49 | } 50 | 51 | // Read the data 52 | let answer = 53 | { 54 | let data1 = mutex.lock(); 55 | let data2 = rw_lock.read(); 56 | let data3 = rw_lock.read(); // sharing 57 | (*data1, *data2, *data3) 58 | }; 59 | 60 | println!("Answers are {:?}", answer); 61 | } 62 | ``` 63 | 64 | To share the lock, an `Arc>` may be used. 65 | 66 | Remarks 67 | ------- 68 | 69 | The behaviour of these lock is similar to their namesakes in `std::sync`. they 70 | differ on the following: 71 | 72 | - The lock will not be poisoned in case of failure; 73 | -------------------------------------------------------------------------------- /spin/examples/debug.rs: -------------------------------------------------------------------------------- 1 | extern crate spin; 2 | 3 | fn main() { 4 | let mutex = spin::Mutex::new(42); 5 | println!("{:?}", mutex); 6 | { 7 | let x = mutex.lock(); 8 | println!("{:?}, {:?}", mutex, *x); 9 | } 10 | 11 | let rwlock = spin::RwLock::new(42); 12 | println!("{:?}", rwlock); 13 | { 14 | let x = rwlock.read(); 15 | println!("{:?}, {:?}", rwlock, *x); 16 | } 17 | { 18 | let x = rwlock.write(); 19 | println!("{:?}, {:?}", rwlock, *x); 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /spin/script/doc-upload.cfg: -------------------------------------------------------------------------------- 1 | PROJECT_NAME=spin-rs 2 | DOCS_REPO=mvdnes/rust-docs.git 3 | DOC_RUST_VERSION=nightly 4 | -------------------------------------------------------------------------------- /spin/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(const_atomic_usize_new)] 2 | #![feature(const_unsafe_cell_new)] 3 
| 4 | #![crate_type = "lib"] 5 | #![warn(missing_docs)] 6 | 7 | //! Synchronization primitives based on spinning 8 | 9 | #![cfg_attr(feature = "asm", feature(asm))] 10 | #![cfg_attr(feature = "core_intrinsics", feature(core_intrinsics))] 11 | #![cfg_attr(feature = "const_fn", feature(const_fn))] 12 | 13 | #![no_std] 14 | 15 | #[cfg(test)] 16 | #[macro_use] 17 | extern crate std; 18 | 19 | pub use mutex::*; 20 | pub use rw_lock::*; 21 | 22 | #[cfg(feature = "once")] 23 | pub use once::*; 24 | 25 | mod mutex; 26 | mod rw_lock; 27 | 28 | #[cfg(feature = "once")] 29 | mod once; 30 | 31 | mod util; 32 | -------------------------------------------------------------------------------- /spin/src/util.rs: -------------------------------------------------------------------------------- 1 | /// Called while spinning (name borrowed from Linux). Can be implemented to call 2 | /// a platform-specific method of lightening CPU load in spinlocks. 3 | #[cfg(all(feature = "asm", any(target_arch = "x86", target_arch = "x86_64")))] 4 | #[inline(always)] 5 | pub fn cpu_relax() { 6 | // This instruction is meant for usage in spinlock loops 7 | // (see Intel x86 manual, III, 4.2) 8 | unsafe { asm!("pause" :::: "volatile"); } 9 | } 10 | 11 | #[cfg(any(not(feature = "asm"), not(any(target_arch = "x86", target_arch = "x86_64"))))] 12 | #[inline(always)] 13 | pub fn cpu_relax() { 14 | } 15 | -------------------------------------------------------------------------------- /system/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /system/Cargo.lock: -------------------------------------------------------------------------------- 1 | [root] 2 | name = "system" 3 | version = "0.1.0" 4 | 
dependencies = [ 5 | "abi 0.1.0", 6 | "rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 7 | "spin 0.4.4", 8 | ] 9 | 10 | [[package]] 11 | name = "abi" 12 | version = "0.1.0" 13 | 14 | [[package]] 15 | name = "rlibc" 16 | version = "1.0.0" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | 19 | [[package]] 20 | name = "spin" 21 | version = "0.4.4" 22 | 23 | [metadata] 24 | "checksum rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc874b127765f014d792f16763a81245ab80500e2ad921ed4ee9e82481ee08fe" 25 | -------------------------------------------------------------------------------- /system/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "system" 3 | version = "0.1.0" 4 | authors = ["Wei Tang "] 5 | 6 | [dependencies.rlibc] 7 | version = "1.0" 8 | 9 | [dependencies.abi] 10 | path = "../abi" 11 | 12 | [dependencies.spin] 13 | path = "../spin" 14 | 15 | [features] 16 | default = [] 17 | kernel_debug = ["abi/kernel_debug"] -------------------------------------------------------------------------------- /system/src/call.rs: -------------------------------------------------------------------------------- 1 | use abi::{SystemCall, TaskBuffer, CAddr, ChannelMessage}; 2 | use core::any::Any; 3 | use super::task_buffer_addr; 4 | 5 | pub fn retype_raw_page_free(source: CAddr) -> CAddr { 6 | let result = system_call(SystemCall::RetypeRawPageFree { 7 | request: source, 8 | response: None 9 | }); 10 | match result { 11 | SystemCall::RetypeRawPageFree { 12 | response, .. 
13 | } => { return response.unwrap(); }, 14 | _ => panic!(), 15 | }; 16 | } 17 | 18 | pub fn map_raw_page_free(vaddr: usize, untyped: CAddr, toplevel_table: CAddr, page: CAddr) { 19 | system_call(SystemCall::MapRawPageFree { 20 | untyped: untyped, 21 | toplevel_table: toplevel_table, 22 | request: (vaddr, page), 23 | }); 24 | } 25 | 26 | pub fn retype_cpool(source: CAddr, target: CAddr) { 27 | system_call(SystemCall::RetypeCPool { 28 | request: (source, target), 29 | }); 30 | } 31 | 32 | pub fn retype_task(source: CAddr, target: CAddr) { 33 | system_call(SystemCall::RetypeTask { 34 | request: (source, target), 35 | }); 36 | } 37 | 38 | pub fn task_set_instruction_pointer(target: CAddr, ptr: u64) { 39 | system_call(SystemCall::TaskSetInstructionPointer { 40 | request: (target, ptr), 41 | }); 42 | } 43 | 44 | pub fn task_set_stack_pointer(target: CAddr, ptr: u64) { 45 | system_call(SystemCall::TaskSetStackPointer { 46 | request: (target, ptr), 47 | }); 48 | } 49 | 50 | pub fn task_set_cpool(target: CAddr, cpool: CAddr) { 51 | system_call(SystemCall::TaskSetCPool { 52 | request: (target, cpool), 53 | }); 54 | } 55 | 56 | pub fn task_set_top_page_table(target: CAddr, table: CAddr) { 57 | system_call(SystemCall::TaskSetTopPageTable { 58 | request: (target, table), 59 | }); 60 | } 61 | 62 | pub fn task_set_buffer(target: CAddr, buffer: CAddr) { 63 | system_call(SystemCall::TaskSetBuffer { 64 | request: (target, buffer), 65 | }); 66 | } 67 | 68 | pub fn task_set_active(target: CAddr) { 69 | system_call(SystemCall::TaskSetActive { 70 | request: target 71 | }); 72 | } 73 | 74 | pub fn task_set_inactive(target: CAddr) { 75 | system_call(SystemCall::TaskSetInactive { 76 | request: target 77 | }); 78 | } 79 | 80 | fn channel_take_nonpayload(target: CAddr) -> ChannelMessage { 81 | let result = system_call(SystemCall::ChannelTake { 82 | request: target, 83 | response: None 84 | }); 85 | match result { 86 | SystemCall::ChannelTake { 87 | response, .. 
88 | } => { 89 | return response.unwrap() 90 | }, 91 | _ => panic!(), 92 | }; 93 | } 94 | 95 | pub fn channel_take_raw(target: CAddr) -> u64 { 96 | let result = channel_take_nonpayload(target); 97 | match result { 98 | ChannelMessage::Raw(v) => return v, 99 | _ => panic!(), 100 | }; 101 | } 102 | 103 | pub fn channel_take_cap(target: CAddr) -> CAddr { 104 | let result = channel_take_nonpayload(target); 105 | match result { 106 | ChannelMessage::Cap(v) => return v.unwrap(), 107 | _ => panic!(), 108 | }; 109 | } 110 | 111 | pub fn channel_take(target: CAddr) -> T { 112 | let (result, payload) = system_call_take_payload(SystemCall::ChannelTake { 113 | request: target, 114 | response: None 115 | }); 116 | match result { 117 | SystemCall::ChannelTake { 118 | request: _, 119 | response: Some(ChannelMessage::Payload), 120 | } => { 121 | return payload; 122 | }, 123 | _ => panic!(), 124 | }; 125 | } 126 | 127 | pub fn channel_put_raw(target: CAddr, value: u64) { 128 | system_call(SystemCall::ChannelPut { 129 | request: (target, ChannelMessage::Raw(value)) 130 | }); 131 | } 132 | 133 | pub fn channel_put_cap(target: CAddr, value: CAddr) { 134 | system_call(SystemCall::ChannelPut { 135 | request: (target, ChannelMessage::Cap(Some(value))) 136 | }); 137 | } 138 | 139 | pub fn channel_put(target: CAddr, value: T) { 140 | system_call_put_payload(SystemCall::ChannelPut { 141 | request: (target, ChannelMessage::Payload) 142 | }, value); 143 | } 144 | 145 | pub fn print(buffer: [u8; 32], size: usize) { 146 | let _ = system_call(SystemCall::Print { 147 | request: (buffer, size) 148 | }); 149 | } 150 | 151 | #[cfg(feature="kernel_debug")] 152 | pub fn debug_cpool_list() { 153 | system_call(SystemCall::DebugCPoolList); 154 | } 155 | 156 | #[cfg(feature="kernel_debug")] 157 | pub fn debug_test_succeed() { 158 | system_call(SystemCall::DebugTestSucceed); 159 | loop {} 160 | } 161 | 162 | #[cfg(feature="kernel_debug")] 163 | pub fn debug_test_fail() { 164 | 
system_call(SystemCall::DebugTestFail); 165 | loop {} 166 | } 167 | 168 | fn system_call(message: SystemCall) -> SystemCall { 169 | let addr = task_buffer_addr(); 170 | unsafe { 171 | let buffer = &mut *(addr as *mut TaskBuffer); 172 | buffer.call = Some(message); 173 | system_call_raw(); 174 | buffer.call.take().unwrap() 175 | } 176 | } 177 | 178 | fn system_call_put_payload(message: SystemCall, payload: T) -> SystemCall { 179 | use core::mem::{size_of}; 180 | let addr = task_buffer_addr(); 181 | 182 | unsafe { 183 | let buffer = &mut *(addr as *mut TaskBuffer); 184 | buffer.call = Some(message); 185 | 186 | buffer.payload_length = size_of::(); 187 | let payload_addr = &mut buffer.payload_data as *mut _ as *mut T; 188 | let payload_data = &mut *payload_addr; 189 | *payload_data = payload; 190 | 191 | system_call_raw(); 192 | buffer.call.take().unwrap() 193 | } 194 | } 195 | 196 | fn system_call_take_payload(message: SystemCall) -> (SystemCall, T) { 197 | use core::mem::{size_of}; 198 | let addr = task_buffer_addr(); 199 | 200 | unsafe { 201 | let buffer = &mut *(addr as *mut TaskBuffer); 202 | buffer.call = Some(message); 203 | 204 | system_call_raw(); 205 | 206 | let payload_addr = &mut buffer.payload_data as *mut _ as *mut T; 207 | let payload_data = &*payload_addr; 208 | assert!(buffer.payload_length != 0 && buffer.payload_length == size_of::()); 209 | 210 | (buffer.call.take().unwrap(), payload_data.clone()) 211 | } 212 | } 213 | 214 | #[inline(never)] 215 | unsafe fn system_call_raw() { 216 | asm!("int 80h" 217 | :: 218 | : "rax", "rbx", "rcx", "rdx", 219 | "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 220 | : "volatile", "intel"); 221 | } 222 | -------------------------------------------------------------------------------- /system/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(lang_items)] 2 | #![feature(asm)] 3 | #![feature(const_fn)] 4 | #![no_std] 5 | 6 | extern crate rlibc; 7 | extern 
crate abi;
extern crate spin;

/// Formatted printing through the kernel's `Print` system call. Output is
/// buffered in 32-byte chunks by `PrintWriter` and flushed on drop.
#[macro_export]
macro_rules! system_print {
    ( $($arg:tt)* ) => ({
        use core::fmt::Write;
        let _ = write!(&mut $crate::PrintWriter::new(), $($arg)*);
    })
}

pub mod unwind;
mod call;

#[cfg(feature="kernel_debug")]
pub use self::call::{debug_cpool_list, debug_test_succeed, debug_test_fail};

pub use self::call::{retype_cpool, retype_task,
                     channel_put, channel_take,
                     channel_put_raw, channel_take_raw,
                     channel_put_cap, channel_take_cap,
                     retype_raw_page_free, map_raw_page_free,
                     task_set_stack_pointer, task_set_instruction_pointer,
                     task_set_cpool, task_set_top_page_table, task_set_buffer,
                     task_set_active, task_set_inactive};
pub use abi::{CAddr, ChannelMessage};

use core::fmt;

// Assumed stack size/alignment of a task's stack; the word at the lowest
// address of the stack region holds the task-buffer address.
const STACK_LENGTH: usize = 4 * 4096;

/// Compute the location (lowest address of the current stack region) where
/// the kernel task-buffer address is stored.
pub fn task_buffer_loc() -> usize {
    // We create a value on the stack, look up its address, and round down to
    // the STACK_LENGTH boundary: the bottom of the stack region doubles as
    // the storage slot for the kernel buffer address. (0xdeadbeaf is just an
    // arbitrary marker value; it is never read.)
    let mut v: usize = 0xdeadbeaf;
    let v_addr = &mut v as *mut usize as usize;

    v_addr - (v_addr % STACK_LENGTH)
}

/// Read the task-buffer address previously stored with `set_task_buffer_addr`.
/// Reads uninitialized memory if it was never set — callers must set it first.
pub fn task_buffer_addr() -> usize {
    unsafe {
        let loc = task_buffer_loc() as *mut usize;
        return *loc;
    }
}

/// Store `addr` as the task-buffer address for the current task.
///
/// Unsafe: writes through a raw pointer derived from the stack layout
/// assumption in `task_buffer_loc`.
pub unsafe fn set_task_buffer_addr(addr: usize) {
    use core::ptr::write;

    let loc = task_buffer_loc() as *mut usize;
    write(loc, addr);
}

/// A `fmt::Write` sink that accumulates bytes in a fixed 32-byte buffer and
/// forwards full chunks to the kernel `Print` system call.
pub struct PrintWriter {
    buffer: [u8; 32],   // pending output bytes
    size: usize         // number of valid bytes in `buffer`
}

impl PrintWriter {
    /// Create an empty writer.
    pub fn new() -> Self {
        PrintWriter {
            buffer: [0u8; 32],
            size: 0
        }
    }

    /// Send any buffered bytes to the kernel and reset the buffer.
    pub fn flush(&mut self) {
        if self.size > 0 {
            call::print(self.buffer.clone(), self.size);
            self.buffer = [0u8; 32];
            self.size = 0;
        }
    }
}

impl fmt::Write for PrintWriter {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        for u in s.as_bytes().iter() {
            self.buffer[self.size] = *u;
            self.size += 1;

            // Flush as soon as the buffer is full so the next byte always
            // has room (size is reset to 0 by flush).
            if self.size >= 32 {
                self.flush();
            }
        }
        Result::Ok(())
    }
}

impl Drop for PrintWriter {
    /// Flush any remaining partial chunk when the writer goes out of scope.
    fn drop(&mut self) {
        self.flush();
    }
}
--------------------------------------------------------------------------------
/system/src/unwind.rs:
--------------------------------------------------------------------------------
/// Panic handler: prints the panic location and message through the kernel
/// console, then spins forever (no unwinding support in userspace).
#[lang="panic_fmt"]
#[no_mangle]
pub extern "C" fn rust_begin_unwind(args: ::core::fmt::Arguments, file: &str, line: usize) -> !
{
    // 'args' will print to the formatted string passed to panic!
    // (system_print! imports core::fmt::Write itself, so no `use` is needed
    // here — the previous one was redundant and triggered an unused-import
    // warning.)
    system_print!("file='{}', line={} :: {}", file, line, args);
    loop {}
}

// Minimal Itanium-ABI unwinding declarations, required so the lang items
// below type-check; none of this is actually used since we never unwind.
#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Clone,Copy)]
pub enum _Unwind_Reason_Code
{
    _URC_NO_REASON = 0,
    _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
    _URC_FATAL_PHASE2_ERROR = 2,
    _URC_FATAL_PHASE1_ERROR = 3,
    _URC_NORMAL_STOP = 4,
    _URC_END_OF_STACK = 5,
    _URC_HANDLER_FOUND = 6,
    _URC_INSTALL_CONTEXT = 7,
    _URC_CONTINUE_UNWIND = 8,
}

#[allow(non_camel_case_types)]
#[derive(Clone,Copy)]
pub struct _Unwind_Context;

#[allow(non_camel_case_types)]
pub type _Unwind_Action = u32;
static _UA_SEARCH_PHASE: _Unwind_Action = 1;

#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Clone,Copy)]
pub struct _Unwind_Exception
{
    exception_class: u64,
    exception_cleanup: fn(_Unwind_Reason_Code,*const _Unwind_Exception),
    private: [u64; 2],
}

/// Exception-handling personality routine: never returns, since unwinding
/// is unsupported — any attempt to unwind hangs here.
#[lang="eh_personality"]
#[no_mangle]
pub fn rust_eh_personality(
    _version: isize, _actions: _Unwind_Action, _exception_class: u64,
    _exception_object: &_Unwind_Exception, _context: &_Unwind_Context
) -> _Unwind_Reason_Code
{
    loop{}
}

/// Stub required by the compiler for resumed unwinds; hangs, as above.
#[no_mangle]
#[allow(non_snake_case)]
pub fn _Unwind_Resume()
{
    loop{}
}
--------------------------------------------------------------------------------
/tests/run.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

eval "$*"
CODE="$?"
if [ "$CODE" -eq "99" ]
then
    echo "Exit code: $CODE, test succeed."
    true
else
    echo "Exit code: $CODE, test failed."
11 | false 12 | fi 13 | -------------------------------------------------------------------------------- /tests/userspace/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | 13 | # Build directory 14 | /build/ 15 | -------------------------------------------------------------------------------- /tests/userspace/Cargo.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "abi" 3 | version = "0.1.0" 4 | 5 | [[package]] 6 | name = "rlibc" 7 | version = "1.0.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | 10 | [[package]] 11 | name = "selfalloc" 12 | version = "0.1.0" 13 | dependencies = [ 14 | "abi 0.1.0", 15 | "spin 0.4.4", 16 | "system 0.1.0", 17 | ] 18 | 19 | [[package]] 20 | name = "spin" 21 | version = "0.4.4" 22 | 23 | [[package]] 24 | name = "system" 25 | version = "0.1.0" 26 | dependencies = [ 27 | "abi 0.1.0", 28 | "rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 29 | "spin 0.4.4", 30 | ] 31 | 32 | [[package]] 33 | name = "test-userspace" 34 | version = "0.1.0" 35 | dependencies = [ 36 | "selfalloc 0.1.0", 37 | "spin 0.4.4", 38 | "system 0.1.0", 39 | ] 40 | 41 | [metadata] 42 | "checksum rlibc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc874b127765f014d792f16763a81245ab80500e2ad921ed4ee9e82481ee08fe" 43 | -------------------------------------------------------------------------------- /tests/userspace/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "test-userspace" 3 | version = "0.1.0" 4 | authors = ["Wei Tang "] 5 | 6 | [[example]] 7 | name = "allocator" 8 | crate-type = ["staticlib"] 9 | 10 | [dependencies.system] 11 | path = "../../system" 12 | features = ["kernel_debug"] 13 | 
14 | [dependencies.spin] 15 | path = "../../spin" 16 | 17 | [dependencies.selfalloc] 18 | path = "../../selfalloc" -------------------------------------------------------------------------------- /tests/userspace/Makefile: -------------------------------------------------------------------------------- 1 | test ?= $(error test target not set) 2 | kernel ?= $(error kernel not set) 3 | version ?= release 4 | name := $(test) 5 | librinit := target/$(ARCH)/$(version)/examples/lib$(name).a 6 | 7 | include ../../userspace.mk 8 | 9 | cargo: 10 | ifeq ($(version),release) 11 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBALLOC) -L $(LIBCOMPILER_BUILTINS)" cargo build --release --target $(TARGET_SPEC) --example $(name) 12 | else 13 | @RUSTFLAGS="-L $(LIBCORE) -L $(LIBALLOC) -L $(LIBCOMPILER_BUILTINS)" cargo build --target $(TARGET_SPEC) --example $(name) 14 | endif 15 | 16 | test: build 17 | ../run.sh qemu-system-$(ARCH) -d int -no-reboot -vnc :1 -device isa-debug-exit -kernel $(kernel) -initrd $(rinit) -serial stdio 18 | -------------------------------------------------------------------------------- /tests/userspace/examples/allocator.rs: -------------------------------------------------------------------------------- 1 | #![feature(lang_items)] 2 | #![feature(asm)] 3 | #![feature(const_fn)] 4 | #![feature(unique)] 5 | #![feature(alloc)] 6 | #![no_std] 7 | 8 | extern crate system; 9 | extern crate spin; 10 | extern crate selfalloc; 11 | extern crate alloc; 12 | 13 | use system::{CAddr}; 14 | 15 | #[lang="start"] 16 | #[no_mangle] 17 | #[allow(private_no_mangle_fns)] 18 | fn start(_argc: isize, _argv: *const *const u8) { 19 | unsafe { system::set_task_buffer_addr(0x90001000); } 20 | unsafe { selfalloc::setup_allocator(CAddr::from(2), CAddr::from(3), 0x1000000000); } 21 | 22 | // Test allocator 23 | { 24 | use alloc::boxed::Box; 25 | use core::ops::Deref; 26 | let heap_test = Box::new(42); 27 | if heap_test.deref() != &42 { 28 | system::debug_test_fail(); 29 | } 30 | } 31 | 32 | 
system::debug_test_succeed(); 33 | } 34 | -------------------------------------------------------------------------------- /userspace.mk: -------------------------------------------------------------------------------- 1 | version ?= debug 2 | 3 | linker_script := $(USERSPACE_LINKER) 4 | 5 | linker_flags := -T $(linker_script) 6 | linker_flags += -Map build/$(arch)/map.txt 7 | linker_flags += --gc-sections 8 | linker_flags += -z max-page-size=0x1000 9 | 10 | librinit ?= target/$(ARCH)/$(version)/$(name).a 11 | 12 | assembly_source_files := $(wildcard src/arch/$(ARCH)/*.S) 13 | assembly_object_files := $(patsubst src/arch/$(ARCH)/%.S, \ 14 | build/$(arch)/%.o, $(assembly_source_files)) 15 | 16 | rinit := build/$(ARCH)/$(name).bin 17 | 18 | .PHONY: clean cargo build 19 | 20 | # compile assembly files 21 | build/$(arch)/%.o: src/arch/$(ARCH)/%.S 22 | @mkdir -p $(shell dirname $@) 23 | @$(AS) -o $@ $< 24 | 25 | build: cargo $(librinit) $(assembly_object_files) $(linker_script) 26 | @mkdir -p build/$(ARCH) 27 | @$(LD) $(linker_flags) -o $(rinit) $(assembly_object_files) $(librinit) 28 | 29 | clean: 30 | @rm -rf build 31 | @rm -rf target 32 | -------------------------------------------------------------------------------- /x86_64.json: -------------------------------------------------------------------------------- 1 | { 2 | "cpu": "x86-64", 3 | "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", 4 | "llvm-target": "x86_64-unknown-none", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "64", 8 | "linker-flavor": "gcc", 9 | "features": "-mmx,-sse,+soft-float", 10 | "os": "tifflin", 11 | "arch": "x86_64", 12 | "pre-link-args": ["-m64"], 13 | "no-compiler-rt": true, 14 | "disable-redzone": true, 15 | "eliminate-frame-pointer": false, 16 | "morestack": false 17 | } 18 | --------------------------------------------------------------------------------