├── .gitattributes ├── .gitignore ├── zig.mod ├── src ├── additional │ ├── additional.zig │ ├── serial_port.zig │ └── pic8259.zig ├── registers │ ├── registers.zig │ ├── xcontrol.zig │ ├── rflags.zig │ ├── model_specific.zig │ └── control.zig ├── instructions │ ├── port.zig │ ├── tables.zig │ ├── instructions.zig │ ├── interrupts.zig │ ├── tlb.zig │ ├── random.zig │ └── segmentation.zig ├── structures │ ├── structures.zig │ ├── tss.zig │ ├── port.zig │ ├── paging │ │ ├── frame_alloc.zig │ │ ├── paging.zig │ │ ├── page_table.zig │ │ ├── frame.zig │ │ ├── page.zig │ │ └── mapping │ │ │ └── mapping.zig │ └── gdt.zig ├── common.zig ├── index.zig └── addr.zig ├── upstream_versions.txt ├── gyro.zzz ├── .github └── workflows │ └── main.yml ├── LICENSE └── README.md /.gitattributes: -------------------------------------------------------------------------------- 1 | *.zig text eol=lf 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | .gyro/ 3 | gyro.lock 4 | deps.zig 5 | build_runner.zig 6 | .zigmod 7 | -------------------------------------------------------------------------------- /zig.mod: -------------------------------------------------------------------------------- 1 | id: 1busqtt3049neerwcob96haqgwkm57kd3n4audd7z69lhnni 2 | name: x86_64 3 | main: src/index.zig 4 | license: MIT 5 | description: Various functionality required to make an x86_64 kernel 6 | dependencies: 7 | - src: git https://github.com/leecannon/zig-bitjuggle 8 | -------------------------------------------------------------------------------- /src/additional/additional.zig: -------------------------------------------------------------------------------- 1 | /// Types for UART serial ports. 2 | pub const serial_port = @import("serial_port.zig"); 3 | 4 | /// A simple pic8259 implementation 5 | pub const pic8259 = @import("pic8259.zig"); 6 | 7 | comptime { 8 | const std = @import("std"); 9 | std.testing.refAllDecls(@This()); 10 | } 11 | -------------------------------------------------------------------------------- /upstream_versions.txt: -------------------------------------------------------------------------------- 1 | This file is used to track the latest version of each reimplemented crate, to make it easy to port changes. 2 | 3 | 2021-11-14 498fa5c534ba81d6867e2d6b0c4c3f4552cad670 https://github.com/rust-osdev/x86_64 4 | 2020-05-20 c3377fb8c1c92a8c042dd94ad9bfcd9a20470ff9 https://github.com/emk/toyos-rs/tree/master/crates/pic8259_simple 5 | -------------------------------------------------------------------------------- /gyro.zzz: -------------------------------------------------------------------------------- 1 | pkgs: 2 | x86_64: 3 | version: 1.1.1 4 | description: "Support for x86_64 specific instructions (e.g. TLB flush), registers (e.g. control registers), and structures (e.g. 
page tables)" 5 | license: MIT 6 | source_url: "https://github.com/leecannon/zig-x86_64" 7 | tags: 8 | x86-64 9 | x86 10 | osdev 11 | root: src/index.zig 12 | files: 13 | README.md 14 | LICENSE 15 | build.zig 16 | src/*.zig 17 | src/structures/*.zig 18 | src/structures/paging/*.zig 19 | src/structures/paging/mapping/*.zig 20 | src/registers/*.zig 21 | src/instructions/*.zig 22 | src/additional/*.zig 23 | deps: 24 | leecannon/bitjuggle: ^1.0.0 25 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | paths: 6 | - '**.zig' 7 | - 'gyro.zzz' 8 | pull_request: 9 | paths: 10 | - '**.zig' 11 | - 'gyro.zzz' 12 | schedule: 13 | - cron: '0 0 * * *' 14 | workflow_dispatch: 15 | 16 | jobs: 17 | test: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/checkout@v2 21 | - uses: goto-bus-stop/setup-zig@v1 22 | with: 23 | version: master 24 | - uses: mattnite/setup-gyro@v1 25 | 26 | - name: build 27 | run: gyro build 28 | lint: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@v2 32 | - uses: goto-bus-stop/setup-zig@v1 33 | with: 34 | version: master 35 | - run: zig fmt --check --ast-check **.zig 36 | -------------------------------------------------------------------------------- /src/registers/registers.zig: -------------------------------------------------------------------------------- 1 | /// Functions to read and write control registers. 2 | pub const control = @import("control.zig"); 3 | 4 | /// Access to various extended system registers 5 | pub const xcontrol = @import("xcontrol.zig"); 6 | 7 | /// Processor state stored in the RFLAGS register. 8 | pub const RFlags = @import("rflags.zig").RFlags; 9 | 10 | /// Functions to read and write model specific registers. 11 | pub const model_specific = @import("model_specific.zig"); 12 | 13 | const x86_64 = @import("../index.zig"); 14 | const bitjuggle = @import("bitjuggle"); 15 | const std = @import("std"); 16 | 17 | /// Gets the current instruction pointer. Note that this is only approximate as it requires a few 18 | /// instructions to execute. 19 | pub inline fn readInstructionPointer() x86_64.VirtAddr { 20 | return x86_64.VirtAddr.initUnchecked(asm ("lea (%%rip), %[ret]" 21 | : [ret] "=r" (-> u64), 22 | )); 23 | } 24 | 25 | comptime { 26 | std.testing.refAllDecls(@This()); 27 | } 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020-2022 Lee Cannon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # zig-x86_64
2 | 
3 | [![CI](https://github.com/leecannon/zig-x86_64/actions/workflows/main.yml/badge.svg?branch=master)](https://github.com/leecannon/zig-x86_64/actions/workflows/main.yml)
4 | 
5 | **I don't use this library anymore. I made this as a port of the rust crate but over time I have come to dislike the API.**
6 | 
7 | **As written it does not support the self-hosted compiler, so it will probably be deleted with the release of zig 0.11**
8 | 
9 | This repo contains various functionality required to make an x86_64 kernel (following [Writing an OS in Rust](https://os.phil-opp.com/)).
10 | 
11 | It is mainly a zig reimplementation of the rust crate [x86_64](https://github.com/rust-osdev/x86_64).
12 | 
13 | It includes a few additional types in the `x86_64.additional` namespace:
14 | 
15 | - `SerialPort` - Serial port type, mainly for debug output
16 | - `SimplePic` - Reimplementation of [pic8259_simple](https://docs.rs/pic8259_simple)
17 | 
18 | ## How to get
19 | 
20 | ### Gyro
21 | 
22 | `gyro add leecannon/x86_64`
23 | 
24 | ### Zigmod
25 | 
26 | `zigmod aq add 1/leecannon/x86_64`
27 | 
28 | ### Git
29 | 
30 | #### Submodule
31 | 
32 | `git submodule add https://github.com/leecannon/zig-x86_64 zig-x86_64`
33 | 
34 | #### Clone
35 | 
36 | `git clone https://github.com/leecannon/zig-x86_64`
37 | 
--------------------------------------------------------------------------------
/src/instructions/port.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | pub fn readU8(port: u16) u8 {
6 |     return asm volatile ("inb %[port],%[ret]"
7 |         : [ret] "={al}" (-> u8),
8 |         : [port] "N{dx}" (port),
9 |     );
10 | }
11 | 
12 | pub fn readU16(port: u16) u16 {
13 |     return asm volatile ("inw %[port],%[ret]"
14 |         : [ret] "={ax}" (-> u16),
15 |         : [port] "N{dx}" (port),
16 |     );
17 | }
18 | 
19 | pub fn readU32(port: u16) u32 {
20 |     return asm volatile ("inl %[port],%[ret]"
21 |         : [ret] "={eax}" (-> u32),
22 |         : [port] "N{dx}" (port),
23 |     );
24 | }
25 | 
26 | pub fn writeU8(port: u16, value: u8) void {
27 |     asm volatile ("outb %[value],%[port]"
28 |         :
29 |         : [value] "{al}" (value),
30 |           [port] "N{dx}" (port),
31 |     );
32 | }
33 | 
34 | pub fn writeU16(port: u16, value: u16) void {
35 |     asm volatile ("outw %[value],%[port]"
36 |         :
37 |         : [value] "{ax}" (value),
38 |           [port] "N{dx}" (port),
39 |     );
40 | }
41 | 
42 | pub fn writeU32(port: u16, value: u32) void {
43 |     asm volatile ("outl %[value],%[port]"
44 |         :
45 |         : [value] "{eax}" (value),
46 |           [port] "N{dx}" (port),
47 |     );
48 | }
49 | 
50 | comptime {
51 |     std.testing.refAllDecls(@This());
52 | }
53 | 
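A quick sketch of how the raw port functions above get used (not crate code; 0x80 is the traditional POST/delay port, the same trick `pic8259.zig` below relies on for I/O delays):

    fn ioDelay() void {
        // Writing any value to port 0x80 takes long enough on most hardware
        // to act as a small delay between port accesses.
        writeU8(0x80, 0);
    }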
--------------------------------------------------------------------------------
/src/structures/structures.zig:
--------------------------------------------------------------------------------
1 | /// Abstractions for page tables and other paging related structures.
2 | pub const paging = @import("paging/paging.zig");
3 | 
4 | /// Types for the Global Descriptor Table and segment selectors.
5 | pub const gdt = @import("gdt.zig");
6 | 
7 | /// Types for accessing I/O ports.
8 | pub const port = @import("port.zig");
9 | 
10 | /// Provides a type for the task state segment structure.
11 | pub const tss = @import("tss.zig");
12 | 
13 | /// Provides types for the Interrupt Descriptor Table and its entries.
14 | pub const idt = @import("idt.zig");
15 | 
16 | const x86_64 = @import("../index.zig");
17 | const bitjuggle = @import("bitjuggle");
18 | const std = @import("std");
19 | 
20 | /// A struct describing a pointer to a descriptor table (GDT / IDT).
21 | /// This is in a format suitable for giving to 'lgdt' or 'lidt'.
22 | pub const DescriptorTablePointer = packed struct {
23 |     /// Size of the DT in bytes minus 1.
24 |     limit: u16,
25 | 
26 |     /// Pointer to the memory region containing the DT.
27 |     base: x86_64.VirtAddr,
28 | 
29 |     test {
30 |         std.testing.refAllDecls(@This());
31 |         try std.testing.expectEqual(@bitSizeOf(u16) + @bitSizeOf(u64), @bitSizeOf(DescriptorTablePointer));
32 |         try std.testing.expectEqual(@sizeOf(u16) + @sizeOf(u64), @sizeOf(DescriptorTablePointer));
33 |     }
34 | };
35 | 
36 | comptime {
37 |     std.testing.refAllDecls(@This());
38 | }
39 | 
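As a sketch of how `DescriptorTablePointer` is meant to be filled in (not crate code; `my_gdt` is a hypothetical raw table, and `limit` follows the size-minus-one rule noted above):

    var my_gdt: [8]u64 = undefined; // hypothetical raw GDT storage

    fn gdtPointer() DescriptorTablePointer {
        return .{
            .limit = @sizeOf(@TypeOf(my_gdt)) - 1,
            .base = x86_64.VirtAddr.initUnchecked(@ptrToInt(&my_gdt)),
        };
    }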
--------------------------------------------------------------------------------
/src/structures/tss.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// In 64-bit mode the TSS holds information that is not directly related to the task-switch mechanism,
6 | /// but is used for finding kernel-level stacks if interrupts arrive while in kernel mode.
7 | pub const TaskStateSegment = packed struct {
8 |     reserved_1: u32 = 0,
9 |     /// The full 64-bit canonical forms of the stack pointers (RSP) for privilege levels 0-2.
10 |     privilege_stack_table: [3]x86_64.VirtAddr = [_]x86_64.VirtAddr{x86_64.VirtAddr.zero()} ** 3,
11 |     reserved_2: u64 = 0,
12 |     /// The full 64-bit canonical forms of the interrupt stack table (IST) pointers.
13 |     interrupt_stack_table: [7]x86_64.VirtAddr = [_]x86_64.VirtAddr{x86_64.VirtAddr.zero()} ** 7,
14 |     reserved_3: u64 = 0,
15 |     reserved_4: u16 = 0,
16 |     /// The 16-bit offset to the I/O permission bit map from the 64-bit TSS base.
17 |     iomap_base: u16 = 0,
18 | 
19 |     /// Creates a new zeroed TSS
20 |     /// WARNING: Deprecated
21 |     pub fn init() TaskStateSegment {
22 |         return .{};
23 |     }
24 | 
25 |     test {
26 |         std.testing.refAllDecls(@This());
27 |         try std.testing.expectEqual(@bitSizeOf(u32) * 26, @bitSizeOf(TaskStateSegment));
28 |         try std.testing.expectEqual(@sizeOf(u32) * 26, @sizeOf(TaskStateSegment));
29 |     }
30 | };
31 | 
32 | comptime {
33 |     std.testing.refAllDecls(@This());
34 | }
35 | 
--------------------------------------------------------------------------------
/src/common.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | 
3 | pub fn formatWithoutFields(value: anytype, options: std.fmt.FormatOptions, writer: anytype, comptime blacklist: []const []const u8) !void {
4 |     // This ANY const is a workaround for: https://github.com/ziglang/zig/issues/7948
5 |     const ANY = "any";
6 | 
7 |     const T = @TypeOf(value);
8 | 
9 |     switch (@typeInfo(T)) {
10 |         .Struct => |info| {
11 |             try writer.writeAll(@typeName(T));
12 |             try writer.writeAll("{");
13 |             comptime var i = 0;
14 |             outer: inline for (info.fields) |f| {
15 |                 inline for (blacklist) |blacklist_item| {
16 |                     if (comptime std.mem.indexOf(u8, f.name, blacklist_item) != null) continue :outer;
17 |                 }
18 | 
19 |                 if (i == 0) {
20 |                     try writer.writeAll(" .");
21 |                 } else {
22 |                     try writer.writeAll(", .");
23 |                 }
24 | 
25 |                 try writer.writeAll(f.name);
26 |                 try writer.writeAll(" = ");
27 |                 try std.fmt.formatType(@field(value, f.name), ANY, options, writer, std.fmt.default_max_depth - 1);
28 | 
29 |                 i += 1;
30 |             }
31 |             try writer.writeAll(" }");
32 |         },
33 |         else => {
34 |             @compileError("Unimplemented for: " ++ @typeName(T));
35 |         },
36 |     }
37 | }
38 | 
39 | comptime {
40 |     std.testing.refAllDecls(@This());
41 | }
42 | 
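The register types later in this repo (`RFlags`, `XCr0`) use `formatWithoutFields` to hide their reserved fields when printing; a minimal sketch of the same pattern (the struct and its fields are made up):

    const Example = struct {
        value: u8,
        z_reserved: u8,

        pub fn format(self: Example, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
            _ = fmt;
            // Prints `Example{ .value = ... }`; any field whose name contains
            // "z_reserved" is skipped.
            return formatWithoutFields(self, options, writer, &.{"z_reserved"});
        }
    };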
--------------------------------------------------------------------------------
/src/instructions/tables.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// Load a GDT.
6 | ///
7 | /// Use the `x86_64.structures.gdt.GlobalDescriptorTable` struct for a high-level interface to loading a GDT.
8 | pub fn lgdt(gdt: *const x86_64.structures.DescriptorTablePointer) void {
9 |     asm volatile ("lgdt (%[gdt])"
10 |         :
11 |         : [gdt] "r" (gdt),
12 |         : "memory"
13 |     );
14 | }
15 | 
16 | /// Load an IDT.
17 | ///
18 | /// Use the `x86_64.structures.idt.InterruptDescriptorTable` struct for a high-level interface to loading an IDT.
19 | pub fn lidt(idt: *const x86_64.structures.DescriptorTablePointer) void {
20 |     asm volatile ("lidt (%[idt])"
21 |         :
22 |         : [idt] "r" (idt),
23 |         : "memory"
24 |     );
25 | }
26 | 
27 | /// Get the address of the current IDT.
28 | pub fn sidt() x86_64.structures.DescriptorTablePointer {
29 |     var idt: x86_64.structures.DescriptorTablePointer = undefined;
30 |     asm volatile ("sidt (%[idt])"
31 |         :
32 |         : [idt] "r" (&idt),
33 |         : "memory");
34 |     return idt;
35 | }
36 | 
37 | /// Load the task state register using the `ltr` instruction.
38 | pub fn loadTss(sel: x86_64.structures.gdt.SegmentSelector) void {
39 |     asm volatile ("ltr %[sel]"
40 |         :
41 |         : [sel] "r" (sel.value),
42 |     );
43 | }
44 | 
45 | /// Get the address of the current GDT.
46 | pub fn sgdt() x86_64.structures.DescriptorTablePointer {
47 |     var gdt: x86_64.structures.DescriptorTablePointer = undefined;
48 |     asm volatile ("sgdt (%[gdt])"
49 |         :
50 |         : [gdt] "r" (&gdt),
51 |         : "memory");
52 |     return gdt;
53 | }
54 | 
55 | comptime {
56 |     std.testing.refAllDecls(@This());
57 | }
58 | 
--------------------------------------------------------------------------------
/src/structures/port.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// A u8 I/O port
6 | pub const Portu8 = struct {
7 |     port: u16,
8 | 
9 |     pub fn init(port: u16) Portu8 {
10 |         return .{
11 |             .port = port,
12 |         };
13 |     }
14 | 
15 |     /// Read from the port
16 |     pub fn read(self: Portu8) u8 {
17 |         return x86_64.instructions.port.readU8(self.port);
18 |     }
19 | 
20 |     /// Write to the port
21 |     pub fn write(self: Portu8, value: u8) void {
22 |         x86_64.instructions.port.writeU8(self.port, value);
23 |     }
24 | 
25 |     comptime {
26 |         std.testing.refAllDecls(@This());
27 |     }
28 | };
29 | 
30 | /// A u16 I/O port
31 | pub const Portu16 = struct {
32 |     port: u16,
33 | 
34 |     pub fn init(port: u16) Portu16 {
35 |         return .{
36 |             .port = port,
37 |         };
38 |     }
39 | 
40 |     /// Read from the port
41 |     pub fn read(self: Portu16) u16 {
42 |         return x86_64.instructions.port.readU16(self.port);
43 |     }
44 | 
45 |     /// Write to the port
46 |     pub fn write(self: Portu16, value: u16) void {
47 |         x86_64.instructions.port.writeU16(self.port, value);
48 |     }
49 | 
50 |     comptime {
51 |         std.testing.refAllDecls(@This());
52 |     }
53 | };
54 | 
55 | /// A u32 I/O port
56 | pub const Portu32 = struct {
57 |     port: u16,
58 | 
59 |     pub fn init(port: u16) Portu32 {
60 |         return .{
61 |             .port = port,
62 |         };
63 |     }
64 | 
65 |     /// Read from the port
66 |     pub fn read(self: Portu32) u32 {
67 |         return x86_64.instructions.port.readU32(self.port);
68 |     }
69 | 
70 |     /// Write to the port
71 |     pub fn write(self: Portu32, value: u32) void {
72 |         x86_64.instructions.port.writeU32(self.port, value);
73 |     }
74 | 
75 |     comptime {
76 |         std.testing.refAllDecls(@This());
77 |     }
78 | };
79 | 
80 | comptime {
81 |     std.testing.refAllDecls(@This());
82 | }
83 | 
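A short usage sketch for the typed ports above (not crate code; 0x64/0x60 are the well-known PS/2 controller status and data ports):

    const status_port = Portu8.init(0x64);
    const data_port = Portu8.init(0x60);

    fn readScancode() u8 {
        // Poll the PS/2 status register until bit 0 reports the output buffer full.
        while (status_port.read() & 1 == 0) {}
        return data_port.read();
    }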
--------------------------------------------------------------------------------
/src/structures/paging/frame_alloc.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | const paging = x86_64.structures.paging;
6 | 
7 | pub const FrameAllocator = struct {
8 |     z_impl_allocateFrame: fn (frame_allocator: *FrameAllocator) ?paging.PhysFrame,
9 |     z_impl_allocateFrame2MiB: fn (frame_allocator: *FrameAllocator) ?paging.PhysFrame2MiB,
10 |     z_impl_allocateFrame1GiB: fn (frame_allocator: *FrameAllocator) ?paging.PhysFrame1GiB,
11 |     z_impl_deallocateFrame: fn (frame_allocator: *FrameAllocator, frame: paging.PhysFrame) void,
12 |     z_impl_deallocateFrame2MiB: fn (frame_allocator: *FrameAllocator, frame: paging.PhysFrame2MiB) void,
13 |     z_impl_deallocateFrame1GiB: fn (frame_allocator: *FrameAllocator, frame: paging.PhysFrame1GiB) void,
14 | 
15 |     pub inline fn allocate4KiB(frame_allocator: *FrameAllocator) ?paging.PhysFrame {
16 |         return frame_allocator.z_impl_allocateFrame(frame_allocator);
17 |     }
18 | 
19 |     pub inline fn allocate2MiB(frame_allocator: *FrameAllocator) ?paging.PhysFrame2MiB {
20 |         return frame_allocator.z_impl_allocateFrame2MiB(frame_allocator);
21 |     }
22 | 
23 |     pub inline fn allocate1GiB(frame_allocator: *FrameAllocator) ?paging.PhysFrame1GiB {
24 |         return frame_allocator.z_impl_allocateFrame1GiB(frame_allocator);
25 |     }
26 | 
27 |     pub inline fn deallocate4KiB(frame_allocator: *FrameAllocator, frame: paging.PhysFrame) void {
28 |         return frame_allocator.z_impl_deallocateFrame(frame_allocator, frame);
29 |     }
30 | 
31 |     pub inline fn deallocate2MiB(frame_allocator: *FrameAllocator, frame: paging.PhysFrame2MiB) void {
32 |         return frame_allocator.z_impl_deallocateFrame2MiB(frame_allocator, frame);
33 |     }
34 | 
35 |     pub inline fn deallocate1GiB(frame_allocator: *FrameAllocator, frame: paging.PhysFrame1GiB) void {
36 |         return frame_allocator.z_impl_deallocateFrame1GiB(frame_allocator, frame);
37 |     }
38 | 
39 |     comptime {
40 |         std.testing.refAllDecls(@This());
41 |     }
42 | };
43 | 
44 | comptime {
45 |     std.testing.refAllDecls(@This());
46 | }
47 | 
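`FrameAllocator` is a manual vtable: an implementation embeds the struct, fills in the `z_impl_*` function pointers, and recovers its own state with `@fieldParentPtr`. A rough sketch assuming only the interface above (`EmptyFrameAllocator` is made up and never hands out frames):

    const EmptyFrameAllocator = struct {
        frame_allocator: FrameAllocator = .{
            .z_impl_allocateFrame = allocFrame,
            .z_impl_allocateFrame2MiB = allocFrame2MiB,
            .z_impl_allocateFrame1GiB = allocFrame1GiB,
            .z_impl_deallocateFrame = deallocFrame,
            .z_impl_deallocateFrame2MiB = deallocFrame2MiB,
            .z_impl_deallocateFrame1GiB = deallocFrame1GiB,
        },

        // A real implementation would reach its own state via
        // @fieldParentPtr(EmptyFrameAllocator, "frame_allocator", fa).
        fn allocFrame(fa: *FrameAllocator) ?paging.PhysFrame {
            _ = fa;
            return null;
        }
        fn allocFrame2MiB(fa: *FrameAllocator) ?paging.PhysFrame2MiB {
            _ = fa;
            return null;
        }
        fn allocFrame1GiB(fa: *FrameAllocator) ?paging.PhysFrame1GiB {
            _ = fa;
            return null;
        }
        fn deallocFrame(fa: *FrameAllocator, f: paging.PhysFrame) void {
            _ = fa;
            _ = f;
        }
        fn deallocFrame2MiB(fa: *FrameAllocator, f: paging.PhysFrame2MiB) void {
            _ = fa;
            _ = f;
        }
        fn deallocFrame1GiB(fa: *FrameAllocator, f: paging.PhysFrame1GiB) void {
            _ = fa;
            _ = f;
        }
    };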
--------------------------------------------------------------------------------
/src/instructions/instructions.zig:
--------------------------------------------------------------------------------
1 | /// Enabling and disabling interrupts
2 | pub const interrupts = @import("interrupts.zig");
3 | 
4 | /// Access to I/O ports
5 | pub const port = @import("port.zig");
6 | 
7 | /// Support for built-in RNGs
8 | pub const random = @import("random.zig");
9 | 
10 | /// Provides functions to read and write segment registers.
11 | pub const segmentation = @import("segmentation.zig");
12 | 
13 | /// Functions to load GDT, IDT, and TSS structures.
14 | pub const tables = @import("tables.zig");
15 | 
16 | /// Functions to flush the translation lookaside buffer (TLB).
17 | pub const tlb = @import("tlb.zig");
18 | 
19 | /// Halts the CPU until the next interrupt arrives.
20 | pub fn hlt() void {
21 |     asm volatile ("hlt");
22 | }
23 | 
24 | /// Set the AC (alignment check) bit in the RFLAGS register.
25 | ///
26 | /// This is used to enable accessing user mapped pages when
27 | /// SMAP (Supervisor Mode Access Prevention) is enabled.
28 | pub fn setac() void {
29 |     asm volatile ("stac" ::: "cc");
30 | }
31 | 
32 | /// Clear the AC (alignment check) bit in the RFLAGS register.
33 | ///
34 | /// This is used to disable accessing user mapped pages when
35 | /// SMAP (Supervisor Mode Access Prevention) is enabled.
36 | pub fn clearac() void {
37 |     asm volatile ("clac" ::: "cc");
38 | }
39 | 
40 | /// Executes the `nop` instruction, which performs no operation (i.e. does nothing).
41 | ///
42 | /// This operation is useful to work around the LLVM bug that endless loops are illegally
43 | /// optimized away (see https://github.com/rust-lang/rust/issues/28728). By invoking this
44 | /// instruction (which is marked as volatile), the compiler should no longer optimize the
45 | /// endless loop away.
46 | pub fn nop() void {
47 |     asm volatile ("nop");
48 | }
49 | 
50 | /// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)' instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
51 | /// emulator. Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
52 | pub fn bochsBreakpoint() void {
53 |     asm volatile ("xchgw %%bx, %%bx");
54 | }
55 | 
56 | pub fn pause() void {
57 |     asm volatile ("pause" ::: "memory");
58 | }
59 | 
60 | comptime {
61 |     const std = @import("std");
62 |     std.testing.refAllDecls(@This());
63 | }
64 | 
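The usual consumer of `hlt` is a kernel idle loop (a sketch, not crate code):

    fn idle() noreturn {
        while (true) {
            // Sleep until the next interrupt instead of spinning at full speed.
            hlt();
        }
    }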
--------------------------------------------------------------------------------
/src/structures/paging/paging.zig:
--------------------------------------------------------------------------------
1 | const page_table = @import("page_table.zig");
2 | pub const PAGE_TABLE_ENTRY_COUNT = page_table.PAGE_TABLE_ENTRY_COUNT;
3 | pub const FrameError = page_table.FrameError;
4 | pub const PageTableEntry = page_table.PageTableEntry;
5 | pub const PageTableFlags = page_table.PageTableFlags;
6 | pub const PageTable = page_table.PageTable;
7 | pub const PageTableIndex = page_table.PageTableIndex;
8 | pub const PageOffset = page_table.PageOffset;
9 | pub const PageTableLevel = page_table.PageTableLevel;
10 | 
11 | const frame = @import("frame.zig");
12 | 
13 | pub const PhysFrame = frame.PhysFrame;
14 | pub const PhysFrame2MiB = frame.PhysFrame2MiB;
15 | pub const PhysFrame1GiB = frame.PhysFrame1GiB;
16 | pub const PhysFrameError = frame.PhysFrameError;
17 | pub const PhysFrameIterator = frame.PhysFrameIterator;
18 | pub const PhysFrameIterator2MiB = frame.PhysFrameIterator2MiB;
19 | pub const PhysFrameIterator1GiB = frame.PhysFrameIterator1GiB;
20 | pub const PhysFrameRange = frame.PhysFrameRange;
21 | pub const PhysFrameRange2MiB = frame.PhysFrameRange2MiB;
22 | pub const PhysFrameRange1GiB = frame.PhysFrameRange1GiB;
23 | pub const PhysFrameRangeInclusive = frame.PhysFrameRangeInclusive;
24 | pub const PhysFrameRange2MiBInclusive = frame.PhysFrameRange2MiBInclusive;
25 | pub const PhysFrameRange1GiBInclusive = frame.PhysFrameRange1GiBInclusive;
26 | 
27 | const page = @import("page.zig");
28 | 
29 | pub const PageSize = page.PageSize;
30 | pub const Page = page.Page;
31 | pub const Page2MiB = page.Page2MiB;
32 | pub const Page1GiB = page.Page1GiB;
33 | pub const PageError = page.PageError;
34 | pub const pageFromTableIndices = page.pageFromTableIndices;
35 | pub const pageFromTableIndices2MiB = page.pageFromTableIndices2MiB;
36 | pub const pageFromTableIndices1GiB = page.pageFromTableIndices1GiB;
37 | pub const PageRange = page.PageRange;
38 | pub const PageRange2MiB = page.PageRange2MiB;
39 | pub const PageRange1GiB = page.PageRange1GiB;
40 | pub const PageRangeInclusive = page.PageRangeInclusive;
41 | pub const PageRange2MiBInclusive = page.PageRange2MiBInclusive;
42 | pub const PageRange1GiBInclusive = page.PageRange1GiBInclusive;
43 | pub const PageIterator = page.PageIterator;
44 | pub const PageIterator2MiB = page.PageIterator2MiB;
45 | pub const PageIterator1GiB = page.PageIterator1GiB;
46 | 
47 | const frame_alloc = @import("frame_alloc.zig");
48 | pub const FrameAllocator = frame_alloc.FrameAllocator;
49 | 
50 | pub const mapping = @import("mapping/mapping.zig");
51 | 
52 | comptime {
53 |     @import("std").testing.refAllDecls(@This());
54 | }
55 | 
--------------------------------------------------------------------------------
/src/instructions/interrupts.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | pub const EnsureNoInterrupts = struct {
6 |     enabled: bool,
7 | 
8 |     pub fn start() EnsureNoInterrupts {
9 |         const enabled = areEnabled();
10 |         if (enabled) disable();
11 |         return .{ .enabled = enabled };
12 |     }
13 | 
14 |     pub fn end(self: EnsureNoInterrupts) void {
15 |         if (self.enabled) enable();
16 |     }
17 | };
18 | 
19 | /// Returns whether interrupts are enabled.
20 | pub fn areEnabled() bool {
21 |     return x86_64.registers.RFlags.read().interrupt;
22 | }
23 | 
24 | /// Enable interrupts.
25 | ///
26 | /// This is a wrapper around the `sti` instruction.
27 | pub fn enable() void {
28 |     asm volatile ("sti");
29 | }
30 | 
31 | /// Disable interrupts.
32 | ///
33 | /// This is a wrapper around the `cli` instruction.
34 | pub fn disable() void {
35 |     asm volatile ("cli");
36 | }
37 | 
38 | /// Atomically enable interrupts and put the CPU to sleep
39 | ///
40 | /// Executes the `sti; hlt` instruction sequence. Since the `sti` instruction
41 | /// keeps interrupts disabled until after the immediately following
42 | /// instruction (called "interrupt shadow"), no interrupt can occur between the
43 | /// two instructions. (One exception to this is non-maskable interrupts; this
44 | /// is explained below.)
45 | ///
46 | /// This function is useful to put the CPU to sleep without missing interrupts
47 | /// that occur immediately before the `hlt` instruction.
48 | ///
49 | /// ## Non-maskable Interrupts
50 | ///
51 | /// On some processors, the interrupt shadow of `sti` does not apply to
52 | /// non-maskable interrupts (NMIs). This means that an NMI can occur between
53 | /// the `sti` and `hlt` instruction, with the result that the CPU is put to
54 | /// sleep even though a new interrupt occurred.
55 | ///
56 | /// To work around this, it is recommended to check in the NMI handler if
57 | /// the interrupt occurred between `sti` and `hlt` instructions. If this is the
58 | /// case, the handler should increase the instruction pointer stored in the
59 | /// interrupt stack frame so that the `hlt` instruction is skipped.
60 | ///
61 | /// See for more
62 | /// information.
63 | pub fn enableAndHlt() void {
64 |     asm volatile ("sti; hlt");
65 | }
66 | 
67 | pub fn disableAndHlt() noreturn {
68 |     while (true) {
69 |         asm volatile ("cli; hlt");
70 |     }
71 | }
72 | 
73 | /// Cause a breakpoint exception by invoking the `int3` instruction.
74 | pub fn int3() void {
75 |     asm volatile ("int3");
76 | }
77 | 
78 | /// Generate a software interrupt by invoking the `int` instruction.
79 | pub fn softwareInterrupt(comptime num: usize) void {
80 |     asm volatile ("int %[num]"
81 |         :
82 |         : [num] "N" (num),
83 |     );
84 | }
85 | 
86 | comptime {
87 |     std.testing.refAllDecls(@This());
88 | }
89 | 
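Typical use of `EnsureNoInterrupts` (a sketch; `doCriticalWork` is a hypothetical placeholder): `start` masks interrupts, and `end` re-enables them only if they were enabled to begin with, so nesting is safe:

    fn doCriticalWork() void {}

    fn withInterruptsMasked() void {
        const guard = EnsureNoInterrupts.start();
        defer guard.end();

        // Runs with interrupts masked.
        doCriticalWork();
    }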
--------------------------------------------------------------------------------
/src/instructions/tlb.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// Invalidate the given address in the TLB using the `invlpg` instruction.
6 | pub fn flush(addr: x86_64.VirtAddr) void {
7 |     asm volatile ("invlpg (%[addr])"
8 |         :
9 |         : [addr] "r" (addr.value),
10 |         : "memory"
11 |     );
12 | }
13 | 
14 | /// Invalidate the TLB completely by reloading the CR3 register.
15 | pub fn flushAll() void {
16 |     x86_64.registers.control.Cr3.write(x86_64.registers.control.Cr3.read());
17 | }
18 | 
19 | /// The Invalidate PCID Command to execute.
20 | pub const InvPicCommand = union(enum) {
21 |     pub const AddressCommand = struct { virtAddr: x86_64.VirtAddr, pcid: Pcid };
22 | 
23 |     /// The logical processor invalidates mappings—except global translations—for the linear address and PCID specified.
24 |     address: AddressCommand,
25 | 
26 |     /// The logical processor invalidates all mappings—except global translations—associated with the PCID.
27 |     single: Pcid,
28 | 
29 |     /// The logical processor invalidates all mappings—including global translations—associated with any PCID.
30 |     all,
31 | 
32 |     /// The logical processor invalidates all mappings—except global translations—associated with any PCID.
33 |     allExceptGlobal,
34 | };
35 | 
36 | /// The INVPCID descriptor comprises 128 bits and consists of a PCID and a linear address.
37 | /// For INVPCID type 0, the processor uses the full 64 bits of the linear address even outside 64-bit mode; the linear address is not used for other INVPCID types.
38 | pub const InvpcidDescriptor = extern struct {
39 |     address: u64,
40 |     pcid: u64,
41 | };
42 | 
43 | /// Structure of a PCID. A PCID has to be < 4096 for x86_64.
44 | pub const Pcid = packed struct {
45 |     value: u12,
46 | 
47 |     /// Create a new PCID
48 |     pub fn init(pcid: u12) Pcid {
49 |         return .{
50 |             .value = pcid,
51 |         };
52 |     }
53 | };
54 | 
55 | /// Invalidate the given address in the TLB using the `invpcid` instruction.
56 | ///
57 | /// ## Safety
58 | /// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
59 | pub fn flushPcid(command: InvPicCommand) void {
60 |     var desc = InvpcidDescriptor{
61 |         .address = 0,
62 |         .pcid = 0,
63 |     };
64 | 
65 |     const kind: u64 = blk: {
66 |         switch (command) {
67 |             .address => |address| {
68 |                 desc.address = address.virtAddr.value;
69 |                 desc.pcid = address.pcid.value;
70 |                 break :blk 0;
71 |             },
72 |             .single => |pcid| {
73 |                 desc.pcid = pcid.value;
74 |                 break :blk 1;
75 |             },
76 |             .all => break :blk 2,
77 |             .allExceptGlobal => break :blk 3,
78 |         }
79 |     };
80 | 
81 |     asm volatile ("invpcid (%[desc]), %[kind]"
82 |         :
83 |         : [kind] "r" (kind),
84 |           [desc] "r" (@ptrToInt(&desc)),
85 |         : "memory"
86 |     );
87 | }
88 | 
89 | comptime {
90 |     std.testing.refAllDecls(@This());
91 | }
92 | 
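After a page-table entry changes, the stale translation has to be evicted; a usage sketch for the two flush styles above (not crate code):

    fn afterRemapOne(addr: x86_64.VirtAddr) void {
        // Invalidate just the translation for the remapped page.
        flush(addr);
    }

    fn afterBulkRemap() void {
        // Or reload CR3 and drop every non-global TLB entry at once.
        flushAll();
    }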
--------------------------------------------------------------------------------
/src/instructions/random.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// Used to obtain random numbers using x86_64's RDRAND opcode
6 | pub const RdRand = struct {
7 |     /// Creates a RdRand if RDRAND is supported, null otherwise
8 |     pub fn init() ?RdRand {
9 |         // RDRAND support indicated by CPUID page 01h, ecx bit 30
10 |         // https://en.wikipedia.org/wiki/RdRand#Overview
11 |         if (bitjuggle.isBitSet(x86_64.cpuid(0x1).ecx, 30)) {
12 |             return RdRand{};
13 |         }
14 |         return null;
15 |     }
16 | 
17 |     /// Uniformly sampled u64.
18 |     /// May fail in rare circumstances or heavy load.
19 |     pub fn getU64(self: RdRand) ?u64 {
20 |         _ = self;
21 |         var carry: u8 = undefined;
22 |         const num: u64 = asm ("rdrand %[result]; setc %[carry]"
23 |             : [result] "=r" (-> u64),
24 |             : [carry] "qm" (&carry),
25 |             : "cc"
26 |         );
27 |         return if (carry == 0) null else num;
28 |     }
29 | 
30 |     /// Uniformly sampled u32.
31 |     /// May fail in rare circumstances or heavy load.
32 |     pub fn getU32(self: RdRand) ?u32 {
33 |         _ = self;
34 |         var carry: u8 = undefined;
35 |         const num: u32 = asm ("rdrand %[result]; setc %[carry]"
36 |             : [result] "=r" (-> u32),
37 |             : [carry] "qm" (&carry),
38 |             : "cc"
39 |         );
40 |         return if (carry == 0) null else num;
41 |     }
42 | 
43 |     /// Uniformly sampled u16.
44 |     /// May fail in rare circumstances or heavy load.
45 |     pub fn getU16(self: RdRand) ?u16 {
46 |         _ = self;
47 |         var carry: u8 = undefined;
48 |         const num: u16 = asm ("rdrand %[result]; setc %[carry]"
49 |             : [result] "=r" (-> u16),
50 |             : [carry] "qm" (&carry),
51 |             : "cc"
52 |         );
53 |         return if (carry == 0) null else num;
54 |     }
55 | 
56 |     comptime {
57 |         std.testing.refAllDecls(@This());
58 |     }
59 | };
60 | 
61 | /// Used to obtain seed numbers using x86_64's RDSEED opcode
62 | pub const RdSeed = struct {
63 |     /// Creates RdSeed if RDSEED is supported
64 |     pub fn init() ?RdSeed {
65 |         // RDSEED support indicated by CPUID page 07h, ebx bit 18
66 |         if (bitjuggle.isBitSet(x86_64.cpuid(0x7).ebx, 18)) {
67 |             return RdSeed{};
68 |         }
69 |         return null;
70 |     }
71 | 
72 |     /// Random u64 seed directly from entropy store.
73 |     /// May fail in rare circumstances or heavy load.
74 |     pub fn getU64(self: RdSeed) ?u64 {
75 |         _ = self;
76 |         var carry: u8 = undefined;
77 |         const num: u64 = asm ("rdseed %[result]; setc %[carry]"
78 |             : [result] "=r" (-> u64),
79 |             : [carry] "qm" (&carry),
80 |             : "cc"
81 |         );
82 |         return if (carry == 0) null else num;
83 |     }
84 | 
85 |     /// Random u32 seed directly from entropy store.
86 |     /// May fail in rare circumstances or heavy load.
87 |     pub fn getU32(self: RdSeed) ?u32 {
88 |         _ = self;
89 |         var carry: u8 = undefined;
90 |         const num: u32 = asm ("rdseed %[result]; setc %[carry]"
91 |             : [result] "=r" (-> u32),
92 |             : [carry] "qm" (&carry),
93 |             : "cc"
94 |         );
95 |         return if (carry == 0) null else num;
96 |     }
97 | 
98 |     /// Random u16 seed directly from entropy store.
99 |     /// May fail in rare circumstances or heavy load.
100 |     pub fn getU16(self: RdSeed) ?u16 {
101 |         _ = self;
102 |         var carry: u8 = undefined;
103 |         const num: u16 = asm ("rdseed %[result]; setc %[carry]"
104 |             : [result] "=r" (-> u16),
105 |             : [carry] "qm" (&carry),
106 |             : "cc"
107 |         );
108 |         return if (carry == 0) null else num;
109 |     }
110 | 
111 |     comptime {
112 |         std.testing.refAllDecls(@This());
113 |     }
114 | };
115 | 
116 | comptime {
117 |     std.testing.refAllDecls(@This());
118 | }
119 | 
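A usage sketch for `RdRand` (not crate code): `init` returns null on CPUs without RDRAND, and each `get*` call can fail transiently, so both layers need checking:

    fn tryRandomU64() ?u64 {
        const rdrand = RdRand.init() orelse return null; // CPU lacks RDRAND

        // Retry a few times; a clear carry flag signals transient failure.
        var attempts: usize = 0;
        while (attempts < 10) : (attempts += 1) {
            if (rdrand.getU64()) |value| return value;
        }
        return null;
    }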
--------------------------------------------------------------------------------
/src/additional/serial_port.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const std = @import("std");
3 | 
4 | const Portu8 = x86_64.structures.port.Portu8;
5 | const writeU8 = x86_64.instructions.port.writeU8;
6 | 
7 | const DATA_READY: u8 = 1;
8 | const OUTPUT_READY: u8 = 1 << 5;
9 | 
10 | pub const COMPort = enum {
11 |     COM1,
12 |     COM2,
13 |     COM3,
14 |     COM4,
15 | 
16 |     fn toPort(com_port: COMPort) u16 {
17 |         return switch (com_port) {
18 |             .COM1 => 0x3F8,
19 |             .COM2 => 0x2F8,
20 |             .COM3 => 0x3E8,
21 |             .COM4 => 0x2E8,
22 |         };
23 |     }
24 | 
25 |     comptime {
26 |         std.testing.refAllDecls(@This());
27 |     }
28 | };
29 | 
30 | pub const BaudRate = enum {
31 |     Baud115200,
32 |     Baud57600,
33 |     Baud38400,
34 |     Baud28800,
35 | 
36 |     fn toDivisor(baud_rate: BaudRate) u8 {
37 |         return switch (baud_rate) {
38 |             .Baud115200 => 1,
39 |             .Baud57600 => 2,
40 |             .Baud38400 => 3,
41 |             .Baud28800 => 4,
42 |         };
43 |     }
44 | 
45 |     comptime {
46 |         std.testing.refAllDecls(@This());
47 |     }
48 | };
49 | 
50 | pub const SerialPort = struct {
51 |     z_data_port: Portu8,
52 |     z_line_status_port: Portu8,
53 | 
54 |     /// Initialize the serial port at `com_port` with the baud rate `baud_rate`
55 |     pub fn init(com_port: COMPort, baud_rate: BaudRate) SerialPort {
56 |         const data_port_number = com_port.toPort();
57 | 
58 |         // Disable interrupts
59 |         writeU8(data_port_number + 1, 0x00);
60 | 
61 |         // Set baud rate
62 |         writeU8(data_port_number + 3, 0x80);
63 |         writeU8(data_port_number, baud_rate.toDivisor());
64 |         writeU8(data_port_number + 1, 0x00);
65 | 
66 |         // 8 bits, no parity, one stop bit
67 |         writeU8(data_port_number + 3, 0x03);
68 | 
69 |         // Enable FIFO
70 |         writeU8(data_port_number + 2, 0xC7);
71 | 
72 |         // Mark data terminal ready
73 |         writeU8(data_port_number + 4, 0x0B);
74 | 
75 |         // Enable interrupts
76 |         writeU8(data_port_number + 1, 0x01);
77 | 
78 |         return .{
79 |             .z_data_port = Portu8.init(data_port_number),
80 |             .z_line_status_port = Portu8.init(data_port_number + 5),
81 |         };
82 |     }
83 | 
84 |     fn waitForOutputReady(self: SerialPort) void {
85 |         while (self.z_line_status_port.read() & OUTPUT_READY == 0) {
86 |             x86_64.instructions.pause();
87 |         }
88 |     }
89 | 
90 |     fn waitForInputReady(self: SerialPort) void {
91 |         while (self.z_line_status_port.read() & DATA_READY == 0) {
92 |             x86_64.instructions.pause();
93 |         }
94 |     }
95 | 
96 |     fn sendByte(self: SerialPort, data: u8) void {
97 |         switch (data) {
98 |             8, 0x7F => {
99 |                 self.waitForOutputReady();
100 |                 self.z_data_port.write(8);
101 |                 self.waitForOutputReady();
102 |                 self.z_data_port.write(' ');
103 |                 self.waitForOutputReady();
104 |                 self.z_data_port.write(8);
105 |             },
106 |             else => {
107 |                 self.waitForOutputReady();
108 |                 self.z_data_port.write(data);
109 |             },
110 |         }
111 |     }
112 | 
113 |     pub fn readByte(self: SerialPort) u8 {
114 |         self.waitForInputReady();
115 |         return self.z_data_port.read();
116 |     }
117 | 
118 |     pub const Writer = std.io.Writer(SerialPort, error{}, writerImpl);
119 |     pub fn writer(self: SerialPort) Writer {
120 |         return .{ .context = self };
121 |     }
122 | 
123 |     /// The impl function driving the `std.io.Writer`
124 |     fn writerImpl(self: SerialPort, bytes: []const u8) error{}!usize {
125 |         for (bytes) |char| {
126 |             self.sendByte(char);
127 |         }
128 |         return bytes.len;
129 |     }
130 | 
131 |     comptime {
132 |         std.testing.refAllDecls(@This());
133 |     }
134 | };
135 | 
136 | comptime {
137 |     std.testing.refAllDecls(@This());
138 | }
139 | 
--------------------------------------------------------------------------------
/src/registers/xcontrol.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | const formatWithoutFields = @import("../common.zig").formatWithoutFields;
5 | 
6 | /// Extended feature enable mask register
7 | pub const XCr0 = packed struct {
8 |     /// Enables x87 FPU
9 |     x87: bool,
10 | 
11 |     /// Enables 128-bit (legacy) SSE
12 |     /// Must be set to enable AVX and YMM
13 |     sse: bool,
14 | 
15 |     /// Enables 256-bit SSE
16 |     /// Must be set to enable AVX
17 |     avx: bool,
18 | 
19 |     /// When set, MPX instructions are enabled and the bound registers BND0-BND3 can be managed by XSAVE.
20 |     bndreg: bool,
21 | 
22 |     /// When set, MPX instructions can be executed and XSAVE can manage the BNDCFGU and BNDSTATUS registers.
23 |     bndcsr: bool,
24 | 
25 |     /// If set, AVX-512 instructions can be executed and XSAVE can manage the K0-K7 mask registers.
26 |     opmask: bool,
27 | 
28 |     /// If set, AVX-512 instructions can be executed and XSAVE can be used to manage the upper halves of the lower ZMM registers.
29 |     zmm_hi256: bool,
30 | 
31 |     /// If set, AVX-512 instructions can be executed and XSAVE can manage the upper ZMM registers.
32 | hi16_zmm: bool, 33 | 34 | z_reserved8: bool, 35 | 36 | /// When set, PKRU state management is supported by XSAVE/XRSTOR 37 | mpk: bool, 38 | 39 | z_reserved10_15: u6, 40 | z_reserved16_47: u32, 41 | z_reserved48_55: u8, 42 | z_reserved56_61: u6, 43 | 44 | /// When set the Lightweight Profiling extensions are enabled 45 | lwp: bool, 46 | 47 | z_reserved63: bool, 48 | 49 | /// Read the current set of XCr0 flags. 50 | pub fn read() XCr0 { 51 | return XCr0.fromU64(readRaw()); 52 | } 53 | 54 | /// Read the current raw XCr0 value. 55 | fn readRaw() u64 { 56 | var high: u32 = undefined; 57 | var low: u32 = undefined; 58 | 59 | asm ("xor %%rcx, %%rcx; xgetbv" 60 | : [low] "={rax}" (low), 61 | [high] "={rdx}" (high), 62 | : 63 | : "rcx" 64 | ); 65 | 66 | return (@as(u64, high) << 32) | @as(u64, low); 67 | } 68 | 69 | /// Write XCr0 flags. 70 | /// 71 | /// Preserves the value of reserved fields. 72 | pub fn write(self: XCr0) void { 73 | writeRaw(self.toU64() | (readRaw() & ALL_RESERVED)); 74 | } 75 | 76 | /// Write raw XCr0 flags. 77 | /// 78 | /// Does _not_ preserve any values, including reserved fields. 79 | fn writeRaw(value: u64) void { 80 | var high: u32 = @truncate(u32, value >> 32); 81 | var low: u32 = @truncate(u32, value); 82 | 83 | asm volatile ("xor %%ecx, %%ecx; xsetbv" 84 | : 85 | : [low] "{eax}" (low), 86 | [high] "{edx}" (high), 87 | : "ecx" 88 | ); 89 | } 90 | 91 | const ALL_RESERVED: u64 = blk: { 92 | var flags = std.mem.zeroes(XCr0); 93 | flags.z_reserved8 = true; 94 | flags.z_reserved10_15 = std.math.maxInt(u6); 95 | flags.z_reserved16_47 = std.math.maxInt(u32); 96 | flags.z_reserved48_55 = std.math.maxInt(u8); 97 | flags.z_reserved56_61 = std.math.maxInt(u6); 98 | flags.z_reserved63 = true; 99 | break :blk @bitCast(u64, flags); 100 | }; 101 | 102 | const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED; 103 | 104 | pub fn fromU64(value: u64) XCr0 { 105 | return @bitCast(XCr0, value & ALL_NOT_RESERVED); 106 | } 107 | 108 | pub fn toU64(self: XCr0) u64 { 109 | return @bitCast(u64, self) & ALL_NOT_RESERVED; 110 | } 111 | 112 | pub fn format(value: XCr0, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 113 | _ = fmt; 114 | return formatWithoutFields( 115 | value, 116 | options, 117 | writer, 118 | &.{ "z_reserved8", "z_reserved10_15", "z_reserved16_47", "z_reserved48_55", "z_reserved56_61", "z_reserved63" }, 119 | ); 120 | } 121 | 122 | test { 123 | try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(XCr0)); 124 | try std.testing.expectEqual(@as(usize, 8), @sizeOf(XCr0)); 125 | } 126 | 127 | comptime { 128 | std.testing.refAllDecls(@This()); 129 | } 130 | }; 131 | 132 | comptime { 133 | std.testing.refAllDecls(@This()); 134 | } 135 | -------------------------------------------------------------------------------- /src/registers/rflags.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | const formatWithoutFields = @import("../common.zig").formatWithoutFields; 5 | 6 | /// The RFLAGS register. 7 | pub const RFlags = packed struct { 8 | /// Set by hardware if last arithmetic operation generated a carry out of the 9 | /// most-significant bit of the result. 10 | carry: bool, 11 | z_reserved1: bool, 12 | 13 | /// Set by hardware if last result has an even number of 1 bits (only for some operations). 
14 |     parity: bool,
15 |     z_reserved3: bool,
16 | 
17 |     /// Set by hardware if last arithmetic operation generated a carry out of bit 3 of the
18 |     /// result.
19 |     auxiliary_carry: bool,
20 |     z_reserved5: bool,
21 | 
22 |     /// Set by hardware if last arithmetic operation resulted in a zero value.
23 |     zero: bool,
24 | 
25 |     /// Set by hardware if last arithmetic operation resulted in a negative value.
26 |     sign: bool,
27 | 
28 |     /// Enable single-step mode for debugging.
29 |     trap: bool,
30 | 
31 |     /// Enable interrupts.
32 |     interrupt: bool,
33 | 
34 |     /// Determines the order in which strings are processed.
35 |     direction: bool,
36 | 
37 |     /// Set by hardware to indicate that the sign bit of the result of the last signed integer
38 |     /// operation differs from the source operands.
39 |     overflow: bool,
40 | 
41 |     /// Specifies the privilege level required for executing I/O address-space instructions.
42 |     iopl: u2,
43 | 
44 |     /// Used by `iret` in hardware task switch mode to determine if current task is nested.
45 |     nested: bool,
46 |     z_reserved15: bool,
47 | 
48 |     /// Allows restarting an instruction following an instruction breakpoint.
49 |     @"resume": bool,
50 | 
51 |     /// Enable the virtual-8086 mode.
52 |     virtual_8086: bool,
53 | 
54 |     /// Enable automatic alignment checking if CR0.AM is set. Only works if CPL is 3.
55 |     alignment_check: bool,
56 | 
57 |     /// Virtual image of the INTERRUPT_FLAG bit.
58 |     ///
59 |     /// Used when virtual-8086 mode extensions (CR4.VME) or protected-mode virtual
60 |     /// interrupts (CR4.PVI) are activated.
61 |     virtual_interrupt: bool,
62 | 
63 |     /// Indicates that an external, maskable interrupt is pending.
64 |     ///
65 |     /// Used when virtual-8086 mode extensions (CR4.VME) or protected-mode virtual
66 |     /// interrupts (CR4.PVI) are activated.
67 |     virtual_interrupt_pending: bool,
68 | 
69 |     /// Processor feature identification flag.
70 |     ///
71 |     /// If this flag is modifiable, the CPU supports CPUID.
72 |     id: bool,
73 | 
74 |     z_reserved22_31: u10,
75 |     z_reserved32_63: u32,
76 | 
77 |     /// Returns the current value of the RFLAGS register.
78 |     pub fn read() RFlags {
79 |         return RFlags.fromU64(readRaw());
80 |     }
81 | 
82 |     /// Returns the raw current value of the RFLAGS register.
83 |     fn readRaw() u64 {
84 |         return asm ("pushfq; popq %[ret]"
85 |             : [ret] "=r" (-> u64),
86 |             :
87 |             : "memory"
88 |         );
89 |     }
90 | 
91 |     /// Writes the RFLAGS register, preserves reserved bits.
92 |     pub fn write(self: RFlags) void {
93 |         writeRaw(self.toU64() | (readRaw() & ALL_RESERVED));
94 |     }
95 | 
96 |     /// Writes the RFLAGS register.
97 |     /// Does not preserve any bits.
98 |     fn writeRaw(value: u64) void {
99 |         asm volatile ("pushq %[val]; popfq"
100 |             :
101 |             : [val] "r" (value),
102 |             : "memory", "flags"
103 |         );
104 |     }
105 | 
106 |     const ALL_RESERVED: u64 = blk: {
107 |         var flags = std.mem.zeroes(RFlags);
108 |         flags.z_reserved1 = true;
109 |         flags.z_reserved15 = true;
110 |         flags.z_reserved22_31 = std.math.maxInt(u10);
111 |         flags.z_reserved3 = true;
112 |         flags.z_reserved32_63 = std.math.maxInt(u32);
113 |         flags.z_reserved5 = true;
114 |         break :blk @bitCast(u64, flags);
115 |     };
116 | 
117 |     const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED;
118 | 
119 |     pub fn fromU64(value: u64) RFlags {
120 |         return @bitCast(RFlags, value & ALL_NOT_RESERVED);
121 |     }
122 | 
123 |     pub fn toU64(self: RFlags) u64 {
124 |         return @bitCast(u64, self) & ALL_NOT_RESERVED;
125 |     }
126 | 
127 |     pub fn format(value: RFlags, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
128 |         _ = fmt;
129 |         return formatWithoutFields(
130 |             value,
131 |             options,
132 |             writer,
133 |             &.{ "z_reserved1", "z_reserved15", "z_reserved22_31", "z_reserved3", "z_reserved32_63", "z_reserved5" },
134 |         );
135 |     }
136 | 
137 |     test {
138 |         try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(RFlags));
139 |         try std.testing.expectEqual(@as(usize, 8), @sizeOf(RFlags));
140 |     }
141 | 
142 |     comptime {
143 |         std.testing.refAllDecls(@This());
144 |     }
145 | };
146 | 
147 | comptime {
148 |     std.testing.refAllDecls(@This());
149 | }
150 | 
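A sketch of the read-modify-write pattern `RFlags` is built for (not crate code); `write` re-reads the register internally so reserved bits are preserved:

    fn enableAlignmentCheck() void {
        var flags = RFlags.read();
        flags.alignment_check = true;
        flags.write();
    }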
--------------------------------------------------------------------------------
/src/index.zig:
--------------------------------------------------------------------------------
1 | pub usingnamespace @import("addr.zig");
2 | 
3 | /// Representations of various x86 specific structures and descriptor tables.
4 | pub const structures = @import("structures/structures.zig");
5 | 
6 | /// Access to various system and model specific registers.
7 | pub const registers = @import("registers/registers.zig");
8 | 
9 | /// Special x86_64 instructions.
10 | pub const instructions = @import("instructions/instructions.zig");
11 | 
12 | /// Various additional functionality not found in the rust x86_64 crate
13 | pub const additional = @import("additional/additional.zig");
14 | 
15 | pub const PrivilegeLevel = enum(u8) {
16 |     /// Privilege-level 0 (most privilege): This level is used by critical system-software
17 |     /// components that require direct access to, and control over, all processor and system
18 |     /// resources. This can include BIOS, memory-management functions, and interrupt handlers.
19 |     Ring0 = 0,
20 | 
21 |     /// Privilege-level 1 (moderate privilege): This level is used by less-critical system-
22 |     /// software services that can access and control a limited scope of processor and system
23 |     /// resources. Software running at these privilege levels might include some device drivers
24 |     /// and library routines. The actual privileges of this level are defined by the
25 |     /// operating system.
26 |     Ring1 = 1,
27 | 
28 |     /// Privilege-level 2 (moderate privilege): Like level 1, this level is used by
29 |     /// less-critical system-software services that can access and control a limited scope of
30 |     /// processor and system resources. The actual privileges of this level are defined by the
31 |     /// operating system.
32 |     Ring2 = 2,
33 | 
34 |     /// Privilege-level 3 (least privilege): This level is used by application software.
35 |     /// Software running at privilege-level 3 is normally prevented from directly accessing
36 |     /// most processor and system resources. Instead, applications request access to the
37 |     /// protected processor and system resources by calling more-privileged service routines
38 |     /// to perform the accesses.
39 |     Ring3 = 3,
40 | };
41 | 
42 | /// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
43 | /// data are not placed into the same cache line. Updating an atomic value invalidates the whole
44 | /// cache line it belongs to, which makes the next access to the same cache line slower for other
45 | /// CPU cores.
46 | ///
47 | /// Note that 128 is just a reasonable guess and is not guaranteed to match the actual cache line
48 | /// length of the machine the program is running on. On modern Intel architectures, spatial
49 | /// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that
50 | /// cache lines are 128 bytes long.
51 | pub const CACHE_LINE_LENGTH: usize = 128;
52 | 
53 | /// Result of the `cpuid` instruction.
54 | pub const CpuidResult = struct {
55 |     /// EAX register.
56 |     eax: u32,
57 | 
58 |     /// EBX register.
59 |     ebx: u32,
60 | 
61 |     /// ECX register.
62 |     ecx: u32,
63 | 
64 |     /// EDX register.
65 |     edx: u32,
66 | };
67 | 
68 | /// Returns the result of the `cpuid` instruction for a given `leaf` (`EAX`), with `sub_leaf` (`ECX`) equal to zero.
69 | /// See `cpuidWithSubleaf`.
70 | pub fn cpuid(leaf: u32) CpuidResult {
71 |     return cpuidWithSubleaf(leaf, 0);
72 | }
73 | 
74 | /// Returns the result of the `cpuid` instruction for a given `leaf` (`EAX`) and `sub_leaf` (`ECX`).
75 | ///
76 | /// The highest-supported leaf value is returned by the first item of `cpuidMax(0)`.
77 | /// For leaves containing sub-leaves, the second item returns the highest-supported sub-leaf value.
78 | ///
79 | /// The CPUID Wikipedia page describes which information can be queried using the `EAX` and `ECX` registers, and how to interpret the
80 | /// results returned in `EAX`, `EBX`, `ECX`, and `EDX`.
81 | ///
82 | /// The references are:
83 | /// - Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2: Instruction Set Reference, A-Z
84 | /// - AMD64 Architecture Programmer's Manual, Volume 3: General-Purpose and System Instructions
85 | pub fn cpuidWithSubleaf(leaf: u32, sub_leaf: u32) CpuidResult {
86 |     var eax: u32 = undefined;
87 |     var ebx: u32 = undefined;
88 |     var ecx: u32 = undefined;
89 |     var edx: u32 = undefined;
90 | 
91 |     asm volatile ("cpuid"
92 |         : [eax] "={eax}" (eax),
93 |           [ebx] "={ebx}" (ebx),
94 |           [ecx] "={ecx}" (ecx),
95 |           [edx] "={edx}" (edx),
96 |         : [eax] "{eax}" (leaf),
97 |           [ecx] "{ecx}" (sub_leaf),
98 |     );
99 | 
100 |     return CpuidResult{
101 |         .eax = eax,
102 |         .ebx = ebx,
103 |         .ecx = ecx,
104 |         .edx = edx,
105 |     };
106 | }
107 | 
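// Example (not part of the crate): leaf 0 returns the highest supported leaf
// in eax and the 12-byte vendor ID string ("GenuineIntel", "AuthenticAMD", ...)
// spread across ebx, edx, ecx in that order.
fn readVendorString() [12]u8 {
    const std = @import("std");
    const result = cpuid(0);
    var vendor: [12]u8 = undefined;
    std.mem.writeIntLittle(u32, vendor[0..4], result.ebx);
    std.mem.writeIntLittle(u32, vendor[4..8], result.edx);
    std.mem.writeIntLittle(u32, vendor[8..12], result.ecx);
    return vendor;
}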
108 | /// Returns the highest-supported `leaf` (`EAX`) and sub-leaf (`ECX`) `cpuid` values.
109 | ///
110 | /// If `cpuid` is supported, and `leaf` is zero, then the first item contains the highest `leaf` value that `cpuid` supports.
111 | /// For leaves containing sub-leaves, the second item contains the highest-supported sub-leaf value.
112 | pub fn cpuidMax(leaf: u32) [2]u32 {
113 |     const result = cpuid(leaf);
114 |     return [2]u32{
115 |         result.eax,
116 |         result.ebx,
117 |     };
118 | }
119 | 
120 | comptime {
121 |     const std = @import("std");
122 |     std.testing.refAllDecls(@This());
123 | }
124 | 
--------------------------------------------------------------------------------
/src/instructions/segmentation.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 | 
5 | /// Returns the current value of the code segment register.
6 | pub fn getCs() x86_64.structures.gdt.SegmentSelector {
7 |     return .{
8 |         .value = asm ("mov %%cs, %[ret]"
9 |             : [ret] "=r" (-> u16),
10 |         ),
11 |     };
12 | }
13 | 
14 | /// Reload code segment register.
15 | ///
16 | /// The segment base and limit are unused in 64-bit mode. Only the L (long), D
17 | /// (default operation size), and DPL (descriptor privilege-level) fields of the
18 | /// descriptor are recognized. So changing the segment register can be used to
19 | /// change privilege level or enable/disable long mode.
20 | ///
21 | /// Note this is special since we cannot directly move to [`CS`]. Instead we
22 | /// push the new segment selector and return value on the stack and use
23 | /// `retfq` to reload [`CS`] and continue at the end of our function.
24 | pub fn setCs(sel: x86_64.structures.gdt.SegmentSelector) void {
25 |     asm volatile ("pushq %[sel]; leaq 1f(%%rip), %%rax; pushq %%rax; lretq; 1:"
26 |         :
27 |         : [sel] "ri" (@as(u64, sel.value)),
28 |         : "rax", "memory"
29 |     );
30 | }
31 | 
32 | /// Returns the current value of the stack segment register.
33 | pub fn getSs() x86_64.structures.gdt.SegmentSelector {
34 |     return .{
35 |         .value = asm ("mov %%ss, %[ret]"
36 |             : [ret] "=r" (-> u16),
37 |         ),
38 |     };
39 | }
40 | 
41 | /// Reload stack segment register.
42 | ///
43 | /// Entirely unused in 64-bit mode; setting the segment register does nothing.
44 | /// However, in ring 3, the SS register still has to point to a valid
45 | /// [`Descriptor`] (it cannot be zero). This means a user-mode read/write
46 | /// segment descriptor must be present in the GDT.
47 | ///
48 | /// This register is also set by the `syscall`/`sysret` and
49 | /// `sysenter`/`sysexit` instructions (even on 64-bit transitions). This is to
50 | /// maintain symmetry with 32-bit transitions where setting SS will
51 | /// actually have an effect.
52 | pub fn setSs(sel: x86_64.structures.gdt.SegmentSelector) void {
53 |     asm volatile ("movw %[sel], %%ss"
54 |         :
55 |         : [sel] "r" (sel.value),
56 |         : "memory"
57 |     );
58 | }
59 | 
60 | /// Returns the current value of the data segment register.
61 | pub fn getDs() x86_64.structures.gdt.SegmentSelector {
62 |     return .{
63 |         .value = asm ("mov %%ds, %[ret]"
64 |             : [ret] "=r" (-> u16),
65 |         ),
66 |     };
67 | }
68 | 
69 | /// Reload data segment register.
70 | ///
71 | /// Entirely unused in 64-bit mode; setting the segment register does nothing.
72 | pub fn setDs(sel: x86_64.structures.gdt.SegmentSelector) void {
73 |     asm volatile ("movw %[sel], %%ds"
74 |         :
75 |         : [sel] "r" (sel.value),
76 |         : "memory"
77 |     );
78 | }
79 | 
80 | /// Returns the current value of the es segment register.
81 | pub fn getEs() x86_64.structures.gdt.SegmentSelector {
82 |     return .{
83 |         .value = asm ("mov %%es, %[ret]"
84 |             : [ret] "=r" (-> u16),
85 |         ),
86 |     };
87 | }
88 | 
89 | /// Reload es segment register.
90 | /// 91 | /// Entirely unused in 64-bit mode; setting the segment register does nothing. 92 | pub fn setEs(sel: x86_64.structures.gdt.SegmentSelector) void { 93 | asm volatile ("movw %[sel], %%es" 94 | : 95 | : [sel] "r" (sel.value), 96 | : "memory" 97 | ); 98 | } 99 | 100 | /// Returns the current value of the fs segment register. 101 | pub fn getFs() x86_64.structures.gdt.SegmentSelector { 102 | return .{ 103 | .value = asm ("mov %%fs, %[ret]" 104 | : [ret] "=r" (-> u16), 105 | ), 106 | }; 107 | } 108 | 109 | /// Reload fs segment register. 110 | pub fn setFs(sel: x86_64.structures.gdt.SegmentSelector) void { 111 | asm volatile ("movw %[sel], %%fs" 112 | : 113 | : [sel] "r" (sel.value), 114 | : "memory" 115 | ); 116 | } 117 | 118 | /// Returns the current value of the gs segment register. 119 | pub fn getGs() x86_64.structures.gdt.SegmentSelector { 120 | return .{ 121 | .value = asm ("mov %%gs, %[ret]" 122 | : [ret] "=r" (-> u16), 123 | ), 124 | }; 125 | } 126 | 127 | /// Reload gs segment register. 128 | pub fn setGs(sel: x86_64.structures.gdt.SegmentSelector) void { 129 | asm volatile ("movw %[sel], %%gs" 130 | : 131 | : [sel] "r" (sel.value), 132 | : "memory" 133 | ); 134 | } 135 | 136 | /// Swap `KernelGsBase` MSR and `GsBase` MSR. 137 | pub fn swapGs() void { 138 | asm volatile ("swapgs" ::: "memory"); 139 | } 140 | 141 | /// Reads the fs segment base address 142 | /// 143 | /// ## Exceptions 144 | /// 145 | /// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`. 146 | pub fn readFsBase() u64 { 147 | return asm ("rdfsbase %[ret]" 148 | : [ret] "=r" (-> u64), 149 | ); 150 | } 151 | 152 | /// Writes the fs segment base address 153 | /// 154 | /// ## Exceptions 155 | /// 156 | /// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`. 157 | /// 158 | /// The caller must ensure that this write operation has no unsafe side 159 | /// effects, as the fs segment base address is often used for thread 160 | /// local storage. 161 | pub fn writeFsBase(value: u64) void { 162 | asm volatile ("wrfsbase %[val]" 163 | : 164 | : [val] "r" (value), 165 | ); 166 | } 167 | 168 | /// Reads the gs segment base address 169 | /// 170 | /// ## Exceptions 171 | /// 172 | /// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`. 173 | pub fn readGsBase() u64 { 174 | return asm ("rdgsbase %[ret]" 175 | : [ret] "=r" (-> u64), 176 | ); 177 | } 178 | 179 | /// Writes the gs segment base address 180 | /// 181 | /// ## Exceptions 182 | /// 183 | /// If `CR4.fsgsbase` is not set, this instruction will throw an `#UD`. 184 | /// 185 | /// The caller must ensure that this write operation has no unsafe side 186 | /// effects, as the gs segment base address might be in use. 187 | pub fn writeGsBase(value: u64) void { 188 | asm volatile ("wrgsbase %[val]" 189 | : 190 | : [val] "r" (value), 191 | ); 192 | } 193 | 194 | comptime { 195 | std.testing.refAllDecls(@This()); 196 | } 197 | -------------------------------------------------------------------------------- /src/additional/pic8259.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | 5 | const port = x86_64.structures.port.Portu8; 6 | 7 | /// Command sent to begin PIC initialization. 8 | const CMD_INIT: u8 = 0x11; 9 | 10 | /// Command sent to acknowledge an interrupt. 11 | const CMD_END_INTERRUPT: u8 = 0x20; 12 | 13 | /// The mode in which we want to run our PICs. 
14 | const MODE_8086: u8 = 0x01; 15 | 16 | const DEFAULT_PRIMARY_MASK: u8 = blk: { 17 | var temp = PicPrimaryInterruptMask.allMasked(); 18 | temp.chain = false; 19 | break :blk temp.toU8(); 20 | }; 21 | const DEFAULT_SECONDARY_MASK: u8 = PicSecondaryInterruptMask.allMasked().toU8(); 22 | 23 | const PRIMARY_COMMAND_PORT: port = port.init(0x20); 24 | const PRIMARY_DATA_PORT: port = port.init(0x21); 25 | const SECONDARY_COMMAND_PORT: port = port.init(0xA0); 26 | const SECONDARY_DATA_PORT: port = port.init(0xA1); 27 | 28 | pub const SimplePic = struct { 29 | primary_interrupt_offset: u8, 30 | secondary_interrupt_offset: u8, 31 | 32 | /// Initialize both our PICs. We initialize them together, at the same 33 | /// time, because it's traditional to do so, and because I/O operations 34 | /// might not be instantaneous on older processors. 35 | /// 36 | /// NOTE: All interrupts start masked, except the connection from primary to secondary. 37 | pub fn init(primary_interrupt_offset: u8, secondary_interrupt_offset: u8) SimplePic { 38 | // We need to add a delay between writes to our PICs, especially on 39 | // older motherboards. But we don't necessarily have any kind of 40 | // timers yet, because most of them require interrupts. Various 41 | // older versions of Linux and other PC operating systems have 42 | // worked around this by writing garbage data to port 0x80, which 43 | // allegedly takes long enough to make everything work on most 44 | // hardware. 45 | const wait_port = port.init(0x80); 46 | 47 | // Tell each PIC that we're going to send it a three-byte 48 | // initialization sequence on its data port. 49 | PRIMARY_COMMAND_PORT.write(CMD_INIT); 50 | wait_port.write(0); 51 | SECONDARY_COMMAND_PORT.write(CMD_INIT); 52 | wait_port.write(0); 53 | 54 | // Byte 1: Set up our base offsets. 55 | PRIMARY_DATA_PORT.write(primary_interrupt_offset); 56 | wait_port.write(0); 57 | SECONDARY_DATA_PORT.write(secondary_interrupt_offset); 58 | wait_port.write(0); 59 | 60 | // Byte 2: Configure chaining between PIC1 and PIC2. 61 | PRIMARY_DATA_PORT.write(4); 62 | wait_port.write(0); 63 | SECONDARY_DATA_PORT.write(2); 64 | wait_port.write(0); 65 | 66 | // Byte 3: Set our mode. 
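        // (With automatic EOI disabled in the ICW4 byte, every handled
        // interrupt must later be acknowledged via `notifyEndOfInterrupt`.)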
67 | PRIMARY_DATA_PORT.write(MODE_8086); 68 | wait_port.write(0); 69 | SECONDARY_DATA_PORT.write(MODE_8086); 70 | wait_port.write(0); 71 | 72 | // Set the default interrupt masks 73 | PRIMARY_DATA_PORT.write(DEFAULT_PRIMARY_MASK); 74 | SECONDARY_DATA_PORT.write(DEFAULT_SECONDARY_MASK); 75 | 76 | return .{ 77 | .primary_interrupt_offset = primary_interrupt_offset, 78 | .secondary_interrupt_offset = secondary_interrupt_offset, 79 | }; 80 | } 81 | 82 | fn handlesInterrupt(offset: u8, interrupt_id: u8) bool { 83 | return offset <= interrupt_id and interrupt_id < offset + 8; 84 | } 85 | 86 | /// Figure out which (if any) PICs in our chain need to know about this interrupt 87 | pub fn notifyEndOfInterrupt(self: SimplePic, interrupt_id: u8) void { 88 | if (handlesInterrupt(self.secondary_interrupt_offset, interrupt_id)) { 89 | SECONDARY_COMMAND_PORT.write(CMD_END_INTERRUPT); 90 | PRIMARY_COMMAND_PORT.write(CMD_END_INTERRUPT); 91 | } else if (handlesInterrupt(self.primary_interrupt_offset, interrupt_id)) { 92 | PRIMARY_COMMAND_PORT.write(CMD_END_INTERRUPT); 93 | } 94 | } 95 | 96 | pub fn rawGetPrimaryInterruptMask() PicPrimaryInterruptMask { 97 | return PicPrimaryInterruptMask.fromU8(PRIMARY_DATA_PORT.read()); 98 | } 99 | 100 | pub fn rawSetPrimaryInterruptMask(mask: PicPrimaryInterruptMask) void { 101 | PRIMARY_DATA_PORT.write(mask.toU8()); 102 | } 103 | 104 | pub fn rawGetSecondaryInterruptMask() PicSecondaryInterruptMask { 105 | return PicSecondaryInterruptMask.fromU8(SECONDARY_DATA_PORT.read()); 106 | } 107 | 108 | pub fn rawSetSecondaryInterruptMask(mask: PicSecondaryInterruptMask) void { 109 | SECONDARY_DATA_PORT.write(mask.toU8()); 110 | } 111 | 112 | pub fn isInterruptMasked(self: SimplePic, interrupt: PicInterrupt) bool { 113 | _ = self; 114 | 115 | return switch (interrupt) { 116 | // Primary 117 | .Timer => rawGetPrimaryInterruptMask().timer, 118 | .Keyboard => rawGetPrimaryInterruptMask().keyboard, 119 | .Chain => rawGetPrimaryInterruptMask().chain, 120 | .SerialPort2 => rawGetPrimaryInterruptMask().serial_port_2, 121 | .SerialPort1 => rawGetPrimaryInterruptMask().serial_port_1, 122 | .ParallelPort23 => rawGetPrimaryInterruptMask().parallel_port_23, 123 | .FloppyDisk => rawGetPrimaryInterruptMask().floppy_disk, 124 | .ParallelPort1 => rawGetPrimaryInterruptMask().parallel_port_1, 125 | 126 | // Secondary 127 | .RealTimeClock => rawGetSecondaryInterruptMask().real_time_clock, 128 | .Acpi => rawGetSecondaryInterruptMask().acpi, 129 | .Available1 => rawGetSecondaryInterruptMask().available_1, 130 | .Available2 => rawGetSecondaryInterruptMask().available_2, 131 | .Mouse => rawGetSecondaryInterruptMask().mouse, 132 | .CoProcessor => rawGetSecondaryInterruptMask().co_processor, 133 | .PrimaryAta => rawGetSecondaryInterruptMask().primary_ata, 134 | .SecondaryAta => rawGetSecondaryInterruptMask().secondary_ata, 135 | }; 136 | } 137 | 138 | fn isPrimaryPic(interrupt: PicInterrupt) bool { 139 | return switch (interrupt) { 140 | .Timer, .Keyboard, .Chain, .SerialPort2, .SerialPort1, .ParallelPort23, .FloppyDisk, .ParallelPort1 => true, 141 | else => false, 142 | }; 143 | } 144 | 145 | pub fn setInterruptMask(self: SimplePic, interrupt: PicInterrupt, mask: bool) void { 146 | _ = self; 147 | if (isPrimaryPic(interrupt)) { 148 | var current_mask = rawGetPrimaryInterruptMask(); 149 | switch (interrupt) { 150 | .Timer => current_mask.timer = mask, 151 | .Keyboard => current_mask.keyboard = mask, 152 | .Chain => current_mask.chain = mask, 153 | .SerialPort2 => current_mask.serial_port_2 = mask, 154 | 
.SerialPort1 => current_mask.serial_port_1 = mask, 155 | .ParallelPort23 => current_mask.parallel_port_23 = mask, 156 | .FloppyDisk => current_mask.floppy_disk = mask, 157 | .ParallelPort1 => current_mask.parallel_port_1 = mask, 158 | else => unreachable, 159 | } 160 | rawSetPrimaryInterruptMask(current_mask); 161 | } else { 162 | var current_mask = rawGetSecondaryInterruptMask(); 163 | switch (interrupt) { 164 | .RealTimeClock => current_mask.real_time_clock = mask, 165 | .Acpi => current_mask.acpi = mask, 166 | .Available1 => current_mask.available_1 = mask, 167 | .Available2 => current_mask.available_2 = mask, 168 | .Mouse => current_mask.mouse = mask, 169 | .CoProcessor => current_mask.co_processor = mask, 170 | .PrimaryAta => current_mask.primary_ata = mask, 171 | .SecondaryAta => current_mask.secondary_ata = mask, 172 | else => unreachable, 173 | } 174 | rawSetSecondaryInterruptMask(current_mask); 175 | } 176 | } 177 | 178 | comptime { 179 | std.testing.refAllDecls(@This()); 180 | } 181 | }; 182 | 183 | pub const PicInterrupt = enum { 184 | Timer, 185 | Keyboard, 186 | Chain, 187 | SerialPort2, 188 | SerialPort1, 189 | ParallelPort23, 190 | FloppyDisk, 191 | ParallelPort1, 192 | RealTimeClock, 193 | Acpi, 194 | Available1, 195 | Available2, 196 | Mouse, 197 | CoProcessor, 198 | PrimaryAta, 199 | SecondaryAta, 200 | }; 201 | 202 | pub const PicPrimaryInterruptMask = packed struct { 203 | timer: bool, 204 | keyboard: bool, 205 | chain: bool, 206 | serial_port_2: bool, 207 | serial_port_1: bool, 208 | parallel_port_23: bool, 209 | floppy_disk: bool, 210 | parallel_port_1: bool, 211 | 212 | pub fn noneMasked() PicPrimaryInterruptMask { 213 | return fromU8(0); 214 | } 215 | 216 | pub fn allMasked() PicPrimaryInterruptMask { 217 | return fromU8(0b11111111); 218 | } 219 | 220 | pub fn toU8(value: PicPrimaryInterruptMask) u8 { 221 | return @bitCast(u8, value); 222 | } 223 | 224 | pub fn fromU8(value: u8) PicPrimaryInterruptMask { 225 | return @bitCast(PicPrimaryInterruptMask, value); 226 | } 227 | 228 | test { 229 | std.testing.refAllDecls(@This()); 230 | try std.testing.expectEqual(@bitSizeOf(u8), @bitSizeOf(PicPrimaryInterruptMask)); 231 | try std.testing.expectEqual(@sizeOf(u8), @sizeOf(PicPrimaryInterruptMask)); 232 | } 233 | }; 234 | 235 | pub const PicSecondaryInterruptMask = packed struct { 236 | real_time_clock: bool, 237 | acpi: bool, 238 | available_1: bool, 239 | available_2: bool, 240 | mouse: bool, 241 | co_processor: bool, 242 | primary_ata: bool, 243 | secondary_ata: bool, 244 | 245 | pub fn noneMasked() PicSecondaryInterruptMask { 246 | return fromU8(0); 247 | } 248 | 249 | pub fn allMasked() PicSecondaryInterruptMask { 250 | return fromU8(0b11111111); 251 | } 252 | 253 | pub fn toU8(value: PicSecondaryInterruptMask) u8 { 254 | return @bitCast(u8, value); 255 | } 256 | 257 | pub fn fromU8(value: u8) PicSecondaryInterruptMask { 258 | return @bitCast(PicSecondaryInterruptMask, value); 259 | } 260 | 261 | test { 262 | std.testing.refAllDecls(@This()); 263 | try std.testing.expectEqual(@bitSizeOf(u8), @bitSizeOf(PicSecondaryInterruptMask)); 264 | try std.testing.expectEqual(@sizeOf(u8), @sizeOf(PicSecondaryInterruptMask)); 265 | } 266 | }; 267 | 268 | comptime { 269 | std.testing.refAllDecls(@This()); 270 | } 271 | -------------------------------------------------------------------------------- /src/structures/paging/page_table.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../../index.zig"); 2 | const bitjuggle = 
@import("bitjuggle"); 3 | const std = @import("std"); 4 | const formatWithoutFields = @import("../../common.zig").formatWithoutFields; 5 | 6 | const PageSize = x86_64.structures.paging.PageSize; 7 | 8 | /// The number of entries in a page table. 9 | pub const PAGE_TABLE_ENTRY_COUNT: usize = 512; 10 | 11 | /// The error returned by the `PageTableEntry::frame` method. 12 | pub const FrameError = error{ 13 | /// The entry does not have the `present` flag set, so it isn't currently mapped to a frame. 14 | FrameNotPresent, 15 | /// The entry does have the `huge_page` flag set. The `frame` method has a standard 4KiB frame 16 | /// as return type, so a huge frame can't be returned. 17 | HugeFrame, 18 | }; 19 | 20 | /// A 64-bit page table entry. 21 | pub const PageTableEntry = packed struct { 22 | entry: u64, 23 | 24 | /// Creates an unused page table entry. 25 | pub fn init() PageTableEntry { 26 | return .{ .entry = 0 }; 27 | } 28 | 29 | /// Returns whether this entry is zero. 30 | pub fn isUnused(self: PageTableEntry) bool { 31 | return self.entry == 0; 32 | } 33 | 34 | /// Sets this entry to zero. 35 | pub fn setUnused(self: *PageTableEntry) void { 36 | self.entry = 0; 37 | } 38 | 39 | /// Returns the flags of this entry. 40 | pub fn getFlags(self: PageTableEntry) PageTableFlags { 41 | return PageTableFlags.fromU64(self.entry); 42 | } 43 | 44 | /// Returns the physical address mapped by this entry, might be zero. 45 | pub fn getAddr(self: PageTableEntry) x86_64.PhysAddr { 46 | // Unchecked is used as the mask ensures validity 47 | return x86_64.PhysAddr.initUnchecked(self.entry & 0x000f_ffff_ffff_f000); 48 | } 49 | 50 | /// Returns the physical frame mapped by this entry. 51 | /// 52 | /// Returns the following errors: 53 | /// 54 | /// - `FrameError::FrameNotPresent` if the entry doesn't have the `present` flag set. 55 | /// - `FrameError::HugeFrame` if the entry has the `huge_page` flag set (for huge pages the 56 | /// `addr` function must be used) 57 | pub fn getFrame(self: PageTableEntry) FrameError!x86_64.structures.paging.PhysFrame { 58 | const flags = self.getFlags(); 59 | 60 | if (!flags.present) { 61 | return FrameError.FrameNotPresent; 62 | } 63 | 64 | if (flags.huge) { 65 | return FrameError.HugeFrame; 66 | } 67 | 68 | return x86_64.structures.paging.PhysFrame.containingAddress(self.getAddr()); 69 | } 70 | 71 | /// Map the entry to the specified physical address 72 | pub fn setAddr(self: *PageTableEntry, addr: x86_64.PhysAddr) void { 73 | std.debug.assert(addr.isAligned(PageSize.Size4KiB.bytes())); 74 | self.entry = addr.value | self.getFlags().toU64(); 75 | } 76 | 77 | /// Map the entry to the specified physical frame with the specified flags. 78 | pub fn setFrame(self: *PageTableEntry, frame: x86_64.structures.paging.PhysFrame, flags: PageTableFlags) void { 79 | std.debug.assert(!self.getFlags().huge); 80 | self.setAddr(frame.start_address); 81 | self.setFlags(flags); 82 | } 83 | 84 | /// Sets the flags of this entry. 
85 | pub fn setFlags(self: *PageTableEntry, flags: PageTableFlags) void { 86 | self.entry = self.getAddr().value | flags.toU64(); 87 | } 88 | 89 | pub fn format(value: PageTableEntry, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 90 | _ = fmt; 91 | _ = options; 92 | try writer.print("PageTableEntry({}, Flags = 0b{b})", .{ value.getAddr(), value.getFlags().toU64() }); 93 | } 94 | 95 | test { 96 | std.testing.refAllDecls(@This()); 97 | try std.testing.expectEqual(@bitSizeOf(u64), @bitSizeOf(PageTableEntry)); 98 | try std.testing.expectEqual(@sizeOf(u64), @sizeOf(PageTableEntry)); 99 | } 100 | }; 101 | 102 | pub const PageTableFlags = packed struct { 103 | /// Specifies whether the mapped frame or page table is loaded in memory. 104 | present: bool = false, 105 | 106 | /// Controls whether writes to the mapped frames are allowed. 107 | /// 108 | /// If this bit is unset in a level 1 page table entry, the mapped frame is read-only. 109 | /// If this bit is unset in a higher level page table entry, the complete range of mapped 110 | /// pages is read-only. 111 | writeable: bool = false, 112 | 113 | /// Controls whether accesses from userspace (i.e. ring 3) are permitted. 114 | user_accessible: bool = false, 115 | 116 | /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back” 117 | /// policy is used. 118 | write_through: bool = false, 119 | 120 | /// Disables caching for the pointed entry, if caching is otherwise enabled. 121 | no_cache: bool = false, 122 | 123 | /// Set by the CPU when the mapped frame or page table is accessed. 124 | accessed: bool = false, 125 | 126 | /// Set by the CPU on a write to the mapped frame. 127 | dirty: bool = false, 128 | 129 | /// Specifies that the entry maps a huge frame instead of a page table. Only allowed in 130 | /// P2 or P3 tables. 131 | huge: bool = false, 132 | 133 | /// Indicates that the mapping is present in all address spaces, so it isn't flushed from 134 | /// the TLB on an address space switch. 135 | global: bool = false, 136 | 137 | /// Available to the OS, can be used to store additional data, e.g. custom flags. 138 | bit_9_11: u3 = 0, 139 | 140 | z_reserved12_15: u4 = 0, 141 | z_reserved16_47: u32 = 0, 142 | z_reserved48_51: u4 = 0, 143 | 144 | /// Available to the OS, can be used to store additional data, e.g. custom flags. 145 | bit_52_62: u11 = 0, 146 | 147 | /// Forbid code execution from the mapped frames. 148 | /// 149 | /// Can only be used when the no-execute page protection feature is enabled in the EFER 150 | /// register.
151 | no_execute: bool = false, 152 | 153 | pub fn sanitizeForParent(self: PageTableFlags) PageTableFlags { 154 | var parent_flags = PageTableFlags{}; 155 | if (self.present) parent_flags.present = true; 156 | if (self.writeable) parent_flags.writeable = true; 157 | if (self.user_accessible) parent_flags.user_accessible = true; 158 | return parent_flags; 159 | } 160 | 161 | pub fn fromU64(value: u64) PageTableFlags { 162 | return @bitCast(PageTableFlags, value & ALL_NOT_RESERVED); 163 | } 164 | 165 | pub fn toU64(self: PageTableFlags) u64 { 166 | return @bitCast(u64, self) & ALL_NOT_RESERVED; 167 | } 168 | 169 | const ALL_RESERVED: u64 = blk: { 170 | var flags = std.mem.zeroes(PageTableFlags); 171 | flags.z_reserved12_15 = std.math.maxInt(u4); 172 | flags.z_reserved16_47 = std.math.maxInt(u32); 173 | flags.z_reserved48_51 = std.math.maxInt(u4); 174 | break :blk @bitCast(u64, flags); 175 | }; 176 | 177 | const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED; 178 | 179 | pub fn format(value: PageTableFlags, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 180 | _ = fmt; 181 | return formatWithoutFields( 182 | value, 183 | options, 184 | writer, 185 | &.{ "z_reserved12_15", "z_reserved16_47", "z_reserved48_51" }, 186 | ); 187 | } 188 | 189 | test { 190 | try std.testing.expectEqual(@bitSizeOf(u64), @bitSizeOf(PageTableFlags)); 191 | try std.testing.expectEqual(@sizeOf(u64), @sizeOf(PageTableFlags)); 192 | } 193 | 194 | comptime { 195 | std.testing.refAllDecls(@This()); 196 | } 197 | }; 198 | 199 | /// Represents a page table. 200 | /// Always page-sized. 201 | /// **IMPORTANT** Must be align(4096) 202 | pub const PageTable = extern struct { 203 | entries: [PAGE_TABLE_ENTRY_COUNT]PageTableEntry = [_]PageTableEntry{PageTableEntry.init()} ** PAGE_TABLE_ENTRY_COUNT, 204 | 205 | /// Clears all entries. 206 | pub fn zero(self: *PageTable) void { 207 | for (self.entries) |*entry| { 208 | entry.setUnused(); 209 | } 210 | } 211 | 212 | pub fn getAtIndex(self: *PageTable, index: PageTableIndex) *PageTableEntry { 213 | return &self.entries[index.value]; 214 | } 215 | 216 | comptime { 217 | std.testing.refAllDecls(@This()); 218 | } 219 | }; 220 | 221 | pub const PageTableLevel = enum(u6) { 222 | /// Represents the level for a page table. 223 | one = 1, 224 | /// Represents the level for a page directory. 225 | two, 226 | /// Represents the level for a page-directory pointer. 227 | three, 228 | /// Represents the level for a page-map level-4. 229 | four, 230 | 231 | /// Returns the next lower level or `null` for level 1 232 | pub fn nextLowerLevel(self: PageTableLevel) ?PageTableLevel { 233 | return switch (self) { 234 | .one => null, 235 | .two => .one, 236 | .three => .two, 237 | .four => .three, 238 | }; 239 | } 240 | 241 | /// Returns the alignment for the address space described by a table of this level. 242 | pub fn tableAddressSpaceAlignment(self: PageTableLevel) u64 { 243 | return @as(u64, 1) << (@enumToInt(self) * 9 + 12); 244 | } 245 | 246 | /// Returns the alignment for the address space described by an entry in a table of this level. 247 | pub fn entryAddressSpaceAlignment(self: PageTableLevel) u64 { 248 | return @as(u64, 1) << (((@enumToInt(self) - 1) * 9) + 12); 249 | } 250 | 251 | comptime { 252 | std.testing.refAllDecls(@This()); 253 | } 254 | }; 255 | 256 | /// A 9-bit index into a page table. 257 | pub const PageTableIndex = struct { 258 | value: u9, 259 | 260 | /// Creates a new index from the given `u9`.
261 | pub fn init(index: u9) PageTableIndex { 262 | return .{ .value = index }; 263 | } 264 | 265 | pub fn format(value: PageTableIndex, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 266 | _ = options; 267 | _ = fmt; 268 | try writer.print("PageTableIndex({})", .{value.value}); 269 | } 270 | 271 | comptime { 272 | std.testing.refAllDecls(@This()); 273 | } 274 | }; 275 | 276 | /// A 12-bit offset into a 4KiB Page. 277 | pub const PageOffset = struct { 278 | value: u12, 279 | 280 | /// Creates a new offset from the given `u12`. 281 | pub fn init(offset: u12) PageOffset { 282 | return .{ .value = offset }; 283 | } 284 | 285 | pub fn format(value: PageOffset, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 286 | _ = options; 287 | _ = fmt; 288 | try writer.print("PageOffset({})", .{value.value}); 289 | } 290 | 291 | comptime { 292 | std.testing.refAllDecls(@This()); 293 | } 294 | }; 295 | 296 | comptime { 297 | std.testing.refAllDecls(@This()); 298 | } 299 | -------------------------------------------------------------------------------- /src/addr.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | 5 | const PageTableIndex = x86_64.structures.paging.PageTableIndex; 6 | const PageTableLevel = x86_64.structures.paging.PageTableLevel; 7 | const PageOffset = x86_64.structures.paging.PageOffset; 8 | 9 | /// A canonical 64-bit virtual memory address. 10 | /// 11 | /// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need 12 | /// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterion 13 | /// are called “canonical”. This type guarantees that it always represents a canonical address. 14 | pub const VirtAddr = packed struct { 15 | value: u64, 16 | 17 | /// Tries to create a new canonical virtual address. 18 | /// 19 | /// If required this function performs sign extension of bit 47 to make the address canonical. 20 | pub fn init(addr: u64) error{VirtAddrNotValid}!VirtAddr { 21 | return switch (bitjuggle.getBits(addr, 47, 17)) { 22 | 0, 0x1ffff => VirtAddr{ .value = addr }, 23 | 1 => initTruncate(addr), 24 | else => return error.VirtAddrNotValid, 25 | }; 26 | } 27 | 28 | /// Creates a new canonical virtual address. 29 | /// 30 | /// If required this function performs sign extension of bit 47 to make the address canonical. 31 | /// 32 | /// ## Panics 33 | /// This function panics if the bits in the range 48 to 64 contain data (i.e. they are neither zero nor a sign extension of bit 47). 34 | pub fn initPanic(addr: u64) VirtAddr { 35 | return init(addr) catch @panic("address passed to VirtAddr.initPanic must not contain any data in bits 48 to 64"); 36 | } 37 | 38 | /// Creates a new canonical virtual address, throwing out bits 48..64. 39 | /// 40 | /// If required this function performs sign extension of bit 47 to make the address canonical. 41 | pub fn initTruncate(addr: u64) VirtAddr { 42 | // By doing the right shift as a signed operation (on an i64), it will 43 | // sign extend the value, repeating the leftmost bit.
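        // For example, 0x0000_8000_0000_0000 (bit 47 set, upper bits clear)
        // becomes 0xffff_8000_0000_0000, while an address with bit 47 clear
        // simply has its upper 16 bits zeroed.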
44 | 45 | // Split into individual ops: 46 | // const no_high_bits = addr << 16; 47 | // const as_i64 = @bitCast(i64, no_high_bits); 48 | // const sign_extend_high_bits = as_i64 >> 16; 49 | // const value = @bitCast(u64, sign_extend_high_bits); 50 | return VirtAddr{ .value = @bitCast(u64, @bitCast(i64, (addr << 16)) >> 16) }; 51 | } 52 | 53 | /// Creates a new virtual address, without any checks. 54 | pub fn initUnchecked(addr: u64) VirtAddr { 55 | return .{ .value = addr }; 56 | } 57 | 58 | /// Creates a virtual address that points to `0`. 59 | pub fn zero() VirtAddr { 60 | return .{ .value = 0 }; 61 | } 62 | 63 | /// Convenience method for checking if a virtual address is null. 64 | pub fn isNull(self: VirtAddr) bool { 65 | return self.value == 0; 66 | } 67 | 68 | /// Creates a virtual address from the given pointer. 69 | /// Panics if the given pointer is not a valid virtual address; this should never happen in practice. 70 | pub fn fromPtr(ptr: anytype) VirtAddr { 71 | comptime if (@typeInfo(@TypeOf(ptr)) != .Pointer) @compileError("not a pointer"); 72 | return initPanic(@ptrToInt(ptr)); 73 | } 74 | 75 | /// Converts the address to a pointer. 76 | pub fn toPtr(self: VirtAddr, comptime T: type) T { 77 | return @intToPtr(T, self.value); 78 | } 79 | 80 | /// Aligns the virtual address upwards to the given alignment. 81 | /// The alignment must be a power of 2 and greater than 0. 82 | pub fn alignUp(self: VirtAddr, alignment: usize) VirtAddr { 83 | return .{ .value = std.mem.alignForward(self.value, alignment) }; 84 | } 85 | 86 | /// Aligns the virtual address downwards to the given alignment. 87 | /// The alignment must be a power of 2 and greater than 0. 88 | pub fn alignDown(self: VirtAddr, alignment: usize) VirtAddr { 89 | return .{ .value = std.mem.alignBackward(self.value, alignment) }; 90 | } 91 | 92 | /// Checks whether the virtual address has the given alignment. 93 | /// The alignment must be a power of 2 and greater than 0. 94 | pub fn isAligned(self: VirtAddr, alignment: usize) bool { 95 | return std.mem.isAligned(self.value, alignment); 96 | } 97 | 98 | /// Returns the 12-bit page offset of this virtual address. 99 | pub fn pageOffset(self: VirtAddr) PageOffset { 100 | return PageOffset.init(@truncate(u12, self.value)); 101 | } 102 | 103 | /// Returns the 9-bit level 1 page table index. 104 | pub fn p1Index(self: VirtAddr) PageTableIndex { 105 | return PageTableIndex.init(@truncate(u9, self.value >> 12)); 106 | } 107 | 108 | /// Returns the 9-bit level 2 page table index. 109 | pub fn p2Index(self: VirtAddr) PageTableIndex { 110 | return PageTableIndex.init(@truncate(u9, self.value >> 21)); 111 | } 112 | 113 | /// Returns the 9-bit level 3 page table index. 114 | pub fn p3Index(self: VirtAddr) PageTableIndex { 115 | return PageTableIndex.init(@truncate(u9, self.value >> 30)); 116 | } 117 | 118 | /// Returns the 9-bit level 4 page table index. 119 | pub fn p4Index(self: VirtAddr) PageTableIndex { 120 | return PageTableIndex.init(@truncate(u9, self.value >> 39)); 121 | } 122 | 123 | /// Returns the 9-bit page table index for the given page table level.
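    /// For example, `pageTableIndex(.one)` is equivalent to `p1Index` and `pageTableIndex(.four)` to `p4Index`.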
124 | pub fn pageTableIndex(self: VirtAddr, level: PageTableLevel) PageTableIndex { 125 | return PageTableIndex.init(@truncate(u9, self.value >> 12 >> ((@enumToInt(level) - 1) * 9))); 126 | } 127 | 128 | pub fn format(value: VirtAddr, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 129 | _ = fmt; 130 | _ = options; 131 | try writer.print("VirtAddr(0x{x})", .{value.value}); 132 | } 133 | 134 | test { 135 | std.testing.refAllDecls(@This()); 136 | try std.testing.expectEqual(@bitSizeOf(u64), @bitSizeOf(VirtAddr)); 137 | try std.testing.expectEqual(@sizeOf(u64), @sizeOf(VirtAddr)); 138 | } 139 | }; 140 | 141 | test "VirtAddr.initTruncate" { 142 | var virtAddr = VirtAddr.initTruncate(0); 143 | try std.testing.expectEqual(@as(u64, 0), virtAddr.value); 144 | 145 | virtAddr = VirtAddr.initTruncate(1 << 47); 146 | try std.testing.expectEqual(@truncate(u64, 0xfffff << 47), virtAddr.value); 147 | 148 | virtAddr = VirtAddr.initTruncate(123); 149 | try std.testing.expectEqual(@as(u64, 123), virtAddr.value); 150 | 151 | virtAddr = VirtAddr.initTruncate(123 << 47); 152 | try std.testing.expectEqual(@truncate(u64, 0xfffff << 47), virtAddr.value); 153 | } 154 | 155 | test "VirtAddr.init" { 156 | var virtAddr = try VirtAddr.init(0); 157 | try std.testing.expectEqual(@as(u64, 0), virtAddr.value); 158 | 159 | virtAddr = try VirtAddr.init(1 << 47); 160 | try std.testing.expectEqual(@truncate(u64, 0xfffff << 47), virtAddr.value); 161 | 162 | virtAddr = try VirtAddr.init(123); 163 | try std.testing.expectEqual(@as(u64, 123), virtAddr.value); 164 | 165 | try std.testing.expectError(error.VirtAddrNotValid, VirtAddr.init(123 << 47)); 166 | } 167 | 168 | test "VirtAddr.fromPtr" { 169 | var something: usize = undefined; 170 | var somethingelse: usize = undefined; 171 | 172 | var virtAddr = VirtAddr.fromPtr(&something); 173 | try std.testing.expectEqual(@ptrToInt(&something), virtAddr.value); 174 | 175 | virtAddr = VirtAddr.fromPtr(&somethingelse); 176 | try std.testing.expectEqual(@ptrToInt(&somethingelse), virtAddr.value); 177 | } 178 | 179 | test "VirtAddr.toPtr" { 180 | var something: usize = undefined; 181 | 182 | var virtAddr = VirtAddr.fromPtr(&something); 183 | const ptr = virtAddr.toPtr(*usize); 184 | ptr.* = 123; 185 | 186 | try std.testing.expectEqual(@as(usize, 123), something); 187 | } 188 | 189 | test "VirtAddr.pageOffset/Index" { 190 | var something: usize = undefined; 191 | var virtAddr = VirtAddr.fromPtr(&something); 192 | 193 | try std.testing.expectEqual(bitjuggle.getBits(virtAddr.value, 0, 12), virtAddr.pageOffset().value); 194 | try std.testing.expectEqual(bitjuggle.getBits(virtAddr.value, 12, 9), virtAddr.p1Index().value); 195 | try std.testing.expectEqual(bitjuggle.getBits(virtAddr.value, 21, 9), virtAddr.p2Index().value); 196 | try std.testing.expectEqual(bitjuggle.getBits(virtAddr.value, 30, 9), virtAddr.p3Index().value); 197 | try std.testing.expectEqual(bitjuggle.getBits(virtAddr.value, 39, 9), virtAddr.p4Index().value); 198 | } 199 | 200 | /// A 64-bit physical memory address. 201 | /// 202 | /// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need 203 | /// to be zero. This type guarantees that it always represents a valid physical address. 204 | pub const PhysAddr = packed struct { 205 | value: u64, 206 | 207 | /// Tries to create a new physical address. 208 | /// 209 | /// Fails if any bits in the range 52 to 64 are set. 
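    /// For example, `PhysAddr.init(0x1000)` succeeds, while `PhysAddr.init(1 << 52)` returns `error.PhysAddrNotValid`.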
210 | pub fn init(addr: u64) error{PhysAddrNotValid}!PhysAddr { 211 | return switch (bitjuggle.getBits(addr, 52, 12)) { 212 | 0 => PhysAddr{ .value = addr }, 213 | else => return error.PhysAddrNotValid, 214 | }; 215 | } 216 | 217 | /// Creates a new physical address. 218 | /// 219 | /// ## Panics 220 | /// This function panics if a bit in the range 52 to 64 is set. 221 | pub fn initPanic(addr: u64) PhysAddr { 222 | return init(addr) catch @panic("physical addresses must not have any bits in the range 52 to 64 set"); 223 | } 224 | 225 | const TRUNCATE_CONST: u64 = 1 << 52; 226 | 227 | /// Creates a new physical address, throwing bits 52..64 away. 228 | pub fn initTruncate(addr: u64) PhysAddr { 229 | return PhysAddr{ .value = addr % TRUNCATE_CONST }; 230 | } 231 | 232 | /// Creates a new physical address, without any checks. 233 | pub fn initUnchecked(addr: u64) PhysAddr { 234 | return .{ .value = addr }; 235 | } 236 | 237 | /// Creates a physical address that points to `0`. 238 | pub fn zero() PhysAddr { 239 | return .{ .value = 0 }; 240 | } 241 | 242 | /// Convenience method for checking if a physical address is null. 243 | pub fn isNull(self: PhysAddr) bool { 244 | return self.value == 0; 245 | } 246 | 247 | /// Aligns the physical address upwards to the given alignment. 248 | /// The alignment must be a power of 2 and greater than 0. 249 | pub fn alignUp(self: PhysAddr, alignment: usize) PhysAddr { 250 | return .{ .value = std.mem.alignForward(self.value, alignment) }; 251 | } 252 | 253 | /// Aligns the physical address downwards to the given alignment. 254 | /// The alignment must be a power of 2 and greater than 0. 255 | pub fn alignDown(self: PhysAddr, alignment: usize) PhysAddr { 256 | return .{ .value = std.mem.alignBackward(self.value, alignment) }; 257 | } 258 | 259 | /// Checks whether the physical address has the given alignment. 260 | /// The alignment must be a power of 2 and greater than 0. 261 | pub fn isAligned(self: PhysAddr, alignment: usize) bool { 262 | return std.mem.isAligned(self.value, alignment); 263 | } 264 | 265 | pub fn format(value: PhysAddr, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 266 | _ = fmt; 267 | _ = options; 268 | try writer.print("PhysAddr(0x{x})", .{value.value}); 269 | } 270 | 271 | test { 272 | std.testing.refAllDecls(@This()); 273 | try std.testing.expectEqual(@bitSizeOf(u64), @bitSizeOf(PhysAddr)); 274 | try std.testing.expectEqual(@sizeOf(u64), @sizeOf(PhysAddr)); 275 | } 276 | }; 277 | 278 | comptime { 279 | std.testing.refAllDecls(@This()); 280 | } 281 | -------------------------------------------------------------------------------- /src/registers/model_specific.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | const formatWithoutFields = @import("../common.zig").formatWithoutFields; 5 | 6 | /// The Extended Feature Enable Register. 7 | pub const Efer = packed struct { 8 | 9 | /// Enables the `syscall` and `sysret` instructions. 10 | system_call_extensions: bool, 11 | 12 | z_reserved1_7: u7, 13 | 14 | /// Activates long mode, requires activating paging. 15 | long_mode_enable: bool, 16 | 17 | z_reserved9: bool, 18 | 19 | /// Indicates that long mode is active. 20 | long_mode_active: bool, 21 | 22 | /// Enables the no-execute page-protection feature. 23 | no_execute_enable: bool, 24 | 25 | /// Enables SVM extensions. 
26 | secure_virtual_machine_enable: bool, 27 | 28 | /// Enable certain limit checks in 64-bit mode. 29 | long_mode_segment_limit: bool, 30 | 31 | /// Enable the `fxsave` and `fxrstor` instructions to execute faster in 64-bit mode. 32 | fast_fxsave_fxrstor: bool, 33 | 34 | /// Changes how the `invlpg` instruction operates on TLB entries of upper-level entries. 35 | translation_cache_extension: bool, 36 | 37 | z_reserved16_31: u16, 38 | z_reserved32_63: u32, 39 | 40 | /// Read the current EFER flags. 41 | pub fn read() Efer { 42 | return Efer.fromU64(REGISTER.read()); 43 | } 44 | 45 | /// Write the EFER flags, preserving reserved values. 46 | /// 47 | /// Preserves the value of reserved fields. 48 | pub fn write(self: Efer) void { 49 | REGISTER.write(self.toU64() | (REGISTER.read() & ALL_RESERVED)); 50 | } 51 | 52 | const REGISTER = Msr(0xC000_0080); 53 | 54 | const ALL_RESERVED: u64 = blk: { 55 | var flags = std.mem.zeroes(Efer); 56 | flags.z_reserved1_7 = std.math.maxInt(u7); 57 | flags.z_reserved9 = true; 58 | flags.z_reserved16_31 = std.math.maxInt(u16); 59 | flags.z_reserved32_63 = std.math.maxInt(u32); 60 | break :blk @bitCast(u64, flags); 61 | }; 62 | 63 | const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED; 64 | 65 | pub fn fromU64(value: u64) Efer { 66 | return @bitCast(Efer, value & ALL_NOT_RESERVED); 67 | } 68 | 69 | pub fn toU64(self: Efer) u64 { 70 | return @bitCast(u64, self) & ALL_NOT_RESERVED; 71 | } 72 | 73 | pub fn format(value: Efer, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 74 | _ = fmt; 75 | return formatWithoutFields( 76 | value, 77 | options, 78 | writer, 79 | &.{ "z_reserved1_7", "z_reserved9", "z_reserved16_31", "z_reserved32_63" }, 80 | ); 81 | } 82 | 83 | test { 84 | try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(Efer)); 85 | try std.testing.expectEqual(@as(usize, 8), @sizeOf(Efer)); 86 | } 87 | 88 | comptime { 89 | std.testing.refAllDecls(@This()); 90 | } 91 | }; 92 | 93 | /// FS.Base Model Specific Register. 94 | pub const FsBase = struct { 95 | const REGISTER = Msr(0xC000_0100); 96 | 97 | /// Read the current FsBase register. 98 | pub fn read() x86_64.VirtAddr { 99 | // We use unchecked here as we assume that the write function did not write an invalid address 100 | return x86_64.VirtAddr.initUnchecked(REGISTER.read()); 101 | } 102 | 103 | /// Write a given virtual address to the FS.Base register. 104 | pub fn write(addr: x86_64.VirtAddr) void { 105 | REGISTER.write(addr.value); 106 | } 107 | 108 | comptime { 109 | std.testing.refAllDecls(@This()); 110 | } 111 | }; 112 | 113 | /// GS.Base Model Specific Register. 114 | pub const GsBase = struct { 115 | const REGISTER = Msr(0xC000_0101); 116 | 117 | /// Read the current GsBase register. 118 | pub fn read() x86_64.VirtAddr { 119 | // We use unchecked here as we assume that the write function did not write an invalid address 120 | return x86_64.VirtAddr.initUnchecked(REGISTER.read()); 121 | } 122 | 123 | /// Write a given virtual address to the GS.Base register. 124 | pub fn write(addr: x86_64.VirtAddr) void { 125 | REGISTER.write(addr.value); 126 | } 127 | 128 | comptime { 129 | std.testing.refAllDecls(@This()); 130 | } 131 | }; 132 | 133 | /// KernelGsBase Model Specific Register. 134 | pub const KernelGsBase = struct { 135 | const REGISTER = Msr(0xC000_0102); 136 | 137 | /// Read the current KernelGsBase register. 
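    /// The `swapgs` instruction exchanges this value with `GsBase`, so it typically holds the kernel's GS base while user code is running.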
138 | pub fn read() x86_64.VirtAddr { 139 | // We use unchecked here as we assume that the write function did not write an invalid address 140 | return x86_64.VirtAddr.initUnchecked(REGISTER.read()); 141 | } 142 | 143 | /// Write a given virtual address to the KernelGsBase register. 144 | pub fn write(addr: x86_64.VirtAddr) void { 145 | REGISTER.write(addr.value); 146 | } 147 | 148 | comptime { 149 | std.testing.refAllDecls(@This()); 150 | } 151 | }; 152 | 153 | /// Syscall Register: STAR 154 | pub const Star = struct { 155 | sysretCsSelector: x86_64.structures.gdt.SegmentSelector, 156 | sysretSsSelector: x86_64.structures.gdt.SegmentSelector, 157 | syscallCsSelector: x86_64.structures.gdt.SegmentSelector, 158 | syscallSsSelector: x86_64.structures.gdt.SegmentSelector, 159 | 160 | const REGISTER = Msr(0xC000_0081); 161 | 162 | /// Read the Ring 0 and Ring 3 segment bases. 163 | pub fn read() Star { 164 | const raw = readRaw(); 165 | return .{ 166 | .sysretCsSelector = .{ .value = raw[0] + 16 }, 167 | .sysretSsSelector = .{ .value = raw[0] + 8 }, 168 | .syscallCsSelector = .{ .value = raw[1] }, 169 | .syscallSsSelector = .{ .value = raw[1] + 8 }, 170 | }; 171 | } 172 | 173 | /// Read the Ring 0 and Ring 3 segment bases. 174 | /// The remaining fields are ignored because they are 175 | /// not valid for long mode. 176 | /// 177 | /// # Returns 178 | /// - Item 0 (SYSRET): The CS selector is set to this field + 16. SS.Sel is set to 179 | /// this field + 8. Because SYSRET always returns to CPL 3, the 180 | /// RPL bits 1:0 should be initialized to 11b. 181 | /// - Item 1 (SYSCALL): This field is copied directly into CS.Sel. SS.Sel is set to 182 | /// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits 183 | /// 33:32 should be initialized to 00b. 184 | pub fn readRaw() [2]u16 { 185 | const val = REGISTER.read(); 186 | return [2]u16{ 187 | bitjuggle.getBits(val, 48, 16), 188 | bitjuggle.getBits(val, 32, 16), 189 | }; 190 | } 191 | 192 | pub const WriteError = error{ 193 | /// Sysret CS and SS is not offset by 8. 194 | InvalidSysretOffset, 195 | /// Syscall CS and SS is not offset by 8. 196 | InvalidSyscallOffset, 197 | /// Sysret's segment must be a Ring3 segment. 198 | SysretNotRing3, 199 | /// Syscall's segment must be a Ring0 segment. 200 | SyscallNotRing0, 201 | }; 202 | 203 | /// Write the Ring 0 and Ring 3 segment bases. 204 | /// The remaining fields are ignored because they are 205 | /// not valid for long mode. 206 | /// This function will fail if the segment selectors are 207 | /// not in the correct offset of each other or if the 208 | /// segment selectors do not have correct privileges. 209 | pub fn write(self: Star) WriteError!void { 210 | if (self.sysretCsSelector.value - 16 != self.sysretSsSelector.value - 8) { 211 | return WriteError.InvalidSysretOffset; 212 | } 213 | if (self.syscallCsSelector.value != self.syscallSsSelector.value - 8) { 214 | return WriteError.InvalidSyscallOffset; 215 | } 216 | if (self.sysretSsSelector.getRpl() != .Ring3) { 217 | return WriteError.SysretNotRing3; 218 | } 219 | if (self.syscallSsSelector.getRpl() != .Ring0) { 220 | return WriteError.SyscallNotRing0; 221 | } 222 | 223 | writeRaw(self.sysretSsSelector.value - 8, self.syscallSsSelector.value); 224 | } 225 | 226 | /// Write the Ring 0 and Ring 3 segment bases. 227 | /// The remaining fields are ignored because they are 228 | /// not valid for long mode. 229 | /// 230 | /// # Parameters 231 | /// - sysret: The CS selector is set to this field + 16. 
SS.Sel is set to 232 | /// this field + 8. Because SYSRET always returns to CPL 3, the 233 | /// RPL bits 1:0 should be initialized to 11b. 234 | /// - syscall: This field is copied directly into CS.Sel. SS.Sel is set to 235 | /// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits 236 | /// 33:32 should be initialized to 00b. 237 | pub fn writeRaw(sysret: u16, syscall: u16) void { 238 | var value: u64 = 0; 239 | bitjuggle.setBits(&value, 48, 16, sysret); 240 | bitjuggle.setBits(&value, 32, 16, syscall); 241 | REGISTER.write(value); 242 | } 243 | 244 | comptime { 245 | std.testing.refAllDecls(@This()); 246 | } 247 | }; 248 | 249 | /// Syscall Register: LSTAR 250 | pub const LStar = struct { 251 | const REGISTER = Msr(0xC000_0082); 252 | 253 | /// Read the current LStar register. 254 | /// This holds the target RIP of a syscall. 255 | pub fn read() x86_64.VirtAddr { 256 | // We use unchecked here as we assume that the write function did not write an invalid address 257 | return x86_64.VirtAddr.initUnchecked(REGISTER.read()); 258 | } 259 | 260 | /// Write a given virtual address to the LStar register. 261 | /// This holds the target RIP of a syscall. 262 | pub fn write(addr: x86_64.VirtAddr) void { 263 | REGISTER.write(addr.value); 264 | } 265 | 266 | comptime { 267 | std.testing.refAllDecls(@This()); 268 | } 269 | }; 270 | 271 | /// Syscall Register: SFMask 272 | pub const SFMask = struct { 273 | const REGISTER = Msr(0xC000_0084); 274 | 275 | /// Read to the SFMask register. 276 | /// The SFMASK register is used to specify which RFLAGS bits 277 | /// are cleared during a SYSCALL. In long mode, SFMASK is used 278 | /// to specify which RFLAGS bits are cleared when SYSCALL is 279 | /// executed. If a bit in SFMASK is set to 1, the corresponding 280 | /// bit in RFLAGS is cleared to 0. If a bit in SFMASK is cleared 281 | /// to 0, the corresponding rFLAGS bit is not modified. 282 | pub fn read() x86_64.registers.RFlags { 283 | return x86_64.registers.RFlags.fromU64(REGISTER.read()); 284 | } 285 | 286 | /// Write to the SFMask register. 287 | /// The SFMASK register is used to specify which RFLAGS bits 288 | /// are cleared during a SYSCALL. In long mode, SFMASK is used 289 | /// to specify which RFLAGS bits are cleared when SYSCALL is 290 | /// executed. If a bit in SFMASK is set to 1, the corresponding 291 | /// bit in RFLAGS is cleared to 0. If a bit in SFMASK is cleared 292 | /// to 0, the corresponding rFLAGS bit is not modified. 
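    ///
    /// A sketch of a common configuration, masking the interrupt flag (RFLAGS
    /// bit 9) so that interrupts are disabled on `syscall` entry:
    ///
    ///     SFMask.write(x86_64.registers.RFlags.fromU64(1 << 9));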
293 | pub fn write(value: x86_64.registers.RFlags) void { 294 | REGISTER.write(value.toU64()); 295 | } 296 | 297 | comptime { 298 | std.testing.refAllDecls(@This()); 299 | } 300 | }; 301 | 302 | fn Msr(comptime register: u32) type { 303 | return struct { 304 | pub inline fn read() u64 { 305 | var high: u32 = undefined; 306 | var low: u32 = undefined; 307 | 308 | asm volatile ("rdmsr" 309 | : [low] "={eax}" (low), 310 | [high] "={edx}" (high), 311 | : [reg] "{ecx}" (register), 312 | : "memory" 313 | ); 314 | 315 | return (@as(u64, high) << 32) | @as(u64, low); 316 | } 317 | 318 | pub inline fn write(value: u64) void { 319 | asm volatile ("wrmsr" 320 | : 321 | : [reg] "{ecx}" (register), 322 | [low] "{eax}" (@truncate(u32, value)), 323 | [high] "{edx}" (@truncate(u32, value >> 32)), 324 | : "memory" 325 | ); 326 | } 327 | 328 | comptime { 329 | std.testing.refAllDecls(@This()); 330 | } 331 | }; 332 | } 333 | 334 | comptime { 335 | std.testing.refAllDecls(@This()); 336 | } 337 | -------------------------------------------------------------------------------- /src/structures/gdt.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | 5 | /// Specifies which element to load into a segment from 6 | /// descriptor tables (i.e., it is an index into the LDT or GDT table 7 | /// with some additional flags). 8 | /// 9 | /// See Intel 3a, Section 3.4.2 "Segment Selectors" 10 | pub const SegmentSelector = struct { 11 | value: u16, 12 | 13 | /// Creates a new SegmentSelector 14 | pub fn init(index: u16, rpl: x86_64.PrivilegeLevel) SegmentSelector { 15 | return .{ 16 | .value = index << 3 | @as(u16, @enumToInt(rpl)), 17 | }; 18 | } 19 | 20 | /// Returns the GDT index. 21 | pub fn getIndex(self: SegmentSelector) u16 { 22 | return self.value >> 3; 23 | } 24 | 25 | /// Set the privilege level for this Segment selector. 26 | pub fn setRpl(self: *SegmentSelector, rpl: x86_64.PrivilegeLevel) void { 27 | bitjuggle.setBits(&self.value, 0, 2, @as(u16, @enumToInt(rpl))); 28 | } 29 | 30 | /// Returns the requested privilege level. 31 | /// Every two-bit value maps to a valid `PrivilegeLevel`, so this cannot fail. 32 | pub fn getRpl(self: SegmentSelector) x86_64.PrivilegeLevel { 33 | switch (bitjuggle.getBits(self.value, 0, 2)) { 34 | 0 => return x86_64.PrivilegeLevel.Ring0, 35 | 1 => return x86_64.PrivilegeLevel.Ring1, 36 | 2 => return x86_64.PrivilegeLevel.Ring2, 37 | 3 => return x86_64.PrivilegeLevel.Ring3, 38 | } 39 | } 40 | 41 | /// Returns the requested privilege level.
42 | /// 43 | /// ## Panic 44 | /// Will panic if the privilege level bits are out of range of the `PrivilegeLevel` enum 45 | pub fn getRplPanic(self: SegmentSelector) x86_64.PrivilegeLevel { 46 | return @intToEnum(x86_64.PrivilegeLevel, bitjuggle.getBits(self.value, 0, 2)); 47 | } 48 | 49 | pub fn format(value: SegmentSelector, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 50 | _ = fmt; 51 | _ = options; 52 | try writer.print("SegmentSelector(.index = {}, .rpl = {})", .{ value.getIndex(), value.getRpl() }); 53 | } 54 | 55 | comptime { 56 | std.testing.refAllDecls(@This()); 57 | } 58 | }; 59 | 60 | test "SegmentSelector" { 61 | var a = SegmentSelector.init(1, .Ring0); 62 | try std.testing.expectEqual(@as(u16, 1), a.getIndex()); 63 | try std.testing.expectEqual(x86_64.PrivilegeLevel.Ring0, a.getRpl()); 64 | a.setRpl(.Ring3); 65 | try std.testing.expectEqual(@as(u16, 1), a.getIndex()); 66 | try std.testing.expectEqual(x86_64.PrivilegeLevel.Ring3, a.getRpl()); 67 | } 68 | 69 | /// A 64-bit mode global descriptor table (GDT). 70 | /// 71 | /// In 64-bit mode, segmentation is not supported. The GDT is used nonetheless, for example for 72 | /// switching between user and kernel mode or for loading a TSS. 73 | /// 74 | /// The GDT has a fixed size of 8 entries; trying to add more entries will panic. 75 | /// 76 | /// You do **not** need to add a null segment descriptor yourself - this is already done internally. 77 | /// 78 | /// Data segment registers in ring 0 can be loaded with the null segment selector. When running in 79 | /// ring 3, the `ss` register must point to a valid data segment which can be obtained through the 80 | /// `createUserDataSegment()` function. 81 | /// 82 | /// Code segments must be valid and non-null at all times and can be obtained through the 83 | /// `createKernelCodeSegment()` and `createUserCodeSegment()` functions in rings 0 and 3 respectively. 84 | /// 85 | /// For more info, see: 86 | /// [x86 Instruction Reference for `mov`](https://www.felixcloutier.com/x86/mov#64-bit-mode-exceptions), 87 | /// [Intel Manual](https://software.intel.com/sites/default/files/managed/39/c5/325462-sdm-vol-1-2abcd-3abcd.pdf), 88 | /// [AMD Manual](https://www.amd.com/system/files/TechDocs/24593.pdf) 89 | pub const GlobalDescriptorTable = struct { 90 | table: [8]u64 = [_]u64{0} ** 8, 91 | next_free: u16 = 1, 92 | 93 | /// Create a GDT from a slice of `u64`. 94 | /// The length of the slice must not exceed 8. 95 | pub fn fromRawSlice(slice: []const u64) !GlobalDescriptorTable { 96 | if (slice.len > 8) return error.SliceLenExceedsEight; 97 | 98 | var table: [8]u64 = [_]u64{0} ** 8; 99 | 100 | const next_free = @truncate(u16, slice.len); 101 | var i: usize = 0; 102 | while (i != next_free) : (i += 1) { 103 | table[i] = slice[i]; 104 | } 105 | 106 | return GlobalDescriptorTable{ 107 | .table = table, 108 | .next_free = next_free, 109 | }; 110 | } 111 | 112 | /// Adds the given segment descriptor to the GDT, returning the segment selector. 113 | /// 114 | /// Panics if the GDT has no free entries left.
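    ///
    /// A usage sketch (mirroring the `GlobalDescriptorTable` test below):
    ///
    ///     var gdt = GlobalDescriptorTable{};
    ///     const kernel_code_selector = gdt.addEntry(createKernelCodeSegment());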
115 | pub fn addEntry(self: *GlobalDescriptorTable, entry: Descriptor) SegmentSelector { 116 | switch (entry) { 117 | .UserSegment => |value| { 118 | const rpl = if (value & Descriptor.DPL_RING_3 != 0) x86_64.PrivilegeLevel.Ring3 else x86_64.PrivilegeLevel.Ring0; 119 | return SegmentSelector.init(self.push(value), rpl); 120 | }, 121 | .SystemSegment => |systemSegmentData| { 122 | const index = self.push(systemSegmentData.low); 123 | _ = self.push(systemSegmentData.high); 124 | return SegmentSelector.init(index, x86_64.PrivilegeLevel.Ring0); 125 | }, 126 | } 127 | } 128 | 129 | /// Loads the GDT in the CPU using the `lgdt` instruction. 130 | /// ### NOTE: 131 | /// This does **not** alter any of the segment registers; you **must** (re)load them yourself using the appropriate functions: 132 | /// `instructions.segmentation.setSs`, `instructions.segmentation.setCs` 133 | pub fn load(self: *GlobalDescriptorTable) void { 134 | const ptr = x86_64.structures.DescriptorTablePointer{ 135 | .base = x86_64.VirtAddr.fromPtr(&self.table), 136 | .limit = @as(u16, self.next_free * @sizeOf(u64) - 1), 137 | }; 138 | 139 | x86_64.instructions.tables.lgdt(&ptr); 140 | } 141 | 142 | fn push(self: *GlobalDescriptorTable, value: u64) u16 { 143 | if (self.next_free < self.table.len) { 144 | const index = self.next_free; 145 | self.table[index] = value; 146 | self.next_free += 1; 147 | return index; 148 | } 149 | 150 | @panic("GDT full"); 151 | } 152 | 153 | comptime { 154 | std.testing.refAllDecls(@This()); 155 | } 156 | }; 157 | 158 | test "GlobalDescriptorTable" { 159 | var gdt: GlobalDescriptorTable = .{}; 160 | _ = gdt.addEntry(createKernelCodeSegment()); 161 | _ = gdt.addEntry(createUserCodeSegment()); 162 | _ = gdt.addEntry(createUserDataSegment()); 163 | } 164 | 165 | /// Creates a segment descriptor for a 64-bit kernel code segment. 166 | /// Suitable for use with `syscall` or 64-bit `sysenter`. 167 | pub fn createKernelCodeSegment() Descriptor { 168 | return .{ .UserSegment = Descriptor.KERNEL_CODE64 }; 169 | } 170 | 171 | /// Creates a segment descriptor for a ring 0 data segment (32-bit or 64-bit). 172 | /// Suitable for use with `syscall` or 64-bit `sysenter`. 173 | pub fn createKernelDataSegment() Descriptor { 174 | return .{ .UserSegment = Descriptor.KERNEL_DATA }; 175 | } 176 | 177 | /// Creates a segment descriptor for a ring 3 data segment (32-bit or 64-bit). 178 | /// Suitable for use with `sysret` or `sysexit`. 179 | pub fn createUserDataSegment() Descriptor { 180 | return .{ .UserSegment = Descriptor.USER_DATA }; 181 | } 182 | 183 | /// Creates a segment descriptor for a 64-bit ring 3 code segment. 184 | /// Suitable for use with `sysret` or `sysexit`. 185 | pub fn createUserCodeSegment() Descriptor { 186 | return .{ .UserSegment = Descriptor.USER_CODE64 }; 187 | } 188 | 189 | /// Creates a TSS system descriptor for the given TSS.
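/// A sketch, assuming a `tss: x86_64.structures.tss.TaskStateSegment` that outlives the GDT:
///
///     _ = gdt.addEntry(tssSegment(&tss));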
190 | pub fn tssSegment(tss: *x86_64.structures.tss.TaskStateSegment) Descriptor { 191 | const ptr = @ptrToInt(tss); 192 | 193 | var low = Descriptor.PRESENT; 194 | 195 | // base 196 | bitjuggle.setBits(&low, 16, 24, bitjuggle.getBits(ptr, 0, 24)); 197 | bitjuggle.setBits(&low, 56, 8, bitjuggle.getBits(ptr, 24, 8)); 198 | 199 | // limit (the `-1` is needed since the bound is inclusive) 200 | bitjuggle.setBits(&low, 0, 16, @as(u64, @sizeOf(x86_64.structures.tss.TaskStateSegment) - 1)); 201 | 202 | // type (0b1001 = available 64-bit tss) 203 | bitjuggle.setBits(&low, 40, 4, 0b1001); 204 | 205 | var high: u64 = 0; 206 | bitjuggle.setBits(&high, 0, 32, bitjuggle.getBits(ptr, 32, 32)); 207 | 208 | return .{ 209 | .SystemSegment = .{ 210 | .low = low, 211 | .high = high, 212 | }, 213 | }; 214 | } 215 | 216 | /// A 64-bit mode segment descriptor. 217 | /// 218 | /// Segmentation is no longer supported in 64-bit mode, so most of the descriptor 219 | /// contents are ignored. 220 | pub const Descriptor = union(enum) { 221 | /// Set by the processor if this segment has been accessed. Only cleared by software. 222 | pub const ACCESSED: u64 = 1 << 40; 223 | 224 | /// For 32-bit data segments, sets the segment as writable. For 32-bit code segments, 225 | /// sets the segment as _readable_. In 64-bit mode, ignored for all segments. 226 | pub const WRITABLE: u64 = 1 << 41; 227 | 228 | /// For code segments, sets the segment as “conforming”, influencing the 229 | /// privilege checks that occur on control transfers. For 32-bit data segments, 230 | /// sets the segment as "expand down". In 64-bit mode, ignored for data segments. 231 | pub const CONFORMING: u64 = 1 << 42; 232 | 233 | /// This flag must be set for code segments and unset for data segments. 234 | pub const EXECUTABLE: u64 = 1 << 43; 235 | 236 | /// This flag must be set for user segments (in contrast to system segments). 237 | pub const USER_SEGMENT: u64 = 1 << 44; 238 | 239 | /// The DPL for this descriptor is Ring 3. In 64-bit mode, ignored for data segments. 240 | pub const DPL_RING_3: u64 = 3 << 45; 241 | 242 | /// Must be set for any segment, causes a segment not present exception if not set. 243 | pub const PRESENT: u64 = 1 << 47; 244 | 245 | /// Available for use by the Operating System 246 | pub const AVAILABLE: u64 = 1 << 52; 247 | 248 | /// Must be set for 64-bit code segments, unset otherwise. 249 | pub const LONG_MODE: u64 = 1 << 53; 250 | 251 | /// Use 32-bit (as opposed to 16-bit) operands. If `LONG_MODE` is set, 252 | /// this must be unset. In 64-bit mode, ignored for data segments. 253 | pub const DEFAULT_SIZE: u64 = 1 << 54; 254 | 255 | /// Limit field is scaled by 4096 bytes. In 64-bit mode, ignored for all segments.
256 | pub const GRANULARITY: u64 = 1 << 55; 257 | 258 | /// Bits 0..=15 of the limit field (ignored in 64-bit mode) 259 | pub const LIMIT_0_15: u64 = 0xFFFF; 260 | /// Bits 16..=19 of the limit field (ignored in 64-bit mode) 261 | pub const LIMIT_16_19: u64 = 0xF << 48; 262 | /// Bits 0..=23 of the base field (ignored in 64-bit mode, except for fs and gs) 263 | pub const BASE_0_23: u64 = 0xFF_FFFF << 16; 264 | /// Bits 24..=31 of the base field (ignored in 64-bit mode, except for fs and gs) 265 | pub const BASE_24_31: u64 = 0xFF << 56; 266 | 267 | /// Flags that we set for all our default segments 268 | pub const COMMON: u64 = USER_SEGMENT | PRESENT | WRITABLE | ACCESSED | LIMIT_0_15 | LIMIT_16_19 | GRANULARITY; 269 | 270 | /// A kernel data segment (64-bit or flat 32-bit) 271 | pub const KERNEL_DATA: u64 = COMMON | DEFAULT_SIZE; 272 | /// A flat 32-bit kernel code segment 273 | pub const KERNEL_CODE32: u64 = COMMON | EXECUTABLE | DEFAULT_SIZE; 274 | /// A 64-bit kernel code segment 275 | pub const KERNEL_CODE64: u64 = COMMON | EXECUTABLE | LONG_MODE; 276 | 277 | /// A user data segment (64-bit or flat 32-bit) 278 | pub const USER_DATA: u64 = KERNEL_DATA | DPL_RING_3; 279 | /// A flat 32-bit user code segment 280 | pub const USER_CODE32: u64 = KERNEL_CODE32 | DPL_RING_3; 281 | /// A 64-bit user code segment 282 | pub const USER_CODE64: u64 = KERNEL_CODE64 | DPL_RING_3; 283 | 284 | /// Descriptor for a code or data segment. 285 | /// 286 | /// Since segmentation is no longer supported in 64-bit mode, almost all of 287 | /// a code or data descriptor's contents are ignored. Only some flags are still used. 288 | UserSegment: u64, 289 | 290 | /// A system segment descriptor such as an LDT or TSS descriptor. 291 | SystemSegment: SystemSegmentData, 292 | 293 | pub const SystemSegmentData = struct { 294 | low: u64, 295 | high: u64, 296 | }; 297 | 298 | test "Descriptors match linux" { 299 | // Make sure our defaults match the ones used by the Linux kernel. 300 | // Constants pulled from an old version of arch/x86/kernel/cpu/common.c 301 | try std.testing.expectEqual(0x00af9b000000ffff, Descriptor.KERNEL_CODE64); 302 | try std.testing.expectEqual(0x00cf9b000000ffff, Descriptor.KERNEL_CODE32); 303 | try std.testing.expectEqual(0x00cf93000000ffff, Descriptor.KERNEL_DATA); 304 | try std.testing.expectEqual(0x00affb000000ffff, Descriptor.USER_CODE64); 305 | try std.testing.expectEqual(0x00cffb000000ffff, Descriptor.USER_CODE32); 306 | try std.testing.expectEqual(0x00cff3000000ffff, Descriptor.USER_DATA); 307 | } 308 | 309 | comptime { 310 | std.testing.refAllDecls(@This()); 311 | } 312 | }; 313 | 314 | comptime { 315 | std.testing.refAllDecls(@This()); 316 | } 317 | -------------------------------------------------------------------------------- /src/registers/control.zig: -------------------------------------------------------------------------------- 1 | const x86_64 = @import("../index.zig"); 2 | const bitjuggle = @import("bitjuggle"); 3 | const std = @import("std"); 4 | const formatWithoutFields = @import("../common.zig").formatWithoutFields; 5 | 6 | /// Various control flags modifying the basic operation of the CPU. 7 | pub const Cr0 = packed struct { 8 | /// Enables protected mode. 9 | protected_mode: bool, 10 | 11 | /// Enables monitoring of the coprocessor, typical for x87 instructions. 12 | /// 13 | /// Controls together with the `task_switched` flag whether a `wait` or `fwait` 14 | /// instruction should cause an `#NE` exception.
15 | monitor_coprocessor: bool, 16 | 17 | /// Force all x87 and MMX instructions to cause an `#NE` exception. 18 | emulate_coprocessor: bool, 19 | 20 | /// Automatically set to 1 on _hardware_ task switch. 21 | /// 22 | /// This flags allows lazily saving x87/MMX/SSE instructions on hardware context switches. 23 | task_switched: bool, 24 | 25 | /// Indicates support of 387DX math coprocessor instructions. 26 | /// 27 | /// Always set on all recent x86 processors, cannot be cleared. 28 | extension_type: bool, 29 | 30 | /// Enables the native (internal) error reporting mechanism for x87 FPU errors. 31 | numeric_error: bool, 32 | 33 | z_reserved6_15: u10, 34 | 35 | /// Controls whether supervisor-level writes to read-only pages are inhibited. 36 | /// 37 | /// When set, it is not possible to write to read-only pages from ring 0. 38 | write_protect: bool, 39 | 40 | z_reserved17: bool, 41 | 42 | /// Enables automatic usermode alignment checking if `RFlags.alignment_check` is also set. 43 | alignment_mask: bool, 44 | 45 | z_reserved19_28: u10, 46 | 47 | /// Ignored. Used to control write-back/write-through cache strategy on older CPUs. 48 | not_write_through: bool, 49 | 50 | /// Disables some processor caches, specifics are model-dependent. 51 | cache_disable: bool, 52 | 53 | /// Enables paging. 54 | /// 55 | /// If this bit is set, `protected_mode` must be set. 56 | paging: bool, 57 | 58 | z_reserved32_63: u32, 59 | 60 | /// Read the current set of CR0 flags. 61 | pub fn read() Cr0 { 62 | return Cr0.fromU64(readRaw()); 63 | } 64 | 65 | /// Read the current raw CR0 value. 66 | fn readRaw() u64 { 67 | return asm ("mov %%cr0, %[ret]" 68 | : [ret] "=r" (-> u64), 69 | ); 70 | } 71 | 72 | /// Write CR0 flags. 73 | /// 74 | /// Preserves the value of reserved fields. 75 | pub fn write(self: Cr0) void { 76 | writeRaw(self.toU64() | (readRaw() & ALL_RESERVED)); 77 | } 78 | 79 | /// Write raw CR0 flags. 80 | /// 81 | /// Does _not_ preserve any values, including reserved fields. 82 | fn writeRaw(value: u64) void { 83 | asm volatile ("mov %[val], %%cr0" 84 | : 85 | : [val] "r" (value), 86 | : "memory" 87 | ); 88 | } 89 | 90 | const ALL_RESERVED: u64 = blk: { 91 | var flags = std.mem.zeroes(Cr0); 92 | flags.z_reserved6_15 = std.math.maxInt(u10); 93 | flags.z_reserved17 = true; 94 | flags.z_reserved19_28 = std.math.maxInt(u10); 95 | flags.z_reserved32_63 = std.math.maxInt(u32); 96 | break :blk @bitCast(u64, flags); 97 | }; 98 | 99 | const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED; 100 | 101 | pub fn fromU64(value: u64) Cr0 { 102 | return @bitCast(Cr0, value & ALL_NOT_RESERVED); 103 | } 104 | 105 | pub fn toU64(self: Cr0) u64 { 106 | return @bitCast(u64, self) & ALL_NOT_RESERVED; 107 | } 108 | 109 | pub fn format(value: Cr0, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 110 | _ = fmt; 111 | return formatWithoutFields( 112 | value, 113 | options, 114 | writer, 115 | &.{ "z_reserved6_15", "z_reserved17", "z_reserved19_28", "z_reserved32_63" }, 116 | ); 117 | } 118 | 119 | test { 120 | try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(Cr0)); 121 | try std.testing.expectEqual(@as(usize, 8), @sizeOf(Cr0)); 122 | try std.testing.expectEqual(@as(usize, 0b11100000000001010000000000111111), ALL_NOT_RESERVED); 123 | } 124 | 125 | comptime { 126 | std.testing.refAllDecls(@This()); 127 | } 128 | }; 129 | 130 | /// Contains the Page Fault Linear Address (PFLA). 131 | /// 132 | /// When a page fault occurs, the CPU sets this register to the faulting virtual address. 
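///
/// A sketch of its use inside a page fault handler:
///
///     const faulting_address = x86_64.registers.control.Cr2.read();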
133 | pub const Cr2 = struct {
134 |     /// Read the current page fault linear address from the CR2 register.
135 |     pub fn read() x86_64.VirtAddr {
136 |         // We can use unchecked as this virtual address is set by the CPU itself
137 |         return x86_64.VirtAddr.initUnchecked(asm ("mov %%cr2, %[ret]"
138 |             : [ret] "=r" (-> u64),
139 |         ));
140 |     }
141 |
142 |     comptime {
143 |         std.testing.refAllDecls(@This());
144 |     }
145 | };
146 |
147 | /// Controls cache settings for the highest-level page table.
148 | ///
149 | /// Unused if paging is disabled or if `Cr4.pcid` is enabled.
150 | pub const Cr3Flags = packed struct {
151 |     z_reserved0: bool,
152 |     z_reserved1: bool,
153 |
154 |     /// Use a writethrough cache policy for the table (otherwise a writeback policy is used).
155 |     page_level_writethrough: bool,
156 |
157 |     /// Disable caching for the table.
158 |     page_level_cache_disable: bool,
159 |
160 |     z_reserved4_63: u60,
161 |
162 |     const ALL_RESERVED: u64 = blk: {
163 |         var flags = std.mem.zeroes(Cr3Flags);
164 |         flags.z_reserved0 = true;
165 |         flags.z_reserved1 = true;
166 |         flags.z_reserved4_63 = std.math.maxInt(u60);
167 |         break :blk @bitCast(u64, flags);
168 |     };
169 |
170 |     const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED;
171 |
172 |     pub fn fromU64(value: u64) Cr3Flags {
173 |         return @bitCast(Cr3Flags, value & ALL_NOT_RESERVED);
174 |     }
175 |
176 |     pub fn toU64(self: Cr3Flags) u64 {
177 |         return @bitCast(u64, self) & ALL_NOT_RESERVED;
178 |     }
179 |
180 |     pub fn format(value: Cr3Flags, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
181 |         _ = fmt;
182 |         return formatWithoutFields(
183 |             value,
184 |             options,
185 |             writer,
186 |             &.{ "z_reserved0", "z_reserved1", "z_reserved4_63" },
187 |         );
188 |     }
189 |
190 |     test {
191 |         try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(Cr3Flags));
192 |         try std.testing.expectEqual(@as(usize, 8), @sizeOf(Cr3Flags));
193 |     }
194 |
195 |     comptime {
196 |         std.testing.refAllDecls(@This());
197 |     }
198 | };
199 |
200 | /// Contains the physical address of the highest-level page table.
201 | pub const Cr3 = struct {
202 |     pub const Contents = struct {
203 |         phys_frame: x86_64.structures.paging.PhysFrame,
204 |         cr3_flags: Cr3Flags,
205 |
206 |         pub fn toU64(self: Contents) u64 {
207 |             return self.phys_frame.start_address.value | self.cr3_flags.toU64();
208 |         }
209 |     };
210 |
211 |     pub const PcidContents = struct {
212 |         phys_frame: x86_64.structures.paging.PhysFrame,
213 |         pcid: x86_64.instructions.tlb.Pcid,
214 |
215 |         pub fn toU64(self: PcidContents) u64 {
216 |             return self.phys_frame.start_address.value | @as(u64, self.pcid.value);
217 |         }
218 |     };
219 |
220 |     /// Read the current P4 table address from the CR3 register.
221 |     pub fn read() Contents {
222 |         const value = readRaw();
223 |
224 |         return .{
225 |             .phys_frame = x86_64.structures.paging.PhysFrame.containingAddress(
226 |                 // unchecked is fine as the mask ensures validity
227 |                 x86_64.PhysAddr.initUnchecked(value & 0x000f_ffff_ffff_f000),
228 |             ),
229 |             .cr3_flags = Cr3Flags.fromU64(value),
230 |         };
231 |     }
232 |
233 |     /// Read the raw value from the CR3 register
234 |     fn readRaw() u64 {
235 |         return asm ("mov %%cr3, %[value]"
236 |             : [value] "=r" (-> u64),
237 |         );
238 |     }
239 |
240 |     /// Read the current P4 table address from the CR3 register along with PCID.
241 |     /// The correct functioning of this requires CR4.PCIDE = 1.
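    ///
    /// A usage sketch (assumes the caller has already enabled CR4.PCIDE;
    /// `Pcid.value` is the raw 12-bit identifier):
    ///
    ///     const contents = Cr3.readPcid();
    ///     const level_4_table = contents.phys_frame;
    ///     const active_pcid: u12 = contents.pcid.value;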
242 |     /// See also `Cr4.pcid`.
243 |     pub fn readPcid() PcidContents {
244 |         const value = readRaw();
245 |
246 |         return .{
247 |             .phys_frame = x86_64.structures.paging.PhysFrame.containingAddress(
248 |                 // unchecked is fine as the mask ensures validity
249 |                 x86_64.PhysAddr.initUnchecked(value & 0x000f_ffff_ffff_f000),
250 |             ),
251 |             .pcid = x86_64.instructions.tlb.Pcid.init(@truncate(u12, value & 0xFFF)),
252 |         };
253 |     }
254 |
255 |     /// Write a new P4 table address into the CR3 register.
256 |     pub fn write(contents: Contents) void {
257 |         writeRaw(contents.toU64());
258 |     }
259 |
260 |     /// Write a new P4 table address into the CR3 register.
261 |     ///
262 |     /// ## Safety
263 |     /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
264 |     /// changing the page mapping.
265 |     /// `Cr4.pcid` must be set before calling this method.
266 |     pub fn writePcid(pcidContents: PcidContents) void {
267 |         writeRaw(pcidContents.toU64());
268 |     }
269 |
270 |     fn writeRaw(value: u64) void {
271 |         asm volatile ("mov %[value], %%cr3"
272 |             :
273 |             : [value] "r" (value),
274 |             : "memory"
275 |         );
276 |     }
277 |
278 |     comptime {
279 |         std.testing.refAllDecls(@This());
280 |     }
281 | };
282 |
283 | /// Contains various control flags that enable architectural extensions, and indicate support for specific processor capabilities.
284 | pub const Cr4 = packed struct {
285 |     /// Enables hardware-supported performance enhancements for software running in
286 |     /// virtual-8086 mode.
287 |     virtual_8086_mode_extensions: bool,
288 |
289 |     /// Enables support for protected-mode virtual interrupts.
290 |     protected_mode_virtual_interrupts: bool,
291 |
292 |     /// When set, only privilege-level 0 can execute the RDTSC or RDTSCP instructions.
293 |     timestamp_disable: bool,
294 |
295 |     /// Enables I/O breakpoint capability and enforces treatment of DR4 and DR5 debug registers
296 |     /// as reserved.
297 |     debugging_extensions: bool,
298 |
299 |     /// Enables the use of 4MB physical frames; ignored in long mode.
300 |     page_size_extension: bool,
301 |
302 |     /// Enables physical address extensions and 2MB physical frames. Required in long mode.
303 |     physical_address_extension: bool,
304 |
305 |     /// Enables the machine-check exception mechanism.
306 |     machine_check_exception: bool,
307 |
308 |     /// Enables the global page feature, allowing some page translations to be marked as global (see `PageTableFlags.global`).
309 |     page_global: bool,
310 |
311 |     /// Allows software running at any privilege level to use the RDPMC instruction.
312 |     performance_monitor_counter: bool,
313 |
314 |     /// Enables the use of legacy SSE instructions; allows using FXSAVE/FXRSTOR for saving
315 |     /// processor state of 128-bit media instructions.
316 |     osfxsr: bool,
317 |
318 |     /// Enables the SIMD floating-point exception (#XF) for handling unmasked 256-bit and
319 |     /// 128-bit media floating-point errors.
320 |     osxmmexcpt_enable: bool,
321 |
322 |     /// Prevents the execution of the SGDT, SIDT, SLDT, SMSW, and STR instructions by
323 |     /// user-mode software.
324 |     user_mode_instruction_prevention: bool,
325 |
326 |     /// Enables 5-level paging on supported CPUs (Intel Only).
327 |     l5_paging: bool,
328 |
329 |     /// Enables VMX instructions (Intel Only).
330 |     virtual_machine_extensions: bool,
331 |
332 |     /// Enables SMX instructions (Intel Only).
333 |     safer_mode_extensions: bool,
334 |
335 |     /// Enables software running in 64-bit mode at any privilege level to read and write
336 |     /// the FS.base and GS.base hidden segment register state.
337 |     fsgsbase: bool,
338 |
339 |     z_reserved16: bool,
340 |
341 |     /// Enables process-context identifiers (PCIDs).
342 |     pcid: bool,
343 |
344 |     /// Enables extended processor state management instructions, including XGETBV and XSAVE.
345 |     osxsave: bool,
346 |
347 |     /// Enables the Key Locker feature (Intel Only).
348 |     ///
349 |     /// This enables creation and use of opaque AES key handles; see the
350 |     /// [Intel Key Locker Specification](https://software.intel.com/content/www/us/en/develop/download/intel-key-locker-specification.html)
351 |     /// for more information.
352 |     key_locker: bool,
353 |
354 |     /// Prevents the execution of instructions that reside in pages accessible by user-mode
355 |     /// software when the processor is in supervisor-mode.
356 |     supervisor_mode_execution_prevention: bool,
357 |
358 |     /// Enables restrictions for supervisor-mode software when reading data from user-mode
359 |     /// pages.
360 |     supervisor_mode_access_prevention: bool,
361 |
362 |     /// Enables protection keys for user-mode pages.
363 |     ///
364 |     /// Also enables access to the PKRU register (via the `RDPKRU`/`WRPKRU` instructions) to set user-mode protection key access
365 |     /// controls.
366 |     protection_key_user: bool,
367 |
368 |     /// Enables Control-flow Enforcement Technology (CET)
369 |     ///
370 |     /// This enables the shadow stack feature, ensuring return addresses read via `RET` and `IRET` have not been corrupted.
371 |     control_flow_enforcement: bool,
372 |
373 |     /// Enables protection keys for supervisor-mode pages (Intel Only).
374 |     ///
375 |     /// Also enables the `IA32_PKRS` MSR to set supervisor-mode protection key access controls.
376 |     protection_key_supervisor: bool,
377 |
378 |     z_reserved25_31: u7,
379 |     z_reserved32_63: u32,
380 |
381 |     /// Read the current set of CR4 flags.
382 |     pub fn read() Cr4 {
383 |         return Cr4.fromU64(readRaw());
384 |     }
385 |
386 |     /// Read the current raw CR4 value.
387 |     fn readRaw() u64 {
388 |         return asm ("mov %%cr4, %[ret]"
389 |             : [ret] "=r" (-> u64),
390 |         );
391 |     }
392 |
393 |     /// Write CR4 flags.
394 |     ///
395 |     /// Preserves the value of reserved fields.
396 |     pub fn write(self: Cr4) void {
397 |         writeRaw(self.toU64() | (readRaw() & ALL_RESERVED));
398 |     }
399 |
400 |     /// Write raw CR4 flags.
401 |     ///
402 |     /// Does _not_ preserve any values, including reserved fields.
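    ///
    /// Callers normally use the public `read`/`write` pair instead, which preserves
    /// the reserved bits. A sketch of toggling a single flag through that API (the
    /// chosen flag is purely illustrative):
    ///
    ///     var cr4 = Cr4.read();
    ///     cr4.page_global = true;
    ///     cr4.write();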
403 |     fn writeRaw(value: u64) void {
404 |         asm volatile ("mov %[val], %%cr4"
405 |             :
406 |             : [val] "r" (value),
407 |             : "memory"
408 |         );
409 |     }
410 |
411 |     const ALL_RESERVED: u64 = blk: {
412 |         var flags = std.mem.zeroes(Cr4);
413 |         flags.z_reserved16 = true;
414 |         flags.z_reserved25_31 = std.math.maxInt(u7);
415 |         flags.z_reserved32_63 = std.math.maxInt(u32);
416 |         break :blk @bitCast(u64, flags);
417 |     };
418 |
419 |     const ALL_NOT_RESERVED: u64 = ~ALL_RESERVED;
420 |
421 |     pub fn fromU64(value: u64) Cr4 {
422 |         return @bitCast(Cr4, value & ALL_NOT_RESERVED);
423 |     }
424 |
425 |     pub fn toU64(self: Cr4) u64 {
426 |         return @bitCast(u64, self) & ALL_NOT_RESERVED;
427 |     }
428 |
429 |     pub fn format(value: Cr4, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
430 |         _ = fmt;
431 |         return formatWithoutFields(
432 |             value,
433 |             options,
434 |             writer,
435 |             &.{ "z_reserved16", "z_reserved25_31", "z_reserved32_63" },
436 |         );
437 |     }
438 |
439 |     test {
440 |         try std.testing.expectEqual(@as(usize, 64), @bitSizeOf(Cr4));
441 |         try std.testing.expectEqual(@as(usize, 8), @sizeOf(Cr4));
442 |     }
443 |
444 |     comptime {
445 |         std.testing.refAllDecls(@This());
446 |     }
447 | };
448 |
449 | comptime {
450 |     std.testing.refAllDecls(@This());
451 | }
452 |
--------------------------------------------------------------------------------
/src/structures/paging/frame.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 |
5 | /// A physical memory frame. Page size 4 KiB
6 | pub const PhysFrame = extern struct {
7 |     const size: x86_64.structures.paging.PageSize = .Size4KiB;
8 |
9 |     start_address: x86_64.PhysAddr,
10 |
11 |     /// Returns the frame that starts at the given physical address.
12 |     ///
13 |     /// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start)
14 |     pub fn fromStartAddress(address: x86_64.PhysAddr) PhysFrameError!PhysFrame {
15 |         if (!address.isAligned(size.bytes())) {
16 |             return PhysFrameError.AddressNotAligned;
17 |         }
18 |         return fromStartAddressUnchecked(address);
19 |     }
20 |
21 |     /// Returns the frame that starts at the given physical address,
22 |     /// without validating the address's alignment.
23 |     pub fn fromStartAddressUnchecked(address: x86_64.PhysAddr) PhysFrame {
24 |         return .{ .start_address = address };
25 |     }
26 |
27 |     /// Returns the frame that contains the given physical address.
28 |     pub fn containingAddress(address: x86_64.PhysAddr) PhysFrame {
29 |         return .{
30 |             .start_address = address.alignDown(size.bytes()),
31 |         };
32 |     }
33 |
34 |     /// Returns the size of the frame (4KB, 2MB or 1GB).
35 |     pub fn sizeOf(self: PhysFrame) u64 {
36 |         _ = self;
37 |         return size.bytes();
38 |     }
39 |
40 |     pub fn format(value: PhysFrame, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
41 |         _ = fmt;
42 |         _ = options;
43 |         try writer.print("PhysFrame[" ++ size.sizeString() ++ "](0x{x})", .{value.start_address.value});
44 |     }
45 |
46 |     comptime {
47 |         std.testing.refAllDecls(@This());
48 |     }
49 | };
50 |
51 | /// A physical memory frame. Page size 2 MiB
52 | pub const PhysFrame2MiB = extern struct {
53 |     const size: x86_64.structures.paging.PageSize = .Size2MiB;
54 |
55 |     start_address: x86_64.PhysAddr,
56 |
57 |     /// Returns the frame that starts at the given physical address.
58 |     ///
59 |     /// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start)
60 |     pub fn fromStartAddress(address: x86_64.PhysAddr) PhysFrameError!PhysFrame2MiB {
61 |         if (!address.isAligned(size.bytes())) {
62 |             return PhysFrameError.AddressNotAligned;
63 |         }
64 |         return containingAddress(address);
65 |     }
66 |
67 |     /// Returns the frame that starts at the given physical address,
68 |     /// without validating the address's alignment.
69 |     pub fn fromStartAddressUnchecked(address: x86_64.PhysAddr) PhysFrame2MiB {
70 |         return .{ .start_address = address };
71 |     }
72 |
73 |     /// Returns the frame that contains the given physical address.
74 |     pub fn containingAddress(address: x86_64.PhysAddr) PhysFrame2MiB {
75 |         return .{
76 |             .start_address = address.alignDown(size.bytes()),
77 |         };
78 |     }
79 |
80 |     /// Returns the size of the frame (4KB, 2MB or 1GB).
81 |     pub fn sizeOf(self: PhysFrame2MiB) u64 {
82 |         _ = self;
83 |         return size.bytes();
84 |     }
85 |
86 |     pub fn format(value: PhysFrame2MiB, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
87 |         _ = fmt;
88 |         _ = options;
89 |         try writer.print("PhysFrame[" ++ size.sizeString() ++ "](0x{x})", .{value.start_address.value});
90 |     }
91 |
92 |     comptime {
93 |         std.testing.refAllDecls(@This());
94 |     }
95 | };
96 |
97 | /// A physical memory frame. Page size 1 GiB
98 | pub const PhysFrame1GiB = extern struct {
99 |     const size: x86_64.structures.paging.PageSize = .Size1GiB;
100 |
101 |     start_address: x86_64.PhysAddr,
102 |
103 |     /// Returns the frame that starts at the given physical address.
104 |     ///
105 |     /// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start)
106 |     pub fn fromStartAddress(address: x86_64.PhysAddr) PhysFrameError!PhysFrame1GiB {
107 |         if (!address.isAligned(size.bytes())) {
108 |             return PhysFrameError.AddressNotAligned;
109 |         }
110 |         return containingAddress(address);
111 |     }
112 |
113 |     /// Returns the frame that starts at the given physical address,
114 |     /// without validating the address's alignment.
115 |     pub fn fromStartAddressUnchecked(address: x86_64.PhysAddr) PhysFrame1GiB {
116 |         return .{ .start_address = address };
117 |     }
118 |
119 |     /// Returns the frame that contains the given physical address.
120 |     pub fn containingAddress(address: x86_64.PhysAddr) PhysFrame1GiB {
121 |         return .{
122 |             .start_address = address.alignDown(size.bytes()),
123 |         };
124 |     }
125 |
126 |     /// Returns the size of the frame (4KB, 2MB or 1GB).
127 |     pub fn sizeOf(self: PhysFrame1GiB) u64 {
128 |         _ = self;
129 |         return size.bytes();
130 |     }
131 |
132 |     pub fn format(value: PhysFrame1GiB, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
133 |         _ = fmt;
134 |         _ = options;
135 |         try writer.print("PhysFrame[" ++ size.sizeString() ++ "](0x{x})", .{value.start_address.value});
136 |     }
137 |
138 |     comptime {
139 |         std.testing.refAllDecls(@This());
140 |     }
141 | };
142 |
143 | pub const PhysFrameError = error{AddressNotAligned};
144 |
145 | /// Generates iterators for ranges of physical memory frames. Page size 4 KiB
146 | pub const PhysFrameIterator = struct {
147 |     /// Returns a range of frames, exclusive `end`.
148 |     pub fn range(start: PhysFrame, end: PhysFrame) PhysFrameRange {
149 |         return .{ .start = start, .end = end };
150 |     }
151 |
152 |     /// Returns a range of frames, inclusive `end`.
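    ///
    /// A sketch of walking an inclusive range (the addresses are arbitrary but
    /// 4 KiB aligned):
    ///
    ///     const first = try PhysFrame.fromStartAddress(x86_64.PhysAddr.initPanic(0x1000));
    ///     const last = try PhysFrame.fromStartAddress(x86_64.PhysAddr.initPanic(0x4000));
    ///     var frames = PhysFrameIterator.rangeInclusive(first, last);
    ///     while (frames.next()) |frame| {
    ///         // yields the frames at 0x1000, 0x2000, 0x3000 and 0x4000
    ///         _ = frame;
    ///     }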
153 |     pub fn rangeInclusive(start: PhysFrame, end: PhysFrame) PhysFrameRangeInclusive {
154 |         return .{ .start = start, .end = end };
155 |     }
156 |
157 |     comptime {
158 |         std.testing.refAllDecls(@This());
159 |     }
160 | };
161 |
162 | /// Generates iterators for ranges of physical memory frames. Page size 2 MiB
163 | pub const PhysFrameIterator2MiB = struct {
164 |     /// Returns a range of frames, exclusive `end`.
165 |     pub fn range(start: PhysFrame2MiB, end: PhysFrame2MiB) PhysFrameRange2MiB {
166 |         return .{ .start = start, .end = end };
167 |     }
168 |
169 |     /// Returns a range of frames, inclusive `end`.
170 |     pub fn rangeInclusive(start: PhysFrame2MiB, end: PhysFrame2MiB) PhysFrameRange2MiBInclusive {
171 |         return .{ .start = start, .end = end };
172 |     }
173 |
174 |     comptime {
175 |         std.testing.refAllDecls(@This());
176 |     }
177 | };
178 |
179 | /// Generates iterators for ranges of physical memory frames. Page size 1 GiB
180 | pub const PhysFrameIterator1GiB = struct {
181 |     /// Returns a range of frames, exclusive `end`.
182 |     pub fn range(start: PhysFrame1GiB, end: PhysFrame1GiB) PhysFrameRange1GiB {
183 |         return .{ .start = start, .end = end };
184 |     }
185 |
186 |     /// Returns a range of frames, inclusive `end`.
187 |     pub fn rangeInclusive(start: PhysFrame1GiB, end: PhysFrame1GiB) PhysFrameRange1GiBInclusive {
188 |         return .{ .start = start, .end = end };
189 |     }
190 |
191 |     comptime {
192 |         std.testing.refAllDecls(@This());
193 |     }
194 | };
195 |
196 | /// A range of physical memory frames, exclusive the upper bound. Page size 4 KiB
197 | pub const PhysFrameRange = struct {
198 |     /// The start of the range, inclusive.
199 |     start: ?PhysFrame,
200 |     /// The end of the range, exclusive.
201 |     end: PhysFrame,
202 |
203 |     /// Returns whether the range contains no frames.
204 |     pub fn isEmpty(self: PhysFrameRange) bool {
205 |         if (self.start) |x| {
206 |             return x.start_address.value >= self.end.start_address.value;
207 |         }
208 |         return true;
209 |     }
210 |
211 |     pub fn next(self: *PhysFrameRange) ?PhysFrame {
212 |         if (self.start) |start| {
213 |             if (start.start_address.value < self.end.start_address.value) {
214 |                 const frame = start;
215 |
216 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame.size.bytes()) catch null;
217 |
218 |                 if (opt_addr) |addr| {
219 |                     self.start = PhysFrame.containingAddress(addr);
220 |                 } else {
221 |                     self.start = null;
222 |                 }
223 |
224 |                 return frame;
225 |             }
226 |         }
227 |         return null;
228 |     }
229 |
230 |     comptime {
231 |         std.testing.refAllDecls(@This());
232 |     }
233 | };
234 |
235 | /// A range of physical memory frames, exclusive the upper bound. Page size 2 MiB
236 | pub const PhysFrameRange2MiB = struct {
237 |     /// The start of the range, inclusive.
238 |     start: ?PhysFrame2MiB,
239 |     /// The end of the range, exclusive.
240 |     end: PhysFrame2MiB,
241 |
242 |     /// Returns whether the range contains no frames.
243 |     pub fn isEmpty(self: PhysFrameRange2MiB) bool {
244 |         if (self.start) |x| {
245 |             return x.start_address.value >= self.end.start_address.value;
246 |         }
247 |         return true;
248 |     }
249 |
250 |     pub fn next(self: *PhysFrameRange2MiB) ?PhysFrame2MiB {
251 |         if (self.start) |start| {
252 |             if (start.start_address.value < self.end.start_address.value) {
253 |                 const frame = start;
254 |
255 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame2MiB.size.bytes()) catch null;
256 |
257 |                 if (opt_addr) |addr| {
258 |                     self.start = PhysFrame2MiB.containingAddress(addr);
259 |                 } else {
260 |                     self.start = null;
261 |                 }
262 |
263 |                 return frame;
264 |             }
265 |         }
266 |         return null;
267 |     }
268 |
269 |     comptime {
270 |         std.testing.refAllDecls(@This());
271 |     }
272 | };
273 |
274 | /// A range of physical memory frames, exclusive the upper bound. Page size 1 GiB
275 | pub const PhysFrameRange1GiB = struct {
276 |     /// The start of the range, inclusive.
277 |     start: ?PhysFrame1GiB,
278 |     /// The end of the range, exclusive.
279 |     end: PhysFrame1GiB,
280 |
281 |     /// Returns whether the range contains no frames.
282 |     pub fn isEmpty(self: PhysFrameRange1GiB) bool {
283 |         if (self.start) |x| {
284 |             return x.start_address.value >= self.end.start_address.value;
285 |         }
286 |         return true;
287 |     }
288 |
289 |     pub fn next(self: *PhysFrameRange1GiB) ?PhysFrame1GiB {
290 |         if (self.start) |start| {
291 |             if (start.start_address.value < self.end.start_address.value) {
292 |                 const frame = start;
293 |
294 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame1GiB.size.bytes()) catch null;
295 |
296 |                 if (opt_addr) |addr| {
297 |                     self.start = PhysFrame1GiB.containingAddress(addr);
298 |                 } else {
299 |                     self.start = null;
300 |                 }
301 |
302 |                 return frame;
303 |             }
304 |         }
305 |         return null;
306 |     }
307 |
308 |     comptime {
309 |         std.testing.refAllDecls(@This());
310 |     }
311 | };
312 |
313 | /// A range of physical memory frames, inclusive the upper bound. Page size 4 KiB
314 | pub const PhysFrameRangeInclusive = struct {
315 |     /// The start of the range, inclusive.
316 |     start: ?PhysFrame,
317 |     /// The end of the range, inclusive.
318 |     end: PhysFrame,
319 |
320 |     /// Returns whether the range contains no frames.
321 |     pub fn isEmpty(self: PhysFrameRangeInclusive) bool {
322 |         if (self.start) |x| {
323 |             return x.start_address.value > self.end.start_address.value;
324 |         }
325 |         return true;
326 |     }
327 |
328 |     pub fn next(self: *PhysFrameRangeInclusive) ?PhysFrame {
329 |         if (self.start) |start| {
330 |             if (start.start_address.value <= self.end.start_address.value) {
331 |                 const frame = start;
332 |
333 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame.size.bytes()) catch null;
334 |
335 |                 if (opt_addr) |addr| {
336 |                     self.start = PhysFrame.containingAddress(addr);
337 |                 } else {
338 |                     self.start = null;
339 |                 }
340 |
341 |                 return frame;
342 |             }
343 |         }
344 |         return null;
345 |     }
346 |
347 |     comptime {
348 |         std.testing.refAllDecls(@This());
349 |     }
350 | };
351 |
352 | /// A range of physical memory frames, inclusive the upper bound. Page size 2 MiB
353 | pub const PhysFrameRange2MiBInclusive = struct {
354 |     /// The start of the range, inclusive.
355 |     start: ?PhysFrame2MiB,
356 |     /// The end of the range, inclusive.
357 |     end: PhysFrame2MiB,
358 |
359 |     /// Returns whether the range contains no frames.
360 |     pub fn isEmpty(self: PhysFrameRange2MiBInclusive) bool {
361 |         if (self.start) |x| {
362 |             return x.start_address.value > self.end.start_address.value;
363 |         }
364 |         return true;
365 |     }
366 |
367 |     pub fn next(self: *PhysFrameRange2MiBInclusive) ?PhysFrame2MiB {
368 |         if (self.start) |start| {
369 |             if (start.start_address.value <= self.end.start_address.value) {
370 |                 const frame = start;
371 |
372 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame2MiB.size.bytes()) catch null;
373 |
374 |                 if (opt_addr) |addr| {
375 |                     self.start = PhysFrame2MiB.containingAddress(addr);
376 |                 } else {
377 |                     self.start = null;
378 |                 }
379 |
380 |                 return frame;
381 |             }
382 |         }
383 |         return null;
384 |     }
385 |
386 |     comptime {
387 |         std.testing.refAllDecls(@This());
388 |     }
389 | };
390 |
391 | /// A range of physical memory frames, inclusive the upper bound. Page size 1 GiB
392 | pub const PhysFrameRange1GiBInclusive = struct {
393 |     /// The start of the range, inclusive.
394 |     start: ?PhysFrame1GiB,
395 |     /// The end of the range, inclusive.
396 |     end: PhysFrame1GiB,
397 |
398 |     /// Returns whether the range contains no frames.
399 |     pub fn isEmpty(self: PhysFrameRange1GiBInclusive) bool {
400 |         if (self.start) |x| {
401 |             return x.start_address.value > self.end.start_address.value;
402 |         }
403 |         return true;
404 |     }
405 |
406 |     pub fn next(self: *PhysFrameRange1GiBInclusive) ?PhysFrame1GiB {
407 |         if (self.start) |start| {
408 |             if (start.start_address.value <= self.end.start_address.value) {
409 |                 const frame = start;
410 |
411 |                 const opt_addr = x86_64.PhysAddr.init(start.start_address.value + PhysFrame1GiB.size.bytes()) catch null;
412 |
413 |                 if (opt_addr) |addr| {
414 |                     self.start = PhysFrame1GiB.containingAddress(addr);
415 |                 } else {
416 |                     self.start = null;
417 |                 }
418 |
419 |                 return frame;
420 |             }
421 |         }
422 |         return null;
423 |     }
424 |
425 |     comptime {
426 |         std.testing.refAllDecls(@This());
427 |     }
428 | };
429 |
430 | test "PhysFrameIterator" {
431 |     var physAddrA = x86_64.PhysAddr.initPanic(0x000FFFFFFFFF0000);
432 |     physAddrA = physAddrA.alignDown(x86_64.structures.paging.PageSize.Size4KiB.bytes());
433 |
434 |     var physAddrB = x86_64.PhysAddr.initPanic(0x000FFFFFFFFFFFFF);
435 |     physAddrB = physAddrB.alignDown(x86_64.structures.paging.PageSize.Size4KiB.bytes());
436 |
437 |     const a = try PhysFrame.fromStartAddress(physAddrA);
438 |     const b = try PhysFrame.fromStartAddress(physAddrB);
439 |
440 |     var iterator = PhysFrameIterator.range(a, b);
441 |     var inclusive_iterator = PhysFrameIterator.rangeInclusive(a, b);
442 |
443 |     try std.testing.expect(!iterator.isEmpty());
444 |     try std.testing.expect(!inclusive_iterator.isEmpty());
445 |
446 |     var count: usize = 0;
447 |     while (iterator.next()) |_| {
448 |         count += 1;
449 |     }
450 |     try std.testing.expectEqual(@as(usize, 15), count);
451 |
452 |     count = 0;
453 |     while (inclusive_iterator.next()) |_| {
454 |         count += 1;
455 |     }
456 |     try std.testing.expectEqual(@as(usize, 16), count);
457 |
458 |     try std.testing.expect(iterator.isEmpty());
459 |     try std.testing.expect(inclusive_iterator.isEmpty());
460 | }
461 |
462 | comptime {
463 |     std.testing.refAllDecls(@This());
464 | }
465 |
--------------------------------------------------------------------------------
/src/structures/paging/page.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 |
5 | const 
PageTableIndex = x86_64.structures.paging.PageTableIndex;
6 | const PageTableLevel = x86_64.structures.paging.PageTableLevel;
7 |
8 | const SIZE_4KiB_STR: []const u8 = "4KiB";
9 | const SIZE_2MiB_STR: []const u8 = "2MiB";
10 | const SIZE_1GiB_STR: []const u8 = "1GiB";
11 |
12 | pub const PageSize = enum {
13 |     Size4KiB,
14 |     Size2MiB,
15 |     Size1GiB,
16 |
17 |     pub fn bytes(self: PageSize) u64 {
18 |         return switch (self) {
19 |             .Size4KiB => 4096,
20 |             .Size2MiB => 4096 * 512,
21 |             .Size1GiB => 4096 * 512 * 512,
22 |         };
23 |     }
24 |
25 |     pub fn sizeString(self: PageSize) []const u8 {
26 |         return switch (self) {
27 |             .Size4KiB => SIZE_4KiB_STR,
28 |             .Size2MiB => SIZE_2MiB_STR,
29 |             .Size1GiB => SIZE_1GiB_STR,
30 |         };
31 |     }
32 |
33 |     pub fn isGiantPage(self: PageSize) bool {
34 |         return self == .Size1GiB;
35 |     }
36 | };
37 |
38 | /// A virtual memory page. Page size 4 KiB
39 | pub const Page = extern struct {
40 |     const page_size = PageSize.Size4KiB;
41 |     const bytes: u64 = page_size.bytes();
42 |
43 |     start_address: x86_64.VirtAddr,
44 |
45 |     /// Returns the page that starts at the given virtual address.
46 |     ///
47 |     /// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
48 |     pub fn fromStartAddress(address: x86_64.VirtAddr) PageError!Page {
49 |         if (!address.isAligned(page_size.bytes())) {
50 |             return PageError.AddressNotAligned;
51 |         }
52 |         return containingAddress(address);
53 |     }
54 |
55 |     /// Returns the page that starts at the given virtual address.
56 |     pub fn fromStartAddressUnchecked(address: x86_64.VirtAddr) Page {
57 |         return .{ .start_address = address };
58 |     }
59 |
60 |     /// Returns the page that contains the given virtual address.
61 |     pub fn containingAddress(address: x86_64.VirtAddr) Page {
62 |         return .{ .start_address = address.alignDown(page_size.bytes()) };
63 |     }
64 |
65 |     /// Returns the level 4 page table index of this page.
66 |     pub fn p4Index(self: Page) PageTableIndex {
67 |         return self.start_address.p4Index();
68 |     }
69 |
70 |     /// Returns the level 3 page table index of this page.
71 |     pub fn p3Index(self: Page) PageTableIndex {
72 |         return self.start_address.p3Index();
73 |     }
74 |
75 |     /// Returns the level 2 page table index of this page.
76 |     pub fn p2Index(self: Page) PageTableIndex {
77 |         return self.start_address.p2Index();
78 |     }
79 |
80 |     /// Returns the level 1 page table index of this page.
81 |     pub fn p1Index(self: Page) PageTableIndex {
82 |         return self.start_address.p1Index();
83 |     }
84 |
85 |     /// Returns the page table index of this page at the given level.
86 |     pub fn pageTableIndex(self: Page, level: PageTableLevel) PageTableIndex {
87 |         return self.start_address.pageTableIndex(level);
88 |     }
89 |
90 |     pub fn format(value: Page, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
91 |         _ = options;
92 |         _ = fmt;
93 |         try writer.print("Page[" ++ page_size.sizeString() ++ "](0x{x})", .{value.start_address.value});
94 |     }
95 |
96 |     comptime {
97 |         std.testing.refAllDecls(@This());
98 |     }
99 | };
100 |
101 | /// A virtual memory page. Page size 2 MiB
102 | pub const Page2MiB = extern struct {
103 |     const page_size = PageSize.Size2MiB;
104 |     const bytes: u64 = page_size.bytes();
105 |
106 |     start_address: x86_64.VirtAddr,
107 |
108 |     /// Returns the page that starts at the given virtual address.
109 |     ///
110 |     /// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
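    ///
    /// For example, a 4 KiB-aligned but not 2 MiB-aligned address is rejected
    /// (a sketch; the address is illustrative):
    ///
    ///     const misaligned = x86_64.VirtAddr.initPanic(0x1000);
    ///     try std.testing.expectError(PageError.AddressNotAligned, Page2MiB.fromStartAddress(misaligned));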
111 | pub fn fromStartAddress(address: x86_64.VirtAddr) PageError!Page2MiB { 112 | if (!address.isAligned(page_size.bytes())) { 113 | return PageError.AddressNotAligned; 114 | } 115 | return containingAddress(address); 116 | } 117 | 118 | /// Returns the page that starts at the given virtual address. 119 | pub fn fromStartAddressUnchecked(address: x86_64.VirtAddr) Page2MiB { 120 | return .{ .start_address = address }; 121 | } 122 | 123 | /// Returns the page that contains the given virtual address. 124 | pub fn containingAddress(address: x86_64.VirtAddr) Page2MiB { 125 | return .{ .start_address = address.alignDown(page_size.bytes()) }; 126 | } 127 | 128 | /// Returns the level 4 page table index of this page. 129 | pub fn p4Index(self: Page2MiB) PageTableIndex { 130 | return self.start_address.p4Index(); 131 | } 132 | 133 | /// Returns the level 3 page table index of this page. 134 | pub fn p3Index(self: Page2MiB) PageTableIndex { 135 | return self.start_address.p3Index(); 136 | } 137 | 138 | /// Returns the level 2 page table index of this page. 139 | pub fn p2Index(self: Page2MiB) PageTableIndex { 140 | return self.start_address.p2Index(); 141 | } 142 | 143 | pub fn format(value: Page2MiB, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 144 | _ = options; 145 | _ = fmt; 146 | try writer.print("Page[" ++ page_size.sizeString() ++ "](0x{x})", .{value.start_address.value}); 147 | } 148 | 149 | comptime { 150 | std.testing.refAllDecls(@This()); 151 | } 152 | }; 153 | 154 | /// A virtual memory page. Page size 1 GiB 155 | pub const Page1GiB = extern struct { 156 | const page_size = PageSize.Size1GiB; 157 | const bytes: u64 = page_size.bytes(); 158 | 159 | start_address: x86_64.VirtAddr, 160 | 161 | /// Returns the page that starts at the given virtual address. 162 | /// 163 | /// Returns an error if the address is not correctly aligned (i.e. is not a valid page start). 164 | pub fn fromStartAddress(address: x86_64.VirtAddr) PageError!Page1GiB { 165 | if (!address.isAligned(page_size.bytes())) { 166 | return PageError.AddressNotAligned; 167 | } 168 | return containingAddress(address); 169 | } 170 | 171 | /// Returns the page that starts at the given virtual address. 172 | pub fn fromStartAddressUnchecked(address: x86_64.VirtAddr) Page1GiB { 173 | return .{ .start_address = address }; 174 | } 175 | 176 | /// Returns the page that contains the given virtual address. 177 | pub fn containingAddress(address: x86_64.VirtAddr) Page1GiB { 178 | return .{ .start_address = address.alignDown(page_size.bytes()) }; 179 | } 180 | 181 | /// Returns the level 4 page table index of this page. 182 | pub fn p4Index(self: Page1GiB) PageTableIndex { 183 | return self.start_address.p4Index(); 184 | } 185 | 186 | /// Returns the level 3 page table index of this page. 187 | pub fn p3Index(self: Page1GiB) PageTableIndex { 188 | return self.start_address.p3Index(); 189 | } 190 | 191 | pub fn format(value: Page1GiB, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { 192 | _ = options; 193 | _ = fmt; 194 | try writer.print("Page[" ++ page_size.sizeString() ++ "](0x{x})", .{value.start_address.value}); 195 | } 196 | 197 | comptime { 198 | std.testing.refAllDecls(@This()); 199 | } 200 | }; 201 | 202 | pub const PageError = error{AddressNotAligned}; 203 | 204 | /// Returns the 1GiB memory page with the specified page table indices. 
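///
/// For illustration (the index values are arbitrary, and `PageTableIndex.init`
/// is assumed here to construct an index from an integer): p4 index 1 and
/// p3 index 2 select the 1 GiB page starting at (1 << 39) | (2 << 30):
///
///     const page = pageFromTableIndices1GiB(PageTableIndex.init(1), PageTableIndex.init(2));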
205 | pub fn pageFromTableIndices1GiB(p4_index: PageTableIndex, p3_index: PageTableIndex) Page1GiB {
206 |     var addr: u64 = 0;
207 |     bitjuggle.setBits(&addr, 39, 9, p4_index.value);
208 |     bitjuggle.setBits(&addr, 30, 9, p3_index.value);
209 |     return Page1GiB.containingAddress(x86_64.VirtAddr.initPanic(addr));
210 | }
211 |
212 | /// Returns the 2MiB memory page with the specified page table indices.
213 | pub fn pageFromTableIndices2MiB(p4_index: PageTableIndex, p3_index: PageTableIndex, p2_index: PageTableIndex) Page2MiB {
214 |     var addr: u64 = 0;
215 |     bitjuggle.setBits(&addr, 39, 9, p4_index.value);
216 |     bitjuggle.setBits(&addr, 30, 9, p3_index.value);
217 |     bitjuggle.setBits(&addr, 21, 9, p2_index.value);
218 |     return Page2MiB.containingAddress(x86_64.VirtAddr.initPanic(addr));
219 | }
220 |
221 | /// Returns the 4KiB memory page with the specified page table indices.
222 | pub fn pageFromTableIndices(p4_index: PageTableIndex, p3_index: PageTableIndex, p2_index: PageTableIndex, p1_index: PageTableIndex) Page {
223 |     var addr: u64 = 0;
224 |     bitjuggle.setBits(&addr, 39, 9, p4_index.value);
225 |     bitjuggle.setBits(&addr, 30, 9, p3_index.value);
226 |     bitjuggle.setBits(&addr, 21, 9, p2_index.value);
227 |     bitjuggle.setBits(&addr, 12, 9, p1_index.value);
228 |     return Page.containingAddress(x86_64.VirtAddr.initPanic(addr));
229 | }
230 |
231 | /// A range of pages, exclusive the upper bound. Page size 4 KiB
232 | pub const PageRange = struct {
233 |     /// The start of the range, inclusive.
234 |     start: Page,
235 |     /// The end of the range, exclusive.
236 |     end: Page,
237 |
238 |     /// Returns whether the range contains no pages.
239 |     pub fn isEmpty(self: PageRange) bool {
240 |         return self.start.start_address.value >= self.end.start_address.value;
241 |     }
242 |
243 |     pub fn next(self: *PageRange) ?Page {
244 |         if (self.start.start_address.value < self.end.start_address.value) {
245 |             const page = self.start;
246 |             self.start = Page.containingAddress(.{ .value = self.start.start_address.value + Page.bytes });
247 |             return page;
248 |         }
249 |         return null;
250 |     }
251 |
252 |     comptime {
253 |         std.testing.refAllDecls(@This());
254 |     }
255 | };
256 |
257 | /// A range of pages, exclusive the upper bound. Page size 2 MiB
258 | pub const PageRange2MiB = struct {
259 |     /// The start of the range, inclusive.
260 |     start: Page2MiB,
261 |     /// The end of the range, exclusive.
262 |     end: Page2MiB,
263 |
264 |     /// Returns whether the range contains no pages.
265 |     pub fn isEmpty(self: PageRange2MiB) bool {
266 |         return self.start.start_address.value >= self.end.start_address.value;
267 |     }
268 |
269 |     pub fn next(self: *PageRange2MiB) ?Page2MiB {
270 |         if (self.start.start_address.value < self.end.start_address.value) {
271 |             const page = self.start;
272 |             self.start = Page2MiB.containingAddress(.{ .value = self.start.start_address.value + Page2MiB.bytes });
273 |             return page;
274 |         }
275 |         return null;
276 |     }
277 |
278 |     comptime {
279 |         std.testing.refAllDecls(@This());
280 |     }
281 | };
282 |
283 | /// A range of pages, exclusive the upper bound. Page size 1 GiB
284 | pub const PageRange1GiB = struct {
285 |     /// The start of the range, inclusive.
286 |     start: Page1GiB,
287 |     /// The end of the range, exclusive.
288 |     end: Page1GiB,
289 |
290 |     /// Returns whether the range contains no pages.
291 |     pub fn isEmpty(self: PageRange1GiB) bool {
292 |         return self.start.start_address.value >= self.end.start_address.value;
293 |     }
294 |
295 |     pub fn next(self: *PageRange1GiB) ?Page1GiB {
296 |         if (self.start.start_address.value < self.end.start_address.value) {
297 |             const page = self.start;
298 |             self.start = Page1GiB.containingAddress(.{ .value = self.start.start_address.value + Page1GiB.bytes });
299 |             return page;
300 |         }
301 |         return null;
302 |     }
303 |
304 |     comptime {
305 |         std.testing.refAllDecls(@This());
306 |     }
307 | };
308 |
309 | /// A range of pages, inclusive the upper bound. Page size 4 KiB
310 | pub const PageRangeInclusive = struct {
311 |     /// The start of the range, inclusive.
312 |     start: Page,
313 |     /// The end of the range, inclusive.
314 |     end: Page,
315 |
316 |     /// Returns whether the range contains no pages.
317 |     pub fn isEmpty(self: PageRangeInclusive) bool {
318 |         return self.start.start_address.value > self.end.start_address.value;
319 |     }
320 |
321 |     pub fn next(self: *PageRangeInclusive) ?Page {
322 |         if (self.start.start_address.value <= self.end.start_address.value) {
323 |             const page = self.start;
324 |             self.start = Page.containingAddress(x86_64.VirtAddr{ .value = self.start.start_address.value + Page.bytes });
325 |             return page;
326 |         }
327 |         return null;
328 |     }
329 |
330 |     comptime {
331 |         std.testing.refAllDecls(@This());
332 |     }
333 | };
334 |
335 | /// A range of pages, inclusive the upper bound. Page size 2 MiB
336 | pub const PageRange2MiBInclusive = struct {
337 |     /// The start of the range, inclusive.
338 |     start: Page2MiB,
339 |     /// The end of the range, inclusive.
340 |     end: Page2MiB,
341 |
342 |     /// Returns whether the range contains no pages.
343 |     pub fn isEmpty(self: PageRange2MiBInclusive) bool {
344 |         return self.start.start_address.value > self.end.start_address.value;
345 |     }
346 |
347 |     pub fn next(self: *PageRange2MiBInclusive) ?Page2MiB {
348 |         if (self.start.start_address.value <= self.end.start_address.value) {
349 |             const page = self.start;
350 |             self.start = Page2MiB.containingAddress(x86_64.VirtAddr{ .value = self.start.start_address.value + Page2MiB.bytes });
351 |             return page;
352 |         }
353 |         return null;
354 |     }
355 |
356 |     comptime {
357 |         std.testing.refAllDecls(@This());
358 |     }
359 | };
360 |
361 | /// A range of pages, inclusive the upper bound. Page size 1 GiB
362 | pub const PageRange1GiBInclusive = struct {
363 |     /// The start of the range, inclusive.
364 |     start: Page1GiB,
365 |     /// The end of the range, inclusive.
366 |     end: Page1GiB,
367 |
368 |     /// Returns whether the range contains no pages.
369 |     pub fn isEmpty(self: PageRange1GiBInclusive) bool {
370 |         return self.start.start_address.value > self.end.start_address.value;
371 |     }
372 |
373 |     pub fn next(self: *PageRange1GiBInclusive) ?Page1GiB {
374 |         if (self.start.start_address.value <= self.end.start_address.value) {
375 |             const page = self.start;
376 |             self.start = Page1GiB.containingAddress(x86_64.VirtAddr{ .value = self.start.start_address.value + Page1GiB.bytes });
377 |             return page;
378 |         }
379 |         return null;
380 |     }
381 |
382 |     comptime {
383 |         std.testing.refAllDecls(@This());
384 |     }
385 | };
386 |
387 | /// Generates iterators for ranges of virtual memory pages. Page size 4 KiB
388 | pub const PageIterator = struct {
389 |     /// Returns a range of pages, exclusive `end`.
390 |     pub fn range(start: Page, end: Page) PageRange {
391 |         return .{ .start = start, .end = end };
392 |     }
393 |
394 |     /// Returns a range of pages, inclusive `end`.
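    ///
    /// A sketch mirroring the frame iterators (the addresses are illustrative):
    ///
    ///     const first = Page.containingAddress(x86_64.VirtAddr.initPanic(0x1000));
    ///     const last = Page.containingAddress(x86_64.VirtAddr.initPanic(0x3000));
    ///     var pages = PageIterator.rangeInclusive(first, last);
    ///     while (pages.next()) |page| {
    ///         // visits the pages at 0x1000, 0x2000 and 0x3000
    ///         _ = page;
    ///     }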
395 |     pub fn rangeInclusive(start: Page, end: Page) PageRangeInclusive {
396 |         return .{ .start = start, .end = end };
397 |     }
398 |
399 |     comptime {
400 |         std.testing.refAllDecls(@This());
401 |     }
402 | };
403 |
404 | /// Generates iterators for ranges of virtual memory pages. Page size 2 MiB
405 | pub const PageIterator2MiB = struct {
406 |     /// Returns a range of pages, exclusive `end`.
407 |     pub fn range(start: Page2MiB, end: Page2MiB) PageRange2MiB {
408 |         return .{ .start = start, .end = end };
409 |     }
410 |
411 |     /// Returns a range of pages, inclusive `end`.
412 |     pub fn rangeInclusive(start: Page2MiB, end: Page2MiB) PageRange2MiBInclusive {
413 |         return .{ .start = start, .end = end };
414 |     }
415 |
416 |     comptime {
417 |         std.testing.refAllDecls(@This());
418 |     }
419 | };
420 |
421 | /// Generates iterators for ranges of virtual memory pages. Page size 1 GiB
422 | pub const PageIterator1GiB = struct {
423 |     /// Returns a range of pages, exclusive `end`.
424 |     pub fn range(start: Page1GiB, end: Page1GiB) PageRange1GiB {
425 |         return .{ .start = start, .end = end };
426 |     }
427 |
428 |     /// Returns a range of pages, inclusive `end`.
429 |     pub fn rangeInclusive(start: Page1GiB, end: Page1GiB) PageRange1GiBInclusive {
430 |         return .{ .start = start, .end = end };
431 |     }
432 |
433 |     comptime {
434 |         std.testing.refAllDecls(@This());
435 |     }
436 | };
437 |
438 | test "PageIterator" {
439 |     var virtAddrA = x86_64.VirtAddr.initPanic(0x00000FFFFFFF0000);
440 |     virtAddrA = virtAddrA.alignDown(x86_64.structures.paging.PageSize.Size4KiB.bytes());
441 |
442 |     var virtAddrB = x86_64.VirtAddr.initPanic(0x00000FFFFFFFFFFF);
443 |     virtAddrB = virtAddrB.alignDown(x86_64.structures.paging.PageSize.Size4KiB.bytes());
444 |
445 |     const a = try Page.fromStartAddress(virtAddrA);
446 |     const b = try Page.fromStartAddress(virtAddrB);
447 |
448 |     var iterator = PageIterator.range(a, b);
449 |     var inclusive_iterator = PageIterator.rangeInclusive(a, b);
450 |
451 |     try std.testing.expect(!iterator.isEmpty());
452 |     try std.testing.expect(!inclusive_iterator.isEmpty());
453 |
454 |     var count: usize = 0;
455 |     while (iterator.next()) |_| {
456 |         count += 1;
457 |     }
458 |     try std.testing.expectEqual(@as(usize, 15), count);
459 |
460 |     count = 0;
461 |     while (inclusive_iterator.next()) |_| {
462 |         count += 1;
463 |     }
464 |     try std.testing.expectEqual(@as(usize, 16), count);
465 |
466 |     try std.testing.expect(iterator.isEmpty());
467 |     try std.testing.expect(inclusive_iterator.isEmpty());
468 | }
469 |
470 | comptime {
471 |     std.testing.refAllDecls(@This());
472 | }
473 |
--------------------------------------------------------------------------------
/src/structures/paging/mapping/mapping.zig:
--------------------------------------------------------------------------------
1 | const x86_64 = @import("../../../index.zig");
2 | const bitjuggle = @import("bitjuggle");
3 | const std = @import("std");
4 |
5 | pub const OffsetPageTable = @import("mapped_page_table.zig").OffsetPageTable;
6 | pub const MappedPageTable = @import("mapped_page_table.zig").MappedPageTable;
7 | pub const RecursivePageTable = @import("recursive_page_table.zig").RecursivePageTable;
8 |
9 | const paging = x86_64.structures.paging;
10 |
11 | pub const Mapper = struct {
12 |     // This is the most annoying code I've ever written...
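    // The `z_impl_*` fields below form a hand-rolled vtable: each one is a
    // function pointer that a concrete implementation (e.g. the `OffsetPageTable`
    // re-exported above) fills in, and the public wrapper methods further down
    // simply dispatch through them.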
13 | // All just to have something that is trivial in most languages; an interface 14 | z_impl_mapToWithTableFlags1GiB: fn ( 15 | mapper: *Mapper, 16 | page: paging.Page1GiB, 17 | frame: paging.PhysFrame1GiB, 18 | flags: paging.PageTableFlags, 19 | parent_table_flags: paging.PageTableFlags, 20 | frame_allocator: *paging.FrameAllocator, 21 | ) MapToError!MapperFlush1GiB, 22 | 23 | z_impl_unmap1GiB: fn ( 24 | mapper: *Mapper, 25 | page: paging.Page1GiB, 26 | ) UnmapError!UnmapResult1GiB, 27 | 28 | z_impl_updateFlags1GiB: fn ( 29 | mapper: *Mapper, 30 | page: paging.Page1GiB, 31 | flags: paging.PageTableFlags, 32 | ) FlagUpdateError!MapperFlush1GiB, 33 | 34 | z_impl_setFlagsP4Entry1GiB: fn ( 35 | mapper: *Mapper, 36 | page: paging.Page1GiB, 37 | flags: paging.PageTableFlags, 38 | ) FlagUpdateError!MapperFlushAll, 39 | 40 | z_impl_translatePage1GiB: fn ( 41 | mapper: *Mapper, 42 | page: paging.Page1GiB, 43 | ) TranslateError!paging.PhysFrame1GiB, 44 | 45 | z_impl_mapToWithTableFlags2MiB: fn ( 46 | mapper: *Mapper, 47 | page: paging.Page2MiB, 48 | frame: paging.PhysFrame2MiB, 49 | flags: paging.PageTableFlags, 50 | parent_table_flags: paging.PageTableFlags, 51 | frame_allocator: *paging.FrameAllocator, 52 | ) MapToError!MapperFlush2MiB, 53 | 54 | z_impl_unmap2MiB: fn ( 55 | mapper: *Mapper, 56 | page: paging.Page2MiB, 57 | ) UnmapError!UnmapResult2MiB, 58 | 59 | z_impl_updateFlags2MiB: fn ( 60 | mapper: *Mapper, 61 | page: paging.Page2MiB, 62 | flags: paging.PageTableFlags, 63 | ) FlagUpdateError!MapperFlush2MiB, 64 | 65 | z_impl_setFlagsP4Entry2MiB: fn ( 66 | mapper: *Mapper, 67 | page: paging.Page2MiB, 68 | flags: paging.PageTableFlags, 69 | ) FlagUpdateError!MapperFlushAll, 70 | 71 | z_impl_setFlagsP3Entry2MiB: fn ( 72 | mapper: *Mapper, 73 | page: paging.Page2MiB, 74 | flags: paging.PageTableFlags, 75 | ) FlagUpdateError!MapperFlushAll, 76 | 77 | z_impl_translatePage2MiB: fn ( 78 | mapper: *Mapper, 79 | page: paging.Page2MiB, 80 | ) TranslateError!paging.PhysFrame2MiB, 81 | 82 | z_impl_mapToWithTableFlags: fn ( 83 | mapper: *Mapper, 84 | page: paging.Page, 85 | frame: paging.PhysFrame, 86 | flags: paging.PageTableFlags, 87 | parent_table_flags: paging.PageTableFlags, 88 | frame_allocator: *paging.FrameAllocator, 89 | ) MapToError!MapperFlush, 90 | 91 | z_impl_unmap: fn ( 92 | mapper: *Mapper, 93 | page: paging.Page, 94 | ) UnmapError!UnmapResult, 95 | 96 | z_impl_updateFlags: fn ( 97 | mapper: *Mapper, 98 | page: paging.Page, 99 | flags: paging.PageTableFlags, 100 | ) FlagUpdateError!MapperFlush, 101 | 102 | z_impl_setFlagsP4Entry: fn ( 103 | mapper: *Mapper, 104 | page: paging.Page, 105 | flags: paging.PageTableFlags, 106 | ) FlagUpdateError!MapperFlushAll, 107 | 108 | z_impl_setFlagsP3Entry: fn ( 109 | mapper: *Mapper, 110 | page: paging.Page, 111 | flags: paging.PageTableFlags, 112 | ) FlagUpdateError!MapperFlushAll, 113 | 114 | z_impl_setFlagsP2Entry: fn ( 115 | mapper: *Mapper, 116 | page: paging.Page, 117 | flags: paging.PageTableFlags, 118 | ) FlagUpdateError!MapperFlushAll, 119 | 120 | z_impl_translatePage: fn ( 121 | mapper: *Mapper, 122 | page: paging.Page, 123 | ) TranslateError!paging.PhysFrame, 124 | 125 | z_impl_translate: fn ( 126 | mapper: *Mapper, 127 | addr: x86_64.VirtAddr, 128 | ) TranslateError!TranslateResult, 129 | 130 | z_impl_clean_up: fn ( 131 | mapper: *Mapper, 132 | frame_allocator: *paging.FrameAllocator, 133 | ) void, 134 | 135 | z_impl_clean_up_range: fn ( 136 | mapper: *Mapper, 137 | range: paging.PageRangeInclusive, 138 | frame_allocator: 
*paging.FrameAllocator,
139 |     ) void,
140 |
141 |     /// Remove all empty P1-P3 tables
142 |     ///
143 |     /// The caller has to guarantee that it's safe to free page table frames:
144 |     /// All page table frames must only be used once and only in this page table
145 |     /// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
146 |     pub inline fn cleanUp(mapper: *Mapper, frame_allocator: *paging.FrameAllocator) void {
147 |         mapper.z_impl_clean_up(mapper, frame_allocator);
148 |     }
149 |
150 |     /// Remove all empty P1-P3 tables in a certain range
151 |     ///
152 |     /// The caller has to guarantee that it's safe to free page table frames:
153 |     /// All page table frames must only be used once and only in this page table
154 |     /// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
155 |     pub inline fn cleanUpRange(mapper: *Mapper, range: paging.PageRangeInclusive, frame_allocator: *paging.FrameAllocator) void {
156 |         mapper.z_impl_clean_up_range(mapper, range, frame_allocator);
157 |     }
158 |
159 |     /// Creates a new mapping in the page table.
160 |     ///
161 |     /// This function might need additional physical frames to create new page tables. These
162 |     /// frames are allocated from the `allocator` argument. At most three frames are required.
163 |     ///
164 |     /// Parent page table entries are automatically updated with `PRESENT | WRITABLE | USER_ACCESSIBLE`
165 |     /// if present in the `PageTableFlags`. Depending on the used mapper implementation
166 |     /// the `PRESENT` and `WRITABLE` flags might be set for parent tables,
167 |     /// even if they are not set in `PageTableFlags`.
168 |     ///
169 |     /// The `mapToWithTableFlags` method gives explicit control over the parent page table flags.
170 |     pub inline fn mapTo(
171 |         mapper: *Mapper,
172 |         page: paging.Page,
173 |         frame: paging.PhysFrame,
174 |         flags: paging.PageTableFlags,
175 |         frame_allocator: *paging.FrameAllocator,
176 |     ) MapToError!MapperFlush {
177 |         return mapper.z_impl_mapToWithTableFlags(mapper, page, frame, flags, flags, frame_allocator);
178 |     }
179 |
180 |     /// Creates a new mapping in the page table.
181 |     ///
182 |     /// This function might need additional physical frames to create new page tables. These
183 |     /// frames are allocated from the `allocator` argument. At most three frames are required.
184 |     ///
185 |     /// Parent page table entries are automatically updated with `PRESENT | WRITABLE | USER_ACCESSIBLE`
186 |     /// if present in the `PageTableFlags`. Depending on the used mapper implementation
187 |     /// the `PRESENT` and `WRITABLE` flags might be set for parent tables,
188 |     /// even if they are not set in `PageTableFlags`.
189 |     ///
190 |     /// The `mapToWithTableFlags` method gives explicit control over the parent page table flags.
191 |     pub inline fn mapTo2MiB(
192 |         mapper: *Mapper,
193 |         page: paging.Page2MiB,
194 |         frame: paging.PhysFrame2MiB,
195 |         flags: paging.PageTableFlags,
196 |         frame_allocator: *paging.FrameAllocator,
197 |     ) MapToError!MapperFlush2MiB {
198 |         return mapper.z_impl_mapToWithTableFlags2MiB(mapper, page, frame, flags, flags, frame_allocator);
199 |     }
200 |
201 |     /// Creates a new mapping in the page table.
202 |     ///
203 |     /// This function might need additional physical frames to create new page tables. These
204 |     /// frames are allocated from the `allocator` argument. At most three frames are required.
205 |     ///
206 |     /// Parent page table entries are automatically updated with `PRESENT | WRITABLE | USER_ACCESSIBLE`
207 |     /// if present in the `PageTableFlags`. Depending on the used mapper implementation
208 |     /// the `PRESENT` and `WRITABLE` flags might be set for parent tables,
209 |     /// even if they are not set in `PageTableFlags`.
210 |     ///
211 |     /// The `mapToWithTableFlags` method gives explicit control over the parent page table flags.
212 |     pub inline fn mapTo1GiB(
213 |         mapper: *Mapper,
214 |         page: paging.Page1GiB,
215 |         frame: paging.PhysFrame1GiB,
216 |         flags: paging.PageTableFlags,
217 |         frame_allocator: *paging.FrameAllocator,
218 |     ) MapToError!MapperFlush1GiB {
219 |         return mapper.z_impl_mapToWithTableFlags1GiB(mapper, page, frame, flags, flags, frame_allocator);
220 |     }
221 |
222 |     /// Maps the given frame to the virtual page with the same address.
223 |     pub fn identityMap(
224 |         mapper: *Mapper,
225 |         frame: paging.PhysFrame,
226 |         flags: paging.PageTableFlags,
227 |         frame_allocator: *paging.FrameAllocator,
228 |     ) MapToError!MapperFlush {
229 |         return mapper.mapTo(
230 |             paging.Page.containingAddress(x86_64.VirtAddr.initPanic(frame.start_address.value)),
231 |             frame,
232 |             flags,
233 |             frame_allocator,
234 |         );
235 |     }
236 |
237 |     /// Maps the given frame to the virtual page with the same address.
238 |     pub fn identityMap2MiB(
239 |         mapper: *Mapper,
240 |         frame: paging.PhysFrame2MiB,
241 |         flags: paging.PageTableFlags,
242 |         frame_allocator: *paging.FrameAllocator,
243 |     ) MapToError!MapperFlush2MiB {
244 |         return mapper.mapTo2MiB(
245 |             paging.Page2MiB.containingAddress(x86_64.VirtAddr.initPanic(frame.start_address.value)),
246 |             frame,
247 |             flags,
248 |             frame_allocator,
249 |         );
250 |     }
251 |
252 |     /// Maps the given frame to the virtual page with the same address.
253 |     pub fn identityMap1GiB(
254 |         mapper: *Mapper,
255 |         frame: paging.PhysFrame1GiB,
256 |         flags: paging.PageTableFlags,
257 |         frame_allocator: *paging.FrameAllocator,
258 |     ) MapToError!MapperFlush1GiB {
259 |         return mapper.mapTo1GiB(
260 |             paging.Page1GiB.containingAddress(x86_64.VirtAddr.initPanic(frame.start_address.value)),
261 |             frame,
262 |             flags,
263 |             frame_allocator,
264 |         );
265 |     }
266 |
267 |     /// Translates the given virtual address to the physical address that it maps to.
268 |     ///
269 |     /// Returns `null` if there is no valid mapping for the given address.
270 |     ///
271 |     /// This is a convenience method. For more information about a mapping see the
272 |     /// `translate` function.
273 |     pub fn translateAddr(mapper: *Mapper, addr: x86_64.VirtAddr) ?x86_64.PhysAddr {
274 |         return switch (mapper.translate(addr) catch return null) {
275 |             .Frame4KiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset),
276 |             .Frame2MiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset),
277 |             .Frame1GiB => |res| x86_64.PhysAddr.initPanic(res.frame.start_address.value + res.offset),
278 |         };
279 |     }
280 |
281 |     /// Return the frame that the given virtual address is mapped to and the offset within that
282 |     /// frame.
283 |     ///
284 |     /// If the given address has a valid mapping, the mapped frame and the offset within that
285 |     /// frame is returned. Otherwise an error value is returned.
286 |     pub inline fn translate(
287 |         mapper: *Mapper,
288 |         addr: x86_64.VirtAddr,
289 |     ) TranslateError!TranslateResult {
290 |         return mapper.z_impl_translate(mapper, addr);
291 |     }
292 |
293 |     /// Return the frame that the specified page is mapped to.
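    ///
    /// A sketch (assumes `mapper` points at an initialized implementation and
    /// that `page` was mapped earlier; `TranslateError.NotMapped` is returned
    /// otherwise):
    ///
    ///     const frame = try mapper.translatePage(page);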
294 | pub inline fn translatePage( 295 | mapper: *Mapper, 296 | page: paging.Page, 297 | ) TranslateError!paging.PhysFrame { 298 | return mapper.z_impl_translatePage(mapper, page); 299 | } 300 | 301 | /// Return the frame that the specified page is mapped to. 302 | pub inline fn translatePage2MiB( 303 | mapper: *Mapper, 304 | page: paging.Page2MiB, 305 | ) TranslateError!paging.PhysFrame2MiB { 306 | return mapper.z_impl_translatePage2MiB(mapper, page); 307 | } 308 | 309 | /// Return the frame that the specified page is mapped to. 310 | pub inline fn translatePage1GiB( 311 | mapper: *Mapper, 312 | page: paging.Page1GiB, 313 | ) TranslateError!paging.PhysFrame1GiB { 314 | return mapper.z_impl_translatePage1GiB(mapper, page); 315 | } 316 | 317 | /// Set the flags of an existing page table level 2 entry 318 | pub inline fn setFlagsP2Entry( 319 | mapper: *Mapper, 320 | page: paging.Page, 321 | flags: paging.PageTableFlags, 322 | ) FlagUpdateError!MapperFlushAll { 323 | return mapper.z_impl_setFlagsP2Entry(mapper, page, flags); 324 | } 325 | 326 | /// Set the flags of an existing page table level 3 entry 327 | pub inline fn setFlagsP3Entry( 328 | mapper: *Mapper, 329 | page: paging.Page, 330 | flags: paging.PageTableFlags, 331 | ) FlagUpdateError!MapperFlushAll { 332 | return mapper.z_impl_setFlagsP3Entry(mapper, page, flags); 333 | } 334 | 335 | /// Set the flags of an existing page table level 3 entry 336 | pub inline fn setFlagsP3Entry2MiB( 337 | mapper: *Mapper, 338 | page: paging.Page2MiB, 339 | flags: paging.PageTableFlags, 340 | ) FlagUpdateError!MapperFlushAll { 341 | return mapper.z_impl_setFlagsP3Entry2MiB(mapper, page, flags); 342 | } 343 | 344 | /// Set the flags of an existing page table level 4 entry 345 | pub inline fn setFlagsP4Entry( 346 | mapper: *Mapper, 347 | page: paging.Page, 348 | flags: paging.PageTableFlags, 349 | ) FlagUpdateError!MapperFlushAll { 350 | return mapper.z_impl_setFlagsP4Entry(mapper, page, flags); 351 | } 352 | 353 | /// Set the flags of an existing page table level 4 entry 354 | pub inline fn setFlagsP4Entry2MiB( 355 | mapper: *Mapper, 356 | page: paging.Page2MiB, 357 | flags: paging.PageTableFlags, 358 | ) FlagUpdateError!MapperFlushAll { 359 | return mapper.z_impl_setFlagsP4Entry2MiB(mapper, page, flags); 360 | } 361 | 362 | /// Set the flags of an existing page table level 4 entry 363 | pub inline fn setFlagsP4Entry1GiB( 364 | mapper: *Mapper, 365 | page: paging.Page1GiB, 366 | flags: paging.PageTableFlags, 367 | ) FlagUpdateError!MapperFlushAll { 368 | return mapper.z_impl_setFlagsP4Entry1GiB(mapper, page, flags); 369 | } 370 | 371 | /// Updates the flags of an existing mapping. 372 | pub inline fn updateFlags( 373 | mapper: *Mapper, 374 | page: paging.Page, 375 | flags: paging.PageTableFlags, 376 | ) FlagUpdateError!MapperFlush { 377 | return mapper.z_impl_updateFlags(mapper, page, flags); 378 | } 379 | 380 | /// Updates the flags of an existing mapping. 381 | pub inline fn updateFlags2MiB( 382 | mapper: *Mapper, 383 | page: paging.Page2MiB, 384 | flags: paging.PageTableFlags, 385 | ) FlagUpdateError!MapperFlush2MiB { 386 | return mapper.z_impl_updateFlags2MiB(mapper, page, flags); 387 | } 388 | 389 | /// Updates the flags of an existing mapping. 
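    ///
    /// A sketch (`page` and `new_flags` are placeholders supplied by the caller;
    /// the returned value signals that the TLB entry for the page still has to
    /// be flushed):
    ///
    ///     const flush = try mapper.updateFlags1GiB(page, new_flags);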
390 |     pub inline fn updateFlags1GiB(
391 |         mapper: *Mapper,
392 |         page: paging.Page1GiB,
393 |         flags: paging.PageTableFlags,
394 |     ) FlagUpdateError!MapperFlush1GiB {
395 |         return mapper.z_impl_updateFlags1GiB(mapper, page, flags);
396 |     }
397 | 
398 |     /// Removes a mapping from the page table and returns the frame that used to be mapped.
399 |     ///
400 |     /// Note that no page tables or pages are deallocated.
401 |     pub inline fn unmap(
402 |         mapper: *Mapper,
403 |         page: paging.Page,
404 |     ) UnmapError!UnmapResult {
405 |         return mapper.z_impl_unmap(mapper, page);
406 |     }
407 | 
408 |     /// Removes a mapping from the page table and returns the frame that used to be mapped.
409 |     ///
410 |     /// Note that no page tables or pages are deallocated.
411 |     pub inline fn unmap2MiB(
412 |         mapper: *Mapper,
413 |         page: paging.Page2MiB,
414 |     ) UnmapError!UnmapResult2MiB {
415 |         return mapper.z_impl_unmap2MiB(mapper, page);
416 |     }
417 | 
418 |     /// Removes a mapping from the page table and returns the frame that used to be mapped.
419 |     ///
420 |     /// Note that no page tables or pages are deallocated.
421 |     pub inline fn unmap1GiB(
422 |         mapper: *Mapper,
423 |         page: paging.Page1GiB,
424 |     ) UnmapError!UnmapResult1GiB {
425 |         return mapper.z_impl_unmap1GiB(mapper, page);
426 |     }
427 | 
428 |     /// Creates a new mapping in the page table.
429 |     ///
430 |     /// This function might need additional physical frames to create new page tables. These
431 |     /// frames are allocated from the `frame_allocator` argument. At most three frames are required.
432 |     ///
433 |     /// The flags of the parent table(s) can be explicitly specified. Those flags are used for
434 |     /// newly created table entries, and for existing entries the flags are added.
435 |     ///
436 |     /// Depending on the mapper implementation used, the `PRESENT` and `WRITABLE` flags might
437 |     /// be set for parent tables, even if they are not specified in `parent_table_flags`.
438 |     pub inline fn mapToWithTableFlags(
439 |         mapper: *Mapper,
440 |         page: paging.Page,
441 |         frame: paging.PhysFrame,
442 |         flags: paging.PageTableFlags,
443 |         parent_table_flags: paging.PageTableFlags,
444 |         frame_allocator: *paging.FrameAllocator,
445 |     ) MapToError!MapperFlush {
446 |         return mapper.z_impl_mapToWithTableFlags(mapper, page, frame, flags, parent_table_flags, frame_allocator);
447 |     }
448 | 
449 |     /// Creates a new mapping in the page table.
450 |     ///
451 |     /// This function might need additional physical frames to create new page tables. These
452 |     /// frames are allocated from the `frame_allocator` argument. At most two frames are required.
453 |     ///
454 |     /// The flags of the parent table(s) can be explicitly specified. Those flags are used for
455 |     /// newly created table entries, and for existing entries the flags are added.
456 |     ///
457 |     /// Depending on the mapper implementation used, the `PRESENT` and `WRITABLE` flags might
458 |     /// be set for parent tables, even if they are not specified in `parent_table_flags`.
459 |     pub inline fn mapToWithTableFlags2MiB(
460 |         mapper: *Mapper,
461 |         page: paging.Page2MiB,
462 |         frame: paging.PhysFrame2MiB,
463 |         flags: paging.PageTableFlags,
464 |         parent_table_flags: paging.PageTableFlags,
465 |         frame_allocator: *paging.FrameAllocator,
466 |     ) MapToError!MapperFlush2MiB {
467 |         return mapper.z_impl_mapToWithTableFlags2MiB(mapper, page, frame, flags, parent_table_flags, frame_allocator);
468 |     }
469 | 
470 |     /// Creates a new mapping in the page table.
471 |     ///
472 |     /// This function might need additional physical frames to create new page tables. These
473 |     /// frames are allocated from the `frame_allocator` argument. At most one frame is required.
474 |     ///
475 |     /// The flags of the parent table(s) can be explicitly specified. Those flags are used for
476 |     /// newly created table entries, and for existing entries the flags are added.
477 |     ///
478 |     /// Depending on the mapper implementation used, the `PRESENT` and `WRITABLE` flags might
479 |     /// be set for parent tables, even if they are not specified in `parent_table_flags`.
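    ///
    /// A hedged usage sketch (all values here are hypothetical and produced
    /// elsewhere in the kernel):
    ///
    ///     const flush = try mapper.mapToWithTableFlags1GiB(
    ///         page,
    ///         frame,
    ///         flags, // flags for the 1 GiB mapping itself
    ///         parent_table_flags, // flags applied to the parent table entries
    ///         frame_allocator,
    ///     );
    ///     flush.flush();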
480 |     pub inline fn mapToWithTableFlags1GiB(
481 |         mapper: *Mapper,
482 |         page: paging.Page1GiB,
483 |         frame: paging.PhysFrame1GiB,
484 |         flags: paging.PageTableFlags,
485 |         parent_table_flags: paging.PageTableFlags,
486 |         frame_allocator: *paging.FrameAllocator,
487 |     ) MapToError!MapperFlush1GiB {
488 |         return mapper.z_impl_mapToWithTableFlags1GiB(mapper, page, frame, flags, parent_table_flags, frame_allocator);
489 |     }
490 | 
491 |     comptime {
492 |         std.testing.refAllDecls(@This());
493 |     }
494 | };
495 | 
496 | /// The result of an `unmap` call. Page size 4 KiB.
497 | pub const UnmapResult = struct {
498 |     frame: paging.PhysFrame,
499 |     flush: MapperFlush,
500 | 
501 |     comptime {
502 |         std.testing.refAllDecls(@This());
503 |     }
504 | };
505 | 
506 | /// The result of an `unmap2MiB` call. Page size 2 MiB.
507 | pub const UnmapResult2MiB = struct {
508 |     frame: paging.PhysFrame2MiB,
509 |     flush: MapperFlush2MiB,
510 | 
511 |     comptime {
512 |         std.testing.refAllDecls(@This());
513 |     }
514 | };
515 | 
516 | /// The result of an `unmap1GiB` call. Page size 1 GiB.
517 | pub const UnmapResult1GiB = struct {
518 |     frame: paging.PhysFrame1GiB,
519 |     flush: MapperFlush1GiB,
520 | 
521 |     comptime {
522 |         std.testing.refAllDecls(@This());
523 |     }
524 | };
525 | 
526 | pub const TranslateResultType = enum {
527 |     Frame4KiB,
528 |     Frame2MiB,
529 |     Frame1GiB,
530 | };
531 | 
532 | pub const TranslateResult = union(TranslateResultType) {
533 |     Frame4KiB: TranslateResultContents,
534 |     Frame2MiB: TranslateResult2MiBContents,
535 |     Frame1GiB: TranslateResult1GiBContents,
536 | };
537 | 
538 | pub const TranslateResultContents = struct {
539 |     /// The mapped frame.
540 |     frame: paging.PhysFrame,
541 |     /// The offset within the mapped frame.
542 |     offset: u64,
543 |     /// The flags for the frame.
544 |     flags: paging.PageTableFlags,
545 | 
546 |     comptime {
547 |         std.testing.refAllDecls(@This());
548 |     }
549 | };
550 | 
551 | pub const TranslateResult2MiBContents = struct {
552 |     /// The mapped frame.
553 |     frame: paging.PhysFrame2MiB,
554 |     /// The offset within the mapped frame.
555 |     offset: u64,
556 |     /// The flags for the frame.
557 |     flags: paging.PageTableFlags,
558 | 
559 |     comptime {
560 |         std.testing.refAllDecls(@This());
561 |     }
562 | };
563 | 
564 | pub const TranslateResult1GiBContents = struct {
565 |     /// The mapped frame.
566 |     frame: paging.PhysFrame1GiB,
567 |     /// The offset within the mapped frame.
568 |     offset: u64,
569 |     /// The flags for the frame.
570 |     flags: paging.PageTableFlags,
571 | 
572 |     comptime {
573 |         std.testing.refAllDecls(@This());
574 |     }
575 | };
576 | 
577 | /// An error indicating that a `translate` call failed.
578 | pub const TranslateError = error{
579 |     /// The given page is not mapped to a physical frame.
580 |     NotMapped,
581 |     /// The page table entry for the given page points to an invalid physical address.
582 |     InvalidFrameAddress,
583 | };
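
// A hedged sketch of consuming `translate` and `TranslateResult` (hypothetical
// `mapper` and `addr` values; this mirrors what `translateAddr` does internally):
//
//     const result = mapper.translate(addr) catch return null;
//     const phys = switch (result) {
//         .Frame4KiB => |res| res.frame.start_address.value + res.offset,
//         .Frame2MiB => |res| res.frame.start_address.value + res.offset,
//         .Frame1GiB => |res| res.frame.start_address.value + res.offset,
//     };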
584 | 
585 | /// This type represents a change of a page table requiring a complete TLB flush.
586 | ///
587 | /// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
588 | /// to be flushed from the TLB before it's accessed. This type is returned from a function that
589 | /// made the change to ensure that the TLB flush is not forgotten.
590 | pub const MapperFlushAll = struct {
591 |     /// Flush all pages from the TLB to ensure that the newest mapping is used.
592 |     pub fn flushAll(self: MapperFlushAll) void {
593 |         _ = self;
594 |         x86_64.instructions.tlb.flushAll();
595 |     }
596 | };
597 | 
598 | /// This type represents a page whose mapping has changed in the page table. Page size 4 KiB.
599 | ///
600 | /// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
601 | /// to be flushed from the TLB before it's accessed. This type is returned from functions that
602 | /// change the mapping of a page to ensure that the TLB flush is not forgotten.
603 | pub const MapperFlush = struct {
604 |     page: paging.Page,
605 | 
606 |     /// Create a new flush promise.
607 |     pub fn init(page: paging.Page) MapperFlush {
608 |         return .{ .page = page };
609 |     }
610 | 
611 |     /// Flush the page from the TLB to ensure that the newest mapping is used.
612 |     pub fn flush(self: MapperFlush) void {
613 |         x86_64.instructions.tlb.flush(self.page.start_address);
614 |     }
615 | 
616 |     comptime {
617 |         std.testing.refAllDecls(@This());
618 |     }
619 | };
620 | 
621 | /// This type represents a page whose mapping has changed in the page table. Page size 2 MiB.
622 | ///
623 | /// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
624 | /// to be flushed from the TLB before it's accessed. This type is returned from functions that
625 | /// change the mapping of a page to ensure that the TLB flush is not forgotten.
626 | pub const MapperFlush2MiB = struct {
627 |     page: paging.Page2MiB,
628 | 
629 |     /// Create a new flush promise.
630 |     pub fn init(page: paging.Page2MiB) MapperFlush2MiB {
631 |         return .{ .page = page };
632 |     }
633 | 
634 |     /// Flush the page from the TLB to ensure that the newest mapping is used.
635 |     pub fn flush(self: MapperFlush2MiB) void {
636 |         x86_64.instructions.tlb.flush(self.page.start_address);
637 |     }
638 | 
639 |     comptime {
640 |         std.testing.refAllDecls(@This());
641 |     }
642 | };
643 | 
644 | /// This type represents a page whose mapping has changed in the page table. Page size 1 GiB.
645 | ///
646 | /// The old mapping might still be cached in the translation lookaside buffer (TLB), so it needs
647 | /// to be flushed from the TLB before it's accessed. This type is returned from functions that
648 | /// change the mapping of a page to ensure that the TLB flush is not forgotten.
649 | pub const MapperFlush1GiB = struct {
650 |     page: paging.Page1GiB,
651 | 
652 |     /// Create a new flush promise.
653 |     pub fn init(page: paging.Page1GiB) MapperFlush1GiB {
654 |         return .{ .page = page };
655 |     }
656 | 
657 |     /// Flush the page from the TLB to ensure that the newest mapping is used.
658 |     pub fn flush(self: MapperFlush1GiB) void {
659 |         x86_64.instructions.tlb.flush(self.page.start_address);
660 |     }
661 | 
662 |     comptime {
663 |         std.testing.refAllDecls(@This());
664 |     }
665 | };
666 | 
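// A hedged sketch of the flush-promise discipline after a mapping change
// (hypothetical `mapper`, `page`, `frame`, `flags`, and `frame_allocator`):
//
//     const flush = try mapper.mapTo(page, frame, flags, frame_allocator);
//     flush.flush(); // omitting this can leave a stale translation in the TLB
//
/// An error indicating that a `mapTo` call failed.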
667 | pub const MapToError = error{
668 |     /// An additional frame was needed for the mapping process, but the frame allocator
669 |     /// returned `null`.
670 |     FrameAllocationFailed,
671 |     /// An upper level page table entry has the `huge_page` flag set, which means that the
672 |     /// given page is part of an already mapped huge page.
673 |     ParentEntryHugePage,
674 |     /// The given page is already mapped to a physical frame.
675 |     PageAlreadyMapped,
676 | };
677 | 
678 | /// An error indicating that an `unmap` call failed.
679 | pub const UnmapError = error{
680 |     /// An upper level page table entry has the `huge_page` flag set, which means that the
681 |     /// given page is part of a huge page and can't be freed individually.
682 |     ParentEntryHugePage,
683 |     /// The given page is not mapped to a physical frame.
684 |     PageNotMapped,
685 |     /// The page table entry for the given page points to an invalid physical address.
686 |     InvalidFrameAddress,
687 | };
688 | 
689 | /// An error indicating that an `updateFlags` call failed.
690 | pub const FlagUpdateError = error{
691 |     /// The given page is not mapped to a physical frame.
692 |     PageNotMapped,
693 |     /// An upper level page table entry has the `huge_page` flag set, which means that the
694 |     /// given page is part of a huge page and its flags can't be updated individually.
695 |     ParentEntryHugePage,
696 | };
697 | 
698 | /// An error indicating that a `translatePage` call failed.
699 | pub const TranslatePageError = error{
700 |     /// The given page is not mapped to a physical frame.
701 |     PageNotMapped,
702 |     /// An upper level page table entry has the `huge_page` flag set, which means that the
703 |     /// given page is part of a huge page and can't be translated at the requested size.
704 |     ParentEntryHugePage,
705 |     /// The page table entry for the given page points to an invalid physical address.
706 |     InvalidFrameAddress,
707 | };
708 | 
709 | comptime {
710 |     std.testing.refAllDecls(@This());
711 | }
712 | 
--------------------------------------------------------------------------------