├── .cargo
│   └── config.toml
├── .gitattributes
├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── README.md
├── boot
│   └── stage1
│       ├── Cargo.toml
│       ├── README.md
│       ├── build.rs
│       ├── link.x
│       └── src
│           ├── except.rs
│           ├── gdb.rs
│           ├── glballoc.rs
│           ├── main.rs
│           ├── panic.rs
│           ├── startup.s
│           └── util.rs
├── docs
│   ├── ibm
│   │   ├── CBE_Public_Registers.v1_5.02APR2007.pdf
│   │   ├── IBM_CBE_Handbook_v1.1_24APR2007_pub.pdf
│   │   └── IBM_CellBE_HIG_65nm_v1.01_8Jun2007.pdf
│   └── images
│       └── serial_terminal.png
├── powerpc64.json
└── shared
    ├── core_reqs
    │   ├── Cargo.toml
    │   └── src
    │       └── lib.rs
    ├── sync
    │   ├── Cargo.toml
    │   └── src
    │       ├── lib.rs
    │       └── mutex.rs
    ├── xenon-cpu
    │   ├── Cargo.toml
    │   └── src
    │       ├── intrin.rs
    │       ├── lib.rs
    │       └── time.rs
    ├── xenon-enet
    │   ├── Cargo.lock
    │   ├── Cargo.toml
    │   └── src
    │       ├── lib.rs
    │       └── ring.rs
    └── xenon-soc
        ├── Cargo.toml
        └── src
            ├── iic.rs
            ├── lib.rs
            ├── smc.rs
            └── uart.rs
--------------------------------------------------------------------------------
/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | 
2 | [build]
3 | target = "powerpc64.json"
4 | 
5 | [unstable]
6 | build-std = ["core", "alloc"]
7 | 
8 | [profile.release]
9 | incremental = false
10 | opt-level = "z"
11 | lto = true
12 | 
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.pdf filter=lfs diff=lfs merge=lfs -text
2 | 
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | on: [push, pull_request]
2 | 
3 | name: Continuous Integration
4 | 
5 | jobs:
6 |   check:
7 |     name: Check
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: actions/checkout@v2
11 |       - uses: actions-rs/toolchain@v1
12 |         with:
13 |           components: rust-src # needed to cross-compile std
14 |           profile: minimal
15 |           toolchain: nightly
16 |           override: true
17 |       - uses: actions-rs/cargo@v1
18 |         with:
19 |           command: check
20 |           args: --all --target=powerpc64.json -Zbuild-std=core,alloc
21 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | **/target
3 | 
--------------------------------------------------------------------------------
/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3 4 | 5 | [[package]] 6 | name = "atomic" 7 | version = "0.5.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" 10 | dependencies = [ 11 | "autocfg", 12 | ] 13 | 14 | [[package]] 15 | name = "autocfg" 16 | version = "1.0.1" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" 19 | 20 | [[package]] 21 | name = "bitflags" 22 | version = "1.3.2" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 25 | 26 | [[package]] 27 | name = "buddyalloc" 28 | version = "0.1.5" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | checksum = "a5ded974e2422fae8075cb72650eae5b12f076e480eb44930571f6bddf325b31" 31 | 32 | [[package]] 33 | name = "byteorder" 34 | version = "1.4.3" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" 37 | 38 | [[package]] 39 | name = "cfg-if" 40 | version = "1.0.0" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 43 | 44 | [[package]] 45 | name = "core_reqs" 46 | version = "0.1.0" 47 | 48 | [[package]] 49 | name = "log" 50 | version = "0.4.14" 51 | source = "registry+https://github.com/rust-lang/crates.io-index" 52 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" 53 | dependencies = [ 54 | "cfg-if", 55 | ] 56 | 57 | [[package]] 58 | name = "managed" 59 | version = "0.7.2" 60 | source = "registry+https://github.com/rust-lang/crates.io-index" 61 | checksum = "c75de51135344a4f8ed3cfe2720dc27736f7711989703a0b43aadf3753c55577" 62 | 63 | [[package]] 64 | name = "smoltcp" 65 | version = "0.7.5" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | checksum = "3e4a069bef843d170df47e7c0a8bf8d037f217d9f5b325865acc3e466ffe40d3" 68 | dependencies = [ 69 | "bitflags", 70 | "byteorder", 71 | "log", 72 | "managed", 73 | ] 74 | 75 | [[package]] 76 | name = "stage1" 77 | version = "0.1.0" 78 | dependencies = [ 79 | "atomic", 80 | "buddyalloc", 81 | "core_reqs", 82 | "sync", 83 | "xenon-cpu", 84 | "xenon-soc", 85 | ] 86 | 87 | [[package]] 88 | name = "sync" 89 | version = "0.1.0" 90 | 91 | [[package]] 92 | name = "xenon-cpu" 93 | version = "0.1.0" 94 | 95 | [[package]] 96 | name = "xenon-enet" 97 | version = "0.1.0" 98 | dependencies = [ 99 | "smoltcp", 100 | "xenon-cpu", 101 | ] 102 | 103 | [[package]] 104 | name = "xenon-soc" 105 | version = "0.1.0" 106 | dependencies = [ 107 | "sync", 108 | "xenon-cpu", 109 | ] 110 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 2 | 3 | [workspace] 4 | members = [ 5 | "boot/stage1", 6 | "shared/core_reqs", 7 | "shared/xenon-cpu", 8 | "shared/xenon-enet", 9 | "shared/xenon-soc", 10 | "shared/sync", 11 | ] 12 | 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Xell-rs 2 | This is the 
[xell-reloaded](https://github.com/xenia-project/xell-reloaded) bootloader, rewritten in Rust. 3 | 4 | Currently this project is a huge WIP and can't do much, but eventually this can be used to boot Linux from TFTP or a local storage device. 5 | 6 | ![Serial Terminal](/docs/images/serial_terminal.png) 7 | 8 | ## Crates 9 | * boot/stage1: The very first stage bootloader. 10 | * shared/ 11 | * core_reqs: Bare-minimum functionality required for Rust's libcore. Originally from the [chocolate milk](https://github.com/gamozolabs/chocolate_milk/blob/643f47b901ceda1f688d3c20ff92b0f41af80251/shared/core_reqs/src/lib.rs) project. 12 | * sync: Xenon-specific mutex spinlock implementation 13 | * xenon-cpu: Xenon-specific CPU intrinsics 14 | * xenon-enet: Xenon fast ethernet driver 15 | * xenon-soc: Drivers for Xenon SoC functionality 16 | 17 | # License 18 | Licensed under either of 19 | * Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0) 20 | * MIT license (https://opensource.org/licenses/MIT) 21 | at your option. -------------------------------------------------------------------------------- /boot/stage1/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stage1" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | core_reqs = { path = "../../shared/core_reqs" } 10 | xenon-cpu = { path = "../../shared/xenon-cpu" } 11 | xenon-soc = { path = "../../shared/xenon-soc" } 12 | sync = { path = "../../shared/sync" } 13 | 14 | atomic = "0.5.0" 15 | buddyalloc = "0.1.5" -------------------------------------------------------------------------------- /boot/stage1/README.md: -------------------------------------------------------------------------------- 1 | # Stage 1 bootloader 2 | This is the first bit of code that runs after the glitched CD bootloader [jumps to us](https://github.com/Free60Project/tools/blob/ddcd9c55875257e671813ca857374e03b5247b1f/reset_glitch_hack/cdxell/cdxell.S#L62-L89). 3 | 4 | From there, it jumps to a bit of code implemented in `startup.s`. We set up the bare minimum system state required to run Rust code, and call `__start_rust` in `main.rs`. -------------------------------------------------------------------------------- /boot/stage1/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("cargo:rerun-if-changed=link.x"); 3 | println!("cargo:rustc-link-arg=-Tboot/stage1/link.x"); 4 | } 5 | -------------------------------------------------------------------------------- /boot/stage1/link.x: -------------------------------------------------------------------------------- 1 | # ENTRY(_start) 2 | 3 | SECTIONS 4 | { 5 | . = 0x800000001c000000; 6 | 7 | .text 0x800000001c000000 : { 8 | KEEP(*(.text.startup)); 9 | *(.text .text.*); 10 | } 11 | .data : { *(.data) } 12 | .sdata : { *(.sdata) } 13 | .rodata : { *(.rodata) } 14 | 15 | .dynsym : { *(.dynsym) } 16 | .gnu.hash : { *(.gnu.hash) } 17 | .hash : { *(.hash) } 18 | .dynstr : { *(.dynstr) } 19 | .rela.dyn : { *(.rela.dyn) } 20 | .rela.opd : { *(.rela.opd) } 21 | .eh_frame_hdr : { *(.eh_frame_hdr) } 22 | .eh_frame : { *(.eh_frame) } 23 | 24 | .got : { 25 | __toc_start = .; 26 | 27 | *(.got) 28 | *(.toc) 29 | } 30 | 31 | . 
= ALIGN(256); 32 | __bss_start = .; 33 | .bss (NOLOAD) : { *(.bss .bss.*) } 34 | .sbss : { *(.sbss .sbss.*) } 35 | __bss_end = .; 36 | } 37 | -------------------------------------------------------------------------------- /boot/stage1/src/except.rs: -------------------------------------------------------------------------------- 1 | //! This module defines exception handlers. 2 | 3 | use atomic::{Atomic, Ordering}; 4 | use core::fmt::{self, Debug, Write}; 5 | 6 | use crate::{smc, uart, util::make_arithaddr}; 7 | 8 | use xenon_cpu::mfspr; 9 | 10 | pub const EXCEPTION_VECTORS: [usize; 17] = [ 11 | 0x00000000_00000100, // Reset 12 | 0x00000000_00000200, // Machine check 13 | 0x00000000_00000300, // Data storage 14 | 0x00000000_00000380, // Data segment 15 | 0x00000000_00000400, // Instruction storage 16 | 0x00000000_00000480, // Instruction segment 17 | 0x00000000_00000500, // External interrupt 18 | 0x00000000_00000600, // Alignment 19 | 0x00000000_00000700, // Program 20 | 0x00000000_00000800, // Floating point 21 | 0x00000000_00000900, // Decrementer 22 | 0x00000000_00000980, 23 | 0x00000000_00000c00, // System call 24 | 0x00000000_00000d00, // Trace 25 | 0x00000000_00000f00, // Performance 26 | 0x00000000_00001600, 27 | 0x00000000_00001800, 28 | ]; 29 | 30 | #[allow(dead_code)] 31 | #[derive(Copy, Clone, Debug)] 32 | #[non_exhaustive] // N.B: NECESSARY because we cast from integers. 33 | #[repr(u32)] 34 | pub enum ExceptionType { 35 | Reset = 0x10, 36 | MachineCheck = 0x20, 37 | Dsi = 0x30, 38 | DataSegment = 0x38, 39 | Isi = 0x40, 40 | InstructionSegment = 0x48, 41 | ExternalInterrupt = 0x50, 42 | Alignment = 0x60, 43 | Program = 0x70, 44 | FloatingPoint = 0x80, 45 | Decrementer = 0x90, 46 | SystemCall = 0xC0, 47 | Trace = 0xD0, 48 | Performance = 0xF0, 49 | } 50 | 51 | #[repr(C, align(512))] 52 | #[derive(Copy, Clone, Default)] 53 | pub struct CpuContext { 54 | pub r: [u64; 32], 55 | pub cr: u64, // 0x100 (256) 56 | pub lr: u64, // 0x108 (264) 57 | pub ctr: u64, // 0x110 (272) 58 | pub pc: u64, // 0x118 (280) 59 | pub msr: u64, // 0x120 (288) 60 | } 61 | 62 | impl Debug for CpuContext { 63 | fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> core::fmt::Result { 64 | core::writeln!(fmt, "r:")?; 65 | for i in 0..32 { 66 | core::writeln!(fmt, " {:>3}: {:016X}", i, self.r[i])?; 67 | } 68 | 69 | core::writeln!(fmt, "cr: {:016X}", self.cr)?; 70 | core::writeln!(fmt, "lr: {:016X}", self.lr)?; 71 | core::writeln!(fmt, "ctr: {:016X}", self.ctr)?; 72 | core::writeln!(fmt, "pc: {:016X}", self.pc)?; 73 | core::writeln!(fmt, "msr: {:016X}", self.msr)?; 74 | 75 | Ok(()) 76 | } 77 | } 78 | 79 | #[allow(dead_code)] 80 | impl CpuContext { 81 | pub const fn new() -> Self { 82 | Self { 83 | r: [0u64; 32], 84 | cr: 0u64, 85 | lr: 0u64, 86 | ctr: 0u64, 87 | pc: 0u64, 88 | msr: 0u64, 89 | } 90 | } 91 | 92 | pub fn with_hvcall(func: extern "C" fn() -> !, r1: u64) -> Self { 93 | Self { 94 | r: [ 95 | 0xBEBEBEBE_BEBEBEBE, // r0 96 | r1, // r1 97 | 0xBEBEBEBE_BEBEBEBE, // r2 98 | 0xBEBEBEBE_BEBEBEBE, // r3 99 | 0xBEBEBEBE_BEBEBEBE, // r4 100 | 0xBEBEBEBE_BEBEBEBE, // r5 101 | 0xBEBEBEBE_BEBEBEBE, // r6 102 | 0xBEBEBEBE_BEBEBEBE, // r7 103 | 0xBEBEBEBE_BEBEBEBE, // r8 104 | 0xBEBEBEBE_BEBEBEBE, // r9 105 | 0xBEBEBEBE_BEBEBEBE, // r10 106 | 0xBEBEBEBE_BEBEBEBE, // r11 107 | func as u64, // r12 108 | 0xBEBEBEBE_BEBEBEBE, // r13 109 | 0xBEBEBEBE_BEBEBEBE, // r14 110 | 0xBEBEBEBE_BEBEBEBE, // r15 111 | 0xBEBEBEBE_BEBEBEBE, // r16 112 | 0xBEBEBEBE_BEBEBEBE, // r17 113 | 0xBEBEBEBE_BEBEBEBE, // r18 114 | 
0xBEBEBEBE_BEBEBEBE, // r19 115 | 0xBEBEBEBE_BEBEBEBE, // r20 116 | 0xBEBEBEBE_BEBEBEBE, // r21 117 | 0xBEBEBEBE_BEBEBEBE, // r22 118 | 0xBEBEBEBE_BEBEBEBE, // r23 119 | 0xBEBEBEBE_BEBEBEBE, // r24 120 | 0xBEBEBEBE_BEBEBEBE, // r25 121 | 0xBEBEBEBE_BEBEBEBE, // r26 122 | 0xBEBEBEBE_BEBEBEBE, // r27 123 | 0xBEBEBEBE_BEBEBEBE, // r28 124 | 0xBEBEBEBE_BEBEBEBE, // r29 125 | 0xBEBEBEBE_BEBEBEBE, // r30 126 | 0xBEBEBEBE_BEBEBEBE, // r31 127 | ], 128 | cr: 0xBEBEBEBE_BEBEBEBE, 129 | lr: 0xBEBEBEBE_BEBEBEBE, 130 | ctr: 0xBEBEBEBE_BEBEBEBE, 131 | pc: func as u64, 132 | msr: 0x90000000_00001000, // MSR[SF/HV/ME] 133 | } 134 | } 135 | 136 | pub fn with_svcall(func: extern "C" fn() -> !, r1: u64) -> Self { 137 | Self { 138 | r: [ 139 | 0xBEBEBEBE_BEBEBEBE, // r0 140 | r1, // r1 141 | 0xBEBEBEBE_BEBEBEBE, // r2 142 | 0xBEBEBEBE_BEBEBEBE, // r3 143 | 0xBEBEBEBE_BEBEBEBE, // r4 144 | 0xBEBEBEBE_BEBEBEBE, // r5 145 | 0xBEBEBEBE_BEBEBEBE, // r6 146 | 0xBEBEBEBE_BEBEBEBE, // r7 147 | 0xBEBEBEBE_BEBEBEBE, // r8 148 | 0xBEBEBEBE_BEBEBEBE, // r9 149 | 0xBEBEBEBE_BEBEBEBE, // r10 150 | 0xBEBEBEBE_BEBEBEBE, // r11 151 | func as u64, // r12 152 | 0xBEBEBEBE_BEBEBEBE, // r13 153 | 0xBEBEBEBE_BEBEBEBE, // r14 154 | 0xBEBEBEBE_BEBEBEBE, // r15 155 | 0xBEBEBEBE_BEBEBEBE, // r16 156 | 0xBEBEBEBE_BEBEBEBE, // r17 157 | 0xBEBEBEBE_BEBEBEBE, // r18 158 | 0xBEBEBEBE_BEBEBEBE, // r19 159 | 0xBEBEBEBE_BEBEBEBE, // r20 160 | 0xBEBEBEBE_BEBEBEBE, // r21 161 | 0xBEBEBEBE_BEBEBEBE, // r22 162 | 0xBEBEBEBE_BEBEBEBE, // r23 163 | 0xBEBEBEBE_BEBEBEBE, // r24 164 | 0xBEBEBEBE_BEBEBEBE, // r25 165 | 0xBEBEBEBE_BEBEBEBE, // r26 166 | 0xBEBEBEBE_BEBEBEBE, // r27 167 | 0xBEBEBEBE_BEBEBEBE, // r28 168 | 0xBEBEBEBE_BEBEBEBE, // r29 169 | 0xBEBEBEBE_BEBEBEBE, // r30 170 | 0xBEBEBEBE_BEBEBEBE, // r31 171 | ], 172 | cr: 0xBEBEBEBE_BEBEBEBE, 173 | lr: 0xBEBEBEBE_BEBEBEBE, 174 | ctr: 0xBEBEBEBE_BEBEBEBE, 175 | pc: func as u64, 176 | msr: 0x80000000_00001000, // MSR[SF/ME] 177 | } 178 | } 179 | } 180 | 181 | /// This is a per-processor area where context information is saved when 182 | /// an exception is encountered. 183 | #[no_mangle] 184 | static mut EXCEPTION_SAVE_AREA: [CpuContext; 6] = [CpuContext::new(); 6]; 185 | 186 | /// This area contains context information for per-process exception handlers. 187 | /// This is generally static and unmodified. 188 | #[no_mangle] 189 | static mut EXCEPTION_LOAD_AREA: [CpuContext; 6] = [CpuContext::new(); 6]; 190 | 191 | /// The definition of the application-defined exception handler. 192 | pub type ExceptionHandler = fn(ExceptionType, &mut CpuContext) -> Result<(), ()>; 193 | 194 | /// The application-defined exception handler. 195 | static EXCEPTION_HANDLER: Atomic> = Atomic::new(None); 196 | 197 | #[no_mangle] 198 | extern "C" fn handle_exception() -> ! { 199 | // FIXME: This may allow for unencoded enum discriminants to exist. 200 | let id: ExceptionType = { 201 | unsafe { core::mem::transmute(mfspr!(304) as u32) } // HPSRG0 202 | }; 203 | 204 | // SAFETY: We have exclusive access to the save area corresponding to this processor. 205 | let save_area: &mut CpuContext = unsafe { 206 | let pir = mfspr!(1023); 207 | &mut EXCEPTION_SAVE_AREA[pir as usize] 208 | }; 209 | 210 | match EXCEPTION_HANDLER.load(Ordering::Relaxed) { 211 | Some(ex) => { 212 | // If the handler successfully handles the exception, reload the calling context. 213 | if ex(id, save_area).is_ok() { 214 | unsafe { 215 | load_context(save_area); 216 | } 217 | } 218 | } 219 | 220 | // Fallback and handle the exception here. 
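        // If no application handler is registered, or it returned Err, we fall
        // through to the code below: retry the UART lock for roughly five seconds
        // (50 tries, 100 ms apart), forcibly taking it if that fails, dump the
        // saved context, and on hardware thread 0 ask the SMC to reset the system.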
221 | None => {} 222 | } 223 | 224 | let pir = unsafe { mfspr!(1023) }; 225 | 226 | let closure = |uart: &mut uart::UART| { 227 | core::writeln!(uart, "UNHANDLED EXCEPTION! Hit exception vector {:?}", id).unwrap(); 228 | core::writeln!(uart, "MSR: {:#?}", xenon_cpu::intrin::mfmsr()).unwrap(); 229 | core::writeln!(uart, "PIR: {:#?}", pir).unwrap(); 230 | core::writeln!(uart, "---- Saved registers:").unwrap(); 231 | core::writeln!(uart, " MSR: {:#?}", save_area.msr).unwrap(); 232 | core::writeln!(uart, " LR: {:#?}", save_area.lr).unwrap(); 233 | core::writeln!(uart, " PC: {:#?}", save_area.pc).unwrap(); 234 | }; 235 | 236 | // Attempt to lock the UART. If that fails (for example, because we took an exception 237 | // while the UART was locked), forcibly take it to print out error text. 238 | let res = { 239 | let mut tries = 0u64; 240 | 241 | loop { 242 | match uart::UART.try_lock(&closure) { 243 | Ok(_) => break Ok(()), 244 | Err(_) => { 245 | if tries > 50 { 246 | break Err(()); 247 | } 248 | 249 | tries += 1; 250 | xenon_cpu::time::delay(core::time::Duration::from_millis(100)); 251 | } 252 | } 253 | } 254 | }; 255 | 256 | if res.is_err() { 257 | let mut uart = unsafe { uart::UART.get_mut_unchecked() }; 258 | closure(&mut uart); 259 | } 260 | 261 | if pir == 0 { 262 | // Not good. Auto-reset the system. 263 | smc::SMC.lock(|smc| { 264 | smc.send_message(&[0x82043000u32, 0x00000000u32, 0x00000000u32, 0x00000000u32]); 265 | }); 266 | } 267 | 268 | loop {} 269 | } 270 | 271 | #[naked] 272 | #[no_mangle] 273 | pub unsafe extern "C" fn load_context(_ctx: &CpuContext) -> ! { 274 | asm!( 275 | "ld %r0, 0x100(%r3)", 276 | "mtcr %r0", 277 | "ld %r0, 0x108(%r3)", 278 | "mtlr %r0", 279 | "ld %r0, 0x110(%r3)", 280 | "mtctr %r0", 281 | "ld %r0, 0x118(%r3)", 282 | "mtsrr0 %r0", 283 | "ld %r0, 0x120(%r3)", 284 | "mtsrr1 %r0", 285 | "ld %r0, 0x00(%r3)", 286 | "ld %r1, 0x08(%r3)", 287 | "ld %r2, 0x10(%r3)", 288 | // N.B: r3 is loaded last. 289 | "ld %r4, 0x20(%r3)", 290 | "ld %r5, 0x28(%r3)", 291 | "ld %r6, 0x30(%r3)", 292 | "ld %r7, 0x38(%r3)", 293 | "ld %r8, 0x40(%r3)", 294 | "ld %r9, 0x48(%r3)", 295 | "ld %r10, 0x50(%r3)", 296 | "ld %r11, 0x58(%r3)", 297 | "ld %r12, 0x60(%r3)", 298 | "ld %r13, 0x68(%r3)", 299 | "ld %r14, 0x70(%r3)", 300 | "ld %r15, 0x78(%r3)", 301 | "ld %r16, 0x80(%r3)", 302 | "ld %r17, 0x88(%r3)", 303 | "ld %r18, 0x90(%r3)", 304 | "ld %r19, 0x98(%r3)", 305 | "ld %r20, 0xA0(%r3)", 306 | "ld %r21, 0xA8(%r3)", 307 | "ld %r22, 0xB0(%r3)", 308 | "ld %r23, 0xB8(%r3)", 309 | "ld %r24, 0xC0(%r3)", 310 | "ld %r25, 0xC8(%r3)", 311 | "ld %r26, 0xD0(%r3)", 312 | "ld %r27, 0xD8(%r3)", 313 | "ld %r28, 0xE0(%r3)", 314 | "ld %r29, 0xE8(%r3)", 315 | "ld %r30, 0xF0(%r3)", 316 | "ld %r31, 0xF8(%r3)", 317 | "ld %r3, 0x18(%r3)", 318 | "rfid", 319 | options(noreturn), 320 | ); 321 | } 322 | 323 | #[naked] 324 | unsafe extern "C" fn except_thunk() -> ! { 325 | asm!( 326 | "mtctr %r4", // Reload CTR with original value 327 | "mfspr %r4, 1023", // r4 = PIR 328 | "sldi %r4, %r4, 32 + 9", 329 | "oris %r4, %r4, EXCEPTION_SAVE_AREA@highest", 330 | "ori %r4, %r4, EXCEPTION_SAVE_AREA@higher", 331 | "rotldi %r4, %r4, 32", 332 | "oris %r4, %r4, EXCEPTION_SAVE_AREA@high", 333 | "ori %r4, %r4, EXCEPTION_SAVE_AREA@l", 334 | // Now save registers. 335 | "std %r0, 0x00(%r4)", 336 | "std %r1, 0x08(%r4)", 337 | "std %r2, 0x10(%r4)", 338 | "mfspr %r0, 304", // Reload R3, which was saved in HPSRG0. 339 | "std %r0, 0x18(%r4)", 340 | "mfspr %r0, 305", // Reload R4, which was saved in HSPRG1. 
341 | "std %r0, 0x20(%r4)", 342 | "std %r5, 0x28(%r4)", 343 | "std %r6, 0x30(%r4)", 344 | "std %r7, 0x38(%r4)", 345 | "std %r8, 0x40(%r4)", 346 | "std %r9, 0x48(%r4)", 347 | "std %r10, 0x50(%r4)", 348 | "std %r11, 0x58(%r4)", 349 | "std %r12, 0x60(%r4)", 350 | "std %r13, 0x68(%r4)", 351 | "std %r14, 0x70(%r4)", 352 | "std %r15, 0x78(%r4)", 353 | "std %r16, 0x80(%r4)", 354 | "std %r17, 0x88(%r4)", 355 | "std %r18, 0x90(%r4)", 356 | "std %r19, 0x98(%r4)", 357 | "std %r20, 0xA0(%r4)", 358 | "std %r21, 0xA8(%r4)", 359 | "std %r22, 0xB0(%r4)", 360 | "std %r23, 0xB8(%r4)", 361 | "std %r24, 0xC0(%r4)", 362 | "std %r25, 0xC8(%r4)", 363 | "std %r26, 0xD0(%r4)", 364 | "std %r27, 0xD8(%r4)", 365 | "std %r28, 0xE0(%r4)", 366 | "std %r29, 0xE8(%r4)", 367 | "std %r30, 0xF0(%r4)", 368 | "std %r31, 0xF8(%r4)", 369 | "mfcr %r0", 370 | "std %r0, 0x100(%r4)", 371 | "mflr %r0", 372 | "std %r0, 0x108(%r4)", 373 | "mfctr %r0", 374 | "std %r0, 0x110(%r4)", 375 | "mfsrr0 %r0", 376 | "std %r0, 0x118(%r4)", 377 | "mfsrr1 %r0", 378 | "std %r0, 0x120(%r4)", 379 | "mtspr 304, %r3", // HPSRG0 = exception ID 380 | // Now load the exception load context. 381 | "b except_load_thunk", 382 | options(noreturn) 383 | ); 384 | } 385 | 386 | #[naked] 387 | #[no_mangle] 388 | unsafe extern "C" fn except_load_thunk() -> ! { 389 | asm!( 390 | "mfspr %r3, 1023", // r3 = PIR 391 | "sldi %r3, %r3, 32 + 9", 392 | // N.B: These instructions are patched later. 393 | "trap", 394 | "trap", 395 | "rotldi %r3, %r3, 32", 396 | "trap", 397 | "trap", 398 | "b load_context", 399 | options(noreturn) 400 | ) 401 | } 402 | 403 | /// Create a longjmp for an exception vector. 404 | /// This will preverse r3/r4 in HSPRG0 and HSPRG1, respectively. 405 | /// r3 will be loaded with the constant specified in the `id` parameter. 406 | /// r4 will be loaded with the value of CTR. 407 | const fn make_longjmp_exc(id: u16, target: usize) -> [u32; 11] { 408 | [ 409 | 0x7C704BA6, // mtspr HSPRG0, %r3 410 | 0x7C914BA6, // mtspr HSPRG1, %r4 411 | (0x3C600000 | ((target >> 48) & 0xFFFF)) as u32, // lis %r3, target[64:48] 412 | (0x60630000 | ((target >> 32) & 0xFFFF)) as u32, // ori %r3, %r3, target[48:32] 413 | 0x786307C6, // rldicr %r3, %r3, 32, 31 414 | (0x64630000 | ((target >> 16) & 0xFFFF)) as u32, // oris %r3, %r3, target[32:16] 415 | (0x60630000 | ((target >> 00) & 0xFFFF)) as u32, // ori %r3, %r3, target[16:0] 416 | 0x7C8902A6, // mfctr %r4 417 | 0x7C6903A6, // mtctr %r3 418 | (0x38600000 | (id as u32)), // li %r3, id 419 | 0x4E800420, // bctr 420 | ] 421 | } 422 | 423 | pub unsafe fn cause_exception() -> ! { 424 | // Trap. 425 | asm!("trap", options(noreturn)); 426 | } 427 | 428 | /// This function initializes the exception handler subsystem. 429 | /// 430 | /// # Safety 431 | /// This function should only be called once during startup. 432 | /// This will place jump stubs at the PowerPC exception vectors. 433 | /// 434 | /// Unsafe for obvious reasons. 435 | pub unsafe fn init_except(handler: Option) { 436 | EXCEPTION_HANDLER.store(handler, Ordering::Relaxed); 437 | 438 | // Set up the load area. 
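    // One context per hardware thread (PIR 0..=5): each entry points PC at
    // handle_exception with MSR[SF/HV/ME] set, and hands the handler its own
    // stack in r1, starting at 0x8000_0000_1EFF_0000 and stepping down by
    // 0x1_0000 (64 KiB) per thread.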
439 | EXCEPTION_LOAD_AREA = [ 440 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFF_0000), 441 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFE_0000), 442 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFD_0000), 443 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFC_0000), 444 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFB_0000), 445 | CpuContext::with_hvcall(handle_exception, 0x8000_0000_1EFA_0000), 446 | ]; 447 | 448 | // N.B: We have to patch the exception thunk to deal with PIE. 449 | { 450 | let save_area = &mut EXCEPTION_SAVE_AREA[0] as *mut _ as usize; 451 | let thunk_area = except_thunk as usize as *mut u32; 452 | 453 | // We have to use addition here because the PIR is pre-loaded into r4 by 454 | // the thunk, and a bitwise OR will not properly add it as an offset. 455 | // We only have to use addition on the lowest chunk, because the highest 456 | // offset is `0xA00` (5 << 9). 457 | let (arith_hi, arith_lo) = make_arithaddr(save_area as u32); 458 | 459 | // "oris %r4, %r4, EXCEPTION_SAVE_AREA@highest" 460 | thunk_area 461 | .offset(3) 462 | .write_volatile(0x64840000 | ((save_area >> 48) & 0xFFFF) as u32); 463 | // "ori %r4, %r4, EXCEPTION_SAVE_AREA@higher" 464 | thunk_area 465 | .offset(4) 466 | .write_volatile(0x60840000 | ((save_area >> 32) & 0xFFFF) as u32); 467 | // "oris %r4, %r4, EXCEPTION_SAVE_AREA@ha" 468 | thunk_area 469 | .offset(6) 470 | .write_volatile(0x64840000 | arith_hi as u32); 471 | // "addi %r4, %r4, EXCEPTION_SAVE_AREA@l" 472 | thunk_area 473 | .offset(7) 474 | .write_volatile(0x38840000 | arith_lo as u32); 475 | } 476 | 477 | // Ditto for the load thunk. 478 | { 479 | let load_area = &mut EXCEPTION_LOAD_AREA[0] as *mut _ as usize; 480 | let thunk_area = except_load_thunk as usize as *mut u32; 481 | 482 | let (arith_hi, arith_lo) = make_arithaddr(load_area as u32); 483 | 484 | // "oris %r3, %r3, EXCEPTION_LOAD_AREA@highest" 485 | thunk_area 486 | .offset(2) 487 | .write_volatile(0x64630000 | ((load_area >> 48) & 0xFFFF) as u32); 488 | // "ori %r3, %r3, EXCEPTION_LOAD_AREA@higher" 489 | thunk_area 490 | .offset(3) 491 | .write_volatile(0x60630000 | ((load_area >> 32) & 0xFFFF) as u32); 492 | // "oris %r3, %r3, EXCEPTION_LOAD_AREA@ha" 493 | thunk_area 494 | .offset(5) 495 | .write_volatile(0x64630000 | arith_hi as u32); 496 | // "addi %r3, %r3, EXCEPTION_LOAD_AREA@l" 497 | thunk_area 498 | .offset(6) 499 | .write_volatile(0x38630000 | arith_lo as u32); 500 | } 501 | 502 | for vec in EXCEPTION_VECTORS.iter() { 503 | let buf = make_longjmp_exc((*vec >> 4) as u16, except_thunk as usize); 504 | core::ptr::copy_nonoverlapping(buf.as_ptr(), *vec as *mut u32, buf.len()); 505 | } 506 | } 507 | 508 | #[cfg(test)] 509 | mod test { 510 | use crate::except::make_arithaddr; 511 | 512 | #[test] 513 | fn test_arithaddr() { 514 | assert_eq!(make_arithaddr(0x0B0B8018), 0x0B0C8018); 515 | } 516 | } 517 | -------------------------------------------------------------------------------- /boot/stage1/src/gdb.rs: -------------------------------------------------------------------------------- 1 | use core::num::NonZeroUsize; 2 | 3 | use gdbstub::{ 4 | target::{ 5 | ext::base::multithread::{MultiThreadOps, ThreadStopReason}, 6 | Target, 7 | }, 8 | GdbStubBuilder, 9 | }; 10 | use gdbstub_arch::ppc; 11 | 12 | struct BLTarget {} 13 | 14 | impl Target for BLTarget { 15 | type Arch = ppc::PowerPcAltivec32<()>; 16 | type Error = &'static str; 17 | 18 | fn base_ops(&mut self) -> gdbstub::target::ext::base::BaseOps { 19 | 
gdbstub::target::ext::base::BaseOps::MultiThread(self) 20 | } 21 | } 22 | 23 | impl MultiThreadOps for BLTarget { 24 | fn resume( 25 | &mut self, 26 | _default_resume_action: gdbstub::target::ext::base::ResumeAction, 27 | _gdb_interrupt: gdbstub::target::ext::base::GdbInterrupt<'_>, 28 | ) -> Result< 29 | gdbstub::target::ext::base::multithread::ThreadStopReason< 30 | ::Usize, 31 | >, 32 | Self::Error, 33 | > { 34 | // Does nothing for now... 35 | Ok(ThreadStopReason::DoneStep) 36 | } 37 | 38 | fn clear_resume_actions(&mut self) -> Result<(), Self::Error> { 39 | Ok(()) 40 | } 41 | 42 | fn set_resume_action( 43 | &mut self, 44 | _tid: gdbstub::common::Tid, 45 | _action: gdbstub::target::ext::base::ResumeAction, 46 | ) -> Result<(), Self::Error> { 47 | Ok(()) 48 | } 49 | 50 | fn read_registers( 51 | &mut self, 52 | _regs: &mut ::Registers, 53 | _tid: gdbstub::common::Tid, 54 | ) -> gdbstub::target::TargetResult<(), Self> { 55 | Ok(()) 56 | } 57 | 58 | fn write_registers( 59 | &mut self, 60 | _regs: &::Registers, 61 | _tid: gdbstub::common::Tid, 62 | ) -> gdbstub::target::TargetResult<(), Self> { 63 | Ok(()) 64 | } 65 | 66 | fn read_addrs( 67 | &mut self, 68 | start_addr: ::Usize, 69 | data: &mut [u8], 70 | tid: gdbstub::common::Tid, 71 | ) -> gdbstub::target::TargetResult<(), Self> { 72 | let src = unsafe { core::slice::from_raw_parts(start_addr as *const _, data.len()) }; 73 | 74 | Ok(()) 75 | } 76 | 77 | fn write_addrs( 78 | &mut self, 79 | start_addr: ::Usize, 80 | data: &[u8], 81 | tid: gdbstub::common::Tid, 82 | ) -> gdbstub::target::TargetResult<(), Self> { 83 | Ok(()) 84 | } 85 | 86 | fn list_active_threads( 87 | &mut self, 88 | thread_is_active: &mut dyn FnMut(gdbstub::common::Tid), 89 | ) -> Result<(), Self::Error> { 90 | thread_is_active(NonZeroUsize::new(1).unwrap()); 91 | 92 | Ok(()) 93 | } 94 | } 95 | 96 | pub fn entry(uart: &mut crate::uart::UART) { 97 | let mut target = BLTarget {}; 98 | 99 | let mut buf = [0u8; 4096]; 100 | let mut gdb = GdbStubBuilder::new(uart) 101 | .with_packet_buffer(&mut buf) 102 | .build() 103 | .unwrap(); 104 | 105 | match gdb.run(&mut target) { 106 | Ok(_) => {} 107 | Err(_) => {} 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /boot/stage1/src/glballoc.rs: -------------------------------------------------------------------------------- 1 | use buddyalloc::Heap; 2 | use core::alloc::{GlobalAlloc, Layout}; 3 | use sync::mutex::SpinMutex; 4 | 5 | const HEAP_START: *mut u8 = 0x8000_0000_0800_0000 as *mut u8; 6 | const HEAP_SIZE: usize = 0x0100_0000; 7 | 8 | /// Declare a simple heap locked behind a Mutex. 9 | struct LockedHeap(SpinMutex>); 10 | 11 | /// Implement Rust's [GlobalAlloc] trait for the locked heap. 12 | unsafe impl GlobalAlloc for LockedHeap { 13 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 14 | self.0 15 | .lock(|heap| heap.allocate(layout).unwrap_or(core::ptr::null_mut())) 16 | } 17 | 18 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 19 | self.0.lock(|heap| { 20 | heap.deallocate(ptr, layout); 21 | }); 22 | } 23 | } 24 | 25 | #[alloc_error_handler] 26 | fn alloc_error(_layout: Layout) -> ! 
{ 27 | panic!("Allocation failed."); 28 | } 29 | 30 | #[global_allocator] 31 | static mut ALLOCATOR: LockedHeap<20> = 32 | unsafe { LockedHeap(SpinMutex::new(Heap::new_unchecked(HEAP_START, HEAP_SIZE))) }; 33 | -------------------------------------------------------------------------------- /boot/stage1/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature( 2 | alloc_error_handler, 3 | const_ptr_offset_from, 4 | global_asm, 5 | lang_items, 6 | naked_functions, 7 | asm 8 | )] 9 | #![no_std] 10 | #![no_main] 11 | 12 | use atomic::Atomic; 13 | use core::{ 14 | fmt::Write, 15 | sync::atomic::{AtomicU32, Ordering}, 16 | }; 17 | use xenon_cpu::{ 18 | intrin::{mfmsr, mtmsrl}, 19 | mfspr, 20 | }; 21 | use xenon_soc::{smc, uart}; 22 | use crate::util::bit; 23 | 24 | extern crate alloc; 25 | extern crate core_reqs; 26 | 27 | mod glballoc; 28 | mod except; 29 | mod panic; 30 | mod util; 31 | 32 | use except::ExceptionType; 33 | 34 | global_asm!(include_str!("startup.s")); 35 | 36 | static PROCESSORS: AtomicU32 = AtomicU32::new(0); 37 | 38 | macro_rules! println { 39 | ($($tts:tt)*) => { 40 | uart::UART.lock(|mut uart| { 41 | core::writeln!(&mut uart, $($tts)*).unwrap(); 42 | }); 43 | }; 44 | } 45 | 46 | macro_rules! print { 47 | ($($tts:tt)*) => { 48 | uart::UART.lock(|mut uart| { 49 | core::write!(&mut uart, $($tts)*).unwrap(); 50 | }); 51 | }; 52 | } 53 | 54 | fn read_line(uart: &mut uart::UART, line: &mut [u8]) -> usize { 55 | let mut n = 0usize; 56 | 57 | while n < line.len() { 58 | match uart.read_byte() { 59 | b'\r' => { 60 | uart.write(b"\r\n"); 61 | break; 62 | } 63 | 64 | // Backspace. 65 | 0x08 => { 66 | if n != 0 { 67 | // Clear the character from the screen. 68 | uart.write(b"\x08 \x08"); 69 | 70 | line[n] = b'\0'; 71 | n -= 1; 72 | } 73 | } 74 | 75 | byte => { 76 | uart.write_byte(byte); 77 | 78 | line[n] = byte; 79 | n += 1; 80 | } 81 | } 82 | } 83 | 84 | n 85 | } 86 | 87 | fn serial_terminal() { 88 | let mut buf = [0u8; 1024]; 89 | loop { 90 | print!("\n> "); 91 | 92 | let n = uart::UART.lock(|mut uart| read_line(&mut uart, &mut buf)); 93 | 94 | let line = match core::str::from_utf8(&buf[..n]) { 95 | Ok(l) => l, 96 | Err(_) => continue, 97 | }; 98 | 99 | let mut args = line.split(' '); 100 | match args.next() { 101 | Some("r64") => { 102 | let addr = { 103 | let addr_str = match args.next() { 104 | Some(a) => a, 105 | None => { 106 | println!("r64
"); 107 | continue; 108 | } 109 | }; 110 | 111 | match u64::from_str_radix(addr_str, 16) { 112 | Ok(n) => n, 113 | Err(_) => { 114 | println!("invalid address"); 115 | continue; 116 | } 117 | } 118 | }; 119 | 120 | let val = unsafe { core::ptr::read_volatile(addr as *const u64) }; 121 | println!("{:016X}", val); 122 | } 123 | 124 | Some("w64") => { 125 | let addr = { 126 | let addr_str = match args.next() { 127 | Some(a) => a, 128 | None => { 129 | println!("w64
"); 130 | continue; 131 | } 132 | }; 133 | 134 | match u64::from_str_radix(addr_str, 16) { 135 | Ok(n) => n, 136 | Err(_) => { 137 | println!("invalid address"); 138 | continue; 139 | } 140 | } 141 | }; 142 | 143 | let val = { 144 | let val_str = match args.next() { 145 | Some(a) => a, 146 | None => { 147 | println!("w64
"); 148 | continue; 149 | } 150 | }; 151 | 152 | match u64::from_str_radix(val_str, 16) { 153 | Ok(n) => n, 154 | Err(_) => { 155 | println!("invalid value"); 156 | continue; 157 | } 158 | } 159 | }; 160 | 161 | unsafe { 162 | core::ptr::write_volatile(addr as *mut u64, val); 163 | } 164 | } 165 | 166 | Some("reboot") => { 167 | println!("Rebooting system..."); 168 | smc::SMC.lock(|smc| { 169 | smc.restart_system(); 170 | }); 171 | } 172 | 173 | Some("except") => { 174 | println!("If you say so..."); 175 | unsafe { 176 | except::cause_exception(); 177 | } 178 | } 179 | 180 | Some("ping") => { 181 | println!("pong"); 182 | } 183 | 184 | Some("🍆") => { 185 | println!(";)"); 186 | } 187 | 188 | Some("exit") => { 189 | println!("Goodbye!"); 190 | return; 191 | } 192 | 193 | Some("") => {} 194 | 195 | Some(cmd) => { 196 | println!("Unknown command \"{}\"!", cmd); 197 | } 198 | 199 | None => {} 200 | } 201 | } 202 | } 203 | 204 | fn startup_exception_handler(ex: ExceptionType, ctx: &mut except::CpuContext) -> Result<(), ()> { 205 | let pir = xenon_cpu::intrin::pir(); 206 | uart::UART.lock(|uart| { 207 | let sp = unsafe { 208 | let sp: u64; 209 | asm!( 210 | "mr {}, %r1", 211 | out(reg) sp 212 | ); 213 | 214 | sp 215 | }; 216 | 217 | let toc = unsafe { 218 | let toc: u64; 219 | asm!( 220 | "mr {}, %r2", 221 | out(reg) toc 222 | ); 223 | 224 | toc 225 | }; 226 | 227 | core::writeln!(uart, "Exception on processor {:#?}!", pir).unwrap(); 228 | core::writeln!(uart, "EXC: {:?}", ex).unwrap(); 229 | core::writeln!(uart, "MSR: {:016X}", xenon_cpu::intrin::mfmsr()).unwrap(); 230 | core::writeln!(uart, "LPCR: {:016X}", unsafe { mfspr!(318) }).unwrap(); 231 | core::writeln!(uart, "LPIDR: {:016X}", unsafe { mfspr!(319) }).unwrap(); 232 | core::writeln!(uart, "HDEC: {:016X}", unsafe { mfspr!(310) }).unwrap(); 233 | core::writeln!(uart, "DEC: {:016X}", unsafe { mfspr!(22) }).unwrap(); 234 | core::writeln!(uart, "TOC: {:016X}", toc).unwrap(); 235 | core::writeln!(uart, "SP: {:016X}", sp).unwrap(); 236 | core::writeln!(uart, "CTX:\n{:>3?}", ctx).unwrap(); 237 | }); 238 | 239 | // Branch to thread entry. 240 | let context = except::CpuContext::with_hvcall(cpu_startup, 0x8000_0000_1E00_0000 - (pir << 16)); 241 | unsafe { 242 | except::load_context(&context); 243 | } 244 | } 245 | 246 | fn normal_exception_handler(ex: ExceptionType, _ctx: &mut except::CpuContext) -> Result<(), ()> { 247 | match ex { 248 | ExceptionType::ExternalInterrupt => Ok(()), 249 | 250 | _ => Err(()), 251 | } 252 | } 253 | 254 | fn exception_handler(ex: ExceptionType, ctx: &mut except::CpuContext) -> Result<(), ()> { 255 | match EXCEPTION_HANDLER_MODE.load(Ordering::Relaxed) { 256 | ExceptionMode::Startup => startup_exception_handler(ex, ctx), 257 | ExceptionMode::Normal => normal_exception_handler(ex, ctx), 258 | } 259 | } 260 | 261 | #[derive(Clone, Copy)] 262 | enum ExceptionMode { 263 | Startup, 264 | Normal, 265 | } 266 | 267 | static EXCEPTION_HANDLER_MODE: Atomic = Atomic::new(ExceptionMode::Startup); 268 | 269 | extern "C" fn cpu_startup() -> ! { 270 | let pir = xenon_cpu::intrin::pir(); 271 | PROCESSORS.fetch_or(1 << pir, Ordering::Relaxed); 272 | 273 | // Loop until all processors check in. 274 | while PROCESSORS.load(Ordering::Relaxed) != 0x3F {} 275 | 276 | // Enable external interrupts. 277 | unsafe { 278 | mtmsrl(bit(48)); 279 | } 280 | 281 | cpu_idle(); 282 | } 283 | 284 | fn cpu_idle() -> ! 
{ 285 | let pir = xenon_cpu::intrin::pir(); 286 | 287 | loop {} 288 | } 289 | 290 | #[no_mangle] 291 | #[link_section = ".text.startup"] 292 | pub extern "C" fn __start_rust( 293 | pir: u64, 294 | src: u32, 295 | _msr: u64, 296 | hrmor: u64, 297 | pvr: u64, 298 | lpcr: u64, 299 | ) -> ! { 300 | unsafe { 301 | // Clear out the relocation routine written by startup.s 302 | core_reqs::memset(0x8000_0000_0000_0000 as *mut u8, 0x00, 0x100); 303 | 304 | // Disable all checkstops. Enable machine check exceptions. 305 | // Default: 0x07FFA7FE00000000 306 | core::ptr::write_volatile(0x8000_0200_0006_1060 as *mut u64, 0x0000_07FF_0000_0000); 307 | } 308 | 309 | uart::UART.lock(|uart| { 310 | if pir == 0 { 311 | uart.reset(uart::Speed::S115200); 312 | } 313 | 314 | let sp = unsafe { 315 | let sp: u64; 316 | asm!( 317 | "mr {}, %r1", 318 | out(reg) sp 319 | ); 320 | 321 | sp 322 | }; 323 | 324 | let toc = unsafe { 325 | let toc: u64; 326 | asm!( 327 | "mr {}, %r2", 328 | out(reg) toc 329 | ); 330 | 331 | toc 332 | }; 333 | 334 | core::writeln!(uart, "Hello from processor {:#?}!", pir).unwrap(); 335 | core::writeln!(uart, "MSR: {:016X}", mfmsr()).unwrap(); 336 | core::writeln!(uart, "HRMOR: {:016X}", hrmor).unwrap(); 337 | core::writeln!(uart, "RMOR: {:016X}", unsafe { mfspr!(312) }).unwrap(); 338 | core::writeln!(uart, "LPCR: {:016X}", lpcr).unwrap(); 339 | core::writeln!(uart, "LPIDR: {:016X}", unsafe { mfspr!(319) }).unwrap(); 340 | core::writeln!(uart, "PVR: {:016X}", pvr).unwrap(); 341 | core::writeln!(uart, "HDEC: {:016X}", unsafe { mfspr!(310) }).unwrap(); 342 | core::writeln!(uart, "DEC: {:016X}", unsafe { mfspr!(22) }).unwrap(); 343 | core::writeln!(uart, "TOC: {:016X}", toc).unwrap(); 344 | core::writeln!(uart, "SP: {:016X}", sp).unwrap(); 345 | core::writeln!(uart, "SRC: {:016X}", src).unwrap(); 346 | }); 347 | 348 | unsafe { 349 | except::init_except(Some(exception_handler)); 350 | } 351 | 352 | match src { 353 | // Startup from ROM 354 | /* 355 | 0 => { 356 | write!("Startup from ROM.\n"); 357 | 358 | // Setup a jump vector to branch to our startup code. 359 | let jmpbuf = make_longjmp(start_from_rom as usize); 360 | 361 | unsafe { 362 | core::ptr::copy_nonoverlapping( 363 | jmpbuf.as_ptr(), 364 | 0x80000000_00000100usize as *mut u32, 365 | jmpbuf.len(), 366 | ); 367 | } 368 | 369 | // Startup the secondary thread. 370 | unsafe { 371 | // CTRL.TE{0,1} = 0b11 372 | mtspr!(152, 0x00C0_0000); 373 | } 374 | } 375 | */ 376 | // Startup from OS (1) 377 | // HACK: Also going to apply this path for startup from ROM for development. 378 | 0 | 1 => { 379 | println!("Startup from OS."); 380 | 381 | // We'll need to catch all other cores that may still be running the OS. 382 | // Set a branch on the external interrupt vector, and trigger an IPI. 383 | println!("Triggering IPI on all other cores."); 384 | 385 | // Loop... 386 | while PROCESSORS.load(Ordering::Relaxed) != 0x3E { 387 | print!( 388 | "Waiting for other processors... {:02X} \r", 389 | PROCESSORS.load(Ordering::Relaxed) 390 | ); 391 | 392 | smc::SMC.lock(|smc| { 393 | smc.set_led(true, PROCESSORS.load(Ordering::Relaxed) as u8); 394 | }); 395 | 396 | unsafe { 397 | // Set the IRQL on all other processors to 0 (to unmask all interrupts). 398 | // The hypervisor isn't going to like this, but we set a detour on the interrupt vector earlier. 
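                // Layout assumed here: each hardware thread has a 0x1000-byte
                // interrupt-control block starting at 0x8000_0200_0005_0000, and the
                // u64 at offset +0x08 holds its current interrupt priority, so writing
                // 0 unmasks everything. The write to +0x10 below raises the IPI:
                // 0x003F selects all six threads, and 0x0078 matches "vector 30" if
                // the controller encodes vectors shifted left by 2 (30 * 4 = 0x78).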
399 | for i in 1usize..6usize { 400 | let ptr = (0x8000_0200_0005_0000 + (i * 0x1000)) as *mut u64; 401 | ptr.offset(1).write_volatile(0); 402 | } 403 | 404 | // Trigger an IPI on all other processors, with vector 30. 405 | core::ptr::write_volatile(0x8000_0200_0005_0010 as *mut u64, 0x003F_0078); 406 | } 407 | 408 | xenon_cpu::time::delay(core::time::Duration::from_secs(1)); 409 | } 410 | 411 | println!("Processors captured."); 412 | } 413 | 414 | // Shouldn't hit this case. 415 | _ => loop {}, 416 | } 417 | 418 | smc::SMC.lock(|smc| { 419 | // Flash all green LEDs. 420 | smc.set_led(true, 0xF0); 421 | }); 422 | 423 | EXCEPTION_HANDLER_MODE.store(ExceptionMode::Normal, Ordering::Relaxed); 424 | println!("System captured."); 425 | 426 | PROCESSORS.fetch_or(1 << pir, Ordering::Relaxed); 427 | 428 | // Branch to thread entry. 429 | let context = except::CpuContext::with_hvcall(cpu_startup, 0x8000_0000_1E00_0000 - (pir << 16)); 430 | unsafe { 431 | except::load_context(&context); 432 | } 433 | } 434 | -------------------------------------------------------------------------------- /boot/stage1/src/panic.rs: -------------------------------------------------------------------------------- 1 | use core::{ffi::c_void, panic::PanicInfo}; 2 | 3 | use xenon_soc::uart; 4 | 5 | #[lang = "eh_personality"] 6 | extern "C" fn rust_eh_personality() {} 7 | 8 | /// This is the global Rust panic handler. 9 | /// Currently it does nothing, though eventually it should print a backtrace. 10 | #[panic_handler] 11 | #[no_mangle] 12 | pub fn panic(_info: &PanicInfo) -> ! { 13 | let uart = unsafe { uart::UART.get_mut_unchecked() }; 14 | 15 | uart.write(b"RUST PANIC!\r\n"); 16 | 17 | loop {} 18 | } 19 | 20 | #[allow(non_snake_case)] 21 | #[no_mangle] 22 | pub extern "C" fn _Unwind_Resume(_exception: *mut c_void) -> ! { 23 | loop {} 24 | } 25 | -------------------------------------------------------------------------------- /boot/stage1/src/startup.s: -------------------------------------------------------------------------------- 1 | #define esr 62 2 | #define ivpr 63 3 | #define pid 48 4 | #define ctrlrd 136 5 | #define ctrlwr 152 6 | #define pvr 287 7 | #define sprg0 272 8 | #define hsprg0 304 9 | #define hsprg1 305 10 | #define hdsisr 306 11 | #define hdar 307 12 | #define dbcr0 308 13 | #define dbcr1 309 14 | #define hdec 310 15 | #define hior 311 16 | #define rmor 312 17 | #define hrmor 313 18 | #define hsrr0 314 19 | #define hsrr1 315 20 | #define dac1 316 21 | #define dac2 317 22 | #define lpcr 318 23 | #define lpidr 319 24 | #define tsr 336 25 | #define tcr 340 26 | #define tsrl 896 27 | #define tsrr 897 28 | #define tscr 921 29 | #define ttr 922 30 | #define PpeTlbIndexHint 946 31 | #define PpeTlbIndex 947 32 | #define PpeTlbVpn 948 33 | #define PpeTlbRpn 949 34 | #define PpeTlbRmt 951 35 | #define dsr0 952 36 | #define drmr0 953 37 | #define dcidr0 954 38 | #define drsr1 955 39 | #define drmr1 956 40 | #define dcidr1 957 41 | #define issr0 976 42 | #define irmr0 977 43 | #define icidr0 978 44 | #define irsr1 979 45 | #define irmr1 980 46 | #define icidr1 981 47 | #define hid0 1008 48 | #define hid1 1009 49 | #define hid4 1012 50 | #define iabr 1010 51 | #define dabr 1013 52 | #define dabrx 1015 53 | #define buscsr 1016 54 | #define hid6 1017 55 | #define l2sr 1018 56 | #define BpVr 1022 57 | #define pir 1023 58 | 59 | .section .text.startup 60 | 61 | .globl _start 62 | _start: 63 | b start_from_rom // The CD loader will jump to this address. 64 | b start_from_libxenon 65 | b . // for future use 66 | b . 
67 | b . 68 | b . 69 | b . 70 | b . 71 | 72 | .section .text 73 | .extern start_rust 74 | 75 | // Startup XeLL from already-running OS (dashboard or libxenon) 76 | start_from_libxenon: 77 | bl init_regs 78 | li %r4, 1 79 | b start_common 80 | 81 | // Startup XeLL from ROM. 82 | .globl start_from_rom 83 | start_from_rom: 84 | bl init_regs 85 | li %r4, 0 86 | 87 | // Intentional fallthrough. 88 | 89 | // R1 = stack 90 | // R2 = TOC 91 | // R3 = clobber 92 | // R4(store R30) = startup source 93 | // R10 = clobber 94 | // R11 = clobber 95 | // R30 = clobber 96 | start_common: 97 | mr %r30, %r4 // Relocate startup source. 98 | mfspr %r29, 318 // LPCR 99 | 100 | // disable MSR[EE] 101 | li %r3, 2 102 | mtmsrd %r3, 1 103 | 104 | li %r3, 2 105 | isync 106 | mtspr 318, %r3 // LPCR[RMI] = 1 (Real-Mode cache inhibited) 107 | isync 108 | li %r3, 0x3FF 109 | rldicr %r3, %r3, 32,31 110 | tlbiel %r3 // TLB invalidate (local) 0x000003FF_00000000 111 | sync 112 | isync 113 | 114 | mfspr %r10, 1009 // HID1 115 | li %r11, 3 116 | rldimi %r10, %r11, 58,4 // enable icache 117 | rldimi %r10, %r11, 38,25 // instr. prefetch 118 | sync 119 | mtspr 1009, %r10 // HID1 120 | sync 121 | isync 122 | 123 | mfspr %r10, 318 // LPCR 124 | li %r11, 1 125 | rldimi %r10, %r11, 1,62 126 | isync 127 | mtspr 318, %r10 // LPCR 128 | isync 129 | 130 | bl disable_hrmor 131 | bl relocate 132 | 133 | bl load_toc 134 | 135 | mfspr %r3, 1023 // PIR 136 | bl load_stack 137 | 138 | mfspr %r3, 1023 // PIR 139 | cmplwi %r3, 0 140 | bne 1f 141 | 142 | // Initialize BSS on processor 0 only. 143 | bl init_bss 144 | 145 | 1: 146 | mfspr %r3, 1023 // PIR 147 | mr %r4, %r30 // Startup source. 148 | mfmsr %r5 149 | mfspr %r6, 313 // HRMOR 150 | mfpvr %r7 151 | mr %r8, %r29 // LPCR 152 | 153 | bl __start_rust 154 | ori %r0, %r0, 0 155 | 156 | b . 157 | 158 | // Initialize hardware registers. 159 | // R3 = clobber 160 | init_regs: 161 | or %r2, %r2, %r2 // normal priority 162 | 163 | // Set up the HID (Hardware Implementation Dependent) registers. 164 | // Refer to Cell Broadband Engine Registers, v1.5 165 | 166 | // HID0: Implementation differs per CPU, but some bits are reused. 167 | // On the Cell Broadband Engine, this just inhibits things we probably don't want. 168 | li %r3, 0 169 | mtspr 1008, %r3 // HID0 170 | sync 171 | isync 172 | 173 | // As per the Cell Broadband Engine Hardware Initialization Guide. 174 | // Enable the L1 data cache. 175 | // 0x00003F0000000000 176 | li %r3, 0x3f00 177 | rldicr %r3, %r3, 32,31 178 | mtspr 1012, %r3 // HID4 179 | sync 180 | isync 181 | 182 | // As per Cell Broadband Engine Hardware Initialization Guide. 183 | // Enable the L1 instruction cache, and make 0x100 the reset vector for thread 0. 184 | // DIS_SYSRST_REG = 1 (Disable config ring system reset vector) 185 | // 0x9C30104000000000 186 | lis %r3, 0x9c30 187 | ori %r3,%r3, 0x1040 188 | rldicr %r3, %r3, 32,31 189 | mtspr 1009, %r3 // HID1 190 | sync 191 | isync 192 | 193 | // Initialize RMSC to set the real mode address boundary to 2TB. 
194 | // RMSC = 0b1110b 195 | // LB = 0b1000 (64KB / 16MB large page table size) 196 | // TB = 0b1 (Time base enabled) 197 | // 0x0001803800000000 198 | lis %r3, 1 199 | ori %r3,%r3, 0x8038 200 | rldicr %r3, %r3, 32,31 201 | mtspr 1017, %r3 // HID6 202 | sync 203 | isync 204 | 205 | // Thread Switch Control Register (tscr) 206 | // WEXT = 1 207 | // PBUMP = 1 (boost thread priority level to medium when interrupt pending) 208 | // FPCF = 1 209 | // PSCTP = 1 (privileged can change priority) 210 | // 0x001D0000 211 | lis %r3, 0x1d 212 | mtspr 921, %r3 // TSCR 213 | sync 214 | isync 215 | 216 | // Thread Switch Timeout Register 217 | // TTM = 0x1000 (thread interrupted after executing 4096 instructions) 218 | li %r3, 0x1000 219 | mtspr 922, %r3 // TTR 220 | sync 221 | isync 222 | 223 | blr 224 | 225 | // Initialize BSS 226 | // R10 = clobber 227 | // R11 = clobber 228 | // CTR = clobber 229 | init_bss: 230 | ld %r10, __bss_start@toc(%r2) 231 | ld %r11, __bss_end@toc(%r2) 232 | sub %r11, %r11, %r10 // r11 = (end - start) 233 | srdi %r11, %r11, 2 // r11 /= 4 234 | subi %r10, %r10, 4 235 | cmplwi %r11, 0 236 | beq 1f 237 | 238 | mtctr %r11 239 | li %r11, 0 240 | 241 | .bss_loop: 242 | stwu %r11, 4(%r10) 243 | bdnz .bss_loop 244 | 245 | 1: 246 | blr 247 | 248 | // Sets the high bit in PC, disabling HRMOR. 249 | // R3 = clobber 250 | // R10 = clobber 251 | disable_hrmor: 252 | mflr %r10 253 | 254 | lis %r3, 0x8000 255 | sldi %r3, %r3, 32 256 | or %r3, %r3, %r10 257 | 258 | mtlr %r3 259 | blr 260 | 261 | // Sets up the stack. 262 | // R1(out) = stack 263 | // R3(in/out) = pir / clobber 264 | load_stack: 265 | // set stack 266 | // R1 = 0x80000000_1E000000 267 | lis %r1, 0x8000 268 | rldicr %r1, %r1, 32,31 269 | oris %r1, %r1, 0x1e00 270 | 271 | slwi %r3, %r3, 16 // 64k stack per thread 272 | sub %r1, %r1, %r3 273 | subi %r1, %r1, 0x80 274 | blr 275 | 276 | // R3 = lr 277 | load_lr: 278 | mflr %r3 279 | blr 280 | 281 | // Loads the table of contents pointer into R2. 282 | // R0 = clobber 283 | // R2 = TOC 284 | // R3 = clobber 285 | load_toc: 286 | mflr %r0 287 | bcl 20, 31, $+4 288 | 0: 289 | mflr %r3 290 | ld %r2, (p_toc - 0b)(%r3) 291 | add %r2, %r2, %r3 292 | mtlr %r0 293 | blr 294 | 295 | .balign 8 296 | p_toc: .8byte __toc_start + 0x8000 - 0b 297 | 298 | // Relocates the bootloader to the compiled-in address. 299 | // R2 = TOC 300 | // R3 = clobber 301 | // R4 = clobber 302 | // R5 = clobber 303 | // R6 = clobber 304 | // R7 = clobber 305 | // R8 = clobber (LR save) 306 | // R9 = clobber (relocate_memmove) 307 | relocate: 308 | mflr %r8 309 | 310 | // Load the TOC. 311 | bl load_toc 312 | 313 | bl load_lr 314 | 0: 315 | 316 | // Relocate the relocation routine to 0x8000_0000_0000_0000. 317 | addi %r4, %r3, __relocate_memmove_start - 0b 318 | lis %r3, 0x8000 319 | sldi %r3, %r3, 32 320 | mr %r9, %r3 321 | li %r5, __relocate_memmove_end - __relocate_memmove_start 322 | bl relocate_memmove 323 | 324 | // Great. Now relocate the bootloader. 325 | lis %r3, (__toc_start + 0x8000)@highest 326 | ori %r3, %r3, (__toc_start + 0x8000)@higher 327 | sldi %r3, %r3, 32 328 | oris %r3, %r3, (__toc_start + 0x8000)@high 329 | ori %r3, %r3, (__toc_start + 0x8000)@l 330 | 331 | // R4 = move delta 332 | sub %r4, %r3, %r2 333 | 334 | // Relocate the return address. 
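    // r8 still holds the link register captured on entry, which points into the
    // pre-relocation copy of the image. Adding the move delta makes it point at
    // the same instruction in the copy at the link-time address, so the
    // mtlr/mtctr/bctr sequence below returns into the relocated bootloader once
    // relocate_memmove finishes.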
335 | add %r8, %r8, %r4 336 | 337 | // R3 = DST 338 | lis %r3, _start@highest 339 | ori %r3, %r3, _start@higher 340 | sldi %r3, %r3, 32 341 | oris %r3, %r3, _start@high 342 | ori %r3, %r3, _start@l 343 | 344 | // R4 = SRC 345 | sub %r4, %r3, %r4 346 | 347 | // R5 = LEN 348 | lis %r5, 0x1 349 | 350 | // Restore the return address, and branch to the relocation memmove. 351 | mtlr %r8 352 | mtctr %r9 353 | bctr 354 | 355 | __relocate_memmove_start: 356 | 357 | // R3 = dst 358 | // R4 = src 359 | // R5 = len 360 | // R6 = clobber 361 | // R7 = clobber 362 | // This routine is purposefully kept compatible with the SysV calling ABI. 363 | relocate_memmove: 364 | cmpld %r3, %r4 365 | bge forward 366 | 367 | backward: 368 | add %r4, %r4, %r5 369 | addi %r6, %r5, 1 370 | add %r5, %r3, %r5 371 | mtctr %r6 372 | 373 | backward_loop: 374 | bdzlr 375 | lbz %r6, -0x1(%r4) 376 | subi %r7, %r5, 1 377 | subi %r4, %r4, 1 378 | stb %r6, -0x1(%r5) 379 | mr %r5, %r7 380 | b backward_loop 381 | 382 | forward: 383 | subi %r4, %r4, 1 384 | addi %r6, %r5, 1 385 | subi %r5, %r3, 1 386 | mtctr %r6 387 | bdzlr 388 | 389 | forward_loop: 390 | lbz %r6, 0x1(%r4) 391 | addi %r7, %r5, 1 392 | addi %r4, %r4, 1 393 | stb %r6, 0x1(%r5) 394 | mr %r5, %r7 395 | bdnz forward_loop 396 | 397 | blr 398 | 399 | __relocate_memmove_end: 400 | -------------------------------------------------------------------------------- /boot/stage1/src/util.rs: -------------------------------------------------------------------------------- 1 | //! This file defines utility functionality. 2 | 3 | #[allow(dead_code)] 4 | pub const fn bit(b: u64) -> u64 { 5 | 0x8000_0000_0000_0000 >> b 6 | } 7 | 8 | #[allow(dead_code)] 9 | pub const fn make_longjmp(target: usize, p1: u64) -> [u32; 17] { 10 | [ 11 | (0x3C600000 | ((target >> 48) & 0xFFFF)) as u32, // lis %r3, target[64:48] 12 | (0x60630000 | ((target >> 32) & 0xFFFF)) as u32, // ori %r3, %r3, target[48:32] 13 | 0x786307C6, // rldicr %r3, %r3, 32, 31 14 | (0x64630000 | ((target >> 16) & 0xFFFF)) as u32, // oris %r3, %r3, target[32:16] 15 | (0x60630000 | ((target >> 00) & 0xFFFF)) as u32, // ori %r3, %r3, target[16:0] 16 | 0x7C7A03A6, // mtsrr0 %r3 17 | // Clear MSR[EE/IR/DR] 18 | 0x3c800000, // lis %r4, 0x0000 19 | 0x60848030, // ori %r4, %r4, 0x8030 20 | 0x7C6000A6, // mfmsr %r3 21 | 0x7C632078, // andc %r3, %r3, %r4 22 | 0x7C7B03A6, // mtsrr1 %r3 23 | // Load the parameter. 24 | (0x3C600000 | ((p1 >> 48) & 0xFFFF)) as u32, // lis %r3, p1[64:48] 25 | (0x60630000 | ((p1 >> 32) & 0xFFFF)) as u32, // ori %r3, %r3, p1[48:32] 26 | 0x786307C6, // rldicr %r3, %r3, 32, 31 27 | (0x64630000 | ((p1 >> 16) & 0xFFFF)) as u32, // oris %r3, %r3, p1[32:16] 28 | (0x60630000 | ((p1 >> 00) & 0xFFFF)) as u32, // ori %r3, %r3, p1[16:0] 29 | // Branch to target. 30 | 0x4C000024, // rfid 31 | ] 32 | } 33 | 34 | /// Calculate |a - b|. 35 | pub const fn abs_diff(a: usize, b: usize) -> usize { 36 | if a > b { 37 | a - b 38 | } else { 39 | b - a 40 | } 41 | } 42 | 43 | /// Make an appropriate branch opcode located at `address` that 44 | /// jumps to `target`. 45 | /// 46 | /// This routine will panic if the delta is too large to be represented 47 | /// with a single branch instruction. 48 | pub const fn make_reljump(address: usize, target: usize) -> u32 { 49 | let diff = abs_diff(target, address); 50 | let offset = target.wrapping_sub(address); 51 | 52 | // If the offset can fit within a single branch instruction, use it. 
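    // Worked example (illustrative addresses, not taken from the codebase): a
    // branch placed at 0x0000_0100 targeting 0x0000_9000 has offset 0x8F00, so the
    // encoded instruction is 0x4800_0000 | 0x8F00 = 0x4800_8F00, i.e. `b +0x8F00`.
    // The offset is masked with 0x00FF_FFFC before being OR'd in; anything outside
    // the checked range panics.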
53 | if diff < 0x7F_FFFF { 54 | (0x4800_0000 | (offset & 0x00FF_FFFC)) as u32 55 | } else { 56 | panic!("Offset too large for relative jump!"); 57 | } 58 | } 59 | 60 | /// Create an address suitable for loading using signed arithmetic, 61 | /// i.e: 62 | /// 63 | /// ```asm 64 | /// lis %rX, @ha 65 | /// addi %rX, @l 66 | /// ``` 67 | pub const fn make_arithaddr(addr: u32) -> (u16, u16) { 68 | let lo = (addr & 0xFFFF) as u16; 69 | let hi = { ((addr >> 16) as u16) + if (lo & 0x8000) != 0 { 1 } else { 0 } }; 70 | 71 | (hi, lo) 72 | } 73 | -------------------------------------------------------------------------------- /docs/ibm/CBE_Public_Registers.v1_5.02APR2007.pdf: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:cc9d37b5d49f1bc6b349d8c9670dfeca62373341b3693012563f595ce5139e6b 3 | size 2449025 4 | -------------------------------------------------------------------------------- /docs/ibm/IBM_CBE_Handbook_v1.1_24APR2007_pub.pdf: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:9f7d20f5b14c7f3cd17035454e96b3200033dfaa23a063980d3955e2b606ce17 3 | size 11309066 4 | -------------------------------------------------------------------------------- /docs/ibm/IBM_CellBE_HIG_65nm_v1.01_8Jun2007.pdf: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:a751ab5269b782978125b70758d3de0941b8122b48494c99ed35ca4a96aca496 3 | size 2494018 4 | -------------------------------------------------------------------------------- /docs/images/serial_terminal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xenia-project/xell-rs/187a3982739a8325cad11b0749cb055d9110d52a/docs/images/serial_terminal.png -------------------------------------------------------------------------------- /powerpc64.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "powerpc64", 3 | "cpu": "ppc64", 4 | "relocation-model": "static", 5 | "features": "-altivec,-hard-float", 6 | "crt-static-respected": true, 7 | "data-layout": "E-m:e-i64:64-n32:64-v256:256:256-v512:512:512", 8 | "dynamic-linking": true, 9 | "env": "gnu", 10 | "executables": true, 11 | "has-elf-tls": true, 12 | "has-rpath": true, 13 | "linker": "rust-lld", 14 | "linker-flavor": "ld.lld", 15 | "linker-is-gnu": true, 16 | "llvm-abiname": "elfv2", 17 | "llvm-target": "powerpc64-unknown-linux-gnu", 18 | "max-atomic-width": 64, 19 | "os": "linux", 20 | "position-independent-executables": false, 21 | "pre-link-args": { 22 | "gcc": [ 23 | "-m64" 24 | ] 25 | }, 26 | "relro-level": "partial", 27 | "target-endian": "big", 28 | "target-family": [ 29 | "unix" 30 | ], 31 | "target-mcount": "_mcount", 32 | "target-pointer-width": "64" 33 | } 34 | -------------------------------------------------------------------------------- /shared/core_reqs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "core_reqs" 3 | version = "0.1.0" 4 | authors = ["Brandon Falk "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] -------------------------------------------------------------------------------- /shared/core_reqs/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | //! Requirements for Rust libcore. These are just basic libc `mem*()` routines 2 | //! as well as some intrinsics to get access to 64-bit integers in 32-bit land 3 | //! 4 | //! This code is from [chocolate milk](https://github.com/gamozolabs/chocolate_milk/blob/643f47b901ceda1f688d3c20ff92b0f41af80251/shared/core_reqs/src/lib.rs). 5 | 6 | #![feature(global_asm, llvm_asm)] 7 | #![no_std] 8 | 9 | /// libc `memcpy` implementation in Rust 10 | /// 11 | /// This implementation of `memcpy` is overlap safe, making it technically 12 | /// `memmove`. 13 | /// 14 | /// # Parameters 15 | /// 16 | /// * `dest` - Pointer to memory to copy to 17 | /// * `src` - Pointer to memory to copy from 18 | /// * `n` - Number of bytes to copy 19 | /// 20 | /// # Safety 21 | /// This function copies raw memory! You must make sure the two areas point to valid memory. 22 | /// In addition, you MUST make sure `src` and `dst` do not overlap! 23 | #[no_mangle] 24 | pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 { 25 | memmove(dest, src, n) 26 | } 27 | 28 | /// libc `memmove` implementation in Rust 29 | /// 30 | /// # Parameters 31 | /// 32 | /// * `dest` - Pointer to memory to copy to 33 | /// * `src` - Pointer to memory to copy from 34 | /// * `n` - Number of bytes to copy 35 | /// 36 | /// # Safety 37 | /// This function copies raw memory! You must make sure the two areas point to valid memory. 38 | #[no_mangle] 39 | pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 { 40 | if src < dest as *const u8 { 41 | // copy backwards 42 | let mut ii = n; 43 | while ii != 0 { 44 | ii -= 1; 45 | *dest.add(ii) = *src.add(ii); 46 | } 47 | } else { 48 | // copy forwards 49 | let mut ii = 0; 50 | while ii < n { 51 | *dest.add(ii) = *src.add(ii); 52 | ii += 1; 53 | } 54 | } 55 | 56 | dest 57 | } 58 | 59 | /// libc `memset` implementation in Rust 60 | /// 61 | /// # Parameters 62 | /// 63 | /// * `s` - Pointer to memory to set 64 | /// * `c` - Character to set `n` bytes in `s` to 65 | /// * `n` - Number of bytes to set 66 | /// 67 | /// # Safety 68 | /// This function modifies raw memory! You must make sure the `s` points to valid memory. 
69 | #[no_mangle] 70 | #[cfg(target_arch = "powerpc64")] 71 | pub unsafe extern "C" fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 { 72 | if n == 0 { 73 | return s; 74 | } 75 | 76 | let mut ii = n; 77 | while ii != 0 { 78 | ii -= 1; 79 | *s.add(ii) = c as u8; 80 | } 81 | 82 | s 83 | } 84 | 85 | /// libc `memset` implementation in Rust 86 | /// 87 | /// # Parameters 88 | /// 89 | /// * `s` - Pointer to memory to set 90 | /// * `c` - Character to set `n` bytes in `s` to 91 | /// * `n` - Number of bytes to set 92 | /// 93 | #[no_mangle] 94 | #[cfg(target_arch = "x86")] 95 | pub unsafe extern "C" fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 { 96 | if n == 0 { 97 | return s; 98 | } 99 | 100 | llvm_asm!(r#" 101 | rep stosb 102 | "# :: "{edi}"(s), "{ecx}"(n), "{eax}"(c) : "memory", "edi", "ecx", "eax" : 103 | "volatile", "intel"); 104 | 105 | s 106 | } 107 | 108 | /// libc `memset` implementation in Rust 109 | /// 110 | /// # Parameters 111 | /// 112 | /// * `s` - Pointer to memory to set 113 | /// * `c` - Character to set `n` bytes in `s` to 114 | /// * `n` - Number of bytes to set 115 | /// 116 | #[no_mangle] 117 | #[cfg(target_arch = "x86_64")] 118 | pub unsafe extern "C" fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 { 119 | if n == 0 { 120 | return s; 121 | } 122 | 123 | llvm_asm!(r#" 124 | rep stosb 125 | "# :: "{rdi}"(s), "{rcx}"(n), "{eax}"(c) : "memory", "rdi", "rcx", "eax" : 126 | "volatile", "intel"); 127 | 128 | s 129 | } 130 | 131 | /// libc `memcmp` implementation in Rust 132 | /// 133 | /// # Parameters 134 | /// 135 | /// * `s1` - Pointer to memory to compare with s2 136 | /// * `s2` - Pointer to memory to compare with s1 137 | /// * `n` - Number of bytes to set 138 | /// 139 | /// # Safety 140 | /// This function is generally safe to use as long as the raw memory 141 | /// is accessible. 142 | #[no_mangle] 143 | pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 { 144 | let mut ii = 0; 145 | while ii < n { 146 | let a = *s1.add(ii); 147 | let b = *s2.add(ii); 148 | if a != b { 149 | return a as i32 - b as i32; 150 | } 151 | ii += 1; 152 | } 153 | 154 | 0 155 | } 156 | 157 | /// libc `bcmp` implementation in Rust 158 | /// 159 | /// # Parameters 160 | /// 161 | /// * `s1` - Pointer to memory to compare with s2 162 | /// * `s2` - Pointer to memory to compare with s1 163 | /// * `n` - Number of bytes to compare 164 | /// 165 | /// # Safety 166 | /// This function is generally safe to use as long as the raw memory 167 | /// is accessible. 168 | #[no_mangle] 169 | pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 { 170 | let mut ii = 0; 171 | while ii < n { 172 | let a = *s1.add(ii); 173 | let b = *s2.add(ii); 174 | if a != b { 175 | return 1; 176 | } 177 | ii += 1; 178 | } 179 | 180 | 0 181 | } 182 | 183 | // Making a fake __CxxFrameHandler3 in Rust causes a panic, this is hacky 184 | // workaround where we declare it as a function that will just crash if it. 185 | // We should never hit this so it doesn't matter. 186 | #[cfg(target_arch = "x86_64")] 187 | global_asm!( 188 | r#" 189 | .global __CxxFrameHandler3 190 | __CxxFrameHandler3: 191 | ud2 192 | "# 193 | ); 194 | 195 | /// Whether or not floats are used. This is used by the MSVC calling convention 196 | /// and it just has to exist. 
197 | #[export_name = "_fltused"] 198 | pub static FLTUSED: usize = 0; 199 | -------------------------------------------------------------------------------- /shared/sync/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sync" 3 | version = "0.1.0" 4 | edition = "2018" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /shared/sync/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | pub mod mutex; 4 | -------------------------------------------------------------------------------- /shared/sync/src/mutex.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | cell::UnsafeCell, 3 | sync::atomic::{AtomicU32, Ordering}, 4 | }; 5 | 6 | /// This struct implements a naive spinlock that guards the data contained within. 7 | #[repr(align(16))] 8 | pub struct SpinMutex<T> { 9 | lock_count: AtomicU32, 10 | inner: UnsafeCell<T>, 11 | } 12 | 13 | // Implement Send + Sync for a SpinMutex containing an object that implements Send. 14 | // The inner object does not have to implement Sync, since the lock guarantees that only one thread accesses it at a time. 15 | unsafe impl<T: Send> Send for SpinMutex<T> {} 16 | unsafe impl<T: Send> Sync for SpinMutex<T> {} 17 | 18 | #[allow(dead_code)] 19 | impl<T> SpinMutex<T> { 20 | pub const fn new(inner: T) -> Self { 21 | Self { 22 | lock_count: AtomicU32::new(0), 23 | inner: UnsafeCell::new(inner), 24 | } 25 | } 26 | 27 | /// Retrieves a mutable reference to the inner value without taking the lock, 28 | /// or checking whether it is already held. Unsafe: the caller must guarantee that no other access is in flight. 29 | #[allow(clippy::mut_from_ref)] 30 | pub unsafe fn get_mut_unchecked(&self) -> &mut T { 31 | &mut *self.inner.get() 32 | } 33 | 34 | /// This function will attempt to lock the mutex and call the passed-in closure. 35 | pub fn try_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> Result<R, ()> { 36 | // Attempt to acquire the lock. 37 | match self 38 | .lock_count 39 | .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed) 40 | { 41 | Ok(_) => {} 42 | Err(_) => return Err(()), 43 | } 44 | 45 | // We now have exclusive access to the data within. 46 | let r = f(unsafe { &mut *self.inner.get() }); 47 | 48 | // Release the lock. 49 | self.lock_count.fetch_sub(1, Ordering::Release); 50 | 51 | Ok(r) 52 | } 53 | 54 | /// Spin until the mutex can be locked, then call the passed-in closure with exclusive access. 55 | pub fn lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R { 56 | // Acquire the lock. 57 | loop { 58 | match self 59 | .lock_count 60 | .compare_exchange(0, 1, Ordering::Acquire, Ordering::Relaxed) 61 | { 62 | Ok(_) => break, 63 | Err(_) => continue, 64 | } 65 | } 66 | 67 | // We have exclusive access to the data within. 68 | let r = f(unsafe { &mut *self.inner.get() }); 69 | 70 | // Release the lock. 71 | // We have to do this without lwarx/stwcx due to a processor race condition. 72 | // This is probably safe(?)
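// A plain release store is sufficient to unlock: while the lock is held, this CPU has
// exclusive ownership of `lock_count`, so no atomic read-modify-write is needed to hand
// it back, and the Release ordering still publishes every write made inside the critical
// section before another CPU can observe the lock as free.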
73 | self.lock_count.store(0, Ordering::Release); 74 | 75 | r 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /shared/xenon-cpu/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xenon-cpu" 3 | version = "0.1.0" 4 | edition = "2018" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /shared/xenon-cpu/src/intrin.rs: -------------------------------------------------------------------------------- 1 | #[inline] 2 | pub fn mftb() -> u128 { 3 | let mut tbu: u64; 4 | let mut tbl: u64; 5 | let mut tbu2: u64; 6 | 7 | loop { 8 | unsafe { 9 | asm!( 10 | "mftbu {0}", 11 | "mftb {1}", 12 | "mftbu {2}", 13 | 14 | out(reg) tbu, 15 | out(reg) tbl, 16 | out(reg) tbu2, 17 | ); 18 | } 19 | 20 | // Finished loading if the upper timebase did not change. 21 | if tbu == tbu2 { 22 | break; 23 | } 24 | } 25 | 26 | (tbu as u128) << 64 | tbl as u128 27 | } 28 | 29 | #[inline] 30 | pub fn pir() -> u64 { 31 | let pir; 32 | unsafe { 33 | asm!("mfspr {}, 1023", out(reg) pir); 34 | } 35 | 36 | pir 37 | } 38 | 39 | #[inline] 40 | pub fn mfmsr() -> u64 { 41 | let msr; 42 | unsafe { 43 | asm!("mfmsr {}", out(reg) msr); 44 | } 45 | 46 | msr 47 | } 48 | 49 | #[inline] 50 | pub unsafe fn mtmsr(msr: u64) { 51 | asm!("mtmsrd {}, 0", in(reg) msr); 52 | } 53 | 54 | #[inline] 55 | pub unsafe fn mtmsrl(msr: u64) { 56 | asm!("mtmsrd {}, 1", in(reg) msr); 57 | } 58 | 59 | #[macro_export] 60 | macro_rules! mtspr { 61 | ($spr:literal, $val:expr) => { 62 | asm!( 63 | "mtspr {spr}, {0}", 64 | in(reg_nonzero) $val, 65 | spr = const $spr, 66 | ) 67 | }; 68 | } 69 | 70 | #[macro_export] 71 | macro_rules! mfspr { 72 | ($spr:literal) => { 73 | { 74 | let mut val = 0u64; 75 | asm!( 76 | "mfspr {0}, {spr}", 77 | out(reg_nonzero) val, 78 | spr = const $spr, 79 | ); 80 | 81 | val 82 | } 83 | }; 84 | } 85 | -------------------------------------------------------------------------------- /shared/xenon-cpu/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(asm)] 2 | #![no_std] 3 | 4 | pub mod intrin; 5 | pub mod time; 6 | -------------------------------------------------------------------------------- /shared/xenon-cpu/src/time.rs: -------------------------------------------------------------------------------- 1 | const TIMEBASE_FREQ: u64 = 3192000000 / 64; 2 | 3 | use crate::intrin::mftb; 4 | use core::time::Duration; 5 | 6 | fn tdelay(time: u128) { 7 | let tgt = time.saturating_add(mftb() as u128); 8 | while mftb() < tgt {} 9 | } 10 | 11 | pub fn delay(length: Duration) { 12 | tdelay((length.as_nanos() * TIMEBASE_FREQ as u128) / 1_000_000_000); 13 | } 14 | -------------------------------------------------------------------------------- /shared/xenon-enet/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
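The closure-based `SpinMutex` API in `shared/sync/src/mutex.rs` above scopes the critical section to the closure, so lock and unlock are always paired. A minimal usage sketch (hypothetical names, not from the repo):

```rust
use sync::mutex::SpinMutex;

static BOOT_STAGE: SpinMutex<u32> = SpinMutex::new(0);

fn advance_stage() -> u32 {
    // `lock` spins until the mutex is free, hands the closure exclusive access,
    // and releases the lock when the closure returns.
    BOOT_STAGE.lock(|s| {
        *s += 1;
        *s
    })
}

fn peek_stage() -> Option<u32> {
    // `try_lock` fails immediately instead of spinning when the lock is held.
    BOOT_STAGE.try_lock(|s| *s).ok()
}
```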
3 | version = 3 4 | 5 | [[package]] 6 | name = "bitflags" 7 | version = "1.2.1" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" 10 | 11 | [[package]] 12 | name = "byteorder" 13 | version = "1.4.3" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" 16 | 17 | [[package]] 18 | name = "cfg-if" 19 | version = "1.0.0" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 22 | 23 | [[package]] 24 | name = "libc" 25 | version = "0.2.94" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "18794a8ad5b29321f790b55d93dfba91e125cb1a9edbd4f8e3150acc771c1a5e" 28 | 29 | [[package]] 30 | name = "log" 31 | version = "0.4.14" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" 34 | dependencies = [ 35 | "cfg-if", 36 | ] 37 | 38 | [[package]] 39 | name = "managed" 40 | version = "0.7.2" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "c75de51135344a4f8ed3cfe2720dc27736f7711989703a0b43aadf3753c55577" 43 | 44 | [[package]] 45 | name = "smoltcp" 46 | version = "0.7.1" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "97173c1ef35b0a09304cb3882eba594761243005847cbbf6124f966e8da6519a" 49 | dependencies = [ 50 | "bitflags", 51 | "byteorder", 52 | "libc", 53 | "log", 54 | "managed", 55 | ] 56 | 57 | [[package]] 58 | name = "xenon-enet" 59 | version = "0.1.0" 60 | dependencies = [ 61 | "smoltcp", 62 | ] 63 | -------------------------------------------------------------------------------- /shared/xenon-enet/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xenon-enet" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | smoltcp = { version = "0.7.5", default-features = false, features = [ 10 | "log", "proto-ipv4", "proto-ipv6" 11 | ] } 12 | 13 | xenon-cpu = { path = "../xenon-cpu" } 14 | -------------------------------------------------------------------------------- /shared/xenon-enet/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | #![feature(iter_zip)] 3 | 4 | mod ring; 5 | 6 | extern crate alloc; 7 | 8 | use ring::{Ring, RxRing, TxRing}; 9 | 10 | use alloc::boxed::Box; 11 | use core::{ptr::NonNull, time::Duration}; 12 | use smoltcp::phy::{self, Device}; 13 | 14 | #[allow(dead_code)] 15 | #[repr(u32)] 16 | enum Register { 17 | TxConfig = 0x00, 18 | TxDescriptorBase = 0x04, 19 | TxDescriptorStatus = 0x0C, 20 | RxConfig = 0x10, 21 | RxDescriptorBase = 0x14, 22 | // RxDescriptorStatus(?) = 0x18, 23 | InterruptStatus = 0x20, 24 | InterruptMask = 0x24, 25 | Config0 = 0x28, 26 | Power = 0x30, 27 | PhyConfig = 0x40, 28 | PhyControl = 0x44, 29 | Config1 = 0x50, 30 | RetryCount = 0x54, 31 | MulticastFilterControl = 0x60, 32 | Address0 = 0x62, 33 | MulticastHigh = 0x68, 34 | MaxPacketSize = 0x78, 35 | Address1 = 0x7A, 36 | } 37 | 38 | // Flag bit guesses: 39 | // 0x8000_0000: Hardware ownership bit 40 | // 0x4000_0000: ?? 41 | // 0x0020_0000: (TX) last buffer? e.g. 
packet not split 42 | // 0x0002_0000: (TX) interrupt related? 43 | // 0x0001_0000: (TX) interrupt related? 44 | 45 | const HWDESC_FLAG_HW_OWNED: u32 = 0x80000000; 46 | const HWDESC_CAP_LAST_ENTRY: u32 = 0x80000000; // N.B: This is set in the `capacity` field. 47 | 48 | #[repr(C, align(2048))] 49 | #[derive(Clone)] 50 | pub struct EthernetBuffer([u8; 2048]); 51 | 52 | impl Default for EthernetBuffer { 53 | fn default() -> Self { 54 | Self([0u8; 2048]) 55 | } 56 | } 57 | 58 | #[repr(C, packed)] 59 | struct MacAddress([u8; 6]); 60 | 61 | impl From for MacAddress { 62 | fn from(n: u64) -> Self { 63 | let bytes = n.to_be_bytes(); 64 | Self(bytes[2..].try_into().unwrap()) 65 | } 66 | } 67 | 68 | /// Transfer descriptor, as defined by hardware. 69 | /// 70 | /// Descriptors can follow the following state machine: 71 | /// * RX 72 | /// * Free: 73 | /// * `len != 0`: Network packet contained within buffer. 74 | /// * `len == 0`: No receive buffer set. (implies `capacity` == 0) 75 | /// * Busy: Owned by hardware; pending packet RX 76 | /// * TX 77 | /// * Free: Descriptor is free for queueing a network TX. 78 | /// * Transmitted packet contained within; can free buffer. 79 | /// * No transmit buffer set. 80 | /// * Busy: Owned by hardware; pending packet TX 81 | #[repr(C, align(16))] 82 | #[derive(Clone, Copy)] 83 | struct HwDescriptor { 84 | /// Length of the packet contained within `addr`, if any. 85 | len: u32, 86 | /// Flags interpreted by the hardware, such as an ownership bit or interrupt 87 | /// routing bits. 88 | flags: u32, 89 | /// Physical address of an in-memory buffer used to contain the packet. 90 | addr: u32, 91 | /// Capacity of the in-memory buffer, with the high bit aliased as an "end-of-ring" bit. 92 | capacity: u32, 93 | } 94 | 95 | impl HwDescriptor { 96 | fn new() -> Self { 97 | Self { 98 | len: 0, 99 | flags: 0, 100 | addr: 0, 101 | capacity: 0, 102 | } 103 | } 104 | 105 | /// Query to see if this descriptor is currently busy (owned by hardware) at this point in time. 106 | fn is_busy(&self) -> bool { 107 | (unsafe { core::ptr::read_volatile(&self.flags) } & HWDESC_FLAG_HW_OWNED) != 0 108 | } 109 | } 110 | 111 | impl Default for HwDescriptor { 112 | fn default() -> Self { 113 | Self { 114 | len: 0x0000_0000, 115 | flags: 0x0000_0000, 116 | addr: 0x0BAD_F00D, 117 | capacity: 0x0000_0000, 118 | } 119 | } 120 | } 121 | 122 | #[repr(align(16))] 123 | pub struct EthernetDevice { 124 | mmio: core::ptr::NonNull, 125 | 126 | rx_ring: Ring, 127 | tx_ring: Ring, 128 | } 129 | 130 | impl EthernetDevice { 131 | /// Constructs a new [EthernetDevice]. 132 | /// 133 | /// SAFETY: The caller _MUST_ ensure that there is only one instance 134 | /// of this object at a time. Multiple instances will cause undefined behavior. 135 | pub unsafe fn new() -> Self { 136 | let mut obj = Self { 137 | mmio: NonNull::new_unchecked(0x8000_0200_EA00_1400 as *mut u8), 138 | 139 | rx_ring: Ring::new(), 140 | tx_ring: Ring::new(), 141 | }; 142 | 143 | obj.reset(); 144 | obj 145 | } 146 | 147 | fn write(&mut self, reg: Register, val: T) { 148 | // SAFETY: The access is bounded by Register, and cannot arbitrarily overflow. 149 | unsafe { 150 | core::ptr::write_volatile(self.mmio.as_ptr().offset(reg as isize) as *mut T, val); 151 | } 152 | } 153 | 154 | fn read(&mut self, reg: Register) -> T { 155 | // SAFETY: The access is bounded by Register, and cannot arbitrarily overflow. 
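// The volatile load likewise keeps the compiler from caching or merging MMIO reads.
// As with `write` above, `T` is presumably only instantiated with plain values whose
// size matches the register being accessed.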
156 | unsafe { core::ptr::read_volatile(self.mmio.as_ptr().offset(reg as isize) as *mut T) } 157 | } 158 | 159 | fn reset(&mut self) { 160 | // N.B: The magic numbers are from: 161 | // https://github.com/xenia-project/linux/blob/8b3cd8b6e99453ad854a5441092ed87b70385f37/drivers/net/ethernet/xenon/xenon_net.c#L370-L438 162 | 163 | // Zero out the interrupt mask. 164 | self.write(Register::InterruptMask, 0x00000000); 165 | 166 | // Reset the chip. 167 | self.write(Register::Config0, 0x0855_8001); 168 | xenon_cpu::time::delay(Duration::from_micros(100)); 169 | self.write(Register::Config0, 0x0855_0001); 170 | 171 | self.write(Register::PhyControl, 0x00000004); 172 | xenon_cpu::time::delay(Duration::from_micros(100)); 173 | self.write(Register::PhyControl, 0x00000000); 174 | 175 | self.write(Register::MaxPacketSize, 1522u32); 176 | 177 | self.write(Register::Config1, 0x2360); 178 | 179 | self.write(Register::MulticastFilterControl, 0x0E38); 180 | 181 | self.write(Register::Address0, MacAddress::from(0x69_42_00_00_00_00)); 182 | self.write(Register::Address1, MacAddress::from(0x69_42_00_00_00_01)); 183 | 184 | self.write(Register::TxConfig, 0x0000_1C00); 185 | self.write(Register::RxConfig, 0x0010_1C00); 186 | 187 | self.write(Register::PhyConfig, 0x0400_1901); 188 | 189 | // Write out the TX descriptor ring base 0. 190 | self.write(Register::TxConfig, 0x0000_1C00); 191 | self.write(Register::TxDescriptorBase, self.tx_ring.phys_base() as u32); 192 | 193 | // Write out the TX descriptor ring base 1. 194 | // FIXME: The originating implementation was hacked together. Why do they use the same ring twice? 195 | self.write(Register::TxConfig, 0x0001_1C00); 196 | self.write(Register::TxDescriptorBase, self.tx_ring.phys_base() as u32); 197 | self.write(Register::TxConfig, 0x0000_1C00); 198 | 199 | // Write out the RX descriptor ring base. 200 | self.write(Register::RxDescriptorBase, self.rx_ring.phys_base() as u32); 201 | 202 | // ??? 203 | self.write(Register::PhyConfig, 0x0400_1001); 204 | self.write(Register::Config1, 0u32); 205 | self.write(Register::Config0, 0x0855_0001); 206 | 207 | // Enable RX/TX 208 | self.write(Register::TxConfig, 0x0000_1C01); 209 | self.write(Register::RxConfig, 0x0010_1C11); 210 | 211 | // Disable all interrupts. 212 | self.write(Register::InterruptMask, 0x0000_0000); 213 | } 214 | } 215 | 216 | /// Represents a token that, when consumed, yields a received packet. 217 | pub struct EthernetRxToken<'ring, const N: usize>(ring::CompleteDescriptor<'ring, ring::RxRing, N>); 218 | 219 | /// Represents a token that, when consumed, takes ownership of a buffer containing a packet to be sent. 220 | pub struct EthernetTxToken<'ring, const M: usize>(ring::FreeDescriptor<'ring, ring::TxRing, M>); 221 | 222 | // Implement the smoltcp interface to the Xenon ethernet device. 223 | impl<'dev, const N: usize, const M: usize> Device<'dev> for EthernetDevice { 224 | type RxToken = EthernetRxToken<'dev, N>; 225 | type TxToken = EthernetTxToken<'dev, M>; 226 | 227 | fn receive(&'dev mut self) -> Option<(Self::RxToken, Self::TxToken)> { 228 | // Free up completed TX descriptors. 229 | while let Some(desc) = self.tx_ring.get_next_complete() { 230 | // Free the desc, drop the inner buffer. Maybe attempt to reuse it in the future. 231 | desc.free(); 232 | } 233 | 234 | // Requeue free RX descriptors. 235 | while let Some(desc) = self.rx_ring.get_next_free() { 236 | let buf = Box::new(EthernetBuffer::default()); 237 | 238 | // Submit the descriptor back to hardware. 
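// Handing every free RX descriptor a fresh 2 KiB buffer keeps the MAC supplied with
// memory to DMA incoming frames into; the ring holds the buffer until the descriptor
// completes and `free()` returns it to the RX token.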
239 | desc.submit(buf); 240 | } 241 | 242 | Some(( 243 | EthernetRxToken(self.rx_ring.get_next_complete()?), 244 | EthernetTxToken(self.tx_ring.get_next_free()?), 245 | )) 246 | } 247 | 248 | fn transmit(&'dev mut self) -> Option { 249 | // Free up completed TX descriptors. 250 | while let Some(desc) = self.tx_ring.get_next_complete() { 251 | // Free the desc, drop the inner buffer. Maybe attempt to reuse it in the future. 252 | desc.free(); 253 | } 254 | 255 | // Now try to get the next free entry again. In most cases, it will point to an 256 | // entry we just freed. 257 | Some(EthernetTxToken(self.tx_ring.get_next_free()?)) 258 | } 259 | 260 | fn capabilities(&self) -> smoltcp::phy::DeviceCapabilities { 261 | let mut caps = smoltcp::phy::DeviceCapabilities::default(); 262 | 263 | caps.max_transmission_unit = 1522; 264 | caps.max_burst_size = None; 265 | caps.checksum = smoltcp::phy::ChecksumCapabilities::ignored(); 266 | caps 267 | } 268 | } 269 | 270 | impl<'a, const N: usize> phy::RxToken for EthernetRxToken<'a, N> { 271 | fn consume(self, _timestamp: smoltcp::time::Instant, f: F) -> smoltcp::Result 272 | where 273 | F: FnOnce(&mut [u8]) -> smoltcp::Result, 274 | { 275 | let (mut buf, len) = self.0.free(); 276 | f(&mut buf.0[..len]) 277 | } 278 | } 279 | 280 | impl<'a, const M: usize> phy::TxToken for EthernetTxToken<'a, M> { 281 | fn consume( 282 | self, 283 | _timestamp: smoltcp::time::Instant, 284 | len: usize, 285 | f: F, 286 | ) -> smoltcp::Result 287 | where 288 | F: FnOnce(&mut [u8]) -> smoltcp::Result, 289 | { 290 | let mut buf = Box::new(EthernetBuffer::default()); 291 | let res = f(&mut buf.0[..len])?; 292 | 293 | self.0.submit(buf, len); 294 | 295 | Ok(res) 296 | } 297 | } 298 | -------------------------------------------------------------------------------- /shared/xenon-enet/src/ring.rs: -------------------------------------------------------------------------------- 1 | //! This module contains code for ethernet ring management. 2 | 3 | use super::{EthernetBuffer, HwDescriptor}; 4 | use core::{marker::PhantomData, sync::atomic::Ordering}; 5 | 6 | extern crate alloc; 7 | use alloc::boxed::Box; 8 | 9 | /// An individual "logical" descriptor, used to track extra information 10 | /// associated with hardware descriptors. 11 | #[derive(Default, Clone)] 12 | struct LogicalDescriptor { 13 | /// The managed heap buffer assigned to this descriptor, if any. 14 | buf: Option>, 15 | } 16 | 17 | impl core::fmt::Debug for LogicalDescriptor { 18 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 19 | f.debug_struct("LogicalDescriptor") 20 | .field("buf", &self.buf.is_some()) 21 | .finish() 22 | } 23 | } 24 | 25 | pub trait RingType {} 26 | 27 | /// Receive ring marker 28 | pub struct RxRing; 29 | /// Transfer ring marker 30 | pub struct TxRing; 31 | 32 | impl RingType for RxRing {} 33 | impl RingType for TxRing {} 34 | 35 | /// This structure represents a ring of DMA buffer descriptors for a Xenon MAC. 36 | /// 37 | /// # Hardware interaction 38 | /// Hardware and software may both access descriptors in the ring at the same time. 39 | /// 40 | /// When a descriptor is ready for hardware processing, the ownership bit is flipped 41 | /// such that the hardware now "owns" the descriptor. 42 | /// Processing means sending a packet for TX, or the reception of a packet for RX. 43 | /// 44 | /// When the hardware owns the descriptor, we may not touch it whatsoever. 45 | /// As such, this interface offers no way to retrieve hardware-owned descriptors. 
46 | /// 47 | /// When a descriptor is finished processing, hardware will turn off the ownership 48 | /// bit, handing ownership back to us. At this point, we can take the buffer out of 49 | /// the descriptor and process or free it. 50 | pub struct Ring { 51 | _ring_type: PhantomData, 52 | 53 | /// A contiguous array of hardware descriptors. The hardware will receive a pointer to this. 54 | /// This _assumes_ that the MMU is disabled, and `va == pa`. 55 | hw_descriptors: Box<[HwDescriptor; N]>, 56 | 57 | /// Associated logical descriptors, tracking extra information that can't live inside 58 | /// of the hardware descriptors. 59 | descriptors: [LogicalDescriptor; N], 60 | 61 | /// The next busy descriptor, without wraparound. 62 | next_busy: usize, 63 | /// The next free descriptor, without wraparound. If `next_free` == `next_busy`, all descriptors are free. 64 | next_free: usize, 65 | } 66 | 67 | impl Ring { 68 | /// Construct a new ethernet ring, with an allocation backed by the global allocator. 69 | pub fn new() -> Self { 70 | let mut hw_descriptors = Box::new([HwDescriptor::default(); N]); 71 | hw_descriptors.last_mut().unwrap().capacity = super::HWDESC_CAP_LAST_ENTRY; 72 | 73 | const LOGDESC_INIT: LogicalDescriptor = LogicalDescriptor { buf: None }; 74 | 75 | Self { 76 | _ring_type: PhantomData, 77 | hw_descriptors, 78 | descriptors: [LOGDESC_INIT; N], 79 | 80 | next_busy: 0, 81 | next_free: 0, 82 | } 83 | } 84 | 85 | pub fn reset(&mut self) { 86 | self.next_busy = 0; 87 | self.next_free = 0; 88 | } 89 | 90 | /// Retrieve the next unused descriptor, if any. 91 | pub fn get_next_free<'ring>(&'ring mut self) -> Option> { 92 | // If `next_free` is >= `N` slots away from `next_busy`, 93 | // the entire ring has been consumed. 94 | if self.next_free - self.next_busy >= N { 95 | None 96 | } else { 97 | // N.B: Do not increment `next_free` here. The descriptor must do that when submitted. 98 | // Because of the mutable borrow against `self`, callers cannot fetch more than 99 | // one descriptor at a time. 100 | let idx = self.next_free % N; 101 | 102 | Some(FreeDescriptor { ring: self, idx }) 103 | } 104 | } 105 | 106 | /// Retrieve the next completed descriptor, if any. 107 | pub fn get_next_complete(&mut self) -> Option> { 108 | if self.next_busy == self.next_free { 109 | None 110 | } else { 111 | let idx = self.next_busy % N; 112 | 113 | // Now, we need to check and see if the HW ownership bit if set. 114 | // If so, do not return a reference. 115 | if self.hw_descriptors[idx].is_busy() { 116 | None 117 | } else { 118 | Some(CompleteDescriptor { ring: self, idx }) 119 | } 120 | } 121 | } 122 | 123 | /// Return the physical base pointer of this ring. 124 | pub fn phys_base(&self) -> usize { 125 | self.hw_descriptors.as_ptr() as usize 126 | } 127 | } 128 | 129 | unsafe fn read_mod_write_volatile(addr: *mut T, func: impl FnOnce(T) -> T) { 130 | let oval = core::ptr::read_volatile(addr); 131 | let nval = func(oval); 132 | core::ptr::write_volatile(addr, nval); 133 | } 134 | 135 | /// Represents a safe interface for a particular free hardware descriptor. 136 | pub struct FreeDescriptor<'a, S: RingType, const N: usize> { 137 | /// The ring that owns this descriptor. 138 | ring: &'a mut Ring, 139 | /// The wrapped-around descriptor index. 140 | idx: usize, 141 | } 142 | 143 | // Actions corresponding to a free descriptor on the RX ring. 144 | impl<'a, const N: usize> FreeDescriptor<'a, RxRing, N> { 145 | /// Submit this descriptor to hardware. 
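// N.B: The ordering in the body below matters: the length, buffer address and capacity
// are written first, the fence makes those writes visible, and only then is the
// ownership bit flipped, so hardware can never observe a half-initialized descriptor.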
146 | pub fn submit(self, buf: Box<EthernetBuffer>) { 147 | // Update the hardware descriptor. 148 | let hw_desc = &mut self.ring.hw_descriptors[self.idx]; 149 | unsafe { 150 | core::ptr::write_volatile(&mut hw_desc.len, 0); // RX: 0 bytes initial length 151 | core::ptr::write_volatile(&mut hw_desc.addr, buf.0.as_ptr() as u32); 152 | 153 | read_mod_write_volatile(&mut hw_desc.capacity, |v| { 154 | // N.B: Avoid overwriting HWDESC_CAP_LAST_ENTRY. 155 | (v & super::HWDESC_CAP_LAST_ENTRY) | (buf.0.len() as u32 & 0x7FFF_FFFF) 156 | }); 157 | 158 | // Prevent reordering of the above writes and the below ownership flag modification. 159 | core::sync::atomic::fence(Ordering::SeqCst); 160 | 161 | // TODO: Figure out what magic bit 0x4000_0000 is. 162 | core::ptr::write_volatile( 163 | &mut hw_desc.flags, 164 | super::HWDESC_FLAG_HW_OWNED | 0x4000_0000, 165 | ); 166 | } // Keep the buffer alive in the logical descriptor: hardware now holds a raw pointer into it, and `free()` hands it back once a packet has been received. 167 | self.ring.descriptors[self.idx].buf.replace(buf); 168 | self.ring.next_free += 1; 169 | } 170 | } 171 | 172 | // Actions corresponding to a free descriptor on the TX ring. 173 | impl<'a, const N: usize> FreeDescriptor<'a, TxRing, N> { 174 | /// Submit this descriptor to hardware. 175 | pub fn submit(self, buf: Box<EthernetBuffer>, len: usize) { 176 | // Update the hardware descriptor. 177 | let hw_desc = &mut self.ring.hw_descriptors[self.idx]; 178 | unsafe { 179 | core::ptr::write_volatile(&mut hw_desc.len, len as u32); 180 | core::ptr::write_volatile(&mut hw_desc.addr, buf.0.as_ptr() as u32); 181 | 182 | read_mod_write_volatile(&mut hw_desc.capacity, |v| { 183 | // N.B: Avoid overwriting HWDESC_CAP_LAST_ENTRY. 184 | (v & super::HWDESC_CAP_LAST_ENTRY) | (buf.0.len() as u32 & 0x7FFF_FFFF) 185 | }); 186 | 187 | // Prevent reordering of the above writes and the below ownership flag modification. 188 | core::sync::atomic::fence(Ordering::SeqCst); 189 | 190 | // TODO: Figure out the magic bits 0x4023_0000. 191 | core::ptr::write_volatile( 192 | &mut hw_desc.flags, 193 | super::HWDESC_FLAG_HW_OWNED | 0x4023_0000, 194 | ); 195 | } 196 | 197 | // Update the logical descriptor. 198 | self.ring.descriptors[self.idx].buf.replace(buf); 199 | self.ring.next_free += 1; 200 | } 201 | } 202 | 203 | /// Represents a safe interface for a particular completed hardware descriptor. 204 | pub struct CompleteDescriptor<'a, S: RingType, const N: usize> { 205 | ring: &'a mut Ring<S, N>, 206 | idx: usize, 207 | } 208 | 209 | impl<'a, S: RingType, const N: usize> CompleteDescriptor<'a, S, N> { 210 | /// Mark a previously finished descriptor as free, taking the buffer out of it. 211 | /// This returns a tuple of the buffer and the length used by hardware. 212 | pub fn free(self) -> (Box<EthernetBuffer>, usize) { 213 | // Clear out the descriptor. 214 | let hw_desc = &mut self.ring.hw_descriptors[self.idx]; 215 | let len = unsafe { 216 | core::ptr::write_volatile(&mut hw_desc.addr, 0x0BADF00D); 217 | core::ptr::read_volatile(&hw_desc.len) 218 | }; 219 | 220 | // Take the buffer from the logical descriptor.
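// N.B: The `expect` below relies on `submit()` having stashed the buffer in the
// logical descriptor beforehand; both the RX and TX submit paths above do so.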
221 | let buf = self.ring.descriptors[self.idx] 222 | .buf 223 | .take() 224 | .expect("no buffer in completed descriptor"); 225 | 226 | self.ring.next_busy += 1; 227 | (buf, len as usize) 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /shared/xenon-soc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xenon-soc" 3 | version = "0.1.0" 4 | edition = "2018" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | xenon-cpu = { path = "../xenon-cpu" } 10 | sync = { path = "../sync" } 11 | -------------------------------------------------------------------------------- /shared/xenon-soc/src/iic.rs: -------------------------------------------------------------------------------- 1 | //! Integrated Interrupt Controller (IIC) 2 | 3 | const IIC_BASE: u64 = 0x80000200_00050000; 4 | 5 | #[repr(usize)] 6 | #[allow(dead_code)] 7 | enum Register { 8 | WhoAmI = 0x00, 9 | CurrentTaskPriority = 0x08, 10 | IpiDispatch = 0x10, 11 | ActiveIrqs = 0x20, 12 | AssertedIrqs = 0x28, 13 | Ack = 0x50, 14 | AckPri = 0x58, 15 | Eoi = 0x60, 16 | EoiPri = 0x68, 17 | SpuriousVector = 0x70, 18 | } 19 | 20 | #[repr(u8)] 21 | #[allow(dead_code)] 22 | #[derive(Debug)] 23 | pub enum Interrupt { 24 | Ipi4 = 2, 25 | Ipi3 = 4, 26 | Smm = 5, 27 | Sfcx = 6, 28 | SataHdd = 8, 29 | SataCdrom = 9, 30 | Ohci0 = 11, 31 | Ehci0 = 12, 32 | Ohci1 = 13, 33 | Ehci1 = 14, 34 | Xma = 16, 35 | Audio = 17, 36 | Enet = 19, 37 | Xps = 21, 38 | Graphics = 22, 39 | Profiler = 24, 40 | Biu = 25, 41 | Ioc = 26, 42 | Fsb = 27, 43 | Ipi2 = 28, 44 | Clock = 29, 45 | Ipi1 = 30, 46 | None = 31, 47 | } 48 | 49 | pub struct Iic { 50 | mmio: &'static mut [u8], 51 | } 52 | 53 | #[allow(dead_code)] 54 | impl Iic { 55 | pub fn local() -> Self { 56 | let id = xenon_cpu::intrin::pir(); 57 | let base = IIC_BASE + (0x1000 * id); 58 | 59 | // SAFETY: It should always be safe to get a pointer to the current CPU's 60 | // interrupt controller. 61 | Self { 62 | mmio: unsafe { core::slice::from_raw_parts_mut(base as *mut _, 0x1000) }, 63 | } 64 | } 65 | 66 | fn write(&self, reg: Register, val: T) { 67 | unsafe { 68 | core::ptr::write_volatile(&self.mmio[reg as usize] as *const _ as *mut T, val); 69 | } 70 | } 71 | 72 | fn read(&self, reg: Register) -> T { 73 | unsafe { core::ptr::read_volatile(&self.mmio[reg as usize] as *const _ as *mut T) } 74 | } 75 | 76 | /// Acknowledge and get an interrupt (if one is pending). 77 | pub fn acknowledge(&self) -> Option { 78 | let raw_int = self.read::(Register::Ack) >> 2; 79 | let spv = self.read::(Register::SpuriousVector) >> 2; 80 | 81 | if raw_int == spv { 82 | None 83 | } else { 84 | unsafe { core::mem::transmute((raw_int & 0x1F) as u8) } 85 | } 86 | } 87 | 88 | /// Signal the end of an IIC interrupt. 
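// Typical dispatch flow (sketch): a handler calls `Iic::local().acknowledge()` to fetch
// the pending vector, if any, services it, and then passes the same `Interrupt` to
// `eoi()` below so the controller can deliver the next one.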
89 | pub fn eoi(&self, int: Interrupt) { 90 | let raw_int = int as u32; 91 | self.write::(Register::Eoi, (raw_int as u64) << 2); 92 | } 93 | 94 | pub fn set_priority(&self, prio: Interrupt) { 95 | self.write(Register::CurrentTaskPriority, (prio as u64) << 2); 96 | self.read::(Register::CurrentTaskPriority); 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /shared/xenon-soc/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![no_std] 2 | 3 | pub mod iic; 4 | pub mod smc; 5 | pub mod uart; 6 | -------------------------------------------------------------------------------- /shared/xenon-soc/src/smc.rs: -------------------------------------------------------------------------------- 1 | //! This file includes routines to communicate with the SMC. 2 | use sync::mutex::SpinMutex; 3 | 4 | const SMC_ADDRESS: *mut u32 = 0x8000_0200_EA00_1000 as *mut u32; 5 | 6 | pub struct SMC {} 7 | 8 | impl SMC { 9 | const fn new() -> Self { 10 | Self {} 11 | } 12 | 13 | pub fn send_message(&mut self, msg: &[u32; 4]) { 14 | unsafe { 15 | while (core::ptr::read_volatile(SMC_ADDRESS.offset(33)) & 0x04000000) == 0 {} 16 | 17 | core::ptr::write_volatile::(SMC_ADDRESS.offset(33), 0x04000000); 18 | core::ptr::write_volatile::(SMC_ADDRESS.offset(32), msg[0]); 19 | core::ptr::write_volatile::(SMC_ADDRESS.offset(32), msg[1]); 20 | core::ptr::write_volatile::(SMC_ADDRESS.offset(32), msg[2]); 21 | core::ptr::write_volatile::(SMC_ADDRESS.offset(32), msg[3]); 22 | core::ptr::write_volatile::(SMC_ADDRESS.offset(33), 0x00000000); 23 | } 24 | } 25 | 26 | pub fn restart_system(&mut self) { 27 | self.send_message(&[0x82043000u32, 0x00000000u32, 0x00000000u32, 0x00000000u32]); 28 | } 29 | 30 | /// Start the boot animation on the front LED panel. 31 | pub fn start_led_bootanim(&mut self) { 32 | let buf: [u32; 4] = [0x8C010000, 0x00000000, 0x00000000, 0x00000000]; 33 | 34 | self.send_message(&buf); 35 | } 36 | 37 | pub fn set_led(&mut self, ovflag: bool, value: u8) { 38 | let buf: [u32; 4] = [ 39 | 0x99000000 | ((ovflag as u32) << 16) | ((value as u32) << 8), 40 | 0x00000000, 41 | 0x00000000, 42 | 0x00000000, 43 | ]; 44 | 45 | self.send_message(&buf); 46 | } 47 | } 48 | 49 | pub static SMC: SpinMutex = SpinMutex::new(SMC::new()); 50 | -------------------------------------------------------------------------------- /shared/xenon-soc/src/uart.rs: -------------------------------------------------------------------------------- 1 | //! This file defines the UART interface on the SMC. 2 | use sync::mutex::SpinMutex; 3 | 4 | use core::fmt::Write; 5 | 6 | const UART_BASE: *mut u32 = 0x8000_0200_EA00_1000 as *mut u32; 7 | 8 | #[allow(dead_code)] 9 | pub enum Speed { 10 | S115200, // 0b11100110 11 | S38400, // 0b10110010 12 | S19200, // 0b01100011 13 | } 14 | 15 | pub struct UART {} 16 | 17 | impl UART { 18 | const fn new() -> Self { 19 | Self {} 20 | } 21 | 22 | /// Reset the UART controller with the specified speed. 23 | pub fn reset(&mut self, speed: Speed) { 24 | unsafe { 25 | core::ptr::write_volatile( 26 | UART_BASE.offset(7), 27 | match speed { 28 | Speed::S115200 => 0xE6010000, 29 | Speed::S38400 => 0xB2010000, 30 | Speed::S19200 => 0x63010000, 31 | }, 32 | ); 33 | } 34 | } 35 | 36 | fn data_pending(&mut self) -> bool { 37 | unsafe { 38 | // Busy loop while the SMC is busy. 
39 | while (core::ptr::read_volatile(UART_BASE.offset(6)) & 0xFCFF_FFFF) != 0 {} 40 | 41 | core::ptr::read_volatile(UART_BASE.offset(6)) & 0x01000000 != 0 42 | } 43 | } 44 | 45 | pub fn read_byte(&mut self) -> u8 { 46 | // Wait for available character. 47 | while !self.data_pending() {} 48 | 49 | unsafe { (core::ptr::read_volatile(UART_BASE.offset(4)) >> 24) as u8 } 50 | } 51 | 52 | pub fn write_byte(&mut self, byte: u8) { 53 | unsafe { 54 | // Wait for the SMC to be ready. 55 | while (core::ptr::read_volatile(UART_BASE.offset(6)) & 0x02000000) == 0 {} 56 | 57 | core::ptr::write_volatile(UART_BASE.offset(5), (byte as u32) << 24); 58 | } 59 | } 60 | 61 | pub fn read(&mut self, mut data: &mut [u8]) { 62 | while !data.is_empty() { 63 | data[0] = self.read_byte(); 64 | data = &mut data[1..]; 65 | } 66 | } 67 | 68 | pub fn write(&mut self, data: &[u8]) { 69 | for b in data { 70 | self.write_byte(*b); 71 | } 72 | } 73 | } 74 | 75 | impl Write for UART { 76 | fn write_str(&mut self, s: &str) -> core::fmt::Result { 77 | for c in s.as_bytes().iter() { 78 | // Prepend newline characters with a carriage return. 79 | if *c == b'\n' { 80 | self.write_byte(b'\r'); 81 | } 82 | 83 | self.write_byte(*c); 84 | } 85 | 86 | Ok(()) 87 | } 88 | } 89 | 90 | pub static UART: SpinMutex = SpinMutex::new(UART::new()); 91 | --------------------------------------------------------------------------------
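As a closing illustration, a minimal sketch of how the SoC helpers above fit together, using the `SpinMutex`-wrapped `UART` and `SMC` statics (hypothetical function; the LED `value` bit layout is a guess, see `set_led` above):

```rust
use core::fmt::Write;
use xenon_soc::{smc, uart};

fn hello_console() {
    // Bring the SMC UART up at 115200 baud and print a greeting.
    // `lock` serializes access through the SpinMutex wrapping the driver.
    uart::UART.lock(|u| {
        u.reset(uart::Speed::S115200);
        let _ = writeln!(u, "xell-rs: hello from the UART");
    });

    // Ask the SMC to light some front-panel LEDs.
    smc::SMC.lock(|s| s.set_led(true, 0x0F));
}
```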