├── .cargo └── config.toml ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .vscode └── license.code-snippets ├── LICENSE ├── README.md ├── riscv64imac-unknown-none.json ├── rust-toolchain.toml ├── spark ├── .cargo │ └── config.toml ├── .gitignore ├── .vscode │ └── license.code-snippets ├── Cargo.lock ├── Cargo.toml ├── build.rs ├── conf │ ├── riscv-sbi.ld │ ├── riscv-uefi.ld │ ├── riscv64-sbi.ld │ ├── riscv64-uefi.ld │ ├── riscv64gc-unknown-none.json │ └── riscv64gc-unknown-uefi.json └── src │ ├── config.rs │ ├── console │ ├── mod.rs │ └── sbi.rs │ ├── dev │ ├── acpi.rs │ ├── block │ │ ├── ahci │ │ │ ├── hba │ │ │ │ └── mod.rs │ │ │ └── mod.rs │ │ ├── mod.rs │ │ └── nvme │ │ │ ├── controller.rs │ │ │ ├── identify.rs │ │ │ ├── mod.rs │ │ │ └── queue.rs │ ├── fw_cfg.rs │ ├── mod.rs │ ├── pcie.rs │ └── uart │ │ ├── mod.rs │ │ └── ns16550.rs │ ├── fs │ ├── fat │ │ ├── bpb.rs │ │ ├── dir.rs │ │ └── mod.rs │ └── mod.rs │ ├── io.rs │ ├── malloc.rs │ ├── mem │ ├── mod.rs │ ├── pmm │ │ ├── freelist_allocator.rs │ │ ├── init_ranges.rs │ │ └── mod.rs │ └── vmm.rs │ ├── panic.rs │ ├── proto.rs │ ├── proto │ ├── bootelf.rs │ └── limine.rs │ ├── rtld.rs │ ├── smp.rs │ ├── spark.rs │ ├── sys │ ├── fdt.rs │ ├── mod.rs │ ├── sbi │ │ ├── locore.s │ │ ├── mod.rs │ │ └── start.rs │ └── uefi │ │ ├── locore.s │ │ ├── mod.rs │ │ └── start.rs │ ├── test.rs │ ├── time.rs │ ├── trap.rs │ └── util │ ├── maybe_static_arc.rs │ └── mod.rs └── tools ├── symbol_map ├── Cargo.lock ├── Cargo.toml └── src │ ├── generate.rs │ └── lib.rs └── xtask ├── Cargo.lock ├── Cargo.toml └── src ├── build.rs ├── main.rs ├── run.rs └── util.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --manifest-path ./tools/xtask/Cargo.toml --" 3 | 4 | 5 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Spark CI 3 | 4 | "on": 5 | push: 6 | branches: 7 | - main 8 | pull_request: 9 | branches: 10 | - "**" 11 | 12 | env: 13 | CARGO_UNSTABLE_SPARSE_REGISTRY: true 14 | 15 | jobs: 16 | main: 17 | name: main 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - name: checkout the source 22 | uses: actions/checkout@v3 23 | with: 24 | fetch-depth: 1 25 | 26 | - name: fetch the rust toolchain 27 | uses: actions-rs/toolchain@v1 28 | with: 29 | profile: minimal 30 | toolchain: nightly 31 | components: clippy, rustfmt, rust-src 32 | 33 | - name: install dependencies 34 | run: | 35 | sudo apt-get update 36 | sudo apt-get install -y llvm lld 37 | 38 | - name: build bootloader 39 | run: cargo xtask build --ci 40 | 41 | - name: deploy build artifacts 42 | uses: actions/upload-artifact@v3 43 | with: 44 | path: build/ 45 | 46 | - name: build the documentation 47 | run: cargo xtask doc --ci 48 | 49 | - name: publish documentation 50 | uses: peaceiris/actions-gh-pages@v3 51 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 52 | with: 53 | deploy_key: ${{ secrets.DOCS_DEPLOY_KEY }} 54 | external_repository: bolt-os/spark-docs 55 | publish_branch: main 56 | publish_dir: target/riscv64gc-unknown-none/doc 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /build 2 | target/ 3 | **/.vscode/settings.json 4 | .hdd/ 5 | .debug/ 6 | 
-------------------------------------------------------------------------------- /.vscode/license.code-snippets: -------------------------------------------------------------------------------- 1 | { 2 | "License": { 3 | "description": "project license header", 4 | "prefix": "#license", 5 | "body": [ 6 | "/*", 7 | " * Copyright (c) 2022 xvanc and contributors", 8 | " *", 9 | " * Redistribution and use in source and binary forms, with or without modification,", 10 | " * are permitted provided that the following conditions are met:", 11 | " *", 12 | " * 1. Redistributions of source code must retain the above copyright notice,", 13 | " * this list of conditions and the following disclaimer.", 14 | " *", 15 | " * 2. Redistributions in binary form must reproduce the above copyright notice,", 16 | " * this list of conditions and the following disclaimer in the documentation", 17 | " * and/or other materials provided with the distribution.", 18 | " *", 19 | " * 3. Neither the name of the copyright holder nor the names of its contributors", 20 | " * may be used to endorse or promote products derived from this software without", 21 | " * specific prior written permission.", 22 | " *", 23 | " * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY", 24 | " * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", 25 | " * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", 26 | " * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", 27 | " * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", 28 | " * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", 29 | " * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", 30 | " * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", 31 | " * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", 32 | " *", 33 | " * SPDX-License-Identifier: BSD-3-Clause", 34 | " */", 35 | ] 36 | }, 37 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022-2023 xvanc and contributors 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, 7 | this list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software without 15 | specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY 18 | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 | OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
20 | IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 21 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Spark 2 | 3 | `spark` is a 64-bit bootloader for RISC-V, implementing the [Limine Boot Protocol](https://github.com/limine-bootloader/limine/blob/trunk/PROTOCOL.md) 4 | for both SBI and UEFI systems. 5 | 6 | ## Building 7 | 8 | `spark` uses a custom build tool called `xtask` which wraps `cargo` and controls the entire 9 | build process. To build the bootloader, simply run `cargo xtask build [--release]`. 10 | The final build artifacts will be output into the `build/` directory. 11 | 12 | For a full list of the available build options, run `cargo xtask build --help`. 13 | 14 | ## Using the Bootloader 15 | 16 | Currently `spark` is limited to reading from NVMe drives. 17 | 18 | The easiest way to get up and running is using QEMU's `vvfat` driver to emulate a FAT filesystem 19 | from a directory on your host: 20 | 21 | ``` 22 | -device nvme,serial=deadbeff,drive=disk1 23 | -drive id=disk1,format=raw,if=none,file=fat:rw:path/to/directory 24 | ``` 25 | 26 | Alternatively, you can create a disk image to use as the backing for the drive: 27 | 28 | ``` 29 | -device nvme,serial=deadbeff,drive=disk1 30 | -drive id=disk1,format=raw,if=none,file=path/to/disk.img 31 | ``` 32 | 33 | ### Example 34 | 35 | `spark.cfg`: 36 | ``` 37 | boot "my awesome kernel" { 38 | protocol = "limine"; 39 | kernel-path = "boot:///boot/kernel.elf"; 40 | } 41 | ``` 42 | 43 | ``` 44 | $ mkdir -p ./root 45 | $ cp path/to/kernel.elf ./root/ 46 | $ cp path/to/spark.cfg ./root/ 47 | $ qemu-system-riscv64 -machine virt -cpu rv64 \ 48 | -kernel spark.bin \ 49 | -device nvme,serial=deadbeff,drive=disk1 \ 50 | -drive id=disk1,format=raw,if=none,file=./root 51 | ``` 52 | 53 | # Running 54 | 55 | To run it in a virtual machine make sure you have qemu installed 56 | Just use ``cargo xtask run`` 57 | If you encounter an error with qemu try running ``cargo xtask run -- -cpu rv64`` 58 | -------------------------------------------------------------------------------- /riscv64imac-unknown-none.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "riscv64", 3 | "code-model": "medium", 4 | "cpu": "generic-rv64", 5 | "crt-objects-fallback": "false", 6 | "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", 7 | "features": "+m,+a,+c", 8 | "linker": "rust-lld", 9 | "linker-flavor": "ld.lld", 10 | "llvm-abiname": "lp64", 11 | "llvm-target": "riscv64-unknown-unknown", 12 | "max-atomic-width": "64", 13 | "panic-strategy": "abort", 14 | "position-independent-executables": true, 15 | "relro-level": "full", 16 | "stack-probes": { 17 | "kind": "call" 18 | }, 19 | "supported-sanitizers": [ 20 | "kernel-address" 21 | ], 22 | "relocation-model": "pie", 23 | "static-position-independent-executables": true, 24 | "target-pointer-width": "64" 25 | } 26 | 
-------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" 3 | profile = "minimal" 4 | components = [ "rustfmt", "rust-src" ] 5 | -------------------------------------------------------------------------------- /spark/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [unstable] 2 | build-std = ["core", "alloc", "compiler_builtins"] 3 | build-std-features = ["compiler-builtins-mem"] 4 | 5 | [build] 6 | target = "conf/riscv64gc-unknown-none.json" -------------------------------------------------------------------------------- /spark/.gitignore: -------------------------------------------------------------------------------- 1 | _/ -------------------------------------------------------------------------------- /spark/.vscode/license.code-snippets: -------------------------------------------------------------------------------- 1 | { 2 | "License": { 3 | "description": "project license header", 4 | "prefix": "#license", 5 | "body": [ 6 | "// SPDX-FileCopyrightText: 2022-2023 xvanc and contributors", 7 | "// SPDX-License-Identifier: BSD-3-Clause", 8 | ] 9 | }, 10 | } -------------------------------------------------------------------------------- /spark/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 3 4 | 5 | [[package]] 6 | name = "acpi" 7 | version = "0.1.0" 8 | source = "git+https://github.com/bolt-os/acpi?rev=8b1520a#8b1520afed494ac2079b5fc939a4699b3a401471" 9 | dependencies = [ 10 | "bitflags 1.3.2", 11 | "log", 12 | ] 13 | 14 | [[package]] 15 | name = "anyhow" 16 | version = "1.0.75" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" 19 | 20 | [[package]] 21 | name = "autocfg" 22 | version = "1.1.0" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 25 | 26 | [[package]] 27 | name = "bitflags" 28 | version = "1.3.2" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 31 | 32 | [[package]] 33 | name = "bitflags" 34 | version = "2.4.1" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" 37 | 38 | [[package]] 39 | name = "bolt-libelf" 40 | version = "0.1.0" 41 | source = "git+https://github.com/bolt-os/libelf#1f562d76d9aa084a965b76f45e57b13fc8bd5745" 42 | dependencies = [ 43 | "bitflags 1.3.2", 44 | ] 45 | 46 | [[package]] 47 | name = "bytemuck" 48 | version = "1.14.0" 49 | source = "registry+https://github.com/rust-lang/crates.io-index" 50 | checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" 51 | dependencies = [ 52 | "bytemuck_derive", 53 | ] 54 | 55 | [[package]] 56 | name = "bytemuck_derive" 57 | version = "1.5.0" 58 | source = "registry+https://github.com/rust-lang/crates.io-index" 59 | checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1" 60 | dependencies = [ 61 | "proc-macro2", 62 | "quote", 63 | "syn", 64 | ] 65 | 66 | [[package]] 67 | name = "fdt" 68 | version 
= "0.1.0" 69 | source = "git+https://github.com/bolt-os/fdt.git#48d175cefcb068901282142469ca26e5042ddca7" 70 | dependencies = [ 71 | "anyhow", 72 | "bytemuck", 73 | "libsa", 74 | ] 75 | 76 | [[package]] 77 | name = "gimli" 78 | version = "0.28.1" 79 | source = "registry+https://github.com/rust-lang/crates.io-index" 80 | checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" 81 | 82 | [[package]] 83 | name = "libsa" 84 | version = "0.1.0" 85 | source = "git+https://github.com/bolt-os/libsa#f54301e79c255807f0a8f938b5a2e8df831bfe1c" 86 | dependencies = [ 87 | "bytemuck", 88 | ] 89 | 90 | [[package]] 91 | name = "limine" 92 | version = "0.1.0" 93 | source = "git+https://github.com/bolt-os/limine-rs?rev=85f7db3#85f7db3f7835991977efd07913317c0877bb9e1a" 94 | dependencies = [ 95 | "bitflags 1.3.2", 96 | "uuid", 97 | ] 98 | 99 | [[package]] 100 | name = "linkset" 101 | version = "0.1.0" 102 | source = "git+https://github.com/xvanc/linkset.git#431c7d424665c4f53ee47f3c9bb1ddfd54503e98" 103 | dependencies = [ 104 | "spin", 105 | ] 106 | 107 | [[package]] 108 | name = "lock_api" 109 | version = "0.4.11" 110 | source = "registry+https://github.com/rust-lang/crates.io-index" 111 | checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" 112 | dependencies = [ 113 | "autocfg", 114 | "scopeguard", 115 | ] 116 | 117 | [[package]] 118 | name = "log" 119 | version = "0.4.20" 120 | source = "registry+https://github.com/rust-lang/crates.io-index" 121 | checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" 122 | 123 | [[package]] 124 | name = "proc-macro2" 125 | version = "1.0.69" 126 | source = "registry+https://github.com/rust-lang/crates.io-index" 127 | checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" 128 | dependencies = [ 129 | "unicode-ident", 130 | ] 131 | 132 | [[package]] 133 | name = "quote" 134 | version = "1.0.33" 135 | source = "registry+https://github.com/rust-lang/crates.io-index" 136 | checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" 137 | dependencies = [ 138 | "proc-macro2", 139 | ] 140 | 141 | [[package]] 142 | name = "sbi" 143 | version = "0.2.0" 144 | source = "registry+https://github.com/rust-lang/crates.io-index" 145 | checksum = "29cb0870400aca7e4487e8ec1e93f9d4288da763cb1da2cedc5102e62b6522ad" 146 | 147 | [[package]] 148 | name = "scopeguard" 149 | version = "1.2.0" 150 | source = "registry+https://github.com/rust-lang/crates.io-index" 151 | checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" 152 | 153 | [[package]] 154 | name = "spark" 155 | version = "0.1.0" 156 | dependencies = [ 157 | "acpi", 158 | "anyhow", 159 | "bitflags 2.4.1", 160 | "bolt-libelf", 161 | "fdt", 162 | "libsa", 163 | "limine", 164 | "linkset", 165 | "log", 166 | "sbi", 167 | "spin", 168 | "symbol_map", 169 | "uefi", 170 | "unwinding", 171 | "uuid", 172 | ] 173 | 174 | [[package]] 175 | name = "spin" 176 | version = "0.9.8" 177 | source = "registry+https://github.com/rust-lang/crates.io-index" 178 | checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" 179 | dependencies = [ 180 | "lock_api", 181 | ] 182 | 183 | [[package]] 184 | name = "symbol_map" 185 | version = "0.1.0" 186 | dependencies = [ 187 | "libsa", 188 | ] 189 | 190 | [[package]] 191 | name = "syn" 192 | version = "2.0.39" 193 | source = "registry+https://github.com/rust-lang/crates.io-index" 194 | checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" 195 | 
dependencies = [ 196 | "proc-macro2", 197 | "quote", 198 | "unicode-ident", 199 | ] 200 | 201 | [[package]] 202 | name = "uefi" 203 | version = "0.1.0" 204 | source = "git+https://github.com/bolt-os/uefi?rev=d1510e3#d1510e368fba5dd63cf63f810d28137c15ec55b4" 205 | dependencies = [ 206 | "bitflags 1.3.2", 207 | "limine", 208 | ] 209 | 210 | [[package]] 211 | name = "unicode-ident" 212 | version = "1.0.12" 213 | source = "registry+https://github.com/rust-lang/crates.io-index" 214 | checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" 215 | 216 | [[package]] 217 | name = "unwinding" 218 | version = "0.2.1" 219 | source = "registry+https://github.com/rust-lang/crates.io-index" 220 | checksum = "37a19a21a537f635c16c7576f22d0f2f7d63353c1337ad4ce0d8001c7952a25b" 221 | dependencies = [ 222 | "gimli", 223 | ] 224 | 225 | [[package]] 226 | name = "uuid" 227 | version = "1.6.1" 228 | source = "registry+https://github.com/rust-lang/crates.io-index" 229 | checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" 230 | -------------------------------------------------------------------------------- /spark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "spark" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["xvanc "] 6 | publish = false 7 | license = "BSD-3-Clause" 8 | description = "A 64-bit bootloader for RISC-V" 9 | readme = "./README.md" 10 | repository = "https://github.com/bolt-os/spark" 11 | 12 | [lib] 13 | name = "spark" 14 | path = "src/spark.rs" 15 | crate-type = ["staticlib"] 16 | 17 | [features] 18 | default = [ 19 | "acpi", 20 | "fdt", 21 | 22 | "dev-fw_cfg", 23 | "dev-nvme", 24 | "dev-pcie", 25 | 26 | "fs-fat", 27 | 28 | "proto-limine", 29 | ] 30 | 31 | acpi = ["dep:acpi"] 32 | fdt = ["dep:fdt"] 33 | 34 | dev-ahci = ["dev-pcie"] 35 | dev-fw_cfg = [] 36 | dev-nvme = ["dev-pcie"] 37 | dev-pcie = [] 38 | 39 | fs-fat = [] 40 | 41 | proto-bootelf = [] 42 | proto-limine = ["limine"] 43 | 44 | [dependencies] 45 | acpi = { git = "https://github.com/bolt-os/acpi", rev = "8b1520a", optional = true } 46 | anyhow = { version = "1.0", default-features = false } 47 | bitflags = "2.4" 48 | bolt-libelf = { git = "https://github.com/bolt-os/libelf" } 49 | fdt = { git = "https://github.com/bolt-os/fdt.git", optional = true, features = ["anyhow"] } 50 | libsa = { git = "https://github.com/bolt-os/libsa" } 51 | log = "0.4" 52 | sbi = "0.2" 53 | spin = "0.9" 54 | symbol_map = { path = "../tools/symbol_map" } 55 | uuid = { version = "1.0", default-features = false } 56 | 57 | [dependencies.limine] 58 | git = "https://github.com/bolt-os/limine-rs" 59 | rev = "85f7db3" 60 | features = ["bootloader", "uuid"] 61 | optional = true 62 | 63 | [dependencies.linkset] 64 | git = "https://github.com/xvanc/linkset.git" 65 | default-features = false 66 | features = ["spin"] 67 | 68 | [dependencies.unwinding] 69 | version = "0.2" 70 | default-features = false 71 | features = ["unwinder", "fde-static", "personality", "panic"] 72 | 73 | [target.'cfg(uefi)'.dependencies] 74 | uefi = { git = "https://github.com/bolt-os/uefi", rev = "d1510e3", features = ["limine"] } 75 | -------------------------------------------------------------------------------- /spark/build.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use std::env; 5 | 6 | fn main() { 7 | let 
target_os = env::var("CARGO_CFG_TARGET_OS").unwrap(); 8 | if target_os == "uefi" { 9 | println!("cargo:rustc-cfg=uefi"); 10 | } else { 11 | println!("cargo:rustc-cfg=sbi"); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /spark/conf/riscv-sbi.ld: -------------------------------------------------------------------------------- 1 | 2 | SECTIONS { 3 | . = 0; 4 | PROVIDE(__executable_start = .); 5 | PROVIDE(__spark_start = .); 6 | PROVIDE(__image_base = .); 7 | 8 | PROVIDE(__text = .); 9 | .text : { 10 | *(.text._start) 11 | *(.text .text.*) 12 | } 13 | PROVIDE(__etext = .); 14 | 15 | .rodata : { *(.rodata .rodata.*) } 16 | .eh_frame_hdr : { KEEP(*(.eh_frame_hdr)) } 17 | PROVIDE(__eh_frame = .); 18 | .eh_frame : { KEEP(*(.eh_frame)) } 19 | .dynsym : { *(.dynsym) } 20 | .dynstr : { *(.dynstr) } 21 | .rela : { *(.rela*) } 22 | .dynamic : { *(.dynamic) } 23 | .got : { *(.got) } 24 | .data.rel.ro : { *(.data.rel.ro*) } 25 | .data : { 26 | *(.data .data.*) 27 | *(.sdata .sdata.*) 28 | } 29 | PROVIDE(__global_pointer$ = .); 30 | PROVIDE(__bss = .); 31 | .bss : { 32 | *(.sbss .sbss.*) 33 | *(.dynbss) 34 | *(.bss .bss.*) 35 | *(COMMON) 36 | . += 0x100000; 37 | . = ALIGN(128); 38 | PROVIDE(__boot_stackp = .); 39 | } 40 | PROVIDE(__ebss = .); 41 | PROVIDE(__spark_end = .); 42 | PROVIDE(__image_size = . - __image_base); 43 | } 44 | -------------------------------------------------------------------------------- /spark/conf/riscv-uefi.ld: -------------------------------------------------------------------------------- 1 | 2 | SECTIONS { 3 | . = 0; 4 | PROVIDE(__image_base = .); 5 | PROVIDE(__spark_start = .); 6 | 7 | .text : { 8 | KEEP(*(.pe_headers)) 9 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 10 | 11 | PROVIDE(__text = .); 12 | *(.text._start) 13 | *(.text .text.*) 14 | } 15 | .plt : { *(.plt .plt.*) } 16 | PROVIDE(__etext = .); 17 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 18 | PROVIDE(__text_size = .); 19 | 20 | PROVIDE(__rodata = .); 21 | .hash : { *(.hash) } 22 | .gnu.hash : { *(.gnu.hash) } 23 | .dynsym : { *(.dynsym) } 24 | .dynstr : { *(.dynstr) } 25 | .rela : { *(.rela*) } 26 | .rodata : { *(.rodata .rodata.*) } 27 | .note.gnu.build-id : { KEEP(*(.note.gnu.build-id)) } 28 | .eh_frame_hdr : { KEEP(*(.eh_frame_hdr)) } 29 | PROVIDE(__eh_frame = .); 30 | .eh_frame : { KEEP(*(.eh_frame)) } 31 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 32 | PROVIDE(__rodata_size = . - __rodata); 33 | 34 | PROVIDE(__sbat = .); 35 | .data.sbat : { KEEP(*(.sbat)) } 36 | PROVIDE(__sbat_sizev = 1); 37 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 38 | PROVIDE(__sbat_size = . - __sbat); 39 | 40 | PROVIDE(__reloc = .); 41 | .data.reloc : { KEEP(*(.data.reloc)) } 42 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 43 | PROVIDE(__reloc_size = . - __reloc); 44 | 45 | PROVIDE(__data = .); 46 | .tdata : { *(.tdata .tdata.*) } 47 | .tbss : { *(.tbss .tbss.*) } 48 | .data.rel.ro : { *(.data.rel.ro .data.rel.ro.*) } 49 | .dynamic : { *(.dynamic) } 50 | . = DATA_SEGMENT_RELRO_END(0, .); 51 | .got : { *(.got .got.*) } 52 | .got.plt : { *(.got.plt .got.plt.*) } 53 | .data : { *(.data .data.*) *(.sdata .sdata.*) } 54 | PROVIDE(__data_size_init = . - __data); 55 | PROVIDE(__global_pointer$ = .); 56 | PROVIDE(__bss = .); 57 | .bss : 58 | { 59 | *(.sbss .sbss.*) 60 | *(.bss .bss.*) 61 | *(.dynbss) 62 | . += 0x100000; 63 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 64 | PROVIDE(__boot_stackp = .); 65 | } 66 | PROVIDE(__ebss = .); 67 | PROVIDE(__data_size = . - __data); 68 | PROVIDE(__image_size = . 
- __image_base); 69 | PROVIDE(__spark_end = .); 70 | } 71 | -------------------------------------------------------------------------------- /spark/conf/riscv64-sbi.ld: -------------------------------------------------------------------------------- 1 | 2 | SECTIONS { 3 | . = 0; 4 | PROVIDE(__executable_start = .); 5 | PROVIDE(__image_base = .); 6 | 7 | PROVIDE(__text = .); 8 | .text : { 9 | *(.text._start) 10 | *(.text .text.*) 11 | } 12 | PROVIDE(__etext = .); 13 | 14 | .rodata : { *(.rodata .rodata.*) } 15 | .eh_frame_hdr : { KEEP(*(.eh_frame_hdr)) } 16 | .eh_frame : { PROVIDE(__eh_frame = .); KEEP(*(.eh_frame)) } 17 | .dynsym : { *(.dynsym) } 18 | .dynstr : { *(.dynstr) } 19 | .rela : { *(.rela*) } 20 | .dynamic : { *(.dynamic) } 21 | .got : { *(.got) } 22 | .data.rel.ro : { *(.data.rel.ro*) } 23 | .data : { 24 | *(.data .data.*) 25 | *(.sdata .sdata.*) 26 | } 27 | PROVIDE(__global_pointer$ = .); 28 | PROVIDE(__bss = .); 29 | .bss : { 30 | *(.sbss .sbss.*) 31 | *(.dynbss) 32 | *(.bss .bss.*) 33 | *(COMMON) 34 | . += 0x100000; 35 | . = ALIGN(128); 36 | PROVIDE(__boot_stackp = .); 37 | } 38 | PROVIDE(__ebss = .); 39 | PROVIDE(__image_size = . - __image_base); 40 | } 41 | -------------------------------------------------------------------------------- /spark/conf/riscv64-uefi.ld: -------------------------------------------------------------------------------- 1 | 2 | SECTIONS { 3 | . = 0; 4 | PROVIDE(__image_base = .); 5 | PROVIDE(__spark_start = .); 6 | 7 | .text : { 8 | KEEP(*(.pe_headers)) 9 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 10 | 11 | PROVIDE(__text = .); 12 | *(.text._start) 13 | *(.text .text.*) 14 | } 15 | .plt : { *(.plt .plt.*) } 16 | PROVIDE(__etext = .); 17 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 18 | PROVIDE(__text_size = .); 19 | 20 | PROVIDE(__rodata = .); 21 | .hash : { *(.hash) } 22 | .gnu.hash : { *(.gnu.hash) } 23 | .dynsym : { *(.dynsym) } 24 | .dynstr : { *(.dynstr) } 25 | .rela : { *(.rela*) } 26 | .rodata : { *(.rodata .rodata.*) } 27 | .note.gnu.build-id : { KEEP(*(.note.gnu.build-id)) } 28 | .eh_frame_hdr : { KEEP(*(.eh_frame_hdr)) } 29 | PROVIDE(__eh_frame = .); 30 | .eh_frame : { KEEP(*(.eh_frame)) } 31 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 32 | PROVIDE(__rodata_size = . - __rodata); 33 | 34 | PROVIDE(__sbat = .); 35 | .data.sbat : { KEEP(*(.sbat)) } 36 | PROVIDE(__sbat_sizev = 1); 37 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 38 | PROVIDE(__sbat_size = . - __sbat); 39 | 40 | PROVIDE(__reloc = .); 41 | .data.reloc : { KEEP(*(.data.reloc)) } 42 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 43 | PROVIDE(__reloc_size = . - __reloc); 44 | 45 | PROVIDE(__data = .); 46 | .tdata : { *(.tdata .tdata.*) } 47 | .tbss : { *(.tbss .tbss.*) } 48 | .data.rel.ro : { *(.data.rel.ro .data.rel.ro.*) } 49 | .dynamic : { *(.dynamic) } 50 | . = DATA_SEGMENT_RELRO_END(0, .); 51 | .got : { *(.got .got.*) } 52 | .got.plt : { *(.got.plt .got.plt.*) } 53 | .data : { *(.data .data.*) *(.sdata .sdata.*) } 54 | PROVIDE(__data_size_init = . - __data); 55 | PROVIDE(__global_pointer$ = .); 56 | PROVIDE(__bss = .); 57 | .bss : 58 | { 59 | *(.sbss .sbss.*) 60 | *(.bss .bss.*) 61 | *(.dynbss) 62 | . += 0x100000; 63 | . = ALIGN(CONSTANT(MAXPAGESIZE)); 64 | PROVIDE(__boot_stackp = .); 65 | } 66 | PROVIDE(__ebss = .); 67 | PROVIDE(__data_size = . - __data); 68 | PROVIDE(__image_size = . 
- __image_base); 69 | PROVIDE(__spark_end = .); 70 | } 71 | -------------------------------------------------------------------------------- /spark/conf/riscv64gc-unknown-none.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "riscv64", 3 | "code-model": "medium", 4 | "cpu": "generic-rv64", 5 | "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n64-S128", 6 | "features": "+m,+a,+f,+d,+c", 7 | "linker": "rust-lld", 8 | "linker-flavor": "ld.lld", 9 | "llvm-abiname": "lp64d", 10 | "llvm-target": "riscv64", 11 | "max-atomic-width": "64", 12 | "panic-strategy": "abort", 13 | "position-independent-executables": true, 14 | "relro-level": "full", 15 | "stack-probes": { 16 | "kind": "call" 17 | }, 18 | "relocation-model": "pic", 19 | "static-position-independent-executables": true, 20 | "target-pointer-width": "64" 21 | } -------------------------------------------------------------------------------- /spark/conf/riscv64gc-unknown-uefi.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "riscv64", 3 | "code-model": "medium", 4 | "cpu": "generic-rv64", 5 | "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n64-S128", 6 | "features": "+m,+a,+f,+d,+c", 7 | "linker": "rust-lld", 8 | "linker-flavor": "ld.lld", 9 | "llvm-abiname": "lp64d", 10 | "llvm-target": "riscv64", 11 | "max-atomic-width": "64", 12 | "os": "uefi", 13 | "panic-strategy": "abort", 14 | "position-independent-executables": true, 15 | "relro-level": "full", 16 | "stack-probes": { 17 | "kind": "call" 18 | }, 19 | "relocation-model": "pic", 20 | "static-position-independent-executables": true, 21 | "target-pointer-width": "64" 22 | } -------------------------------------------------------------------------------- /spark/src/config.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use core::{fmt, mem, str::Chars}; 5 | 6 | /// A location within a source 7 | #[derive(Clone, Copy, Debug)] 8 | struct SourceLocation(u32, u32); 9 | 10 | impl fmt::Display for SourceLocation { 11 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 12 | write!(f, "{}:{}", self.0 + 1, self.1 + 1) 13 | } 14 | } 15 | 16 | #[derive(Clone, Copy, Debug)] 17 | struct Token<'a> { 18 | location: (u32, u32), 19 | kind: TokenKind<'a>, 20 | } 21 | 22 | impl Token<'_> { 23 | /// Returns the location of the token in the source input 24 | /// 25 | /// The returned location is 0-indexed. 
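    /// (The `Display` impl on `SourceLocation` adds 1 to each component, so a token at the
    /// very start of the input is reported as `1:1`.)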
26 | const fn location(&self) -> SourceLocation { 27 | SourceLocation(self.location.0, self.location.1) 28 | } 29 | } 30 | 31 | #[derive(Clone, Copy, Debug, PartialEq)] 32 | enum TokenKind<'a> { 33 | Ident(&'a str), 34 | Number(&'a str), 35 | String(&'a str), 36 | Bool(bool), 37 | Semi, 38 | Colon, 39 | OpenBrace, 40 | CloseBrace, 41 | Eq, 42 | Comment, 43 | } 44 | 45 | impl fmt::Display for TokenKind<'_> { 46 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 47 | let s = match self { 48 | TokenKind::Ident(_) => "ident", 49 | TokenKind::Number(_) => "number", 50 | TokenKind::String(_) => "string", 51 | TokenKind::Bool(_) => "bool", 52 | TokenKind::Semi => ";", 53 | TokenKind::Colon => ":", 54 | TokenKind::OpenBrace => "{", 55 | TokenKind::CloseBrace => "}", 56 | TokenKind::Eq => "=", 57 | TokenKind::Comment => "comment", 58 | }; 59 | write!(f, "{s}") 60 | } 61 | } 62 | 63 | struct Lexer<'src> { 64 | position: (u32, u32), 65 | input: Chars<'src>, 66 | } 67 | 68 | impl<'src> Lexer<'src> { 69 | fn new(input: &'src str) -> Lexer<'src> { 70 | Self { 71 | position: (0, 0), 72 | input: input.chars(), 73 | } 74 | } 75 | 76 | fn bump(&mut self) -> Option { 77 | let c = self.input.next(); 78 | if c == Some('\n') { 79 | self.position.0 += 1; 80 | self.position.1 = 0; 81 | } else { 82 | self.position.1 += 1; 83 | } 84 | c 85 | } 86 | 87 | fn first(&self) -> char { 88 | self.input.clone().next().unwrap_or('\0') 89 | } 90 | 91 | fn second(&self) -> char { 92 | self.input.clone().nth(1).unwrap_or('\0') 93 | } 94 | 95 | fn next_token(&mut self) -> Option> { 96 | while self.first().is_whitespace() { 97 | self.bump()?; 98 | } 99 | 100 | let location = self.position; 101 | 102 | macro_rules! matchtok { 103 | ($kind:ident) => {{ 104 | self.bump()?; 105 | TokenKind::$kind 106 | }}; 107 | } 108 | 109 | let kind = match self.first() { 110 | ';' => matchtok!(Semi), 111 | ':' => matchtok!(Colon), 112 | '{' => matchtok!(OpenBrace), 113 | '}' => matchtok!(CloseBrace), 114 | '=' => matchtok!(Eq), 115 | '"' => { 116 | self.bump()?; 117 | let start = self.input.as_str(); 118 | let mut len = 0; 119 | while self.first() != '"' { 120 | len += 1; 121 | self.bump()?; 122 | } 123 | let inner = &start[..len]; 124 | self.bump()?; 125 | TokenKind::String(inner) 126 | } 127 | '_' | '-' | 'a'..='z' | 'A'..='Z' => { 128 | let start = self.input.as_str(); 129 | let mut len = 0; 130 | while matches!(self.first(), '_' | '-' | 'a'..='z' | 'A'..='Z' | '0'..='9') { 131 | len += 1; 132 | self.bump()?; 133 | } 134 | match &start[..len] { 135 | "true" => TokenKind::Bool(true), 136 | "false" => TokenKind::Bool(false), 137 | ident => TokenKind::Ident(ident), 138 | } 139 | } 140 | '/' if self.second() == '/' => { 141 | while self.bump()? 
!= '\n' {} 142 | TokenKind::Comment 143 | } 144 | '\0' => return None, 145 | unk => todo!("{unk:?}"), 146 | }; 147 | 148 | Some(Token { location, kind }) 149 | } 150 | } 151 | 152 | #[derive(Debug)] 153 | struct Parser<'src> { 154 | tokens: Vec>, 155 | index: usize, 156 | } 157 | 158 | impl<'src> Parser<'src> { 159 | fn parse_token(&mut self, expected: TokenKind) -> Option> { 160 | let token = self.tokens.get(self.index)?; 161 | let found = token.kind; 162 | 163 | if mem::discriminant(&found) == mem::discriminant(&expected) { 164 | self.index += 1; 165 | Some(*token) 166 | } else { 167 | log::error!( 168 | "{}: expected `{expected}`, found `{found}`", 169 | token.location() 170 | ); 171 | None 172 | } 173 | } 174 | 175 | fn parse_entry(&mut self, key: &'src str, name: Option<&'src str>) -> Option> { 176 | let mut params = vec![]; 177 | let mut entries = vec![]; 178 | 179 | loop { 180 | if matches!( 181 | self.tokens.get(self.index), 182 | None | Some(Token { 183 | kind: TokenKind::CloseBrace, 184 | .. 185 | }) 186 | ) { 187 | break; 188 | } 189 | 190 | let token = self.tokens.get(self.index)?; 191 | let key = match token.kind { 192 | TokenKind::Ident(ident) => { 193 | self.index += 1; 194 | ident 195 | } 196 | kind => { 197 | log::error!("{}: expected `ident`, found `{kind}`", token.location()); 198 | return None; 199 | } 200 | }; 201 | 202 | let name = if let TokenKind::String(name) = self.tokens.get(self.index)?.kind { 203 | self.index += 1; 204 | Some(name) 205 | } else { 206 | None 207 | }; 208 | 209 | let token = self.tokens.get(self.index)?; 210 | match token.kind { 211 | TokenKind::Eq => { 212 | assert!(name.is_none()); 213 | self.index += 1; 214 | 215 | let token = self.tokens.get(self.index)?; 216 | let value = match token.kind { 217 | TokenKind::Number(num) => Value::Number(num), 218 | TokenKind::Bool(bool) => Value::Bool(bool), 219 | TokenKind::String(str) => Value::String(str), 220 | kind => { 221 | log::error!("{}: expected `value`, found `{kind}`", token.location()); 222 | return None; 223 | } 224 | }; 225 | self.index += 1; 226 | 227 | self.parse_token(TokenKind::Semi)?; 228 | params.push(Param { key, value }); 229 | } 230 | TokenKind::OpenBrace => { 231 | self.index += 1; 232 | let entry = self.parse_entry(key, name)?; 233 | self.parse_token(TokenKind::CloseBrace)?; 234 | entries.push(entry); 235 | } 236 | _ => { 237 | log::debug!("BREAK"); 238 | return None; 239 | } 240 | } 241 | } 242 | 243 | Some(Entry { 244 | key, 245 | name, 246 | params, 247 | entries, 248 | }) 249 | } 250 | } 251 | 252 | pub fn parse_config_file(input: &[u8]) -> Entry { 253 | let mut lexer = Lexer::new(core::str::from_utf8(input).unwrap()); 254 | let mut tokens = vec![]; 255 | while let Some(token) = lexer.next_token() { 256 | if token.kind != TokenKind::Comment { 257 | tokens.push(token); 258 | } 259 | } 260 | let mut parser = Parser { tokens, index: 0 }; 261 | let entry = parser.parse_entry("", None).unwrap(); 262 | entry 263 | } 264 | 265 | #[derive(Debug)] 266 | pub struct Entry<'src> { 267 | pub key: &'src str, 268 | pub name: Option<&'src str>, 269 | pub params: Vec>, 270 | pub entries: Vec>, 271 | } 272 | 273 | impl<'src> Entry<'src> { 274 | pub fn param(&self, key: &str) -> Option<&Value<'src>> { 275 | self.params.iter().find_map(|param| { 276 | if param.key == key { 277 | Some(¶m.value) 278 | } else { 279 | None 280 | } 281 | }) 282 | } 283 | } 284 | 285 | #[derive(Debug)] 286 | pub struct Param<'src> { 287 | pub key: &'src str, 288 | pub value: Value<'src>, 289 | } 290 | 291 | impl<'src> 
Param<'src> {} 292 | 293 | /// A parameter value 294 | #[derive(Debug)] 295 | pub enum Value<'src> { 296 | /// A boolean 297 | Bool(bool), 298 | 299 | /// An integer in some base 300 | /// 301 | /// The contained string is guaranteed to a valid representation of an integer in 302 | /// the specified base. It is stored as a string here so as not to impose any restrictions 303 | /// on the value, the consumer can use `.parse()` to get the desired integer type. 304 | Number(&'src str), 305 | 306 | /// A string 307 | String(&'src str), 308 | } 309 | -------------------------------------------------------------------------------- /spark/src/console/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | mod sbi; 5 | 6 | use crate::{dev::uart, sys::fdt, util::maybe_static_arc::MaybeStaticArc}; 7 | use alloc::sync::Arc; 8 | use core::{fmt, time::Duration}; 9 | use spin::Mutex; 10 | 11 | #[derive(Debug)] 12 | pub enum Error { 13 | TimedOut, 14 | NoDevice, 15 | Uart(uart::Error), 16 | } 17 | 18 | pub type Result = core::result::Result; 19 | 20 | pub trait ConsoleBackend: Send + Sync { 21 | fn transmit(&self, byte: u8) -> Result<()>; 22 | fn receive(&self, duration: Option) -> Result; 23 | } 24 | 25 | #[repr(C)] 26 | pub struct Driver { 27 | pub name: &'static str, 28 | pub compatible: &'static [&'static str], 29 | pub init: fn(&fdt::Node) -> anyhow::Result>, 30 | } 31 | 32 | linkset::declare!(console_drivers: Driver); 33 | 34 | pub macro console_driver($driver:expr) { 35 | linkset::entry!(console_drivers, Driver, $driver); 36 | } 37 | 38 | pub struct Console { 39 | inner: Mutex, 40 | } 41 | 42 | struct ConsoleInner { 43 | driver: MaybeStaticArc, 44 | } 45 | 46 | impl ConsoleInner { 47 | fn write_byte(&mut self, byte: u8) -> Result<()> { 48 | self.driver.transmit(byte) 49 | } 50 | } 51 | 52 | impl fmt::Write for ConsoleInner { 53 | fn write_str(&mut self, s: &str) -> fmt::Result { 54 | for byte in s.bytes() { 55 | self.write_byte(byte).map_err(|_| fmt::Error)?; 56 | } 57 | Ok(()) 58 | } 59 | } 60 | 61 | static CONSOLE: Mutex> = 62 | Mutex::new(MaybeStaticArc::Static(&sbi::SBI_CONSOLE)); 63 | 64 | pub fn console() -> MaybeStaticArc { 65 | CONSOLE.lock().clone() 66 | } 67 | 68 | pub fn print(args: fmt::Arguments) { 69 | let _ = fmt::write(&mut *console().inner.lock(), args); 70 | } 71 | 72 | pub macro print($fmt:literal $(, $arg:expr)* $(,)?) { 73 | print(format_args!($fmt, $($arg),*)) 74 | } 75 | 76 | pub macro println { 77 | () => { print!("\n") }, 78 | ($fmt:literal $(, $arg:expr)* $(,)?) => { 79 | print!("{}\n", format_args!($fmt, $($arg),*)) 80 | }, 81 | } 82 | 83 | #[cfg(sbi)] 84 | pub fn init() { 85 | let fdt = fdt::get_fdt(); 86 | 87 | let driver = 'probe: { 88 | if let Some(node) = fdt 89 | .find_node("/chosen") 90 | .and_then(|node| node.property_as::<&str>("stdout-path")) 91 | .map(|path| &path[..path.find(':').unwrap_or(path.len())]) 92 | .and_then(|path| fdt.find_node(path)) 93 | { 94 | for driver in console_drivers.as_slice() { 95 | if !node.is_compatible_any(driver.compatible) { 96 | continue; 97 | } 98 | let backend = match (driver.init)(&node) { 99 | Ok(backend) => backend, 100 | Err(error) => { 101 | println!("probe failed: {error}"); 102 | continue; 103 | } 104 | }; 105 | break 'probe backend; 106 | } 107 | } 108 | 109 | let Some(nodes) = fdt.find_node("/soc").map(|node| node.children()) else { 110 | // No console found. 
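            // Keep the default SBI legacy console that `CONSOLE` is statically
            // initialized with.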
111 | println!("no /soc"); 112 | return; 113 | }; 114 | 115 | for node in nodes { 116 | if !node.is_enabled() { 117 | continue; 118 | } 119 | 120 | for driver in console_drivers.as_slice() { 121 | if !node.is_compatible_any(driver.compatible) { 122 | continue; 123 | } 124 | let backend = match (driver.init)(&node) { 125 | Ok(backend) => backend, 126 | Err(error) => { 127 | println!("probe failed: {error}"); 128 | continue; 129 | } 130 | }; 131 | break 'probe backend; 132 | } 133 | } 134 | 135 | // No console found. 136 | return; 137 | }; 138 | 139 | *CONSOLE.lock() = MaybeStaticArc::Arc(Arc::new(Console { 140 | inner: Mutex::new(ConsoleInner { 141 | driver: MaybeStaticArc::Arc(driver), 142 | }), 143 | })); 144 | } 145 | -------------------------------------------------------------------------------- /spark/src/console/sbi.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use super::{Console, ConsoleBackend, ConsoleInner, Error}; 5 | use crate::util::maybe_static_arc::MaybeStaticArc; 6 | use spin::Mutex; 7 | 8 | #[derive(Clone, Copy)] 9 | pub enum SbiConsole { 10 | Legacy, 11 | } 12 | 13 | impl ConsoleBackend for SbiConsole { 14 | fn receive(&self, _duration: Option) -> super::Result { 15 | Err(Error::NoDevice) 16 | } 17 | 18 | fn transmit(&self, byte: u8) -> super::Result<()> { 19 | match *self { 20 | Self::Legacy => { 21 | sbi::legacy::console_putchar(byte); 22 | Ok(()) 23 | } 24 | } 25 | } 26 | } 27 | 28 | pub static SBI_CONSOLE: Console = Console { 29 | inner: Mutex::new(ConsoleInner { 30 | driver: MaybeStaticArc::Static(&SbiConsole::Legacy), 31 | }), 32 | }; 33 | -------------------------------------------------------------------------------- /spark/src/dev/acpi.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(feature = "acpi")] 5 | 6 | static mut RSDP: Option<*mut u8> = None; 7 | static mut ROOT: Option<::acpi::RootTable> = None; 8 | 9 | #[derive(Clone, Copy, Debug)] 10 | struct Bridge; 11 | 12 | impl ::acpi::Bridge for Bridge { 13 | fn map(&self, phys: usize, _size: usize) -> usize { 14 | phys 15 | } 16 | 17 | fn remap(&self, virt: usize, _new_size: usize) -> usize { 18 | virt 19 | } 20 | 21 | fn unmap(&self, _virt: usize) {} 22 | } 23 | 24 | pub fn init(rsdp: *mut u8) { 25 | unsafe { 26 | RSDP = Some(rsdp); 27 | ROOT = Some(::acpi::RootTable::new(rsdp, Bridge)); 28 | } 29 | } 30 | 31 | pub fn get_rsdp() -> Option<*mut u8> { 32 | unsafe { RSDP } 33 | } 34 | 35 | pub fn get_table() -> Option<*const T> { 36 | let root = unsafe { ROOT.as_ref()? 
}; 37 | root.get_table::() 38 | } 39 | -------------------------------------------------------------------------------- /spark/src/dev/block/ahci/hba/mod.rs: -------------------------------------------------------------------------------- 1 | use core::cell::SyncUnsafeCell; 2 | use libsa::{ 3 | endian::{LittleEndianU16, LittleEndianU32, LittleEndianU64}, 4 | volatile::{Volatile, VolatileSplitPtr}, 5 | }; 6 | 7 | #[repr(C)] 8 | pub struct Memory { 9 | host_capability: Volatile, 10 | global_host_control: Volatile, 11 | interrupt_status: Volatile, 12 | ports_implemented: Volatile, 13 | version: Volatile, 14 | ccc_control: Volatile, 15 | ccc_ports: Volatile, 16 | enclosure_management_location: Volatile, 17 | enclosure_management_control: Volatile, 18 | host_capabilities_extended: Volatile, 19 | bios_handoff_control_status: Volatile, 20 | _reserved0: [u8; 0x74], 21 | _vendor0: [u8; 0x60], 22 | ports: [Port; 32], 23 | } 24 | 25 | impl Memory { 26 | pub fn iter_ports(&self) -> PortIterator { 27 | PortIterator { 28 | ports_implemented: self.ports_implemented.read(), 29 | ports: &self.ports, 30 | next_index: 0, 31 | } 32 | } 33 | } 34 | 35 | pub struct PortIterator<'a> { 36 | ports_implemented: u32, 37 | ports: &'a [Port], 38 | next_index: usize, 39 | } 40 | 41 | impl<'a> Iterator for PortIterator<'a> { 42 | type Item = &'a Port; 43 | 44 | fn next(&mut self) -> Option { 45 | while self.next_index < 32 && (self.ports_implemented & (1 << self.next_index)) == 0 { 46 | self.next_index += 1; 47 | } 48 | 49 | if self.next_index < 32 { 50 | let cur_index = self.next_index; 51 | self.next_index += 1; 52 | self.ports.get(cur_index) 53 | } else { 54 | None 55 | } 56 | } 57 | } 58 | 59 | #[repr(C, packed)] 60 | pub struct PRDT { 61 | data_addr: LittleEndianU64, 62 | _rsvd0: u32, 63 | bits: LittleEndianU32, 64 | } 65 | 66 | impl PRDT { 67 | const EMPTY: Self = Self { 68 | data_addr: LittleEndianU64::new(0), 69 | _rsvd0: 0, 70 | bits: LittleEndianU32::new(0), 71 | }; 72 | } 73 | 74 | #[repr(C, packed)] 75 | pub struct HostToDevice { 76 | ty: u8, 77 | bits1: u8, 78 | command: u8, 79 | feature_low: u8, 80 | lba0: u8, 81 | lba1: u8, 82 | lba2: u8, 83 | device: u8, 84 | lba3: u8, 85 | lba4: u8, 86 | lba5: u8, 87 | feature_high: u8, 88 | sector_count_low: u8, 89 | sector_count_high: u8, 90 | iso_cmd_compl: u8, 91 | control: u8, 92 | _rsvd0: [u8; 4], 93 | } 94 | 95 | #[repr(C, align(1024))] 96 | pub struct Command { 97 | bits: LittleEndianU16, 98 | prd_table_len: LittleEndianU16, 99 | prd_byte_count: LittleEndianU32, 100 | cmd_tbl_address: LittleEndianU64, 101 | } 102 | 103 | #[repr(C, align(128))] 104 | struct CommandTable { 105 | fis: HostToDevice, 106 | rsvd0: [u8; 128 - core::mem::size_of::()], 107 | prdt: [PRDT; 10], 108 | } 109 | 110 | static COMMAND: SyncUnsafeCell = SyncUnsafeCell::new(Command { 111 | bits: LittleEndianU16::new(0), 112 | prd_table_len: LittleEndianU16::new(0), 113 | prd_byte_count: LittleEndianU32::new(0), 114 | cmd_tbl_address: LittleEndianU64::new(0), 115 | }); 116 | 117 | const COMMAND_TABLE_PRDT_COUNT: u16 = 10; 118 | static COMMAND_TBL: SyncUnsafeCell = SyncUnsafeCell::new(CommandTable { 119 | fis: HostToDevice { 120 | ty: 0x27, // host to device 121 | bits1: 1 << 7, // `command` bit 122 | command: 0x25, // read DMA 123 | feature_low: 0, 124 | lba0: 0, 125 | lba1: 0, 126 | lba2: 0, 127 | device: 1 << 6, // LBA mode 128 | lba3: 0, 129 | lba4: 0, 130 | lba5: 0, 131 | feature_high: 0, 132 | sector_count_low: 0, 133 | sector_count_high: 0, 134 | iso_cmd_compl: 0, 135 | control: 0, 136 | 
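        // The LBA and sector-count fields are left zeroed here; `Port::read` fills them in
        // for each request.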
_rsvd0: [0u8; 4], 137 | }, 138 | rsvd0: [0u8; 128 - core::mem::size_of::()], 139 | prdt: [PRDT::EMPTY; COMMAND_TABLE_PRDT_COUNT as usize], 140 | }); 141 | 142 | /// For the HBA to write received FISes. DO NOT read or write to this. 143 | #[repr(C, align(256))] 144 | struct FisReceived([u8; 256]); 145 | static FIS_RECEIVED: SyncUnsafeCell = SyncUnsafeCell::new(FisReceived([0u8; 256])); 146 | 147 | #[repr(C)] 148 | #[allow(dead_code)] 149 | pub struct Port { 150 | cmd_list_ptr: VolatileSplitPtr, 151 | fis_list_ptr: VolatileSplitPtr, 152 | int_status: Volatile, 153 | int_enable: Volatile, 154 | command_status: Volatile, 155 | _rsvd0: [u8; 4], 156 | task_file_data: Volatile, 157 | pub signature: Volatile, 158 | pub sata_status: Volatile, 159 | sata_control: Volatile, 160 | sata_error: Volatile, 161 | sata_active: Volatile, 162 | command_issue: Volatile, 163 | sata_notify: Volatile, 164 | fis_switch_control: Volatile, 165 | _rsvd1: [u8; 11], 166 | _vendor0: [u8; 4], 167 | } 168 | 169 | impl Port { 170 | const SECTOR_SIZE: usize = 512; 171 | 172 | pub const SATA_STATUS_READY: u32 = (1 << 8) | (3 << 0); 173 | pub const ATA_PORT_CLASS: u32 = 0x00000101; 174 | pub const ATA_DEV_BUSY: u8 = 0x80; 175 | pub const ATA_DEV_DRQ: u8 = 0x08; 176 | 177 | pub fn configure(&self) { 178 | const FRE: u32 = 1 << 4; 179 | const ST: u32 = 1 << 0; 180 | const FR: u32 = 1 << 14; 181 | const CR: u32 = 1 << 15; 182 | 183 | // Stop command processing. 184 | self.command_status.write(LittleEndianU32::new( 185 | self.command_status.read().get() & !(FRE | ST), 186 | )); 187 | while (self.command_status.read().get() & (FR | CR)) > 0 { 188 | core::hint::spin_loop(); 189 | } 190 | 191 | self.cmd_list_ptr.set(COMMAND.get()); 192 | self.fis_list_ptr.set(FIS_RECEIVED.get()); 193 | 194 | // Restart command processing. 195 | while (self.command_status.read().get() & CR) > 0 { 196 | core::hint::spin_loop(); 197 | } 198 | self.command_status.write(LittleEndianU32::new( 199 | self.command_status.read().get() | ST | FRE, 200 | )); 201 | } 202 | 203 | // SAFETY: This function assumes the port it belongs to is the only one being actively utilized. 204 | pub fn read(&self, sector_base: usize, buffer: &mut [u8]) { 205 | assert_eq!( 206 | (self.sata_status.read().get() & Self::SATA_STATUS_READY), 207 | Self::SATA_STATUS_READY, 208 | "AHCI device must be in a proper ready state" 209 | ); 210 | assert!( 211 | self.signature.read().get() == Self::ATA_PORT_CLASS, 212 | "AHCI device is not a supported class" 213 | ); 214 | 215 | // Wait for pending port tasks to complete. 
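        // BSY (0x80) is set while the device is still processing a command and DRQ (0x08)
        // while a data transfer is outstanding; both must clear before a new command is issued.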
216 | while (self.task_file_data.read().get() & ((Self::ATA_DEV_BUSY | Self::ATA_DEV_DRQ) as u32)) 217 | > 0 218 | { 219 | core::hint::spin_loop(); 220 | } 221 | 222 | // align to SECTOR_SIZE 223 | let sector_count = ((buffer.len() / Self::SECTOR_SIZE) * Self::SECTOR_SIZE) as u16; 224 | 225 | // Clear interrupts 226 | self.int_status.write(LittleEndianU32::new(0)); 227 | 228 | // Get first command 229 | let command = unsafe { &mut *self.cmd_list_ptr.get() }; 230 | command.bits = LittleEndianU16::new( 231 | (core::mem::size_of::() / core::mem::size_of::()) as u16, 232 | ); 233 | command.cmd_tbl_address = LittleEndianU64::new(COMMAND_TBL.get() as usize as u64); 234 | command.prd_table_len = LittleEndianU16::new(COMMAND_TABLE_PRDT_COUNT); 235 | command.prd_byte_count = LittleEndianU32::new(0); 236 | 237 | let cmd_tbl = unsafe { &mut *COMMAND_TBL.get() }; 238 | let fis = &mut cmd_tbl.fis; 239 | fis.lba0 = (sector_base >> 0) as u8; 240 | fis.lba1 = (sector_base >> 8) as u8; 241 | fis.lba2 = (sector_base >> 16) as u8; 242 | fis.lba3 = (sector_base >> 24) as u8; 243 | fis.lba4 = (sector_base >> 32) as u8; 244 | fis.lba5 = (sector_base >> 40) as u8; 245 | fis.sector_count_low = sector_count as u8; 246 | fis.sector_count_high = (sector_count >> 8) as u8; 247 | 248 | // TODO don't just assume 512b sector size, read from Identify packet 249 | let buffer_base = buffer.as_ptr() as usize; 250 | let prdts = &mut cmd_tbl.prdt; 251 | let sectors_per_prdt = ((2_usize.pow(21) * 2) / Self::SECTOR_SIZE) as u16; 252 | 253 | let mut buffer_offset = buffer_base; 254 | let mut remaining_sectors = sector_count; 255 | for prdt in prdts { 256 | let prdt_sector_count = core::cmp::min(sectors_per_prdt, remaining_sectors); 257 | let prdt_byte_count = (prdt_sector_count as usize) * Self::SECTOR_SIZE; 258 | 259 | prdt.data_addr = LittleEndianU64::new(buffer_offset as u64); 260 | prdt.bits = LittleEndianU32::new(prdt_byte_count as u32); 261 | 262 | remaining_sectors -= prdt_sector_count; 263 | buffer_offset += prdt_byte_count; 264 | 265 | if remaining_sectors == 0 { 266 | break; 267 | } 268 | } 269 | 270 | self.command_issue.write(LittleEndianU32::new(1)); 271 | 272 | while (self.command_issue.read().get() & 1) > 0 { 273 | core::hint::spin_loop(); 274 | 275 | // TODO check for errors 276 | } 277 | } 278 | } 279 | -------------------------------------------------------------------------------- /spark/src/dev/block/ahci/mod.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "dev-ahci")] 2 | 3 | use crate::{ 4 | dev::{pcie::Device, DeviceDriver}, 5 | io, 6 | }; 7 | use anyhow::anyhow; 8 | 9 | pub mod hba; 10 | 11 | #[used] 12 | #[link_section = "device_drivers"] 13 | static AHCI_PCI_DRIVER: DeviceDriver = DeviceDriver { 14 | name: "ahci", 15 | probe_fdt: None, 16 | probe_pci: Some(Ahci::from_pci_device), 17 | }; 18 | 19 | #[allow(dead_code)] 20 | pub struct Ahci<'a> { 21 | device: &'a Device, 22 | sata_ports: Vec<&'a hba::Port>, 23 | } 24 | 25 | impl Ahci<'_> { 26 | fn from_pci_device(device: &Device) -> crate::Result<()> { 27 | if device.ident.class != 1 || device.ident.subclass != 6 { 28 | return Ok(()); 29 | } 30 | 31 | let pci_bar5 = device 32 | .bars() 33 | .nth(5) 34 | .ok_or_else(|| anyhow!("AHCI device does not have 5th BAR"))?; 35 | 36 | // # Safety: AHCI spec promises this is valid. 
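        // BAR5 is the AHCI Base Address Register (ABAR): it maps the HBA's register file,
        // which begins with the generic host control registers followed by the per-port
        // register sets.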
37 | let hba_mem = unsafe { (pci_bar5.read_addr() as *mut hba::Memory).as_mut() }.unwrap(); 38 | 39 | device.enable_bus_master(); 40 | device.enable_memory_write_and_invalidate(); 41 | 42 | hba_mem.iter_ports().for_each(|port| { 43 | if (port.sata_status.read().get() & hba::Port::SATA_STATUS_READY) > 0 44 | && port.signature.read().get() == hba::Port::ATA_PORT_CLASS 45 | { 46 | port.configure(); 47 | super::register(Box::new(AhciPort { port })).ok(); 48 | } 49 | }); 50 | 51 | Ok(()) 52 | } 53 | } 54 | 55 | struct AhciPort<'a> { 56 | port: &'a hba::Port, 57 | } 58 | 59 | // # Safety: We're single threaded. 60 | unsafe impl Send for AhciPort<'_> {} 61 | unsafe impl Sync for AhciPort<'_> {} 62 | 63 | impl core::fmt::Debug for AhciPort<'_> { 64 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 65 | write!(f, "AhciPort") 66 | } 67 | } 68 | 69 | impl super::BlockIo for AhciPort<'_> { 70 | fn block_size(&self) -> u64 { 71 | todo!(); 72 | } 73 | 74 | fn capacity(&self) -> u64 { 75 | todo!(); 76 | } 77 | 78 | fn read_blocks(&self, lba: u64, buffer: &mut [u8]) -> io::Result<()> { 79 | assert_eq!(lba & 0xFF, 0, "address must be sector-aligned"); 80 | self.port.read(lba as usize, buffer); 81 | Ok(()) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /spark/src/dev/block/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{io, size_of}; 5 | use alloc::sync::{Arc, Weak}; 6 | use core::{cmp, fmt::Debug}; 7 | use spin::{mutex::SpinMutex, RwLock}; 8 | use uuid::Uuid; 9 | 10 | pub mod ahci; 11 | pub mod nvme; 12 | 13 | /// Devices which provide a block-oriented interface 14 | pub trait BlockIo: Send + Sync + Debug { 15 | /// Returns the logical block size of the device, in bytes. 16 | fn block_size(&self) -> u64; 17 | 18 | fn capacity(&self) -> u64; 19 | 20 | /// Read blocks from the device. 21 | /// 22 | /// # Errors 23 | /// 24 | /// The length of `buf` must be a multiple of the device's [`block_size`](BlockIo::block_size). 25 | fn read_blocks(&self, lba: u64, buf: &mut [u8]) -> io::Result<()>; 26 | 27 | /// Returns the device's UUID, if any. 
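    ///
    /// The default implementation returns `None`; `Volume` overrides it to return the GPT
    /// partition UUID when one was found.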
28 | fn uuid(&self) -> Option { 29 | None 30 | } 31 | 32 | fn read(&self, mut offset: u64, buf: &mut [u8]) -> io::Result { 33 | static LOCAL_BUF: SpinMutex> = SpinMutex::new(vec![]); 34 | 35 | let block_size = self.block_size(); 36 | 37 | let mut local_buf = LOCAL_BUF.lock(); 38 | let cur_len = local_buf.len(); 39 | local_buf.resize(cur_len.max(block_size as usize), 0); 40 | 41 | let mut buf_offset = 0; 42 | let mut count = buf.len() as u64; 43 | 44 | // Read leading 45 | let block_offset = offset & (block_size - 1); 46 | if block_offset > 0 { 47 | let read = cmp::min(count, block_size - block_offset); 48 | self.read_blocks(offset / block_size, &mut local_buf)?; 49 | buf[..read as usize] 50 | .copy_from_slice(&local_buf[block_offset as usize..][..read as usize]); 51 | offset += read; 52 | buf_offset += read; 53 | count -= read; 54 | if count == 0 { 55 | return Ok(buf.len() - count as usize); 56 | } 57 | } 58 | 59 | // Read full 60 | let blocks = count / block_size; 61 | if blocks > 0 { 62 | let bytes = blocks * block_size; 63 | self.read_blocks( 64 | offset / block_size, 65 | &mut buf[buf_offset as usize..][..bytes as usize], 66 | )?; 67 | offset += bytes; 68 | buf_offset += bytes; 69 | count -= bytes; 70 | } 71 | 72 | // Read trailing 73 | if count > 0 { 74 | assert!(count < block_size); 75 | self.read_blocks(offset / block_size, &mut local_buf)?; 76 | buf[buf_offset as usize..].copy_from_slice(&local_buf[..count as usize]); 77 | } 78 | 79 | Ok(buf.len()) 80 | } 81 | } 82 | 83 | #[derive(Debug)] 84 | pub struct Disk { 85 | device: Box, 86 | volumes: Vec>, 87 | disk_guid: Option, 88 | } 89 | 90 | impl Disk { 91 | pub fn volumes(&self) -> &[Arc] { 92 | &self.volumes 93 | } 94 | } 95 | 96 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 97 | pub enum PartitionType { 98 | Mbr(u8), 99 | Gpt(Uuid), 100 | } 101 | 102 | #[derive(Debug)] 103 | pub struct Volume { 104 | disk: Weak, 105 | block_size: u64, 106 | offset: u64, 107 | capacity: u64, 108 | uuid: Option, 109 | pub partition_type: PartitionType, 110 | pub partition_guid: Option, 111 | } 112 | 113 | unsafe impl Sync for Volume {} 114 | 115 | impl BlockIo for Volume { 116 | fn read_blocks(&self, lba: u64, buf: &mut [u8]) -> crate::io::Result<()> { 117 | if lba + ((buf.len() as u64 + self.block_size - 1) / self.block_size) >= self.capacity { 118 | return Err(io::Error::OutOfBounds); 119 | } 120 | self.disk 121 | .upgrade() 122 | .unwrap() 123 | .device 124 | .read_blocks(self.offset + lba, buf) 125 | } 126 | 127 | fn block_size(&self) -> u64 { 128 | self.block_size 129 | } 130 | 131 | fn capacity(&self) -> u64 { 132 | self.capacity 133 | } 134 | 135 | fn uuid(&self) -> Option { 136 | self.uuid 137 | } 138 | } 139 | 140 | pub static DISKS: RwLock>> = RwLock::new(vec![]); 141 | 142 | pub fn register(device: Box) -> io::Result<()> { 143 | let block_size = device.block_size(); 144 | let mut sector = vec![0; block_size as usize]; 145 | 146 | let mut volumes = vec![]; 147 | let mut disk = Arc::new(Disk { 148 | device, 149 | volumes: vec![], 150 | disk_guid: None, 151 | }); 152 | 153 | let disk_guid = 'probe: { 154 | 'mbr: { 155 | disk.device.read_blocks(0, &mut sector)?; 156 | 157 | if sector[510..512] != [0x55, 0xaa] { 158 | // Invalid MBR, assume raw media. 
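                // Without the 0x55aa boot signature the sector is neither a valid MBR nor a
                // GPT protective MBR, so the device is registered with no volumes.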
159 | break 'probe None; 160 | } 161 | 162 | let partition_table = unsafe { 163 | sector[0x1be..][..size_of!([MbrPartitionEntry; 4])] 164 | .as_ptr() 165 | .cast::<[MbrPartitionEntry; 4]>() 166 | .read_unaligned() 167 | }; 168 | for entry in partition_table { 169 | match entry.partition_type { 170 | 0x00 => continue, // empty partition 171 | 0xee => { 172 | // GPT 173 | assert!(volumes.is_empty()); 174 | break 'mbr; 175 | } 176 | _ => {} 177 | } 178 | 179 | volumes.push(Arc::new(Volume { 180 | disk: Arc::downgrade(&disk), 181 | block_size, 182 | offset: entry.lba_start as u64, 183 | capacity: entry.lba_size as u64, 184 | uuid: None, 185 | partition_type: PartitionType::Mbr(entry.partition_type), 186 | partition_guid: None, 187 | })); 188 | } 189 | 190 | break 'probe None; 191 | } 192 | 193 | // Scan GPT 194 | { 195 | disk.device.read_blocks(1, &mut sector)?; 196 | let gpt_header = unsafe { 197 | sector[..size_of!(GptHeader)] 198 | .as_ptr() 199 | .cast::() 200 | .read() 201 | }; 202 | 203 | let mut lba = gpt_header.partition_table_lba; 204 | let mut offset = 0; 205 | let mut index = 0; 206 | 207 | disk.device.read_blocks(lba, &mut sector)?; 208 | loop { 209 | if index >= gpt_header.partition_entries as usize { 210 | break; 211 | } 212 | if offset + size_of!(GptPartitionEntry) > block_size as usize { 213 | offset = 0; 214 | lba += 1; 215 | disk.device.read_blocks(lba, &mut sector)?; 216 | } 217 | 218 | let entry = unsafe { 219 | &*sector[offset..][..size_of!(GptPartitionEntry)] 220 | .as_ptr() 221 | .cast::() 222 | }; 223 | offset += gpt_header.partition_entry_size as usize; 224 | index += 1; 225 | 226 | if entry.partition_type == PARTITION_TYPE_UNUSED { 227 | continue; 228 | } 229 | 230 | volumes.push(Arc::new(Volume { 231 | disk: Arc::downgrade(&disk), 232 | block_size, 233 | offset: entry.start_lba, 234 | capacity: (entry.end_lba - entry.start_lba) + 1, 235 | uuid: Some(entry.partition_uuid), 236 | partition_type: PartitionType::Gpt(entry.partition_type), 237 | partition_guid: None, 238 | })); 239 | } 240 | 241 | Some(gpt_header.disk_guid) 242 | } 243 | }; 244 | 245 | { 246 | // SAFETY: We hold the only strong reference and none of the `Weak`s in the `Volume`s 247 | // can possibly be dereferenced until we add the disk to the list. (in addition to our 248 | // complete lack of threads.) 
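// `Arc::get_mut_unchecked` is needed because each `Volume` built above already
// holds a `Weak` back-reference to this `Arc<Disk>`; the `Arc` therefore has to
// exist before the volumes do, and the finished volume list and disk GUID are
// patched into the `Disk` afterwards.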
249 | let disk = unsafe { Arc::get_mut_unchecked(&mut disk) }; 250 | 251 | disk.volumes = volumes; 252 | disk.disk_guid = disk_guid; 253 | } 254 | 255 | DISKS.write().push(disk); 256 | Ok(()) 257 | } 258 | 259 | #[repr(C)] 260 | #[derive(Debug)] 261 | struct MbrPartitionEntry { 262 | status: u8, 263 | chs_start: [u8; 3], 264 | partition_type: u8, 265 | chs_end: [u8; 3], 266 | lba_start: u32, 267 | lba_size: u32, 268 | } 269 | 270 | #[repr(C)] 271 | #[derive(Debug)] 272 | struct GptHeader { 273 | signature: [u8; 8], 274 | revision: u32, 275 | header_size: u32, 276 | header_crc32: u32, 277 | reserved0: u32, 278 | this_lba: u64, 279 | alternate_lba: u64, 280 | first_usable_lba: u64, 281 | last_usable_lba: u64, 282 | disk_guid: Uuid, 283 | partition_table_lba: u64, 284 | partition_entries: u32, 285 | partition_entry_size: u32, 286 | partition_table_crc32: u32, 287 | reserved1: u32, 288 | } 289 | 290 | const PARTITION_TYPE_UNUSED: Uuid = Uuid::from_bytes([ 291 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 292 | ]); 293 | 294 | #[repr(C)] 295 | #[derive(Debug)] 296 | struct GptPartitionEntry { 297 | partition_type: Uuid, 298 | partition_uuid: Uuid, 299 | start_lba: u64, 300 | end_lba: u64, 301 | attributes: GptPartitionAttrs, 302 | partition_name: [u8; 72], 303 | } 304 | 305 | bitflags::bitflags! { 306 | #[repr(transparent)] 307 | #[derive(Clone, Copy, Debug)] 308 | struct GptPartitionAttrs : u64 { 309 | const REQUIRED = 1 << 0; 310 | const NO_BLOCK_IO = 1 << 1; 311 | const LEGACY_BIOS_BOOTABLE = 1 << 2; 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /spark/src/dev/block/nvme/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(all(sbi, feature = "dev-nvme"))] 5 | 6 | //! NVM Express 7 | 8 | use crate::{ 9 | dev::{ 10 | block::{self, BlockIo}, 11 | pcie, DeviceDriver, 12 | }, 13 | io, page_align_up, 14 | vmm::PAGE_SIZE, 15 | }; 16 | use alloc::sync::Arc; 17 | use anyhow::anyhow; 18 | use core::cmp; 19 | use libsa::endian::u32_le; 20 | use spin::Mutex; 21 | 22 | mod controller; 23 | mod identify; 24 | mod queue; 25 | 26 | use self::{ 27 | controller::{Controller, DataPtr, IoCommand}, 28 | identify::{IdentifyController, NvmCommandSet, NvmIdentifyNamespace}, 29 | }; 30 | 31 | #[used] 32 | #[link_section = "device_drivers"] 33 | static NVME_DRIVER: DeviceDriver = DeviceDriver { 34 | name: "nvme", 35 | probe_fdt: None, 36 | #[cfg(feature = "dev-pcie")] 37 | probe_pci: Some(pci_init), 38 | }; 39 | 40 | /// Queue Select 41 | /// 42 | /// The driver implements two sets of Submission and Completion queues: the required Admin queue 43 | /// and, since all requests in the bootloader are synchronous, a single I/O queue. 
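// The discriminants below are chosen to match the NVMe queue identifiers: queue
// ID 0 is always the Admin queue per the NVMe specification, and the single I/O
// queue this driver creates presumably uses ID 1 (matching `Queue::Io`).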
44 | #[derive(Clone, Copy, Debug)] 45 | enum Queue { 46 | Admin = 0, 47 | Io = 1, 48 | } 49 | 50 | fn pci_init(dev: &pcie::Device) -> crate::Result<()> { 51 | if dev.ident.class != 1 || dev.ident.subclass != 8 { 52 | return Ok(()); 53 | } 54 | 55 | let mmio_base = dev 56 | .bars() 57 | .next() 58 | .ok_or_else(|| anyhow!("missing BAR0")) 59 | .map(|bar| bar.read_addr() as *mut u32_le)?; 60 | 61 | dev.enable_bus_master(); 62 | 63 | let Some(ctlr) = controller::Controller::initialize(mmio_base) else { 64 | return Err(anyhow!("failed to initialize controller")); 65 | }; 66 | 67 | init_common(ctlr) 68 | } 69 | 70 | /// Common controller initialization 71 | /// 72 | /// This function is called after transport-specific initialization is complete. 73 | fn init_common(mut ctlr: Controller) -> crate::Result<()> { 74 | let ctlr_info = ctlr.identify::(None)?; 75 | 76 | let max_tx_size = ((1 << ctlr_info.mdts) * ctlr.capabilities().min_page_size()) as u64; 77 | 78 | let active_namespaces = 79 | ctlr.identify::>(None)?; 80 | 81 | let ctlr_arc = Arc::new(Mutex::new(ctlr)); 82 | for nsid in active_namespaces.iter() { 83 | let mut ctlr = ctlr_arc.lock(); 84 | 85 | let ns_info = match ctlr.identify::(Some(nsid.get())) { 86 | Ok(info) => info, 87 | Err(error) => { 88 | log::warn!("failed to identify namespace #{nsid}: {error}"); 89 | continue; 90 | } 91 | }; 92 | let lba_format = ns_info.lbaf[ns_info.flbas as usize]; 93 | if lba_format.metadata_size() != 0 { 94 | log::warn!( 95 | "skipping namespace #{nsid} with metadata size {}", 96 | lba_format.metadata_size() 97 | ); 98 | continue; 99 | } 100 | let block_size = lba_format.lba_data_size(); 101 | 102 | drop(ctlr); 103 | let device = Box::new(Namespace { 104 | nsid: nsid.get(), 105 | controller: Arc::clone(&ctlr_arc), 106 | block_size, 107 | capacity: ns_info.nsze.get(), 108 | max_tx_blocks: max_tx_size / block_size as u64, 109 | }); 110 | if let Err(error) = block::register(device) { 111 | log::error!("failed to register namespace #{nsid}: {error}"); 112 | continue; 113 | } 114 | } 115 | 116 | Ok(()) 117 | } 118 | 119 | /// A namespace in an NVM subsystem 120 | #[derive(Debug)] 121 | struct Namespace { 122 | controller: Arc>, 123 | nsid: u32, 124 | block_size: usize, 125 | capacity: u64, 126 | max_tx_blocks: u64, 127 | } 128 | 129 | struct ChainedPrpLists { 130 | lists: Vec>, 131 | } 132 | 133 | #[repr(align(4096))] 134 | struct PrpList { 135 | prps: [u64; 512], 136 | count: usize, 137 | } 138 | 139 | impl PrpList { 140 | fn new(addr: u64) -> Box { 141 | Box::new(Self { 142 | prps: { 143 | let mut prps = [0; 512]; 144 | prps[0] = addr; 145 | prps 146 | }, 147 | count: 1, 148 | }) 149 | } 150 | 151 | fn push(&mut self, addr: u64) -> Option> { 152 | match self.count { 153 | index @ ..=510 => { 154 | self.prps[index] = addr; 155 | self.count += 1; 156 | None 157 | } 158 | index @ 511 => { 159 | let new_list = Self::new(addr); 160 | self.prps[index] = new_list.prps.as_ptr().addr() as u64; 161 | self.count += 1; 162 | Some(new_list) 163 | } 164 | _ => panic!(), 165 | } 166 | } 167 | } 168 | 169 | impl ChainedPrpLists { 170 | fn new() -> Self { 171 | Self { lists: vec![] } 172 | } 173 | 174 | fn addr(&self) -> usize { 175 | self.lists[0].prps.as_ptr().addr() 176 | } 177 | 178 | fn push_addr(&mut self, addr: u64) { 179 | if let Some(list) = self.lists.last_mut() { 180 | if let Some(new_list) = list.push(addr) { 181 | self.lists.push(new_list); 182 | } 183 | } else { 184 | self.lists.push(PrpList::new(addr)); 185 | } 186 | } 187 | } 188 | 189 | impl BlockIo for 
Namespace { 190 | fn block_size(&self) -> u64 { 191 | self.block_size as u64 192 | } 193 | 194 | fn capacity(&self) -> u64 { 195 | self.capacity 196 | } 197 | 198 | fn read_blocks(&self, mut addr: u64, buf: &mut [u8]) -> crate::io::Result<()> { 199 | // Length of buffer must be a multiple of the block size. 200 | if buf.len() & (self.block_size - 1) != 0 { 201 | return Err(io::Error::InvalidArgument); 202 | } 203 | 204 | let mut blocks = buf.len() / self.block_size; 205 | let mut buf_offset = 0; 206 | while blocks > 0 { 207 | let buf_ptr = buf[buf_offset..].as_mut_ptr(); 208 | let r_blocks = cmp::min(cmp::min(blocks, self.max_tx_blocks as usize), 0x10000); 209 | let r_len = r_blocks * self.block_size; 210 | 211 | let mut prp = [0u64; 2]; 212 | prp[0] = buf_ptr.addr() as u64; 213 | let align_space = match buf_ptr.align_offset(PAGE_SIZE) { 214 | 0 => PAGE_SIZE, 215 | x => x, 216 | }; 217 | let _list = if align_space < r_len { 218 | // The transfer crosses at least 1 page boundary 219 | let start = buf_ptr.addr() + align_space; 220 | let remaining_len = r_len - align_space; 221 | if remaining_len > PAGE_SIZE { 222 | // The transfer crosses >= 2 pages, a PRP List is needed. 223 | let num_prps = page_align_up!(remaining_len) / PAGE_SIZE; 224 | let mut prp_list = ChainedPrpLists::new(); 225 | for i in 0..num_prps { 226 | prp_list.push_addr((start + i * PAGE_SIZE) as u64); 227 | } 228 | prp[1] = prp_list.addr() as u64; 229 | Some(prp_list) 230 | } else { 231 | if align_space > 0 { 232 | prp[1] = start as u64; 233 | } 234 | None 235 | } 236 | } else { 237 | None 238 | }; 239 | 240 | let mut ctlr = self.controller.lock(); 241 | ctlr.io_command(IoCommand::Read) 242 | .namespace_id(self.nsid) 243 | .data_ptr(DataPtr::Prp(prp[0], prp[1])) 244 | .cdw10(addr as u32) 245 | .cdw11((addr >> 32) as u32) 246 | .cdw12(r_blocks as u32 - 1) 247 | .execute() 248 | .map(|_| ()) 249 | .map_err(|err| { 250 | log::error!("error: {err:?}"); 251 | io::Error::DeviceError 252 | })?; 253 | 254 | blocks -= r_blocks; 255 | addr += r_blocks as u64; 256 | buf_offset += r_len; 257 | } 258 | 259 | Ok(()) 260 | } 261 | } 262 | -------------------------------------------------------------------------------- /spark/src/dev/block/nvme/queue.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{pages_for, pmm, size_of, vmm::PAGE_SIZE}; 5 | use core::ptr; 6 | use libsa::endian::{u16_le, u32_le, u64_le}; 7 | 8 | #[repr(C)] 9 | #[derive(Clone, Debug, Default)] 10 | pub struct SubmissionQueueEntry { 11 | pub cdw0: u32_le, 12 | pub nsid: u32_le, 13 | pub cdw2: u32_le, 14 | pub cdw3: u32_le, 15 | pub mptr: u64_le, 16 | pub dptr: [u64_le; 2], 17 | pub cdw10: u32_le, 18 | pub cdw11: u32_le, 19 | pub cdw12: u32_le, 20 | pub cdw13: u32_le, 21 | pub cdw14: u32_le, 22 | pub cdw15: u32_le, 23 | } 24 | 25 | #[repr(C)] 26 | pub struct CompletionQueueEntry { 27 | pub dw0: u32_le, 28 | pub dw1: u32_le, 29 | pub sq_head: u16_le, 30 | pub sq_ident: u16_le, 31 | pub cmd_ident: u16_le, 32 | pub status: u16_le, 33 | } 34 | 35 | bitflags::bitflags! 
{ 36 | #[repr(transparent)] 37 | #[derive(Clone, Copy, Debug)] 38 | pub struct CompletionStatus : u16 { 39 | const PHASE = 1 << 0; 40 | } 41 | } 42 | 43 | impl CompletionStatus { 44 | pub fn code_type(self) -> u8 { 45 | (self.bits() >> 9 & 7) as u8 46 | } 47 | 48 | pub fn code(self) -> u8 { 49 | (self.bits() >> 1) as u8 50 | } 51 | } 52 | 53 | /// A pair of Submission and Completion Queues 54 | #[derive(Debug)] 55 | #[allow(clippy::module_name_repetitions)] 56 | pub struct QueuePair { 57 | pub subq: *mut SubmissionQueueEntry, 58 | pub comq: *mut CompletionQueueEntry, 59 | len: u32, 60 | sub_idx: u32, 61 | com_idx: u32, 62 | } 63 | 64 | impl QueuePair { 65 | /// Allocate a new Queue Pair. 66 | /// 67 | /// Since all requests are synchronous, the minimum number of queue entries are allocated. 68 | pub fn new() -> Option { 69 | let subq_size = size_of!(SubmissionQueueEntry); 70 | let comq_size = size_of!(CompletionQueueEntry); 71 | let subq_frames = pages_for!(subq_size); 72 | let comq_frames = pages_for!(comq_size); 73 | let qpair_frames = subq_frames + comq_frames; 74 | 75 | pmm::alloc_frames(qpair_frames).map(|base_addr| { 76 | let subq_base = base_addr; 77 | let comq_base = base_addr + subq_frames * PAGE_SIZE; 78 | 79 | // We need to ensure all PHASE bits are cleared to `0`. 80 | unsafe { (base_addr as *mut u8).write_bytes(0, qpair_frames * PAGE_SIZE) }; 81 | 82 | QueuePair { 83 | subq: subq_base as *mut SubmissionQueueEntry, 84 | comq: comq_base as *mut CompletionQueueEntry, 85 | // NOTE: Apparently some controllers will misbehave if the submission 86 | // and completion queues do not contain the same number of entries. 87 | len: ((subq_frames * PAGE_SIZE) / subq_size) as _, 88 | sub_idx: 0, 89 | com_idx: 0, 90 | } 91 | }) 92 | } 93 | 94 | pub const fn len(&self) -> usize { 95 | self.len as _ 96 | } 97 | 98 | pub fn submit(&mut self, cmd: SubmissionQueueEntry) -> (u16, u16, bool) { 99 | unsafe { self.subq.add(self.sub_idx as usize).write_volatile(cmd) }; 100 | self.sub_idx = (self.sub_idx + 1) % self.len; 101 | 102 | let com_idx = self.com_idx; 103 | let phase = unsafe { self.comq.add(com_idx as _).read_volatile().status.get() }; 104 | (self.com_idx as _, self.sub_idx as _, phase & 0x1 != 0) 105 | } 106 | 107 | pub fn complete<'a>(&mut self) -> (&'a CompletionQueueEntry, u16) { 108 | let old_head = self.com_idx; 109 | self.com_idx = (self.com_idx + 1) % self.len; 110 | (unsafe { &*self.comq.add(old_head as _) }, self.com_idx as _) 111 | } 112 | 113 | pub fn completion_status(&self, com_idx: u16) -> CompletionStatus { 114 | unsafe { 115 | let bits = ptr::addr_of!((*self.comq.add(com_idx as _)).status).read_volatile(); 116 | CompletionStatus::from_bits_retain(bits.get()) 117 | } 118 | } 119 | } 120 | 121 | impl Drop for QueuePair { 122 | fn drop(&mut self) { 123 | let subq_size = size_of!(SubmissionQueueEntry); 124 | let comq_size = size_of!(CompletionQueueEntry); 125 | let subq_frames = pages_for!(subq_size); 126 | let comq_frames = pages_for!(comq_size); 127 | let qpair_frames = subq_frames + comq_frames; 128 | 129 | unsafe { pmm::free_frames(self.subq.addr(), qpair_frames) }; 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /spark/src/dev/fw_cfg.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(feature = "dev-fw_cfg")] 5 | 6 | use self::mmio::{DmaPacket, MmioWindow}; 7 | use 
core::{cell::OnceCell, fmt, ptr::addr_of}; 8 | use libsa::{ 9 | endian::{BigEndianU16, BigEndianU32, BigEndianU64}, 10 | volatile::Volatile, 11 | }; 12 | 13 | #[repr(C)] 14 | #[derive(Clone)] 15 | pub struct File { 16 | size: BigEndianU32, 17 | sel: BigEndianU16, 18 | rsvd: u16, 19 | name: [u8; 56], 20 | } 21 | 22 | impl File { 23 | pub const fn size(&self) -> usize { 24 | self.size.get() as _ 25 | } 26 | 27 | pub fn name(&self) -> Option<&str> { 28 | let mut len = 0; 29 | while len < self.name.len() { 30 | if self.name[len] == 0 { 31 | break; 32 | } 33 | len += 1; 34 | } 35 | core::str::from_utf8(&self.name[..len]).ok() 36 | } 37 | } 38 | 39 | #[allow(clippy::missing_fields_in_debug)] 40 | impl fmt::Debug for File { 41 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 42 | let mut dbg = f.debug_struct("File"); 43 | 44 | dbg.field("size", &self.size()); 45 | dbg.field("sel", &self.sel); 46 | 47 | if let Some(valid_str) = self.name() { 48 | dbg.field("name", &valid_str); 49 | } else { 50 | dbg.field("name", &self.name); 51 | } 52 | 53 | dbg.finish() 54 | } 55 | } 56 | 57 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 58 | pub enum Error { 59 | BadPointer, 60 | DmaError, 61 | } 62 | 63 | pub struct FwCfg { 64 | window: MmioWindow, 65 | files: OnceCell>, 66 | } 67 | 68 | impl FwCfg { 69 | pub fn new(ptr: *mut u8) -> Result { 70 | Ok(FwCfg { 71 | window: MmioWindow::new(ptr)?, 72 | files: OnceCell::new(), 73 | }) 74 | } 75 | 76 | /// Returns a list of all available files 77 | pub fn files(&self) -> &[File] { 78 | self.files.get_or_init(|| unsafe { 79 | self.window.write_ctrl(0x19.into()); 80 | (0..self.window.read::().get()) 81 | .map(|_| self.window.read()) 82 | .collect() 83 | }) 84 | } 85 | 86 | /// Search for a file at the given `path` 87 | pub fn lookup(&self, path: &str) -> Option<&File> { 88 | self.files().iter().find(|f| f.name() == Some(path)) 89 | } 90 | 91 | /// Read the contents of a file into a buffer 92 | pub fn read_file(&self, file: &File) -> Result, Error> { 93 | let mut buf = Box::<[u8]>::new_uninit_slice(file.size()); 94 | 95 | unsafe { 96 | self.dma_command( 97 | Some(file.sel), 98 | DmaCommand::SELECT | DmaCommand::READ, 99 | file.size.get(), 100 | buf.as_mut_ptr().addr() as _, 101 | )?; 102 | 103 | Ok(buf.assume_init().into()) 104 | } 105 | } 106 | 107 | unsafe fn dma_command( 108 | &self, 109 | sel: Option, 110 | cmd: DmaCommand, 111 | length: u32, 112 | address: u64, 113 | ) -> Result<(), Error> { 114 | let control = ((sel.unwrap_or_default().get() as u32) << 16) | cmd.bits(); 115 | let packet = DmaPacket { 116 | control: Volatile::new(BigEndianU32::new(control)), 117 | length: BigEndianU32::new(length), 118 | address: BigEndianU64::new(address), 119 | }; 120 | 121 | // Issue the command by writing the address of the DmaPacket to 122 | // the DMA Control Register. 123 | self.window 124 | .write_dma_ctrl(BigEndianU64::new(addr_of!(packet).addr() as _)); 125 | 126 | // Wait for completion or error 127 | // 128 | // Currently QEMU completes all commands immediately, so we likely won't wait 129 | // at all, but fw_cfg may become asynchronous in the future making this necessary. 130 | loop { 131 | let ctrl = packet.control.read().get(); 132 | 133 | if ctrl & 0x1 != 0 { 134 | return Err(Error::DmaError); 135 | } 136 | if ctrl == 0 { 137 | break; 138 | } 139 | } 140 | 141 | Ok(()) 142 | } 143 | } 144 | 145 | bitflags::bitflags! 
{ 146 | #[repr(transparent)] 147 | struct DmaCommand : u32 { 148 | const ERROR = 1 << 0; 149 | const READ = 1 << 1; 150 | const SKIP = 1 << 2; 151 | const SELECT = 1 << 3; 152 | const WRITE = 1 << 4; 153 | } 154 | } 155 | 156 | mod mmio { 157 | use super::Error; 158 | use core::{cell::UnsafeCell, mem::MaybeUninit, ptr::NonNull}; 159 | use libsa::{ 160 | endian::{BigEndianU16, BigEndianU32, BigEndianU64}, 161 | volatile::Volatile, 162 | }; 163 | 164 | #[repr(C)] 165 | struct MmioWindowInner { 166 | data: UnsafeCell, 167 | control: Volatile, 168 | dma_control: Volatile, 169 | } 170 | 171 | pub struct MmioWindow { 172 | inner: NonNull, 173 | } 174 | 175 | impl MmioWindow { 176 | pub fn new(window_ptr: *mut u8) -> Result { 177 | let window_ptr = window_ptr.cast::(); 178 | 179 | if !window_ptr.is_aligned() { 180 | return Err(Error::BadPointer); 181 | } 182 | 183 | Ok(MmioWindow { 184 | inner: NonNull::new(window_ptr).ok_or(Error::BadPointer)?, 185 | }) 186 | } 187 | 188 | fn inner(&self) -> &MmioWindowInner { 189 | unsafe { self.inner.as_ref() } 190 | } 191 | 192 | pub unsafe fn read_u8(&self) -> u8 { 193 | self.inner().data.get().cast::().read_volatile() 194 | } 195 | 196 | pub unsafe fn read_bytes_raw(&self, dst: *mut u8, size: usize) { 197 | let mut written = 0; 198 | while written < size { 199 | dst.add(written).write(self.read_u8()); 200 | written += 1; 201 | } 202 | } 203 | 204 | pub unsafe fn read(&self) -> T { 205 | let mut uninit = MaybeUninit::::uninit(); 206 | self.read_bytes_raw(uninit.as_mut_ptr().cast(), core::mem::size_of::()); 207 | uninit.assume_init() 208 | } 209 | 210 | pub unsafe fn write_ctrl(&self, value: BigEndianU16) { 211 | self.inner().control.write(value); 212 | } 213 | 214 | pub unsafe fn write_dma_ctrl(&self, value: BigEndianU64) { 215 | self.inner().dma_control.write(value); 216 | } 217 | } 218 | 219 | #[repr(C)] 220 | pub struct DmaPacket { 221 | pub control: Volatile, 222 | pub length: BigEndianU32, 223 | pub address: BigEndianU64, 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /spark/src/dev/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod acpi; 5 | pub mod block; 6 | pub mod fw_cfg; 7 | pub mod pcie; 8 | pub mod uart; 9 | 10 | #[cfg(sbi)] 11 | use {crate::sys::fdt, core::mem::size_of, libsa::extern_sym}; 12 | #[cfg(uefi)] 13 | use {core::fmt, spin::Mutex, uefi::proto::Proto}; 14 | 15 | #[cfg(sbi)] 16 | pub struct DeviceDriver { 17 | pub name: &'static str, 18 | #[cfg(all(sbi, feature = "dev-pcie"))] 19 | pub probe_pci: Option crate::Result<()>>, 20 | pub probe_fdt: Option crate::Result<()>>, 21 | } 22 | 23 | #[cfg(sbi)] 24 | pub fn device_drivers() -> &'static [DeviceDriver] { 25 | let drivers_start = extern_sym!(__start_device_drivers as DeviceDriver); 26 | let drivers_end = extern_sym!(__stop_device_drivers as DeviceDriver); 27 | let len = (drivers_end.addr() - drivers_start.addr()) / size_of::(); 28 | 29 | unsafe { core::slice::from_raw_parts(drivers_start, len) } 30 | } 31 | 32 | #[cfg(sbi)] 33 | pub fn init() { 34 | let fdt = fdt::get_fdt(); 35 | log::debug!("scanning device tree"); 36 | 37 | let Some(soc_node) = fdt.find_node("/soc") else { 38 | log::error!("device tree missing `/soc` node"); 39 | return; 40 | }; 41 | for node in soc_node.children() { 42 | for driver in device_drivers() { 43 | if let Some(init) = driver.probe_fdt { 44 | 
if let Err(error) = init(&node) { 45 | log::error!("{}: {error}", driver.name); 46 | } 47 | } 48 | } 49 | } 50 | } 51 | 52 | #[cfg(uefi)] 53 | pub fn init() { 54 | use uefi::proto::media::block_io::BlockIo as BlockIoProto; 55 | let bs = uefi::boot_services(); 56 | 57 | let handles = bs 58 | .handles_by_protocol::() 59 | .unwrap(); 60 | 61 | for handle in &*handles { 62 | let proto = bs.protocol_for_handle::(*handle).unwrap(); 63 | let media = proto.media(); 64 | 65 | // Skip partitions. 66 | if media.logical_partition { 67 | continue; 68 | } 69 | 70 | let dev = Box::new(UefiBlockDevice { 71 | media_id: media.media_id, 72 | capacity: media.last_block + 1, 73 | block_size: media.block_size as u64, 74 | proto: Mutex::new(proto), 75 | }); 76 | block::register(dev).unwrap(); 77 | } 78 | } 79 | 80 | #[cfg(uefi)] 81 | struct UefiBlockDevice { 82 | proto: Mutex>, 83 | media_id: u32, 84 | capacity: u64, 85 | block_size: u64, 86 | } 87 | 88 | #[cfg(uefi)] 89 | unsafe impl Send for UefiBlockDevice {} 90 | #[cfg(uefi)] 91 | unsafe impl Sync for UefiBlockDevice {} 92 | 93 | #[cfg(uefi)] 94 | #[allow(clippy::missing_fields_in_debug)] 95 | impl fmt::Debug for UefiBlockDevice { 96 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 97 | f.debug_struct("UefiBlockDevice") 98 | .field("media_id", &self.media_id) 99 | .field("capacity", &self.capacity) 100 | .field("block_size", &self.block_size) 101 | .finish() 102 | } 103 | } 104 | 105 | #[cfg(uefi)] 106 | impl block::BlockIo for UefiBlockDevice { 107 | fn block_size(&self) -> u64 { 108 | self.block_size 109 | } 110 | 111 | fn capacity(&self) -> u64 { 112 | self.capacity 113 | } 114 | 115 | fn read_blocks(&self, lba: u64, buf: &mut [u8]) -> crate::io::Result<()> { 116 | let mut proto = self.proto.lock(); 117 | proto.read_blocks(self.media_id, lba, buf)?; 118 | Ok(()) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /spark/src/dev/uart/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | mod ns16550; 5 | 6 | use crate::console::{self, ConsoleBackend}; 7 | use anyhow::anyhow; 8 | use core::time::Duration; 9 | 10 | #[derive(Debug)] 11 | pub enum Error { 12 | InvalidSpeed, 13 | Overrun, 14 | Framing, 15 | Parity, 16 | TimedOut, 17 | } 18 | 19 | impl From for anyhow::Error { 20 | fn from(error: Error) -> anyhow::Error { 21 | anyhow!("{error:?}") 22 | } 23 | } 24 | 25 | impl From for console::Error { 26 | fn from(error: Error) -> console::Error { 27 | match error { 28 | Error::TimedOut => console::Error::TimedOut, 29 | error => console::Error::Uart(error), 30 | } 31 | } 32 | } 33 | 34 | pub type Result = core::result::Result; 35 | 36 | #[derive(Clone, Copy)] 37 | pub struct BusAccess { 38 | base: *mut u8, 39 | width: u8, 40 | shift: u8, 41 | } 42 | 43 | unsafe impl Send for BusAccess {} 44 | unsafe impl Sync for BusAccess {} 45 | 46 | impl BusAccess { 47 | pub const fn new(base: *mut u8, width: u8, shift: u8) -> BusAccess { 48 | Self { base, width, shift } 49 | } 50 | 51 | pub unsafe fn read(self, offset: usize) -> u32 { 52 | let ptr = self.base.add(offset << self.shift); 53 | match self.width { 54 | 1 => ptr.read_volatile() as u32, 55 | 2 => ptr.cast::().read_volatile() as u32, 56 | 4 => ptr.cast::().read_volatile(), 57 | w => unimplemented!("invalid access width: {w} bytes"), 58 | } 59 | } 60 | 61 | pub unsafe fn write(self, offset: usize, value: 
u32) { 62 | let ptr = self.base.add(offset << self.shift); 63 | match self.width { 64 | 1 => ptr.write_volatile(value as u8), 65 | 2 => ptr.cast::().write_volatile(value as u16), 66 | 4 => ptr.cast::().write_volatile(value), 67 | w => unimplemented!("invalid access width: {w} bytes"), 68 | } 69 | } 70 | } 71 | 72 | #[derive(Clone, Copy)] 73 | pub struct Baud(pub u32); 74 | 75 | impl Baud { 76 | pub const B115200: Baud = Baud(115200); 77 | } 78 | 79 | pub trait UartDevice: Send + Sync { 80 | fn receive(&self) -> Result; 81 | fn receive_timeout(&self, duration: Duration) -> Result; 82 | fn transmit(&self, byte: u8) -> Result<()>; 83 | } 84 | 85 | impl ConsoleBackend for T { 86 | fn receive(&self, duration: Option) -> console::Result { 87 | match duration { 88 | Some(duration) => Ok(self.receive_timeout(duration)?), 89 | None => Ok(self.receive()?), 90 | } 91 | } 92 | 93 | fn transmit(&self, byte: u8) -> console::Result<()> { 94 | Ok(self.transmit(byte)?) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /spark/src/dev/uart/ns16550.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use super::{Baud, BusAccess, Result}; 5 | use crate::{ 6 | console::{self, console_driver, ConsoleBackend}, 7 | time::Timeout, 8 | }; 9 | use alloc::sync::Arc; 10 | use anyhow::{anyhow, Context}; 11 | use core::time::Duration; 12 | 13 | bitflags::bitflags! { 14 | #[repr(transparent)] 15 | #[derive(Clone, Copy, Debug)] 16 | pub struct InterruptEnable : u8 { 17 | const RX_DATA_AVAILABLE = 1 << 0; 18 | const TX_HOLDING_REGISTER_EMPTY = 1 << 1; 19 | const RX_LINE_STATUS = 1 << 2; 20 | const MODEM_STATUS = 1 << 3; 21 | } 22 | 23 | #[repr(transparent)] 24 | #[derive(Clone, Copy, Debug)] 25 | pub struct FifoControl : u8 { 26 | const ENABLE = 1 << 0; 27 | const RX_RESET = 1 << 1; 28 | const TX_RESET = 1 << 2; 29 | const DMA_MODE_SELECT = 1 << 3; 30 | const RX_TRIGGER_LO = 1 << 6; 31 | const RX_TRIGGER_HI = 1 << 7; 32 | } 33 | 34 | #[repr(transparent)] 35 | #[derive(Clone, Copy, Debug)] 36 | pub struct LineControl : u8 { 37 | const WORD_LENGTH_LO = 1 << 0; 38 | const WORD_LENGTH_HI = 1 << 1; 39 | const STOP_BITS = 1 << 2; 40 | const PARITY_ENABLE = 1 << 3; 41 | const EVEN_PARITY = 1 << 4; 42 | const STICK_PARITY = 1 << 5; 43 | const SET_BREAK = 1 << 6; 44 | const DIVISOR_LATCH_ACCESS = 1 << 7; 45 | } 46 | 47 | #[repr(transparent)] 48 | #[derive(Clone, Copy, Debug)] 49 | pub struct LineStatus : u8 { 50 | const DATA_READY = 1 << 0; 51 | const OVERRUN_ERROR = 1 << 1; 52 | const PARITY_ERROR = 1 << 2; 53 | const FRAMING_ERROR = 1 << 3; 54 | const BREAK = 1 << 4; 55 | const TX_HOLDING_REGISTER_EMPTY = 1 << 5; 56 | const TX_EMPTY = 1 << 6; 57 | 58 | } 59 | } 60 | 61 | mod reg { 62 | pub const DATA: usize = 0; 63 | pub const INTERRUPT_ENABLE: usize = 1; 64 | pub const FIFO_CONTROL: usize = 2; 65 | pub const LINE_CONTROL: usize = 3; 66 | pub const LINE_STATUS: usize = 5; 67 | 68 | pub const DIVISOR_LO: usize = 0; 69 | pub const DIVISOR_HI: usize = 1; 70 | } 71 | 72 | pub struct Uart { 73 | ba: BusAccess, 74 | baud_freq: u32, 75 | current_speed: Baud, 76 | } 77 | 78 | impl Uart { 79 | pub fn new(ba: BusAccess, baud_freq: u32) -> Uart { 80 | let mut uart = Self { 81 | ba, 82 | baud_freq, 83 | current_speed: Baud(0), 84 | }; 85 | uart.current_speed = divisor_to_baud(baud_freq, uart.read_divisor()); 86 | uart 87 | } 88 | 89 | fn 
read_register(&self, reg: usize) -> u8 { 90 | unsafe { self.ba.read(reg) as u8 } 91 | } 92 | 93 | fn write_register(&self, reg: usize, val: u8) { 94 | unsafe { self.ba.write(reg, val as u32) }; 95 | } 96 | 97 | pub fn line_control(&self) -> LineControl { 98 | LineControl::from_bits_retain(self.read_register(reg::LINE_CONTROL)) 99 | } 100 | 101 | pub fn set_line_control(&self, val: LineControl) { 102 | self.write_register(reg::LINE_CONTROL, val.bits()); 103 | } 104 | 105 | pub fn line_status(&self) -> LineStatus { 106 | LineStatus::from_bits_retain(self.read_register(reg::LINE_STATUS)) 107 | } 108 | 109 | pub fn set_fifo_control(&self, val: FifoControl) { 110 | self.write_register(reg::FIFO_CONTROL, val.bits()); 111 | } 112 | 113 | fn read_divisor(&self) -> u16 { 114 | let lcr = self.line_control(); 115 | self.set_line_control(lcr | LineControl::DIVISOR_LATCH_ACCESS); 116 | let lo = self.read_register(reg::DIVISOR_LO); 117 | let hi = self.read_register(reg::DIVISOR_HI); 118 | self.set_line_control(lcr); 119 | u16::from_le_bytes([lo, hi]) 120 | } 121 | 122 | fn write_divisor(&self, div: u16) { 123 | let [lo, hi] = div.to_le_bytes(); 124 | let lcr = self.line_control(); 125 | self.set_line_control(lcr | LineControl::DIVISOR_LATCH_ACCESS); 126 | self.write_register(reg::DIVISOR_LO, lo); 127 | self.write_register(reg::DIVISOR_HI, hi); 128 | self.set_line_control(lcr); 129 | } 130 | 131 | pub fn initialize(&mut self, baud: Baud) -> Result<()> { 132 | self.write_register(reg::INTERRUPT_ENABLE, 0); 133 | self.write_divisor(baud_to_divisor(self.baud_freq, baud)?); 134 | self.set_line_control(LineControl::WORD_LENGTH_HI | LineControl::WORD_LENGTH_LO); 135 | self.set_fifo_control( 136 | FifoControl::ENABLE 137 | | FifoControl::RX_RESET 138 | | FifoControl::TX_RESET 139 | | FifoControl::RX_TRIGGER_HI 140 | | FifoControl::RX_TRIGGER_LO, 141 | ); 142 | Ok(()) 143 | } 144 | 145 | pub fn check_errors(&self) -> Result<()> { 146 | let lsr = self.line_status(); 147 | if lsr.contains(LineStatus::FRAMING_ERROR) { 148 | Err(super::Error::Framing) 149 | } else if lsr.contains(LineStatus::OVERRUN_ERROR) { 150 | Err(super::Error::Overrun) 151 | } else if lsr.contains(LineStatus::PARITY_ERROR) { 152 | Err(super::Error::Parity) 153 | } else { 154 | Ok(()) 155 | } 156 | } 157 | 158 | pub fn receive_timeout(&self, duration: Duration) -> Result { 159 | let timeout = Timeout::start(duration); 160 | while !self.line_status().contains(LineStatus::DATA_READY) { 161 | self.check_errors()?; 162 | if timeout.expired() { 163 | return Err(super::Error::TimedOut); 164 | } 165 | } 166 | Ok(self.read_register(reg::DATA)) 167 | } 168 | 169 | pub fn receive(&self) -> Result { 170 | self.receive_timeout(Duration::MAX) 171 | } 172 | 173 | pub fn transmit(&self, byte: u8) -> Result<()> { 174 | while !self 175 | .line_status() 176 | .contains(LineStatus::TX_HOLDING_REGISTER_EMPTY) 177 | { 178 | self.check_errors()?; 179 | } 180 | self.write_register(reg::DATA, byte); 181 | Ok(()) 182 | } 183 | } 184 | 185 | fn baud_to_divisor(freq: u32, baud: Baud) -> Result { 186 | match u16::try_from(freq / (baud.0 * 16)) { 187 | Ok(divisor) => Ok(divisor), 188 | Err(_) => Err(super::Error::InvalidSpeed), 189 | } 190 | } 191 | 192 | fn divisor_to_baud(freq: u32, div: u16) -> Baud { 193 | Baud(freq / (div as u32 * 16)) 194 | } 195 | 196 | impl super::UartDevice for Uart { 197 | fn receive(&self) -> Result { 198 | self.receive() 199 | } 200 | 201 | fn receive_timeout(&self, duration: Duration) -> Result { 202 | self.receive_timeout(duration) 203 | } 204 
| 205 | fn transmit(&self, byte: u8) -> Result<()> { 206 | self.transmit(byte) 207 | } 208 | } 209 | 210 | console_driver!(console::Driver { 211 | name: "ns16550", 212 | compatible: &["ns16550", "ns16550a", "snps,dw-apb-uart"], 213 | init: init_fdt, 214 | }); 215 | 216 | fn init_fdt(node: &fdt::Node) -> anyhow::Result> { 217 | let reg = node.reg_by_index(0)?; 218 | let width = node.try_property_as::("reg-io-width")?.unwrap_or(1); 219 | let shift = node 220 | .try_property_as::("reg-shift")? 221 | .unwrap_or_default(); 222 | let baud_freq = node 223 | .try_property_as::("clock-frequency")? 224 | .with_context(|| anyhow!("missing `clock-frequency` property"))?; 225 | 226 | let ba = BusAccess::new(reg.addr as *mut u8, width as u8, shift as u8); 227 | let mut uart = Uart::new(ba, baud_freq); 228 | 229 | uart.initialize(Baud::B115200)?; 230 | 231 | Ok(Arc::new(uart)) 232 | } 233 | -------------------------------------------------------------------------------- /spark/src/fs/fat/bpb.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | //! BIOS Parameter Block 5 | 6 | use core::ops; 7 | 8 | mod private { 9 | pub trait Sealed {} 10 | 11 | impl Sealed for super::Fat16 {} 12 | impl Sealed for super::Fat32 {} 13 | } 14 | 15 | pub trait FatType: private::Sealed { 16 | type BpbData; 17 | } 18 | 19 | #[derive(Debug)] 20 | pub struct Fat16; 21 | 22 | impl FatType for Fat16 { 23 | type BpbData = BpbFat12And16; 24 | } 25 | 26 | #[derive(Debug)] 27 | pub struct Fat32; 28 | 29 | impl FatType for Fat32 { 30 | type BpbData = BpbFat32; 31 | } 32 | 33 | #[repr(C)] 34 | #[derive(Debug)] 35 | pub struct Bpb { 36 | pub common: Common, 37 | pub type_data: F::BpbData, 38 | } 39 | 40 | impl ops::Deref for Bpb { 41 | type Target = F::BpbData; 42 | 43 | fn deref(&self) -> &Self::Target { 44 | &self.type_data 45 | } 46 | } 47 | 48 | impl Bpb { 49 | pub const fn root_cluster(&self) -> u32 { 50 | u32::from_le_bytes(self.type_data.root_cluster) 51 | } 52 | } 53 | 54 | #[repr(C)] 55 | #[derive(Debug)] 56 | pub struct Common { 57 | pub boot_jmp: [u8; 3], 58 | pub oem_name: [u8; 8], 59 | pub bytes_per_sector: [u8; 2], 60 | pub sectors_per_cluster: u8, 61 | pub reserved_sectors: [u8; 2], 62 | pub num_fats: u8, 63 | pub root_entry_count: [u8; 2], 64 | pub total_sectors_16: [u8; 2], 65 | pub media: u8, 66 | pub fat_size_16: [u8; 2], 67 | pub sectors_per_track: [u8; 2], 68 | pub num_heads: [u8; 2], 69 | pub hidden_sectors: [u8; 4], 70 | pub total_sectors_32: [u8; 4], 71 | } 72 | 73 | #[repr(C)] 74 | #[derive(Debug)] 75 | pub struct BpbFat12And16 { 76 | pub drive_number: u8, 77 | pub reserved1: u8, 78 | pub boot_signature: u8, 79 | pub volume_id: [u8; 4], 80 | pub volume_label: [u8; 11], 81 | pub fs_type: [u8; 8], 82 | pub _rsvd0: [u8; 448], 83 | pub signature: [u8; 2], 84 | } 85 | 86 | #[repr(C)] 87 | #[derive(Debug)] 88 | pub struct BpbFat32 { 89 | pub fat_size_32: [u8; 4], 90 | pub ext_flags: [u8; 2], 91 | pub fs_version: [u8; 2], 92 | pub root_cluster: [u8; 4], 93 | pub fs_info: [u8; 2], 94 | pub bk_boot_sector: [u8; 2], 95 | pub reserved: [u8; 12], 96 | pub drive_number: u8, 97 | pub reserved1: u8, 98 | pub boot_signature: u8, 99 | pub volume_id: [u8; 4], 100 | pub volume_label: [u8; 11], 101 | pub fs_type: [u8; 8], 102 | pub _rsvd0: [u8; 420], 103 | pub signature: [u8; 2], 104 | } 105 | 106 | #[derive(Debug)] 107 | pub enum Superblock { 108 | Fat16(Bpb), 109 | Fat32(Bpb), 110 | } 
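// Illustrative sketch, not part of the original module: per the FAT specification the
// variant is determined purely by the cluster count -- fewer than 4085 clusters means
// FAT12, fewer than 65525 means FAT16, anything larger is FAT32. The `maybe32` argument
// is the same raw `fat_size_32` byte slice that `Common::fat_size` expects (any 4-byte
// slice will do for FAT12/16 media); the function name itself is hypothetical.
#[allow(dead_code)]
fn fat_variant(common: &Common, maybe32: &[u8]) -> &'static str {
    match common.cluster_count(maybe32) {
        n if n < 4_085 => "FAT12",
        n if n < 65_525 => "FAT16",
        _ => "FAT32",
    }
}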
111 | 112 | impl ops::Deref for Superblock { 113 | type Target = Common; 114 | 115 | fn deref(&self) -> &Self::Target { 116 | match self { 117 | Self::Fat16(bpb) => &bpb.common, 118 | Self::Fat32(bpb) => &bpb.common, 119 | } 120 | } 121 | } 122 | 123 | impl Common { 124 | pub const fn bytes_per_sector(&self) -> u64 { 125 | u16::from_le_bytes(self.bytes_per_sector) as u64 126 | } 127 | 128 | pub const fn root_entry_count(&self) -> u64 { 129 | u16::from_le_bytes(self.root_entry_count) as u64 130 | } 131 | 132 | pub const fn sectors_per_cluster(&self) -> u64 { 133 | self.sectors_per_cluster as u64 134 | } 135 | 136 | pub const fn reserved_sectors(&self) -> u64 { 137 | u16::from_le_bytes(self.reserved_sectors) as u64 138 | } 139 | 140 | pub const fn root_directory_sectors(&self) -> u64 { 141 | ((self.root_entry_count() * 32) + (self.bytes_per_sector() - 1)) / self.bytes_per_sector() 142 | } 143 | 144 | pub const fn total_sectors(&self) -> u64 { 145 | let total16 = u16::from_le_bytes(self.total_sectors_16) as u64; 146 | if total16 == 0 { 147 | u32::from_le_bytes(self.total_sectors_32) as u64 148 | } else { 149 | total16 150 | } 151 | } 152 | 153 | pub fn fat_size(&self, maybe32: &[u8]) -> u64 { 154 | let size_16 = u16::from_le_bytes(self.fat_size_16) as u64; 155 | if size_16 == 0 { 156 | u32::from_le_bytes(maybe32.try_into().unwrap()) as u64 157 | } else { 158 | size_16 159 | } 160 | } 161 | 162 | pub fn data_sectors(&self, maybe32: &[u8]) -> u64 { 163 | self.total_sectors() 164 | - (self.reserved_sectors() 165 | + (self.num_fats as u64 * self.fat_size(maybe32)) 166 | + self.root_directory_sectors()) 167 | } 168 | 169 | pub fn cluster_count(&self, maybe32: &[u8]) -> u64 { 170 | self.data_sectors(maybe32) / self.sectors_per_cluster() 171 | } 172 | } 173 | 174 | impl Superblock { 175 | pub fn fat_entry_scale(&self) -> u64 { 176 | match self { 177 | Self::Fat16(_) => 2, 178 | Self::Fat32(_) => 4, 179 | } 180 | } 181 | 182 | pub fn fat_offset_for_cluster(&self, cluster: u32) -> (u64, usize) { 183 | let fat_offset = cluster as u64 * self.fat_entry_scale(); 184 | let lba = self.reserved_sectors() + fat_offset / self.bytes_per_sector(); 185 | let offset = fat_offset % self.bytes_per_sector(); 186 | (lba, offset as usize) 187 | } 188 | 189 | pub fn fat_size(&self) -> u64 { 190 | let size_16 = u16::from_le_bytes(self.fat_size_16) as u64; 191 | match self { 192 | Self::Fat16(_) => size_16, 193 | Self::Fat32(bpb) => { 194 | if size_16 == 0 { 195 | u32::from_le_bytes(bpb.type_data.fat_size_32) as u64 196 | } else { 197 | size_16 198 | } 199 | } 200 | } 201 | } 202 | 203 | pub fn first_data_sector(&self) -> u64 { 204 | self.reserved_sectors() 205 | + (self.num_fats as u64 * self.fat_size()) 206 | + self.root_directory_sectors() 207 | } 208 | 209 | pub fn cluster_to_lba(&self, cluster: u32) -> u64 { 210 | self.first_data_sector() + (cluster as u64 - 2) * self.sectors_per_cluster() 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /spark/src/fs/fat/dir.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{fs::FileType, io, size_of}; 5 | 6 | pub const ENTRY_SIZE: usize = 32; 7 | 8 | #[derive(Debug)] 9 | pub struct DirEntry { 10 | pub file_type: FileType, 11 | pub cluster: u32, 12 | pub size: u64, 13 | } 14 | 15 | bitflags::bitflags! 
{ 16 | #[repr(transparent)] 17 | pub struct DirAttrs : u8 { 18 | const READ_ONLY = 0x01; 19 | const HIDDEN = 0x02; 20 | const SYSTEM = 0x04; 21 | const VOLUME_ID = 0x08; 22 | const DIRECTORY = 0x10; 23 | const ARCHIVE = 0x20; 24 | const LONG_NAME = 0x0f; 25 | } 26 | } 27 | 28 | impl From for FileType { 29 | fn from(attrs: DirAttrs) -> Self { 30 | if attrs.contains(DirAttrs::DIRECTORY) { 31 | FileType::Directory 32 | } else { 33 | FileType::Regular 34 | } 35 | } 36 | } 37 | 38 | impl DirAttrs { 39 | pub const fn new(bits: u8) -> DirAttrs { 40 | Self::from_bits_retain(bits) 41 | } 42 | } 43 | 44 | #[repr(C)] 45 | pub struct ShortDirEntry { 46 | name: [u8; 8], 47 | extension: [u8; 3], 48 | attrs: DirAttrs, 49 | _reserved: u8, 50 | creation_time_s: u8, 51 | creation_time: u16, 52 | creation_date: [u8; 2], 53 | accessed_date: [u8; 2], 54 | cluster_hi: u16, 55 | modification_time: u16, 56 | modification_date: [u8; 2], 57 | cluster_lo: u16, 58 | pub size: u32, 59 | } 60 | 61 | impl ShortDirEntry { 62 | pub fn name(&self) -> String { 63 | let mut name = String::new(); 64 | for c in self.name { 65 | if c == b' ' { 66 | break; 67 | } 68 | name.push(c as char); 69 | } 70 | if self.extension[0] != b' ' { 71 | name.push('.'); 72 | for c in self.extension { 73 | if c == b' ' { 74 | break; 75 | } 76 | name.push(c as char); 77 | } 78 | } 79 | name 80 | } 81 | 82 | pub const fn cluster(&self) -> u32 { 83 | ((self.cluster_hi as u32) << 16) | self.cluster_lo as u32 84 | } 85 | } 86 | 87 | #[repr(C)] 88 | pub struct LongDirEntry { 89 | order: u8, 90 | name1: [u8; 10], // 1..=5 91 | attrs: u8, 92 | _reserved: u8, 93 | checksum: u8, 94 | name2: [u8; 12], // 6..=11 95 | cluster_lo: u16, 96 | name3: [u8; 4], // 12..=13 97 | } 98 | 99 | impl LongDirEntry { 100 | pub fn name_piece_raw(&self) -> [u16; 13] { 101 | let mut buf = [0u16; 13]; 102 | 103 | unsafe { 104 | buf.as_mut_ptr() 105 | .cast::() 106 | .add(0) 107 | .copy_from(self.name1.as_ptr(), 10); 108 | buf.as_mut_ptr() 109 | .cast::() 110 | .add(10) 111 | .copy_from(self.name2.as_ptr(), 12); 112 | buf.as_mut_ptr() 113 | .cast::() 114 | .add(22) 115 | .copy_from(self.name3.as_ptr(), 2); 116 | } 117 | 118 | buf 119 | } 120 | } 121 | 122 | const _: () = { 123 | assert!(size_of!(ShortDirEntry) == ENTRY_SIZE); 124 | assert!(size_of!(LongDirEntry) == ENTRY_SIZE); 125 | }; 126 | 127 | pub struct LongName { 128 | buf: [u16; 256], 129 | offset: usize, 130 | } 131 | 132 | impl LongName { 133 | pub fn new() -> LongName { 134 | Self { 135 | buf: [0; 256], 136 | offset: 256, 137 | } 138 | } 139 | 140 | fn len(&self) -> usize { 141 | 256 - self.offset 142 | } 143 | 144 | pub fn is_empty(&self) -> bool { 145 | self.len() == 0 146 | } 147 | 148 | fn as_slice(&self) -> &[u16] { 149 | if self.is_empty() { 150 | &[] 151 | } else { 152 | &self.buf[self.offset..] 
153 | } 154 | } 155 | 156 | pub fn push(&mut self, piece: &[u16]) -> io::Result<()> { 157 | if piece.len() > self.offset { 158 | return Err(io::Error::NameTooLong); 159 | } 160 | self.offset -= piece.len(); 161 | self.buf[self.offset..][..piece.len()].copy_from_slice(piece); 162 | Ok(()) 163 | } 164 | 165 | fn clear(&mut self) { 166 | self.offset = 256; 167 | } 168 | 169 | fn to_string(&self) -> Option { 170 | String::from_utf16(self.as_slice()).ok() 171 | } 172 | 173 | pub fn finish(&mut self) -> Option { 174 | let string = self.to_string()?; 175 | self.clear(); 176 | Some(string) 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /spark/src/fs/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{dev::block::Volume, io, size_of}; 5 | use alloc::sync::Arc; 6 | use libsa::extern_sym; 7 | 8 | mod fat; 9 | 10 | /// A filesystem driver compiled into the bootloader 11 | #[repr(C)] 12 | pub struct FilesystemDriver { 13 | name: &'static str, 14 | 15 | /// "Mount" an instance of this filesystem onto the provided [`Volume`] 16 | /// 17 | /// If successful, this function returns a [`File`] object for the root directory 18 | /// of the filesystem. 19 | mount: fn(volume: &Arc) -> io::Result>, 20 | } 21 | 22 | pub trait File: Send { 23 | fn open(&mut self, path: &str) -> io::Result>; 24 | fn size(&mut self) -> u64; 25 | fn read(&mut self, buf: &mut [u8]) -> io::Result; 26 | fn position(&mut self) -> u64; 27 | fn volume(&self) -> &Arc; 28 | 29 | fn read_to_end(&mut self) -> io::Result> { 30 | let size = (self.size() - self.position()) as usize; 31 | let mut buf = vec![0; size]; 32 | let mut read = 0; 33 | while read < size { 34 | read += self.read(&mut buf[read..])?; 35 | } 36 | Ok(buf) 37 | } 38 | } 39 | 40 | fn filesystem_drivers() -> &'static [FilesystemDriver] { 41 | let data = extern_sym!(__start_fs_drivers as FilesystemDriver); 42 | let len = (extern_sym!(__stop_fs_drivers).addr() - data.addr()) / size_of!(FilesystemDriver); 43 | unsafe { core::slice::from_raw_parts(data, len) } 44 | } 45 | 46 | pub fn mount(volume: &Arc) -> io::Result> { 47 | for driver in filesystem_drivers() { 48 | match (driver.mount)(volume) { 49 | Ok(file) => return Ok(file), 50 | Err(io::Error::Unsupported) => continue, 51 | Err(err) => return Err(err), 52 | } 53 | } 54 | Err(io::Error::NotFound) 55 | } 56 | 57 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 58 | pub enum FileType { 59 | Regular, 60 | Directory, 61 | } 62 | -------------------------------------------------------------------------------- /spark/src/io.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | struct Logger; 5 | 6 | impl log::Log for Logger { 7 | fn enabled(&self, _metadata: &log::Metadata) -> bool { 8 | true 9 | } 10 | 11 | fn log(&self, record: &log::Record) { 12 | println!("{}: {}", record.target(), record.args()); 13 | } 14 | 15 | fn flush(&self) {} 16 | } 17 | 18 | static LOGGER: Logger = Logger; 19 | 20 | pub fn init() { 21 | log::set_logger(&LOGGER).unwrap(); 22 | log::set_max_level(log::LevelFilter::Trace); 23 | } 24 | 25 | pub type Result = core::result::Result; 26 | 27 | #[allow(clippy::enum_variant_names)] 28 | #[derive(Clone, Debug, Eq, PartialEq)] 29 | pub enum Error { 30 | 
DeviceError, 31 | InvalidArgument, 32 | IsADirectory, 33 | NameTooLong, 34 | NotADirectory, 35 | NotFound, 36 | Other, 37 | OutOfBounds, 38 | TimedOut, 39 | Unsupported, 40 | } 41 | 42 | impl core::fmt::Display for Error { 43 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 44 | core::fmt::Debug::fmt(self, f) 45 | } 46 | } 47 | 48 | impl From for anyhow::Error { 49 | fn from(value: Error) -> Self { 50 | anyhow::anyhow!("{value}") 51 | } 52 | } 53 | 54 | #[cfg(uefi)] 55 | impl From for Error { 56 | fn from(value: uefi::Status) -> Self { 57 | use uefi::Status; 58 | match value { 59 | Status::DEVICE_ERROR => Self::DeviceError, 60 | Status::TIMEOUT => Self::TimedOut, 61 | _ => { 62 | log::error!("uefi->io: {value:?}"); 63 | Self::Other 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /spark/src/malloc.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use core::{alloc::GlobalAlloc, ptr}; 5 | #[cfg(sbi)] 6 | use { 7 | crate::{pages_for, pmm, vmm::PAGE_SIZE}, 8 | core::cmp::Ordering, 9 | }; 10 | 11 | #[cfg(uefi)] 12 | struct BootServicesAllocator; 13 | 14 | #[cfg(uefi)] 15 | unsafe impl GlobalAlloc for BootServicesAllocator { 16 | unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { 17 | use uefi::table::MemoryType; 18 | let bs = uefi::boot_services(); 19 | match bs.allocate_pool(MemoryType::LOADER_DATA, layout.size()) { 20 | Ok(ptr) => ptr, 21 | Err(err) => { 22 | log::error!("{err:?}"); 23 | ptr::null_mut() 24 | } 25 | } 26 | } 27 | 28 | unsafe fn dealloc(&self, ptr: *mut u8, _layout: core::alloc::Layout) { 29 | let bs = uefi::boot_services(); 30 | if let Err(err) = bs.free_pool(ptr) { 31 | log::error!("{err:?}"); 32 | panic!(); 33 | } 34 | } 35 | } 36 | 37 | #[cfg(uefi)] 38 | #[global_allocator] 39 | static MALLOC: BootServicesAllocator = BootServicesAllocator; 40 | 41 | #[cfg(sbi)] 42 | struct BadButGoodEnoughAllocator; 43 | 44 | #[cfg(sbi)] 45 | unsafe impl GlobalAlloc for BadButGoodEnoughAllocator { 46 | unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { 47 | assert!(layout.align() <= PAGE_SIZE); 48 | 49 | let num_frames = pages_for!(layout.size()); 50 | pmm::alloc_frames(num_frames).map_or_else(ptr::null_mut, |addr| { 51 | let ptr = addr as *mut u8; 52 | ptr.write_bytes(0, num_frames * PAGE_SIZE); 53 | ptr 54 | }) 55 | } 56 | 57 | unsafe fn realloc( 58 | &self, 59 | ptr: *mut u8, 60 | layout: core::alloc::Layout, 61 | new_size: usize, 62 | ) -> *mut u8 { 63 | let old_frames = pages_for!(layout.size()); 64 | let new_frames = pages_for!(new_size); 65 | 66 | match new_frames.cmp(&old_frames) { 67 | Ordering::Equal => ptr, 68 | Ordering::Less => { 69 | let free_frames = old_frames - new_frames; 70 | let free_base = ptr as usize + PAGE_SIZE * new_frames; 71 | 72 | pmm::free_frames(free_base, free_frames); 73 | 74 | ptr 75 | } 76 | Ordering::Greater => pmm::alloc_frames(new_frames).map_or_else(ptr::null_mut, |addr| { 77 | let new_ptr = addr as *mut u8; 78 | new_ptr.copy_from(ptr, layout.size()); 79 | pmm::free_frames(ptr as usize, old_frames); 80 | new_ptr 81 | }), 82 | } 83 | } 84 | 85 | unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { 86 | pmm::free_frames(ptr as usize, pages_for!(layout.size())); 87 | } 88 | } 89 | 90 | #[cfg(sbi)] 91 | #[global_allocator] 92 | static MALLOC: BadButGoodEnoughAllocator = 
BadButGoodEnoughAllocator; 93 | -------------------------------------------------------------------------------- /spark/src/mem/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod pmm; 5 | pub mod vmm; 6 | 7 | #[macro_export] 8 | macro_rules! pages_for { 9 | ($size:expr) => { 10 | ($size as usize + $crate::vmm::PAGE_SIZE - 1) / $crate::vmm::PAGE_SIZE 11 | }; 12 | ($size:expr, $page_size:expr) => {{ 13 | let page_size = $page_size; 14 | ($size as usize + (page_size - 1)) / page_size 15 | }}; 16 | (type $t:ty $(, $page_size:expr)?) => { 17 | pages_for!(::core::mem::size_of::<$t>() $(, $page_size)?) 18 | }; 19 | } 20 | 21 | #[macro_export] 22 | macro_rules! page_offset { 23 | ($x:expr) => { 24 | $x & ($crate::vmm::PAGE_SIZE - 1) 25 | }; 26 | } 27 | 28 | #[macro_export] 29 | macro_rules! page_align_down { 30 | ($x:expr) => { 31 | $x & !($crate::vmm::PAGE_SIZE - 1) 32 | }; 33 | } 34 | 35 | #[macro_export] 36 | macro_rules! page_align_up { 37 | ($x:expr) => { 38 | ($x + $crate::vmm::PAGE_SIZE - 1) & !($crate::vmm::PAGE_SIZE - 1) 39 | }; 40 | } 41 | 42 | #[macro_export] 43 | macro_rules! size_of { 44 | ($t:ty) => { 45 | ::core::mem::size_of::<$t>() 46 | }; 47 | } 48 | 49 | pub unsafe fn cast_slice(buf: &[u8]) -> &T { 50 | debug_assert!(buf.len() >= size_of!(T)); 51 | let ptr = buf.as_ptr().cast::(); 52 | debug_assert!(ptr.is_aligned()); 53 | &*ptr 54 | } 55 | -------------------------------------------------------------------------------- /spark/src/mem/pmm/freelist_allocator.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use core::{mem, ptr}; 5 | 6 | pub struct Tag { 7 | prev: *mut Tag, 8 | next: *mut Tag, 9 | pub base: usize, 10 | pub size: usize, 11 | } 12 | 13 | fn pow2_align_up(x: usize, align: usize) -> usize { 14 | (x + (align - 1)) & !(align - 1) 15 | } 16 | 17 | impl Tag { 18 | unsafe fn prev(self: *const Self) -> *mut Tag { 19 | (*self).prev 20 | } 21 | 22 | unsafe fn next(self: *const Self) -> *mut Tag { 23 | (*self).next 24 | } 25 | 26 | unsafe fn base(self: *const Self) -> usize { 27 | (*self).base 28 | } 29 | 30 | unsafe fn size(self: *const Self) -> usize { 31 | (*self).size 32 | } 33 | 34 | unsafe fn end(self: *const Self) -> usize { 35 | self.base() + self.size() 36 | } 37 | 38 | unsafe fn is_empty(self: *const Self) -> bool { 39 | self.size() == 0 40 | } 41 | 42 | unsafe fn can_satisfy(self: *const Self, size: usize, align: usize) -> Option { 43 | assert!(!self.is_empty()); 44 | let start = pow2_align_up(self.base(), align); 45 | (start < self.end() && self.end() - start >= size).then_some(start - self.base()) 46 | } 47 | } 48 | 49 | pub struct FreelistAllocator { 50 | head: *mut Tag, 51 | tail: *mut Tag, 52 | len: usize, 53 | } 54 | 55 | unsafe impl Send for FreelistAllocator {} 56 | unsafe impl Sync for FreelistAllocator {} 57 | 58 | impl FreelistAllocator { 59 | // Returns an iterator over all tags. 60 | fn tags(&self) -> impl Iterator { 61 | let mut tag = self.head; 62 | core::iter::from_fn(move || unsafe { 63 | if tag.is_null() { 64 | None 65 | } else { 66 | let next = tag.next(); 67 | Some(mem::replace(&mut tag, next)) 68 | } 69 | }) 70 | } 71 | 72 | // Unlink a tag and decrement `self.len`. 
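// (The `Tag` headers live in the free memory they describe -- `insert` writes a `Tag`
// at the base address of each free region -- so unlinking only rewires the neighbouring
// `prev`/`next` pointers, or `head`/`tail` at either end of the list; no memory is
// released here.)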
73 | unsafe fn remove_tag(&mut self, tag: *mut Tag) { 74 | let prev = tag.prev(); 75 | let next = tag.next(); 76 | 77 | if prev.is_null() { 78 | self.head = next; 79 | } else { 80 | (*prev).next = next; 81 | } 82 | if next.is_null() { 83 | self.tail = prev; 84 | } else { 85 | (*next).prev = prev; 86 | } 87 | self.len -= 1; 88 | } 89 | 90 | // Link a tag. 91 | unsafe fn link_tag(&mut self, tag: *mut Tag, prev: *mut Tag, next: *mut Tag) { 92 | if next.is_null() { 93 | self.tail = tag; 94 | } else { 95 | (*next).prev = tag; 96 | } 97 | if prev.is_null() { 98 | self.head = tag; 99 | } else { 100 | (*prev).next = tag; 101 | } 102 | } 103 | 104 | // Link a tag and decrement `self.len`. 105 | unsafe fn insert_tag(&mut self, tag: *mut Tag, prev: *mut Tag, next: *mut Tag) { 106 | self.link_tag(tag, prev, next); 107 | self.len += 1; 108 | } 109 | 110 | // Insert (add/deallocate) a region. 111 | unsafe fn insert(&mut self, mut base: usize, mut size: usize) { 112 | let mut prev = ptr::null_mut(); 113 | let mut next = ptr::null_mut(); 114 | for tag in self.tags() { 115 | prev = next; 116 | next = tag; 117 | if next.base() > base { 118 | break; 119 | } 120 | } 121 | 122 | if !prev.is_null() && prev.end() == base { 123 | base = prev.base(); 124 | size += prev.size(); 125 | prev = prev.prev(); 126 | self.len -= 1; 127 | } 128 | 129 | if !next.is_null() && (base + size) == next.base() { 130 | size += next.size(); 131 | next = next.next(); 132 | self.len -= 1; 133 | } 134 | 135 | let new = ptr::from_exposed_addr_mut::(base); 136 | *new = Tag { 137 | prev, 138 | next, 139 | base, 140 | size, 141 | }; 142 | 143 | self.insert_tag(new, prev, next); 144 | } 145 | 146 | // Find and remove (allocate) a region. 147 | unsafe fn remove(&mut self, size: usize, align: usize) -> Option { 148 | for tag in self.tags() { 149 | if let Some(offset) = tag.can_satisfy(size, align) { 150 | let addr = tag.base() + offset; 151 | 152 | if offset == 0 { 153 | // Take from the front. 154 | 155 | // If this would leave the region empty, just remove the tag. 156 | if tag.size() == size { 157 | self.remove_tag(tag); 158 | } else { 159 | // Move the tag to after the allocation. 160 | let base = tag.base() + size; 161 | let new = ptr::from_exposed_addr_mut::(base); 162 | *new = Tag { 163 | prev: tag.prev(), 164 | next: tag.next(), 165 | base, 166 | size: tag.size() - size, 167 | }; 168 | self.link_tag(new, tag.prev(), tag.next()); 169 | } 170 | } else if offset + size == tag.size() { 171 | // Take from the back. 172 | (*tag).size -= size; 173 | } else { 174 | // Take from the middle. 175 | 176 | // Create a new tag for the region after the allocation. 177 | let new = ptr::from_exposed_addr_mut::(addr + size); 178 | *new = Tag { 179 | prev: tag, 180 | next: tag.next(), 181 | base: addr, 182 | size: tag.size() - offset, 183 | }; 184 | 185 | (*tag).size = offset; 186 | self.insert_tag(new, tag.next(), tag); 187 | } 188 | 189 | return Some(addr); 190 | } 191 | } 192 | None 193 | } 194 | } 195 | 196 | impl FreelistAllocator { 197 | /// Create a new, empty `FreelistAllocator`. 
198 | pub const fn new() -> FreelistAllocator { 199 | Self { 200 | head: ptr::null_mut(), 201 | tail: ptr::null_mut(), 202 | len: 0, 203 | } 204 | } 205 | 206 | /// Returns the number of entries in the list 207 | pub const fn len(&self) -> usize { 208 | self.len 209 | } 210 | 211 | /// Returns an iterator over all entries in the list 212 | pub fn entries(&self) -> impl Iterator { 213 | self.tags().map(|tag| unsafe { &*tag }) 214 | } 215 | 216 | /// Add a region to the list 217 | pub unsafe fn add_region(&mut self, base: usize, size: usize) { 218 | self.insert(base, size); 219 | } 220 | 221 | pub fn allocate(&mut self, size: usize, align: usize) -> Option { 222 | unsafe { self.remove(size, align) } 223 | } 224 | 225 | pub unsafe fn deallocate(&mut self, base: usize, size: usize) { 226 | self.insert(base, size); 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /spark/src/mem/pmm/init_ranges.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use core::fmt; 5 | 6 | #[derive(Clone, Copy)] 7 | pub struct Range { 8 | pub base: usize, 9 | pub size: usize, 10 | } 11 | 12 | impl fmt::Debug for Range { 13 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 14 | write!( 15 | f, 16 | "Range {{ base: {:#x}, size: {:#x}, end: {:#x} }}", 17 | self.base, 18 | self.size, 19 | self.end(), 20 | ) 21 | } 22 | } 23 | 24 | impl Range { 25 | fn end(self) -> usize { 26 | self.base + self.size 27 | } 28 | 29 | fn is_empty(self) -> bool { 30 | self.size == 0 31 | } 32 | 33 | fn overlaps_with(self, other: Self) -> bool { 34 | other.base < self.end() && other.end() > self.base 35 | } 36 | 37 | fn contains(self, other: Self) -> bool { 38 | self.base <= other.base && other.end() <= self.end() 39 | } 40 | } 41 | 42 | pub struct InitRanges { 43 | ranges: [Range; MAX_RANGES], 44 | len: usize, 45 | removed: bool, 46 | } 47 | 48 | impl InitRanges { 49 | pub const fn new() -> InitRanges { 50 | Self { 51 | ranges: [Range { base: 0, size: 0 }; MAX_RANGES], 52 | len: 0, 53 | removed: false, 54 | } 55 | } 56 | 57 | pub fn ranges(&self) -> &[Range] { 58 | &self.ranges[..self.len] 59 | } 60 | 61 | fn ranges_mut(&mut self) -> &mut [Range] { 62 | &mut self.ranges[..self.len] 63 | } 64 | 65 | fn insert_range(&mut self, index: usize, range: Range) { 66 | assert!(self.len < MAX_RANGES); 67 | self.ranges.copy_within(index..self.len, index + 1); 68 | self.ranges[index] = range; 69 | self.len += 1; 70 | } 71 | 72 | fn remove_range(&mut self, index: usize) { 73 | self.ranges.copy_within(index + 1.., index); 74 | self.len -= 1; 75 | } 76 | 77 | fn range_overlaps(&self, range: Range) -> bool { 78 | self.ranges().iter().any(|r| r.overlaps_with(range)) 79 | } 80 | 81 | pub fn insert(&mut self, base: usize, size: usize) { 82 | let range = Range { base, size }; 83 | let index = self.ranges().partition_point(|r| r.base < range.base); 84 | 85 | assert!( 86 | !self.removed, 87 | "cannot insert new ranges after ranges have been removed" 88 | ); 89 | assert!(!self.range_overlaps(range)); 90 | 91 | // Check if we can merge with the previous range. 92 | if index > 0 { 93 | let prev = &mut self.ranges[index - 1]; 94 | if prev.end() == range.base { 95 | prev.size += size; 96 | // Check if we've closed a gap. 
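// (i.e. the new range ends exactly where the following range begins, so the
// previous, new, and next ranges collapse into a single entry.)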
97 | if index < self.len { 98 | let next = self.ranges[index]; 99 | let prev = &mut self.ranges[index - 1]; 100 | if prev.end() == next.base { 101 | prev.size += next.size; 102 | self.remove_range(index); 103 | } 104 | } 105 | return; 106 | } 107 | } 108 | 109 | // Check if we can merge with the next range. 110 | if index < self.len { 111 | let next = &mut self.ranges[index]; 112 | if range.end() == next.base { 113 | next.base = base; 114 | next.size += size; 115 | // Check if we've closed a gap. 116 | if index > 0 { 117 | let next = *next; 118 | let prev = &mut self.ranges[index - 1]; 119 | if prev.end() == next.base { 120 | prev.size += next.size; 121 | self.remove_range(index); 122 | } 123 | } 124 | return; 125 | } 126 | } 127 | 128 | assert!(index < MAX_RANGES, "too many memory ranges"); 129 | self.insert_range(index, range); 130 | } 131 | 132 | pub fn remove(&mut self, base: usize, size: usize) { 133 | let range = Range { base, size }; 134 | 135 | let (index, from) = self 136 | .ranges_mut() 137 | .iter_mut() 138 | .enumerate() 139 | .find(|(_, r)| r.contains(range)) 140 | .expect("`remove()` called on invalid range"); 141 | 142 | if base == from.base { 143 | from.size -= size; 144 | from.base = base + size; 145 | if from.is_empty() { 146 | self.remove_range(index); 147 | } 148 | return; 149 | } 150 | 151 | if range.end() == from.end() { 152 | from.size -= size; 153 | if from.is_empty() { 154 | self.remove_range(index); 155 | } 156 | return; 157 | } 158 | 159 | let new = Range { 160 | base: range.end(), 161 | size: from.end() - range.end(), 162 | }; 163 | 164 | from.size = range.base - from.base; 165 | self.insert_range(index + 1, new); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /spark/src/mem/pmm/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #[cfg(sbi)] 5 | mod freelist_allocator; 6 | #[cfg(sbi)] 7 | mod init_ranges; 8 | 9 | use crate::vmm::PAGE_SIZE; 10 | use core::sync::atomic::{AtomicUsize, Ordering}; 11 | #[cfg(sbi)] 12 | use { 13 | crate::{page_align_down, page_align_up, pages_for, size_of, sys::fdt}, 14 | freelist_allocator::FreelistAllocator, 15 | init_ranges::InitRanges, 16 | libsa::extern_sym, 17 | spin::Mutex, 18 | }; 19 | 20 | #[cfg(sbi)] 21 | pub static PHYSMAP: Mutex = Mutex::new(FreelistAllocator::new()); 22 | pub static MAX_PHYS_ADDR: AtomicUsize = AtomicUsize::new(0); 23 | 24 | /// Allocate frames of physical memory 25 | pub fn alloc_frames(num_frames: usize) -> Option { 26 | alloc_frames_impl(num_frames) 27 | } 28 | 29 | #[cfg(uefi)] 30 | fn alloc_frames_impl(num_frames: usize) -> Option { 31 | use uefi::table::{AllocPagesType, MemoryType}; 32 | let bs = uefi::boot_services(); 33 | bs.allocate_pages(AllocPagesType::Any, MemoryType::LOADER_DATA, num_frames) 34 | .map(|addr| addr as usize) 35 | .ok() 36 | } 37 | 38 | #[cfg(sbi)] 39 | fn alloc_frames_impl(num_frames: usize) -> Option { 40 | let mut physmap = PHYSMAP.lock(); 41 | physmap.allocate(num_frames * PAGE_SIZE, PAGE_SIZE) 42 | } 43 | 44 | /// Free physical frames of memory 45 | /// 46 | /// # Safety 47 | /// 48 | /// The memory being freed must have previously been allocated by a call 49 | /// to [`alloc_frames()`] or [`alloc_frames_aligned()`]. The only exception to 50 | /// this is when the physical memory allocator is first initialized. 
51 | /// 52 | /// # Panics 53 | /// 54 | /// On UEFI this function may panic if the firmware returns an error. 55 | pub unsafe fn free_frames(base: usize, num_frames: usize) { 56 | free_frames_impl(base, num_frames); 57 | } 58 | 59 | #[cfg(uefi)] 60 | unsafe fn free_frames_impl(base: usize, num_frames: usize) { 61 | let bs = uefi::boot_services(); 62 | bs.free_pages(base as u64, num_frames).unwrap(); 63 | } 64 | 65 | #[cfg(sbi)] 66 | unsafe fn free_frames_impl(base: usize, num_frames: usize) { 67 | let mut physmap = PHYSMAP.lock(); 68 | physmap.deallocate(base, num_frames * PAGE_SIZE); 69 | } 70 | 71 | /// Generate a Limine memory map and exit boot services. 72 | /// 73 | /// # Panics 74 | /// 75 | /// This function may panic if it fails to allocate memory for the map or fails to exit 76 | /// boot services. 77 | pub fn generate_limine_memory_map(vmspace: &mut super::vmm::AddressSpace) -> limine::MemoryMap { 78 | generate_limine_memory_map_impl(vmspace) 79 | } 80 | 81 | #[cfg(all(uefi, feature = "proto-limine"))] 82 | fn generate_limine_memory_map_impl(vmspace: &mut super::vmm::AddressSpace) -> limine::MemoryMap { 83 | use core::mem::MaybeUninit; 84 | use uefi::table::MemoryDescriptor; 85 | 86 | let bs = uefi::boot_services(); 87 | 88 | // Get the size of the memory map. 89 | let map_info = bs.get_memory_map_info().unwrap(); 90 | 91 | // Allocate buffers for everything we need: 92 | // - the memory map itself 93 | // - the Limine memory map entries 94 | // - the pointers to the Limine entries 95 | let buffer = Box::leak(vec![0; map_info.buffer_size].into_boxed_slice()); 96 | let len = map_info.buffer_size / map_info.descriptor_size; 97 | let limine_entries = Box::leak(Box::<[limine::MemoryMapEntry]>::new_uninit_slice(len)); 98 | let limine_ptrs = Box::leak(Box::new_uninit_slice(len)); 99 | 100 | // Get the memory map. 101 | let map_info = bs.get_memory_map(buffer, map_info.map_key).unwrap(); 102 | 103 | // Exit boot services. 104 | if let Err(status) = bs.exit_boot_services(uefi::image_handle(), map_info.map_key) { 105 | panic!("failed to exit boot services: {status:?}"); 106 | } 107 | 108 | // Translate the UEFI memory map into a Limine memory map. 
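    // Note: the walk below advances by `map_info.descriptor_size` rather than
    // `size_of::<MemoryDescriptor>()` -- firmware may report descriptors larger than
    // the struct definition we know about, so the reported stride must be honored.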
109 | let mut i = 0; 110 | let mut offset = 0; 111 | loop { 112 | if offset + map_info.descriptor_size >= map_info.buffer_size { 113 | break; 114 | } 115 | 116 | let efi_entry = unsafe { 117 | &*buffer[offset..][..map_info.descriptor_size] 118 | .as_ptr() 119 | .cast::() 120 | }; 121 | 122 | let limine_entry = limine::MemoryMapEntry::new( 123 | efi_entry.phys as usize, 124 | efi_entry.num_pages as usize * PAGE_SIZE, 125 | efi_entry.kind.into(), 126 | ); 127 | 128 | unsafe { 129 | let ptr = limine_entries 130 | .as_mut_ptr() 131 | .add(i) 132 | .cast::(); 133 | ptr.write(limine_entry); 134 | 135 | limine_ptrs 136 | .as_mut_ptr() 137 | .add(i) 138 | .write(MaybeUninit::new(vmspace.direct_map_ptr_mut(ptr))); 139 | } 140 | 141 | i += 1; 142 | offset += map_info.descriptor_size; 143 | } 144 | 145 | unsafe { 146 | let ptr = MaybeUninit::slice_assume_init_mut(limine_ptrs).as_mut_ptr(); 147 | limine::MemoryMap::new(ptr, i) 148 | } 149 | } 150 | 151 | #[cfg(all(sbi, feature = "proto-limine"))] 152 | fn generate_limine_memory_map_impl(vmspace: &mut super::vmm::AddressSpace) -> limine::MemoryMap { 153 | let map = PHYSMAP.lock(); 154 | 155 | let needed = pages_for!((size_of!(usize) + size_of!(limine::MemoryMapEntry)) * map.len()); 156 | drop(map); 157 | 158 | let buffer = alloc_frames(needed).unwrap() as *mut u8; 159 | 160 | let map = PHYSMAP.lock(); 161 | 162 | unsafe { 163 | let num_entries = map.entries().count(); 164 | let len = map.len(); 165 | assert_eq!(num_entries, len); 166 | 167 | let pointers = buffer.cast::<*mut limine::MemoryMapEntry>(); 168 | let entries = buffer 169 | .add(size_of!(*mut limine::MemoryMapEntry) * map.len()) 170 | .cast::(); 171 | 172 | buffer.write_bytes(0, needed * PAGE_SIZE); 173 | 174 | for (i, tag) in map.entries().enumerate() { 175 | println!("{:#010x} {:#010x}", tag.base, tag.size); 176 | let entry = entries.add(i); 177 | 178 | pointers.add(i).write(vmspace.direct_map_ptr_mut(entry)); 179 | entry.write(limine::MemoryMapEntry::new( 180 | tag.base, 181 | tag.size, 182 | limine::MemoryKind::Usable, 183 | )); 184 | 185 | println!("{:#010x?}", *entry); 186 | } 187 | 188 | limine::MemoryMap::new( 189 | pointers.with_addr(vmspace.higher_half_start() + pointers.addr()), 190 | map.len(), 191 | ) 192 | } 193 | } 194 | 195 | /// Initialize the physical memory allocator from the information in the Device Tree 196 | #[cfg(sbi)] 197 | pub fn init() { 198 | let mut init_ranges = InitRanges::<64>::new(); 199 | let mut max_phys_addr = 0; 200 | 201 | let fdt = fdt::get_fdt(); 202 | 203 | // Add all `/memory*` nodes. 204 | for node in fdt 205 | .root() 206 | .children() 207 | .filter(|node| node.name.starts_with("memory")) 208 | { 209 | for reg in node.reg().into_iter().flatten().filter_map(Result::ok) { 210 | let end = reg.addr + reg.size; 211 | max_phys_addr = max_phys_addr.max(end as usize); 212 | init_ranges.insert(reg.addr as usize, reg.size as usize); 213 | } 214 | } 215 | 216 | MAX_PHYS_ADDR.store(max_phys_addr, Ordering::Relaxed); 217 | 218 | // Remove all `/reserved-memory` nodes. 219 | for node in fdt 220 | .find_node("/reserved-memory") 221 | .into_iter() 222 | .flat_map(|node| node.children()) 223 | { 224 | for reg in node.reg().into_iter().flatten().filter_map(Result::ok) { 225 | init_ranges.remove(reg.addr as usize, reg.size as usize); 226 | } 227 | } 228 | 229 | // Remove all entries in the memory reservation block. 
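    // (These are the reservation entries in the DTB header itself, which are
    // separate from the `/reserved-memory` nodes handled above.)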
230 | for entry in fdt.memory_reservations { 231 | init_ranges.remove(entry.addr.get() as usize, entry.size.get() as usize); 232 | } 233 | 234 | // Remove the DTB. 235 | init_ranges.remove(fdt.as_ptr().addr(), fdt.total_size()); 236 | 237 | // Remove the bootloader itself. 238 | let spark_start = extern_sym!(__image_base).addr(); 239 | let spark_size = extern_sym!(__image_size).addr(); 240 | init_ranges.remove(spark_start, spark_size); 241 | 242 | // Initialize PMM. 243 | let mut physmap = PHYSMAP.lock(); 244 | for range in init_ranges.ranges() { 245 | let base = page_align_up!(range.base); 246 | let size = page_align_down!(range.base + range.size - 1) - base; 247 | unsafe { physmap.add_region(base, size) }; 248 | } 249 | } 250 | -------------------------------------------------------------------------------- /spark/src/panic.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | mod uw { 5 | use core::{ffi::c_void, ptr::addr_of_mut}; 6 | 7 | use unwinding::abi::{UnwindContext, UnwindReasonCode, _Unwind_Backtrace}; 8 | 9 | struct TraceData<'a, T> { 10 | data: &'a mut T, 11 | f: &'a mut dyn FnMut(&UnwindContext<'_>, &mut T) -> UnwindReasonCode, 12 | } 13 | 14 | extern "C" fn backtrace_callback( 15 | ctx: &UnwindContext<'_>, 16 | data: *mut c_void, 17 | ) -> UnwindReasonCode { 18 | let data = unsafe { &mut *data.cast::>() }; 19 | (data.f)(ctx, data.data) 20 | } 21 | 22 | pub fn backtrace(data: &mut T, mut f: F) -> UnwindReasonCode 23 | where 24 | F: FnMut(&UnwindContext<'_>, &mut T) -> UnwindReasonCode, 25 | { 26 | let mut data = TraceData { data, f: &mut f }; 27 | let data = addr_of_mut!(data).cast::(); 28 | 29 | _Unwind_Backtrace(backtrace_callback::, data) 30 | } 31 | } 32 | 33 | use crate::hcf; 34 | use anyhow::anyhow; 35 | use core::{ 36 | ptr, 37 | sync::atomic::{AtomicBool, Ordering}, 38 | }; 39 | use libsa::extern_sym; 40 | use symbol_map::{Symbol, SymbolMap}; 41 | 42 | #[inline] 43 | fn reloc_offset() -> usize { 44 | let offset; 45 | unsafe { 46 | asm!("lla {}, __image_base", out(reg) offset, options(nomem, nostack, preserves_flags)); 47 | } 48 | offset 49 | } 50 | 51 | // Provide a fallback definition of the symbol map so the first link (without the generated 52 | // symbol map) succeeds. We use a `u64` so it will have the proper alignment. A valid 53 | // signature in the first 4 bytes is checked before creating a `SymbolMap`, so we don't need 54 | // to worry about providing a full `SymbolMapHeader`. 
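// The real map is generated from the linked ELF by the `symbol_map` tool (see
// `tools/symbol_map`) and, when linked in, overrides these weak symbols.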
55 | global_asm!( 56 | r#" 57 | .pushsection .rodata.__dummy_symbol_map,"a",@progbits 58 | .weak __symbol_map 59 | .weak __symbol_map_size 60 | .p2align 3 61 | __symbol_map: 62 | .4byte 0 63 | .popsection 64 | "# 65 | ); 66 | 67 | fn get_symbol_map() -> Result, &'static str> { 68 | unsafe { 69 | let ptr = extern_sym!(__symbol_map as u8); 70 | let len = extern_sym!(__symbol_map_size).addr(); 71 | let bytes = &*ptr::slice_from_raw_parts(ptr, len); 72 | SymbolMap::new(bytes) 73 | } 74 | } 75 | 76 | #[inline(never)] 77 | pub fn trace_stack() -> anyhow::Result<()> { 78 | use unwinding::abi::*; 79 | 80 | println!("----- STACK TRACE -----"); 81 | 82 | let symbol_map = get_symbol_map().map_err(|err| anyhow!("failed to get symbol map: {err}"))?; 83 | 84 | let mut count = 0usize; 85 | uw::backtrace(&mut count, move |ctx, count| { 86 | let ip = _Unwind_GetIP(ctx); 87 | let orig_ip = ip - reloc_offset(); 88 | 89 | print!("{count:4}: {ip:#018x} ({orig_ip:#018x}) - "); 90 | 91 | if let Some(Symbol { name, addr, .. }) = symbol_map.lookup(orig_ip as u64) { 92 | let offset = orig_ip - addr as usize; 93 | println!("{name} + {offset:#x}"); 94 | } else { 95 | println!(""); 96 | } 97 | 98 | *count += 1; 99 | UnwindReasonCode::NO_REASON 100 | }); 101 | 102 | println!("-----------------------"); 103 | 104 | Ok(()) 105 | } 106 | 107 | #[panic_handler] 108 | fn rust_panic(info: &core::panic::PanicInfo) -> ! { 109 | println!("bootloader panic!\n{info}"); 110 | 111 | static IN_PANIC: AtomicBool = AtomicBool::new(false); 112 | 113 | if IN_PANIC.swap(true, Ordering::SeqCst) { 114 | hcf(); 115 | } 116 | 117 | if let Err(error) = trace_stack() { 118 | println!("failed to get stack trace: {error}"); 119 | } 120 | 121 | hcf(); 122 | } 123 | -------------------------------------------------------------------------------- /spark/src/proto.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod bootelf; 5 | pub mod limine; 6 | -------------------------------------------------------------------------------- /spark/src/proto/bootelf.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(feature = "proto-bootelf")] 5 | 6 | //! `bootelf` protocol 7 | //! 8 | //! The `bootelf` protocol is a very simple boot protocol for ELF kernels. 9 | //! It provides nearly the same machine state as that of SBI, but the ELF is properly mapped 10 | //! into a virtual address space. 11 | //! 12 | //! # Memory Map 13 | //! 14 | //! In addition to the kernel image, two mappings of all physical memory are created: 15 | //! 16 | //! - an identity map (physical address == virtual address) 17 | //! - a direct map at the beginning of the higher half 18 | //! 19 | //! The beginning of the higher half depends on the paging mode, which can be determined by 20 | //! reading the `satp` register. 21 | //! 22 | //! # Registers 23 | //! 24 | //! - `a0` - hart ID 25 | //! - `a1` - physical address of DTB 26 | //! - `a2` - physical address of kernel image 27 | //! - `gp` - if present in the ELF, the value of the `__global_pointer$` symbol 28 | //! - `sstatus.SIE` and `sie` are set to 0, all interrupts are disabled 29 | //! 30 | //! All other registers are undefined. 
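For illustration, a kernel targeting this protocol can pick the boot information up directly from the argument registers. The sketch below is hypothetical (it is not part of Spark and every name in it is made up); note that `sp` is among the undefined registers, so a real kernel would first establish its own stack in assembly before entering code like this.

```rust
// Hypothetical bootelf kernel entry -- illustrative only.
// a0 = hart ID, a1 = physical address of the DTB, a2 = physical address of the
// kernel image; interrupts arrive disabled (sstatus.SIE = 0, sie = 0).
#[no_mangle]
pub extern "C" fn kmain(hart_id: usize, dtb: *const u8, image_base: usize) -> ! {
    // Both an identity map and a higher-half direct map of physical memory are
    // already in place, so `dtb` can be dereferenced as-is, or through the
    // direct map once the paging mode has been read back from `satp`.
    let _ = (hart_id, dtb, image_base);
    loop {
        core::hint::spin_loop();
    }
}
```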
31 | 32 | use crate::{ 33 | config::{Entry, Value}, 34 | dev::fdt::DTB_PTR, 35 | fs::File, 36 | rtld::Rtld, 37 | vmm, BOOT_HART_ID, 38 | }; 39 | use core::sync::atomic::Ordering; 40 | use elf::Elf; 41 | 42 | pub fn main(mut fs: Box, config: &Entry) -> anyhow::Result { 43 | let Some(Value::String(kernel_path)) = config.param("kernel-path") else { 44 | panic!(); 45 | }; 46 | 47 | let kernel_path = kernel_path.strip_prefix("boot://").unwrap(); 48 | let mut kernel_file = fs.open(kernel_path)?; 49 | let kernel_data = kernel_file.read_to_end()?; 50 | let kernel_elf = Elf::new(&kernel_data).unwrap(); 51 | let mut rtld = Rtld::new(&kernel_elf).unwrap(); 52 | 53 | let paging_mode = vmm::get_max_paging_mode(); 54 | let mut vmspace = vmm::AddressSpace::new(paging_mode, paging_mode.higher_half_start()); 55 | 56 | rtld.load_image(); 57 | rtld.map_image(&mut vmspace).unwrap(); 58 | rtld.do_relocations(); 59 | 60 | let entry_point = rtld.reloc(rtld.elf.entry_point() as _); 61 | let global_pointer = rtld 62 | .elf 63 | .symbol_table() 64 | .and_then(|symtab| { 65 | symtab 66 | .find(|s| s.name() == Some("__global_pointer$")) 67 | .map(|sym| sym.value()) 68 | }) 69 | .unwrap_or(0); 70 | let boot_hartid = BOOT_HART_ID.load(Ordering::Relaxed); 71 | let dtb_ptr = DTB_PTR.load(Ordering::Relaxed); 72 | 73 | unsafe { 74 | vmspace.switch_to(); 75 | spinup( 76 | boot_hartid, 77 | dtb_ptr, 78 | rtld.image_base, 79 | global_pointer as _, 80 | entry_point, 81 | ); 82 | } 83 | } 84 | 85 | #[naked] 86 | unsafe extern "C" fn spinup( 87 | hart_id: usize, 88 | dtb_ptr: *mut u8, 89 | phys_base: usize, 90 | global_pointer: usize, 91 | entry_point: usize, 92 | ) -> ! { 93 | asm!( 94 | " 95 | mv gp, a3 96 | csrci sstatus, 0x2 97 | csrw sie, zero 98 | csrw stvec, zero 99 | csrw sscratch, zero 100 | jr a4 101 | ", 102 | options(noreturn) 103 | ); 104 | } 105 | -------------------------------------------------------------------------------- /spark/src/rtld.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{ 5 | page_align_down, page_align_up, pages_for, pmm, size_of, 6 | vmm::{AddressSpace, MapError}, 7 | }; 8 | use core::cmp; 9 | use elf::{DynTag, Elf, Rela, RelocKind, Segment, SegmentKind}; 10 | 11 | #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] 12 | pub enum LoadError { 13 | LowerHalfSegment, 14 | OverlappingSegments, 15 | NoSegments, 16 | TruncatedSegment, 17 | } 18 | 19 | pub struct Rtld<'a, 'elf> { 20 | pub elf: &'a Elf<'elf>, 21 | load_segments: Vec>, 22 | link_base: usize, 23 | // link_end: usize, 24 | image_size: usize, 25 | pub image_base: usize, 26 | flags: RtldFlags, 27 | reloc_offset: usize, 28 | } 29 | 30 | bitflags::bitflags! 
{ 31 | struct RtldFlags : u32 { 32 | const IMAGE_LOADED = 1 << 0; 33 | } 34 | } 35 | 36 | impl<'elf, 'a: 'elf> Rtld<'a, 'elf> { 37 | pub fn new(elf: &'a Elf<'elf>) -> Result, LoadError> { 38 | let mut load_segments = vec![]; 39 | let mut link_base = usize::MAX; 40 | let mut link_end = usize::MIN; 41 | // let mut dynamic = None; 42 | // let mut tls = None; 43 | 44 | for segment in elf.segments() { 45 | match segment.kind() { 46 | SegmentKind::Load => { 47 | let base = segment.virtual_address() as usize; 48 | let end = base + segment.mem_size(); 49 | 50 | if segment.mem_size() < segment.file_size() { 51 | return Err(LoadError::TruncatedSegment); 52 | } 53 | 54 | link_base = cmp::min(link_base, base); 55 | link_end = cmp::max(link_end, end); 56 | load_segments.push(segment); 57 | } 58 | // SegmentKind::Dynamic => { 59 | // assert!(dynamic.is_none()); 60 | // dynamic = Some(segment); 61 | // } 62 | // SegmentKind::Tls => { 63 | // assert!(tls.is_none()); 64 | // tls = Some(segment); 65 | // } 66 | _ => continue, 67 | } 68 | } 69 | 70 | if load_segments.is_empty() { 71 | return Err(LoadError::NoSegments); 72 | } 73 | 74 | let image_size = link_end - link_base; 75 | 76 | Ok(Self { 77 | elf, 78 | load_segments, 79 | link_base, 80 | image_size, 81 | image_base: 0, 82 | reloc_offset: 0, 83 | flags: RtldFlags::empty(), 84 | }) 85 | } 86 | 87 | // pub fn set_relocation_offset(&mut self, offset: usize) { 88 | // assert!( 89 | // !self.flags.contains(RtldFlags::IMAGE_LOADED), 90 | // "the relocation offset cannot be changed once the image has been loaded" 91 | // ); 92 | // self.reloc_offset = offset; 93 | // } 94 | // 95 | // pub fn check_ptr(&self, ptr: *const T) -> bool { 96 | // let obj_start = ptr.addr(); 97 | // let obj_end = obj_start + size_of!(T); 98 | // let img_start = self.image_base; 99 | // let img_end = img_start + self.image_size; 100 | // 101 | // img_start <= obj_start && obj_end <= img_end 102 | // } 103 | 104 | pub fn reloc(&self, addr: usize) -> usize { 105 | self.reloc_offset.wrapping_add(addr) 106 | } 107 | 108 | pub fn reloc_signed(&self, addr: isize) -> usize { 109 | self.reloc_offset.wrapping_add_signed(addr) 110 | } 111 | 112 | pub fn map_image(&mut self, vmspace: &mut AddressSpace) -> Result<(), LoadError> { 113 | assert!(self.flags.contains(RtldFlags::IMAGE_LOADED)); 114 | self.flags |= RtldFlags::IMAGE_LOADED; 115 | 116 | for segment in &self.load_segments { 117 | let virt = segment.virtual_address() as usize; 118 | let phys = self.image_base + (virt - self.link_base); 119 | let virt_p = page_align_down!(virt); 120 | let phys_p = page_align_down!(phys); 121 | 122 | if virt < vmspace.higher_half_start() { 123 | return Err(LoadError::LowerHalfSegment); 124 | } 125 | 126 | vmspace 127 | .map_pages( 128 | virt_p, 129 | phys_p, 130 | page_align_up!(virt + segment.mem_size()) - virt_p, 131 | segment.flags().into(), 132 | ) 133 | .map_err(|err| match err { 134 | MapError::OverlappingMappings => LoadError::OverlappingSegments, 135 | MapError::InvalidFlags => panic!("this shouldn't happen"), 136 | MapError::MisalignedAddr => unreachable!(), /* they damn well better be aligned */ 137 | }) 138 | .unwrap(); 139 | } 140 | 141 | Ok(()) 142 | } 143 | 144 | pub fn load_base(&self) -> usize { 145 | assert!(self.flags.contains(RtldFlags::IMAGE_LOADED)); 146 | self.reloc(self.link_base) 147 | } 148 | 149 | pub fn load_image(&mut self) { 150 | assert!(!self.flags.contains(RtldFlags::IMAGE_LOADED)); 151 | self.flags |= RtldFlags::IMAGE_LOADED; 152 | 153 | self.image_base = 
pmm::alloc_frames(pages_for!(self.image_size)).unwrap(); 154 | 155 | for segment in &self.load_segments { 156 | let virt = segment.virtual_address() as usize; 157 | let phys = self.image_base + (virt - self.link_base); 158 | 159 | unsafe { 160 | core::ptr::copy_nonoverlapping( 161 | segment.file_data().as_ptr(), 162 | phys as *mut u8, 163 | segment.file_size(), 164 | ); 165 | core::ptr::write_bytes( 166 | (phys + segment.file_size()) as *mut u8, 167 | 0, 168 | segment.mem_size() - segment.file_size(), 169 | ); 170 | } 171 | } 172 | } 173 | 174 | /// Convert a virtual address within the object to a physical address 175 | /// 176 | /// This is required for the Limine protocol to access the requests before we know 177 | /// the paging mode that will be used. 178 | pub fn to_image_ptr(&self, addr: usize) -> usize { 179 | assert!(self.flags.contains(RtldFlags::IMAGE_LOADED)); 180 | self.image_base + (addr - self.link_base) 181 | } 182 | 183 | pub fn relocation_table(&self) -> Option<&'elf [Rela]> { 184 | self.elf.dynamic_table().and_then(|dyntab| { 185 | let mut addr = None; 186 | let mut size = None; 187 | let mut count = None; 188 | 189 | for entry in dyntab.table_raw() { 190 | match entry.tag { 191 | DynTag::RELA => addr = Some(entry.value), 192 | DynTag::RELASZ => size = Some(entry.value), 193 | DynTag::RELACOUNT => count = Some(entry.value), 194 | DynTag::RELAENT => assert_eq!(entry.value, size_of!(Rela)), 195 | _ => {} 196 | } 197 | } 198 | 199 | let addr = self.image_base + (addr? - self.link_base); 200 | let len = size? / size_of!(Rela); 201 | if let Some(count) = count { 202 | assert_eq!(len, count); 203 | } 204 | 205 | unsafe { Some(core::slice::from_raw_parts(addr as *const _, len)) } 206 | }) 207 | } 208 | 209 | pub fn do_relocations(&self) { 210 | let Some(relocation_table) = self.relocation_table() else { 211 | return; 212 | }; 213 | 214 | for reloc_entry in relocation_table { 215 | let location = self.reloc(reloc_entry.offset as usize); 216 | let location = self.image_base + (location - self.load_base()); 217 | 218 | match reloc_entry.kind() { 219 | RelocKind::RISCV_NONE => {} 220 | RelocKind::RISCV_RELATIVE => { 221 | let value = self.reloc_signed(reloc_entry.addend as isize); 222 | unsafe { *(location as *mut usize) = value }; 223 | } 224 | // RelocKind::RISCV_IRELATIVE => object.has_ifuncs = true, 225 | _ => panic!(), 226 | } 227 | } 228 | } 229 | } 230 | 231 | #[no_mangle] 232 | pub extern "C" fn _relocate(reloc_slide: usize, mut dyntab: *const elf::Dyn) -> usize { 233 | const RELOC_ERROR: usize = 1 << 63; 234 | const RELOC_OK: usize = 0; 235 | 236 | let relocation_table = unsafe { 237 | let mut table_addr = None; 238 | let mut table_size = None; 239 | let mut entry_size = None; 240 | 241 | loop { 242 | let entry = dyntab.read(); 243 | 244 | match entry.tag { 245 | DynTag::NULL => break, 246 | DynTag::RELA => table_addr = Some(entry.value), 247 | DynTag::RELASZ => table_size = Some(entry.value), 248 | DynTag::RELAENT => entry_size = Some(entry.value), 249 | _ => {} 250 | } 251 | 252 | dyntab = dyntab.add(1); 253 | } 254 | 255 | if table_addr.is_none() && entry_size.is_none() { 256 | // There are no relocations 257 | return RELOC_OK; 258 | } 259 | 260 | let Some(table_addr) = table_addr else { 261 | return RELOC_ERROR; 262 | }; 263 | let Some(table_size) = table_size else { 264 | return RELOC_ERROR; 265 | }; 266 | let Some(entry_size) = entry_size else { 267 | return RELOC_ERROR; 268 | }; 269 | 270 | if entry_size != size_of!(elf::Rela) { 271 | return RELOC_ERROR; 272 | } 273 
| 274 | let data = reloc_slide.wrapping_add(table_addr) as *const elf::Rela; 275 | let len = table_size / entry_size; 276 | 277 | core::slice::from_raw_parts(data, len) 278 | }; 279 | 280 | for relocation in relocation_table { 281 | match relocation.kind() { 282 | RelocKind::RISCV_NONE => {} 283 | RelocKind::RISCV_RELATIVE => { 284 | let target = reloc_slide.wrapping_add(relocation.offset as usize); 285 | let value = reloc_slide.wrapping_add_signed(relocation.addend as isize); 286 | unsafe { *(target as *mut usize) = value }; 287 | } 288 | _ => return RELOC_ERROR, 289 | } 290 | } 291 | 292 | RELOC_OK 293 | } 294 | -------------------------------------------------------------------------------- /spark/src/smp.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #[cfg(uefi)] 5 | use crate::dev; 6 | #[cfg(feature = "fdt")] 7 | use crate::sys::fdt; 8 | 9 | pub struct Cpu { 10 | pub hartid: usize, 11 | pub processor_uid: u32, 12 | } 13 | 14 | pub fn cpus() -> Vec { 15 | #[cfg(all(feature = "acpi", uefi))] 16 | if let Some(madt) = dev::acpi::get_table::() { 17 | return unsafe { 18 | acpi::madt::iter_madt(madt) 19 | .filter_map(|entry| match entry { 20 | acpi::madt::Entry::RiscvIntc { 21 | processor_uid, 22 | hartid, 23 | .. 24 | } => Some(Cpu { 25 | processor_uid, 26 | hartid: hartid as usize, 27 | }), 28 | _ => None, 29 | }) 30 | .collect() 31 | }; 32 | } 33 | 34 | #[cfg(feature = "fdt")] 35 | if let Some(fdt) = fdt::try_get_fdt() { 36 | let mut cpus = vec![]; 37 | for node in fdt.cpus() { 38 | let Ok(reg) = node.reg_by_index(0) else { 39 | continue; 40 | }; 41 | cpus.push(Cpu { 42 | hartid: reg.addr as _, 43 | processor_uid: 0, 44 | }); 45 | } 46 | return cpus; 47 | } 48 | 49 | panic!(); 50 | } 51 | -------------------------------------------------------------------------------- /spark/src/spark.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![doc = include_str!("../../README.md")] 5 | #![no_std] 6 | #![no_main] 7 | #![feature( 8 | custom_test_frameworks, 9 | prelude_import, 10 | arbitrary_self_types, // https://github.com/rust-lang/rust/issues/44874 11 | array_windows, // https://github.com/rust-lang/rust/issues/75027 12 | asm_const, // https://github.com/rust-lang/rust/issues/93332 13 | const_mut_refs, // https://github.com/rust-lang/rust/issues/57349 14 | decl_macro, // https://github.com/rust-lang/rust/issues/39412 15 | get_mut_unchecked, // https://github.com/rust-lang/rust/issues/63292 16 | maybe_uninit_slice, // https://github.com/rust-lang/rust/issues/63569 17 | naked_functions, // https://github.com/rust-lang/rust/issues/32408 18 | never_type, // https://github.com/rust-lang/rust/issues/35121 19 | new_uninit, // https://github.com/rust-lang/rust/issues/63291 20 | offset_of, // https://github.com/rust-lang/rust/issues/106655 21 | pointer_is_aligned, // https://github.com/rust-lang/rust/issues/96284 22 | result_option_inspect, // https://github.com/rust-lang/rust/issues/91345 23 | slice_flatten, // https://github.com/rust-lang/rust/issues/95629 24 | slice_ptr_get, // https://github.com/rust-lang/rust/issues/74265 25 | slice_ptr_len, // https://github.com/rust-lang/rust/issues/71146 26 | strict_provenance, // https://github.com/rust-lang/rust/issues/95228 27 | )] 28 | 
#![reexport_test_harness_main = "test_main"] 29 | #![test_runner(test::runner)] 30 | #![warn(clippy::pedantic)] 31 | #![deny( 32 | clippy::semicolon_if_nothing_returned, 33 | clippy::debug_assert_with_mut_call 34 | )] 35 | #![allow( 36 | internal_features, 37 | clippy::cast_lossless, 38 | clippy::cast_possible_truncation, 39 | clippy::cast_ptr_alignment, 40 | clippy::enum_glob_use, 41 | clippy::inline_always, 42 | clippy::items_after_statements, 43 | clippy::module_name_repetitions, 44 | clippy::must_use_candidate, 45 | clippy::unreadable_literal, 46 | clippy::wildcard_imports 47 | )] 48 | 49 | extern crate alloc; 50 | 51 | #[prelude_import] 52 | #[allow(unused_imports)] 53 | use crate::prelude::*; 54 | #[allow(unused_imports)] 55 | mod prelude { 56 | pub use crate::console::{print, println}; 57 | pub use alloc::{ 58 | borrow::ToOwned, 59 | boxed::Box, 60 | format, 61 | string::{String, ToString}, 62 | vec, 63 | vec::Vec, 64 | }; 65 | pub use core::{ 66 | arch::{asm, global_asm}, 67 | prelude::rust_2021::*, 68 | }; 69 | } 70 | 71 | mod config; 72 | mod console; 73 | mod dev; 74 | mod fs; 75 | mod io; 76 | mod malloc; 77 | mod mem; 78 | mod panic; 79 | mod proto; 80 | mod rtld; 81 | mod smp; 82 | mod sys; 83 | mod test; 84 | mod time; 85 | mod trap; 86 | mod util; 87 | 88 | pub use anyhow::Result; 89 | pub use mem::{pmm, vmm}; 90 | 91 | use config::Value; 92 | use core::sync::atomic::AtomicUsize; 93 | 94 | pub fn hcf() -> ! { 95 | println!("bruh."); 96 | loop { 97 | core::hint::spin_loop(); 98 | } 99 | } 100 | 101 | static BOOT_HART_ID: AtomicUsize = AtomicUsize::new(0); 102 | 103 | static SPARK_CFG_PATHS: &[&str] = &["/boot/spark.cfg", "/spark.cfg"]; 104 | 105 | fn main() -> ! { 106 | // Search each volume on each disk for the config file. 107 | let mut config_file = 'b: { 108 | for disk in dev::block::DISKS.read().iter() { 109 | for volume in disk.volumes() { 110 | let mut root = match fs::mount(volume) { 111 | Ok(file) => file, 112 | Err(io::Error::Unsupported) => { 113 | // no driver for this filesystem 114 | continue; 115 | } 116 | Err(err) => { 117 | log::warn!("error mounting volume: {err:?}"); 118 | continue; 119 | } 120 | }; 121 | 122 | for path in SPARK_CFG_PATHS { 123 | match root.open(path) { 124 | Ok(file) => { 125 | break 'b file; 126 | } 127 | Err(io::Error::NotFound) => {} 128 | Err(err) => { 129 | log::warn!("error opening path {path:?}: {err:?}"); 130 | continue; 131 | } 132 | } 133 | } 134 | } 135 | } 136 | 137 | panic!("cannot find `spark.cfg` on any device"); 138 | }; 139 | 140 | let boot_config_data = config_file.read_to_end().unwrap(); 141 | let boot_config = config::parse_config_file(&boot_config_data); 142 | 143 | let boot_entry = boot_config.entries.first().expect("no boot entry"); 144 | let protocol = match boot_entry.param("protocol") { 145 | Some(Value::String(proto)) => *proto, 146 | None => panic!("`protocol` parameter was not specified"), 147 | _ => panic!("`protocol` parameter is not a string"), 148 | }; 149 | 150 | match protocol { 151 | #[cfg(feature = "proto-bootelf")] 152 | "bootelf" => proto::bootelf::main(config_file, boot_entry).unwrap(), 153 | #[cfg(feature = "proto-limine")] 154 | "limine" => proto::limine::main(config_file, boot_entry).unwrap(), 155 | _ => panic!("protocol `{protocol}` is not supported"), 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /spark/src/sys/fdt.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 
xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(feature = "fdt")] 5 | 6 | use core::fmt::Display; 7 | 8 | pub use fdt::*; 9 | 10 | static mut FDT: Option = None; 11 | 12 | pub unsafe fn init(dtb: *const u8) -> &'static Fdt<'static> { 13 | assert!(FDT.is_none(), "device tree is already initialized"); 14 | let fdt = match Fdt::from_ptr(dtb) { 15 | Ok(fdt) => fdt, 16 | Err(error) => panic!("invalid device tree: {error:?}"), 17 | }; 18 | FDT = Some(fdt); 19 | FDT.as_ref().unwrap_unchecked() 20 | } 21 | 22 | #[cfg(sbi)] 23 | pub fn get_fdt() -> &'static Fdt<'static> { 24 | let Some(fdt) = (unsafe { FDT.as_ref() }) else { 25 | panic!("device tree not yet initialized"); 26 | }; 27 | fdt 28 | } 29 | 30 | #[allow(clippy::unnecessary_wraps)] 31 | pub fn try_get_fdt() -> Option<&'static Fdt<'static>> { 32 | match unsafe { FDT.as_ref() } { 33 | Some(fdt) => Some(fdt), 34 | #[cfg(sbi)] 35 | None => panic!("no device tree"), 36 | #[cfg(not(sbi))] 37 | None => None, 38 | } 39 | } 40 | 41 | pub trait NodeExt { 42 | fn error(&self, error: P) -> anyhow::Error; 43 | } 44 | 45 | impl<'f, 'dtb: 'f> NodeExt for Node<'f, 'dtb> { 46 | fn error(&self, error: P) -> anyhow::Error { 47 | anyhow::anyhow!("{}: {error}", self.name) 48 | } 49 | } 50 | 51 | #[allow(dead_code)] 52 | #[cfg(feature = "fdt")] 53 | fn print_fdt(fdt: &fdt::Fdt) { 54 | fn print_fdt_node(node: &fdt::Node, depth: &mut usize) { 55 | (0..*depth).for_each(|_| print!(" ")); 56 | println!("{} {{", node.name); 57 | *depth += 1; 58 | for prop in node.properties() { 59 | (0..*depth).for_each(|_| print!(" ")); 60 | 61 | print!("{}", prop.name); 62 | if prop.is_empty() { 63 | println!(";"); 64 | continue; 65 | } 66 | print!(" = "); 67 | 68 | match prop.name { 69 | // "interrupt-map" 70 | // if node 71 | // .compatible() 72 | // .unwrap() 73 | // .all() 74 | // .any(|c| c == "pci-host-ecam-generic") => 75 | // { 76 | // let mut chunks = prop 77 | // .value 78 | // .chunks_exact(4) 79 | // .map(|c| u32::from_be_bytes(c.try_into().unwrap())); 80 | // println!("["); 81 | // while let Some(x) = chunks.next() { 82 | // let _y = chunks.next().unwrap(); 83 | // let _z = chunks.next().unwrap(); 84 | // let intn = chunks.next().unwrap(); 85 | // let ctrl = chunks.next().unwrap(); 86 | // let cintr = chunks.next().unwrap(); 87 | // 88 | // let bus = (x >> 16) & 0xff; 89 | // let dev = (x >> 11) & 0x1f; 90 | // let func = (x >> 8) & 0x7; 91 | // 92 | // println!(" {bus:02x}:{dev:02x}:{func:02x} INT{} on controller {ctrl:#x}, vector {cintr}", (b'A' - 1 + intn as u8) as char); 93 | // } 94 | // } 95 | "compatible" => { 96 | for (n, s) in prop.string_list().unwrap().enumerate() { 97 | if n > 0 { 98 | print!(", "); 99 | } 100 | print!("{s:?}"); 101 | } 102 | println!(";"); 103 | } 104 | "stdout-path" | "riscv,isa" | "status" | "mmu-type" | "model" | "device_type" => { 105 | println!("{};", prop.string().unwrap()); 106 | } 107 | _ => { 108 | print!("<"); 109 | for (n, cell) in prop.as_cell_slice().iter().enumerate() { 110 | if n > 0 { 111 | print!(", "); 112 | } 113 | print!("{cell:#0110x}"); 114 | } 115 | println!(">;"); 116 | } 117 | } 118 | } 119 | for node in node.children() { 120 | print_fdt_node(&node, depth); 121 | } 122 | *depth -= 1; 123 | (0..*depth).for_each(|_| print!(" ")); 124 | println!("}};"); 125 | } 126 | let root = fdt.root(); 127 | let mut depth = 0; 128 | print_fdt_node(&root, &mut depth); 129 | } 130 | -------------------------------------------------------------------------------- /spark/src/sys/mod.rs: 
-------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod fdt; 5 | pub mod sbi; 6 | pub mod uefi; 7 | -------------------------------------------------------------------------------- /spark/src/sys/sbi/locore.s: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | .pushsection .text 5 | 6 | .section .text._start,"ax",@progbits 7 | .global _start 8 | .type _start,@function 9 | _start: 10 | /* 11 | * Load the GP register with the global pointer (provided by the linker script). 12 | * Linker relaxations must be disabled as they rely on GP being set. 13 | */ 14 | .option push 15 | .option norelax 16 | lla gp, __global_pointer$ 17 | .option pop 18 | 19 | lla sp, __boot_stackp 20 | 21 | /* 22 | * Set up the trap handler.. which may or may not work before relocations 23 | * have been performed. 24 | */ 25 | lla t0, trap_entry 26 | csrw stvec, t0 27 | 28 | /* 29 | * Clear the .bss segment. 30 | * 31 | * The linker script ensures __bss is properly aligned, and the size is at least 32 | * 8 bytes. 33 | */ 34 | lla t0, __bss 35 | lla t1, __ebss 36 | 0: sd zero, (t0) 37 | addi t0, t0, 8 38 | bltu t0, t1, 0b 39 | 40 | /* 41 | * We may or may not have been loaded to our linked address. 42 | */ 43 | mv s0, a0 44 | mv s1, a1 45 | lla a0, __image_base 46 | lla a1, _DYNAMIC 47 | call _relocate 48 | bnez a0, error 49 | mv a0, s0 50 | mv a1, s1 51 | 52 | mv fp, zero 53 | call spark_main 54 | 55 | error: 56 | csrci sstatus, 0x2 57 | wfi 58 | j . 59 | .size _start, . - _start 60 | 61 | 62 | .altmacro 63 | 64 | .macro STORE_GP_REG reg 65 | .if reg != 2 66 | sd x\reg, (8 * \reg)(sp) 67 | .endif 68 | .endm 69 | 70 | .macro LOAD_GP_REG reg 71 | .if reg != 2 72 | ld x\reg, (8 * \reg)(sp) 73 | .endif 74 | .endm 75 | 76 | .macro STORE_REGS 77 | .set reg, 0 78 | .rept 32 79 | STORE_GP_REG %reg 80 | .set reg, reg + 1 81 | .endr 82 | .endm 83 | 84 | .macro LOAD_REGS 85 | .set reg, 0 86 | .rept 32 87 | LOAD_GP_REG %reg 88 | .set reg, reg + 1 89 | .endr 90 | .endm 91 | 92 | 93 | // Trap Entry Point 94 | // 95 | // Due to restrictions of the `stvec` CSR, this entry point must be aligned on a 4-byte boundary. 96 | .section .text.trap_entry,"ax",@progbits 97 | .global trap_entry 98 | .align 4 99 | trap_entry: 100 | // TODO: What if the stack is fucked? 101 | 102 | addi sp, sp, -(8 * 68) 103 | STORE_REGS 104 | 105 | csrr t0, sstatus 106 | csrr t1, scause 107 | csrr t2, sepc 108 | csrr t3, stval 109 | sd t0, (8 * (64 + 0))(sp) 110 | sd t1, (8 * (64 + 1))(sp) 111 | sd t2, (8 * (64 + 2))(sp) 112 | sd t3, (8 * (64 + 3))(sp) 113 | 114 | // Prepare to enter Rust. 115 | // Align the stack to 8 bytes, stash the previous stack pointer (and pointer to the trap 116 | // frame) in a saved register that won't be clobbered by the calling convention. 117 | mv s1, sp 118 | andi sp, sp, ~0xf 119 | 120 | // Make it look as if the trapped code "called" this function. 121 | // This stops stack traces from missing the trapped function. 122 | addi sp, sp, -16 123 | sd t2, 8(sp) 124 | sd fp, 0(sp) 125 | addi fp, sp, 16 126 | 127 | mv a0, s1 128 | call trap_handler 129 | 130 | // Restore the old stack pointer. 131 | mv sp, s1 132 | 133 | // It's unlikely that we return, since we don't use interrupts all traps will end up being 134 | // (fatal) exceptions. 
In case we ever do, reload `sstatus` and `sepc` so the trap handler 135 | // can make any necessary changes. 136 | ld t0, (8 * (64 + 0))(sp) 137 | ld t1, (8 * (64 + 2))(sp) 138 | csrw sstatus, t0 139 | csrw sepc, t1 140 | 141 | // Reload all the registers we saved earlier and return to the trapped code. 142 | LOAD_REGS 143 | addi sp, sp, (8 * 68) 144 | 145 | sret 146 | 147 | .popsection 148 | -------------------------------------------------------------------------------- /spark/src/sys/sbi/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | mod start; 5 | -------------------------------------------------------------------------------- /spark/src/sys/sbi/start.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(sbi)] 5 | 6 | use crate::{console, dev, hcf, io, pmm, sys::fdt, time, BOOT_HART_ID}; 7 | use core::sync::atomic::Ordering; 8 | 9 | global_asm!(include_str!("locore.s"), options(raw)); 10 | 11 | #[no_mangle] 12 | extern "C" fn spark_main(hartid: usize, dtb_ptr: *mut u8) -> ! { 13 | // Initialize the logger 14 | // TODO: Use legacy SBI console until we probe for consoles. 15 | io::init(); 16 | 17 | BOOT_HART_ID.store(hartid, Ordering::Relaxed); 18 | 19 | // Install the Device Tree 20 | let fdt = unsafe { fdt::init(dtb_ptr) }; 21 | 22 | let Some(timebase_freq) = fdt.property_as::("/cpus/timebase-frequency") else { 23 | log::error!("device tree missing `/cpus/timebase-frequency` property"); 24 | hcf(); 25 | }; 26 | time::init(timebase_freq as u64); 27 | 28 | // Bootstrap memory allocation 29 | pmm::init(); 30 | 31 | console::init(); 32 | 33 | // Probe the full device tree before we search for a boot partition 34 | dev::init(); 35 | 36 | crate::main(); 37 | } 38 | -------------------------------------------------------------------------------- /spark/src/sys/uefi/locore.s: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | .pushsection .text 5 | 6 | .section .pe_headers 7 | 8 | mz_header: 9 | .ascii "MZ" // MZ magic 10 | .4byte 0x7ff0006f // j 0x1000 11 | .zero 54 12 | .4byte pe_header // offset to PE header 13 | 14 | .p2align 3 15 | pe_header: 16 | .ascii "PE\0\0" // PE magic 17 | .2byte 0x5064 // machine (IMAGE_FILE_MACHINE_RISCV64) 18 | .2byte section_count // num sections 19 | .4byte 0 // creation timestamp 20 | .4byte 0 // symbol table offset 21 | .4byte 0 // num symbols 22 | .2byte section_table - opt_header // optional header size 23 | .2byte 0x0226 // characteristics 24 | // IMAGE_FILE_EXECUTABLE_IMAGE 25 | // IMAGE_FILE_LINE_NUMS_STRIPPED 26 | // IMAGE_FILE_LARGE_ADDRESS_AWARE 27 | // IMAGE_FILE_DEBUG_STRIPPED 28 | 29 | opt_header: 30 | .2byte 0x020b // magic number (PE32+) 31 | .byte 0x02, 0x26 // maj,min linker version 32 | .4byte __text_size // size of code 33 | .4byte __data_size_init // size of initialized data section 34 | .4byte 0 // size of uninitialized data section 35 | .4byte _start // entry point (relative to __image_base) 36 | .4byte __text // code base (relative to __image_base) 37 | .8byte __image_base // image base 38 | .4byte 0x1000 // section alignment 39 | .4byte 0x1000 // file alignment 40 | .2byte 0, 0 // 
maj,min os version 41 | .2byte 0, 0 // maj,min image version 42 | .2byte 0, 0 // maj,min subsys version 43 | .4byte 0 // win32 version (must be 0) 44 | .4byte __image_size // size of image 45 | .4byte __text // size of headers (multiple of file alignment) 46 | .4byte 0 // checksum 47 | .2byte 10 // subsystem (IMAGE_SUBSYSTEM_EFI_APPLICATION) 48 | .2byte 0x8160 // dll characteristics 49 | // IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA 50 | // IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE 51 | // IMAGE_DLLCHARACTERISTICS_NX_COMPAT 52 | // IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 53 | .8byte 0 // size of stack reserve 54 | .8byte 0 // size of stack commit 55 | .8byte 0 // size of heap reserve 56 | .8byte 0 // size of heap commit 57 | .4byte 0 // loader flags 58 | .4byte data_dirs_count // num data dir entries 59 | 60 | data_dirs: 61 | .4byte 0, 0 // export table 62 | .4byte 0, 0 // import table 63 | .4byte 0, 0 // resource table 64 | .4byte 0, 0 // exception table 65 | .4byte 0, 0 // certificate table 66 | .4byte reloc, 12 // base relocation table 67 | .set data_dirs_count, (. - data_dirs) / 8 68 | 69 | section_table: 70 | .ascii ".text\0\0\0" // name 71 | .4byte __text_size // memory size 72 | .4byte __text // virt address 73 | .4byte __text_size // file size 74 | .4byte __text // file offset 75 | .4byte 0 // relocations 76 | .4byte 0 // line numbers 77 | .2byte 0 // num relocations 78 | .2byte 0 // num line numbers 79 | .4byte 0x60000020 // characteristics 80 | // IMAGE_SCN_CNT_CODE 81 | // IMAGE_SCN_MEM_EXECUTE 82 | // IMAGE_SCN_MEM_READ 83 | 84 | .ascii ".rdata\0\0" 85 | .4byte __rodata_size 86 | .4byte __rodata 87 | .4byte __rodata_size 88 | .4byte __rodata 89 | .4byte 0 90 | .4byte 0 91 | .2byte 0 92 | .2byte 0 93 | .4byte 0x40000040 // characteristics 94 | // IMAGE_SCN_CNT_INITIALIZED_DATA 95 | // IMAGE_SCN_MEM_READ 96 | 97 | .ascii ".reloc\0\0" 98 | .4byte 12 99 | .4byte reloc 100 | .4byte 0x1000 101 | .4byte reloc 102 | .4byte 0 103 | .4byte 0 104 | .2byte 0 105 | .2byte 0 106 | .4byte 0x40000040 // characteristics 107 | // IMAGE_SCN_CNT_INITIALIZED_DATA 108 | // IMAGE_SCN_MEM_READ 109 | 110 | .ascii ".sbat\0\0\0" 111 | .4byte __sbat_sizev 112 | .4byte __sbat 113 | .4byte __sbat_size 114 | .4byte __sbat 115 | .4byte 0 116 | .4byte 0 117 | .2byte 0 118 | .2byte 0 119 | .4byte 0x40000000 // characteristics 120 | // IMAGE_SCN_MEM_READ 121 | 122 | .ascii ".data\0\0\0" 123 | .4byte __data_size 124 | .4byte __data 125 | .4byte __data_size_init 126 | .4byte __data 127 | .4byte 0 128 | .4byte 0 129 | .2byte 0 130 | .2byte 0 131 | .4byte 0xc0000040 // characteristics 132 | // IMAGE_SCN_CNT_INITIALIZED_DATA 133 | // IMAGE_SCN_MEM_READ 134 | // IMAGE_SCN_MEM_WRITE 135 | .set section_count, (. - section_table) / 40 136 | 137 | .section .data.reloc 138 | reloc: 139 | .4byte 0x1000 140 | .4byte 12 141 | .2byte 0 142 | 143 | .section .note.GNU-stack,"",@progbits 144 | 145 | .section .text._start,"ax",@progbits 146 | .global _start 147 | .type _start,@function 148 | _start: 149 | 150 | .option push 151 | .option norelax 152 | lla gp, __global_pointer$ 153 | .option pop 154 | 155 | mv t0, sp 156 | lla sp, __boot_stackp 157 | 158 | addi sp, sp, -(8 * 4) 159 | sd s0, 0x00(sp) 160 | sd s1, 0x08(sp) 161 | sd s2, 0x10(sp) 162 | sd s3, 0x18(sp) 163 | 164 | mv s0, a0 165 | mv s1, a1 166 | mv s2, ra 167 | mv s3, sp 168 | 169 | /* 170 | * We may or may not have been loaded to our linked address. 
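 * `_relocate` (rtld.rs) walks the `_DYNAMIC` table and applies any
 * R_RISCV_RELATIVE entries against the address we actually run from, so
 * position-dependent data is patched before we enter Rust.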
171 | */ 172 | lla a0, __image_base 173 | lla a1, _DYNAMIC 174 | call _relocate 175 | bnez a0, error 176 | 177 | mv a0, s0 178 | mv a1, s1 179 | mv fp, zero 180 | call spark_main 181 | 182 | error: 183 | mv ra, s2 184 | mv sp, s3 185 | ld s0, 0x00(sp) 186 | ld s1, 0x08(sp) 187 | ld s2, 0x10(sp) 188 | ld s3, 0x18(sp) 189 | addi sp, sp, (8 * 4) 190 | 191 | ret 192 | .size _start, . - _start 193 | 194 | .popsection 195 | -------------------------------------------------------------------------------- /spark/src/sys/uefi/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #[cfg(uefi)] 5 | mod start; 6 | -------------------------------------------------------------------------------- /spark/src/sys/uefi/start.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use crate::{dev, io, sys::fdt, BOOT_HART_ID}; 5 | use core::sync::atomic::Ordering; 6 | use uefi::{ 7 | proto::riscv::RiscvBoot, 8 | table::{SystemTable, TableGuid}, 9 | Handle, 10 | }; 11 | 12 | global_asm!(include_str!("locore.s"), options(raw)); 13 | 14 | global_asm!( 15 | r#" 16 | .section .data.sbat 17 | sbat: 18 | .ascii "sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md\n" 19 | "#, 20 | concat!( 21 | r#".ascii "spark,1,Spark,spark,"#, 22 | env!("CARGO_PKG_VERSION"), 23 | r#",https://github.com/bolt-os/spark\n""#, 24 | ), 25 | "__sbat_endv:", 26 | ); 27 | 28 | #[no_mangle] 29 | extern "C" fn spark_main(image: Handle, system_table: &'static SystemTable) -> ! { 30 | unsafe { uefi::bootstrap(image, system_table) }; 31 | 32 | io::init(); 33 | println!(); 34 | 35 | // Print the address we've been loaded to for easier debugging. 
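    // (`lla` produces the PC-relative, run-time address, so this reflects wherever
    // the firmware actually placed the image rather than the linked base.)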
36 | let image_base: usize; 37 | unsafe { 38 | asm!("lla {}, __image_base", out(reg) image_base, options(nomem, nostack)); 39 | } 40 | log::debug!("image base: {image_base:#x}"); 41 | 42 | let boot_services = uefi::boot_services(); 43 | 44 | let mut riscv_boot_proto = boot_services 45 | .first_protocol::() 46 | .expect("risc-v boot protocol is not available"); 47 | 48 | let hartid = riscv_boot_proto 49 | .get_boot_hartid() 50 | .expect("failed to get bsp's hart id"); 51 | BOOT_HART_ID.store(hartid, Ordering::Relaxed); 52 | 53 | let config_table = system_table.config_table(); 54 | 55 | if let Some(ptr) = config_table.get_table(TableGuid::ACPI_20) { 56 | dev::acpi::init(ptr.cast()); 57 | } else if let Some(ptr) = config_table.get_table(TableGuid::ACPI) { 58 | dev::acpi::init(ptr.cast()); 59 | } 60 | 61 | if let Some(ptr) = config_table.get_table(TableGuid::DEVICE_TREE) { 62 | unsafe { fdt::init(ptr.cast()) }; 63 | } 64 | 65 | dev::init(); 66 | 67 | crate::main(); 68 | } 69 | -------------------------------------------------------------------------------- /spark/src/test.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | #![cfg(test)] 5 | 6 | pub fn runner(_tests: &[&dyn Fn()]) { 7 | todo!(); 8 | } 9 | -------------------------------------------------------------------------------- /spark/src/time.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use core::{ 5 | sync::atomic::{AtomicU64, Ordering}, 6 | time::Duration, 7 | }; 8 | 9 | const MICROS_PER_SECOND: u64 = 1000000; 10 | 11 | /// System Time Frequency 12 | /// 13 | /// This value is the rate at which the `time` CSR is incremented. 
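///
/// On SBI systems it is read from the `/cpus/timebase-frequency` device tree
/// property during early startup (see `sys::sbi::start`).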
14 | static TIMEBASE_FREQUENCY: AtomicU64 = AtomicU64::new(0); 15 | 16 | fn timebase_frequency() -> u64 { 17 | TIMEBASE_FREQUENCY.load(Ordering::Relaxed) 18 | } 19 | 20 | #[cfg(target_arch = "riscv64")] 21 | fn get_monotonic_count() -> u64 { 22 | let count: u64; 23 | 24 | unsafe { 25 | asm!("rdtime {}", out(reg) count, options(nomem, nostack, preserves_flags)); 26 | } 27 | 28 | count 29 | } 30 | 31 | #[derive(Clone, Copy)] 32 | pub struct Instant(u64); 33 | 34 | impl Instant { 35 | pub fn now() -> Instant { 36 | Self(get_monotonic_count()) 37 | } 38 | 39 | pub fn duration_since(self, earlier: Instant) -> Duration { 40 | self.checked_duration_since(earlier).unwrap_or_default() 41 | } 42 | 43 | pub fn checked_duration_since(self, earlier: Instant) -> Option { 44 | let freq = timebase_frequency(); 45 | let ticks_per_micro = (freq + MICROS_PER_SECOND - 1) / MICROS_PER_SECOND; 46 | 47 | let diff = self.0.checked_sub(earlier.0)?; 48 | 49 | let secs = diff / freq; 50 | let rems = diff % freq; 51 | let nanos = (rems / ticks_per_micro) * 1000; 52 | 53 | Some(Duration::new(secs, nanos as u32)) 54 | } 55 | } 56 | 57 | impl core::ops::Sub for Instant { 58 | type Output = Duration; 59 | 60 | fn sub(self, rhs: Self) -> Self::Output { 61 | self.duration_since(rhs) 62 | } 63 | } 64 | 65 | pub fn init(frequency: u64) { 66 | TIMEBASE_FREQUENCY.store(frequency, Ordering::Relaxed); 67 | log::info!("timebase frequency: {}", timebase_frequency()); 68 | } 69 | 70 | #[derive(Clone, Copy)] 71 | pub struct Timeout { 72 | start: Instant, 73 | duration: Duration, 74 | } 75 | 76 | impl Timeout { 77 | pub fn start(duration: Duration) -> Timeout { 78 | Timeout { 79 | start: Instant::now(), 80 | duration, 81 | } 82 | } 83 | 84 | pub fn expired(&self) -> bool { 85 | Instant::now() - self.start >= self.duration 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /spark/src/trap.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | struct TrapFrame { 5 | gpr: [usize; 32], 6 | #[cfg(target_feature = "d")] 7 | #[allow(dead_code)] 8 | fpr: [usize; 32], 9 | sstatus: u64, 10 | scause: u64, 11 | sepc: u64, 12 | stval: u64, 13 | } 14 | 15 | impl TrapFrame { 16 | fn is_exception(&self) -> bool { 17 | self.scause & 1 << 63 == 0 18 | } 19 | 20 | fn is_interrupt(&self) -> bool { 21 | !self.is_exception() 22 | } 23 | 24 | fn cause(&self) -> u64 { 25 | self.scause & !(1 << 63) 26 | } 27 | } 28 | 29 | fn dump_registers(tf: &TrapFrame) { 30 | static ABI_NAMES: &[&str] = &[ 31 | " ", "ra", "t0", "sp", "t1", "gp", "t2", "tp", "t3", "s0", "t4", "s1", "t5", "s2", "t6", 32 | "s3", "a0", "s4", "a1", "s5", "a2", "s6", "a3", "s7", "a4", "s8", "a5", "s9", "a6", "s10", 33 | "a7", "s11", 34 | ]; 35 | static ISA_NUMS: &[u8] = &[ 36 | 0, 1, 5, 2, 6, 3, 7, 4, 28, 8, 29, 9, 30, 18, 31, 19, 10, 20, 11, 21, 12, 22, 13, 23, 14, 37 | 24, 15, 25, 16, 26, 17, 27, 38 | ]; 39 | 40 | for (isa_num, abi_name) in ISA_NUMS.chunks(2).zip(ABI_NAMES.chunks(2)) { 41 | println!( 42 | "x{: <2} {: <3}: {:#018x} x{: <2} {: <3}: {:#018x}", 43 | isa_num[0], 44 | abi_name[0], 45 | tf.gpr[isa_num[0] as usize], 46 | isa_num[1], 47 | abi_name[1], 48 | tf.gpr[isa_num[1] as usize], 49 | ); 50 | } 51 | } 52 | 53 | static EXCEPTION_NAMES: &[Option<&str>] = &[ 54 | Some("instruction alignment fault"), 55 | Some("instruction access fault"), 56 | Some("illegal instruction"), 57 | 
Some("breakpoint"), 58 | Some("load address misaligned"), 59 | Some("load access fault"), 60 | Some("store/amo address misaligned"), 61 | Some("store/amo access fault"), 62 | Some("environment call from u-mode"), 63 | Some("environment call from s-mode"), 64 | None, 65 | None, 66 | Some("instruction page fault"), 67 | Some("load page fault"), 68 | None, 69 | Some("store/amo page fault"), 70 | ]; 71 | 72 | #[no_mangle] 73 | extern "C" fn trap_handler(tf: &mut TrapFrame) { 74 | if tf.is_interrupt() { 75 | log::error!("unhandled interrupt #{}", tf.cause()); 76 | return; 77 | } 78 | 79 | if let Some(name) = EXCEPTION_NAMES.get(tf.cause() as usize).copied().flatten() { 80 | log::error!("{name}"); 81 | } else { 82 | log::error!("unhandled exception ({})", tf.cause()); 83 | } 84 | 85 | println!( 86 | "sstatus: {:#018x}, sepc: {:#018x}, stval: {:#018x}", 87 | tf.sstatus, tf.sepc, tf.stval 88 | ); 89 | dump_registers(tf); 90 | 91 | panic!("fatal exception"); 92 | } 93 | -------------------------------------------------------------------------------- /spark/src/util/maybe_static_arc.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | use alloc::sync::Arc; 5 | use core::ops::Deref; 6 | 7 | pub enum MaybeStaticArc { 8 | Static(&'static T), 9 | Arc(Arc), 10 | } 11 | 12 | impl Deref for MaybeStaticArc { 13 | type Target = T; 14 | 15 | fn deref(&self) -> &Self::Target { 16 | match self { 17 | Self::Static(ptr) => ptr, 18 | Self::Arc(ptr) => ptr, 19 | } 20 | } 21 | } 22 | 23 | impl Clone for MaybeStaticArc { 24 | fn clone(&self) -> Self { 25 | match self { 26 | Self::Arc(arc) => Self::Arc(Arc::clone(arc)), 27 | Self::Static(ptr) => Self::Static(ptr), 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /spark/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2022-2023 xvanc and contributors 2 | // SPDX-License-Identifier: BSD-3-Clause 3 | 4 | pub mod maybe_static_arc; 5 | -------------------------------------------------------------------------------- /tools/symbol_map/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "bitflags" 7 | version = "1.3.2" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 10 | 11 | [[package]] 12 | name = "bolt-libelf" 13 | version = "0.1.0" 14 | source = "git+https://github.com/bolt-os/libelf.git#1f562d76d9aa084a965b76f45e57b13fc8bd5745" 15 | dependencies = [ 16 | "bitflags", 17 | ] 18 | 19 | [[package]] 20 | name = "bytemuck" 21 | version = "1.14.0" 22 | source = "registry+https://github.com/rust-lang/crates.io-index" 23 | checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" 24 | dependencies = [ 25 | "bytemuck_derive", 26 | ] 27 | 28 | [[package]] 29 | name = "bytemuck_derive" 30 | version = "1.5.0" 31 | source = "registry+https://github.com/rust-lang/crates.io-index" 32 | checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1" 33 | dependencies = [ 34 | "proc-macro2", 35 | "quote", 36 | "syn", 37 | ] 38 | 39 | [[package]] 40 | name = "libsa" 41 | version = "0.1.0" 42 | source = "git+https://github.com/bolt-os/libsa.git#f54301e79c255807f0a8f938b5a2e8df831bfe1c" 43 | dependencies = [ 44 | "bytemuck", 45 | ] 46 | 47 | [[package]] 48 | name = "proc-macro2" 49 | version = "1.0.70" 50 | source = "registry+https://github.com/rust-lang/crates.io-index" 51 | checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" 52 | dependencies = [ 53 | "unicode-ident", 54 | ] 55 | 56 | [[package]] 57 | name = "quote" 58 | version = "1.0.33" 59 | source = "registry+https://github.com/rust-lang/crates.io-index" 60 | checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" 61 | dependencies = [ 62 | "proc-macro2", 63 | ] 64 | 65 | [[package]] 66 | name = "rustc-demangle" 67 | version = "0.1.23" 68 | source = "registry+https://github.com/rust-lang/crates.io-index" 69 | checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" 70 | 71 | [[package]] 72 | name = "symbol_map" 73 | version = "0.1.0" 74 | dependencies = [ 75 | "bolt-libelf", 76 | "bytemuck", 77 | "libsa", 78 | "rustc-demangle", 79 | ] 80 | 81 | [[package]] 82 | name = "syn" 83 | version = "2.0.39" 84 | source = "registry+https://github.com/rust-lang/crates.io-index" 85 | checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" 86 | dependencies = [ 87 | "proc-macro2", 88 | "quote", 89 | "unicode-ident", 90 | ] 91 | 92 | [[package]] 93 | name = "unicode-ident" 94 | version = "1.0.12" 95 | source = "registry+https://github.com/rust-lang/crates.io-index" 96 | checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" 97 | -------------------------------------------------------------------------------- /tools/symbol_map/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "symbol_map" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [features] 7 | default = [] 8 | bytemuck = ["dep:bytemuck", "libsa/bytemuck"] 9 | xtask = ["bytemuck", "dep:bolt-libelf", "dep:rustc-demangle"] 10 | 11 | [dependencies] 12 | bytemuck = { version = "1.14.0", optional = true, features = ["derive"] } 13 | bolt-libelf = { git = "https://github.com/bolt-os/libelf.git", optional = true } 14 | libsa = { git = "https://github.com/bolt-os/libsa.git" } 15 | rustc-demangle = { version = "0.1.23", optional = true } 16 | -------------------------------------------------------------------------------- 
/tools/symbol_map/src/generate.rs:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2023 xvanc and contributors
3 |  * SPDX-License-Identifier: BSD-3-Clause
4 |  */
5 | 
6 | extern crate alloc;
7 | extern crate std;
8 | 
9 | use crate::{RawSymbol, SymbolMapHeader, MAGIC};
10 | use alloc::{boxed::Box, format, string::String, vec};
11 | use core::{fmt, mem::size_of};
12 | use elf::{Elf, SymbolKind};
13 | use libsa::endian::u32_le;
14 | use std::{fs, io, path::Path};
15 | 
16 | #[derive(Debug)]
17 | pub enum Error {
18 |     /// Invalid ELF file
19 |     InvalidElf(&'static str),
20 |     /// The executable does not contain a symbol table
21 |     NoSymbolTable,
22 |     /// An individual symbol name is too large
23 |     SymbolTooBig,
24 |     /// The generated symbol map would be too large
25 |     MapTooBig,
26 | 
27 |     Io(io::Error),
28 | }
29 | 
30 | impl From<io::Error> for Error {
31 |     fn from(error: io::Error) -> Error {
32 |         Error::Io(error)
33 |     }
34 | }
35 | 
36 | impl fmt::Display for Error {
37 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
38 |         match self {
39 |             Error::InvalidElf(error) => write!(f, "invalid ELF: {error}"),
40 |             Error::NoSymbolTable => write!(f, "no symbol table"),
41 |             Error::SymbolTooBig => write!(f, "symbol name too large"),
42 |             Error::MapTooBig => write!(f, "generated symbol map would be too large"),
43 |             Error::Io(error) => write!(f, "io error: {error}"),
44 |         }
45 |     }
46 | }
47 | 
48 | impl std::error::Error for Error {}
49 | 
50 | #[inline(always)]
51 | pub fn generate<P: AsRef<Path>>(path: P) -> Result<Box<[u8]>, Error> {
52 |     generate_(path.as_ref())
53 | }
54 | 
55 | fn generate_(path: &Path) -> Result<Box<[u8]>, Error> {
56 |     let file_data = fs::read(path)?;
57 |     let elf = Elf::new(&file_data).map_err(Error::InvalidElf)?;
58 |     let symbol_table = elf.symbol_table().ok_or(Error::NoSymbolTable)?;
59 |     let string_table = elf.string_table().ok_or(Error::NoSymbolTable)?;
60 | 
61 |     let mut table = vec![];
62 |     let mut string = String::new();
63 | 
64 |     for symbol in symbol_table
65 |         .symbols()
66 |         .filter(|s| s.kind() == SymbolKind::Func)
67 |     {
68 |         if let Some(name) = string_table.get_string(symbol.name_index()) {
69 |             let name = format!("{:#}", rustc_demangle::demangle(name));
70 |             let name_offset = string.len();
71 |             string.push_str(&name);
72 |             table.push(RawSymbol {
73 |                 addr: symbol.value().into(),
74 |                 size: symbol.size().into(),
75 |                 name: u32::try_from(name_offset)
76 |                     .map_err(|_| Error::MapTooBig)?
77 |                     .into(),
78 |                 name_len: u32::try_from(name.len())
79 |                     .map_err(|_| Error::SymbolTooBig)?
80 |                     .into(),
81 |             })
82 |         }
83 |     }
84 | 
85 |     table.sort_unstable_by_key(|s| s.addr);
86 | 
87 |     let hdr_offset = 0;
88 |     let hdr_size = size_of::<SymbolMapHeader>();
89 |     let tab_offset = hdr_offset + hdr_size;
90 |     let tab_size = table.len() * size_of::<RawSymbol>();
91 |     let str_offset = tab_offset + tab_size;
92 |     let str_size = string.len();
93 |     let total_size = str_offset + str_size;
94 | 
95 |     if total_size > 0x100000000 {
96 |         return Err(Error::MapTooBig);
97 |     }
98 | 
99 |     let header = SymbolMapHeader {
100 |         magic: MAGIC,
101 |         total_size: u32_le::new(total_size as u32),
102 |         reserved0: 0,
103 |         table_offset: u32_le::new(tab_offset as u32),
104 |         table_len: u32_le::new(table.len() as u32),
105 |         string_offset: u32_le::new(str_offset as u32),
106 |         string_len: u32_le::new(string.len() as u32),
107 |     };
108 | 
109 |     let mut buf = vec![0u8; total_size].into_boxed_slice();
110 | 
111 |     let header_buf = bytemuck::from_bytes_mut(&mut buf[..hdr_size]);
112 |     *header_buf = header;
113 | 
114 |     let table_buf = bytemuck::cast_slice_mut(&mut buf[tab_offset..][..tab_size]);
115 |     table_buf.copy_from_slice(&table);
116 | 
117 |     buf[str_offset..].copy_from_slice(string.as_bytes());
118 | 
119 |     Ok(buf)
120 | }
121 | 
--------------------------------------------------------------------------------
/tools/symbol_map/src/lib.rs:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2023 xvanc and contributors
3 |  * SPDX-License-Identifier: BSD-3-Clause
4 |  */
5 | 
6 | #![no_std]
7 | 
8 | #[cfg(feature = "xtask")]
9 | mod generate;
10 | #[cfg(feature = "xtask")]
11 | pub use generate::*;
12 | 
13 | use core::{mem::size_of, ptr};
14 | use libsa::endian::{u32_le, u64_le};
15 | 
16 | pub const MAGIC: [u8; 8] = *b"SPARKSYM";
17 | 
18 | #[repr(C, align(8))]
19 | #[derive(Clone, Copy, Debug)]
20 | #[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
21 | pub struct SymbolMapHeader {
22 |     pub magic: [u8; 8],
23 |     reserved0: u32,
24 |     pub total_size: u32_le,
25 |     pub table_offset: u32_le,
26 |     pub table_len: u32_le,
27 |     pub string_offset: u32_le,
28 |     pub string_len: u32_le,
29 | }
30 | 
31 | impl SymbolMapHeader {
32 |     pub fn total_size(&self) -> usize {
33 |         self.total_size.get() as usize
34 |     }
35 | }
36 | 
37 | #[repr(C)]
38 | #[derive(Clone, Copy, Debug)]
39 | #[cfg_attr(feature = "bytemuck", derive(bytemuck::Pod, bytemuck::Zeroable))]
40 | pub struct RawSymbol {
41 |     pub addr: u64_le,
42 |     pub size: u64_le,
43 |     pub name: u32_le,
44 |     pub name_len: u32_le,
45 | }
46 | 
47 | pub struct SymbolMap<'sym> {
48 |     symbols: &'sym [RawSymbol],
49 |     strings: &'sym str,
50 | }
51 | 
52 | impl<'sym> SymbolMap<'sym> {
53 |     pub fn new(bytes: &'sym [u8]) -> Result<SymbolMap<'sym>, &'static str> {
54 |         if bytes.as_ptr().align_offset(8) != 0 {
55 |             return Err("unaligned");
56 |         }
57 |         if bytes.len() < size_of::<SymbolMapHeader>() {
58 |             return Err("buffer too small");
59 |         }
60 | 
61 |         let header = unsafe { &*bytes.as_ptr().cast::<SymbolMapHeader>() };
62 | 
63 |         if header.magic != MAGIC {
64 |             return Err("invalid magic");
65 |         }
66 |         if bytes.len() < header.total_size() {
67 |             return Err("buffer too small");
68 |         }
69 | 
70 |         let offset = header.table_offset.get() as usize;
71 |         let len = header.table_len.get() as usize;
72 |         let size = len * size_of::<RawSymbol>();
73 |         let symbols = bytes[offset..][..size].as_ptr().cast::<RawSymbol>();
74 |         let symbols = unsafe { &*ptr::slice_from_raw_parts(symbols, len) };
75 | 
76 |         let offset = header.string_offset.get() as usize;
77 |         let len = header.string_len.get() as usize;
78 |         let Ok(strings) = core::str::from_utf8(&bytes[offset..][..len]) else {
79 |             return Err("non UTF-8 string table");
80 |         };
81 | 
82 |         Ok(SymbolMap { symbols, strings })
83 |     }
84 | 
85 |     pub fn symbols(&self) -> impl Iterator<Item = Symbol<'sym>> + '_ {
86 |         self.symbols
87 |             .iter()
88 |             .map(|raw| Symbol::from_raw(self.strings, raw))
89 |     }
90 | 
91 |     pub fn lookup(&self, addr: u64) -> Option<Symbol<'sym>> {
92 |         let index = match self
93 |             .symbols
94 |             .binary_search_by_key(&addr, |raw| raw.addr.get())
95 |         {
96 |             Ok(index) => index,
97 |             Err(index) => index.saturating_sub(1),
98 |         };
99 |         let raw = &self.symbols[index];
100 |         if raw.addr <= addr && addr < raw.addr + raw.size {
101 |             Some(Symbol::from_raw(self.strings, raw))
102 |         } else {
103 |             None
104 |         }
105 |     }
106 | }
107 | 
108 | pub struct Symbol<'sym> {
109 |     pub name: &'sym str,
110 |     pub addr: u64,
111 |     pub size: u64,
112 | }
113 | 
114 | impl<'sym> Symbol<'sym> {
115 |     fn from_raw(strings: &'sym str, raw: &'sym RawSymbol) -> Symbol<'sym> {
116 |         let name = &strings[raw.name.get() as usize..][..raw.name_len.get() as usize];
117 |         Symbol {
118 |             name,
119 |             addr: raw.addr.get(),
120 |             size: raw.size.get(),
121 |         }
122 |     }
123 | }
124 | 
--------------------------------------------------------------------------------
/tools/xtask/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "spark_xtask"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | [dependencies]
7 | anyhow = "1.0.75"
8 | bolt-libelf = { git = "https://github.com/bolt-os/libelf.git" }
9 | clap = { version = "*", features = ["derive"] }
10 | rustc-demangle = "0.1.23"
11 | symbol_map = { path = "../symbol_map", features = ["xtask"] }
12 | walkdir = "*"
13 | xtask = { git = "https://github.com/bolt-os/xtask.git" }
14 | 
--------------------------------------------------------------------------------
/tools/xtask/src/build.rs:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2022-2023 xvanc and contributors
3 |  * SPDX-License-Identifier: BSD-3-Clause
4 |  */
5 | 
6 | use crate::{
7 |     util::{self, elf_to_binary},
8 |     BuildContext,
9 | };
10 | use std::{
11 |     fs,
12 |     ops::Deref,
13 |     path::{Path, PathBuf},
14 |     process::Command,
15 | };
16 | use xtask::{concat_paths, process::CommandExt};
17 | 
18 | #[derive(clap::Parser)]
19 | pub struct BuildArguments {
20 |     #[clap(flatten)]
21 |     pub general: crate::Arguments,
22 | }
23 | 
24 | impl Deref for BuildArguments {
25 |     type Target = crate::Arguments;
26 | 
27 |     fn deref(&self) -> &Self::Target {
28 |         &self.general
29 |     }
30 | }
31 | 
32 | #[derive(Clone, Copy, Eq, PartialEq)]
33 | pub enum BuildCmd {
34 |     Build,
35 |     Check,
36 |     Doc,
37 | }
38 | 
39 | pub fn main(ctx: &BuildContext, args: &BuildArguments, cmd: BuildCmd) -> anyhow::Result<()> {
40 |     let rustflags = format!(
41 |         "--cfg spark_platform=\"{}\" {} -C panic=unwind -C force-unwind-tables ",
42 |         args.target.platform(),
43 |         if args.verbose { "-v" } else { "" }
44 |     );
45 | 
46 |     Command::new(&ctx.cargo_cmd)
47 |         .arg("fmt")
48 |         .arg_if(args.verbose, "-vv")
49 |         .arg_if(args.ci, "--check")
50 |         .current_dir(&ctx.paths.spark_dir)
51 |         .env("RUSTFLAGS", &rustflags)
52 |         .execute()?;
53 | 
54 |     Command::new(&ctx.cargo_cmd)
55 |         .args::<&[_], _>(match cmd {
56 |             BuildCmd::Build => &[
57 |                 "build",
58 |                 "-Zunstable-options",
59 |                 "-Zbuild-std=core,alloc,compiler_builtins",
60 |                 "-Zbuild-std-features=compiler-builtins-mem",
61 |             ],
62 |             BuildCmd::Check => &["check"],
63 |             BuildCmd::Doc => &["doc", "--package", "spark", "--document-private-items"],
"spark", "--document-private-items"], 64 | }) 65 | .args(["--profile", &ctx.rust_profile, "--target"]) 66 | .arg(&ctx.rust_target) 67 | .arg("--target-dir") 68 | .arg(&ctx.paths.rust_out_dir) 69 | .arg_if(args.verbose, "-vv") 70 | .current_dir(&ctx.paths.spark_dir) 71 | .env("RUSTFLAGS", &rustflags) 72 | .execute()?; 73 | 74 | if cmd == BuildCmd::Build { 75 | let spark_elf = link_spark(ctx, args, None)?; 76 | let symbol_map = create_symbol_map(ctx, spark_elf)?; 77 | let spark_elf = link_spark(ctx, args, Some(symbol_map))?; 78 | 79 | let spark_bin = spark_elf.with_extension("bin"); 80 | elf_to_binary(spark_elf, spark_bin)?; 81 | } 82 | 83 | Ok(()) 84 | } 85 | 86 | fn link_spark( 87 | ctx: &BuildContext, 88 | args: &BuildArguments, 89 | symbol_map: Option, 90 | ) -> anyhow::Result { 91 | let spark_lib = ctx.paths.rust_build_dir.join("libspark.a"); 92 | let spark_elf = ctx.paths.build_dir.join("spark.elf"); 93 | let linker_script = 94 | concat_paths!(ctx.paths.spark_dir, "conf", args.target.as_str()).with_extension("ld"); 95 | 96 | let mut linker = Command::new(&ctx.linker_cmd); 97 | 98 | linker.args([ 99 | "-static", 100 | "-pie", 101 | "--no-dynamic-linker", 102 | "--whole-archive", 103 | "--gc-sections", 104 | "--eh-frame-hdr", 105 | "-znostart-stop-gc", 106 | "-zrelro", 107 | &format!("--script={}", linker_script.display()), 108 | ]); 109 | linker.arg("-o").arg(&spark_elf).arg(spark_lib); 110 | 111 | if let Some(symbol_map) = symbol_map { 112 | linker.arg(symbol_map); 113 | } 114 | 115 | linker.execute()?; 116 | 117 | Ok(spark_elf) 118 | } 119 | 120 | fn create_symbol_map>(ctx: &BuildContext, path: P) -> anyhow::Result { 121 | let map_bin = ctx.paths.build_dir.join("symbol-map.bin"); 122 | let map_obj = map_bin.with_extension("o"); 123 | 124 | fs::write(&map_bin, symbol_map::generate(path)?)?; 125 | util::rustc_create_obj_from_bin( 126 | ctx, 127 | "__symbol_map", 128 | r#".rodata.__symbol_map,"a",@progbits"#, 129 | map_bin, 130 | &map_obj, 131 | )?; 132 | 133 | Ok(map_obj) 134 | } 135 | -------------------------------------------------------------------------------- /tools/xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022-2023 xvanc and contributors 3 | * SPDX-License-Identifier: BSD-3-Clause 4 | */ 5 | 6 | #![feature(exit_status_error)] 7 | 8 | mod build; 9 | mod run; 10 | mod util; 11 | 12 | use build::BuildCmd; 13 | use std::{env, ffi::OsString, fmt, ops::Deref, path::PathBuf, process::Command, str::FromStr}; 14 | use xtask::concat_paths; 15 | 16 | #[allow(non_camel_case_types)] 17 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 18 | enum Target { 19 | riscv64_sbi, 20 | riscv64_uefi, 21 | } 22 | 23 | impl Target { 24 | fn as_str(self) -> &'static str { 25 | match self { 26 | Self::riscv64_sbi => "riscv64-sbi", 27 | Self::riscv64_uefi => "riscv64-uefi", 28 | } 29 | } 30 | 31 | fn arch(self) -> &'static str { 32 | match self { 33 | Self::riscv64_sbi | Self::riscv64_uefi => "riscv64", 34 | } 35 | } 36 | 37 | fn platform(self) -> &'static str { 38 | match self { 39 | Self::riscv64_sbi => "sbi", 40 | Self::riscv64_uefi => "uefi", 41 | } 42 | } 43 | 44 | fn rust_triple(self) -> &'static str { 45 | match self { 46 | Self::riscv64_sbi | Self::riscv64_uefi => "riscv64imac-unknown-none", 47 | } 48 | } 49 | } 50 | 51 | impl fmt::Display for Target { 52 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 53 | f.write_str(self.as_str()) 54 | } 55 | } 56 | 57 | impl FromStr for Target { 58 | type Err = String; 
59 | 
60 |     fn from_str(s: &str) -> Result<Self, Self::Err> {
61 |         match s {
62 |             "riscv64-sbi" => Ok(Self::riscv64_sbi),
63 |             "riscv64-uefi" => Ok(Self::riscv64_uefi),
64 |             _ => Err(format!("`{s}` is not a supported target")),
65 |         }
66 |     }
67 | }
68 | 
69 | struct Paths {
70 |     spark_dir: PathBuf,      // <pwd>/spark
71 |     build_dir: PathBuf,      // <pwd>/build/<target>/<profile>
72 |     rust_out_dir: PathBuf,   // <pwd>/build/target
73 |     rust_build_dir: PathBuf, // <pwd>/build/target/<rust-triple>/<profile>
74 | }
75 | 
76 | struct BuildContext {
77 |     cargo_cmd: OsString,
78 |     rustc_cmd: OsString,
79 |     linker_cmd: OsString,
80 |     paths: Paths,
81 |     rust_profile: String,
82 |     rust_target: OsString,
83 | }
84 | 
85 | #[derive(Clone, clap::Parser)]
86 | pub struct Arguments {
87 |     #[clap(long, default_value = "riscv64-sbi")]
88 |     target: Target,
89 |     #[clap(long)]
90 |     release: bool,
91 |     #[clap(short, long)]
92 |     verbose: bool,
93 |     #[clap(long)]
94 |     ci: bool,
95 | 
96 |     #[clap(long)]
97 |     cargo: Option<OsString>,
98 |     #[clap(long)]
99 |     rustc: Option<OsString>,
100 |     #[clap(long)]
101 |     linker: Option<OsString>,
102 | }
103 | 
104 | #[derive(clap::Parser)]
105 | enum Subcommand {
106 |     #[clap(alias = "b")]
107 |     Build(build::BuildArguments),
108 |     #[clap(alias = "c")]
109 |     Check(build::BuildArguments),
110 |     #[clap(alias = "d")]
111 |     Doc(build::BuildArguments),
112 |     #[clap(alias = "r")]
113 |     Run(run::RunArguments),
114 | }
115 | 
116 | impl Deref for Subcommand {
117 |     type Target = Arguments;
118 | 
119 |     fn deref(&self) -> &Self::Target {
120 |         match self {
121 |             Self::Build(args) => args,
122 |             Self::Check(args) => args,
123 |             Self::Doc(args) => args,
124 |             Self::Run(args) => args,
125 |         }
126 |     }
127 | }
128 | 
129 | macro_rules! default_var {
130 |     ($args:expr, $name:ident, $var:expr, $default:expr) => {
131 |         'b: {
132 |             if let Some(cmd) = &$args.$name {
133 |                 break 'b cmd.clone();
134 |             }
135 |             if let Some(cmd) = env::var_os($var) {
136 |                 break 'b cmd;
137 |             }
138 |             OsString::from($default)
139 |         }
140 |     };
141 | }
142 | 
143 | fn find_linker(args: &Arguments) -> anyhow::Result<OsString> {
144 |     let cmds: &[_] = match args.target {
145 |         Target::riscv64_sbi | Target::riscv64_uefi => {
146 |             &["ld.lld", "riscv64-unknown-elf-ld", "riscv64-elf-ld", "ld"]
147 |         }
148 |     };
149 | 
150 |     for cmd in cmds {
151 |         match check_linker(args, cmd) {
152 |             Ok(true) => return Ok(cmd.into()),
153 |             Ok(false) => (),
154 |             Err(error) => {
155 |                 eprintln!("{error}");
156 |             }
157 |         }
158 |     }
159 | 
160 |     let arch = args.target.arch();
161 | 
162 |     Err(anyhow::anyhow!(
163 |         "Cannot find a suitable linker.\n\
164 |         Make sure a linker that supports the {arch} architecture is in your PATH, \
165 |         or specify one with the `--linker` argument or the `LD` environment variable.",
166 |     ))
167 | }
168 | 
169 | fn check_linker(args: &Arguments, cmd: &str) -> anyhow::Result<bool> {
170 |     let output = Command::new(cmd).arg("-V").output()?;
171 |     output.status.exit_ok()?;
172 |     let stdout = String::from_utf8(output.stdout)?;
173 | 
174 |     if stdout.starts_with("LLD") || stdout.contains(" LLD ") {
175 |         return Ok(true);
176 |     }
177 | 
178 |     let arch = match args.target {
179 |         Target::riscv64_sbi | Target::riscv64_uefi => "elf64lriscv",
180 |     };
181 |     if stdout.starts_with("GNU ld") && stdout.contains(arch) {
182 |         return Ok(true);
183 |     }
184 | 
185 |     Ok(false)
186 | }
187 | 
188 | fn main() -> anyhow::Result<()> {
189 |     let args = <Subcommand as clap::Parser>::parse();
190 |     let ctx = {
191 |         let (profile, rust_profile) = match args.release {
192 |             true => ("release", "release"),
193 |             false => ("debug", "dev"),
194 |         };
195 | 
196 |         let pwd = env::current_dir()?;
197 |         let spark_dir = pwd.join("spark");
198 |         let out_dir = pwd.join("build");
199 |         let build_dir = concat_paths!(out_dir, args.target.as_str(), profile);
200 |         let rust_out_dir = out_dir.join("target");
201 |         let rust_build_dir = concat_paths!(rust_out_dir, args.target.rust_triple(), profile);
202 | 
203 |         xtask::fs::make_dir(&build_dir)?;
204 | 
205 |         let rust_target = match args.target {
206 |             Target::riscv64_sbi | Target::riscv64_uefi => {
207 |                 concat_paths!(pwd, "riscv64imac-unknown-none.json").into_os_string()
208 |             }
209 |         };
210 | 
211 |         BuildContext {
212 |             cargo_cmd: default_var!(args, cargo, "CARGO", "cargo"),
213 |             rustc_cmd: default_var!(args, rustc, "RUSTC", "rustc"),
214 |             linker_cmd: default_var!(args, linker, "LD", find_linker(&args)?),
215 |             paths: Paths {
216 |                 spark_dir,
217 |                 build_dir,
218 |                 rust_out_dir,
219 |                 rust_build_dir,
220 |             },
221 |             rust_profile: rust_profile.to_string(),
222 |             rust_target,
223 |         }
224 |     };
225 | 
226 |     match args {
227 |         Subcommand::Build(args) => build::main(&ctx, &args, BuildCmd::Build)?,
228 |         Subcommand::Check(args) => build::main(&ctx, &args, BuildCmd::Check)?,
229 |         Subcommand::Doc(args) => build::main(&ctx, &args, BuildCmd::Doc)?,
230 |         Subcommand::Run(args) => run::main(&ctx, &args)?,
231 |     }
232 | 
233 |     Ok(())
234 | }
235 | 
--------------------------------------------------------------------------------
/tools/xtask/src/run.rs:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2022-2023 xvanc and contributors
3 |  * SPDX-License-Identifier: BSD-3-Clause
4 |  */
5 | 
6 | use crate::{build, BuildContext, Target};
7 | use std::{ffi::OsString, ops::Deref, path::PathBuf, process::Command};
8 | use xtask::{concat_paths, process::CommandExt};
9 | 
10 | #[derive(clap::Parser)]
11 | pub struct RunArguments {
12 |     #[clap(flatten)]
13 |     pub general: crate::Arguments,
14 |     /// Machine to emulate
15 |     #[clap(long, default_value = "virt")]
16 |     pub machine: String,
17 |     /// CPU to emulate
18 |     #[clap(long, default_value = "rv64")]
19 |     pub cpu: String,
20 |     /// Number of CPUs to emulate
21 |     #[clap(long, default_value = "4")]
22 |     pub smp: String,
23 |     /// RAM size in MB
24 |     #[clap(long, default_value = "512")]
25 |     pub ram: String,
26 |     /// Enable debug logging
27 |     #[clap(long, default_value = "int,guest_errors")]
28 |     pub log: Option<Option<String>>,
29 |     /// Wait for a debugger to attach
30 |     #[clap(short, long)]
31 |     pub debugger: bool,
32 |     /// Path to QEMU executable
33 |     #[clap(long, default_value = "qemu-system-riscv64")]
34 |     pub qemu: PathBuf,
35 |     /// Extra arguments to pass to QEMU
36 |     #[clap(last = true)]
37 |     pub qemu_args: Vec<OsString>,
38 | }
39 | 
40 | impl Deref for RunArguments {
41 |     type Target = crate::Arguments;
42 | 
43 |     fn deref(&self) -> &Self::Target {
44 |         &self.general
45 |     }
46 | }
47 | 
48 | pub fn main(ctx: &BuildContext, args: &RunArguments) -> anyhow::Result<()> {
49 |     {
50 |         let build_args = build::BuildArguments {
51 |             general: args.general.clone(),
52 |         };
53 |         build::main(ctx, &build_args, build::BuildCmd::Build)?;
54 |     }
55 | 
56 |     let spark_bin = ctx.paths.build_dir.join("spark.bin");
57 |     let mut qemu = Command::new(&args.qemu);
58 | 
59 |     #[rustfmt::skip]
60 |     qemu.args([
61 |         "-machine", &args.machine,
62 |         "-cpu", &args.cpu,
63 |         "-smp", &args.smp,
64 |         "-m", &args.ram,
65 | 
66 |         "-serial", "mon:stdio",
67 |     ]);
68 | 
69 |     if args.debugger {
70 |         qemu.args(["-s", "-S"]);
71 |     }
72 | 
73 |     if let Some(opts) = &args.log {
74 |         let log_path = concat_paths!(ctx.paths.build_dir, "log", "cpu%d.txt");
75 |         xtask::fs::make_dir(log_path.parent().unwrap())?;
76 |         qemu.arg("-D")
77 |             .arg(log_path)
78 |             .arg("-d")
79 |             .arg(opts.as_deref().unwrap_or("int,guest_errors"));
80 |     }
81 | 
82 |     match args.target {
83 |         Target::riscv64_sbi => {
84 |             qemu.arg("-kernel").arg(spark_bin);
85 |         }
86 |         Target::riscv64_uefi => {
87 |             todo!("need to fetch ovmf");
88 |         }
89 |     }
90 | 
91 |     qemu.args(&args.qemu_args).execute()?;
92 | 
93 |     Ok(())
94 | }
95 | 
--------------------------------------------------------------------------------
/tools/xtask/src/util.rs:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2022-2023 xvanc and contributors
3 |  * SPDX-License-Identifier: BSD-3-Clause
4 |  */
5 | 
6 | use crate::BuildContext;
7 | use elf::{Elf, SegmentKind};
8 | use std::{
9 |     fs::{self, File},
10 |     io::{Seek, SeekFrom, Write},
11 |     path::Path,
12 |     process::{Command, Stdio},
13 | };
14 | use xtask::process::CommandExt;
15 | 
16 | pub fn rustc_create_obj_from_bin<Pb: AsRef<Path>, Po: AsRef<Path>>(
17 |     ctx: &BuildContext,
18 |     name: &str,
19 |     section_name: &str,
20 |     bin_path: Pb,
21 |     obj_path: Po,
22 | ) -> anyhow::Result<()> {
23 |     rustc_create_obj_from_bin_(
24 |         ctx,
25 |         name,
26 |         section_name,
27 |         bin_path.as_ref(),
28 |         obj_path.as_ref(),
29 |     )
30 | }
31 | 
32 | fn rustc_create_obj_from_bin_(
33 |     ctx: &BuildContext,
34 |     name: &str,
35 |     section_name: &str,
36 |     bin_path: &Path,
37 |     obj_path: &Path,
38 | ) -> anyhow::Result<()> {
39 |     let mut rustc = Command::new(&ctx.rustc_cmd)
40 |         .args(["+nightly", "--emit", "obj", "-", "--target"])
41 |         .arg(&ctx.rust_target)
42 |         .arg("-o")
43 |         .arg(obj_path)
44 |         .log_command()
45 |         .stdin(Stdio::piped())
46 |         .log_command()
47 |         .spawn()?;
48 | 
49 |     rustc.stdin.take().unwrap().write_all(
50 |         format!(
51 |             r##"
52 | #![allow(internal_features)]
53 | #![feature(no_core, rustc_attrs)]
54 | #![no_core]
55 | #![no_main]
56 | 
57 | #[rustc_builtin_macro]
58 | macro_rules! global_asm {{ () => (); }}
59 | 
60 | global_asm!(
61 |     r#"
62 |     .pushsection {section_name}
63 | 
64 |     .global {name}
65 |     .p2align 4
66 |     {name}:
67 |         .incbin "{}"
68 |     .size {name}, . - {name}
69 | 
70 |     .global {name}_size
71 |     .set {name}_size, . - {name}
72 | 
73 |     .popsection
74 |     "#
75 | );
76 | "##,
77 |             bin_path.display()
78 |         )
79 |         .as_bytes(),
80 |     )?;
81 | 
82 |     rustc.wait()?.exit_ok()?;
83 | 
84 |     Ok(())
85 | }
86 | 
87 | pub fn elf_to_binary<Pe: AsRef<Path>, Pb: AsRef<Path>>(
88 |     elf_path: Pe,
89 |     bin_path: Pb,
90 | ) -> anyhow::Result<()> {
91 |     elf_to_binary_(elf_path.as_ref(), bin_path.as_ref())
92 | }
93 | 
94 | fn elf_to_binary_(elf_path: &Path, bin_path: &Path) -> anyhow::Result<()> {
95 |     let file_data = fs::read(elf_path)?;
96 |     let elf = Elf::new(&file_data).unwrap();
97 |     let mut out = File::create(bin_path)?;
98 | 
99 |     for segment in elf.segments().filter(|s| s.kind() == SegmentKind::Load) {
100 |         out.seek(SeekFrom::Start(segment.virtual_address()))?;
101 |         out.write_all(segment.file_data())?;
102 |     }
103 | 
104 |     Ok(())
105 | }
106 | 
--------------------------------------------------------------------------------
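Taken together, `generate.rs` packs a `SymbolMapHeader`, an address-sorted `RawSymbol` table, and a blob of demangled names into one image, and `lib.rs` reads the same image back with `SymbolMap::new` and `lookup`. The sketch below is an illustrative host-side round-trip of that flow, not part of the tree: it assumes a scratch binary crate that depends on `symbol_map` with the `xtask` feature plus `bytemuck` (both already in the dependency graph), and the ELF path and probe address are placeholders. The copy into a `u64`-backed buffer is only there because `SymbolMap::new` rejects unaligned input; the in-tree consumer gets its alignment from the `.p2align 4` in `rustc_create_obj_from_bin_` instead.

```rust
// Hypothetical host-side check of a generated SPARKSYM image (not part of the tree).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build the packed image from an ELF; `generate` needs the `xtask` feature.
    let image = symbol_map::generate("build/spark.elf")?;

    // `SymbolMap::new` requires 8-byte alignment, so re-home the bytes in a u64 buffer.
    let mut backing = vec![0u64; (image.len() + 7) / 8];
    bytemuck::cast_slice_mut::<u64, u8>(&mut backing)[..image.len()].copy_from_slice(&image);
    let bytes = &bytemuck::cast_slice::<u64, u8>(&backing)[..image.len()];

    // Re-parse the image with the same no_std reader the bootloader links in.
    let map = symbol_map::SymbolMap::new(bytes).map_err(|msg| format!("bad symbol map: {msg}"))?;

    // Resolve a placeholder probe address to the function that contains it, if any.
    let probe = 0x8020_0000u64;
    match map.lookup(probe) {
        Some(sym) => println!("{probe:#x} -> {} ({:#x}..{:#x})", sym.name, sym.addr, sym.addr + sym.size),
        None => println!("{probe:#x} is not covered by any symbol"),
    }
    Ok(())
}
```

The same `lookup` path is presumably what the bootloader itself leans on when it wants to turn addresses such as the `sepc` printed by `trap_handler` into readable names.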