├── .cargo └── config.toml ├── .github └── workflows │ └── xtask.yml ├── .gitignore ├── .vscode ├── launch.json └── settings.json ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── aarch64 ├── Cargo.toml ├── lib │ ├── bcm2710-rpi-3-b.dtb │ ├── bcm2710-rpi-3-b.dts │ ├── bcm2711-rpi-4-b.dtb │ ├── bcm2711-rpi-4-b.dts │ ├── config_default.toml │ ├── config_raspi4b.toml │ └── kernel.ld └── src │ ├── devcons.rs │ ├── io.rs │ ├── kmem.rs │ ├── l.S │ ├── mailbox.rs │ ├── main.rs │ ├── pagealloc.rs │ ├── param.rs │ ├── registers.rs │ ├── runtime.rs │ ├── trap.S │ ├── trap.rs │ ├── uartmini.rs │ ├── uartpl011.rs │ └── vm.rs ├── lib ├── aarch64-unknown-none-elf.json ├── riscv64-unknown-none-elf.json └── x86_64-unknown-none-elf.json ├── port ├── Cargo.toml ├── lib │ └── test │ │ └── fdt │ │ ├── readme.txt │ │ ├── test1.dtb │ │ └── test1.dts ├── src │ ├── allocator.rs │ ├── bitmapalloc.rs │ ├── dat.rs │ ├── devcons.rs │ ├── fdt.rs │ ├── lib.rs │ ├── mcslock.rs │ └── mem.rs └── tests │ └── fdt_test.rs ├── riscv64 ├── Cargo.toml ├── README.md ├── lib │ ├── config_default.toml │ └── kernel.ld └── src │ ├── l.S │ ├── main.rs │ ├── platform │ ├── mod.rs │ ├── nezha │ │ ├── devcons.rs │ │ └── mod.rs │ └── virt │ │ ├── devcons.rs │ │ └── mod.rs │ ├── runtime.rs │ ├── sbi.rs │ └── uart16550.rs ├── rust-toolchain.toml ├── rustfmt.toml ├── x86_64 ├── Cargo.toml ├── lib │ ├── config_default.toml │ └── kernel.ld └── src │ ├── dat.rs │ ├── devcons.rs │ ├── l.S │ ├── main.rs │ ├── pio.rs │ ├── proc.rs │ ├── runtime.rs │ └── uart16550.rs └── xtask ├── Cargo.lock ├── Cargo.toml ├── doc └── config_example.toml └── src ├── config.rs └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [alias] 2 | xtask = "run --package xtask --" 3 | -------------------------------------------------------------------------------- /.github/workflows/xtask.yml: -------------------------------------------------------------------------------- 1 | name: xtask 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | tests: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v3 18 | - name: Run tests 19 | run: cargo xtask test --verbose 20 | 21 | build-x86_64: 22 | runs-on: ubuntu-latest 23 | 24 | steps: 25 | - uses: actions/checkout@v3 26 | - name: Build 27 | run: cargo xtask build --arch x86-64 --verbose 28 | 29 | build-aarch64: 30 | runs-on: ubuntu-latest 31 | 32 | steps: 33 | - uses: actions/checkout@v3 34 | - name: Build 35 | run: cargo xtask build --arch aarch64 --verbose 36 | 37 | build-riscv64: 38 | runs-on: ubuntu-latest 39 | 40 | steps: 41 | - uses: actions/checkout@v3 42 | - name: Build 43 | run: cargo xtask build --arch riscv64 --verbose 44 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "custom", 10 | "name": "Debug QEMU (aarch64)", 11 | "targetCreateCommands": [ 12 | "target create ${workspaceFolder}/target/aarch64-unknown-none-elf/debug/aarch64" 13 | ], 14 | "processCreateCommands": [ 15 | "gdb-remote localhost:1234" 16 | ] 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "git.alwaysSignOff": true, 3 | "rust-analyzer.check.overrideCommand": [ 4 | "cargo", 5 | "xtask", 6 | "check", 7 | "--json" 8 | ] 9 | } -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 3 4 | 5 | [[package]] 6 | name = "aarch64" 7 | version = "0.1.0" 8 | dependencies = [ 9 | "bitstruct", 10 | "num_enum", 11 | "port", 12 | ] 13 | 14 | [[package]] 15 | name = "anstream" 16 | version = "0.6.15" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" 19 | dependencies = [ 20 | "anstyle", 21 | "anstyle-parse", 22 | "anstyle-query", 23 | "anstyle-wincon", 24 | "colorchoice", 25 | "is_terminal_polyfill", 26 | "utf8parse", 27 | ] 28 | 29 | [[package]] 30 | name = "anstyle" 31 | version = "1.0.8" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" 34 | 35 | [[package]] 36 | name = "anstyle-parse" 37 | version = "0.2.5" 38 | source = "registry+https://github.com/rust-lang/crates.io-index" 39 | checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" 40 | dependencies = [ 41 | "utf8parse", 42 | ] 43 | 44 | [[package]] 45 | name = "anstyle-query" 46 | version = "1.1.1" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" 49 | dependencies = [ 50 | "windows-sys", 51 | ] 52 | 53 | [[package]] 54 | name = "anstyle-wincon" 55 | version = "3.0.4" 56 | source = "registry+https://github.com/rust-lang/crates.io-index" 57 | checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" 58 | dependencies = [ 59 | "anstyle", 60 | "windows-sys", 61 | ] 62 | 63 | [[package]] 64 | name = "bit_field" 65 | version = "0.10.2" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" 68 | 69 | [[package]] 70 | name = "bitflags" 71 | version = "1.3.2" 72 | source = "registry+https://github.com/rust-lang/crates.io-index" 73 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 74 | 75 | [[package]] 76 | name = "bitflags" 77 | version = "2.6.0" 78 | source = "registry+https://github.com/rust-lang/crates.io-index" 79 | checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" 80 | 81 | [[package]] 82 | name = "bitstruct" 83 | version = "0.1.1" 84 | source = "registry+https://github.com/rust-lang/crates.io-index" 85 | checksum = "a1b10c3912af09af44ea1dafe307edb5ed374b2a32658eb610e372270c9017b4" 86 | 
dependencies = [ 87 | "bitstruct_derive", 88 | ] 89 | 90 | [[package]] 91 | name = "bitstruct_derive" 92 | version = "0.1.0" 93 | source = "registry+https://github.com/rust-lang/crates.io-index" 94 | checksum = "35fd19022c2b750d14eb9724c204d08ab7544570105b3b466d8a9f2f3feded27" 95 | dependencies = [ 96 | "proc-macro2", 97 | "quote", 98 | "syn 1.0.109", 99 | ] 100 | 101 | [[package]] 102 | name = "clap" 103 | version = "4.5.18" 104 | source = "registry+https://github.com/rust-lang/crates.io-index" 105 | checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" 106 | dependencies = [ 107 | "clap_builder", 108 | "clap_derive", 109 | ] 110 | 111 | [[package]] 112 | name = "clap_builder" 113 | version = "4.5.18" 114 | source = "registry+https://github.com/rust-lang/crates.io-index" 115 | checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" 116 | dependencies = [ 117 | "anstream", 118 | "anstyle", 119 | "clap_lex", 120 | "strsim", 121 | ] 122 | 123 | [[package]] 124 | name = "clap_derive" 125 | version = "4.5.18" 126 | source = "registry+https://github.com/rust-lang/crates.io-index" 127 | checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" 128 | dependencies = [ 129 | "heck", 130 | "proc-macro2", 131 | "quote", 132 | "syn 2.0.77", 133 | ] 134 | 135 | [[package]] 136 | name = "clap_lex" 137 | version = "0.7.2" 138 | source = "registry+https://github.com/rust-lang/crates.io-index" 139 | checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" 140 | 141 | [[package]] 142 | name = "colorchoice" 143 | version = "1.0.2" 144 | source = "registry+https://github.com/rust-lang/crates.io-index" 145 | checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" 146 | 147 | [[package]] 148 | name = "equivalent" 149 | version = "1.0.1" 150 | source = "registry+https://github.com/rust-lang/crates.io-index" 151 | checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" 152 | 153 | [[package]] 154 | name = "hashbrown" 155 | version = "0.14.5" 156 | source = "registry+https://github.com/rust-lang/crates.io-index" 157 | checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" 158 | 159 | [[package]] 160 | name = "heck" 161 | version = "0.5.0" 162 | source = "registry+https://github.com/rust-lang/crates.io-index" 163 | checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" 164 | 165 | [[package]] 166 | name = "indexmap" 167 | version = "2.5.0" 168 | source = "registry+https://github.com/rust-lang/crates.io-index" 169 | checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" 170 | dependencies = [ 171 | "equivalent", 172 | "hashbrown", 173 | ] 174 | 175 | [[package]] 176 | name = "is_terminal_polyfill" 177 | version = "1.70.1" 178 | source = "registry+https://github.com/rust-lang/crates.io-index" 179 | checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" 180 | 181 | [[package]] 182 | name = "memchr" 183 | version = "2.7.4" 184 | source = "registry+https://github.com/rust-lang/crates.io-index" 185 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 186 | 187 | [[package]] 188 | name = "num_enum" 189 | version = "0.7.3" 190 | source = "registry+https://github.com/rust-lang/crates.io-index" 191 | checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" 192 | dependencies = [ 193 | "num_enum_derive", 194 | ] 195 | 196 | [[package]] 197 | name = 
"num_enum_derive" 198 | version = "0.7.3" 199 | source = "registry+https://github.com/rust-lang/crates.io-index" 200 | checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" 201 | dependencies = [ 202 | "proc-macro2", 203 | "quote", 204 | "syn 2.0.77", 205 | ] 206 | 207 | [[package]] 208 | name = "port" 209 | version = "0.1.0" 210 | dependencies = [ 211 | "bitflags 2.6.0", 212 | ] 213 | 214 | [[package]] 215 | name = "proc-macro2" 216 | version = "1.0.86" 217 | source = "registry+https://github.com/rust-lang/crates.io-index" 218 | checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" 219 | dependencies = [ 220 | "unicode-ident", 221 | ] 222 | 223 | [[package]] 224 | name = "quote" 225 | version = "1.0.37" 226 | source = "registry+https://github.com/rust-lang/crates.io-index" 227 | checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" 228 | dependencies = [ 229 | "proc-macro2", 230 | ] 231 | 232 | [[package]] 233 | name = "raw-cpuid" 234 | version = "10.7.0" 235 | source = "registry+https://github.com/rust-lang/crates.io-index" 236 | checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" 237 | dependencies = [ 238 | "bitflags 1.3.2", 239 | ] 240 | 241 | [[package]] 242 | name = "riscv64" 243 | version = "0.1.0" 244 | dependencies = [ 245 | "port", 246 | ] 247 | 248 | [[package]] 249 | name = "serde" 250 | version = "1.0.210" 251 | source = "registry+https://github.com/rust-lang/crates.io-index" 252 | checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" 253 | dependencies = [ 254 | "serde_derive", 255 | ] 256 | 257 | [[package]] 258 | name = "serde_derive" 259 | version = "1.0.210" 260 | source = "registry+https://github.com/rust-lang/crates.io-index" 261 | checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" 262 | dependencies = [ 263 | "proc-macro2", 264 | "quote", 265 | "syn 2.0.77", 266 | ] 267 | 268 | [[package]] 269 | name = "serde_spanned" 270 | version = "0.6.8" 271 | source = "registry+https://github.com/rust-lang/crates.io-index" 272 | checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" 273 | dependencies = [ 274 | "serde", 275 | ] 276 | 277 | [[package]] 278 | name = "strsim" 279 | version = "0.11.1" 280 | source = "registry+https://github.com/rust-lang/crates.io-index" 281 | checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" 282 | 283 | [[package]] 284 | name = "syn" 285 | version = "1.0.109" 286 | source = "registry+https://github.com/rust-lang/crates.io-index" 287 | checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" 288 | dependencies = [ 289 | "proc-macro2", 290 | "quote", 291 | "unicode-ident", 292 | ] 293 | 294 | [[package]] 295 | name = "syn" 296 | version = "2.0.77" 297 | source = "registry+https://github.com/rust-lang/crates.io-index" 298 | checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" 299 | dependencies = [ 300 | "proc-macro2", 301 | "quote", 302 | "unicode-ident", 303 | ] 304 | 305 | [[package]] 306 | name = "target-lexicon" 307 | version = "0.12.16" 308 | source = "registry+https://github.com/rust-lang/crates.io-index" 309 | checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" 310 | 311 | [[package]] 312 | name = "toml" 313 | version = "0.8.19" 314 | source = "registry+https://github.com/rust-lang/crates.io-index" 315 | checksum = 
"a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" 316 | dependencies = [ 317 | "serde", 318 | "serde_spanned", 319 | "toml_datetime", 320 | "toml_edit", 321 | ] 322 | 323 | [[package]] 324 | name = "toml_datetime" 325 | version = "0.6.8" 326 | source = "registry+https://github.com/rust-lang/crates.io-index" 327 | checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" 328 | dependencies = [ 329 | "serde", 330 | ] 331 | 332 | [[package]] 333 | name = "toml_edit" 334 | version = "0.22.22" 335 | source = "registry+https://github.com/rust-lang/crates.io-index" 336 | checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" 337 | dependencies = [ 338 | "indexmap", 339 | "serde", 340 | "serde_spanned", 341 | "toml_datetime", 342 | "winnow", 343 | ] 344 | 345 | [[package]] 346 | name = "unicode-ident" 347 | version = "1.0.13" 348 | source = "registry+https://github.com/rust-lang/crates.io-index" 349 | checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" 350 | 351 | [[package]] 352 | name = "utf8parse" 353 | version = "0.2.2" 354 | source = "registry+https://github.com/rust-lang/crates.io-index" 355 | checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" 356 | 357 | [[package]] 358 | name = "windows-sys" 359 | version = "0.52.0" 360 | source = "registry+https://github.com/rust-lang/crates.io-index" 361 | checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 362 | dependencies = [ 363 | "windows-targets", 364 | ] 365 | 366 | [[package]] 367 | name = "windows-targets" 368 | version = "0.52.6" 369 | source = "registry+https://github.com/rust-lang/crates.io-index" 370 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" 371 | dependencies = [ 372 | "windows_aarch64_gnullvm", 373 | "windows_aarch64_msvc", 374 | "windows_i686_gnu", 375 | "windows_i686_gnullvm", 376 | "windows_i686_msvc", 377 | "windows_x86_64_gnu", 378 | "windows_x86_64_gnullvm", 379 | "windows_x86_64_msvc", 380 | ] 381 | 382 | [[package]] 383 | name = "windows_aarch64_gnullvm" 384 | version = "0.52.6" 385 | source = "registry+https://github.com/rust-lang/crates.io-index" 386 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" 387 | 388 | [[package]] 389 | name = "windows_aarch64_msvc" 390 | version = "0.52.6" 391 | source = "registry+https://github.com/rust-lang/crates.io-index" 392 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" 393 | 394 | [[package]] 395 | name = "windows_i686_gnu" 396 | version = "0.52.6" 397 | source = "registry+https://github.com/rust-lang/crates.io-index" 398 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" 399 | 400 | [[package]] 401 | name = "windows_i686_gnullvm" 402 | version = "0.52.6" 403 | source = "registry+https://github.com/rust-lang/crates.io-index" 404 | checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" 405 | 406 | [[package]] 407 | name = "windows_i686_msvc" 408 | version = "0.52.6" 409 | source = "registry+https://github.com/rust-lang/crates.io-index" 410 | checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" 411 | 412 | [[package]] 413 | name = "windows_x86_64_gnu" 414 | version = "0.52.6" 415 | source = "registry+https://github.com/rust-lang/crates.io-index" 416 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" 417 | 418 | [[package]] 419 | name = 
"windows_x86_64_gnullvm" 420 | version = "0.52.6" 421 | source = "registry+https://github.com/rust-lang/crates.io-index" 422 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 423 | 424 | [[package]] 425 | name = "windows_x86_64_msvc" 426 | version = "0.52.6" 427 | source = "registry+https://github.com/rust-lang/crates.io-index" 428 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" 429 | 430 | [[package]] 431 | name = "winnow" 432 | version = "0.6.20" 433 | source = "registry+https://github.com/rust-lang/crates.io-index" 434 | checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" 435 | dependencies = [ 436 | "memchr", 437 | ] 438 | 439 | [[package]] 440 | name = "x86" 441 | version = "0.52.0" 442 | source = "registry+https://github.com/rust-lang/crates.io-index" 443 | checksum = "2781db97787217ad2a2845c396a5efe286f87467a5810836db6d74926e94a385" 444 | dependencies = [ 445 | "bit_field", 446 | "bitflags 1.3.2", 447 | "raw-cpuid", 448 | ] 449 | 450 | [[package]] 451 | name = "x86_64" 452 | version = "0.1.0" 453 | dependencies = [ 454 | "bitstruct", 455 | "port", 456 | "x86", 457 | ] 458 | 459 | [[package]] 460 | name = "xtask" 461 | version = "0.1.0" 462 | dependencies = [ 463 | "clap", 464 | "serde", 465 | "target-lexicon", 466 | "toml", 467 | ] 468 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "aarch64", 5 | "x86_64", 6 | "port", 7 | "riscv64", 8 | "xtask" 9 | ] 10 | 11 | [profile.dev] 12 | panic = "abort" 13 | 14 | [profile.release] 15 | panic = "abort" 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Dan Cross 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # r9 2 | [Plan 9](https://plan9.io/plan9/) in Rust 3 | 4 | R9 is a reimplementation of the plan9 kernel in Rust. It is 5 | not only inspired by but in many ways derived from the original 6 | [Plan 9](https://plan9.io/plan9/) source code. 
7 | 8 | ## Building 9 | 10 | We use `cargo` and the `xtask` pattern to build the kernel. 11 | 12 | To build r9 for x86_64, we assume you have cloned the git repository 13 | somewhere convenient. Then simply change into the top-level 14 | directory and run `cargo xtask build --arch x86-64`. 15 | 16 | To build for aarch64, run `cargo xtask build --arch aarch64` (currently only Raspberry Pi 3 is supported). 17 | 18 | There are other useful `xtask` subcommands; run 19 | `cargo xtask help` to see what is available. 20 | 21 | Right now, r9 is not self-hosting. 22 | 23 | ## Runtime Dependencies 24 | 25 | `cargo xtask dist`, which `cargo xtask qemu` depends on, requires `llvm-objcopy`. 26 | This is expected to live in the rust toolchain path. You can install it by running: 27 | ``` 28 | rustup component add llvm-tools 29 | ``` 30 | 31 | If you get `No such file or directory (os error 2)` messages, 32 | then install `llvm` separately from the rust toolchain and set: 33 | ``` 34 | OBJCOPY=$(which llvm-objcopy) cargo xtask qemukvm 35 | ``` 36 | 37 | If `No such file or directory (os error 2)` messages persist, 38 | check to ensure `qemu` or `qemu-kvm` is installed and the 39 | `qemu-system-x86_64` binary is in your path (or `qemu-system-aarch64` in the case of aarch64). 40 | 41 | ## Running on Qemu 42 | 43 | R9 can be run using qemu for the various supported architectures: 44 | 45 | |Arch|Platform|Commandline| 46 | |----|--------|-----------| 47 | |aarch64|raspi3b|cargo xtask qemu --arch aarch64 --verbose| 48 | |aarch64|raspi4b|cargo xtask qemu --arch aarch64 --config raspi4b --verbose| 49 | |x86-64|q35|cargo xtask qemu --arch x86-64 --verbose| 50 | |x86-64 (with kvm)|q35|cargo xtask qemu --arch x86-64 --kvm --verbose| 51 | |riscv64|virt|cargo xtask qemu --arch riscv64 --verbose| 52 | 53 | ## Running on Real Hardware™️ 54 | 55 | R9 has been run on the following hardware to a greater or lesser degree: 56 | - Raspberry Pi 4 (Gets as far as printing 'r9' via the miniuart) 57 | 58 | ### Raspberry Pi, Netboot 59 | 60 | Assuming you can set up a TFTP server (good luck, it's incredibly fiddly, but for what it's worth, dnsmasq can work occasionally), and assuming your netboot directory is at `../netboot`, you can build and copy the binary using the following command: 61 | ``` 62 | cargo xtask dist --arch aarch64 --verbose && cp target/aarch64-unknown-none-elf/debug/aarch64-qemu.gz ../netboot/kernel8.img 63 | ``` 64 | 65 | This copies a compressed binary, which should be much faster to copy across the network. 66 | 67 | The Raspberry Pi firmware loads `config.txt` before the kernel. Here we can set which UART to use, amongst other things. 
The following contents will set up to use the miniuart: 68 | ``` 69 | enable_uart=1 70 | core_freq_min=500 71 | ``` -------------------------------------------------------------------------------- /aarch64/Cargo.toml: -------------------------------------------------------------------------------- 1 | cargo-features = ["per-package-target"] 2 | 3 | [package] 4 | name = "aarch64" 5 | version = "0.1.0" 6 | edition = "2021" 7 | default-target = "aarch64-unknown-none" 8 | 9 | [dependencies] 10 | bitstruct = "0.1" 11 | port = { path = "../port" } 12 | num_enum = { version = "0.7", default-features = false } 13 | -------------------------------------------------------------------------------- /aarch64/lib/bcm2710-rpi-3-b.dtb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dancrossnyc/r9/79e016003f1e7a46dc66d9d785a6f4d6f887db78/aarch64/lib/bcm2710-rpi-3-b.dtb -------------------------------------------------------------------------------- /aarch64/lib/bcm2711-rpi-4-b.dtb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dancrossnyc/r9/79e016003f1e7a46dc66d9d785a6f4d6f887db78/aarch64/lib/bcm2711-rpi-4-b.dtb -------------------------------------------------------------------------------- /aarch64/lib/config_default.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "lib/aarch64-unknown-none-elf.json" 3 | buildflags = ["-Z", "build-std=core,alloc"] 4 | 5 | [link] 6 | # linker script to use 7 | script = 'aarch64/lib/kernel.ld' 8 | 9 | # kernel load address to insert into kernel.ld 10 | load-address = '0xffff800000100000 - 0x80000' 11 | 12 | [qemu] 13 | machine = "raspi3b" 14 | dtb = "aarch64/lib/bcm2710-rpi-3-b.dtb" 15 | -------------------------------------------------------------------------------- /aarch64/lib/config_raspi4b.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "lib/aarch64-unknown-none-elf.json" 3 | buildflags = ["-Z", "build-std=core,alloc"] 4 | 5 | [link] 6 | # linker script to use 7 | script = 'aarch64/lib/kernel.ld' 8 | 9 | # kernel load address to insert into kernel.ld 10 | load-address = '0xffff800000100000 - 0x80000' 11 | 12 | [qemu] 13 | machine = "raspi4b" 14 | dtb = "aarch64/lib/bcm2711-rpi-4-b.dtb" 15 | -------------------------------------------------------------------------------- /aarch64/lib/kernel.ld: -------------------------------------------------------------------------------- 1 | /* 2 | * Linker script for R9. 3 | */ 4 | 5 | ENTRY(start) 6 | 7 | SECTIONS { 8 | /* Entrypoint for Raspberry Pi will be at 0x80000 */ 9 | . = ${LOAD-ADDRESS}; 10 | 11 | boottext = .; 12 | .text.boot : ALIGN(4096) { 13 | *(.boottext .bootdata) 14 | . = ALIGN(4096); 15 | eboottext = .; 16 | . = ALIGN(2097152); 17 | esys = .; 18 | } 19 | 20 | text = .; 21 | .text : ALIGN(4096) { 22 | *(.text* .stub .gnu.linkonce.t.*) 23 | . = ALIGN(2097152); 24 | etext = .; 25 | } 26 | 27 | rodata = .; 28 | .rodata : ALIGN(4096) { 29 | *(.rodata* .gnu.linkonce.r.*) 30 | . = ALIGN(2097152); 31 | erodata = .; 32 | } 33 | 34 | data = .; 35 | .data : ALIGN(4096) { 36 | *(.data*) 37 | } 38 | .got : ALIGN(4096) { 39 | *(.got) 40 | } 41 | .got.plt : ALIGN(4096) { 42 | *(.got.plt) 43 | } 44 | edata = .; 45 | 46 | bss = .; 47 | .bss : ALIGN(4096) { 48 | *(.bss*) 49 | *(COMMON) 50 | . 
= ALIGN(2097152); 51 | } 52 | ebss = .; 53 | 54 | /* Reserve section for early pagetables. Align to 2MiB to allow us to map 55 | as a 2MiB page. Note that this won't be needed once we transition to 56 | recursive pagetables. 57 | TODO Just use the heap when we enable recursive pagetables? */ 58 | . = ALIGN(2 * 1024 * 1024); 59 | early_pagetables = .; 60 | . += 2 * 1024 * 1024; 61 | eearly_pagetables = .; 62 | 63 | end = .; 64 | PROVIDE(end = .); 65 | 66 | /DISCARD/ : { 67 | *(.eh_frame .note.GNU-stack) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /aarch64/src/devcons.rs: -------------------------------------------------------------------------------- 1 | // Racy to start. 2 | 3 | use crate::param::KZERO; 4 | use crate::uartmini::MiniUart; 5 | use core::cell::SyncUnsafeCell; 6 | use core::mem::MaybeUninit; 7 | use port::devcons::Console; 8 | use port::fdt::DeviceTree; 9 | 10 | // The aarch64 devcons implementation is focussed on Raspberry Pi 3, 4 for now. 11 | 12 | // Useful links 13 | // - Raspberry Pi Processors 14 | // https://www.raspberrypi.com/documentation/computers/processors.html 15 | // - Raspberry Pi Hardware 16 | // https://www.raspberrypi.com/documentation/computers/raspberry-pi.html 17 | // - Raspi3 BCM2837 18 | // Datasheet (BCM2835) https://datasheets.raspberrypi.com/bcm2835/bcm2835-peripherals.pdf 19 | // - Raspi4 BCM2711 20 | // Datasheet https://datasheets.raspberrypi.com/bcm2711/bcm2711-peripherals.pdf 21 | // - Mailbox 22 | // https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface 23 | 24 | // Raspberry Pi 3 has 2 UARTs, Raspberry Pi 4 has 4: 25 | // - UART0 PL011 26 | // - UART1 miniUART 27 | // - UART2 PL011 (rpi4) 28 | // - UART3 PL011 (rpi4) 29 | 30 | // TODO 31 | // - Detect board type and set MMIO base address accordingly 32 | // https://wiki.osdev.org/Detecting_Raspberry_Pi_Board 33 | // - Break out mailbox, gpio code 34 | 35 | pub fn init(dt: &DeviceTree) { 36 | Console::new(|| { 37 | let uart = MiniUart::new(dt, KZERO); 38 | uart.init(); 39 | 40 | static UART: SyncUnsafeCell<MaybeUninit<MiniUart>> = 41 | SyncUnsafeCell::new(MaybeUninit::uninit()); 42 | unsafe { 43 | let cons = &mut *UART.get(); 44 | cons.write(uart); 45 | cons.assume_init_mut() 46 | } 47 | }); 48 | } 49 | -------------------------------------------------------------------------------- /aarch64/src/io.rs: -------------------------------------------------------------------------------- 1 | use core::ptr::{read_volatile, write_volatile}; 2 | use port::mem::VirtRange; 3 | 4 | #[allow(dead_code)] 5 | pub enum GpioPull { 6 | Off = 0, 7 | Down, 8 | Up, 9 | } 10 | 11 | /// Delay for count cycles 12 | #[allow(dead_code)] 13 | pub fn delay(count: u32) { 14 | for _ in 0..count { 15 | core::hint::spin_loop(); 16 | } 17 | } 18 | 19 | /// Write val into the reg RegBlock at offset from reg.addr. 20 | /// Panics if offset is outside any range specified by reg.len. 21 | pub fn write_reg(range: &VirtRange, offset: usize, val: u32) { 22 | let dst = range.offset_addr(offset).expect("offset outside bounds"); 23 | unsafe { write_volatile(dst as *mut u32, val) } 24 | } 25 | 26 | /// Write val|old into the reg RegBlock at offset from reg.addr, 27 | /// where `old` is the existing value. 28 | /// Panics if offset is outside any range specified by reg.len. 
29 | #[allow(dead_code)] 30 | pub fn write_or_reg(range: &VirtRange, offset: usize, val: u32) { 31 | let dst = range.offset_addr(offset).expect("offset outside bounds"); 32 | unsafe { 33 | let old = read_volatile(dst as *const u32); 34 | write_volatile(dst as *mut u32, val | old) 35 | } 36 | } 37 | 38 | /// Read from the reg RegBlock at offset from reg.addr. 39 | /// Panics if offset is outside any range specified by reg.len. 40 | pub fn read_reg(range: &VirtRange, offset: usize) -> u32 { 41 | let src = range.offset_addr(offset).expect("offset outside bounds"); 42 | unsafe { read_volatile(src as *const u32) } 43 | } 44 | -------------------------------------------------------------------------------- /aarch64/src/kmem.rs: -------------------------------------------------------------------------------- 1 | use crate::param::KZERO; 2 | use port::mem::{PhysAddr, PhysRange}; 3 | 4 | // These map to definitions in kernel.ld 5 | extern "C" { 6 | static etext: [u64; 0]; 7 | static erodata: [u64; 0]; 8 | static ebss: [u64; 0]; 9 | static early_pagetables: [u64; 0]; 10 | static eearly_pagetables: [u64; 0]; 11 | } 12 | 13 | pub fn text_addr() -> usize { 14 | 0xffff_8000_0000_0000 15 | } 16 | 17 | pub fn etext_addr() -> usize { 18 | unsafe { etext.as_ptr().addr() } 19 | } 20 | 21 | pub fn erodata_addr() -> usize { 22 | unsafe { erodata.as_ptr().addr() } 23 | } 24 | 25 | pub fn ebss_addr() -> usize { 26 | unsafe { ebss.as_ptr().addr() } 27 | } 28 | 29 | pub fn early_pagetables_addr() -> usize { 30 | unsafe { early_pagetables.as_ptr().addr() } 31 | } 32 | 33 | pub fn eearly_pagetables_addr() -> usize { 34 | unsafe { eearly_pagetables.as_ptr().addr() } 35 | } 36 | 37 | pub const fn physaddr_as_virt(pa: PhysAddr) -> usize { 38 | (pa.addr() as usize).wrapping_add(KZERO) 39 | } 40 | 41 | pub const fn physaddr_as_ptr_mut(pa: PhysAddr) -> *mut T { 42 | physaddr_as_virt(pa) as *mut T 43 | } 44 | 45 | pub const fn from_virt_to_physaddr(va: usize) -> PhysAddr { 46 | PhysAddr::new((va - KZERO) as u64) 47 | } 48 | 49 | pub fn from_ptr_to_physaddr(a: *const T) -> PhysAddr { 50 | from_virt_to_physaddr(a.addr()) 51 | } 52 | 53 | pub fn early_pages_range() -> PhysRange { 54 | PhysRange::new( 55 | from_virt_to_physaddr(early_pagetables_addr()), 56 | from_virt_to_physaddr(eearly_pagetables_addr()), 57 | ) 58 | } 59 | -------------------------------------------------------------------------------- /aarch64/src/mailbox.rs: -------------------------------------------------------------------------------- 1 | use crate::io::{read_reg, write_reg}; 2 | use crate::param::KZERO; 3 | use core::cell::SyncUnsafeCell; 4 | use core::mem::MaybeUninit; 5 | use port::fdt::DeviceTree; 6 | use port::mcslock::{Lock, LockNode}; 7 | use port::mem::{PhysAddr, PhysRange, VirtRange}; 8 | 9 | const MBOX_READ: usize = 0x00; 10 | const MBOX_STATUS: usize = 0x18; 11 | const MBOX_WRITE: usize = 0x20; 12 | 13 | const MBOX_FULL: u32 = 0x8000_0000; 14 | const MBOX_EMPTY: u32 = 0x4000_0000; 15 | 16 | static MAILBOX: Lock> = Lock::new("mailbox", None); 17 | 18 | /// Mailbox init. Mainly initialises a lock to ensure only one mailbox request 19 | /// can be made at a time. We have no heap at this point, so creating a mailbox 20 | /// that can be initialised based off the devicetree is rather convoluted. 
21 | pub fn init(dt: &DeviceTree) { 22 | let node = LockNode::new(); 23 | let mut mailbox = MAILBOX.lock(&node); 24 | *mailbox = Some({ 25 | static MAYBE_MAILBOX: SyncUnsafeCell> = 26 | SyncUnsafeCell::new(MaybeUninit::uninit()); 27 | unsafe { 28 | let maybe_mailbox = &mut *MAYBE_MAILBOX.get(); 29 | maybe_mailbox.write(Mailbox::new(dt, KZERO)); 30 | maybe_mailbox.assume_init_mut() 31 | } 32 | }); 33 | } 34 | 35 | /// https://developer.arm.com/documentation/ddi0306/b/CHDGHAIG 36 | /// https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface 37 | struct Mailbox { 38 | pub mbox_range: VirtRange, 39 | } 40 | 41 | impl Mailbox { 42 | fn new(dt: &DeviceTree, mmio_virt_offset: usize) -> Mailbox { 43 | Mailbox { 44 | mbox_range: VirtRange::from( 45 | &dt.find_compatible("brcm,bcm2835-mbox") 46 | .next() 47 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 48 | .and_then(|reg| reg.regblock()) 49 | .unwrap() 50 | .with_offset(mmio_virt_offset as u64), 51 | ), 52 | } 53 | } 54 | 55 | fn request(&self, req: &mut Message) 56 | where 57 | T: Copy, 58 | U: Copy, 59 | { 60 | // Read status register until full flag not set 61 | while (read_reg(&self.mbox_range, MBOX_STATUS) & MBOX_FULL) != 0 {} 62 | 63 | // Write the request address combined with the channel to the write register 64 | let channel = ChannelId::ArmToVc as u32; 65 | let uart_mbox_u32 = req as *const _ as u32; 66 | let r = (uart_mbox_u32 & !0xF) | channel; 67 | write_reg(&self.mbox_range, MBOX_WRITE, r); 68 | 69 | // Wait for response 70 | // FIXME: two infinite loops - can go awry 71 | loop { 72 | while (read_reg(&self.mbox_range, MBOX_STATUS) & MBOX_EMPTY) != 0 {} 73 | let response = read_reg(&self.mbox_range, MBOX_READ); 74 | if response == r { 75 | break; 76 | } 77 | } 78 | } 79 | } 80 | 81 | #[repr(u8)] 82 | enum ChannelId { 83 | ArmToVc = 8, 84 | } 85 | 86 | #[repr(C)] 87 | #[derive(Debug, Clone, Copy)] 88 | struct Request { 89 | size: u32, // size in bytes 90 | code: u32, // request code (0) 91 | tags: T, 92 | } 93 | 94 | #[repr(C)] 95 | #[derive(Debug, Clone, Copy)] 96 | struct Response { 97 | size: u32, // size in bytes 98 | code: u32, // response code 99 | tags: T, 100 | } 101 | 102 | #[repr(C)] 103 | #[derive(Debug, Clone, Copy)] 104 | struct Tag { 105 | tag_id0: TagId, 106 | tag_buffer_size0: u32, 107 | tag_code0: u32, 108 | body: T, 109 | end_tag: u32, 110 | } 111 | 112 | #[repr(C, align(16))] 113 | #[derive(Clone, Copy)] 114 | union Message { 115 | request: Request, 116 | response: Response, 117 | } 118 | 119 | type MessageWithTags = Message, Tag>; 120 | 121 | fn request(code: u32, tags: &Tag) -> U 122 | where 123 | T: Copy, 124 | U: Copy, 125 | { 126 | let size = size_of::>() as u32; 127 | let req = Request::> { size, code, tags: *tags }; 128 | let mut msg = MessageWithTags { request: req }; 129 | let node = LockNode::new(); 130 | let mut mailbox = MAILBOX.lock(&node); 131 | mailbox.as_deref_mut().unwrap().request(&mut msg); 132 | let res = unsafe { msg.response }; 133 | res.tags.body 134 | } 135 | 136 | // https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface#tags-arm-to-vc 137 | #[repr(u32)] 138 | #[derive(Debug, Clone, Copy)] 139 | enum TagId { 140 | GetFirmwareRevision = 0x0000_0001, 141 | GetBoardModel = 0x0001_0001, 142 | GetBoardRevision = 0x0001_0002, 143 | GetBoardMacAddress = 0x0001_0003, 144 | GetBoardSerial = 0x0001_0004, 145 | GetArmMemory = 0x0001_0005, 146 | GetVcMemory = 0x0001_0006, 147 | SetClockRate = 0x0003_8002, 148 | } 149 | 150 | #[repr(C)] 151 | 
#[derive(Debug, Clone, Copy)] 152 | struct SetClockRateRequest { 153 | clock_id: u32, 154 | rate_hz: u32, 155 | skip_setting_turbo: u32, 156 | } 157 | 158 | #[repr(C)] 159 | #[derive(Debug, Clone, Copy)] 160 | struct SetClockRateResponse { 161 | clock_id: u32, 162 | rate_hz: u32, 163 | } 164 | 165 | #[allow(dead_code)] 166 | pub fn set_clock_rate(clock_id: u32, rate_hz: u32, skip_setting_turbo: u32) { 167 | let tags = Tag:: { 168 | tag_id0: TagId::SetClockRate, 169 | tag_buffer_size0: 12, 170 | tag_code0: 0, 171 | body: SetClockRateRequest { clock_id, rate_hz, skip_setting_turbo }, 172 | end_tag: 0, 173 | }; 174 | let _: SetClockRateResponse = request(0, &tags); 175 | } 176 | 177 | #[repr(C)] 178 | #[derive(Debug, Clone, Copy)] 179 | struct EmptyRequest {} 180 | 181 | #[repr(C)] 182 | #[derive(Debug, Clone, Copy)] 183 | struct MemoryResponse { 184 | base_addr: u32, 185 | size: u32, 186 | } 187 | 188 | #[repr(C)] 189 | #[derive(Debug, Clone, Copy)] 190 | #[allow(dead_code)] 191 | pub struct MemoryInfo { 192 | pub start: u32, 193 | pub size: u32, 194 | pub end: u32, 195 | } 196 | 197 | pub fn get_arm_memory() -> PhysRange { 198 | let tags = Tag:: { 199 | tag_id0: TagId::GetArmMemory, 200 | tag_buffer_size0: 12, 201 | tag_code0: 0, 202 | body: EmptyRequest {}, 203 | end_tag: 0, 204 | }; 205 | let res: MemoryResponse = request(0, &tags); 206 | let start = res.base_addr; 207 | let size = res.size; 208 | let end = start + size; 209 | 210 | PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64)) 211 | } 212 | 213 | pub fn get_vc_memory() -> PhysRange { 214 | let tags = Tag:: { 215 | tag_id0: TagId::GetVcMemory, 216 | tag_buffer_size0: 12, 217 | tag_code0: 0, 218 | body: EmptyRequest {}, 219 | end_tag: 0, 220 | }; 221 | let res: MemoryResponse = request(0, &tags); 222 | let start = res.base_addr; 223 | let size = res.size; 224 | let end = start + size; 225 | 226 | PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64)) 227 | } 228 | 229 | pub fn get_firmware_revision() -> u32 { 230 | let tags = Tag:: { 231 | tag_id0: TagId::GetFirmwareRevision, 232 | tag_buffer_size0: 4, 233 | tag_code0: 0, 234 | body: EmptyRequest {}, 235 | end_tag: 0, 236 | }; 237 | request::<_, u32>(0, &tags) 238 | } 239 | 240 | pub fn get_board_model() -> u32 { 241 | let tags = Tag:: { 242 | tag_id0: TagId::GetBoardModel, 243 | tag_buffer_size0: 4, 244 | tag_code0: 0, 245 | body: EmptyRequest {}, 246 | end_tag: 0, 247 | }; 248 | request::<_, u32>(0, &tags) 249 | } 250 | 251 | pub fn get_board_revision() -> u32 { 252 | let tags = Tag:: { 253 | tag_id0: TagId::GetBoardRevision, 254 | tag_buffer_size0: 4, 255 | tag_code0: 0, 256 | body: EmptyRequest {}, 257 | end_tag: 0, 258 | }; 259 | request::<_, u32>(0, &tags) 260 | } 261 | 262 | #[repr(C)] 263 | #[derive(Debug, Clone, Copy)] 264 | pub struct MacAddress { 265 | pub a: u8, 266 | pub b: u8, 267 | pub c: u8, 268 | pub d: u8, 269 | pub e: u8, 270 | pub f: u8, 271 | } 272 | 273 | pub fn get_board_macaddr() -> MacAddress { 274 | let tags = Tag:: { 275 | tag_id0: TagId::GetBoardMacAddress, 276 | tag_buffer_size0: 6, 277 | tag_code0: 0, 278 | body: EmptyRequest {}, 279 | end_tag: 0, 280 | }; 281 | request::<_, MacAddress>(0, &tags) 282 | } 283 | 284 | pub fn get_board_serial() -> u64 { 285 | let tags = Tag:: { 286 | tag_id0: TagId::GetBoardSerial, 287 | tag_buffer_size0: 8, 288 | tag_code0: 0, 289 | body: EmptyRequest {}, 290 | end_tag: 0, 291 | }; 292 | // FIXME: Treating this a `u64` gets us a memory address. Pointer fun ahead. 
293 | // Wrapping in a struct holding a single u64 doesn't work either. 294 | let res: [u32; 2] = request(0, &tags); 295 | ((res[0] as u64) << 32) | res[1] as u64 296 | } 297 | -------------------------------------------------------------------------------- /aarch64/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::upper_case_acronyms)] 2 | #![allow(internal_features)] 3 | #![cfg_attr(not(any(test)), no_std)] 4 | #![cfg_attr(not(test), no_main)] 5 | #![feature(alloc_error_handler)] 6 | #![feature(core_intrinsics)] 7 | #![feature(strict_provenance)] 8 | #![feature(sync_unsafe_cell)] 9 | #![forbid(unsafe_op_in_unsafe_fn)] 10 | 11 | mod devcons; 12 | mod io; 13 | mod kmem; 14 | mod mailbox; 15 | mod pagealloc; 16 | mod param; 17 | mod registers; 18 | mod trap; 19 | mod uartmini; 20 | mod uartpl011; 21 | mod vm; 22 | 23 | use crate::kmem::from_virt_to_physaddr; 24 | use crate::vm::kernel_root; 25 | use core::ffi::c_void; 26 | use core::ptr; 27 | use port::fdt::DeviceTree; 28 | use port::mem::PhysRange; 29 | use port::println; 30 | use vm::PageTable; 31 | 32 | #[cfg(not(test))] 33 | core::arch::global_asm!(include_str!("l.S")); 34 | 35 | static mut KPGTBL: PageTable = PageTable::empty(); 36 | 37 | unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_void) { 38 | let start = start as *const _ as u64; 39 | let end = end as *const _ as u64; 40 | let size = end - start; 41 | println!(" {name}{start:#x}..{end:#x} ({size:#x})"); 42 | } 43 | 44 | fn print_binary_sections() { 45 | extern "C" { 46 | static boottext: *const c_void; 47 | static eboottext: *const c_void; 48 | static text: *const c_void; 49 | static etext: *const c_void; 50 | static rodata: *const c_void; 51 | static erodata: *const c_void; 52 | static data: *const c_void; 53 | static edata: *const c_void; 54 | static bss: *const c_void; 55 | static end: *const c_void; 56 | } 57 | 58 | println!("Binary sections:"); 59 | unsafe { 60 | print_memory_range("boottext:\t", &boottext, &eboottext); 61 | print_memory_range("text:\t\t", &text, &etext); 62 | print_memory_range("rodata:\t", &rodata, &erodata); 63 | print_memory_range("data:\t\t", &data, &edata); 64 | print_memory_range("bss:\t\t", &bss, &end); 65 | print_memory_range("total:\t", &boottext, &end); 66 | } 67 | } 68 | 69 | fn print_physical_memory_info() { 70 | println!("Physical memory map:"); 71 | let arm_mem = mailbox::get_arm_memory(); 72 | println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size()); 73 | let vc_mem = mailbox::get_vc_memory(); 74 | println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size()); 75 | } 76 | 77 | fn print_memory_info() { 78 | println!("Memory usage:"); 79 | let (used, total) = pagealloc::usage_bytes(); 80 | println!(" Used:\t\t{used:#016x}"); 81 | println!(" Total:\t{total:#016x}"); 82 | } 83 | 84 | // https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc 85 | fn print_pi_name(board_revision: u32) { 86 | let name = match board_revision { 87 | 0xa21041 => "Raspberry Pi 2B", 88 | 0xa02082 => "Raspberry Pi 3B", 89 | 0xb03115 => "Raspberry Pi 4B", 90 | 0xa220a0 => "Raspberry Compute Module 3", 91 | _ => "Unrecognised", 92 | }; 93 | println!(" Board Name:\t{name}"); 94 | } 95 | 96 | fn print_board_info() { 97 | println!("Board information:"); 98 | let board_revision = mailbox::get_board_revision(); 99 | print_pi_name(board_revision); 100 | println!(" Board Rev:\t{board_revision:#010x}"); 101 | let model = 
mailbox::get_board_model(); 102 | println!(" Board Model:\t{model:#010x}"); 103 | let serial = mailbox::get_board_serial(); 104 | println!(" Serial Num:\t{serial:#010x}"); 105 | let mailbox::MacAddress { a, b, c, d, e, f } = mailbox::get_board_macaddr(); 106 | println!(" MAC Address:\t{a:02x}:{b:02x}:{c:02x}:{d:02x}:{e:02x}:{f:02x}"); 107 | let fw_revision = mailbox::get_firmware_revision(); 108 | println!(" Firmware Rev:\t{fw_revision:#010x}"); 109 | } 110 | 111 | /// dtb_va is the virtual address of the DTB structure. The physical address is 112 | /// assumed to be dtb_va-KZERO. 113 | #[no_mangle] 114 | pub extern "C" fn main9(dtb_va: usize) { 115 | trap::init(); 116 | 117 | // Parse the DTB before we set up memory so we can correctly map it 118 | let dt = unsafe { DeviceTree::from_usize(dtb_va).unwrap() }; 119 | 120 | // Set up uart so we can log as early as possible 121 | mailbox::init(&dt); 122 | devcons::init(&dt); 123 | 124 | println!(); 125 | println!("r9 from the Internet"); 126 | println!("DTB found at: {:#x}", dtb_va); 127 | println!("midr_el1: {:?}", registers::MidrEl1::read()); 128 | 129 | print_binary_sections(); 130 | print_physical_memory_info(); 131 | print_board_info(); 132 | 133 | // Map address space accurately using rust VM code to manage page tables 134 | unsafe { 135 | let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size()); 136 | vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory()); 137 | vm::switch(&*ptr::addr_of!(KPGTBL)); 138 | } 139 | 140 | // From this point we can use the global allocator 141 | 142 | print_memory_info(); 143 | 144 | kernel_root().print_recursive_tables(); 145 | 146 | println!("looping now"); 147 | 148 | #[allow(clippy::empty_loop)] 149 | loop {} 150 | } 151 | mod runtime; 152 | -------------------------------------------------------------------------------- /aarch64/src/pagealloc.rs: -------------------------------------------------------------------------------- 1 | /// This module acts as an interface between the portable allocator and the 2 | /// arch-specific use of it. 3 | /// 4 | /// The page allocator is constructed and finalised in a number of phases: 5 | /// 1. `init_page_allocator` to create a fixed size allocator assuming everything 6 | /// is in use except a small number of statically defined pages available for 7 | /// setting up the initial page tables. 8 | /// 2. `free_unused_ranges` to mark available ranges as the inverse of the 9 | /// physical memory map within the bounds of the available memory. 10 | use crate::kmem; 11 | use crate::kmem::physaddr_as_ptr_mut; 12 | use crate::vm::Page4K; 13 | use port::bitmapalloc::BitmapPageAlloc; 14 | use port::bitmapalloc::BitmapPageAllocError; 15 | use port::mem::PhysRange; 16 | use port::{ 17 | mcslock::{Lock, LockNode}, 18 | mem::PAGE_SIZE_4K, 19 | }; 20 | 21 | /// Set up bitmap page allocator assuming everything is allocated. 22 | static PAGE_ALLOC: Lock<BitmapPageAlloc<32, PAGE_SIZE_4K>> = Lock::new( 23 | "page_alloc", 24 | const { BitmapPageAlloc::<32, PAGE_SIZE_4K>::new_all_allocated(PAGE_SIZE_4K) }, 25 | ); 26 | 27 | /// The bitmap allocator has all pages marked as allocated initially. We'll 28 | /// add some pages (mark free) to allow us to set up the page tables and build 29 | /// a memory map. Once the memory map has been built, we can mark all the unused 30 | /// space as available. This allows us to use only one page allocator throughout. 
31 | pub fn init_page_allocator() { 32 | let node = LockNode::new(); 33 | let mut lock = PAGE_ALLOC.lock(&node); 34 | let page_alloc = &mut *lock; 35 | 36 | let early_pages_range = kmem::early_pages_range(); 37 | if let Err(err) = page_alloc.mark_free(&early_pages_range) { 38 | panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err); 39 | } 40 | } 41 | 42 | /// Free unused pages in mem that aren't covered by the memory map. Assumes 43 | /// that `used_ranges` is sorted. 44 | pub fn free_unused_ranges<'a>( 45 | available_mem: &PhysRange, 46 | used_ranges: impl Iterator<Item = &'a PhysRange>, 47 | ) -> Result<(), BitmapPageAllocError> { 48 | let node = LockNode::new(); 49 | let mut lock = PAGE_ALLOC.lock(&node); 50 | let page_alloc = &mut *lock; 51 | 52 | page_alloc.free_unused_ranges(available_mem, used_ranges) 53 | } 54 | 55 | /// Try to allocate a page 56 | pub fn allocate() -> Result<&'static mut Page4K, BitmapPageAllocError> { 57 | let node = LockNode::new(); 58 | let mut lock = PAGE_ALLOC.lock(&node); 59 | let page_alloc = &mut *lock; 60 | 61 | match page_alloc.allocate() { 62 | Ok(page_pa) => Ok(unsafe { &mut *physaddr_as_ptr_mut::<Page4K>(page_pa) }), 63 | Err(err) => Err(err), 64 | } 65 | } 66 | 67 | /// Return a tuple of (bytes used, total bytes available) based on the page allocator. 68 | pub fn usage_bytes() -> (usize, usize) { 69 | let node = LockNode::new(); 70 | let mut lock = PAGE_ALLOC.lock(&node); 71 | let page_alloc = &mut *lock; 72 | page_alloc.usage_bytes() 73 | } 74 | -------------------------------------------------------------------------------- /aarch64/src/param.rs: -------------------------------------------------------------------------------- 1 | // This needs to match KZERO in l.S 2 | pub const KZERO: usize = 0xffff_8000_0000_0000; 3 | -------------------------------------------------------------------------------- /aarch64/src/registers.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_upper_case_globals)] 2 | 3 | use bitstruct::bitstruct; 4 | use core::fmt; 5 | use num_enum::TryFromPrimitive; 6 | use port::mem::{PhysRange, PAGE_SIZE_2M}; 7 | 8 | // GPIO registers 9 | pub const GPFSEL1: usize = 0x04; // GPIO function select register 1 10 | pub const GPPUD: usize = 0x94; // GPIO pin pull up/down enable 11 | pub const GPPUDCLK0: usize = 0x98; // GPIO pin pull up/down enable clock 0 12 | 13 | // UART 0 (PL011) registers 14 | pub const UART0_DR: usize = 0x00; // Data register 15 | pub const UART0_FR: usize = 0x18; // Flag register 16 | pub const UART0_IBRD: usize = 0x24; // Integer baud rate divisor 17 | pub const UART0_FBRD: usize = 0x28; // Fractional baud rate divisor 18 | pub const UART0_LCRH: usize = 0x2c; // Line control register 19 | pub const UART0_CR: usize = 0x30; // Control register 20 | pub const UART0_IMSC: usize = 0x38; // Interrupt mask set clear register 21 | pub const UART0_ICR: usize = 0x44; // Interrupt clear register 22 | 23 | // AUX registers, offset from aux_reg 24 | pub const AUX_ENABLE: usize = 0x04; // AUX enable register (Mini Uart, SPIs) 25 | 26 | // UART1 registers, offset from miniuart_reg 27 | pub const AUX_MU_IO: usize = 0x00; // AUX IO data register 28 | pub const AUX_MU_IER: usize = 0x04; // Mini Uart interrupt enable register 29 | pub const AUX_MU_IIR: usize = 0x08; // Mini Uart interrupt identify register 30 | pub const AUX_MU_LCR: usize = 0x0c; // Mini Uart line control register 31 | pub const AUX_MU_MCR: usize = 0x10; // Mini Uart modem control register 32 | pub const AUX_MU_LSR: 
usize = 0x14; // Mini Uart line status register 33 | pub const AUX_MU_CNTL: usize = 0x20; // Mini Uart control register 34 | pub const AUX_MU_BAUD: usize = 0x28; // Mini Uart baudrate register 35 | 36 | bitstruct! { 37 | #[derive(Copy, Clone)] 38 | pub struct MidrEl1(pub u64) { 39 | revision: u8 = 0..4; 40 | partnum: u16 = 4..16; 41 | architecture: u8 = 16..20; 42 | variant: u8 = 20..24; 43 | implementer: u16 = 24..32; 44 | } 45 | } 46 | 47 | impl MidrEl1 { 48 | pub fn read() -> Self { 49 | #[cfg(not(test))] 50 | { 51 | let mut value: u64; 52 | unsafe { 53 | core::arch::asm!("mrs {value}, midr_el1", value = out(reg) value); 54 | } 55 | Self(value) 56 | } 57 | #[cfg(test)] 58 | Self(0) 59 | } 60 | 61 | pub fn partnum_enum(&self) -> Result { 62 | PartNum::try_from(self.partnum()).map_err(|e| e.number) 63 | } 64 | } 65 | 66 | impl fmt::Debug for MidrEl1 { 67 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 68 | f.debug_struct("MidrEl1") 69 | .field("revision", &format_args!("{:#x}", self.revision())) 70 | .field( 71 | "partnum", 72 | &format_args!("{:?}", self.partnum_enum().unwrap_or(PartNum::Unknown)), 73 | ) 74 | .field("architecture", &format_args!("{:#x}", self.architecture())) 75 | .field("variant", &format_args!("{:#x}", self.variant())) 76 | .field("implementer", &format_args!("{:#x}", self.implementer())) 77 | .finish() 78 | } 79 | } 80 | 81 | /// Known IDs for midr_el1's partnum 82 | #[derive(Debug, Eq, PartialEq, TryFromPrimitive)] 83 | #[repr(u16)] 84 | pub enum PartNum { 85 | Unknown = 0, 86 | RaspberryPi1 = 0xb76, 87 | RaspberryPi2 = 0xc07, 88 | RaspberryPi3 = 0xd03, 89 | RaspberryPi4 = 0xd08, 90 | } 91 | 92 | impl PartNum { 93 | /// Return the physical MMIO base range for the Raspberry Pi MMIO 94 | pub fn mmio(&self) -> Option { 95 | let len = 2 * PAGE_SIZE_2M; 96 | match self { 97 | Self::RaspberryPi1 => Some(PhysRange::with_len(0x20000000, len)), 98 | Self::RaspberryPi2 | Self::RaspberryPi3 => Some(PhysRange::with_len(0x3f000000, len)), 99 | Self::RaspberryPi4 => Some(PhysRange::with_len(0xfe000000, len)), 100 | Self::Unknown => None, 101 | } 102 | } 103 | } 104 | 105 | pub fn rpi_mmio() -> Option { 106 | MidrEl1::read().partnum_enum().ok().and_then(|p| p.mmio()) 107 | } 108 | 109 | bitstruct! { 110 | #[derive(Copy, Clone)] 111 | pub struct EsrEl1(pub u64) { 112 | iss: u32 = 0..25; 113 | il: bool = 25; 114 | ec: u8 = 26..32; 115 | iss2: u8 = 32..37; 116 | } 117 | } 118 | 119 | impl EsrEl1 { 120 | /// Try to convert the error into an ExceptionClass enum, or return the original number 121 | /// as the error. 122 | pub fn exception_class_enum(&self) -> Result { 123 | ExceptionClass::try_from(self.ec()).map_err(|e| e.number) 124 | } 125 | } 126 | 127 | impl fmt::Debug for EsrEl1 { 128 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 129 | f.debug_struct("EsrEl1") 130 | .field("iss", &format_args!("{:#010x}", self.iss())) 131 | .field("il", &format_args!("{}", self.il())) 132 | .field("ec", &format_args!("{:?}", self.exception_class_enum())) 133 | .field("iss2", &format_args!("{:#04x}", self.iss2())) 134 | .finish() 135 | } 136 | } 137 | 138 | /// Exception class maps to ESR_EL1 EC bits[31:26]. We skip aarch32 exceptions. 
139 | #[derive(Debug, Eq, PartialEq, TryFromPrimitive)] 140 | #[repr(u8)] 141 | pub enum ExceptionClass { 142 | Unknown = 0, 143 | WaitFor = 1, 144 | FloatSimd = 7, 145 | Ls64 = 10, 146 | BranchTargetException = 13, 147 | IllegalExecutionState = 14, 148 | MsrMrsSystem = 24, 149 | Sve = 25, 150 | Tstart = 27, 151 | PointerAuthFailure = 28, 152 | Sme = 29, 153 | GranuleProtectionCheck = 30, 154 | InstructionAbortLowerEl = 32, 155 | InstructionAbortSameEl = 33, 156 | PcAlignmentFault = 34, 157 | DataAbortLowerEl = 36, 158 | DataAbortSameEl = 37, 159 | SpAlignmentFault = 38, 160 | MemoryOperationException = 39, 161 | TrappedFloatingPointException = 44, 162 | SError = 47, 163 | BreakpointLowerEl = 48, 164 | BreakpointSameEl = 49, 165 | SoftwareStepLowerEl = 50, 166 | SoftwareStepSameEl = 51, 167 | WatchpointLowerEl = 52, 168 | WatchpointSameEl = 53, 169 | Brk = 60, 170 | } 171 | 172 | bitstruct! { 173 | #[derive(Copy, Clone)] 174 | pub struct EsrEl1IssInstructionAbort(pub u32) { 175 | ifsc: u8 = 0..6; 176 | s1ptw: bool = 7; 177 | ea: bool = 9; 178 | fnv: bool = 10; 179 | set: u8 = 11..13; 180 | } 181 | } 182 | 183 | #[allow(dead_code)] 184 | impl EsrEl1IssInstructionAbort { 185 | pub fn from_esr_el1(r: EsrEl1) -> Option { 186 | r.exception_class_enum() 187 | .ok() 188 | .filter(|ec| *ec == ExceptionClass::InstructionAbortSameEl) 189 | .map(|_| EsrEl1IssInstructionAbort(r.iss())) 190 | } 191 | 192 | pub fn instruction_fault(&self) -> Result { 193 | InstructionFaultStatusCode::try_from(self.ifsc()).map_err(|e| e.number) 194 | } 195 | } 196 | 197 | #[derive(Debug, Eq, PartialEq, TryFromPrimitive)] 198 | #[repr(u8)] 199 | pub enum InstructionFaultStatusCode { 200 | AddressSizeFaultLevel0 = 0, 201 | AddressSizeFaultLevel1 = 1, 202 | AddressSizeFaultLevel2 = 2, 203 | AddressSizeFaultLevel3 = 3, 204 | TranslationFaultLevel0 = 4, 205 | TranslationFaultLevel1 = 5, 206 | TranslationFaultLevel2 = 6, 207 | TranslationFaultLevel3 = 7, 208 | AccessFlagFaultLevel0 = 8, 209 | AccessFlagFaultLevel1 = 9, 210 | AccessFlagFaultLevel2 = 10, 211 | AccessFlagFaultLevel3 = 11, 212 | PermissionFaultLevel0 = 12, 213 | PermissionFaultLevel1 = 13, 214 | PermissionFaultLevel2 = 14, 215 | PermissionFaultLevel3 = 15, 216 | SyncExtAbortNotOnWalkOrUpdate = 16, 217 | SyncExtAbortOnWalkOrUpdateLevelNeg1 = 19, 218 | SyncExtAbortOnWalkOrUpdateLevel0 = 20, 219 | SyncExtAbortOnWalkOrUpdateLevel1 = 21, 220 | SyncExtAbortOnWalkOrUpdateLevel2 = 22, 221 | SyncExtAbortOnWalkOrUpdateLevel3 = 23, 222 | SyncParityOrEccErrOnMemAccessNotOnWalk = 24, 223 | SyncParityOrEccErrOnMemAccessOnWalkOrUpdateLevelNeg1 = 27, 224 | SyncParityOrEccErrOnMemAccessOnWalkOrUpdateLevel0 = 28, 225 | SyncParityOrEccErrOnMemAccessOnWalkOrUpdateLevel1 = 29, 226 | SyncParityOrEccErrOnMemAccessOnWalkOrUpdateLevel2 = 30, 227 | SyncParityOrEccErrOnMemAccessOnWalkOrUpdateLevel3 = 31, 228 | GranuleProtectFaultOnWalkOrUpdateLevelNeg1 = 35, 229 | GranuleProtectFaultOnWalkOrUpdateLevel0 = 36, 230 | GranuleProtectFaultOnWalkOrUpdateLevel1 = 37, 231 | GranuleProtectFaultOnWalkOrUpdateLevel2 = 38, 232 | GranuleProtectFaultOnWalkOrUpdateLevel3 = 39, 233 | GranuleProtectFaultNotOnWalkOrUpdateLevel = 40, 234 | AddressSizeFaultLevelNeg1 = 41, 235 | TranslationFaultLevelNeg1 = 43, 236 | TlbConflictAbort = 48, 237 | UnsupportedAtomicHardwareUpdateFault = 49, 238 | } 239 | 240 | #[cfg(test)] 241 | mod tests { 242 | use super::*; 243 | 244 | // This test is useful for making sense of early-stage exceptions. Qemu 245 | // will report an exception of the form below. 
Copy the ESR value into 246 | // this test to break it down. 247 | // 248 | // Exception return from AArch64 EL2 to AArch64 EL1 PC 0x8006c 249 | // Taking exception 3 [Prefetch Abort] on CPU 0 250 | // ...from EL1 to EL1 251 | // ...with ESR 0x21/0x86000004 252 | // ...with FAR 0x80090 253 | // ...with ELR 0x80090 254 | // ...to EL1 PC 0x200 PSTATE 0x3c5 255 | #[test] 256 | fn test_parse_esr_el1() { 257 | let r = EsrEl1(0x86000004); 258 | assert_eq!(r.exception_class_enum().unwrap(), ExceptionClass::InstructionAbortSameEl); 259 | assert_eq!( 260 | EsrEl1IssInstructionAbort::from_esr_el1(r).unwrap().instruction_fault().unwrap(), 261 | InstructionFaultStatusCode::TranslationFaultLevel0 262 | ); 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /aarch64/src/runtime.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(test))] 2 | 3 | extern crate alloc; 4 | 5 | use crate::kmem::physaddr_as_virt; 6 | use crate::registers::rpi_mmio; 7 | use crate::uartmini::MiniUart; 8 | use alloc::alloc::Layout; 9 | use core::fmt::Write; 10 | use core::panic::PanicInfo; 11 | use port::devcons::PanicConsole; 12 | use port::mem::VirtRange; 13 | 14 | // TODO 15 | // - Add qemu integration test 16 | // - Use Console via println!() macro once available 17 | // - Add support for raspi4 18 | #[panic_handler] 19 | pub fn panic(info: &PanicInfo) -> ! { 20 | let mmio = physaddr_as_virt(rpi_mmio().expect("mmio base detect failed").start()); 21 | 22 | let gpio_range = VirtRange::with_len(mmio + 0x200000, 0xb4); 23 | let aux_range = VirtRange::with_len(mmio + 0x215000, 0x8); 24 | let miniuart_range = VirtRange::with_len(mmio + 0x215040, 0x40); 25 | 26 | let uart = MiniUart { gpio_range, aux_range, miniuart_range }; 27 | //uart.init(); 28 | 29 | PanicConsole::new(uart).write_fmt(format_args!("{}\n", info)).unwrap(); 30 | 31 | // TODO Once the Console is available, we should use this 32 | // println!("{}", info); 33 | 34 | #[allow(clippy::empty_loop)] 35 | loop {} 36 | } 37 | 38 | #[alloc_error_handler] 39 | fn oom(_layout: Layout) -> ! 
{ 40 | panic!("oom"); 41 | } 42 | -------------------------------------------------------------------------------- /aarch64/src/trap.S: -------------------------------------------------------------------------------- 1 | .section .text 2 | 3 | .equ SYNC_INVALID_EL1t, 0 4 | .equ IRQ_INVALID_EL1t, 1 5 | .equ FIQ_INVALID_EL1t, 2 6 | .equ ERROR_INVALID_EL1t, 3 7 | 8 | .equ SYNC_INVALID_EL1h, 4 9 | .equ IRQ_INVALID_EL1h, 5 10 | .equ FIQ_INVALID_EL1h, 6 11 | .equ ERROR_INVALID_EL1h, 7 12 | 13 | .equ SYNC_INVALID_EL0_64, 8 14 | .equ IRQ_INVALID_EL0_64, 9 15 | .equ FIQ_INVALID_EL0_64, 10 16 | .equ ERROR_INVALID_EL0_64, 11 17 | 18 | .equ SYNC_INVALID_EL0_32, 12 19 | .equ IRQ_INVALID_EL0_32, 13 20 | .equ FIQ_INVALID_EL0_32, 14 21 | .equ ERROR_INVALID_EL0_32, 15 22 | 23 | // Ventry aligns to 128 bytes and branches to label, and is used for the interrupt vector table 24 | .macro ventry label 25 | .balign 128 26 | b \label 27 | .endm 28 | 29 | // We save all general purpose registers, and a few useful system registers: 30 | // - x0-28 31 | // - x29 (Frame pointer) 32 | // - x30 (Link register) 33 | // We additionally save the registers below, so that they're availalble via the TrapFrame 34 | // - ESR_EL1 (Exception syndrome register EL1) 35 | // - ELR_EL1 (Exception link register EL1) 36 | // - FAR_EL1 (Fault address register EL1) 37 | .macro handle_interrupt type 38 | sub sp, sp, #288 39 | 40 | // Caller-saved registers, FP 41 | stp x0, x1, [sp, #16 * 0] 42 | stp x2, x3, [sp, #16 * 1] 43 | stp x4, x5, [sp, #16 * 2] 44 | stp x6, x7, [sp, #16 * 3] 45 | stp x8, x9, [sp, #16 * 4] 46 | stp x10, x11, [sp, #16 * 5] 47 | stp x12, x13, [sp, #16 * 6] 48 | stp x14, x15, [sp, #16 * 7] 49 | stp x16, x17, [sp, #16 * 8] 50 | stp x18, x19, [sp, #16 * 9] 51 | stp x20, x21, [sp, #16 * 10] 52 | stp x22, x23, [sp, #16 * 11] 53 | stp x24, x25, [sp, #16 * 12] 54 | stp x26, x27, [sp, #16 * 13] 55 | stp x28, x29, [sp, #16 * 14] 56 | 57 | // LR, ESR_EL1 58 | mrs x0, esr_el1 59 | stp x30, x0, [sp, #16 * 15] 60 | 61 | // ELR_EL1, FAR_EL1 62 | mrs x1, elr_el1 63 | mrs x2, far_el1 64 | stp x1, x2, [sp, #16 * 16] 65 | 66 | // Interrupt type 67 | ldr x3, =\type 68 | str x3, [sp, #16 * 17] 69 | 70 | // Pass pointer to TrapFrame (on stack) as the first arg 71 | mov x0, sp 72 | bl trap_unsafe 73 | 74 | // Restore caller-saved registers 75 | ldp x0, x1, [sp, #16 * 0] 76 | ldp x2, x3, [sp, #16 * 1] 77 | ldp x4, x5, [sp, #16 * 2] 78 | ldp x6, x7, [sp, #16 * 3] 79 | ldp x8, x9, [sp, #16 * 4] 80 | ldp x10, x11, [sp, #16 * 5] 81 | ldp x12, x13, [sp, #16 * 6] 82 | ldp x14, x15, [sp, #16 * 7] 83 | ldp x16, x17, [sp, #16 * 8] 84 | ldp x18, x19, [sp, #16 * 9] 85 | ldp x20, x21, [sp, #16 * 10] 86 | ldp x22, x23, [sp, #16 * 11] 87 | ldp x24, x25, [sp, #16 * 12] 88 | ldp x26, x27, [sp, #16 * 13] 89 | ldp x28, x29, [sp, #16 * 14] 90 | ldr x30, [sp, #16 * 15] 91 | 92 | // Remaining stack frame consists of systems registers we can just ignore 93 | add sp, sp, #288 94 | 95 | eret 96 | .endm 97 | 98 | /// The exception vector table for exceptions taken to EL1. 99 | /// Each entry is 16 instructions/128 bytes. 100 | /// Ventry handles alignment of individual entries. 
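/// Offsets within the table are fixed by the architecture: entry i lives at
/// VBAR_EL1 + i*128. For example, a synchronous exception taken at EL1 while
/// using SP_EL1 (EL1h) vectors to entry 4, i.e. offset 4*128 = 0x200, which
/// lines up with the "...to EL1 PC 0x200" line in the QEMU trace quoted in
/// registers.rs (assuming VBAR_EL1 was still 0 at that point).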
101 | .balign 2048 102 | .globl exception_vectors 103 | exception_vectors: 104 | // Current EL with SP0 105 | ventry sync_invalid_el1t // Synchronous EL1t 106 | ventry irq_invalid_el1t // IRQ EL1t 107 | ventry fiq_invalid_el1t // FIQ EL1t 108 | ventry error_invalid_el1t // Error EL1t 109 | 110 | // Current EL with SPx 111 | ventry sync_invalid_el1h // Synchronous EL1h 112 | ventry irq_invalid_el1h // IRQ EL1h 113 | ventry fiq_invalid_el1h // FIQ EL1h 114 | ventry error_invalid_el1h // Error EL1h 115 | 116 | // Lower EL using AArch64 117 | ventry sync_invalid_el0_64 // Synchronous 64-bit EL0 118 | ventry irq_invalid_el0_64 // IRQ 64-bit EL0 119 | ventry fiq_invalid_el0_64 // FIQ 64-bit EL0 120 | ventry error_invalid_el0_64 // Error 64-bit EL0 121 | 122 | // Lower EL using AArch32 123 | ventry sync_invalid_el0_32 // Synchronous 32-bit EL0 124 | ventry irq_invalid_el0_32 // IRQ 32-bit EL0 125 | ventry fiq_invalid_el0_32 // FIQ 32-bit EL0 126 | ventry error_invalid_el0_32 // Error 32-bit EL0 127 | 128 | sync_invalid_el1t: 129 | handle_interrupt SYNC_INVALID_EL1t 130 | 131 | irq_invalid_el1t: 132 | handle_interrupt IRQ_INVALID_EL1t 133 | 134 | fiq_invalid_el1t: 135 | handle_interrupt FIQ_INVALID_EL1t 136 | 137 | error_invalid_el1t: 138 | handle_interrupt ERROR_INVALID_EL1t 139 | 140 | sync_invalid_el1h: 141 | handle_interrupt SYNC_INVALID_EL1h 142 | 143 | irq_invalid_el1h: 144 | handle_interrupt IRQ_INVALID_EL1h 145 | 146 | fiq_invalid_el1h: 147 | handle_interrupt FIQ_INVALID_EL1h 148 | 149 | error_invalid_el1h: 150 | handle_interrupt ERROR_INVALID_EL1h 151 | 152 | sync_invalid_el0_64: 153 | handle_interrupt SYNC_INVALID_EL0_64 154 | 155 | irq_invalid_el0_64: 156 | handle_interrupt IRQ_INVALID_EL0_64 157 | 158 | fiq_invalid_el0_64: 159 | handle_interrupt FIQ_INVALID_EL0_64 160 | 161 | error_invalid_el0_64: 162 | handle_interrupt ERROR_INVALID_EL0_64 163 | 164 | sync_invalid_el0_32: 165 | handle_interrupt SYNC_INVALID_EL0_32 166 | 167 | irq_invalid_el0_32: 168 | handle_interrupt IRQ_INVALID_EL0_32 169 | 170 | fiq_invalid_el0_32: 171 | handle_interrupt FIQ_INVALID_EL0_32 172 | 173 | error_invalid_el0_32: 174 | handle_interrupt ERROR_INVALID_EL0_32 175 | -------------------------------------------------------------------------------- /aarch64/src/trap.rs: -------------------------------------------------------------------------------- 1 | use crate::registers::EsrEl1; 2 | use port::println; 3 | 4 | #[cfg(not(test))] 5 | core::arch::global_asm!(include_str!("trap.S")); 6 | 7 | pub fn init() { 8 | #[cfg(not(test))] 9 | unsafe { 10 | // Set up a vector table for any exception that is taken to EL1, then enable IRQ 11 | core::arch::asm!( 12 | "adr {tmp}, exception_vectors", 13 | "msr vbar_el1, {tmp}", 14 | "msr DAIFClr, #2", 15 | tmp = out(reg) _, 16 | ); 17 | } 18 | } 19 | 20 | /// Register frame at time interrupt was taken 21 | #[derive(Copy, Clone, Debug)] 22 | #[repr(C)] 23 | pub struct TrapFrame { 24 | x0: u64, 25 | x1: u64, 26 | x2: u64, 27 | x3: u64, 28 | x4: u64, 29 | x5: u64, 30 | x6: u64, 31 | x7: u64, 32 | x8: u64, 33 | x9: u64, 34 | x10: u64, 35 | x11: u64, 36 | x12: u64, 37 | x13: u64, 38 | x14: u64, 39 | x15: u64, 40 | x16: u64, 41 | x17: u64, 42 | x18: u64, 43 | x19: u64, 44 | x20: u64, 45 | x21: u64, 46 | x22: u64, 47 | x23: u64, 48 | x24: u64, 49 | x25: u64, 50 | x26: u64, 51 | x27: u64, 52 | x28: u64, 53 | frame_pointer: u64, // x29 54 | link_register: u64, // x30 55 | esr_el1: EsrEl1, 56 | elr_el1: u64, 57 | far_el1: u64, 58 | interrupt_type: u64, 59 | } 60 | 61 | #[no_mangle] 62 | 
pub extern "C" fn trap_unsafe(frame: *mut TrapFrame) { 63 | unsafe { trap(&mut *frame) } 64 | } 65 | 66 | fn trap(frame: &mut TrapFrame) { 67 | // Just print out the frame and loop for now 68 | // TODO Make it a little prettier and more space efficient 69 | println!("{:#x?}", frame); 70 | loop { 71 | core::hint::spin_loop(); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /aarch64/src/uartmini.rs: -------------------------------------------------------------------------------- 1 | use port::devcons::Uart; 2 | use port::fdt::DeviceTree; 3 | use port::mem::VirtRange; 4 | 5 | use crate::io::{delay, read_reg, write_or_reg, write_reg}; 6 | use crate::registers::{ 7 | AUX_ENABLE, AUX_MU_BAUD, AUX_MU_CNTL, AUX_MU_IER, AUX_MU_IIR, AUX_MU_IO, AUX_MU_LCR, 8 | AUX_MU_LSR, AUX_MU_MCR, GPFSEL1, GPPUD, GPPUDCLK0, 9 | }; 10 | 11 | /// MiniUart is assigned to UART1 on the Raspberry Pi. It is easier to use with 12 | /// real hardware, as it requires no additional configuration. Conversely, it's 13 | /// harded to use with QEMU, as it can't be used with the `nographic` switch. 14 | pub struct MiniUart { 15 | pub gpio_range: VirtRange, 16 | pub aux_range: VirtRange, 17 | pub miniuart_range: VirtRange, 18 | } 19 | 20 | #[allow(dead_code)] 21 | impl MiniUart { 22 | pub fn new(dt: &DeviceTree, mmio_virt_offset: usize) -> MiniUart { 23 | // Bcm2835 and bcm2711 are essentially the same for our needs here. 24 | // If fdt.rs supported aliases well, we could try to just look up 'gpio'. 25 | let gpio_range = VirtRange::from( 26 | &dt.find_compatible("brcm,bcm2835-gpio") 27 | .next() 28 | .or_else(|| dt.find_compatible("brcm,bcm2711-gpio").next()) 29 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 30 | .and_then(|reg| reg.regblock()) 31 | .unwrap() 32 | .with_offset(mmio_virt_offset as u64), 33 | ); 34 | 35 | // Find a compatible aux 36 | let aux_range = VirtRange::from( 37 | &dt.find_compatible("brcm,bcm2835-aux") 38 | .next() 39 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 40 | .and_then(|reg| reg.regblock()) 41 | .unwrap() 42 | .with_offset(mmio_virt_offset as u64), 43 | ); 44 | 45 | // Find a compatible miniuart 46 | let miniuart_range = VirtRange::from( 47 | &dt.find_compatible("brcm,bcm2835-aux-uart") 48 | .next() 49 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 50 | .and_then(|reg| reg.regblock()) 51 | .unwrap() 52 | .with_offset(mmio_virt_offset as u64), 53 | ); 54 | 55 | MiniUart { gpio_range, aux_range, miniuart_range } 56 | } 57 | 58 | pub fn init(&self) { 59 | // Set GPIO pins 14 and 15 to be used for UART1. 
This is done by 60 | // setting the appropriate flags in GPFSEL1 to ALT5, which is 61 | // represented by the 0b010 62 | let mut gpfsel1 = read_reg(&self.gpio_range, GPFSEL1); 63 | gpfsel1 &= !((7 << 12) | (7 << 15)); 64 | gpfsel1 |= (2 << 12) | (2 << 15); 65 | write_reg(&self.gpio_range, GPFSEL1, gpfsel1); 66 | 67 | write_reg(&self.gpio_range, GPPUD, 0); 68 | delay(150); 69 | write_reg(&self.gpio_range, GPPUDCLK0, (1 << 14) | (1 << 15)); 70 | delay(150); 71 | write_reg(&self.gpio_range, GPPUDCLK0, 0); 72 | 73 | // Enable mini uart - required to write to its registers 74 | write_or_reg(&self.aux_range, AUX_ENABLE, 1); 75 | write_reg(&self.miniuart_range, AUX_MU_CNTL, 0); 76 | // 8-bit 77 | write_reg(&self.miniuart_range, AUX_MU_LCR, 3); 78 | write_reg(&self.miniuart_range, AUX_MU_MCR, 0); 79 | // Disable interrupts 80 | write_reg(&self.miniuart_range, AUX_MU_IER, 0); 81 | // Clear receive/transmit FIFOs 82 | write_reg(&self.miniuart_range, AUX_MU_IIR, 0xc6); 83 | 84 | // We want 115200 baud. This is calculated as: 85 | // system_clock_freq / (8 * (baudrate_reg + 1)) 86 | // For now we're making assumptions about the clock frequency 87 | // TODO Get the clock freq via the mailbox, and update if it changes. 88 | // let arm_clock_rate = 500000000.0; 89 | // let baud_rate_reg = arm_clock_rate / (8.0 * 115200.0) + 1.0; 90 | //write_reg(self.miniuart_reg, AUX_MU_BAUD, baud_rate_reg as u32); 91 | write_reg(&self.miniuart_range, AUX_MU_BAUD, 270); 92 | 93 | // Finally enable transmit 94 | write_reg(&self.miniuart_range, AUX_MU_CNTL, 3); 95 | } 96 | } 97 | 98 | impl Uart for MiniUart { 99 | fn putb(&self, b: u8) { 100 | // Wait for UART to become ready to transmit 101 | while read_reg(&self.miniuart_range, AUX_MU_LSR) & (1 << 5) == 0 { 102 | core::hint::spin_loop(); 103 | } 104 | write_reg(&self.miniuart_range, AUX_MU_IO, b as u32); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /aarch64/src/uartpl011.rs: -------------------------------------------------------------------------------- 1 | use crate::io::{delay, read_reg, write_reg, GpioPull}; 2 | use crate::mailbox; 3 | use crate::registers::{ 4 | GPPUD, GPPUDCLK0, UART0_CR, UART0_DR, UART0_FBRD, UART0_FR, UART0_IBRD, UART0_ICR, UART0_IMSC, 5 | UART0_LCRH, 6 | }; 7 | use port::devcons::Uart; 8 | use port::fdt::DeviceTree; 9 | use port::mem::VirtRange; 10 | 11 | #[allow(dead_code)] 12 | pub struct Pl011Uart { 13 | gpio_range: VirtRange, 14 | pl011_range: VirtRange, 15 | } 16 | 17 | /// PL011 is the default in qemu (UART0), but a bit fiddly to use on a real 18 | /// Raspberry Pi board, as it needs additional configuration in the config 19 | /// and EEPROM (rpi4) to assign to the serial GPIO pins. 20 | #[allow(dead_code)] 21 | impl Pl011Uart { 22 | pub fn new(dt: &DeviceTree) -> Pl011Uart { 23 | // TODO use aliases? 
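// Each lookup below takes the first device tree node with a matching
// 'compatible' string, then its first translated 'reg' block, and wraps it
// in a VirtRange; the unwrap panics if the node or its reg property is
// missing, so a board without these nodes fails loudly here.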
24 | let gpio_range = VirtRange::from( 25 | &dt.find_compatible("brcm,bcm2835-gpio") 26 | .next() 27 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 28 | .and_then(|reg| reg.regblock()) 29 | .unwrap(), 30 | ); 31 | 32 | // Find a compatible pl011 uart 33 | let pl011_range = VirtRange::from( 34 | &dt.find_compatible("arm,pl011") 35 | .next() 36 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 37 | .and_then(|reg| reg.regblock()) 38 | .unwrap(), 39 | ); 40 | 41 | Pl011Uart { gpio_range, pl011_range } 42 | } 43 | 44 | pub fn init(&self) { 45 | // Disable UART0 46 | write_reg(&self.pl011_range, UART0_CR, 0); 47 | 48 | // Turn pull up/down off for pins 14/15 (tx/rx) 49 | self.gpiosetpull(14, GpioPull::Off); 50 | self.gpiosetpull(15, GpioPull::Off); 51 | 52 | // Clear interrupts 53 | write_reg(&self.pl011_range, UART0_ICR, 0x7ff); 54 | 55 | // Set the uart clock rate to 3MHz 56 | let uart_clock_rate_hz = 3_000_000; 57 | mailbox::set_clock_rate(2, uart_clock_rate_hz, 0); 58 | 59 | // Set the baud rate via the integer and fractional baud rate regs 60 | let baud_rate = 115200; 61 | let baud_rate_divisor = (uart_clock_rate_hz as f32) / ((16 * baud_rate) as f32); 62 | let int_brd = baud_rate_divisor as u32; 63 | let frac_brd = (((baud_rate_divisor - (int_brd as f32)) * 64.0) + 0.5) as u32; 64 | write_reg(&self.pl011_range, UART0_IBRD, int_brd); 65 | write_reg(&self.pl011_range, UART0_FBRD, frac_brd); 66 | 67 | // Enable FIFOs (tx and rx), 8 bit 68 | write_reg(&self.pl011_range, UART0_LCRH, 0x70); 69 | 70 | // Mask all interrupts 71 | write_reg(&self.pl011_range, UART0_IMSC, 0x7f2); 72 | 73 | // Enable UART0, receive only 74 | write_reg(&self.pl011_range, UART0_CR, 0x81); 75 | } 76 | 77 | fn gpiosetpull(&self, pin: u32, pull: GpioPull) { 78 | // The GPIO pull up/down bits are spread across consecutive registers GPPUDCLK0 to GPPUDCLK1 79 | // GPPUDCLK0: pins 0-31 80 | // GPPUDCLK1: pins 32-53 81 | let reg_offset = pin as usize / 32; 82 | // Number of bits to shift pull, in order to affect the required pin (just 1 bit) 83 | let pud_bit = 1 << (pin % 32); 84 | // Which GPPUDCLK register to use 85 | let gppudclk_reg = GPPUDCLK0 + reg_offset * 4; 86 | 87 | // You can't read the GPPUD registers, so to set the state we first set the PUD value we want... 88 | write_reg(&self.pl011_range, GPPUD, pull as u32); 89 | // ...wait 150 cycles for it to set 90 | delay(150); 91 | // ...set the appropriate PUD bit 92 | write_reg(&self.pl011_range, gppudclk_reg, pud_bit); 93 | // ...wait 150 cycles for it to set 94 | delay(150); 95 | // ...clear up 96 | write_reg(&self.pl011_range, GPPUD, 0); 97 | write_reg(&self.pl011_range, gppudclk_reg, 0); 98 | } 99 | } 100 | 101 | impl Uart for Pl011Uart { 102 | fn putb(&self, b: u8) { 103 | // Wait for UART to become ready to transmit. 
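// Bit 5 of the PL011 flag register (UART0_FR) is TXFF: it reads 1 while the
// transmit FIFO is full, so we spin until it clears before writing the byte.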
104 | while read_reg(&self.pl011_range, UART0_FR) & (1 << 5) != 0 {} 105 | write_reg(&self.pl011_range, UART0_DR, b as u32); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /aarch64/src/vm.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_upper_case_globals)] 2 | 3 | use crate::{ 4 | kmem::{ 5 | ebss_addr, erodata_addr, etext_addr, from_ptr_to_physaddr, from_virt_to_physaddr, 6 | physaddr_as_ptr_mut, physaddr_as_virt, text_addr, 7 | }, 8 | pagealloc, 9 | registers::rpi_mmio, 10 | }; 11 | use bitstruct::bitstruct; 12 | use core::fmt; 13 | use core::ptr::write_volatile; 14 | use num_enum::{FromPrimitive, IntoPrimitive}; 15 | use port::{ 16 | bitmapalloc::BitmapPageAllocError, 17 | mem::{PhysAddr, PhysRange, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K}, 18 | }; 19 | 20 | #[cfg(not(test))] 21 | use port::println; 22 | 23 | #[allow(dead_code)] 24 | #[derive(Debug, Clone, Copy, PartialEq)] 25 | pub enum PageSize { 26 | Page4K, 27 | Page2M, 28 | Page1G, 29 | } 30 | 31 | impl PageSize { 32 | const fn size(&self) -> usize { 33 | match self { 34 | PageSize::Page4K => PAGE_SIZE_4K, 35 | PageSize::Page2M => PAGE_SIZE_2M, 36 | PageSize::Page1G => PAGE_SIZE_1G, 37 | } 38 | } 39 | } 40 | 41 | #[repr(C, align(4096))] 42 | #[derive(Clone, Copy)] 43 | pub struct Page4K([u8; PAGE_SIZE_4K]); 44 | 45 | impl Page4K { 46 | pub fn clear(&mut self) { 47 | unsafe { 48 | core::intrinsics::volatile_set_memory(&mut self.0, 0u8, 1); 49 | } 50 | } 51 | } 52 | 53 | #[derive(Debug, IntoPrimitive, FromPrimitive)] 54 | #[repr(u8)] 55 | pub enum Mair { 56 | #[num_enum(default)] 57 | Normal = 0, 58 | Device = 1, 59 | } 60 | 61 | #[derive(Debug, IntoPrimitive, FromPrimitive)] 62 | #[repr(u8)] 63 | pub enum AccessPermission { 64 | #[num_enum(default)] 65 | PrivRw = 0, 66 | AllRw = 1, 67 | PrivRo = 2, 68 | AllRo = 3, 69 | } 70 | 71 | #[derive(Debug, IntoPrimitive, FromPrimitive)] 72 | #[repr(u8)] 73 | pub enum Shareable { 74 | #[num_enum(default)] 75 | Non = 0, // Non-shareable (single core) 76 | Unpredictable = 1, // Unpredictable! 77 | Outer = 2, // Outer shareable (shared across CPUs, GPU) 78 | Inner = 3, // Inner shareable (shared across CPUs) 79 | } 80 | 81 | bitstruct! { 82 | /// AArch64 supports various granule and page sizes. We assume 48-bit 83 | /// addresses. This is documented in the 'Translation table descriptor 84 | /// formats' section of the Arm Architecture Reference Manual. 85 | /// The virtual address translation breakdown is documented in the 'Translation 86 | /// Process' secrtion of the Arm Architecture Reference Manual. 
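///
/// Illustrative sketch (address assumed): for a page-aligned physical address
/// such as 0x8_0000, `with_phys_addr` stores `0x80000 >> 12 == 0x80` in the
/// `addr` field (bits 12..48), since the low 12 bits of such an address are
/// always zero.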
87 | #[derive(Copy, Clone, PartialEq)] 88 | #[repr(transparent)] 89 | pub struct Entry(u64) { 90 | valid: bool = 0; 91 | page_or_table: bool = 1; 92 | mair_index: Mair = 2..5; 93 | non_secure: bool = 5; 94 | access_permission: AccessPermission = 6..8; 95 | shareable: Shareable = 8..10; 96 | accessed: bool = 10; // Was accessed by code 97 | addr: u64 = 12..48; 98 | pxn: bool = 53; // Privileged eXecute Never 99 | uxn: bool = 54; // Unprivileged eXecute Never 100 | } 101 | } 102 | 103 | impl Entry { 104 | pub const fn empty() -> Entry { 105 | Entry(0) 106 | } 107 | 108 | fn rw_kernel_data() -> Self { 109 | Entry(0) 110 | .with_shareable(Shareable::Inner) 111 | .with_accessed(true) 112 | .with_uxn(true) 113 | .with_pxn(true) 114 | .with_mair_index(Mair::Normal) 115 | .with_valid(true) 116 | } 117 | 118 | fn ro_kernel_data() -> Self { 119 | Entry(0) 120 | .with_access_permission(AccessPermission::PrivRo) 121 | .with_shareable(Shareable::Inner) 122 | .with_accessed(true) 123 | .with_uxn(true) 124 | .with_pxn(true) 125 | .with_mair_index(Mair::Normal) 126 | .with_valid(true) 127 | } 128 | 129 | fn ro_kernel_text() -> Self { 130 | Entry(0) 131 | .with_access_permission(AccessPermission::PrivRw) 132 | .with_shareable(Shareable::Inner) 133 | .with_accessed(true) 134 | .with_uxn(true) 135 | .with_pxn(false) 136 | .with_mair_index(Mair::Normal) 137 | .with_valid(true) 138 | } 139 | 140 | fn ro_kernel_device() -> Self { 141 | Entry(0) 142 | .with_access_permission(AccessPermission::PrivRw) 143 | .with_shareable(Shareable::Inner) 144 | .with_accessed(true) 145 | .with_uxn(true) 146 | .with_pxn(true) 147 | .with_mair_index(Mair::Device) 148 | .with_valid(true) 149 | } 150 | 151 | const fn with_phys_addr(self, pa: PhysAddr) -> Self { 152 | Entry(self.0).with_addr(pa.addr() >> 12) 153 | } 154 | 155 | /// Return the physical page address pointed to by this entry 156 | fn phys_page_addr(self) -> PhysAddr { 157 | PhysAddr::new(self.addr() << 12) 158 | } 159 | 160 | fn virt_page_addr(self) -> usize { 161 | physaddr_as_virt(self.phys_page_addr()) 162 | } 163 | 164 | fn table(self, level: Level) -> bool { 165 | self.page_or_table() && level != Level::Level3 166 | } 167 | } 168 | 169 | impl fmt::Debug for Entry { 170 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 171 | write!(f, "Entry: {:#x} ", self.addr() << 12)?; 172 | if self.valid() { 173 | write!(f, " Valid")?; 174 | } else { 175 | write!(f, " Invalid")?; 176 | } 177 | if self.page_or_table() { 178 | write!(f, " Page/Table")?; 179 | } else { 180 | write!(f, " Block")?; 181 | } 182 | write!(f, " {:?}", self.mair_index())?; 183 | if self.non_secure() { 184 | write!(f, " NonSecure")?; 185 | } else { 186 | write!(f, " Secure")?; 187 | } 188 | write!(f, " {:?} {:?}", self.access_permission(), self.shareable())?; 189 | if self.accessed() { 190 | write!(f, " Accessed")?; 191 | } 192 | if self.pxn() { 193 | write!(f, " PXN")?; 194 | } 195 | if self.uxn() { 196 | write!(f, " UXN")?; 197 | } 198 | Ok(()) 199 | } 200 | } 201 | 202 | /// Levels start at the lowest number (most significant) and increase from 203 | /// there. Four levels would support (for example) 4kiB granules with 4KiB 204 | /// pages using Level0 - Level3, while three would support 2MiB pages with the 205 | /// same size granules, using only Level0 - Level2. 
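///
/// As a worked example, the virtual address 0xffff_8000_049f_d000 breaks down
/// into the indices (256, 0, 36, 509) for Level0..Level3 respectively (see the
/// `can_break_down_va` test below).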
206 | #[derive(Debug, Clone, Copy, PartialEq)] 207 | pub enum Level { 208 | Level0, 209 | Level1, 210 | Level2, 211 | Level3, 212 | } 213 | 214 | impl Level { 215 | /// Returns the next level to translate 216 | pub fn next(&self) -> Option { 217 | match self { 218 | Level::Level0 => Some(Level::Level1), 219 | Level::Level1 => Some(Level::Level2), 220 | Level::Level2 => Some(Level::Level3), 221 | Level::Level3 => None, 222 | } 223 | } 224 | 225 | pub fn depth(&self) -> usize { 226 | match self { 227 | Level::Level0 => 0, 228 | Level::Level1 => 1, 229 | Level::Level2 => 2, 230 | Level::Level3 => 3, 231 | } 232 | } 233 | } 234 | 235 | pub fn va_index(va: usize, level: Level) -> usize { 236 | match level { 237 | Level::Level0 => (va >> 39) & 0x1ff, 238 | Level::Level1 => (va >> 30) & 0x1ff, 239 | Level::Level2 => (va >> 21) & 0x1ff, 240 | Level::Level3 => (va >> 12) & 0x1ff, 241 | } 242 | } 243 | 244 | /// Returns a tuple of page table indices for the given virtual address 245 | #[cfg(test)] 246 | fn va_indices(va: usize) -> (usize, usize, usize, usize) { 247 | ( 248 | va_index(va, Level::Level0), 249 | va_index(va, Level::Level1), 250 | va_index(va, Level::Level2), 251 | va_index(va, Level::Level3), 252 | ) 253 | } 254 | 255 | /// Return the virtual address for the page table at level `level` for the 256 | /// given virtual address, assuming the use of recursive page tables. 257 | fn recursive_table_addr(va: usize, level: Level) -> usize { 258 | let indices_mask = 0x0000_ffff_ffff_f000; 259 | let indices = va & indices_mask; 260 | let shift = match level { 261 | Level::Level0 => 36, 262 | Level::Level1 => 27, 263 | Level::Level2 => 18, 264 | Level::Level3 => 9, 265 | }; 266 | let recursive_indices = match level { 267 | Level::Level0 => (511 << 39) | (511 << 30) | (511 << 21) | (511 << 12), 268 | Level::Level1 => (511 << 39) | (511 << 30) | (511 << 21), 269 | Level::Level2 => (511 << 39) | (511 << 30), 270 | Level::Level3 => 511 << 39, 271 | }; 272 | 0xffff_0000_0000_0000 | recursive_indices | ((indices >> shift) & indices_mask) 273 | } 274 | 275 | #[derive(Debug)] 276 | #[allow(dead_code)] 277 | pub enum PageTableError { 278 | AllocationFailed(BitmapPageAllocError), 279 | EntryIsNotTable, 280 | PhysRangeIsZero, 281 | } 282 | 283 | impl From for PageTableError { 284 | fn from(err: BitmapPageAllocError) -> PageTableError { 285 | PageTableError::AllocationFailed(err) 286 | } 287 | } 288 | 289 | #[repr(C, align(4096))] 290 | pub struct Table { 291 | entries: [Entry; 512], 292 | } 293 | 294 | impl Table { 295 | /// Return a mutable entry from the table based on the virtual address and 296 | /// the level. (It uses the level to extract the index from the correct 297 | /// part of the virtual address). 298 | pub fn entry_mut(&mut self, level: Level, va: usize) -> Result<&mut Entry, PageTableError> { 299 | let idx = va_index(va, level); 300 | Ok(&mut self.entries[idx]) 301 | } 302 | 303 | /// Return the next table in the walk. If it doesn't exist, create it. 304 | fn next_mut(&mut self, level: Level, va: usize) -> Result<&mut Table, PageTableError> { 305 | // Try to get a valid page table entry. If it doesn't exist, create it. 
306 | let index = va_index(va, level); 307 | let mut entry = self.entries[index]; 308 | if !entry.valid() { 309 | // Create a new page table and write the entry into the parent table 310 | let table = Self::alloc_pagetable()?; 311 | entry = Entry::rw_kernel_data() 312 | .with_phys_addr(from_ptr_to_physaddr(table)) 313 | .with_page_or_table(true); 314 | unsafe { 315 | write_volatile(&mut self.entries[index], entry); 316 | } 317 | } 318 | 319 | if !entry.table(level) { 320 | return Err(PageTableError::EntryIsNotTable); 321 | } 322 | 323 | // Return the address of the next table as a recursive address 324 | let recursive_page_addr = recursive_table_addr(va, level.next().unwrap()); 325 | Ok(unsafe { &mut *(recursive_page_addr as *mut Table) }) 326 | } 327 | 328 | fn alloc_pagetable() -> Result<&'static mut Table, PageTableError> { 329 | let page = pagealloc::allocate()?; 330 | page.clear(); 331 | Ok(unsafe { &mut *(page as *mut Page4K as *mut Table) }) 332 | } 333 | } 334 | 335 | pub type PageTable = Table; 336 | 337 | impl PageTable { 338 | pub const fn empty() -> PageTable { 339 | PageTable { entries: [Entry::empty(); 512] } 340 | } 341 | 342 | /// Ensure there's a mapping from va to entry, creating any intermediate 343 | /// page tables that don't already exist. If a mapping already exists, 344 | /// replace it. 345 | fn map_to( 346 | &mut self, 347 | entry: Entry, 348 | va: usize, 349 | page_size: PageSize, 350 | ) -> Result<(), PageTableError> { 351 | // We change the last entry of the root page table to the address of 352 | // self for the duration of this method. This allows us to work with 353 | // this hierarchy of pagetables even if it's not the current translation 354 | // table. We *must* return it to its original state on exit. 355 | // TODO Only do this if self != kernel_root() 356 | let old_recursive_entry = kernel_root().entries[511]; 357 | let temp_recursive_entry = Entry::rw_kernel_data() 358 | .with_phys_addr(from_ptr_to_physaddr(self)) 359 | .with_page_or_table(true); 360 | 361 | unsafe { 362 | write_volatile(&mut kernel_root().entries[511], temp_recursive_entry); 363 | // TODO Need to invalidate the single cache entry 364 | invalidate_all_tlb_entries(); 365 | }; 366 | 367 | let dest_entry = match page_size { 368 | PageSize::Page4K => self 369 | .next_mut(Level::Level0, va) 370 | .and_then(|t1| t1.next_mut(Level::Level1, va)) 371 | .and_then(|t2| t2.next_mut(Level::Level2, va)) 372 | .and_then(|t3| t3.entry_mut(Level::Level3, va)), 373 | PageSize::Page2M => self 374 | .next_mut(Level::Level0, va) 375 | .and_then(|t1| t1.next_mut(Level::Level1, va)) 376 | .and_then(|t2| t2.entry_mut(Level::Level2, va)), 377 | PageSize::Page1G => { 378 | self.next_mut(Level::Level0, va).and_then(|t1| t1.entry_mut(Level::Level1, va)) 379 | } 380 | }; 381 | 382 | // Entries at level 3 should have the page flag set 383 | let entry = 384 | if page_size == PageSize::Page4K { entry.with_page_or_table(true) } else { entry }; 385 | 386 | unsafe { 387 | write_volatile(dest_entry?, entry); 388 | // Return the recursive entry to its original state 389 | write_volatile(&mut kernel_root().entries[511], old_recursive_entry); 390 | // TODO Need to invalidate the single cache entry (+ optionally the recursive entry) 391 | invalidate_all_tlb_entries(); 392 | } 393 | 394 | Ok(()) 395 | } 396 | 397 | /// Map the physical range using the requested page size. 
398 | /// This aligns on page size boundaries, and rounds the requested range so 399 | /// that both the alignment requirements are met and the requested range are 400 | /// covered. 401 | /// TODO Assuming some of these requests are dynamic, but should not fail, 402 | /// we should fall back to the smaller page sizes if the requested size 403 | /// fails. 404 | pub fn map_phys_range( 405 | &mut self, 406 | range: &PhysRange, 407 | entry: Entry, 408 | page_size: PageSize, 409 | ) -> Result<(usize, usize), PageTableError> { 410 | let mut startva = None; 411 | let mut endva = 0; 412 | for pa in range.step_by_rounded(page_size.size()) { 413 | let va = physaddr_as_virt(pa); 414 | self.map_to(entry.with_phys_addr(pa), va, page_size)?; 415 | startva.get_or_insert(va); 416 | endva = va + page_size.size(); 417 | } 418 | startva.map(|startva| (startva, endva)).ok_or(PageTableError::PhysRangeIsZero) 419 | } 420 | 421 | /// Recursively write out all the tables and all its children 422 | pub fn print_recursive_tables(&self) { 423 | println!("Root va:{:p}", self); 424 | self.print_table_at_level(Level::Level0, 0xffff_ffff_ffff_f000); 425 | } 426 | 427 | /// Recursively write out the table and all its children 428 | fn print_table_at_level(&self, level: Level, table_va: usize) { 429 | let indent = 2 + level.depth() * 2; 430 | println!("{:indent$}Table {:?} va:{:p}", "", level, self); 431 | for (i, &pte) in self.entries.iter().enumerate() { 432 | if pte.valid() { 433 | print_pte(indent, i, level, pte); 434 | 435 | // Recurse into child table (unless it's the recursive index) 436 | if i != 511 && pte.table(level) { 437 | let next_nevel = level.next().unwrap(); 438 | let child_va = (table_va << 9) | (i << 12); 439 | let child_table = unsafe { &*(child_va as *const PageTable) }; 440 | child_table.print_table_at_level(next_nevel, child_va); 441 | } 442 | } 443 | } 444 | } 445 | } 446 | 447 | impl fmt::Debug for PageTable { 448 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 449 | write!(f, "{:x}", (self as *const Self).addr()) 450 | } 451 | } 452 | 453 | /// Helper to print out PTE as part of a table 454 | fn print_pte(indent: usize, i: usize, level: Level, pte: Entry) { 455 | if pte.table(level) { 456 | println!("{:indent$}[{:03}] Table {:?} (pte:{:#016x})", "", i, pte, pte.0,); 457 | } else { 458 | println!( 459 | "{:indent$}[{:03}] Entry va:{:#018x} -> {:?} (pte:{:#016x})", 460 | "", 461 | i, 462 | pte.virt_page_addr(), 463 | pte, 464 | pte.0, 465 | ); 466 | } 467 | } 468 | 469 | pub unsafe fn init(kpage_table: &mut PageTable, dtb_range: PhysRange, available_mem: PhysRange) { 470 | pagealloc::init_page_allocator(); 471 | 472 | // We use recursive page tables, but we have to be careful in the init call, 473 | // since the kpage_table is not currently pointed to by ttbr1_el1. Any 474 | // recursive addressing of (511, 511, 511, 511) always points to the 475 | // physical address of the root page table, which isn't what we want here 476 | // because kpage_table hasn't been switched to yet. 
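// Once kpage_table has been switched to (see switch() below), the same
// recursive entry makes the active root table visible at virtual address
// 0xffff_ffff_ffff_f000, which is what print_recursive_tables relies on.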
477 | 478 | // Write the recursive entry 479 | unsafe { 480 | let entry = Entry::rw_kernel_data() 481 | .with_phys_addr(from_ptr_to_physaddr(kpage_table)) 482 | .with_page_or_table(true); 483 | write_volatile(&mut kpage_table.entries[511], entry); 484 | } 485 | 486 | // TODO leave the first page unmapped to catch null pointer dereferences in unsafe code 487 | let custom_map = { 488 | let text_range = 489 | PhysRange(from_virt_to_physaddr(text_addr())..from_virt_to_physaddr(etext_addr())); 490 | let data_range = PhysRange::with_len( 491 | from_virt_to_physaddr(etext_addr()).addr(), 492 | erodata_addr() - etext_addr(), 493 | ); 494 | let bss_range = PhysRange::with_len( 495 | from_virt_to_physaddr(erodata_addr()).addr(), 496 | ebss_addr() - erodata_addr(), 497 | ); 498 | 499 | let mmio_range = rpi_mmio().expect("mmio base detect failed"); 500 | 501 | let mut map = [ 502 | ("DTB", dtb_range, Entry::ro_kernel_data(), PageSize::Page4K), 503 | ("Kernel Text", text_range, Entry::ro_kernel_text(), PageSize::Page2M), 504 | ("Kernel Data", data_range, Entry::ro_kernel_data(), PageSize::Page2M), 505 | ("Kernel BSS", bss_range, Entry::rw_kernel_data(), PageSize::Page2M), 506 | ("MMIO", mmio_range, Entry::ro_kernel_device(), PageSize::Page2M), 507 | ]; 508 | map.sort_by_key(|a| a.1.start()); 509 | map 510 | }; 511 | 512 | println!("Memory map:"); 513 | for (name, range, flags, page_size) in custom_map.iter() { 514 | let mapped_range = 515 | kpage_table.map_phys_range(range, *flags, *page_size).expect("init mapping failed"); 516 | 517 | println!( 518 | " {:14}{:#018x}..{:#018x} to {:#018x}..{:#018x} flags: {:?} page_size: {:?}", 519 | name, 520 | range.start().addr(), 521 | range.end().addr(), 522 | mapped_range.0, 523 | mapped_range.1, 524 | flags, 525 | page_size 526 | ); 527 | } 528 | 529 | if let Err(err) = pagealloc::free_unused_ranges(&available_mem, custom_map.map(|m| m.1).iter()) 530 | { 531 | panic!("Couldn't mark unused pages as free: err: {:?}", err); 532 | } 533 | } 534 | 535 | /// Return the root kernel page table physical address 536 | fn ttbr1_el1() -> u64 { 537 | #[cfg(not(test))] 538 | { 539 | let mut addr: u64; 540 | unsafe { 541 | core::arch::asm!("mrs {value}, ttbr1_el1", value = out(reg) addr); 542 | } 543 | addr 544 | } 545 | #[cfg(test)] 546 | 0 547 | } 548 | 549 | // TODO this should just call invalidate_all_tlb_entries afterwards? 
550 | #[allow(unused_variables)] 551 | pub unsafe fn switch(kpage_table: &PageTable) { 552 | #[cfg(not(test))] 553 | unsafe { 554 | let pt_phys = from_ptr_to_physaddr(kpage_table).addr(); 555 | // https://forum.osdev.org/viewtopic.php?t=36412&p=303237 556 | core::arch::asm!( 557 | "msr ttbr1_el1, {pt_phys}", 558 | "tlbi vmalle1is", // invalidate all TLB entries 559 | "dsb ish", // ensure write has completed 560 | "isb", // synchronize context and ensure that no instructions 561 | // are fetched using the old translation 562 | pt_phys = in(reg) pt_phys); 563 | } 564 | } 565 | 566 | #[allow(unused_variables)] 567 | pub unsafe fn invalidate_all_tlb_entries() { 568 | #[cfg(not(test))] 569 | unsafe { 570 | // https://forum.osdev.org/viewtopic.php?t=36412&p=303237 571 | core::arch::asm!( 572 | "tlbi vmalle1is", // invalidate all TLB entries 573 | "dsb ish", // ensure write has completed 574 | "isb" 575 | ); // synchronize context and ensure that no instructions 576 | // are fetched using the old translation 577 | } 578 | } 579 | 580 | /// Return the root kernel page table 581 | pub fn kernel_root() -> &'static mut PageTable { 582 | unsafe { &mut *physaddr_as_ptr_mut::(PhysAddr::new(ttbr1_el1())) } 583 | } 584 | 585 | #[cfg(test)] 586 | mod tests { 587 | use super::*; 588 | 589 | #[test] 590 | fn can_break_down_va() { 591 | assert_eq!(va_indices(0xffff8000049fd000), (256, 0, 36, 509)); 592 | } 593 | 594 | #[test] 595 | fn test_to_use_for_debugging_vaddrs() { 596 | assert_eq!(va_indices(0xffff8000049fd000), (256, 0, 36, 509)); 597 | } 598 | 599 | #[test] 600 | fn test_recursive_table_addr() { 601 | assert_eq!(va_indices(0xffff800008000000), (256, 0, 64, 0)); 602 | assert_eq!( 603 | va_indices(recursive_table_addr(0xffff800008000000, Level::Level0)), 604 | (511, 511, 511, 511) 605 | ); 606 | assert_eq!( 607 | va_indices(recursive_table_addr(0xffff800008000000, Level::Level1)), 608 | (511, 511, 511, 256) 609 | ); 610 | assert_eq!( 611 | va_indices(recursive_table_addr(0xffff800008000000, Level::Level2)), 612 | (511, 511, 256, 0) 613 | ); 614 | assert_eq!( 615 | va_indices(recursive_table_addr(0xffff800008000000, Level::Level3)), 616 | (511, 256, 0, 64) 617 | ); 618 | } 619 | } 620 | -------------------------------------------------------------------------------- /lib/aarch64-unknown-none-elf.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "aarch64", 3 | "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", 4 | "disable-redzone": true, 5 | "executables": true, 6 | "features": "+strict-align,+neon,+fp-armv8", 7 | "linker": "rust-lld", 8 | "linker-flavor": "ld.lld", 9 | "llvm-target": "aarch64-unknown-none", 10 | "max-atomic-width": 128, 11 | "panic-strategy": "abort", 12 | "relocation-model": "pie", 13 | "target-pointer-width": "64", 14 | "pre-link-args": { 15 | "ld.lld": [ 16 | "-nostdlib" 17 | ] 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /lib/riscv64-unknown-none-elf.json: -------------------------------------------------------------------------------- 1 | { 2 | "arch": "riscv64", 3 | "code-model": "medium", 4 | "cpu": "generic-rv64", 5 | "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", 6 | "eh-frame-header": false, 7 | "emit-debug-gdb-scripts": false, 8 | "features": "+m,+a,+f,+d,+c", 9 | "is-builtin": false, 10 | "linker": "rust-lld", 11 | "linker-flavor": "ld.lld", 12 | "llvm-abiname": "lp64d", 13 | "llvm-target": "riscv64", 14 | "max-atomic-width": 64, 
15 | "panic-strategy": "abort", 16 | "relocation-model": "pie", 17 | "target-pointer-width": "64", 18 | "pre-link-args": { 19 | "ld.lld": [ 20 | "-nostdlib" 21 | ] 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /lib/x86_64-unknown-none-elf.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none-elf", 3 | "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", 4 | "linker-flavor": "ld.lld", 5 | "linker": "rust-lld", 6 | "target-endian": "little", 7 | "target-pointer-width": "64", 8 | "target-c-int-width": "32", 9 | "arch": "x86_64", 10 | "os": "none", 11 | "executables": true, 12 | "relocation-model": "pie", 13 | "code-model": "medium", 14 | "disable-redzone": true, 15 | "features": "-mmx,-sse,+soft-float", 16 | "panic-strategy": "abort", 17 | "frame-pointer": "always", 18 | "pre-link-args": { 19 | "ld.lld": [ 20 | "-nostdlib" 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /port/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "port" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | bitflags = "2.5" 8 | -------------------------------------------------------------------------------- /port/lib/test/fdt/readme.txt: -------------------------------------------------------------------------------- 1 | This folder contains test files for the devicetree code in the fdt module. Each dtb has the corresponding dts for reference. 2 | 3 | - test1.dtb: A copy of the bcm2710-rpi-3-b used for Raspberry Pi 3B -------------------------------------------------------------------------------- /port/lib/test/fdt/test1.dtb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dancrossnyc/r9/79e016003f1e7a46dc66d9d785a6f4d6f887db78/port/lib/test/fdt/test1.dtb -------------------------------------------------------------------------------- /port/src/allocator.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021 The Hypatia Authors 2 | // All rights reserved 3 | // 4 | // Use of this source code is governed by an MIT-style 5 | // license that can be found in the LICENSE file or at 6 | // https://opensource.org/licenses/MIT. 7 | 8 | #![allow(clippy::too_long_first_doc_paragraph)] 9 | 10 | use alloc::alloc::{AllocError, Allocator, Layout}; 11 | use core::ptr::NonNull; 12 | use core::sync::atomic::{AtomicUsize, Ordering}; 13 | use core::{mem, ptr}; 14 | 15 | /// The allocator works in terms of an owned region of memory 16 | /// that is represented by a Block, which describes the region 17 | /// in terms of a non-nil pointer and a length. A Block is an 18 | /// analogue of a mutable slice. 19 | /// 20 | /// At some point, it may make sense to replace this with a 21 | /// slice pointer, but too many of the interfaces there are not 22 | /// (yet) stable. 23 | #[derive(Clone, Copy, Debug)] 24 | pub struct Block { 25 | ptr: NonNull, 26 | len: usize, 27 | } 28 | 29 | impl Block { 30 | /// Creates a new block from raw parts. This is analogous 31 | /// to `core::slice::from_raw_parts`. 32 | /// 33 | /// # Safety 34 | /// The caller must ensure that the pointer and length given 35 | /// are appropriate for the construction of a new block. 
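///
/// Minimal sketch (arena name and size assumed, not part of this crate):
///
/// ```ignore
/// static mut ARENA: [u8; 4096] = [0; 4096];
/// let block = unsafe { Block::new_from_raw_parts((&raw mut ARENA).cast(), 4096) };
/// ```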
36 | pub const unsafe fn new_from_raw_parts(ptr: *mut u8, len: usize) -> Block { 37 | let ptr = unsafe { NonNull::new_unchecked(ptr) }; 38 | Block { ptr, len } 39 | } 40 | 41 | /// Splits a block into two sub-blocks. 42 | pub fn split_at_mut(self, offset: usize) -> Option<(Block, Block)> { 43 | let len = self.len(); 44 | if offset > len { 45 | return None; 46 | } 47 | let ptr = self.as_ptr(); 48 | let a = unsafe { Block::new_from_raw_parts(ptr, offset) }; 49 | let b = unsafe { Block::new_from_raw_parts(ptr.wrapping_add(offset), len - offset) }; 50 | Some((a, b)) 51 | } 52 | 53 | /// Returns a raw mutable pointer to the beginning of the 54 | /// owned region. 55 | pub fn as_ptr(self) -> *mut u8 { 56 | self.ptr.as_ptr() 57 | } 58 | 59 | /// Returns the length of the region. 60 | fn len(self) -> usize { 61 | self.len 62 | } 63 | } 64 | 65 | /// A Bump Allocator takes ownership a region of memory, called 66 | /// an "arena", represented by a Block, and maintains a cursor 67 | /// into that region. The cursor denotes the point between 68 | /// allocated and unallocated memory in the arena. 69 | pub struct BumpAlloc { 70 | arena: Block, 71 | cursor: AtomicUsize, 72 | } 73 | 74 | impl BumpAlloc { 75 | /// Creates a new bump allocator over the given Block. 76 | /// Takes ownership of the provided region. 77 | pub const fn new(arena: Block) -> BumpAlloc { 78 | BumpAlloc { arena, cursor: AtomicUsize::new(0) } 79 | } 80 | 81 | /// Allocates the requested number of bytes with the given 82 | /// alignment. Returns `None` if the allocation cannot be 83 | /// satisfied, otherwise returns `Some` of a pair of blocks: 84 | /// the first contains the prefix before the (aligned) block 85 | /// and the second is the requested block itself. 86 | pub fn try_alloc(&self, align: usize, size: usize) -> Option<(Block, Block)> { 87 | let base = self.arena.as_ptr(); 88 | let mut first = ptr::null_mut(); 89 | let mut adjust = 0; 90 | self.cursor 91 | .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |current| { 92 | first = base.wrapping_add(current); 93 | adjust = first.align_offset(align); 94 | let offset = current.checked_add(adjust).expect("alignment overflow"); 95 | let next = offset.checked_add(size).expect("size overflow"); 96 | (next <= self.arena.len()).then_some(next) 97 | }) 98 | .ok()?; 99 | let prefix = unsafe { Block::new_from_raw_parts(first, adjust) }; 100 | let ptr = first.wrapping_add(adjust); 101 | let block = unsafe { Block::new_from_raw_parts(ptr, size) }; 102 | Some((prefix, block)) 103 | } 104 | } 105 | 106 | /// BumpAlloc implements the allocator interface, and is 107 | /// suitable for e.g. page allocators and so forth. Dealloc is 108 | /// unimplemented and will panic. 109 | unsafe impl Allocator for BumpAlloc { 110 | fn allocate(&self, layout: Layout) -> Result, AllocError> { 111 | let (_, block) = self.try_alloc(layout.size(), layout.align()).ok_or(AllocError)?; 112 | Ok(NonNull::slice_from_raw_parts(block.ptr, block.len())) 113 | } 114 | 115 | unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) { 116 | unimplemented!(); 117 | } 118 | } 119 | 120 | // # QuickFit allocator for small objects. 121 | // 122 | // This is an implementation of the QuickFit[Wei88] allocator 123 | // for small objects, suitable for managing small heaps in 124 | // memory constrained environments, such as boot loaders and 125 | // standalone debuggers. 126 | // 127 | // [Wei88] Charles B. Weinstock and William A. Wulf. 1988. 128 | // Quick Fit: An Efficient Algorithm for Heap Storage 129 | // Allocation. 
ACM SIGPLAN Notices 23, 10 (Oct. 1988), 130 | // 141-148. https://doi.org/10.1145/51607.51619 131 | 132 | const ALLOC_UNIT_SHIFT: usize = 6; 133 | const ALLOC_UNIT_SIZE: usize = 1 << ALLOC_UNIT_SHIFT; 134 | const MIN_ALLOC_SIZE: usize = ALLOC_UNIT_SIZE; 135 | const MAX_QUICK_SHIFT: usize = 14; 136 | const MAX_QUICK_SIZE: usize = 1 << MAX_QUICK_SHIFT; 137 | 138 | const NUM_QLISTS: usize = 14 - ALLOC_UNIT_SHIFT + 1; 139 | const NUM_HASH_BUCKETS: usize = 31; // Prime. 140 | 141 | /// A linked block header containing size, alignment, and 142 | /// address information for the block. This is used both for 143 | /// linking unallocated blocks into one of the free lists and 144 | /// for keeping track of blocks allocated from the `misc` list. 145 | /// 146 | /// For irregularly sized allocations, the header keeps track of 147 | /// the block's layout data, its virtual address, and a link 148 | /// pointer. Such a header is either not in any list, if newly 149 | /// allocated and not yet freed, or always in exactly one of two 150 | /// lists: the free list, or a hash chain of allocated blocks. 151 | /// We do this because we need some way to preserve the 152 | /// allocation size after the initial allocation from the tail, 153 | /// and because misc blocks can be reused in a first-fit manner, 154 | /// we cannot rely on a `Layout` to recover the size of the 155 | /// block, so we must store it somewhere. By allocating a tag 156 | /// outside of the buffer, which we look up in a hash table as 157 | /// needed, we can maintain this information without adding 158 | /// additional complexity to allocation. 159 | /// 160 | /// For blocks on one of the quick lists, the size, address and 161 | /// alignment fields are redundant, but convenient. 162 | /// 163 | /// We use the link pointer to point to the next entry in the 164 | /// list in all cases. 165 | #[derive(Debug)] 166 | #[repr(C, align(64))] 167 | struct Header { 168 | next: Option>, 169 | addr: NonNull, 170 | size: usize, 171 | align: usize, 172 | } 173 | 174 | impl Header { 175 | /// Returns a new header for a block of the given size and 176 | /// alignment at the given address. 177 | fn new(addr: NonNull, size: usize, align: usize, next: Option>) -> Header { 178 | Header { next, addr, size, align } 179 | } 180 | } 181 | 182 | /// The QuickFit allocator itself. The allocator takes 183 | /// ownership of a bump allocator for the tail, and contains a 184 | /// set of lists for the quick blocks, as well as a misc list 185 | /// for unusually sized regions, and a hash table of headers 186 | /// describing current misc allocations. As mentioned above, 187 | /// these last data are kept outside of the allocations to keep 188 | /// allocation simple. 189 | #[repr(C)] 190 | pub struct QuickFit { 191 | tail: BumpAlloc, 192 | qlists: [Option>; NUM_QLISTS], 193 | misc: Option>, 194 | allocated_misc: [Option>; NUM_HASH_BUCKETS], 195 | } 196 | 197 | impl QuickFit { 198 | /// Constructs a QuickFit from the given `tail`. 199 | pub const fn new(tail: BumpAlloc) -> QuickFit { 200 | let qlists = [None; NUM_QLISTS]; 201 | let misc = None; 202 | let allocated_misc = [None; NUM_HASH_BUCKETS]; 203 | QuickFit { tail, qlists, misc, allocated_misc } 204 | } 205 | 206 | /// Allocates a block of memory of the requested size and 207 | /// alignment. Returns a pointer to such a block, or nil if 208 | /// the block cannot be allocated. 
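///
/// Worked example (request size assumed): a 48-byte, 8-aligned `Layout` is
/// adjusted up to a 64-byte, 64-aligned block (MIN_ALLOC_SIZE) and served
/// from quick list 0; requests larger than MAX_QUICK_SIZE (16 KiB) fall
/// through to the misc list and then to the bump-allocated tail.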
209 | pub fn malloc(&mut self, layout: Layout) -> *mut u8 { 210 | let (size, align) = Self::adjust(layout); 211 | let p = self.alloc_quick(size, align); 212 | p.or_else(|| self.alloc_tail(size, align)).map(|p| p.as_ptr()).unwrap_or(ptr::null_mut()) 213 | } 214 | 215 | /// Adjusts the given layout so that blocks allocated from 216 | /// one of the quick lists are appropriately sized and 217 | /// aligned. Otherwise, returns the original size and 218 | /// alignment. 219 | fn adjust(layout: Layout) -> (usize, usize) { 220 | let size = layout.size(); 221 | let align = layout.align(); 222 | if size > MAX_QUICK_SIZE { 223 | return (size, align); 224 | } 225 | let size = usize::max(MIN_ALLOC_SIZE, size.next_power_of_two()); 226 | let align = usize::max(layout.align(), size); 227 | (size, align) 228 | } 229 | 230 | /// Attempts to allocate from an existing list: for requests 231 | /// that can be satisfied from one of the quick lists, try 232 | /// and do so; otherwise, attempt an allocation from the 233 | /// misc list. 234 | fn alloc_quick(&mut self, size: usize, align: usize) -> Option> { 235 | if size <= MAX_QUICK_SIZE && align == size { 236 | let k: usize = size.ilog2() as usize - ALLOC_UNIT_SHIFT; 237 | let (node, list) = Self::head(self.qlists[k].take()); 238 | self.qlists[k] = list; 239 | node.map(|header| unsafe { header.as_ref() }.addr) 240 | } else { 241 | self.alloc_misc(size, align) 242 | } 243 | } 244 | 245 | /// Allocates a block from the misc list. This is a simple 246 | /// first-fit allocator. 247 | fn alloc_misc(&mut self, size: usize, align: usize) -> Option> { 248 | let (node, list) = 249 | Self::unlink(self.misc.take(), |node| size <= node.size && align <= node.align); 250 | self.misc = list; 251 | node.map(|mut header| { 252 | let header = unsafe { header.as_mut() }; 253 | let k = Self::hash(header.addr.as_ptr()); 254 | header.next = self.allocated_misc[k].take(); 255 | self.allocated_misc[k] = NonNull::new(header); 256 | header.addr 257 | }) 258 | } 259 | 260 | /// Allocates an aligned block of size `size` from `tail`. 261 | /// If `tail` is not already aligned to the given alignment, 262 | /// then we try to free blocks larger than or equal in size 263 | /// to the minimum allocation unit into the quick lists 264 | /// until it is. 265 | fn alloc_tail(&mut self, size: usize, align: usize) -> Option> { 266 | let (prefix, block) = { self.tail.try_alloc(size, align)? }; 267 | self.free_prefix(prefix); 268 | Some(block.ptr) 269 | } 270 | 271 | /// Frees a prefix that came from a tail allocation. This 272 | /// attempts to store blocks into the quick lists. 273 | fn free_prefix(&mut self, prefix: Block) { 274 | let mut prefix = Self::align_prefix(prefix); 275 | while let Some(rest) = self.try_free_prefix(prefix) { 276 | prefix = rest; 277 | } 278 | } 279 | 280 | /// Aligns the prefix to the minimum allocation size. 281 | fn align_prefix(prefix: Block) -> Block { 282 | let ptr = prefix.as_ptr(); 283 | let len = prefix.len(); 284 | let offset = ptr.align_offset(MIN_ALLOC_SIZE); 285 | assert!(offset <= len); 286 | unsafe { Block::new_from_raw_parts(ptr.wrapping_add(offset), len - offset) } 287 | } 288 | 289 | /// Tries to free the largest section of the prefix that it 290 | /// can, returning the remainder if it did so. Otherwise, 291 | /// returns None. 
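///
/// For example (sizes assumed): a 320-byte prefix starting at a 256-byte
/// aligned address frees one 256-byte block to its quick list and returns
/// the trailing 64-byte remainder; once the remainder drops below
/// MIN_ALLOC_SIZE, None ends the loop in `free_prefix`.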
292 | fn try_free_prefix(&mut self, prefix: Block) -> Option { 293 | let ptr: *mut u8 = prefix.as_ptr(); 294 | for k in (0..NUM_QLISTS).rev() { 295 | let size = 1 << (k + ALLOC_UNIT_SHIFT); 296 | if prefix.len() >= size && ptr.align_offset(size) == 0 { 297 | let (_, rest) = prefix.split_at_mut(size)?; 298 | self.free(ptr, Layout::from_size_align(size, size).unwrap()); 299 | return (rest.len() >= MIN_ALLOC_SIZE).then_some(rest); 300 | } 301 | } 302 | None 303 | } 304 | 305 | /// Attempts to reallocate the given block to a new size. 306 | /// 307 | /// This has a small optimization for the most common case, 308 | /// where a block is being realloc'd to grow as data is 309 | /// accumulated: it's subtle, but if the original block was 310 | /// allocated from one of the quick lists, and the new size 311 | /// can be accommodated by the existing allocation, simply 312 | /// return the existing block pointer. Otherwise, allocate 313 | /// a new block, copy, and free the old block. 314 | /// 315 | /// Note that the case of a reduction in size might result 316 | /// in a new allocation. This is because we rely on the 317 | /// accuracy of the `Layout` to find the correct quicklist 318 | /// to store the block onto on free. If we reduced below 319 | /// the size of the current block, we would lose the layout 320 | /// information and potentially leak memory. But this is 321 | /// very uncommon. 322 | /// 323 | /// We make no effort to optimize the case of a `realloc` in 324 | /// a `misc` block, as a) it is relatively uncommon to do so 325 | /// and b) there may not be a buffer tag for such a block 326 | /// yet (one isn't allocated until the block is freed), and 327 | /// the implementation would need to be more complex as a 328 | /// result. 329 | /// 330 | /// # Safety 331 | /// Must be called with a valid block pointer, layout, and 332 | /// size. 333 | pub unsafe fn realloc(&mut self, block: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { 334 | if block.is_null() { 335 | return self.malloc(layout); 336 | } 337 | let new_layout = Layout::from_size_align(new_size, layout.align()).expect("layout"); 338 | let (size, align) = Self::adjust(new_layout); 339 | if size == layout.size() && align == layout.align() { 340 | return block; 341 | } 342 | let np = self.malloc(new_layout); 343 | if !np.is_null() { 344 | unsafe { 345 | ptr::copy(block, np, usize::min(layout.size(), new_size)); 346 | } 347 | self.free(block, layout) 348 | } 349 | np 350 | } 351 | 352 | /// Frees a block of memory characterized by the `layout` 353 | /// argument. If the block can be freed to one of the 354 | /// quick lists, it is; otherwise, it is treated as a misc 355 | /// block and freed there. 356 | pub fn free(&mut self, block: *mut u8, layout: Layout) { 357 | let Some(block) = NonNull::new(block) else { 358 | return; 359 | }; 360 | let (size, align) = Self::adjust(layout); 361 | if size <= MAX_QUICK_SIZE && align == size { 362 | let k: usize = size.ilog2() as usize - ALLOC_UNIT_SHIFT; 363 | let header = Header::new(block, size, align, self.qlists[k].take()); 364 | assert_eq!(block.align_offset(mem::align_of::
<Header>()), 0); 365 | let p = block.cast::<Header>
(); 366 | unsafe { 367 | ptr::write(p.as_ptr(), header); 368 | } 369 | self.qlists[k] = Some(p); 370 | } else { 371 | self.free_misc(block, size, align); 372 | } 373 | } 374 | 375 | /// Frees a block to the misc list. This looks up the given 376 | /// address in the hash of allocated misc blocks to find its 377 | /// header. 378 | /// 379 | /// If the block header is not found in the hash table, we 380 | /// assume that the block was allocated from the tail and 381 | /// this is the first time it's been freed, so we allocate a 382 | /// header for it and link that into the misc list. 383 | /// 384 | /// If we cannot allocate a header in the usual way, we take 385 | /// it from the block to be freed, which is guaranteed to be 386 | /// large enough to hold a header, since anything smaller 387 | /// would have been allocated from one of the quick lists, 388 | /// and thus freed through that path. 389 | fn free_misc(&mut self, mut block: NonNull, mut size: usize, mut align: usize) { 390 | let mut header = self 391 | .unlink_allocated_misc(block) 392 | .or_else(|| { 393 | let hblock = self.malloc(Layout::new::
<Header>()).cast::<Header>
(); 394 | let hblock = hblock 395 | .is_null() 396 | .then(|| { 397 | let offset = block.align_offset(MIN_ALLOC_SIZE); 398 | let hblock = block.as_ptr().wrapping_add(offset); 399 | let next = hblock.wrapping_add(MIN_ALLOC_SIZE); 400 | block = unsafe { NonNull::new_unchecked(next) }; 401 | size -= offset + MIN_ALLOC_SIZE; 402 | align = MIN_ALLOC_SIZE; 403 | hblock.cast() 404 | }) 405 | .expect("allocated header block"); 406 | let header = Header::new(block, size, align, None); 407 | unsafe { 408 | ptr::write(hblock, header); 409 | } 410 | NonNull::new(hblock) 411 | }) 412 | .expect("header"); 413 | let header = unsafe { header.as_mut() }; 414 | header.next = self.misc.take(); 415 | self.misc = NonNull::new(header); 416 | } 417 | 418 | /// Unlinks the header for the given address from the hash 419 | /// table for allocated misc blocks and returns it, if such 420 | /// a header exists. If the block associated with the 421 | /// address has not been freed yet, it's possible that no 422 | /// header for it exists yet, in which case we return None. 423 | fn unlink_allocated_misc(&mut self, block: NonNull) -> Option> { 424 | let k = Self::hash(block.as_ptr()); 425 | let list = self.allocated_misc[k].take(); 426 | let (node, list) = Self::unlink(list, |node| node.addr == block); 427 | self.allocated_misc[k] = list; 428 | node 429 | } 430 | 431 | /// Unlinks the first node matching the given predicate from 432 | /// the given list, if it exists, returning the node, or 433 | /// None, and the list head. The list head will be None if 434 | /// the list is empty. 435 | fn unlink( 436 | mut list: Option>, 437 | predicate: F, 438 | ) -> (Option>, Option>) 439 | where 440 | F: Fn(&Header) -> bool, 441 | { 442 | let mut prev: Option> = None; 443 | while let Some(mut node) = list { 444 | let node = unsafe { node.as_mut() }; 445 | if predicate(node) { 446 | let next = node.next.take(); 447 | if let Some(mut prev) = prev { 448 | let prev = unsafe { prev.as_mut() }; 449 | prev.next = next; 450 | } else { 451 | list = next; 452 | } 453 | return (NonNull::new(node), list); 454 | } 455 | prev = NonNull::new(node); 456 | list = node.next; 457 | } 458 | (None, list) 459 | } 460 | 461 | /// Splits the list into it's first element and tail and 462 | /// returns both. 463 | fn head(list: Option>) -> (Option>, Option>) { 464 | Self::unlink(list, |_| true) 465 | } 466 | 467 | /// Hashes a pointer value. This is the bit mixing algorithm 468 | /// from Murmur3. 469 | fn hash(ptr: *mut u8) -> usize { 470 | let mut k = ptr.addr(); 471 | k ^= k >> 33; 472 | k = k.wrapping_mul(0xff51afd7ed558ccd); 473 | k ^= k >> 33; 474 | k = k.wrapping_mul(0xc4ceb9fe1a85ec53); 475 | (k >> 33) % NUM_HASH_BUCKETS 476 | } 477 | } 478 | 479 | #[cfg(not(test))] 480 | mod global { 481 | use super::{Block, BumpAlloc, QuickFit}; 482 | use alloc::alloc::{GlobalAlloc, Layout}; 483 | use core::mem; 484 | use core::ptr; 485 | use core::sync::atomic::{AtomicPtr, Ordering}; 486 | 487 | const GLOBAL_HEAP_SIZE: usize = 4 * 1024 * 1024; 488 | 489 | /// A GlobalHeap is an aligned wrapper around an owned 490 | /// buffer. 491 | #[repr(C, align(4096))] 492 | struct GlobalHeap([u8; GLOBAL_HEAP_SIZE]); 493 | impl GlobalHeap { 494 | const fn new() -> GlobalHeap { 495 | Self([0u8; GLOBAL_HEAP_SIZE]) 496 | } 497 | } 498 | 499 | /// GlobalQuickAlloc is a wrapper around a QuickFit over a 500 | /// GlobalHeap that uses interior mutability to implement 501 | /// the GlobalAlloc trait. 
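///
/// Note that the "interior mutability" is a pointer swap rather than a lock:
/// `with_allocator` swaps the pointer to null while the allocator is in use
/// and asserts if it sees null, so re-entrant or concurrent use panics
/// rather than blocking.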
502 | struct GlobalQuickAlloc(AtomicPtr); 503 | impl GlobalQuickAlloc { 504 | fn with_allocator(&self, thunk: F) -> R 505 | where 506 | F: FnOnce(&mut QuickFit) -> R, 507 | { 508 | let a = self.0.swap(ptr::null_mut(), Ordering::Relaxed); 509 | assert!(!a.is_null(), "global allocator is nil"); 510 | let r = thunk(unsafe { &mut *a }); 511 | self.0.swap(a, Ordering::Relaxed); 512 | r 513 | } 514 | } 515 | 516 | unsafe impl GlobalAlloc for GlobalQuickAlloc { 517 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 { 518 | self.with_allocator(|quick| quick.malloc(layout)) 519 | } 520 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { 521 | self.with_allocator(|quick| quick.free(ptr, layout)); 522 | } 523 | unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { 524 | self.with_allocator(|quick| unsafe { quick.realloc(ptr, layout, new_size) }) 525 | } 526 | } 527 | 528 | #[global_allocator] 529 | static GLOBAL_ALLOCATOR: GlobalQuickAlloc = GlobalQuickAlloc(AtomicPtr::new({ 530 | static mut HEAP: GlobalHeap = GlobalHeap::new(); 531 | static mut ALLOC: QuickFit = QuickFit::new(BumpAlloc::new(unsafe { 532 | Block::new_from_raw_parts((&raw mut HEAP).cast(), mem::size_of::()) 533 | })); 534 | &raw mut ALLOC 535 | })); 536 | } 537 | -------------------------------------------------------------------------------- /port/src/bitmapalloc.rs: -------------------------------------------------------------------------------- 1 | use core::fmt; 2 | 3 | use crate::mem::{PhysAddr, PhysRange}; 4 | 5 | /// Simple bitmap. Bear in mind that logically, bit 0 is the rightmost bit, 6 | /// so writing out as bytes will have the bits logically reversed. 7 | struct Bitmap { 8 | bytes: [u8; SIZE_BYTES], 9 | } 10 | 11 | impl Bitmap { 12 | pub const fn new(init_value: u8) -> Self { 13 | Self { bytes: [init_value; SIZE_BYTES] } 14 | } 15 | 16 | /// Is bit `i` within the bitmap set? 17 | pub fn is_set(&self, i: usize) -> bool { 18 | let byte_idx = i / 8; 19 | let bit_idx = i % 8; 20 | let byte = self.bytes[byte_idx]; 21 | byte & (1 << bit_idx) > 0 22 | } 23 | 24 | /// Set bit `i` within the bitmap 25 | pub fn set(&mut self, i: usize, b: bool) { 26 | let byte_idx = i / 8; 27 | let bit_idx = i % 8; 28 | if b { 29 | self.bytes[byte_idx] |= 1 << bit_idx; 30 | } else { 31 | self.bytes[byte_idx] &= !(1 << bit_idx); 32 | } 33 | } 34 | } 35 | 36 | #[derive(Debug, PartialEq)] 37 | pub enum BitmapPageAllocError { 38 | NotEnoughBitmaps, 39 | OutOfBounds, 40 | MisalignedAddr, 41 | OutOfSpace, 42 | NotAllocated, 43 | } 44 | 45 | /// Allocator where each page is represented by a single bit. 46 | /// 0: free, 1: allocated 47 | /// `end` is used to indicate the extent of the memory. Anything beyond this 48 | /// will be marked as allocated. 49 | pub struct BitmapPageAlloc { 50 | bitmaps: [Bitmap; NUM_BITMAPS], 51 | alloc_page_size: usize, // Size of pages represented by single bit 52 | end: PhysAddr, // Upper bound of physical memory 53 | next_pa_to_scan: PhysAddr, // PhysAddr from which to start scanning for next allocation 54 | } 55 | 56 | impl 57 | BitmapPageAlloc 58 | { 59 | pub const fn new_all_allocated(alloc_page_size: usize) -> Self { 60 | let end = PhysAddr::new((NUM_BITMAPS * BITMAP_SIZE_BYTES * 8 * alloc_page_size) as u64); 61 | Self { 62 | bitmaps: [const { Bitmap::::new(0xff) }; NUM_BITMAPS], 63 | alloc_page_size, 64 | end, 65 | next_pa_to_scan: PhysAddr::new(0), 66 | } 67 | } 68 | 69 | /// Returns number of physical bytes a single bitmap can cover. 
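// That is, the physical bytes covered by a single *byte* of the bitmap:
// 8 bits, each representing one page of `alloc_page_size` bytes.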
70 | const fn bytes_per_bitmap_byte(&self) -> usize { 71 | 8 * self.alloc_page_size 72 | } 73 | 74 | /// Returns number of physical bytes a single bitmap can cover. 75 | const fn bytes_per_bitmap(&self) -> usize { 76 | BITMAP_SIZE_BYTES * self.bytes_per_bitmap_byte() 77 | } 78 | 79 | /// Returns number of physical bytes covered by all bitmaps. 80 | const fn max_bytes(&self) -> usize { 81 | NUM_BITMAPS * self.bytes_per_bitmap() 82 | } 83 | 84 | /// Mark the bits corresponding to the given physical range as allocated, 85 | /// regardless of the existing state. 86 | pub fn mark_allocated(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> { 87 | self.mark_range(range, true, true) 88 | } 89 | 90 | /// Mark the bits corresponding to the given physical range as free, 91 | /// regardless of the existing state. 92 | pub fn mark_free(&mut self, range: &PhysRange) -> Result<(), BitmapPageAllocError> { 93 | self.mark_range(range, false, true) 94 | } 95 | 96 | /// Free unused pages in mem that aren't covered by the memory map. Assumes 97 | /// that custom_map is sorted and that available_mem can be used to set the 98 | /// upper bound of the allocator. 99 | pub fn free_unused_ranges<'a>( 100 | &mut self, 101 | available_mem: &PhysRange, 102 | used_ranges: impl Iterator, 103 | ) -> Result<(), BitmapPageAllocError> { 104 | let mut next_start = available_mem.start(); 105 | for range in used_ranges { 106 | if next_start < range.0.start { 107 | self.mark_free(&PhysRange::new(next_start, range.0.start))?; 108 | } 109 | if next_start < range.0.end { 110 | next_start = range.0.end; 111 | } 112 | } 113 | if next_start < available_mem.end() { 114 | self.mark_free(&PhysRange::new(next_start, available_mem.end()))?; 115 | } 116 | 117 | self.end = available_mem.0.end; 118 | 119 | // Mark everything past the end point as allocated 120 | let end_range = PhysRange::new(self.end, PhysAddr::new(self.max_bytes() as u64)); 121 | self.mark_range(&end_range, true, false)?; 122 | 123 | self.next_pa_to_scan = PhysAddr::new(0); // Just set to 0 for simplicity - could be smarter 124 | 125 | Ok(()) 126 | } 127 | 128 | /// Try to allocate the next available page. 129 | pub fn allocate(&mut self) -> Result { 130 | let (first_bitmap_idx, first_byte_idx, _) = self.physaddr_as_indices(self.next_pa_to_scan); 131 | 132 | let found_indices = self 133 | .indices_from(first_bitmap_idx, first_byte_idx) 134 | .find(|indices| self.byte(indices) != 0xff); 135 | 136 | if let Some(indices) = found_indices { 137 | // Mark the page as allocated and return the address 138 | let byte = &mut self.bitmaps[indices.bitmap].bytes[indices.byte]; 139 | let num_leading_ones = byte.trailing_ones() as usize; 140 | *byte |= 1 << num_leading_ones; 141 | 142 | let pa = self.indices_as_physaddr(indices.bitmap, indices.byte, num_leading_ones); 143 | self.next_pa_to_scan = pa; 144 | Ok(pa) 145 | } else { 146 | Err(BitmapPageAllocError::OutOfSpace) 147 | } 148 | } 149 | 150 | /// Deallocate the page corresponding to the given PhysAddr. 
151 | pub fn deallocate(&mut self, pa: PhysAddr) -> Result<(), BitmapPageAllocError> { 152 | if pa > self.end { 153 | return Err(BitmapPageAllocError::OutOfBounds); 154 | } 155 | 156 | let (bitmap_idx, byte_idx, bit_idx) = self.physaddr_as_indices(pa); 157 | 158 | let bitmap = &mut self.bitmaps[bitmap_idx]; 159 | if !bitmap.is_set(8 * byte_idx + bit_idx) { 160 | return Err(BitmapPageAllocError::NotAllocated); 161 | } 162 | bitmap.set(bit_idx, false); 163 | 164 | self.next_pa_to_scan = pa; // Next allocation will reuse this 165 | 166 | Ok(()) 167 | } 168 | 169 | /// Return a tuple of (bytes used, total bytes available) based on the page allocator. 170 | pub fn usage_bytes(&self) -> (usize, usize) { 171 | // We count free because the last bits might be marked partially 'allocated' 172 | // if the end comes in the middle of a byte in the bitmap. 173 | let mut free_bytes: usize = 0; 174 | for indices in self.indices() { 175 | free_bytes += self.byte(&indices).count_zeros() as usize * self.alloc_page_size; 176 | } 177 | let total = self.end.0 as usize; 178 | (total - free_bytes, total) 179 | } 180 | 181 | /// For the given physaddr, returns a tuple of (the bitmap containing pa, 182 | /// the index of the byte containing the pa, and the index of the bit within that byte). 183 | fn physaddr_as_indices(&self, pa: PhysAddr) -> (usize, usize, usize) { 184 | assert_eq!(pa.addr() % self.alloc_page_size as u64, 0); 185 | 186 | // Get the index of the bitmap containing the pa 187 | let bytes_per_bitmap = self.bytes_per_bitmap(); 188 | let bitmap_idx = pa.addr() as usize / bytes_per_bitmap; 189 | 190 | // Get the byte within the bitmap representing the pa 191 | let pa_offset_into_bitmap = pa.addr() as usize % bytes_per_bitmap; 192 | let bytes_per_bitmap_byte = self.bytes_per_bitmap_byte(); 193 | let byte_idx = pa_offset_into_bitmap / bytes_per_bitmap_byte; 194 | 195 | // Finally get the bit within the byte 196 | let bit_idx = 197 | (pa_offset_into_bitmap - (byte_idx * bytes_per_bitmap_byte)) / self.alloc_page_size; 198 | 199 | (bitmap_idx, byte_idx, bit_idx) 200 | } 201 | 202 | /// Given the bitmap index, byte index within the bitmap, and bit index within the byte, 203 | /// return the corresponding PhysAddr. 204 | fn indices_as_physaddr(&self, bitmap_idx: usize, byte_idx: usize, bit_idx: usize) -> PhysAddr { 205 | PhysAddr::new( 206 | ((bitmap_idx * self.bytes_per_bitmap()) 207 | + (byte_idx * self.bytes_per_bitmap_byte()) 208 | + (bit_idx * self.alloc_page_size)) as u64, 209 | ) 210 | } 211 | 212 | fn mark_range( 213 | &mut self, 214 | range: &PhysRange, 215 | mark_allocated: bool, 216 | check_end: bool, 217 | ) -> Result<(), BitmapPageAllocError> { 218 | if check_end && range.0.end > self.end { 219 | return Err(BitmapPageAllocError::NotEnoughBitmaps); 220 | } 221 | 222 | for pa in range.step_by_rounded(self.alloc_page_size) { 223 | let (bitmap_idx, byte_idx, bit_idx) = self.physaddr_as_indices(pa); 224 | if bitmap_idx >= self.bitmaps.len() { 225 | return Err(BitmapPageAllocError::OutOfBounds); 226 | } 227 | 228 | let bitmap = &mut self.bitmaps[bitmap_idx]; 229 | bitmap.set(8 * byte_idx + bit_idx, mark_allocated); 230 | } 231 | Ok(()) 232 | } 233 | 234 | /// Iterate over each of the bytes in turn. Iterates only over the bytes 235 | /// covering pages up to `end`. If `end` is within one of the bytes, that 236 | /// byte will be returned. 
237 | fn indices(&self) -> impl Iterator + '_ { 238 | self.indices_from(0, 0) 239 | } 240 | 241 | /// Iterate over each of the bytes in turn, starting from a particular bitmap 242 | /// and byte, and looping to iterate across all bytes. Iterates only over the bytes 243 | /// covering pages up to `end`. If `end` is within one of the bytes, that 244 | /// byte will be returned. 245 | fn indices_from( 246 | &self, 247 | start_bitmap_idx: usize, 248 | start_byte_idx: usize, 249 | ) -> impl Iterator + '_ { 250 | let mut bitmap_idx = start_bitmap_idx; 251 | let mut byte_idx = start_byte_idx; 252 | let mut passed_first = false; 253 | let mut currpa = self.indices_as_physaddr(bitmap_idx, byte_idx, 0); 254 | 255 | core::iter::from_fn(move || { 256 | // Catch when we've iterated to the end of the last bitmap and need to 257 | // cycle back to the start 258 | if bitmap_idx >= self.bitmaps.len() || currpa >= self.end { 259 | bitmap_idx = 0; 260 | byte_idx = 0; 261 | currpa = PhysAddr::new(0); 262 | } 263 | 264 | // Catch when we've iterated over all the bytes 265 | if passed_first && bitmap_idx == start_bitmap_idx && byte_idx == start_byte_idx { 266 | return None; 267 | } 268 | passed_first = true; 269 | 270 | // Return the byte and prepare for the next 271 | let indices = ByteIndices { bitmap: bitmap_idx, byte: byte_idx }; 272 | byte_idx += 1; 273 | if byte_idx >= BITMAP_SIZE_BYTES { 274 | byte_idx = 0; 275 | bitmap_idx += 1; 276 | currpa.0 += self.alloc_page_size as u64; 277 | } 278 | Some(indices) 279 | }) 280 | } 281 | 282 | fn byte(&self, indices: &ByteIndices) -> u8 { 283 | self.bitmaps[indices.bitmap].bytes[indices.byte] 284 | } 285 | 286 | #[cfg(test)] 287 | fn bytes(&self) -> Vec { 288 | self.indices().map(|idx| self.byte(&idx)).collect::>() 289 | } 290 | 291 | #[cfg(test)] 292 | fn bytes_from(&self, start_bitmap_idx: usize, start_byte_idx: usize) -> Vec { 293 | self.indices_from(start_bitmap_idx, start_byte_idx) 294 | .map(|idx| self.byte(&idx)) 295 | .collect::>() 296 | } 297 | } 298 | 299 | struct ByteIndices { 300 | bitmap: usize, 301 | byte: usize, 302 | } 303 | 304 | /// fmt::Debug is useful in small test cases, but would be too verbose for a 305 | /// realistic bitmap. 
306 | impl fmt::Debug 307 | for BitmapPageAlloc 308 | { 309 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 310 | write!(f, "0x")?; 311 | for b in self.indices() { 312 | write!(f, "{:02x}", self.byte(&b))?; 313 | } 314 | Ok(()) 315 | } 316 | } 317 | 318 | #[cfg(test)] 319 | mod tests { 320 | use super::*; 321 | 322 | #[test] 323 | fn bitmap_new() { 324 | let bitmap = Bitmap::<4096>::new(0); 325 | for byte in bitmap.bytes { 326 | assert_eq!(byte, 0x00); 327 | } 328 | } 329 | 330 | #[test] 331 | fn bitmap_set() { 332 | let mut bitmap = Bitmap::<4096>::new(0); 333 | assert!(!bitmap.is_set(0)); 334 | bitmap.set(0, true); 335 | assert!(bitmap.is_set(0)); 336 | 337 | // Assert only this bit is set 338 | assert_eq!(bitmap.bytes[0], 1); 339 | for i in 1..bitmap.bytes.len() { 340 | assert_eq!(bitmap.bytes[i], 0); 341 | } 342 | } 343 | 344 | #[test] 345 | fn iterate() { 346 | let alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4); 347 | assert_eq!(alloc.bytes(), vec![255; 4]); 348 | assert_eq!(alloc.bytes_from(1, 0), vec![255; 4]); 349 | } 350 | 351 | #[test] 352 | fn bitmappagealloc_mark_allocated_and_free() -> Result<(), BitmapPageAllocError> { 353 | // Create a new allocator and mark it all freed 354 | // 2 bitmaps, 2 bytes per bitmap, mapped to pages of 4 bytes 355 | // 32 bits, 128 bytes physical memory 356 | let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4); 357 | alloc.mark_free(&PhysRange::with_end(0, alloc.max_bytes() as u64))?; 358 | 359 | // Mark a range as allocated - 10 bits 360 | alloc.mark_allocated(&PhysRange::with_end(4, 44))?; 361 | assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]); 362 | 363 | // Deallocate a range - first 2 bits 364 | alloc.mark_free(&PhysRange::with_end(0, 8))?; 365 | assert_eq!(alloc.bytes(), [0xfc, 0x07, 0x00, 0x00]); 366 | Ok(()) 367 | } 368 | 369 | #[test] 370 | fn bitmappagealloc_allocate_and_deallocate() -> Result<(), BitmapPageAllocError> { 371 | // Create a new allocator and mark it all freed 372 | // 2 bitmaps, 2 bytes per bitmap, mapped to pages of 4 bytes 373 | // 32 bits, 128 bytes physical memory 374 | let mut alloc = BitmapPageAlloc::<2, 2>::new_all_allocated(4); 375 | alloc.mark_free(&PhysRange::with_end(0, alloc.max_bytes() as u64))?; 376 | assert_eq!(alloc.usage_bytes(), (0, 128)); 377 | 378 | // Mark a range as allocated - 10 bits 379 | alloc.mark_allocated(&PhysRange::with_end(4, 44))?; 380 | assert_eq!(alloc.usage_bytes(), (40, 128)); 381 | assert_eq!(alloc.bytes(), [0xfe, 0x07, 0x00, 0x00]); 382 | 383 | // Now try to allocate the next 3 free pages 384 | assert_eq!(alloc.allocate()?, PhysAddr::new(0)); 385 | assert_eq!(alloc.allocate()?, PhysAddr::new(44)); 386 | assert_eq!(alloc.allocate()?, PhysAddr::new(48)); 387 | 388 | // Allocate until we run out of pages. 
At this point there are 19 pages left, 389 | // so allocate them, and then assert one more fails 390 | for _ in 0..19 { 391 | alloc.allocate()?; 392 | } 393 | assert_eq!(alloc.bytes(), [0xff, 0xff, 0xff, 0xff]); 394 | assert_eq!(alloc.allocate().unwrap_err(), BitmapPageAllocError::OutOfSpace); 395 | 396 | // Now try to deallocate the second page 397 | assert!(alloc.deallocate(PhysAddr::new(4)).is_ok()); 398 | assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]); 399 | 400 | // Ensure double deallocation fails 401 | assert_eq!( 402 | alloc.deallocate(PhysAddr::new(4)).unwrap_err(), 403 | BitmapPageAllocError::NotAllocated 404 | ); 405 | assert_eq!(alloc.bytes(), [0xfd, 0xff, 0xff, 0xff]); 406 | 407 | // Allocate once more, expecting the physical address we just deallocated 408 | assert_eq!(alloc.allocate()?, PhysAddr::new(4)); 409 | 410 | Ok(()) 411 | } 412 | 413 | #[test] 414 | fn physaddr_as_indices() { 415 | let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096); 416 | let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64; 417 | 418 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(0)), (0, 0, 0)); 419 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096)), (0, 0, 1)); 420 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(8192)), (0, 0, 2)); 421 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096 * 8)), (0, 1, 0)); 422 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(4096 * 9)), (0, 1, 1)); 423 | assert_eq!(alloc.physaddr_as_indices(PhysAddr::new(bytes_per_bitmap)), (1, 0, 0)); 424 | assert_eq!( 425 | alloc.physaddr_as_indices(PhysAddr::new(bytes_per_bitmap + 4096 * 9)), 426 | (1, 1, 1) 427 | ); 428 | } 429 | 430 | #[test] 431 | fn indices_as_physaddr() { 432 | let alloc = BitmapPageAlloc::<2, 4096>::new_all_allocated(4096); 433 | let bytes_per_bitmap = alloc.bytes_per_bitmap() as u64; 434 | 435 | assert_eq!(alloc.indices_as_physaddr(0, 0, 0), PhysAddr::new(0)); 436 | assert_eq!(alloc.indices_as_physaddr(0, 0, 1), PhysAddr::new(4096)); 437 | assert_eq!(alloc.indices_as_physaddr(0, 1, 0), PhysAddr::new(4096 * 8)); 438 | assert_eq!(alloc.indices_as_physaddr(0, 1, 1), PhysAddr::new(4096 * 9)); 439 | assert_eq!(alloc.indices_as_physaddr(1, 0, 0), PhysAddr::new(bytes_per_bitmap)); 440 | assert_eq!(alloc.indices_as_physaddr(1, 1, 1), PhysAddr::new(bytes_per_bitmap + 4096 * 9)); 441 | } 442 | } 443 | -------------------------------------------------------------------------------- /port/src/dat.rs: -------------------------------------------------------------------------------- 1 | extern crate alloc; 2 | 3 | use alloc::sync::Arc; 4 | use bitflags::bitflags; 5 | use core::ptr::NonNull; 6 | use core::result::Result; 7 | 8 | pub struct Error {} 9 | 10 | pub struct Chan { 11 | _offset: u64, 12 | _devoffset: u64, 13 | _typ: u16, 14 | _dev: u32, 15 | _mode: u16, 16 | _flag: u16, 17 | _qid: Qid, 18 | _fid: u32, 19 | _iounit: u32, 20 | // umh: Option<&Mutex>, 21 | // umqlock: Obviated by Mutex in umh? 22 | _umc: Option>, 23 | _uri: usize, 24 | _dri: usize, 25 | // dirrock: Option<&Mutex<*const ()>>, 26 | // rockqlock: Obviated by Mutex in dirrock? 27 | _nrock: usize, 28 | _mrock: usize, 29 | _ismtpt: bool, 30 | // mcp: *mut Mntcache, 31 | // mux: *mut Mnt, 32 | _aux: *mut (), 33 | _pgrpid: Qid, 34 | _mid: u32, 35 | // mchan: Arc, 36 | _mqid: Qid, 37 | // path: *const Path, 38 | } 39 | 40 | #[allow(dead_code)] 41 | pub struct Device { 42 | _dc: u32, 43 | _name: &'static str, 44 | _attached: bool, 45 | } 46 | 47 | bitflags! 
{ 48 | pub struct Mode: u16 { 49 | const READ = 0; 50 | const WRITE = 1; 51 | const OEXEC = 2; 52 | const OTRUNC = 4; 53 | const OCEXEC = 5; 54 | const ORCLOSE = 6; 55 | const OEXCL = 12; 56 | } 57 | } 58 | 59 | pub trait Dev { 60 | fn reset(); 61 | fn init(); 62 | fn shutdown(); 63 | fn attach(spec: &[u8]) -> Chan; 64 | fn walk(&self, c: &Chan, nc: &mut Chan, name: &[&[u8]]) -> Walkqid; 65 | fn stat(&self, c: &Chan, sb: &mut [u8]) -> Result<(), Error>; 66 | fn open(&mut self, c: &Chan, mode: u32) -> Chan; 67 | fn create(&mut self, c: &mut Chan, name: &[u8], mode: Mode, perms: u32); 68 | fn close(&mut self, c: Chan); 69 | fn read(&mut self, c: &mut Chan, buf: &mut [u8], offset: u64) -> Result; 70 | fn bread(&mut self, c: &mut Chan, bnum: u64, offset: u64) -> Result; 71 | fn write(&mut self, c: &mut Chan, buf: &[u8], offset: u64) -> Result; 72 | fn bwrite(&mut self, c: &mut Chan, block: &Block, offset: u64) -> Result; 73 | fn remove(&mut self, c: &mut Chan); 74 | fn wstat(&mut self, c: &mut Chan, sb: &[u8]) -> Result<(), Error>; 75 | fn power(&mut self, on: bool); 76 | fn config(&mut self /* other args */) -> Result<(), Error>; 77 | } 78 | 79 | pub struct Block { 80 | // TODO(cross): block linkage. 81 | _rp: usize, 82 | _wp: usize, 83 | _lim: *const u8, 84 | _base: *mut u8, 85 | _flag: u16, 86 | _checksum: u16, 87 | _magic: u32, 88 | } 89 | 90 | pub struct Qid { 91 | _path: u64, 92 | _vers: u32, 93 | _typ: u8, 94 | } 95 | 96 | pub struct Walkqid { 97 | _clone: Arc, 98 | _qids: [Qid], 99 | } 100 | -------------------------------------------------------------------------------- /port/src/devcons.rs: -------------------------------------------------------------------------------- 1 | use crate::mcslock::{Lock, LockNode}; 2 | use core::fmt; 3 | 4 | const fn ctrl(b: u8) -> u8 { 5 | b - b'@' 6 | } 7 | 8 | #[allow(dead_code)] 9 | const BACKSPACE: u8 = ctrl(b'H'); 10 | #[allow(dead_code)] 11 | const DELETE: u8 = 0x7F; 12 | #[allow(dead_code)] 13 | const CTLD: u8 = ctrl(b'D'); 14 | #[allow(dead_code)] 15 | const CTLP: u8 = ctrl(b'P'); 16 | #[allow(dead_code)] 17 | const CTLU: u8 = ctrl(b'U'); 18 | 19 | pub trait Uart { 20 | fn putb(&self, b: u8); 21 | } 22 | 23 | static CONS: Lock> = Lock::new("cons", None); 24 | 25 | /// Console is what should be used in almost all cases, as it ensures threadsafe 26 | /// use of the console. 27 | pub struct Console; 28 | 29 | impl Console { 30 | /// Create a locking console. Assumes at this point we can use atomics. 31 | pub fn new(uart_fn: F) -> Self 32 | where 33 | F: FnOnce() -> &'static mut dyn Uart, 34 | { 35 | let node = LockNode::new(); 36 | let mut cons = CONS.lock(&node); 37 | *cons = Some(uart_fn()); 38 | Self 39 | } 40 | 41 | pub fn putstr(&mut self, s: &str) { 42 | // XXX: Just for testing. 43 | 44 | let node = LockNode::new(); 45 | let mut uart_guard = CONS.lock(&node); 46 | let uart = uart_guard.as_deref_mut().unwrap(); 47 | for b in s.bytes() { 48 | putb(uart, b); 49 | } 50 | } 51 | } 52 | 53 | impl fmt::Write for Console { 54 | fn write_str(&mut self, s: &str) -> fmt::Result { 55 | self.putstr(s); 56 | Ok(()) 57 | } 58 | } 59 | 60 | /// PanicConsole should only be used in the very early stages of booting, when 61 | /// we're not sure we can use locks. This can be particularly useful for 62 | /// implementing an early panic handler. 
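// For example (illustrative only, with `uart` being some locally constructed
// type implementing Uart):
//     let mut cons = PanicConsole::new(uart);
//     let _ = core::fmt::Write::write_fmt(&mut cons, format_args!("early panic\n"));
// This avoids touching the locking Console before locks are known to be safe.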
63 | pub struct PanicConsole 64 | where 65 | T: Uart, 66 | { 67 | uart: T, 68 | } 69 | 70 | impl PanicConsole 71 | where 72 | T: Uart, 73 | { 74 | pub fn new(uart: T) -> Self { 75 | Self { uart } 76 | } 77 | 78 | pub fn putstr(&mut self, s: &str) { 79 | // XXX: Just for testing. 80 | 81 | for b in s.bytes() { 82 | putb(&mut self.uart, b); 83 | } 84 | } 85 | } 86 | 87 | impl fmt::Write for PanicConsole 88 | where 89 | T: Uart, 90 | { 91 | fn write_str(&mut self, s: &str) -> fmt::Result { 92 | self.putstr(s); 93 | Ok(()) 94 | } 95 | } 96 | 97 | pub fn print(args: fmt::Arguments) { 98 | // XXX: Just for testing. 99 | use fmt::Write; 100 | let mut cons: Console = Console {}; 101 | cons.write_fmt(args).unwrap(); 102 | } 103 | 104 | #[macro_export] 105 | macro_rules! println { 106 | () => ($crate::print!("\n")); 107 | ($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*))); 108 | } 109 | 110 | #[macro_export] 111 | macro_rules! print { 112 | ($($args:tt)*) => {{ 113 | $crate::devcons::print(format_args!($($args)*)) 114 | }}; 115 | } 116 | 117 | fn putb(uart: &mut dyn Uart, b: u8) { 118 | if b == b'\n' { 119 | uart.putb(b'\r'); 120 | } else if b == BACKSPACE { 121 | uart.putb(b); 122 | uart.putb(b' '); 123 | } 124 | uart.putb(b); 125 | } 126 | -------------------------------------------------------------------------------- /port/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::upper_case_acronyms)] 2 | #![allow(clippy::too_long_first_doc_paragraph)] 3 | #![cfg_attr(not(any(test)), no_std)] 4 | #![feature(allocator_api)] 5 | #![feature(maybe_uninit_slice)] 6 | #![feature(step_trait)] 7 | #![feature(strict_provenance)] 8 | #![forbid(unsafe_op_in_unsafe_fn)] 9 | 10 | extern crate alloc; 11 | 12 | pub mod allocator; 13 | pub mod bitmapalloc; 14 | pub mod dat; 15 | pub mod devcons; 16 | pub mod fdt; 17 | pub mod mcslock; 18 | pub mod mem; 19 | -------------------------------------------------------------------------------- /port/src/mcslock.rs: -------------------------------------------------------------------------------- 1 | //! MCS locks 2 | //! 3 | //! Reference: 4 | //! 5 | //! John M. Mellor-Crummey and Michael L. Scott. 1991. Algorithms 6 | //! for Scalable Synchronization on Shared Memory Multiprocessors. 7 | //! ACM Transactions on Computer Systems 9, 1 (Feb. 1991), 21–65. 8 | //! DOI: https://doi.org/10.1145/103727.103729 9 | 10 | use core::cell::UnsafeCell; 11 | use core::hint; 12 | use core::marker::{Send, Sized, Sync}; 13 | use core::ops::{Deref, DerefMut}; 14 | use core::ptr; 15 | use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; 16 | 17 | /// Represents a node in the lock structure. Note, is cacheline 18 | /// aligned. 19 | #[repr(align(64))] 20 | pub struct LockNode { 21 | next: AtomicPtr, 22 | locked: AtomicBool, 23 | } 24 | 25 | impl LockNode { 26 | pub const fn new() -> LockNode { 27 | LockNode { next: AtomicPtr::new(ptr::null_mut()), locked: AtomicBool::new(false) } 28 | } 29 | } 30 | 31 | impl Default for LockNode { 32 | fn default() -> Self { 33 | Self::new() 34 | } 35 | } 36 | 37 | /// An MCS lock. 
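// Each waiter spins on its own cache-line-aligned LockNode rather than on a
// shared flag; `queue` holds the tail of the waiter queue, and unlock hands
// the lock directly to the next queued node.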
38 | pub struct MCSLock { 39 | _name: &'static str, 40 | queue: AtomicPtr, 41 | } 42 | 43 | impl MCSLock { 44 | pub const fn new(name: &'static str) -> MCSLock { 45 | MCSLock { _name: name, queue: AtomicPtr::new(ptr::null_mut()) } 46 | } 47 | 48 | pub fn lock<'a>(&self, node: &'a LockNode) -> &'a LockNode { 49 | node.next.store(ptr::null_mut(), Ordering::Release); 50 | node.locked.store(false, Ordering::Release); 51 | let p = node as *const _ as *mut _; 52 | let predecessor = self.queue.swap(p, Ordering::AcqRel); 53 | if !predecessor.is_null() { 54 | let predecessor = unsafe { &*predecessor }; 55 | node.locked.store(true, Ordering::Release); 56 | predecessor.next.store(p, Ordering::Release); 57 | while node.locked.load(Ordering::Acquire) { 58 | hint::spin_loop(); 59 | } 60 | } 61 | node 62 | } 63 | 64 | pub fn unlock(&self, node: &LockNode) { 65 | if node.next.load(Ordering::Acquire).is_null() { 66 | let p = node as *const _ as *mut _; 67 | if self 68 | .queue 69 | .compare_exchange_weak(p, ptr::null_mut(), Ordering::AcqRel, Ordering::Relaxed) 70 | .is_ok() 71 | { 72 | return; 73 | } 74 | while node.next.load(Ordering::Acquire).is_null() { 75 | hint::spin_loop(); 76 | } 77 | } 78 | let next = node.next.load(Ordering::Acquire); 79 | let next = unsafe { &*next }; 80 | next.locked.store(false, Ordering::Release); 81 | } 82 | } 83 | 84 | pub struct Lock { 85 | lock: UnsafeCell, 86 | data: UnsafeCell, 87 | } 88 | 89 | unsafe impl Send for Lock {} 90 | unsafe impl Sync for Lock {} 91 | 92 | impl Lock { 93 | pub const fn new(name: &'static str, data: T) -> Lock { 94 | Lock { lock: UnsafeCell::new(MCSLock::new(name)), data: UnsafeCell::new(data) } 95 | } 96 | 97 | pub fn lock<'a>(&'a self, node: &'a LockNode) -> LockGuard<'a, T> { 98 | let node = unsafe { &mut *self.lock.get() }.lock(node); 99 | LockGuard { lock: &self.lock, node, data: unsafe { &mut *self.data.get() } } 100 | } 101 | } 102 | 103 | pub struct LockGuard<'a, T: ?Sized + 'a> { 104 | lock: &'a UnsafeCell, 105 | node: &'a LockNode, 106 | data: &'a mut T, 107 | } 108 | impl<'a, T> Deref for LockGuard<'a, T> { 109 | type Target = T; 110 | 111 | fn deref(&self) -> &T { 112 | self.data 113 | } 114 | } 115 | 116 | impl<'a, T> DerefMut for LockGuard<'a, T> { 117 | fn deref_mut(&mut self) -> &mut T { 118 | self.data 119 | } 120 | } 121 | 122 | impl<'a, T: ?Sized> Drop for LockGuard<'a, T> { 123 | fn drop(&mut self) { 124 | unsafe { &mut *self.lock.get() }.unlock(self.node); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /port/src/mem.rs: -------------------------------------------------------------------------------- 1 | use crate::fdt::RegBlock; 2 | use core::{ 3 | fmt, 4 | iter::{Step, StepBy}, 5 | ops::{self, Range}, 6 | }; 7 | 8 | pub const PAGE_SIZE_4K: usize = 4 << 10; 9 | pub const PAGE_SIZE_2M: usize = 2 << 20; 10 | pub const PAGE_SIZE_1G: usize = 1 << 30; 11 | 12 | pub struct VirtRange(pub Range); 13 | 14 | impl VirtRange { 15 | pub fn with_len(start: usize, len: usize) -> Self { 16 | Self(start..start + len) 17 | } 18 | 19 | pub fn offset_addr(&self, offset: usize) -> Option { 20 | let addr = self.0.start + offset; 21 | if self.0.contains(&addr) { 22 | Some(addr) 23 | } else { 24 | None 25 | } 26 | } 27 | 28 | pub fn start(&self) -> usize { 29 | self.0.start 30 | } 31 | 32 | pub fn end(&self) -> usize { 33 | self.0.end 34 | } 35 | } 36 | 37 | impl From<&RegBlock> for VirtRange { 38 | fn from(r: &RegBlock) -> Self { 39 | let start = r.addr as usize; 40 | let end = start + 
r.len.unwrap_or(0) as usize; 41 | VirtRange(start..end) 42 | } 43 | } 44 | 45 | #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)] 46 | #[repr(transparent)] 47 | pub struct PhysAddr(pub u64); 48 | 49 | impl PhysAddr { 50 | pub const fn new(value: u64) -> Self { 51 | PhysAddr(value) 52 | } 53 | 54 | pub const fn addr(&self) -> u64 { 55 | self.0 56 | } 57 | 58 | pub const fn round_up(&self, step: u64) -> PhysAddr { 59 | assert!(step.is_power_of_two()); 60 | PhysAddr((self.0 + step - 1) & !(step - 1)) 61 | } 62 | 63 | pub const fn round_down(&self, step: u64) -> PhysAddr { 64 | assert!(step.is_power_of_two()); 65 | PhysAddr(self.0 & !(step - 1)) 66 | } 67 | } 68 | 69 | impl ops::Add for PhysAddr { 70 | type Output = PhysAddr; 71 | 72 | fn add(self, offset: u64) -> PhysAddr { 73 | PhysAddr(self.0 + offset) 74 | } 75 | } 76 | 77 | /// Note that this implementation will round down the startpa and round up the endpa 78 | impl Step for PhysAddr { 79 | fn steps_between(&startpa: &Self, &endpa: &Self) -> Option { 80 | if startpa.0 <= endpa.0 { 81 | match endpa.0.checked_sub(startpa.0) { 82 | Some(result) => usize::try_from(result).ok(), 83 | None => None, 84 | } 85 | } else { 86 | None 87 | } 88 | } 89 | 90 | fn forward_checked(startpa: Self, count: usize) -> Option { 91 | startpa.0.checked_add(count as u64).map(PhysAddr) 92 | } 93 | 94 | fn backward_checked(startpa: Self, count: usize) -> Option { 95 | startpa.0.checked_sub(count as u64).map(PhysAddr) 96 | } 97 | } 98 | 99 | impl fmt::Debug for PhysAddr { 100 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 101 | write!(f, "PhysAddr({:#016x})", self.0)?; 102 | Ok(()) 103 | } 104 | } 105 | 106 | pub struct PhysRange(pub Range); 107 | 108 | impl PhysRange { 109 | pub fn new(start: PhysAddr, end: PhysAddr) -> Self { 110 | Self(start..end) 111 | } 112 | 113 | pub fn with_end(start: u64, end: u64) -> Self { 114 | Self(PhysAddr(start)..PhysAddr(end)) 115 | } 116 | 117 | pub fn with_len(start: u64, len: usize) -> Self { 118 | Self(PhysAddr(start)..PhysAddr(start + len as u64)) 119 | } 120 | 121 | #[allow(dead_code)] 122 | pub fn offset_addr(&self, offset: u64) -> Option { 123 | let addr = self.0.start + offset; 124 | if self.0.contains(&addr) { 125 | Some(addr) 126 | } else { 127 | None 128 | } 129 | } 130 | 131 | pub fn start(&self) -> PhysAddr { 132 | self.0.start 133 | } 134 | 135 | pub fn end(&self) -> PhysAddr { 136 | self.0.end 137 | } 138 | 139 | pub fn size(&self) -> usize { 140 | (self.0.end.addr() - self.0.start.addr()) as usize 141 | } 142 | 143 | pub fn step_by_rounded(&self, step_size: usize) -> StepBy> { 144 | let startpa = self.start().round_down(step_size as u64); 145 | let endpa = self.end().round_up(step_size as u64); 146 | (startpa..endpa).step_by(step_size) 147 | } 148 | } 149 | 150 | impl fmt::Display for PhysRange { 151 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 152 | write!(f, "{:#016x}..{:#016x}", self.0.start.addr(), self.0.end.addr()) 153 | } 154 | } 155 | 156 | impl From<&RegBlock> for PhysRange { 157 | fn from(r: &RegBlock) -> Self { 158 | let start = PhysAddr(r.addr); 159 | let end = start + r.len.unwrap_or(0); 160 | PhysRange(start..end) 161 | } 162 | } 163 | 164 | #[cfg(test)] 165 | mod tests { 166 | use super::*; 167 | 168 | #[test] 169 | fn physaddr_step() { 170 | let range = PhysRange(PhysAddr::new(4096)..PhysAddr::new(4096 * 3)); 171 | let pas = range.step_by_rounded(PAGE_SIZE_4K).collect::>(); 172 | assert_eq!(pas, [PhysAddr::new(4096), PhysAddr::new(4096 * 2)]); 173 | } 174 | 175 | 
#[test] 176 | fn physaddr_step_rounds_up_and_down() { 177 | // Start should round down to 8192 178 | // End should round up to 16384 179 | let range = PhysRange(PhysAddr::new(9000)..PhysAddr::new(5000 * 3)); 180 | let pas = range.step_by_rounded(PAGE_SIZE_4K).collect::>(); 181 | assert_eq!(pas, [PhysAddr::new(4096 * 2), PhysAddr::new(4096 * 3)]); 182 | } 183 | 184 | #[test] 185 | fn physaddr_step_2m() { 186 | let range = 187 | PhysRange(PhysAddr::new(0x3f000000)..PhysAddr::new(0x3f000000 + 4 * 1024 * 1024)); 188 | let pas = range.step_by_rounded(PAGE_SIZE_2M).collect::>(); 189 | assert_eq!(pas, [PhysAddr::new(0x3f000000), PhysAddr::new(0x3f000000 + 2 * 1024 * 1024)]); 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /port/tests/fdt_test.rs: -------------------------------------------------------------------------------- 1 | use port::fdt::{DeviceTree, Range, RangeMapping, RegBlock, TranslatedReg}; 2 | 3 | static TEST1_DTB: &[u8] = include_bytes!("../lib/test/fdt/test1.dtb"); 4 | 5 | #[test] 6 | fn find_by_path() { 7 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 8 | 9 | // Find the first node. Next token should not be the same node. 10 | let root = dt.find_by_path("/").unwrap(); 11 | assert_eq!(dt.node_name(&root).unwrap(), ""); 12 | 13 | // Misc lookups 14 | let soc = dt.find_by_path("/soc").unwrap(); 15 | assert_eq!(dt.node_name(&soc).unwrap(), "soc"); 16 | 17 | let eth = dt.find_by_path("/reserved-memory/linux,cma").unwrap(); 18 | assert_eq!(dt.node_name(ð).unwrap(), "linux,cma"); 19 | 20 | assert_eq!(dt.find_by_path("/bar"), None); 21 | assert_eq!(dt.find_by_path("/reserved-memory/foo"), None); 22 | } 23 | 24 | #[test] 25 | fn traverse_tree() { 26 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 27 | 28 | let root = dt.root().unwrap(); 29 | assert_eq!(dt.node_name(&root).unwrap(), ""); 30 | assert_eq!(root.depth(), 0); 31 | 32 | let aliases = dt.children(&root).nth(0).unwrap(); 33 | assert_eq!(dt.node_name(&aliases).unwrap(), "aliases"); 34 | assert_eq!(aliases.depth(), 1); 35 | 36 | let soc = dt.children(&root).nth(4).unwrap(); 37 | assert_eq!(dt.node_name(&soc).unwrap(), "soc"); 38 | assert_eq!(soc.depth(), 1); 39 | let uart = dt.children(&soc).nth(4).unwrap(); 40 | assert_eq!(dt.node_name(&uart).unwrap(), "serial@7e201000"); 41 | assert_eq!(uart.depth(), 2); 42 | 43 | let uart_parent = dt.parent(&uart).unwrap(); 44 | assert_eq!(dt.node_name(&uart_parent).unwrap(), "soc"); 45 | assert_eq!(uart_parent, soc); 46 | } 47 | 48 | #[test] 49 | fn find_compatible() { 50 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 51 | 52 | // Simple test for compatible where there's only a single match in the string list 53 | let mut dma_iter = dt.find_compatible("shared-dma-pool"); 54 | let dma = dma_iter.next().unwrap(); 55 | assert_eq!(dt.node_name(&dma).unwrap(), "linux,cma"); 56 | assert_eq!(dma.depth(), 2); 57 | assert!(dma_iter.next().is_none()); 58 | 59 | // First, then second matching compatible strings for the same element 60 | assert_eq!( 61 | dt.find_compatible("arm,pl011").flat_map(|n| dt.node_name(&n)).collect::>(), 62 | vec!["serial@7e201000"] 63 | ); 64 | assert_eq!( 65 | dt.find_compatible("arm,primecell").flat_map(|n| dt.node_name(&n)).collect::>(), 66 | vec!["serial@7e201000"] 67 | ); 68 | 69 | // Find multiple matching nodes 70 | assert_eq!( 71 | dt.find_compatible("brcm,bcm2835-sdhci") 72 | .flat_map(|n| dt.node_name(&n)) 73 | .collect::>(), 74 | vec!["mmc@7e300000", "mmcnr@7e300000"] 75 | ); 76 | 77 | // Doesn't find substrings 
78 | assert!(dt 79 | .find_compatible("arm") 80 | .flat_map(|n| dt.node_name(&n)) 81 | .collect::>() 82 | .is_empty()); 83 | 84 | // No match 85 | assert!(dt 86 | .find_compatible("xxxx") 87 | .flat_map(|n| dt.node_name(&n)) 88 | .collect::>() 89 | .is_empty()); 90 | } 91 | 92 | #[test] 93 | fn get_cells() { 94 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 95 | 96 | let node = dt.find_by_path("/reserved-memory").unwrap(); 97 | assert_eq!( 98 | dt.property(&node, "#address-cells").and_then(|p| dt.property_value_as_u32(&p)), 99 | Some(1) 100 | ); 101 | assert_eq!( 102 | dt.property(&node, "#size-cells").and_then(|p| dt.property_value_as_u32(&p)), 103 | Some(1) 104 | ); 105 | 106 | let node = dt.find_by_path("/soc/spi@7e204000").unwrap(); 107 | assert_eq!( 108 | dt.property(&node, "#address-cells").and_then(|p| dt.property_value_as_u32(&p)), 109 | Some(1) 110 | ); 111 | assert_eq!( 112 | dt.property(&node, "#size-cells").and_then(|p| dt.property_value_as_u32(&p)), 113 | Some(0) 114 | ); 115 | } 116 | 117 | #[test] 118 | fn get_reg() { 119 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 120 | 121 | let uart = dt.find_by_path("/soc/serial@7e201000").unwrap(); 122 | let uart_reg_raw = dt 123 | .property(&uart, "reg") 124 | .map(|p| dt.property_value_as_u32_iter(&p).collect::>()) 125 | .unwrap(); 126 | assert_eq!(uart_reg_raw, vec![0x7e20_1000, 0x200]); 127 | 128 | // Basic case - 1 addr and 1 length 129 | let uart_reg = dt.property_reg_iter(uart).collect::>(); 130 | assert_eq!(uart_reg, vec![RegBlock { addr: 0x7e20_1000, len: Some(0x200) }]); 131 | 132 | // Example with no length 133 | let spidev = dt.find_by_path("/soc/spi@7e204000/spidev@0").unwrap(); 134 | let spidev_reg = dt.property_reg_iter(spidev).collect::>(); 135 | assert_eq!(spidev_reg, vec![RegBlock { addr: 0x0, len: None }]); 136 | 137 | // Example with > 1 reg 138 | let watchdog = dt.find_by_path("/soc/watchdog@7e100000").unwrap(); 139 | let watchdog_reg = dt.property_reg_iter(watchdog).collect::>(); 140 | assert_eq!( 141 | watchdog_reg, 142 | vec![ 143 | RegBlock { addr: 0x7e100000, len: Some(0x114) }, 144 | RegBlock { addr: 0x7e00a000, len: Some(0x24) } 145 | ] 146 | ); 147 | } 148 | 149 | #[test] 150 | fn get_ranges() { 151 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 152 | 153 | // Get raw reg 154 | let uart = dt.find_by_path("/soc/serial@7e201000").unwrap(); 155 | let uart_reg = dt.property_reg_iter(uart).collect::>(); 156 | assert_eq!(uart_reg, vec![RegBlock { addr: 0x7e20_1000, len: Some(0x200) }]); 157 | 158 | // Get ranges for parent 159 | let soc = dt.parent(&uart).unwrap(); 160 | let soc_ranges = dt.property_range_iter(soc).collect::>(); 161 | assert_eq!( 162 | soc_ranges, 163 | vec![ 164 | Range::Translated(RangeMapping { 165 | child_bus_addr: 0x7e000000, 166 | parent_bus_addr: 0x3f000000, 167 | len: 0x1000000 168 | }), 169 | Range::Translated(RangeMapping { 170 | child_bus_addr: 0x40000000, 171 | parent_bus_addr: 0x40000000, 172 | len: 0x1000 173 | }), 174 | ] 175 | ); 176 | } 177 | 178 | #[test] 179 | fn get_translated_reg() { 180 | let dt = DeviceTree::new(TEST1_DTB).unwrap(); 181 | 182 | // Get translated reg, based on parent ranges 183 | let uart = dt.find_by_path("/soc/serial@7e201000").unwrap(); 184 | let uart_reg = dt.property_translated_reg_iter(uart).collect::>(); 185 | assert_eq!( 186 | uart_reg, 187 | vec![TranslatedReg::Translated(RegBlock { addr: 0x3f20_1000, len: Some(0x200) })] 188 | ); 189 | } 190 | -------------------------------------------------------------------------------- /riscv64/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | cargo-features = ["per-package-target"] 2 | 3 | [package] 4 | name = "riscv64" 5 | version = "0.1.0" 6 | edition = "2021" 7 | default-target = "riscv64gc-unknown-none-elf" 8 | 9 | [dependencies] 10 | port = { path = "../port" } 11 | 12 | [features] 13 | opensbi = [] 14 | 15 | [lints.rust] 16 | unexpected_cfgs = { level = "warn", check-cfg = ['cfg(platform, values("nezha"))', 'cfg(platform, values("virt"))'] } 17 | -------------------------------------------------------------------------------- /riscv64/README.md: -------------------------------------------------------------------------------- 1 | build setup 2 | 3 | ``` 4 | rustup target add riscv64gc-unknown-none-elf 5 | ``` 6 | 7 | build: 8 | 9 | ``` 10 | cargo xtask build --arch riscv64 11 | ``` 12 | 13 | run: 14 | 15 | ``` 16 | cargo xtask qemu --arch riscv64 17 | ``` 18 | -------------------------------------------------------------------------------- /riscv64/lib/config_default.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "lib/riscv64-unknown-none-elf.json" 3 | buildflags = [ 4 | "-Z", "build-std=core,alloc" 5 | ] 6 | 7 | [link] 8 | arch = 'riscv' 9 | script = 'riscv64/lib/kernel.ld' 10 | load-address = '0x80200000' 11 | 12 | [config] 13 | platform = "virt" 14 | -------------------------------------------------------------------------------- /riscv64/lib/kernel.ld: -------------------------------------------------------------------------------- 1 | OUTPUT_ARCH(${ARCH}) 2 | ENTRY(start) 3 | 4 | SECTIONS { 5 | . = ${LOAD-ADDRESS}; 6 | .text : ALIGN(4096) { 7 | *(.text.entry) 8 | *(.text*) 9 | . = ALIGN(2097152); 10 | PROVIDE(etext = .); 11 | } 12 | 13 | .rodata : ALIGN(4096) { 14 | *(.rodata*) 15 | *(.srodata*) 16 | . = ALIGN(2097152); 17 | PROVIDE(erodata = .); 18 | } 19 | 20 | .data : ALIGN(4096) { 21 | *(.data*) 22 | *(.sdata*) 23 | . = ALIGN(2097152); 24 | PROVIDE(edata = .); 25 | } 26 | 27 | .bss : ALIGN(4096) { 28 | *(.bss*) 29 | *(.sbss*) 30 | *(COMMON) 31 | . = ALIGN(2097152); 32 | PROVIDE(end = .); 33 | } 34 | 35 | /DISCARD/ : { 36 | *(.eh_frame) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /riscv64/src/l.S: -------------------------------------------------------------------------------- 1 | .section .text.entry 2 | .globl start 3 | start: 4 | bnez a0, 1f 5 | la sp, stack // set the stack pointer 6 | li t0, 4096 * 4 7 | add sp, sp, t0 // add stack length 8 | call main9 9 | 1: 10 | wfi 11 | j 1b 12 | 13 | .bss 14 | .balign 4096 15 | stack: .space 4096 * 4 16 | -------------------------------------------------------------------------------- /riscv64/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(alloc_error_handler)] 2 | #![feature(sync_unsafe_cell)] 3 | #![cfg_attr(not(any(test)), no_std)] 4 | #![cfg_attr(not(test), no_main)] 5 | #![allow(clippy::upper_case_acronyms)] 6 | #![forbid(unsafe_op_in_unsafe_fn)] 7 | 8 | mod platform; 9 | mod runtime; 10 | mod sbi; 11 | mod uart16550; 12 | 13 | use port::println; 14 | 15 | use crate::platform::{devcons, platform_init}; 16 | use port::fdt::DeviceTree; 17 | 18 | #[cfg(not(test))] 19 | core::arch::global_asm!(include_str!("l.S")); 20 | 21 | #[no_mangle] 22 | pub extern "C" fn main9(hartid: usize, dtb_ptr: usize) -> ! 
{ 23 | let dt = unsafe { DeviceTree::from_usize(dtb_ptr).unwrap() }; 24 | crate::devcons::init(&dt); 25 | platform_init(); 26 | 27 | println!(); 28 | println!("r9 from the Internet"); 29 | println!("Domain0 Boot HART = {hartid}"); 30 | println!("DTB found at: {dtb_ptr:#x}"); 31 | 32 | #[cfg(not(test))] 33 | sbi::shutdown(); 34 | #[cfg(test)] 35 | loop {} 36 | } 37 | -------------------------------------------------------------------------------- /riscv64/src/platform/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(platform = "nezha")] 2 | pub mod nezha; 3 | #[cfg(platform = "nezha")] 4 | pub use crate::platform::nezha::*; 5 | 6 | #[cfg(any(test, platform = "virt", not(platform = "nezha")))] 7 | pub mod virt; 8 | #[cfg(any(test, platform = "virt", not(platform = "nezha")))] 9 | pub use crate::platform::virt::*; 10 | -------------------------------------------------------------------------------- /riscv64/src/platform/nezha/devcons.rs: -------------------------------------------------------------------------------- 1 | // Racy to start. 2 | 3 | use core::mem::MaybeUninit; 4 | 5 | use crate::uart16550::Uart16550; 6 | use port::{devcons::Console, fdt::DeviceTree}; 7 | 8 | pub fn init(dt: &DeviceTree) { 9 | let uart0_reg = dt 10 | .find_compatible("uart0") 11 | .next() 12 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 13 | .and_then(|reg| reg.regblock()) 14 | .unwrap(); 15 | 16 | Console::new(|| { 17 | let mut uart = Uart16550::new(uart0_reg); 18 | uart.init(115_200); 19 | 20 | static mut UART: MaybeUninit = MaybeUninit::uninit(); 21 | 22 | unsafe { 23 | UART.write(uart); 24 | UART.assume_init_mut() 25 | } 26 | }); 27 | } 28 | -------------------------------------------------------------------------------- /riscv64/src/platform/nezha/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod devcons; 2 | 3 | pub fn platform_init() {} 4 | -------------------------------------------------------------------------------- /riscv64/src/platform/virt/devcons.rs: -------------------------------------------------------------------------------- 1 | // Racy to start. 
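// The UART's MMIO block is discovered from the device tree (the first
// "ns16550a"-compatible node); the driver lives in a static SyncUnsafeCell so
// that a 'static mutable reference can be handed to the shared Console.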
2 | 3 | use core::cell::SyncUnsafeCell; 4 | use core::mem::MaybeUninit; 5 | 6 | use crate::uart16550::Uart16550; 7 | use port::{devcons::Console, fdt::DeviceTree}; 8 | 9 | pub fn init(dt: &DeviceTree) { 10 | let ns16550a_reg = dt 11 | .find_compatible("ns16550a") 12 | .next() 13 | .and_then(|uart| dt.property_translated_reg_iter(uart).next()) 14 | .and_then(|reg| reg.regblock()) 15 | .unwrap(); 16 | 17 | Console::new(|| { 18 | let mut uart = Uart16550::new(ns16550a_reg); 19 | uart.init(115_200); 20 | 21 | static CONS: SyncUnsafeCell> = 22 | SyncUnsafeCell::new(MaybeUninit::uninit()); 23 | unsafe { 24 | let cons = &mut *CONS.get(); 25 | cons.write(uart); 26 | cons.assume_init_mut() 27 | } 28 | }); 29 | } 30 | -------------------------------------------------------------------------------- /riscv64/src/platform/virt/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod devcons; 2 | 3 | pub fn platform_init() {} 4 | -------------------------------------------------------------------------------- /riscv64/src/runtime.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(test))] 2 | 3 | extern crate alloc; 4 | 5 | use alloc::alloc::Layout; 6 | use core::arch::asm; 7 | use core::panic::PanicInfo; 8 | 9 | use port::{print, println}; 10 | 11 | #[no_mangle] 12 | extern "C" fn eh_personality() {} 13 | 14 | #[panic_handler] 15 | fn panic(info: &PanicInfo) -> ! { 16 | print!("Panic: "); 17 | if let Some(p) = info.location() { 18 | println!("line {}, file {}: {}", p.line(), p.file(), info.message()); 19 | } else { 20 | println!("no information available."); 21 | } 22 | abort(); 23 | } 24 | #[no_mangle] 25 | extern "C" fn abort() -> ! { 26 | loop { 27 | unsafe { 28 | asm!("wfi"); 29 | } 30 | } 31 | } 32 | 33 | #[alloc_error_handler] 34 | fn oom(_layout: Layout) -> ! { 35 | panic!("oom"); 36 | } 37 | -------------------------------------------------------------------------------- /riscv64/src/sbi.rs: -------------------------------------------------------------------------------- 1 | //! SBI interface. 2 | //! 3 | //! Chapter 5: Legacy Extensions 4 | 5 | #![allow(dead_code)] 6 | 7 | const SBI_SET_TIMER: usize = 0; 8 | const SBI_CONSOLE_PUTCHAR: usize = 1; 9 | const SBI_CONSOLE_GETCHAR: usize = 2; 10 | const _SBI_CLEAR_IPI: usize = 3; 11 | const _SBI_SEND_IPI: usize = 4; 12 | const _SBI_REMOTE_FENCE_I: usize = 5; 13 | const _SBI_REMOTE_SFENCE_VMA: usize = 6; 14 | const _SBI_REMOTE_SFENCE_VMA_ASID: usize = 7; 15 | const SBI_SHUTDOWN: usize = 8; 16 | 17 | #[cfg(target_arch = "riscv64")] 18 | fn sbi_call_legacy(eid: usize, arg0: usize, arg1: usize, arg2: usize) -> usize { 19 | let ret; 20 | unsafe { 21 | core::arch::asm!( 22 | "ecall", 23 | inlateout("x10") arg0 => ret, 24 | in("x11") arg1, 25 | in("x12") arg2, 26 | in("x17") eid 27 | ); 28 | } 29 | ret 30 | } 31 | 32 | #[cfg(not(target_arch = "riscv64"))] 33 | fn sbi_call_legacy(_eid: usize, _arg0: usize, _arg1: usize, _arg2: usize) -> usize { 34 | 0 35 | } 36 | 37 | pub fn _set_timer(timer: usize) { 38 | sbi_call_legacy(SBI_SET_TIMER, timer, 0, 0); 39 | } 40 | 41 | #[deprecated = "expected to be deprecated; no replacement"] 42 | pub fn _consputb(c: u8) { 43 | sbi_call_legacy(SBI_CONSOLE_PUTCHAR, c as usize, 0, 0); 44 | } 45 | 46 | #[deprecated = "expected to be deprecated; no replacement"] 47 | pub fn _consgetb() -> u8 { 48 | sbi_call_legacy(SBI_CONSOLE_GETCHAR, 0, 0, 0).try_into().unwrap() 49 | } 50 | 51 | pub fn shutdown() -> ! 
{ 52 | sbi_call_legacy(SBI_SHUTDOWN, 0, 0, 0); 53 | panic!("shutdown failed!"); 54 | } 55 | -------------------------------------------------------------------------------- /riscv64/src/uart16550.rs: -------------------------------------------------------------------------------- 1 | use core::convert::TryInto; 2 | use core::fmt::Error; 3 | use core::fmt::Write; 4 | 5 | use port::devcons::Uart; 6 | use port::fdt::RegBlock; 7 | 8 | pub struct Uart16550 { 9 | pub ns16550a_reg: RegBlock, 10 | } 11 | 12 | impl Write for Uart16550 { 13 | fn write_str(&mut self, out: &str) -> Result<(), Error> { 14 | for c in out.bytes() { 15 | self.put(c); 16 | } 17 | Ok(()) 18 | } 19 | } 20 | 21 | impl Uart for Uart16550 { 22 | fn putb(&self, b: u8) { 23 | let ptr = self.ns16550a_reg.addr as *mut u8; 24 | unsafe { 25 | ptr.add(0).write_volatile(b); 26 | } 27 | } 28 | } 29 | 30 | impl Uart16550 { 31 | pub fn new(ns16550a_reg: RegBlock) -> Self { 32 | Uart16550 { ns16550a_reg } 33 | } 34 | 35 | pub fn init(&mut self, baud: u32) { 36 | let ptr = self.ns16550a_reg.addr as *mut u8; 37 | unsafe { 38 | let lcr = 3; // word length 39 | ptr.add(3).write_volatile(lcr); // set word length 40 | ptr.add(2).write_volatile(1); // enable FIFO 41 | ptr.add(1).write_volatile(1); // enable receiver interrupts 42 | let divisor: u16 = (2_227_900 / (baud * 16)) as u16; // set baud rate 43 | let divisor_least: u8 = (divisor & 0xff).try_into().unwrap(); 44 | let divisor_most: u8 = (divisor >> 8).try_into().unwrap(); 45 | ptr.add(3).write_volatile(lcr | 1 << 7); // access DLAB 46 | ptr.add(0).write_volatile(divisor_least); // DLL 47 | ptr.add(1).write_volatile(divisor_most); // DLM 48 | ptr.add(3).write_volatile(lcr); // close DLAB 49 | } 50 | } 51 | 52 | pub fn put(&mut self, c: u8) { 53 | let ptr = self.ns16550a_reg.addr as *mut u8; 54 | unsafe { 55 | ptr.add(0).write_volatile(c); 56 | } 57 | } 58 | 59 | #[allow(dead_code)] 60 | pub fn get(&mut self) -> Option { 61 | let ptr = self.ns16550a_reg.addr as *mut u8; 62 | unsafe { 63 | if ptr.add(5).read_volatile() & 1 == 0 { 64 | None 65 | } else { 66 | Some(ptr.add(0).read_volatile()) 67 | } 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly-2024-09-27" 3 | components = [ "rustfmt", "rust-src", "clippy", "llvm-tools" ] 4 | targets = [ 5 | "aarch64-unknown-none", 6 | "riscv64gc-unknown-none-elf", 7 | "x86_64-unknown-none" 8 | ] 9 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | use_small_heuristics = "Max" 3 | newline_style = "Unix" 4 | -------------------------------------------------------------------------------- /x86_64/Cargo.toml: -------------------------------------------------------------------------------- 1 | cargo-features = ["per-package-target"] 2 | 3 | [package] 4 | name = "x86_64" 5 | version = "0.1.0" 6 | edition = "2021" 7 | default-target = "x86_64-unknown-none" 8 | 9 | [dependencies] 10 | bitstruct = "0.1" 11 | x86 = "0.52" 12 | port = { path = "../port" } 13 | -------------------------------------------------------------------------------- /x86_64/lib/config_default.toml: -------------------------------------------------------------------------------- 1 | [build] 2 | target = "lib/x86_64-unknown-none-elf.json" 3 | 
buildflags = [ 4 | "-Z", "build-std=core,alloc" 5 | ] 6 | 7 | [link] 8 | script = 'x86_64/lib/kernel.ld' 9 | load-address = '0xffff800000100000' 10 | 11 | -------------------------------------------------------------------------------- /x86_64/lib/kernel.ld: -------------------------------------------------------------------------------- 1 | /* 2 | * Linker script for R9. 3 | */ 4 | 5 | ENTRY(start) 6 | 7 | SECTIONS { 8 | /* 9 | * start the kernel at 0x0xffff_8000_0020_0000 10 | * This preserves some RAM between 1MiB and the 11 | * start of the kernel for critical structures. 12 | */ 13 | . = ${LOAD-ADDRESS}; 14 | 15 | PROVIDE(boottext = .); 16 | .text.boot : ALIGN(4096) { 17 | *(.boottext .bootdata) 18 | . = ALIGN(4096); 19 | PROVIDE(eboottext = .); 20 | . = ALIGN(2097152); 21 | PROVIDE(esys = .); 22 | } 23 | 24 | PROVIDE(text = .); 25 | .text : ALIGN(4096) { 26 | *(.text* .stub .gnu.linkonce.t.*) 27 | . = ALIGN(2097152); 28 | PROVIDE(etext = .); 29 | } 30 | 31 | .rodata : ALIGN(4096) { 32 | *(.rodata* .gnu.linkonce.r.*) 33 | . = ALIGN(2097152); 34 | PROVIDE(erodata = .); 35 | } 36 | 37 | .data : ALIGN(4096) { 38 | *(.data*) 39 | } 40 | .got : ALIGN(4096) { 41 | *(.got) 42 | } 43 | .got.plt : ALIGN(4096) { 44 | *(.got.plt) 45 | } 46 | PROVIDE(edata = .); 47 | 48 | .bss : ALIGN(4096) { 49 | *(.bss*) 50 | *(COMMON) 51 | . = ALIGN(2097152); 52 | } 53 | PROVIDE(end = .); 54 | 55 | /DISCARD/ : { 56 | *(.eh_frame .note.GNU-stack) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /x86_64/src/dat.rs: -------------------------------------------------------------------------------- 1 | // 2 | -------------------------------------------------------------------------------- /x86_64/src/devcons.rs: -------------------------------------------------------------------------------- 1 | // Racy to start. 2 | 3 | use core::cell::SyncUnsafeCell; 4 | use port::devcons::{Console, Uart}; 5 | 6 | struct Uart16550 { 7 | port: u16, 8 | } 9 | 10 | impl Uart for Uart16550 { 11 | fn putb(&self, b: u8) { 12 | crate::uart16550::putb(self.port, b); 13 | } 14 | } 15 | 16 | pub fn init() { 17 | Console::new(|| { 18 | static CONS: SyncUnsafeCell = SyncUnsafeCell::new(Uart16550 { port: 0x3f8 }); 19 | unsafe { &mut *CONS.get() } 20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /x86_64/src/l.S: -------------------------------------------------------------------------------- 1 | // It gets ugly to try to link this at some low address 2 | // and then have the rest of the kernel linked high; that 3 | // goes doubly for any attempt to load at a random address. 4 | // 5 | // So you have to learn to write position independent 6 | // code here. 7 | // 8 | // It will make you stronger. 9 | // 10 | // Assuming you survive the training. 11 | 12 | // Useful definitions. 
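// GDT selector offsets and segment-descriptor attribute bits for the boot GDT,
// followed by the multiboot, kernel memory-layout, page-table, CR0/CR4 and
// EFER constants used below.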
13 | .set GdtNULL, (0<<3) 14 | .set GdtCODE64, (1<<3) 15 | .set GdtCODE32, (2<<3) 16 | .set GdtDATA32, (3<<3) 17 | 18 | .set SegREAD, (1<<41) 19 | .set SegWRITE, (1<<42) 20 | .set SegCODE, (1<<43) 21 | .set SegDATA, (0<<43) 22 | .set SegMB1, (1<<44) 23 | .set SegPRESENT, (1<<47) 24 | .set SegLONG, (1<<53) 25 | 26 | .set Seg32DEFAULT, (1<<54) 27 | .set Seg32GRAN, (1<<55) 28 | .set Seg32LIMIT, ((0xF<<48)+0xFFFF) 29 | .set Seg32DEF, (Seg32DEFAULT|Seg32GRAN|Seg32LIMIT) 30 | 31 | .set MULTIBOOT_FLAG_PGALIGN, (1<<0) 32 | .set MULTIBOOT_FLAG_MEMINFO, (1<<1) 33 | .set MULTIBOOT_MAGIC, 0x1BADB002 34 | .set MULTIBOOT_FLAGS, (MULTIBOOT_FLAG_PGALIGN | MULTIBOOT_FLAG_MEMINFO) 35 | .set MULTIBOOT_CHECKSUM, -(MULTIBOOT_MAGIC + MULTIBOOT_FLAGS) 36 | 37 | .set PTSZ, 4096 38 | .set PGSZ, 4096 39 | .set MACHSTKSZ, (8*PGSZ) 40 | 41 | .set KZERO, 0xffff800000000000 42 | .set MiB, (1<<20) 43 | .set KSYS, (KZERO+MiB+PGSZ) 44 | .set KTZERO, (KZERO+2*MiB) 45 | 46 | .set Cr0PE, (1<<0) // Protected Mode Enable 47 | .set Cr0MP, (1<<1) // Monitor Coprocessor 48 | .set Cr0TS, (1<<7) // Task Switched 49 | .set Cr0WP, (1<<16) // Write Protect 50 | .set Cr0NW, (1<<29) // Not Writethrough 51 | .set Cr0CD, (1<<30) // Cache Disable 52 | .set Cr0PG, (1<<31) // Paging Enable 53 | 54 | .set Cr4PSE, (1<<4) // Page-Size Extensions 55 | .set Cr4PAE, (1<<5) // Physical Address Extension 56 | .set Cr4PGE, (1<<7) // Page-Global Enable 57 | 58 | .set IA32_EFER, 0xc0000080 // Extended Feature Enable 59 | 60 | .set EferSCE, (1<<0) // System Call Extension 61 | .set EferLME, (1<<8) // Long Mode Enable 62 | .set EferNXE, (1<<11) // No-Execute Enable 63 | 64 | .set PteP, (1<<0) // Present 65 | .set PteRW, (1<<1) // Read/Write 66 | .set PtePS, (1<<7) // Page Size 67 | 68 | .align 4 69 | .section .boottext, "awx" 70 | multiboot_header: 71 | .long MULTIBOOT_MAGIC 72 | .long MULTIBOOT_FLAGS 73 | .long MULTIBOOT_CHECKSUM 74 | 75 | // When we get here we are in protected mode with a GDT. We set 76 | // up IA32e mode and get into long mode with paging enabled. 77 | .code32 78 | .align 4 79 | .globl start 80 | start: 81 | cli 82 | cld 83 | 84 | // Save the multiboot magic number. 85 | movl %eax, %ebp 86 | 87 | // Make the basic page tables for CPU0 to map 0-4GiB 88 | // physical to KZERO, in addition to an identity map 89 | // for the switch from protected to paged mode. There 90 | // is an assumption here that the creation and later 91 | // removal of the identity map will not interfere with 92 | // the KZERO mappings. 93 | // 94 | // We assume a recent processor with Page Size Extensions 95 | // and use two 2MiB entries. 96 | 97 | // Zero the stack, page tables, vsvm, unused pages, m, sys, etc. 98 | movl $(KSYS-KZERO), %esi 99 | movl $((KTZERO-KSYS)/4), %ecx 100 | xorl %eax, %eax 101 | movl %esi, %edi 102 | rep stosl 103 | 104 | // We could zero the BSS here, but the loader does it for us. 105 | 106 | // Set the stack and find the start of the page tables. 107 | movl %esi, %eax 108 | addl $MACHSTKSZ, %eax 109 | movl %eax, %esp // Give ourselves a stack 110 | 111 | // %eax points to the PML4 that we'll use for double-mapping 112 | // low RAM and KZERO. 113 | movl %eax, %cr3 // load the MMU; paging still disabled 114 | movl %eax, %edx 115 | addl $(2*PTSZ|PteRW|PteP), %edx // EPML3 at IPML4 + 2*PTSZ 116 | movl %edx, (%eax) // IPML4E for identity map 117 | movl %edx, 2048(%eax) // IPML4E for KZERO 118 | 119 | // The next page frame contains a PML4 that removes the double 120 | // mapping, leaving only KZERO mapped. 
121 | addl $PTSZ, %eax // EPML4 at IPML4 + PTSZ 122 | movl %edx, 2048(%eax) // EPML4E for EPML3 at KZERO 123 | 124 | // Fill in the early PML3 (PDPT) to point to the early PML2s (PDs) 125 | // that provide the initial 4GiB mapping in the kernel. 126 | addl $PTSZ, %eax // EPML3 at EPML4 + PTSZ 127 | addl $PTSZ, %edx // EPML2[0] at EPML3 + PTSZ 128 | movl %edx, (%eax) // EPML3E for EPML2[0] 129 | addl $PTSZ, %edx // EPML2[1] at EPML2[0] + PTSZ 130 | movl %edx, 8(%eax) // EPML3E for EPML2[1] 131 | addl $PTSZ, %edx // EPML2[2] at EPML2[1] + PTSZ 132 | movl %edx, 16(%eax) // EPML3E for EPML2[2] 133 | addl $PTSZ, %edx // EPML2[3] at EPML2[2] + PTSZ 134 | movl %edx, 24(%eax) // EPML3E for EPML2[3] 135 | 136 | // Map the first 4GiB (the entire 32-bit address space). 137 | // Note that this requires 16KiB. 138 | // 139 | // The first 2MiB are mapped using 4KiB pages. The first 1MiB 140 | // of memory contains holes for MMIO and ROM and other things that 141 | // we want special attributes for. We'll set those in the 142 | // kernel proper, but we provide 4KiB pages here. There is 4KiB 143 | // of RAM for the PT immediately after the PDs. 144 | addl $PTSZ, %eax // PML2[0] at PML3[0] + PTSZ 145 | movl $2048, %ecx // 2048 * 2MiB pages covers 4GiB 146 | movl $(PtePS|PteRW|PteP), %edx // Large page PDEs 147 | 1: movl %edx, (%eax) // PDE for 2MiB pages 148 | addl $8, %eax 149 | addl $(2<<20), %edx 150 | subl $1, %ecx 151 | test %ecx, %ecx 152 | jnz 1b 153 | 154 | // %eax now points to the page after the EPML2s, which is the real 155 | // self-referential PML4. 156 | // Map the first 192 entries for the upper portion of the address 157 | // space to PML3s; this is the primordial root of sharing for the 158 | // kernel. 159 | movl %eax, %edx 160 | addl $(PTSZ|PteRW|PteP), %edx // PML3[0] at PML4 + PTSZ 161 | movl $256, %ecx 162 | 1: movl %edx, (%eax, %ecx, 8) 163 | addl $PTSZ, %edx 164 | incl %ecx 165 | cmp $(256+192), %ecx 166 | jne 1b 167 | 168 | // Enable and activate Long Mode. From the manual: 169 | // make sure Page Size Extensions are off, and Page Global 170 | // Extensions and Physical Address Extensions are on in CR4; 171 | // set Long Mode Enable in the Extended Feature Enable MSR; 172 | // set Paging Enable in CR0; 173 | // make an inter-segment jump to the Long Mode code. 174 | // It's all in 32-bit mode until the jump is made. 175 | movl %cr4, %eax 176 | andl $~Cr4PSE, %eax // Page Size 177 | orl $(Cr4PGE|Cr4PAE), %eax // Page Global, Phys. Address 178 | movl %eax, %cr4 179 | 180 | movl $IA32_EFER, %ecx // Extended Feature Enable 181 | rdmsr 182 | // Enable long mode, NX bit in PTEs and SYSCALL/SYSRET. 183 | orl $(EferSCE|EferLME|EferNXE), %eax 184 | wrmsr 185 | 186 | movl %cr0, %edx 187 | andl $~(Cr0CD|Cr0NW|Cr0TS|Cr0MP), %edx 188 | orl $(Cr0PG|Cr0WP), %edx // Paging Enable 189 | movl %edx, %cr0 190 | 191 | // Load the 64-bit GDT 192 | movl $(gdtdesc-KZERO), %eax 193 | lgdt (%eax) 194 | 195 | ljmpl $GdtCODE64, $(1f-KZERO) 196 | 197 | .code64 198 | 1: 199 | // Long mode. Welcome to 2003. Jump out of the identity map 200 | // and into the kernel address space. 201 | 202 | // Load a 64-bit GDT in the kernel address space. 203 | movabsq $gdtdescv, %rax 204 | lgdt (%rax) 205 | 206 | // Zero out the segment registers: they are not used in long mode. 207 | xorl %edx, %edx 208 | movw %dx, %ds 209 | movw %dx, %es 210 | movw %dx, %fs 211 | movw %dx, %gs 212 | movw %dx, %ss 213 | 214 | // We can now use linked addresses for the stack and code. 215 | // We'll jump into the kernel from here.
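// warp64 is linked at its KZERO-based virtual address, far beyond the reach
// of a RIP-relative (rel32) jump from the low identity-mapped address we are
// executing at, so the target is loaded into a register and the jump is made
// indirectly.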
216 | movabsq $KZERO, %rax 217 | addq %rax, %rsp 218 | movabsq $warp64, %rax 219 | jmp *%rax 220 | 221 | .text 222 | .code64 223 | warp64: 224 | // At this point, we are fully in the kernel virtual 225 | // address space and we can discard the identity mapping. 226 | // There is a PML4 sans identity map 4KiB beyond the 227 | // current PML4; load that, which also flushes the TLB. 228 | movq %cr3, %rax 229 | addq $PTSZ, %rax 230 | movq %rax, %cr3 // Also flushes TLB. 231 | 232 | // &sys->mach is the first argument to main() 233 | movabsq $KSYS, %rdi 234 | addq $(MACHSTKSZ+(1+1+1+4+1+192)*PTSZ+PGSZ), %rdi 235 | movq %rbp, %rsi // multiboot magic 236 | movq %rbx, %rdx // multiboot info pointer 237 | 238 | // Push a dummy stack frame and jump to `main`. 239 | pushq $0 240 | movq $0, %rbp 241 | leaq main9(%rip), %rax 242 | push %rax 243 | pushq $2 // clear flags 244 | popfq 245 | ret 246 | ud2 247 | 248 | // no deposit, no return 249 | // do not resuscitate 250 | .globl ndnr 251 | ndnr: 252 | sti 253 | hlt 254 | jmp ndnr 255 | 256 | // Start-up request IPI handler. 257 | // 258 | // This code is executed on an application processor in response 259 | // to receiving a Start-up IPI (SIPI) from another processor. The 260 | // vector given in the SIPI determines the memory address the 261 | // where the AP starts execution. 262 | // 263 | // The AP starts in real-mode, with 264 | // CS selector set to the startup memory address/16; 265 | // CS base set to startup memory address; 266 | // CS limit set to 64KiB; 267 | // CPL and IP set to 0. 268 | // 269 | // This must be placed on a 4KiB boundary, and while it may seem 270 | // like this should be in a text section, it is deliberately not. 271 | // The AP entry code is copied to a page in low memory at APENTRY 272 | // for execution, so as far as the rest of the kernel is concerned 273 | // it is simply read-only data. We put it into .rodata so that it 274 | // is mapped onto a non-executable page and the kernel cannot 275 | // accidentally jump into it once it is running in C code on a 276 | // real page table. 277 | // 278 | // The 16-bit code loads a basic GDT, turns on 32-bit protected 279 | // mode and makes an inter-segment jump to the protected mode code 280 | // right after. 281 | // 282 | // 32-bit code enables long mode and paging, sets a stack and 283 | // jumps to 64-bit mode, which fixes up virtual addresses for 284 | // the stack and PC and jumps into C. 285 | 286 | .set APENTRY, 0x3000 287 | .set APPERCPU, (0x4000-8) 288 | 289 | .section .rodata 290 | 291 | .globl b1978, e1978 292 | .code16 293 | .align 4096 294 | b1978: 295 | // We start here in real mode. Welcome to 1978. 296 | cli 297 | cld 298 | 299 | lgdtl (APENTRY+(apgdtdesc-b1978)) 300 | 301 | movl %cr0, %eax 302 | orl $Cr0PE, %eax 303 | movl %eax, %cr0 304 | 305 | ljmpl $GdtCODE32, $(b1982-KZERO) 306 | 307 | .align 16 308 | gdt: 309 | // 0: Null segment 310 | .quad 0 311 | // 8: Kernel 64-bit code segment 312 | .quad (SegREAD|SegCODE|SegMB1|SegPRESENT|SegLONG) 313 | // 16: Kernel 32-bit code segment (for bootstrapping APs) 314 | .quad (SegREAD|SegCODE|SegMB1|SegPRESENT|Seg32DEF) 315 | // 24: Kernel 32-bit data segment (for bootstrapping APs) 316 | .quad (SegREAD|SegWRITE|SegMB1|SegPRESENT|Seg32DEF) 317 | egdt: 318 | 319 | .skip 6 320 | apgdtdesc: 321 | .word egdt - gdt - 1 322 | .long (APENTRY+gdt-b1978) 323 | 324 | e1978: 325 | 326 | .text 327 | .code32 328 | b1982: 329 | // Protected mode. Welcome to 1982. 
330 | movw $GdtDATA32, %ax 331 | movw %ax, %ds 332 | movw %ax, %es 333 | movw %ax, %fs 334 | movw %ax, %gs 335 | movw %ax, %ss 336 | 337 | // load the PML4 with the shared page table address; 338 | // make an identity map for the inter-segment jump below, 339 | // using the stack space to hold a temporary PDP and PD; 340 | // enable and activate long mode; 341 | // make an inter-segment jump to the long mode code. 342 | movl $(KSYS-KZERO+MACHSTKSZ), %eax // Page table 343 | movl %eax, %cr3 // load the mmu 344 | 345 | // Enable and activate Long Mode. 346 | movl %cr4, %eax 347 | andl $~Cr4PSE, %eax // Page Size 348 | orl $(Cr4PGE|Cr4PAE), %eax // Page Global, Phys. Address 349 | movl %eax, %cr4 350 | 351 | movl $IA32_EFER, %ecx // Extended Feature Enable 352 | rdmsr 353 | orl $(EferSCE|EferLME|EferNXE), %eax 354 | wrmsr // Long Mode Enable 355 | 356 | movl %cr0, %edx 357 | andl $~(Cr0CD|Cr0NW|Cr0TS|Cr0MP), %edx 358 | orl $(Cr0PG|Cr0WP), %edx // Paging Enable 359 | movl %edx, %cr0 360 | 361 | ljmp $GdtCODE64, $(1f-KZERO) 362 | 363 | .code64 364 | 1: 365 | movq APPERCPU, %rdi 366 | addq $MACHSTKSZ, %rdi 367 | movq %rdi, %rsp // set stack 368 | addq $(PTSZ+PGSZ), %rdi // Mach * 369 | 370 | movabsq $apwarp64, %rax 371 | pushq %rax 372 | ret 373 | ud2 374 | 375 | apwarp64: 376 | movabsq $gdtdescv, %rax 377 | lgdt (%rax) 378 | 379 | xorl %edx, %edx 380 | movw %dx, %ds 381 | movw %dx, %es 382 | movw %dx, %fs 383 | movw %dx, %gs 384 | movw %dx, %ss 385 | 386 | movq %cr3, %rax 387 | addq $(7*PTSZ), %rax 388 | movq %rax, %cr3 // flush TLB 389 | 390 | pushq $0 391 | movq $0, %rbp 392 | movq 8(%rdi), %rax // m->splpc 393 | pushq %rax 394 | pushq $2 // Clear flags 395 | popfq 396 | ret // Call squidboy 397 | ud2 398 | 399 | .section .rodata 400 | 401 | .align 16 402 | .skip 6 403 | gdtdesc: 404 | .word egdt - gdt - 1 405 | .long (gdt - KZERO) 406 | 407 | .align 16 408 | .skip 6 409 | gdtdescv: 410 | .word egdt - gdt - 1 411 | .quad gdt 412 | -------------------------------------------------------------------------------- /x86_64/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(alloc_error_handler)] 2 | #![feature(naked_functions)] 3 | #![feature(sync_unsafe_cell)] 4 | #![cfg_attr(not(any(test)), no_std)] 5 | #![cfg_attr(not(test), no_main)] 6 | #![allow(clippy::upper_case_acronyms)] 7 | #![forbid(unsafe_op_in_unsafe_fn)] 8 | 9 | mod dat; 10 | mod devcons; 11 | mod pio; 12 | mod proc; 13 | mod uart16550; 14 | 15 | use proc::{swtch, Label}; 16 | 17 | #[cfg(not(test))] 18 | core::arch::global_asm!(include_str!("l.S"), options(att_syntax)); 19 | 20 | use port::println; 21 | 22 | static mut THRSTACK: [u64; 1024] = [0; 1024]; 23 | static mut CTX: u64 = 0; 24 | static mut THR: u64 = 0; 25 | 26 | fn jumpback() { 27 | println!("in a thread"); 28 | unsafe { 29 | let thr = &mut *(THR as *mut Label); 30 | let ctx = &mut *(CTX as *mut Label); 31 | swtch(thr, ctx); 32 | } 33 | } 34 | 35 | #[no_mangle] 36 | pub extern "C" fn main9() { 37 | devcons::init(); 38 | println!(); 39 | println!("r9 from the Internet"); 40 | println!("looping now"); 41 | let mut ctx = Label::new(); 42 | let mut thr = Label::new(); 43 | thr.pc = jumpback as usize as u64; 44 | unsafe { 45 | thr.sp = &mut THRSTACK[1023] as *mut _ as u64; 46 | CTX = &mut ctx as *mut _ as u64; 47 | THR = &mut thr as *mut _ as u64; 48 | swtch(&mut ctx, &mut thr); 49 | } 50 | println!("came out the other side of a context switch"); 51 | #[allow(clippy::empty_loop)] 52 | loop {} 53 | } 54 | 55 | mod runtime; 
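// A note on the context switch above: `swtch` (defined in proc.rs) saves the
// caller's return address, stack and frame pointers, and the callee-saved
// registers (rbx, r12-r15) into `save`, then restores the same set from `next`,
// so execution resumes at `next.pc` with `next.sp` as the stack. `thr.sp` is
// seeded with the address of the last element of THRSTACK because the x86-64
// stack grows downward.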
56 | -------------------------------------------------------------------------------- /x86_64/src/pio.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused_variables, dead_code)] 2 | 3 | pub unsafe fn outb(port: u16, b: u8) { 4 | #[cfg(not(test))] 5 | unsafe { 6 | core::arch::asm!("outb %al, %dx", in("dx") port, in("al") b, options(att_syntax)); 7 | } 8 | } 9 | 10 | pub unsafe fn outw(port: u16, w: u16) { 11 | #[cfg(not(test))] 12 | unsafe { 13 | core::arch::asm!("outw %ax, %dx", in("dx") port, in("ax") w, options(att_syntax)); 14 | } 15 | } 16 | 17 | pub unsafe fn outl(port: u16, l: u32) { 18 | #[cfg(not(test))] 19 | unsafe { 20 | core::arch::asm!("outl %eax, %dx", in("dx") port, in("ax") l, options(att_syntax)); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /x86_64/src/proc.rs: -------------------------------------------------------------------------------- 1 | use core::arch::asm; 2 | 3 | #[repr(C)] 4 | pub struct Label { 5 | pub pc: u64, 6 | pub sp: u64, 7 | pub fp: u64, 8 | rbx: u64, 9 | r12: u64, 10 | r13: u64, 11 | r14: u64, 12 | r15: u64, 13 | } 14 | 15 | impl Label { 16 | pub const fn new() -> Label { 17 | Label { pc: 0, sp: 0, fp: 0, rbx: 0, r12: 0, r13: 0, r14: 0, r15: 0 } 18 | } 19 | } 20 | 21 | #[naked] 22 | pub unsafe extern "C" fn swtch(save: &mut Label, next: &mut Label) { 23 | unsafe { 24 | asm!( 25 | r#" 26 | movq (%rsp), %rax 27 | movq %rax, 0(%rdi) 28 | movq %rsp, 8(%rdi) 29 | movq %rbp, 16(%rdi) 30 | movq %rbx, 24(%rdi) 31 | movq %r12, 32(%rdi) 32 | movq %r13, 40(%rdi) 33 | movq %r14, 48(%rdi) 34 | movq %r15, 56(%rdi) 35 | 36 | movq 0(%rsi), %rax 37 | movq 8(%rsi), %rsp 38 | movq 16(%rsi), %rbp 39 | movq 24(%rsi), %rbx 40 | movq 32(%rsi), %r12 41 | movq 40(%rsi), %r13 42 | movq 48(%rsi), %r14 43 | movq 56(%rsi), %r15 44 | movq %rax, (%rsp) 45 | xorl %eax, %eax 46 | ret"#, 47 | options(att_syntax, noreturn) 48 | ); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /x86_64/src/runtime.rs: -------------------------------------------------------------------------------- 1 | #![cfg(not(test))] 2 | 3 | extern crate alloc; 4 | 5 | use alloc::alloc::Layout; 6 | use core::panic::PanicInfo; 7 | 8 | #[panic_handler] 9 | pub fn panic(_info: &PanicInfo) -> ! { 10 | #[allow(clippy::empty_loop)] 11 | loop {} 12 | } 13 | 14 | #[alloc_error_handler] 15 | fn oom(_layout: Layout) -> ! { 16 | panic!("oom"); 17 | } 18 | -------------------------------------------------------------------------------- /x86_64/src/uart16550.rs: -------------------------------------------------------------------------------- 1 | //! Simple UART driver to get started. 2 | 3 | pub fn putb(port: u16, b: u8) { 4 | unsafe { 5 | crate::pio::outb(port, b); 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /xtask/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "atty" 7 | version = "0.2.14" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" 10 | dependencies = [ 11 | "hermit-abi", 12 | "libc", 13 | "winapi", 14 | ] 15 | 16 | [[package]] 17 | name = "autocfg" 18 | version = "1.1.0" 19 | source = "registry+https://github.com/rust-lang/crates.io-index" 20 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 21 | 22 | [[package]] 23 | name = "bitflags" 24 | version = "1.3.2" 25 | source = "registry+https://github.com/rust-lang/crates.io-index" 26 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 27 | 28 | [[package]] 29 | name = "clap" 30 | version = "3.1.18" 31 | source = "registry+https://github.com/rust-lang/crates.io-index" 32 | checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" 33 | dependencies = [ 34 | "atty", 35 | "bitflags", 36 | "clap_lex", 37 | "indexmap", 38 | "strsim", 39 | "termcolor", 40 | "textwrap", 41 | ] 42 | 43 | [[package]] 44 | name = "clap_lex" 45 | version = "0.2.0" 46 | source = "registry+https://github.com/rust-lang/crates.io-index" 47 | checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" 48 | dependencies = [ 49 | "os_str_bytes", 50 | ] 51 | 52 | [[package]] 53 | name = "hashbrown" 54 | version = "0.11.2" 55 | source = "registry+https://github.com/rust-lang/crates.io-index" 56 | checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" 57 | 58 | [[package]] 59 | name = "hermit-abi" 60 | version = "0.1.19" 61 | source = "registry+https://github.com/rust-lang/crates.io-index" 62 | checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" 63 | dependencies = [ 64 | "libc", 65 | ] 66 | 67 | [[package]] 68 | name = "indexmap" 69 | version = "1.8.1" 70 | source = "registry+https://github.com/rust-lang/crates.io-index" 71 | checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" 72 | dependencies = [ 73 | "autocfg", 74 | "hashbrown", 75 | ] 76 | 77 | [[package]] 78 | name = "libc" 79 | version = "0.2.126" 80 | source = "registry+https://github.com/rust-lang/crates.io-index" 81 | checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" 82 | 83 | [[package]] 84 | name = "os_str_bytes" 85 | version = "6.1.0" 86 | source = "registry+https://github.com/rust-lang/crates.io-index" 87 | checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" 88 | 89 | [[package]] 90 | name = "strsim" 91 | version = "0.10.0" 92 | source = "registry+https://github.com/rust-lang/crates.io-index" 93 | checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" 94 | 95 | [[package]] 96 | name = "termcolor" 97 | version = "1.1.3" 98 | source = "registry+https://github.com/rust-lang/crates.io-index" 99 | checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" 100 | dependencies = [ 101 | "winapi-util", 102 | ] 103 | 104 | [[package]] 105 | name = "textwrap" 106 | version = "0.15.0" 107 | source = "registry+https://github.com/rust-lang/crates.io-index" 108 | checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" 109 | 110 | [[package]] 111 | name = "winapi" 112 | version = "0.3.9" 113 | source = "registry+https://github.com/rust-lang/crates.io-index" 114 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 115 | 
dependencies = [ 116 | "winapi-i686-pc-windows-gnu", 117 | "winapi-x86_64-pc-windows-gnu", 118 | ] 119 | 120 | [[package]] 121 | name = "winapi-i686-pc-windows-gnu" 122 | version = "0.4.0" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 125 | 126 | [[package]] 127 | name = "winapi-util" 128 | version = "0.1.5" 129 | source = "registry+https://github.com/rust-lang/crates.io-index" 130 | checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" 131 | dependencies = [ 132 | "winapi", 133 | ] 134 | 135 | [[package]] 136 | name = "winapi-x86_64-pc-windows-gnu" 137 | version = "0.4.0" 138 | source = "registry+https://github.com/rust-lang/crates.io-index" 139 | checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 140 | 141 | [[package]] 142 | name = "xtask" 143 | version = "0.1.0" 144 | dependencies = [ 145 | "clap", 146 | ] 147 | -------------------------------------------------------------------------------- /xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | clap = { version = "4.5", features = ["derive"] } 10 | serde = { version = "1.0", features = ["derive"] } 11 | target-lexicon = { version = "0.12" } 12 | toml = "0.8" 13 | -------------------------------------------------------------------------------- /xtask/doc/config_example.toml: -------------------------------------------------------------------------------- 1 | # This is an example configuration ! 2 | 3 | [build] 4 | target = "lib/riscv64-unknown-none-elf.json" 5 | buildflags = [ 6 | "-Z", "build-std=core,alloc" 7 | ] 8 | 9 | [link] 10 | # arch to insert into kernel.ld 11 | arch = 'riscv' 12 | 13 | # linker script to use 14 | script = 'riscv64/lib/kernel.ld' 15 | 16 | # kernel load address to insert into kernel.ld 17 | load-address = '0x80200000' 18 | 19 | [config] 20 | # platform/board possible values: empty, raspi3b, vfive2, nezha, virt etc. 
21 | # example usage => #[cfg(platform = "virt")] 22 | platform = "nezha" 23 | 24 | # a list of features for the target 25 | features = ["mmu_sv39"] 26 | 27 | # 28 | # currently available configuration sections are dev, ip, link, nodev, nouart 29 | # the section name is always the prefix for the configuration 30 | # 31 | # example usage for section "dev" 32 | # 33 | # dev = [ 34 | # 'arch', 35 | # 'cap', 36 | # 'foo="baz"' 37 | # ] 38 | # 39 | # this will create the following configuration options 40 | # dev_arch, dev_cap and dev_foo="baz" 41 | # 42 | # #[cfg(dev_arch)] 43 | # #[cfg(dev_foo="baz")] 44 | 45 | dev = [ 46 | 'arch', 47 | 'cap', 48 | 'cons', 49 | 'dup', 50 | 'env', 51 | 'ip', 52 | 'mnt', 53 | 'mntn', 54 | 'pipe', 55 | 'proc', 56 | 'regress', 57 | 'root', 58 | 'segment', 59 | 'srv', 60 | 'uart', 61 | 'ws' 62 | ] 63 | 64 | ip = [ 65 | 'tcp', 66 | 'udp', 67 | 'ipifc', 68 | 'icmp', 69 | 'icmp6', 70 | 'gre' 71 | ] 72 | 73 | link = [ 74 | 'ethermedium', 75 | 'loopbackmedium', 76 | 'netdevmedium' 77 | ] 78 | 79 | nodev = [ 80 | 'kbin', 81 | 'kprof' 82 | ] 83 | 84 | nouart = [ 85 | 'pci' 86 | ] 87 | -------------------------------------------------------------------------------- /xtask/src/config.rs: -------------------------------------------------------------------------------- 1 | /// Test 2 | /// 3 | use crate::{Command, Profile}; 4 | 5 | use serde::{Deserialize, Serialize}; 6 | use std::{ 7 | collections::HashMap, 8 | fs::{self, create_dir_all, File}, 9 | io::Write, 10 | process::exit, 11 | }; 12 | 13 | /// build section 14 | #[derive(Debug, Serialize, Deserialize)] 15 | pub struct Build { 16 | /// The buildflags control build-time operations and compiler settings. 17 | pub buildflags: Option<Vec<String>>, 18 | 19 | /// A list of custom flags to pass to all compiler invocations that Cargo performs. 20 | pub rustflags: Option<Vec<String>>, 21 | 22 | /// Build for the given architecture. 23 | pub target: String, 24 | } 25 | 26 | /// Config section 27 | /// currently available configuration sections are dev, ip, link, nodev, nouart 28 | /// the section name becomes the prefix for the configuration option 29 | /// example usage for section "dev" 30 | /// ```toml 31 | /// dev = [ 32 | /// 'arch', 33 | /// 'cap', 34 | /// 'foo="baz"' 35 | /// ] 36 | /// ``` 37 | /// this will create the following configuration options 38 | /// dev_arch, dev_cap and dev_foo="baz" 39 | /// 40 | /// usage example: 41 | /// ```rust 42 | /// #[cfg(dev_arch)] 43 | /// pub mod devarch; 44 | /// ``` 45 | /// ```rust 46 | /// #[cfg(dev_foo = "baz")] 47 | /// pub mod foobaz; 48 | /// ``` 49 | #[derive(Debug, Serialize, Deserialize)] 50 | pub struct Config { 51 | pub dev: Option<Vec<String>>, 52 | pub features: Option<Vec<String>>, 53 | pub ip: Option<Vec<String>>, 54 | pub link: Option<Vec<String>>, 55 | pub nodev: Option<Vec<String>>, 56 | pub nouart: Option<Vec<String>>, 57 | 58 | /// platform/board possible values: empty, raspi3b, vfive2, nezha, virt etc. 59 | /// 60 | /// example usage 61 | /// ```rust 62 | /// #[cfg(platform = "virt")] 63 | /// pub mod virt; 64 | /// ``` 65 | pub platform: Option<String>, 66 | 67 | /// Filepath of DTB file relative to crate 68 | pub dtb: Option<String>, 69 | } 70 | 71 | /// Qemu section 72 | /// Affects arguments to be passed to qemu - doesn't affect build artefacts. 73 | #[derive(Debug, Serialize, Deserialize)] 74 | pub struct Qemu { 75 | /// Machine (`-M`) value for qemu: raspi3b, raspi4b, etc.
76 | pub machine: Option<String>, 77 | 78 | /// Filepath of DTB file relative to crate 79 | pub dtb: Option<String>, 80 | } 81 | 82 | /// the TOML document 83 | #[derive(Debug, Serialize, Deserialize)] 84 | pub struct Configuration { 85 | pub build: Option<Build>, 86 | pub config: Option<Config>, 87 | pub link: Option<HashMap<String, String>>, 88 | pub qemu: Option<Qemu>, 89 | } 90 | 91 | impl Configuration { 92 | pub fn load(filename: String) -> Self { 93 | let contents = match fs::read_to_string(filename.clone()) { 94 | Ok(c) => c, 95 | Err(_) => { 96 | eprintln!("Could not read file `{filename}`"); 97 | exit(1); 98 | } 99 | }; 100 | let config: Configuration = match toml::from_str(&contents) { 101 | Ok(d) => d, 102 | Err(e) => { 103 | eprintln!("TOML: Unable to load data from `{}`", filename); 104 | eprintln!("{e}"); 105 | exit(1); 106 | } 107 | }; 108 | config 109 | } 110 | } 111 | 112 | fn apply_build(cmd: &mut Command, rustflags: &mut Vec<String>, config: &Configuration) { 113 | if let Some(config) = &config.build { 114 | let target = &config.target; 115 | cmd.arg("--target").arg(target); 116 | 117 | if let Some(flags) = &config.buildflags { 118 | // add the buildflags to the command 119 | for f in flags { 120 | cmd.arg(f); 121 | } 122 | } 123 | 124 | if let Some(flags) = &config.rustflags { 125 | // store the passed rustflags temporarily 126 | for f in flags { 127 | rustflags.push(f.to_string()); 128 | } 129 | } 130 | } 131 | } 132 | 133 | fn apply_platform_config(cmd: &mut Command, rustflags: &mut Vec<String>, config: &Configuration) { 134 | if let Some(config) = &config.config { 135 | // if the target uses features, make them available 136 | if let Some(features) = &config.features { 137 | let mut joined = features.join(","); 138 | if !features.is_empty() && joined.is_empty() { 139 | joined = features.first().unwrap().into(); 140 | } 141 | cmd.arg(format!("--features={joined}")); 142 | } 143 | 144 | if let Some(platform) = &config.platform { 145 | rustflags.push("--cfg".into()); 146 | rustflags.push(format!("platform=\"{}\"", platform)); 147 | } 148 | 149 | if let Some(devices) = &config.dev { 150 | // get all [config] 'dev' settings 151 | for dev in devices { 152 | rustflags.push("--cfg".into()); 153 | 154 | // prefix the setting 155 | rustflags.push(format!("dev_{dev}")); 156 | } 157 | } 158 | 159 | if let Some(ips) = &config.ip { 160 | // get all [config] 'ip' settings 161 | for ip in ips { 162 | rustflags.push("--cfg".into()); 163 | 164 | // prefix the setting 165 | rustflags.push(format!("ip_{ip}")); 166 | } 167 | } 168 | if let Some(links) = &config.link { 169 | // get all [config] 'link' settings 170 | for link in links { 171 | rustflags.push("--cfg".into()); 172 | 173 | // prefix the setting 174 | rustflags.push(format!("link_{link}")); 175 | } 176 | } 177 | 178 | if let Some(nodevs) = &config.nodev { 179 | // get all [config] 'nodev' settings 180 | for nodev in nodevs { 181 | rustflags.push("--cfg".into()); 182 | 183 | // prefix the setting 184 | rustflags.push(format!("nodev_{nodev}")); 185 | } 186 | } 187 | 188 | if let Some(nouarts) = &config.nouart { 189 | // get all [config] 'nouart' settings 190 | for nouart in nouarts { 191 | rustflags.push("--cfg".into()); 192 | 193 | // prefix the setting 194 | rustflags.push(format!("nouart_{nouart}")); 195 | } 196 | } 197 | } 198 | } 199 | 200 | fn apply_link( 201 | rustflags: &mut Vec<String>, 202 | config: &Configuration, 203 | target: &str, 204 | profile: &Profile, 205 | workspace_path: &str, 206 | ) { 207 | // we don't need to handle the linker script for clippy 208 | if let Some(link) = &config.link {
209 | let filename = link["script"].clone(); 210 | 211 | // do we have a linker script? 212 | if !filename.is_empty() { 213 | let mut contents = match fs::read_to_string(format!("{}/{}", workspace_path, filename)) 214 | { 215 | Ok(c) => c, 216 | Err(_) => { 217 | eprintln!("Could not read file `{filename}`"); 218 | exit(1); 219 | } 220 | }; 221 | 222 | // replace the placeholders with the values from the TOML 223 | if let Some(link) = &config.link { 224 | for l in link.iter() { 225 | match l.0.as_str() { 226 | "arch" => contents = contents.replace("${ARCH}", l.1), 227 | "load-address" => contents = contents.replace("${LOAD-ADDRESS}", l.1), 228 | "script" => {} // do nothing for the script option 229 | _ => eprintln!("ignoring unknown option '{} = {}'", l.0, l.1), 230 | } 231 | } 232 | } 233 | 234 | // construct the path to the target directory 235 | let path = format!( 236 | "{}/target/{}/{}", 237 | workspace_path, 238 | target, 239 | profile.to_string().to_lowercase() 240 | ); 241 | 242 | // make sure the target directory exists 243 | if !std::path::Path::new(&path).exists() { 244 | // if not, create it 245 | let _ = create_dir_all(&path); 246 | } 247 | 248 | // everything is set up, now create the linker script 249 | // in the target directory 250 | let mut file = File::create(format!("{}/kernel.ld", path)).unwrap(); 251 | let _ = file.write_all(contents.as_bytes()); 252 | 253 | // pass the script path to the rustflags 254 | rustflags.push(format!("-Clink-args=-T{}/kernel.ld", path)); 255 | } 256 | } 257 | } 258 | 259 | fn apply_qemu_config(cmd: &mut Command, config: &Configuration) { 260 | if let Some(config) = &config.qemu { 261 | if let Some(machine) = &config.machine { 262 | cmd.arg("-M"); 263 | cmd.arg(machine); 264 | } 265 | if let Some(dtb) = &config.dtb { 266 | cmd.arg("-dtb"); 267 | cmd.arg(dtb); 268 | } 269 | } 270 | } 271 | 272 | fn apply_rustflags(cmd: &mut Command, rustflags: &[String]) { 273 | // pass the collected rustflags 274 | // !! this overrides the build.rustflags from the target Cargo.toml !! 275 | if !rustflags.is_empty() { 276 | let flat = rustflags.join(" "); 277 | cmd.arg("--config"); 278 | cmd.arg(format!("build.rustflags='{}'", flat)); 279 | } 280 | } 281 | 282 | pub fn apply_to_clippy_step(cmd: &mut Command, config: &Configuration) { 283 | let mut rustflags: Vec<String> = Vec::new(); 284 | apply_platform_config(cmd, &mut rustflags, config); 285 | apply_rustflags(cmd, &rustflags); 286 | } 287 | 288 | pub fn apply_to_build_step( 289 | cmd: &mut Command, 290 | config: &Configuration, 291 | target: &str, 292 | profile: &Profile, 293 | workspace_path: &str, 294 | ) { 295 | let mut rustflags: Vec<String> = Vec::new(); 296 | apply_build(cmd, &mut rustflags, config); 297 | apply_platform_config(cmd, &mut rustflags, config); 298 | apply_link(&mut rustflags, config, target, profile, workspace_path); 299 | apply_rustflags(cmd, &rustflags); 300 | } 301 | 302 | pub fn apply_to_qemu_step(cmd: &mut Command, config: &Configuration) { 303 | apply_qemu_config(cmd, config); 304 | } 305 | --------------------------------------------------------------------------------
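A note on how the xtask pieces above fit together: `apply_to_build_step` adds the `[build]` target and buildflags to the cargo invocation, turns every `[config]` entry into a `--cfg` value (for example `platform="nezha"`, `dev_uart`, `ip_tcp`), writes the substituted `kernel.ld` into the target directory and appends `-Clink-args=-T<target dir>/kernel.ld`, then hands the whole collection to cargo as a single `--config build.rustflags='...'` argument. The sketch below shows how kernel code might consume those cfgs; the item names and constant values are hypothetical placeholders, only the cfg keys come from the xtask configuration.

```rust
// Sketch only: item names and values are placeholders; the cfg keys
// (platform, dev_*, ip_*, link_*, nodev_*, nouart_*) are the ones xtask
// derives from the [config] table and injects via build.rustflags.

/// Per-board console base address, chosen at compile time.
#[cfg(platform = "nezha")]
pub const CONS_BASE: usize = 0x0250_0000; // placeholder value

#[cfg(platform = "virt")]
pub const CONS_BASE: usize = 0x1000_0000; // placeholder value

/// Compiled in only when the config contains `dev = ['uart', ...]`.
#[cfg(dev_uart)]
pub fn init_uart() {
    // device initialization would go here
}

pub fn init() {
    #[cfg(dev_uart)]
    init_uart();
}
```

One effect of routing configuration through `--cfg` values rather than Cargo features is that board and device selection stays out of crate feature lists; note, as the comment in `apply_rustflags` warns, that the generated `build.rustflags` argument overrides any `build.rustflags` already set for the target crate.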