├── .dockerignore ├── .github └── workflows │ └── docker-build.yml ├── .gitignore ├── .gitmodules ├── CNAME ├── Dockerfile ├── LICENSE ├── README.md ├── _config.yml ├── boot ├── banners │ ├── riscv.txt │ └── welcome.txt └── buildroot │ ├── riscv64-linux-busybox-asciiinvaders.config │ ├── riscv64-linux-busybox-micropython.config │ └── riscv64-linux-busybox.config ├── docker └── entrypoint.py ├── docs ├── conduct.md ├── logo.png ├── running.md └── security.md ├── justfile ├── manifest.toml ├── rustfmt.toml └── src ├── hypervisor ├── .cargo │ └── config ├── Cargo.toml ├── mason.toml └── src │ ├── capsule.rs │ ├── debug.rs │ ├── error.rs │ ├── hardware.rs │ ├── heap.rs │ ├── irq.rs │ ├── loader.rs │ ├── lock.rs │ ├── main.rs │ ├── manifest.rs │ ├── message.rs │ ├── panic.rs │ ├── pcore.rs │ ├── physmem.rs │ ├── scheduler.rs │ ├── service.rs │ ├── vcore.rs │ └── virtmem.rs └── services ├── .cargo └── config ├── Cargo.toml └── mason.toml /.dockerignore: -------------------------------------------------------------------------------- 1 | # streamline the containerization process by leaving out files not needed to build 2 | target 3 | Dockerfile 4 | .dockerignore 5 | .github 6 | *.md 7 | *.png 8 | CNAME 9 | _config.yml -------------------------------------------------------------------------------- /.github/workflows/docker-build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-20.04 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Build Diosix-on-Qemu Docker image 18 | run: docker build --tag diosix:latest . 
19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # stop this sort of cruft ending up in the repo 2 | *~ 3 | *.swp 4 | *.swo 5 | *.o 6 | *.a 7 | *.bin 8 | *.iso 9 | *.out 10 | *.elf 11 | *.img 12 | .DS_Store 13 | 14 | # ignore compiler output directories 15 | target/ 16 | 17 | # ignore guest OS binaries 18 | boot/guests/ 19 | 20 | # ignore rustfmt stuff 21 | **/*.rs.bk 22 | 23 | # ignore IDE stuff for now 24 | .vscode* 25 | .vscode/* 26 | 27 | # ignore Cargo metadata 28 | Cargo.lock -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "src/services/src/supervisor-riscv"] 2 | path = src/services/src/supervisor-riscv 3 | url = https://github.com/diodesign/supervisor-riscv.git 4 | [submodule "src/mason"] 5 | path = src/mason 6 | url = https://github.com/diodesign/mason.git 7 | [submodule "src/mkdmfs"] 8 | path = src/mkdmfs 9 | url = https://github.com/diodesign/mkdmfs 10 | [submodule "src/hypervisor/src/byterider"] 11 | path = src/hypervisor/src/byterider 12 | url = https://github.com/diodesign/byterider.git 13 | [submodule "src/hypervisor/src/devicetree"] 14 | path = src/hypervisor/src/devicetree 15 | url = https://github.com/diodesign/devicetree.git 16 | [submodule "src/hypervisor/src/platform-riscv"] 17 | path = src/hypervisor/src/platform-riscv 18 | url = https://github.com/diodesign/platform-riscv.git 19 | [submodule "src/services/src/gooey"] 20 | path = src/services/src/gooey 21 | url = https://github.com/diodesign/gooey.git 22 | -------------------------------------------------------------------------------- /CNAME: -------------------------------------------------------------------------------- 1 | diosix.org -------------------------------------------------------------------------------- 
/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Containerized environment for building, running, and testing Diosix 3 | # This container targets RV64GC only 4 | # 5 | # Author: Chris Williams 6 | # 7 | 8 | # Establish base OS 9 | FROM debian:testing 10 | 11 | # Bring in the necessary tools 12 | RUN apt update && apt upgrade -y 13 | RUN apt -y install python3 python3-flask build-essential pkg-config git curl binutils-riscv64-linux-gnu qemu-system-misc libssl-dev 14 | 15 | # Bring in the environment runtime script 16 | COPY ./docker/entrypoint.py / 17 | 18 | # Bring in the project source code 19 | COPY . /diosix 20 | 21 | # Define where we'll work 22 | WORKDIR /diosix 23 | 24 | # Install necessary bits and pieces of Rust and just, pull in the submodules, and then build diosix 25 | RUN curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly -y \ 26 | && . $HOME/.cargo/env \ 27 | && cargo install --force just \ 28 | && git submodule update --init --recursive \ 29 | && just build 30 | 31 | # Define the environment in which we'll run commands 32 | ENTRYPOINT [ "/entrypoint.py" ] 33 | 34 | # Default run command: boot the hypervisor as normal. 
Use 'just test' to run unit tests or 'just build' to test it builds 35 | CMD [ "just" ] 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018-2021 Chris Williams 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build](https://github.com/diodesign/diosix/workflows/Build/badge.svg)](https://github.com/diodesign/diosix/actions?query=workflow%3A%22Build%22) [![License: MIT](https://img.shields.io/github/license/diodesign/diosix)](https://github.com/diodesign/diosix/blob/main/LICENSE) [![Language: Rust](https://img.shields.io/badge/language-rust-yellow.svg)](https://www.rust-lang.org/) [![Platform: riscv64](https://img.shields.io/badge/platform-riscv64-lightblue.svg)](https://riscv.org/) 2 | 3 | ## Welcome guide 4 | 5 | 1. [About this project](#intro) 6 | 1. [Quickstart using Qemu](#qemu) 7 | 1. [Quickstart using Google Cloud Run](#cloudrun) 8 | 1. [Run Diosix from source in a container](#container) 9 | 1. [Run Diosix from source without a container](#nocontainer) 10 | 1. [Frequently anticipated questions](#faq) 11 | 1. [Contact, contributions, security, and code of conduct](#contact) 12 | 1. [Copyright, distribution, and license](#copyright) 13 | 14 | ## About this project 15 | 16 | Diosix strives to be a lightweight, fast, and secure multiprocessor bare-metal hypervisor written [in Rust](https://www.rust-lang.org/) for 64-bit [RISC-V](https://riscv.org/) computers. Though this project is a work in progress, you can boot and use guest operating systems with it. 17 | 18 | Below is a recording of a user logging into a RISC-V Linux guest OS on Diosix and using Micropython to print "hello world!" 19 | 20 | [![asciicast](https://asciinema.org/a/395307.svg)](https://asciinema.org/a/395307) 21 | 22 | ## Quickstart using Qemu 23 | 24 | To run a prebuilt version of Diosix and a RISC-V Linux guest within Qemu, first ensure you have installed a 64-bit RISC-V-capable version of the emulator. This is included in the `qemu-system-misc` package on Debian 10, for example. 
25 | 26 | Next, fetch from the [`binaries`](https://github.com/diodesign/diosix/tree/binaries) branch an executable containing the hypervisor and its guest: 27 | 28 | ``` 29 | wget https://github.com/diodesign/diosix/raw/binaries/diosix/diosix-0.0.2-debug-20210404 30 | ``` 31 | 32 | Run Diosix on a dual-core Qemu system with 1GB of RAM: 33 | 34 | ``` 35 | qemu-system-riscv64 -bios none -nographic -machine virt -smp 2 -m 1G -kernel diosix-0.0.2-debug-20210404 36 | ``` 37 | 38 | Once booted, the hypervisor will start its included Linux guest OS. When you see the following prompt, log into the guest as `root` with no password: 39 | 40 | ``` 41 | Welcome to Busybox/Linux with Micropython 42 | buildroot-guest login: 43 | ``` 44 | 45 | Micropython, zsh, and less are provided as well as BusyBox. Press `Control-a` then `x` to exit the Qemu RISC-V emulator. Note: the guest instance is temporary, and any data saved inside it will be lost when you end the emulation. 46 | 47 | ## Quickstart using Google Cloud Run 48 | 49 | To run RISC-V Linux on Diosix from your browser using Google Cloud, click the button below. You will need a Google Cloud account. 50 | 51 | [![Run on Google Cloud](https://deploy.cloud.run/button.svg)](https://deploy.cloud.run?git_repo=https://github.com/diodesign/diosix) 52 | 53 | When prompted, confirm you trust the Diosix repository and allow Google Cloud Shell to make Google Cloud API calls. Cloud Shell will next ask you to choose which Google Cloud project and region to use for this process. 54 | 55 | Once selected, Google Cloud Run will create a Docker container image of Diosix, built from its latest source code, with a prebuilt Linux guest OS. 56 | 57 | To start this container, run this command in Cloud Shell: 58 | 59 | ``` 60 | docker run --rm -ti `docker images | grep -o -E "(gcr\.io\/){1}([a-z0-9\-]+)\/(diosix){1}"` 61 | ``` 62 | 63 | As with the Qemu quickstart guide, log into the guest using `root` as the username with no password. 
Press `Control-a` then `x` to exit the Qemu RISC-V emulator and shut down the container. Close the Cloud Shell to end the session. 64 | 65 | Note: you will be [billed](https://cloud.google.com/run/pricing) by Google for any resources used to build and run this container beyond your free allowance. Cloud Run documentation is [here](https://cloud.google.com/run). 66 | 67 | ## Run Diosix from source in a container 68 | 69 | To build Diosix from source and boot it within Qemu with a prebuilt guest OS within a Docker container on your own system, create a container image of the software: 70 | 71 | ``` 72 | git clone https://github.com/diodesign/diosix.git 73 | cd diosix 74 | docker build --tag diosix . 75 | ``` 76 | 77 | And start the container: 78 | 79 | ``` 80 | docker run -ti --rm diosix 81 | ``` 82 | 83 | As with Google Cloud Run, log into the provided guest Linux OS environment as `root` with no password. Press `Control-a` then `x` to exit the Qemu emulator and shut down and delete the container. 84 | 85 | ## Run Diosix from source without a container 86 | 87 | To build and run Diosix on real hardware, or within Qemu or Spike, from its latest source code without using Docker, follow [these instructions](docs/running.md). 88 | 89 | ## Frequently anticipated questions 90 | 91 | **Q. What can Diosix do right now?** 92 | 93 | **A.** It initializes a compatible RISC-V system, and runs one or more guest operating systems in hardware-isolated virtualized environments called capsules. System services also run in capsules using a provided runtime. One such service offers a virtual console through which the user can interact with guest capsules. 94 | 95 | Diosix supports systems with multiple CPU cores, and preemptively schedules capsules' virtual cores to run on their physical counterparts. It handles interrupts and exceptions, instruction emulation, serial IO, memory management and protection, and capsule and system service management. 
96 | 97 | It partially implements the [SBI specification](https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc) as implementation ID 5. It parses Device Tree configuration data from the motherboard firmware to discover the available hardware, and generates Device Tree structures describing virtualized environments for guest OSes to parse. 98 | 99 | **Q. Will you support other processor architectures?** 100 | 101 | **A.** Though the project is focused on RISC-V, Diosix is structured so that the hypervisor's core code is portable. Platform-specific code is kept separate and included during the build process: a port to another architecture would need to provide those platform-specific crates. If you want to contribute and maintain support for other architectures, please get in touch. Ports to other open hardware platforms, such as OpenPOWER, and architectures similar to RISC-V, such as Arm, would be welcome. 102 | 103 | **Q. Why no support for 32-bit RISC-V processors?** 104 | 105 | **A.** Diosix initially supported 32 and 64-bit RISC-V CPU cores. However, 32-bit support was dropped in March 2021 to prioritize fixing bugs, adding features, and updating documentation. If you wish to maintain RV32 support for Diosix, please get in touch. 106 | 107 | **Q. Does Diosix rely on KVM, Qemu, or similar?** 108 | 109 | **A.** No. Diosix is a strictly bare-metal, type-1 original hypervisor designed to run just above the ROM firmware level. It doesn't sit on top of any other virtualization library or layer, such as Linux's KVM, nor Xen. Qemu is used as a development environment, and it is not required to run Diosix on real hardware. 110 | 111 | **Q. What are the minimum requirements to run Diosix?** 112 | 113 | **A.** Diosix by default expects 1MB of RAM per CPU core plus space in memory to store the hypervisor and its payload, and space to run guests.
For example, a quad-core system with a 32MB Diosix payload (containing the hypervisor, a guest OS, console system service, and boot banners), running three guest instances with 128MB of memory each, would comfortably fit within 512MB of host RAM. The exact requirements are tunable: if your target hardware has limited RAM, Diosix's footprint can be scaled down as well as up. 114 | 115 | ## Contact, contributions, security issue reporting, and code of conduct 116 | 117 | Email [hello@diosix.org](mailto:hello@diosix.org) if you have any questions or issues to raise, wish to get involved, or have source to contribute. If you have found a security flaw, please follow [these steps](docs/security.md) to report the bug. 118 | 119 | You can also submit pull requests or raise issues via GitHub, though please consider disclosing security-related matters privately. You are more than welcome to use the [discussion boards](https://github.com/diodesign/diosix/discussions/) to ask questions and suggest features. 120 | 121 | Please observe the project's [code of conduct](docs/conduct.md) when participating. 122 | 123 | ## Copyright, distribution, and license 124 | 125 | Copyright © Chris Williams, 2018-2021. See [LICENSE](https://github.com/diodesign/diosix/blob/main/LICENSE) for distribution and use of source code, binaries, and documentation. 126 | 127 | More information can be found [here](https://github.com/diodesign/diosix/blob/binaries/README.md) on the contents of the guest OS binaries used by Diosix. The diosix.org [illustration](docs/logo.png) is a combination of artwork kindly provided by [Katerina Limpitsouni](https://undraw.co/license) and [RISC-V International](https://riscv.org/about/risc-v-branding-guidelines/).
128 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-minimal 2 | title: Diosix hypervisor 3 | description: Diosix is an open-source bare-metal hypervisor written in Rust for multi-core RISC-V systems 4 | show_downloads: no 5 | logo: docs/logo.png 6 | -------------------------------------------------------------------------------- /boot/banners/riscv.txt: -------------------------------------------------------------------------------- 1 | `.-:+ossssssssssssssssssssssssssssy 2 | ./ssssssssssssssssssssssssssy 3 | `/ssssssssssssssssssssssssy 4 | yyyyyyyyyyyyys+:` :sssssssssssssssssssssssy 5 | MMMMMMMMMMMMMMMMNh: :ssssssssssssssssssssssy 6 | MMMMMMMMMMMMMMMMMMMs +sssssssssssssssssssssy 7 | MMMMMMMMMMMMMMMMMMMM/ :sssssssssssssssssssss/ 8 | MMMMMMMMMMMMMMMMMMMMy .ssssssssssssssssssso- 9 | MMMMMMMMMMMMMMMMMMMM/ :ssssssssssssssssss/` 10 | MMMMMMMMMMMMMMMMMMMy +sssssssssssssssso- 11 | MMMMMMMMMMMMMMMMNd/ :ssssssssssssssss/` 12 | MMMdddddddddhhs+- :ssssssssssssssso- 13 | MMN .+sssssssssssssss+` `+ 14 | MMM: .-+ssssssssssssssso: -hM 15 | MMMMy. ..:/+ossssssssssssssss+. `+NMM 16 | MMMMMNo` -osssssssssssssssssso: -hMMMM 17 | MMMMMMMm/ `/osssssssssssssss+. +NMMMMM 18 | MMMMMMMMMh- ./sssssssssssss: .hMMMMMMM 19 | MMMMMMMMMMNy. .+ssssssssso. +NMMMMMMMM 20 | MMMMMMMMMMMMN+` -ossssss:` .hMMMMMMMMMM 21 | MMMMMMMMMMMMMMd: `:ssso. +NMMMMMMMMMMM 22 | MMMMMMMMMMMMMMMMh. `//` .hMMMMMMMMMMMMM 23 | MMMMMMMMMMMMMMMMMNs` /NMMMMMMMMMMMMMM 24 | MMMMMMMMMMMMMMMMMMMN/ `hMMMMMMMMMMMMMMMM -------------------------------------------------------------------------------- /boot/banners/welcome.txt: -------------------------------------------------------------------------------- 1 | :::::::-. ::: ... .::::::. ::: .,:: .: 2 | ;;, `';,;;; .;;;;;;;. 
;;;` ` ;;; `;;;, .,;; 3 | `[[ [[[[[,[[ \[[,'[==/[[[[,[[[ '[[,,[[' 4 | $$, $$$$$$$$, $$$ ''' $$$$ Y$$$P 5 | 888_,o8P'888"888,_ _,88P 88b dP888 oP"``"Yo, 6 | MMMMP"` MMM "YMMMMMP" "YMmMY" MMM,m" "Mm, 7 | 8 | https://diosix.org :: The Rust-RISC-V bare-metal hypervisor 9 | See README and LICENSE for usage, copyright, and distribution -------------------------------------------------------------------------------- /docker/entrypoint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Define containerized environment for running Diosix on Qemu 4 | # 5 | # On Google Cloud Run: Creates HTTP server on port 8080 6 | # or whatever was specified using the PORT system variable. 7 | # Outputs via the HTTP port. This requires K_SERVICE to be set. 8 | # 9 | # On all other environments: Log to stdout 10 | # 11 | # syntax: entrypoint.py 12 | # 13 | # Author: Chris Williams 14 | # 15 | 16 | import os 17 | import sys 18 | 19 | from flask import Flask 20 | 21 | # the paths to load the Cargo config and select the diosix directory are derived from the Dockerfile 22 | # this python isn't the most elegant -- feel free to fix up and send a pull request 23 | 24 | if __name__ == "__main__": 25 | if not os.environ.get('K_SERVICE'): 26 | print('Running locally') 27 | os.system('. $HOME/.cargo/env && cd /diosix && {}'.format(' '.join(sys.argv[1:]))) 28 | else: 29 | print('Running HTTP service {} {} {} for Google Cloud', os.environ.get('K_SERVICE'), os.environ.get('K_REVISION'), os.environ.get('K_CONFIGURATION')) 30 | app = Flask(__name__) 31 | @app.route('/') 32 | def ContainerService(): 33 | return 'Container built. 
Use docker images and docker run in the Google Cloud shell to run this container.\n' 34 | app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080))) 35 | -------------------------------------------------------------------------------- /docs/conduct.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in 6 | [Diosix](https://diosix.org) a harassment-free experience for everyone, 7 | regardless of age, body size, visible or invisible disability, ethnicity, 8 | sex characteristics, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | caste, color, religion, or sexual identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered 
inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement responsibilities 40 | 41 | The project lead is responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior deemed inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | The project lead has the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the Diosix project in public spaces. 55 | Examples of representing our project include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the Diosix [project lead](mailto:chrisw@diosix.org). 63 | 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | The project lead is obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | The project lead will follow these Community Impact Guidelines in determining 72 | the consequences for any action deemed in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from the project lead, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. 
A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 
123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available 126 | at [https://www.contributor-covenant.org/translations][translations]. 127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | [translations]: https://www.contributor-covenant.org/translations -------------------------------------------------------------------------------- /docs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/diodesign/diosix/68e9de71d0a097cf5f49d07a3ad61ac3e4414da7/docs/logo.png -------------------------------------------------------------------------------- /docs/running.md: -------------------------------------------------------------------------------- 1 | # Building and running Diosix 2 | 3 | These instructions will walk you through building and running Diosix. They assume you are using a GNU/Linux system running [Debian Testing](https://www.debian.org/) or equivalent, and that you are comfortable using the command line to navigate your file system and run programs. 4 | 5 | ## Objectives 6 | 7 | The outcome will involve booting one or more guest operating systems, such as Linux, on Diosix within a Qemu emulated environment. You can also just build the project to install its executable package on real hardware. 8 | 9 | ## Table of contents 10 | 11 | 1. [Getting started](#prep) 12 | 1. [Run Diosix in Qemu](#qemu) 13 | 1. [Using the system console](#console) 14 | 1. [Run Diosix in Spike](#spike) 15 | 1. [Run Diosix on real hardware](#realhw) 16 | 1. [Build without running](#buildonly) 17 | 1. [Options for building and running](#opts) 18 | 1. [Output build diagnostic messages](#opt_quiet) 19 | 1. 
[Target a specific CPU architecture](#opt_target) 20 | 1. [Build release-ready software](#opt_quality) 21 | 1. [Set the number of emulated CPU cores](#opt_cpus) 22 | 1. [Disable downloads of guest OSes](#opt_no_guest_fetch) 23 | 24 | ## Getting started 25 | 26 | These steps will prepare your system for building and running Diosix using its latest source code. 27 | 28 | 1. Ensure you have the necessary dependencies installed: 29 | 30 | ``` 31 | sudo apt update 32 | sudo apt -y install build-essential pkg-config git curl binutils-riscv64-linux-gnu qemu-system-misc libssl-dev 33 | ``` 34 | 35 | 2. If you have not yet installed the Rust toolchain, follow [these instructions](https://www.rust-lang.org/tools/install) to do so. Make the `nightly` version of Rust the default toolchain: 36 | 37 | ``` 38 | rustup default nightly 39 | ``` 40 | 41 | 3. Install [`just`](https://github.com/casey/just), which Diosix uses to automate the steps needed to build and run the project: 42 | 43 | ``` 44 | cargo install --force just 45 | ``` 46 | 47 | 4. Fetch the Diosix source code and enter its directory: 48 | 49 | ``` 50 | git clone --recurse-submodules https://github.com/diodesign/diosix.git 51 | cd diosix 52 | ``` 53 | 54 | ## Run Diosix in Qemu 55 | 56 | Once you have completed the [preparatory steps](#prep), run Diosix in the Qemu RISC-V emulator: 57 | 58 | ``` 59 | just 60 | ``` 61 | 62 | This will check to see if Diosix needs to be built. If so, the project will automatically create an executable containing the hypervisor and a simple file-system containing the system services, a set of welcome text, and one or more guest OS binaries. The contents of this executable are specified by the project's [`manifest.toml`](../manifest.toml) configuration file. 63 | 64 | Diosix is then booted in a Qemu RISC-V environment, and the hypervisor will start the included services and guests. To exit the emulator, press `Control-a` then `x`.
The guest OSes provided by default are BusyBox-based Linux operating systems. To log in, use the username `root`. No password is required. 65 | 66 | ## Using the system console 67 | 68 | By default, Diosix will run a system service called `gooey` that provides a very simple user interface. This is accessed through the terminal when using Qemu, and on real hardware, through the system's first serial port. 69 | 70 | `gooey` will show messages and information from the hypervisor in red, and assign other colors to individual guests. For example, the first guest will use yellow to output its text, blue for the second guest, and purple for the third. By default, Diosix includes one guest. To include more, edit the `manifest.toml` file to add extra guests, and run Diosix again. 71 | 72 | Currently, `gooey` displays output text from all capsules, though when typing into it, either via Qemu or a real system's serial port, that input text is sent only to the first guest. The coloring of the input and output text can be temporarily altered by the guest, for example when listing files with `ls` and displaying executables in a special color. The exact colors seen may vary depending on the color scheme used by your terminal. 73 | 74 | ## Run Diosix in Spike 75 | 76 | Once you have completed the [preparatory steps](#prep), run Diosix in the Spike RISC-V simulator: 77 | 78 | ``` 79 | just spike 80 | ``` 81 | 82 | Press `Control-c` to enter Spike's interactive debug mode. Instructions on how to use this mode are [here](https://github.com/riscv/riscv-isa-sim#interactive-debug-mode). Enter the command `q` or press `Control-c` again to quit the simulator from the debug mode. Note that support for Spike is not yet complete. 83 | 84 | ## Run Diosix on real hardware 85 | 86 | **Warning: Follow the next steps with care! The storage device specified below will be reformatted with a new GPT-based partitioning scheme, with the hypervisor and its dmfs image stored in partition 1. 
This will render any prior data on the device inaccessible. See [LICENSE](../LICENSE) for more information on the conditions and terms of use of this documentation** 87 | 88 | Once you have completed the [preparatory steps](#prep), build Diosix and install it on an SD card or similar storage device for use in a physical system: 89 | 90 | ``` 91 | just disk=/dev/sdX install 92 | ``` 93 | 94 | Replace `/dev/sdX` with the path of the storage device you wish to install Diosix on. The installation process will require superuser privileges via `sudo`, and so your user account must be a `sudoer` for this just recipe to work. Once complete, the device can be used in a compatible computer. So far, this recipe supports: 95 | 96 | * SiFive's [HiFive Unleashed](https://www.sifive.com/boards/hifive-unleashed). To run Diosix on this system: 97 | 1. Ensure the Unleashed board's boot mode switches are all set to `1`. 98 | 1. Insert a microSD card into the host building Diosix and run the above command, replacing `/dev/sdX` with the card's path to install the hypervisor to the card. 99 | 1. Remove the microSD card and insert it into the Unleashed board. 100 | 1. Connect the host to the Unleashed board's microUSB port via a suitable USB cable. 101 | 1. Power on or reset the Unleashed board. 102 | 1. Run the command `sudo screen /dev/ttyUSBX 115200` on the host to access the board's serial port console. You should replace `/dev/ttyUSBX` with the Unleashed's USB-to-serial interface. Typically, `X` is `1`. 103 | 1. You should see Diosix's output in the serial port console. 104 | 105 | Note that support for real hardware is not yet complete. 106 | 107 | ## Build without running 108 | 109 | To build Diosix without running the software: 110 | 111 | ``` 112 | just build 113 | ``` 114 | 115 | This will create an executable package of the hypervisor, services, and guests, as described [above](#qemu), at `src/hypervisor/target/diosix`. 
On RISC-V targets, this executable can be loaded by a suitable bootloader as a machine-level OpenSBI implementation. It expects to be loaded at the start of RAM at physical address `0x80000000` with a pointer to a valid Device Tree describing the hardware in register `a1`. It communicates through the serial port as configured by the firmware. 116 | 117 | Whether just building Diosix or building and running it, the build phase of the workflow will automatically use all available host CPU cores concurrently. 118 | 119 | ## Options for building and running 120 | 121 | You can customize the processes of building and running Diosix by passing parameters to `just`. 122 | 123 | The parameters are space separated and must follow `just` before any command, such as `build`, is given. For example, to just build an optimized, non-debug Diosix with output from the toolchain components enabled: 124 | 125 | ``` 126 | just quiet=no quality=release build 127 | ``` 128 | 129 | Below is a list of supported parameters. 130 | 131 | ### Output build diagnostic messages 132 | 133 | By default, the output of Diosix's toolchain components, such as `mkdmfs` and `cargo`, are suppressed during the build process. To see their output during build, set the `quiet` parameter to `no`, as in: 134 | 135 | ``` 136 | just quiet=no 137 | ``` 138 | 139 | This parameter can be used with `just` and `just build`. 140 | 141 | ### Target a specific CPU architecture 142 | 143 | By default, Diosix is built for general-purpose 64-bit RISC-V (RV64GC) processors. To build Diosix for a particular CPU architecture, use the table below to find the `target` parameter for the required supported architecture. 
144 | 145 | | Supported CPU architecture | `target` parameter value | 146 | |----------|--------------------------------| 147 | | RV64GC | `riscv64gc-unknown-none-elf` | 148 | | RV64IMAC | `riscv64imac-unknown-none-elf` | 149 | 150 | Then pass the `target` parameter to `just build` in the form of: 151 | 152 | ``` 153 | just target= 154 | ``` 155 | 156 | For example, the RV64IMAC architecture's `target` parameter value is `riscv64imac-unknown-none-elf`. To build for that architecture, use: 157 | 158 | ``` 159 | just target=riscv64imac-unknown-none-elf 160 | ``` 161 | 162 | This parameter can be used with `just` and `just build`. 163 | 164 | ### Build release-ready software 165 | 166 | By default, an unoptimized debug version of Diosix is built that outputs diagnostic information to the virtual console. To build an optimized version of Diosix that does not output diagnostic messages, and may be suitable for general release, set the parameter `quality` to `release`, as in: 167 | 168 | ``` 169 | just quality=release 170 | ``` 171 | 172 | Diosix's portable code uses [macros](../src/hypervisor/src/debug.rs) to output information for the user. The table below describes which macros are active for a given build quality, and the common usage of each macro. These are the macros that should be used by other parts of the project. 173 | 174 | | Macro | Usage | Debug | Release | 175 | |-------|-------|-------|---------| 176 | | `hvalert` | Critical messages from the hypervisor | Active | Active | 177 | | `hvdebug` | Diagnostic messages from the hypervisor | Active | Inactive | 178 | | `hvdebugraw` | `hvdebug` but without any context, such as CPU ID, nor an automatic newline | Active | Inactive | 179 | 180 | This parameter can be used with `just` and `just build`. 181 | 182 | ### Set the number of emulated CPU cores 183 | 184 | By default, Qemu runs Diosix on a four-core emulated system with 1GB of RAM. 
To override the number of CPU cores, set the `cpus` parameter to the number of cores required. For example, to boot Diosix on a dual-core emulated system: 185 | 186 | ``` 187 | just cpus=2 188 | ``` 189 | 190 | This parameter can be used with `just`. It has no effect with `just build`. 191 | 192 | ### Disable downloads of guest OSes 193 | 194 | By default, when Diosix's `manifest.toml` file specifies a guest OS that is not present in the build tree, it will fetch a copy of the guest from the internet so that it can be included in the final package. To prevent this from happening, set the parameter `guests-download` to `no`: 195 | 196 | ``` 197 | just guests-download=no 198 | ``` 199 | 200 | This will cause an error if a guest is required and not found in the build tree. This parameter can be used with `just` and `just build`. 201 | -------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | # Reporting security issues 2 | 3 | The [Diosix](https://diosix.org) project takes security bugs extremely seriously. Your efforts to responsibly disclose your findings are very much appreciated, and every effort will be made to acknowledge your contributions. To privately report a security vulnerability, email [security@diosix.org](mailto:security@diosix.org) with further details. 4 | 5 | The project's maintainers will send a response as soon as possible indicating the next steps in handling your report. After the initial reply to your report, the team will keep you informed of the project's progress toward a fix and public advisory, and may ask for additional information or guidance. 6 | 7 | # PGP public key 8 | 9 | If you wish to use PGP to securely disclose a vulnerability, use the public key below. This key expires March 6, 2023. 
10 | 11 | ``` 12 | -----BEGIN PGP PUBLIC KEY BLOCK----- 13 | 14 | mQINBGBDQOwBEADdMIZZBXv45PZb18ltCNOXdLXgmnpqQoZFubQqy01e8LAP+aMM 15 | 6fSE2KL08WP1i8S8fm3yJcmnYMBOiCQ4E8Z72lwoH+WVNFKsJBBPB5VuLagLQT7u 16 | 9Cw46tgHf5pozzi/AacoX/Ts7oztJFfu/G7rsQ9ib2meELfrCS4rDDZycqUuPVZQ 17 | eeCUSvLAKr3jJIZ+QuIz5r/j65IHnhyinhyO7q2KR5zkV2H/EXnVsJRkuWZHTSnO 18 | aGTIC5/hg4r76wBx5cC0w1+QefkqWjYN11p4BXsiOmA8FQpGmZvZypPJpfo1c2X8 19 | Hfti7G7vLSllGftxO7FcS5fI+A+PZH08fv3jn3/e/GmlvIprlu4jr6rcwYSl4604 20 | hgPM6X2sb1nXKQjplqSRFoSw+owGEvtIHXn1mUBEFXqHKIbWH9fyT8SOMJsQ8Ecs 21 | VteFRB8k11azjFRT3ElJZFcXDV4DhlGRpLxCGLyvJkV/4hOhGuU2buW81wNbElNb 22 | kffHglmFChRCuWOST/Ekyf6cusgsWa7LWDBCxyeB8ROq7yACWnfldM6zao78JKCE 23 | X4HF1aU7IvFDYIUf5imfz+zIXK7rMXCH7COZjQ19zUh8qXYc3H44Pj4riCW1Z0PO 24 | 9I/+goziuIvdWYZtSu17WBTQbeQ4XUMjDOMMajXSsQSWJfbEvo0pfwm8ewARAQAB 25 | tC1EaW9zaXggc2VjdXJpdHkgUEdQIGtleSA8c2VjdXJpdHlAZGlvc2l4Lm9yZz6J 26 | AlQEEwEIAD4WIQSw35X/qtSTqqURNsCwR85JX5x0pgUCYENA7AIbAwUJA8JnAAUL 27 | CQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCwR85JX5x0pirFD/4pz1768E6ZAxNY 28 | dp829yFoOw0dRWDPCQr/5cH6tM1k/7fBqpQ0PFvyz5GHiA9UuJG/oT+OaLCULHlF 29 | 4H4eG0Ta48C/EqiBmC4JLLV+Elb141kjbw+3z7vdwdud+up+IbvTEkNiPQehx09b 30 | xySj0RRi/8H3Q8Sy8wX7UsvKsxeys56VkxDrPikyX5IBlyg3k0rRqf2RmEuuIIyn 31 | 3dC/ECODbaE9qf1VlWfG25Ww4vsLWWXEXhwoLTxolePFSkKP6ZRbUJtybRLkzxVT 32 | gbeKGaB+18f/tY6ZOdqOwfUDbGNCb1hUtOEEp0oWEhd8hNiL3msHJ/Iut0RhbZPs 33 | KgeplrsURG9iX6OznX2I4G1SYr0nH/QccMeBlAABMw0qN1oiVZVc1SML2cPxxrue 34 | +UO0BZIv/hvBVhBvXG/lYkATiYgs32ie2ZbpTuvSD3725xbTTGqBrxUs6RsKMZLQ 35 | 2JwyfsmOUVbzJyn3bkalYXOhCu97OjCvwNTPmwLuQr9LefF2/mqjdKr4SkWbQ1mP 36 | P19rVuh+9maxLq7uq76ZZLCgqCmNyBuLia3fYmB01NDaf/Z1093HGieFf5XC6j6m 37 | i3BKJ/PlUhCgp4rcdmcW92cfBjwxdTbRZau6ZR2qdJeAVxz4BTCtLm0yeN5HI6RV 38 | zmsKdYNOU3yZVTrMoivPs1ubbf0VRbkCDQRgQ0DsARAAyKqLQEPUB7D2unQk0niU 39 | 6bBRuacxxeaGGJ104p49Z5pqGkGVSz9l49gk4aZSMoC+zL6j3PAOg8ohyqRu+Et+ 40 | oRTsdgFwH1H0u2Y3f8RKcPSKJGRV1xSq8aPXo6k7BUF5ZlQN7ikF8fiVMGihK1No 41 | 
d0sOMbpjpHU8CqlOQ2EQ3PTcU37qyCTBA6mCOtROz1sZfxUIo7iA2nzMx873G45y 42 | ZQW+3ks4w3jaMbO4wOVHJm2PrTwmkznTcC13fY3FVBLfVknx8R5b61pC46Teyi+P 43 | psJ6HFhuOVOjvP3S/gWluwG4PGv4kXiNy+1uj9HrOQZGvXpPlu+5jWujbXT42RDr 44 | qg+w78E0nzi9O4OfpzLpdURvRbl8UK8O/1o+xMxSvfNnv3i6gwDasKBS/DBglNjN 45 | G1nFJnfLxEa7X/N1DAiqmCbd2YOOFVjuHLxK7ID8UbeeUQdmwMIS0Akr3Vf2YM6j 46 | 91TFeCak2OBtT2b1kCxzFcpjM6veWtcr7tiqcesCFYLGPB349e6+Q2YjP+8wfG3T 47 | rSOwoPIDMQPH+FO5GjUOUPrTs7gj+81BOivMdXSgNU9txrBCQHRNPG0a7XDoqUvC 48 | rOBG4dLQycvrnwRPn7UAigeqZ7RX/FCsSc4DghQFzCNNeDJM6FdoOtCJh72OfWzt 49 | CxXq7b0bVqNqrhwvItKRd1UAEQEAAYkCPAQYAQgAJhYhBLDflf+q1JOqpRE2wLBH 50 | zklfnHSmBQJgQ0DsAhsMBQkDwmcAAAoJELBHzklfnHSmUicQAI123JFZerMWEEVS 51 | rmEUNk1rzvv/kIyEIFWAMWL0ooOO/Gd05YPTswgcKU1SRIuXZCOAU1eG3cl251uY 52 | kTsSTq7p6DwP7MQjxy9vR5GDE2qoKGPJ+l/309TMgKAAu4eYdulHLYxhpTENbTbc 53 | 6QVprS4ZIm7PmNK1xvSszEuFGZTkmS+N0Q5Q7TxBa0sX4Uu/neVWQoid2WDvhbgy 54 | 0RGWYmBvGPQKlsv996ddeD2yZDJOBYnhIiJN4rUqsUFT6WkSUi+J/2KATqTMzA5g 55 | wW9CktBDYs/6QIR+cwaPB1TtNu+11+tMkYGX4RW+cen5xmbMqVG/aTGY3OK83MLk 56 | WwVzJwl9JxSrkhyOBibSLsaG2y4tCmnU0AgsiXx9TeniuJeICw1rN9ppDwkOGgFj 57 | mzHz0r/pshP8xkVvbed82DvuiKhbfii46R8B3rWIQpKiXGBLRdsDXrwWEq1qFMXR 58 | LzqWeVONmS1jeYqjp35kvXZiKwmwljQ/nWFv3vo1lHpPdJx4nXoxMyovqvUPHF92 59 | CfQ/giplTj9bktOXvLoUPzHzg5sdvsTwttcQ9TtWcXYG4dhDNhLGUmpFcGxaXnx8 60 | 34cjpYnzWUhpfnGECSkvPlzSPzBNDz2AfGkKK1QIbKmF6eYQ9qIaMAkJz5BXbFmA 61 | rh+XUjjZcrUAWyTBdZnTp70PccFe 62 | =CPPb 63 | -----END PGP PUBLIC KEY BLOCK----- 64 | ``` -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | # 2 | # just makefile for the diosix project 3 | # 4 | # You will need just to go any further. 
install it using: 5 | # cargo install --force just 6 | # 7 | # Build and run diosix in Qemu, using the defaults: 8 | # just 9 | # 10 | # Build and run diosix in Spike, using the defaults: 11 | # just spike 12 | # 13 | # Only build diosix using the defaults: 14 | # just build 15 | # 16 | # A link is created at src/hypervisor/target/diosix pointing to the location 17 | # of the built ELF executable package containing the hypervisor, its services, and guests. 18 | # A flat binary of this ELF is created at src/hypervisor/target/diosix.bin 19 | # 20 | # Repartition a disk, typically an SD card, and install diosix on it (requires root via sudo) 21 | # just install 22 | # 23 | # set vendor to a supported vendor. eg: as sifive for SiFive Unleashed boards 24 | # set disk to the device to erase and install diosix on. eg: /dev/sdb 25 | # 26 | # Eg: 27 | # just vendor=sifive disk=/dev/sdb install 28 | 29 | # You can control the workflow by setting parameters. These must go after just and before 30 | # the command, such as build. Eg, for a verbose build-only process: 31 | # just quiet=no build 32 | # 33 | # Supported parameters 34 | # 35 | # Set target to the architecture you want to build for. Eg: 36 | # just target=riscv64imac-unknown-none-elf 37 | # 38 | # Set qemubin to the path of the Qemu system emulator binary you want to use to run diosix, Eg: 39 | # just qemubin=qemu-system-riscv64 40 | # 41 | # Set spikebin to the path of the Spike binary you want to use to run diosix, Eg: 42 | # just spikebin=$HOME/src/riscv-isa-sim/build/spike 43 | # 44 | # Set spikeisa to the RISC-V ISA to use with Spike, eg: 45 | # just spikeisa=RV64IMAC 46 | # 47 | # Set objcopybin to the objcopy suitable for the target architecture. Eg: 48 | # just objcopybin=riscv64-linux-gnu-objcopy install 49 | # 50 | # Set quality to release or debug to build a release or debug-grade build respectively. 
Eg: 51 | # just quality=release 52 | # just quality=debug 53 | # 54 | # Set quiet to no to see mkdmfs and cargo's usual output. 55 | # Set to yes to only report warnings and errors. Eg: 56 | # just quiet=no 57 | # just quiet=yes 58 | # 59 | # Set cpus to the number of CPU cores to run within qemu and spike, eg: 60 | # just cpus=1 61 | # 62 | # Force debug text output via Qemu's serial port by setting qemuprint to yes, eg: 63 | # just qemuprint=yes 64 | # 65 | # Force debug text output via SiFive's serial port by setting sifiveprint to yes, eg: 66 | # just sifiveprint=yes 67 | # 68 | # Force debug text output via Spike's HTIF by setting htifprint to yes, eg: 69 | # just htifprint=yes 70 | # 71 | # Disable hypervisor's regular integrity checks by setting integritychecks to no, eg: 72 | # just integritychecks=no 73 | # 74 | # Disable including services by setting services to no, eg: 75 | # just services=no 76 | # 77 | # Disable including guest OSes by setting quests to no, eg: 78 | # just guests=no 79 | # 80 | # Disable downlaoding guest OS images by setting guests-download to no, eg: 81 | # just guests-download=no 82 | # 83 | # Disable building guest OSes using buildroot by setting guests-build to no, eg: 84 | # just guests-build=no 85 | # 86 | # The defaults are: 87 | # qemubin qemu-system-riscv64 88 | # spikebin spike 89 | # spikeisa RV64IMAFDC 90 | # target riscv64gc-unknown-none-elf 91 | # objcopybin riscv64-linux-gnu-objcopy 92 | # quality debug 93 | # quiet yes 94 | # cpus 4 95 | # qemuprint no 96 | # sifiveprint no 97 | # htifprint no 98 | # integritychecks yes 99 | # services yes 100 | # guests yes 101 | # guests-download yes 102 | # guests-build yes 103 | # vendor sifive 104 | # 105 | # Author: Chris Williams 106 | # See LICENSE for usage and distribution 107 | # See README for further instructions 108 | # 109 | 110 | # let the user know what we're up to 111 | msgprefix := "--> " 112 | buildmsg := msgprefix + "Building" 113 | cleanmsg := msgprefix + 
"Cleaning build tree" 114 | rustupmsg := msgprefix + "Ensuring Rust can build for" 115 | builtmsg := msgprefix + "Diosix built and ready to use at" 116 | qemumsg := msgprefix + "Running Diosix in Qemu" 117 | spikemsg := msgprefix + "Running Diosix in Spike" 118 | installmsg := msgprefix + "Installing" 119 | installedmsg := msgprefix + "Diosix installed on disk" 120 | 121 | # define defaults, these are overriden by the command line 122 | target := "riscv64gc-unknown-none-elf" 123 | qemubin := "qemu-system-riscv64" 124 | spikebin := "spike" 125 | spikeisa := "RV64IMAFDC" 126 | objcopybin := "riscv64-linux-gnu-objcopy" 127 | quality := "debug" 128 | quiet := "yes" 129 | cpus := "4" 130 | qemuprint := "no" 131 | sifiveprint := "no" 132 | htifprint := "no" 133 | integritychecks := "yes" 134 | services := "yes" 135 | guests := "yes" 136 | guests-download := "yes" 137 | guests-build := "yes" 138 | final-exe-path := "src/hypervisor/target/diosix" 139 | vendor := "sifive" 140 | disk := "/dev/null" 141 | 142 | # generate cargo switches 143 | quality_sw := if quality == "debug" { "debug" } else { "release" } 144 | release_sw := if quality == "release" { "--release " } else { "" } 145 | quiet_sw := if quiet == "yes" { "--quiet " } else { "" } 146 | quiet_redir_sw := if quiet == "yes" { "> /dev/null " } else { "" } 147 | verbose_sw := if quiet == "no" { "--verbose " } else { "" } 148 | qemuprint_sw := if qemuprint == "yes" { "--features qemuprint" } else { "" } 149 | sifiveprint_sw := if sifiveprint == "yes" { "--features sifiveprint" } else { "" } 150 | htifprint_sw := if htifprint == "yes" { "--features htifprint" } else { "" } 151 | cargo_sw := quiet_sw + release_sw + "--target " + target 152 | integritychecks_sw := if integritychecks == "yes" { "--features integritychecks" } else { "" } 153 | services_sw := if services == "no" { "--skip-services" } else { "" } 154 | guests_sw := if guests == "no" { "--skip-guests" } else { "" } 155 | downloads_sw := if guests-download == 
"no" { "--skip-downloads" } else { "" } 156 | builds_sw := if guests-build == "no" { "--skip-buildroot" } else { "" } 157 | 158 | # the default recipe 159 | # build diosix with its components, and run it within qemu 160 | @qemu: build 161 | echo "{{qemumsg}}" 162 | {{qemubin}} -bios none -nographic -machine virt -smp {{cpus}} -m 1G -kernel {{final-exe-path}} 163 | 164 | # build diosix, and run it within spike 165 | @spike: build 166 | echo "{{spikemsg}}" 167 | {{spikebin}} --isa={{spikeisa}} -p{{cpus}} -m1024 {{final-exe-path}} 168 | 169 | # build and install diosix with its components onto a disk (requires root via sudo) 170 | @install: build 171 | {{objcopybin}} -O binary {{final-exe-path}} {{final-exe-path}}.bin {{quiet_redir_sw}} 172 | echo "{{installmsg}} {{final-exe-path}}.bin on {{disk}}" 173 | sudo sgdisk --clear --new=1:2048:65536 --change-name=1:bootloader --typecode=1:2E54B353-1271-4842-806F-E436D6AF6985 -g {{disk}} {{quiet_redir_sw}} 174 | sudo dd if={{final-exe-path}}.bin of={{disk}}1 bs=512 {{quiet_redir_sw}} 2>&1 175 | echo "{{installedmsg}}" 176 | 177 | # the core workflow for building diosix and its components 178 | # a link is created at final-exe-path to the final packaged executable 179 | @build: _descr _rustup _hypervisor 180 | ln -fs {{target}}/{{quality_sw}}/hypervisor {{final-exe-path}} 181 | echo "{{builtmsg}} {{final-exe-path}}" 182 | 183 | # let the user know what's going to happen 184 | @_descr: 185 | echo "{{buildmsg}} {{quality_sw}}-grade Diosix for {{target}} systems" 186 | 187 | # build the hypervisor and ensure it has a boot file system to include 188 | @_hypervisor: _mkdmfs 189 | echo "{{buildmsg}} hypervisor" 190 | cd src/hypervisor && cargo build {{cargo_sw}} {{qemuprint_sw}} {{sifiveprint_sw}} {{htifprint_sw}} {{integritychecks_sw}} 191 | 192 | # build and run the dmfs generator to include banners and system services. 193 | # mkdmfs is configured by manifest.toml in the project root directory. 
194 | # the output fs image is linked in the hypervisor and unpacked at run-time 195 | # 196 | # the target directory stores the dmfs image file 197 | @_mkdmfs: _services 198 | echo "{{buildmsg}} dmfs image" 199 | cd src/mkdmfs && cargo run {{quiet_sw}} -- -t {{target}} -q {{quality_sw}} {{verbose_sw}} {{services_sw}} {{guests_sw}} {{downloads_sw}} {{builds_sw}} 200 | 201 | # build the system services 202 | @_services: 203 | echo "{{buildmsg}} system services" 204 | cd src/services && cargo build {{cargo_sw}} 205 | 206 | # make sure we've got the cross-compiler installed and setup 207 | @_rustup: 208 | echo "{{rustupmsg}} {{target}}" 209 | rustup {{quiet_sw}} target install {{target}} 210 | 211 | # delete intermediate build files and update cargo dependencies to start afresh 212 | @clean: 213 | echo "{{cleanmsg}}" 214 | -cd src/hypervisor && cargo {{quiet_sw}} clean && cargo {{quiet_sw}} update 215 | -cd src/services && cargo {{quiet_sw}} clean && cargo {{quiet_sw}} update 216 | -cd src/mkdmfs && cargo {{quiet_sw}} clean && cargo {{quiet_sw}} update 217 | 218 | # FIXME: the framework for this is broken. 219 | # run unit tests for each major component 220 | # @_test: 221 | # -cd src/hypervisor && cargo {{quiet_sw}} test 222 | # -cd src/services && cargo {{quiet_sw}} test 223 | # -cd src/mkdmfs && cargo {{quiet_sw}} test 224 | 225 | # are we allowed one easter egg? 226 | @_incredible: 227 | echo "No, you're incredible." 
-------------------------------------------------------------------------------- /manifest.toml: -------------------------------------------------------------------------------- 1 | # Define the contents of a base system the hypervisor can boot 2 | # 3 | # This base system contains welcome banner(s), system services, 4 | # and guests OSes that are bundled with the hypervisor binary 5 | # and unpacked at run-time 6 | # 7 | # See src/mkdmfs/src/main.rs for the file format 8 | # 9 | # Directory paths are relative to this manifest.toml file 10 | # 11 | # See the notes below on booting more than one guest 12 | # 13 | 14 | # these settings can be overridden by whatever's parsing this file, usually mkdmfs 15 | [defaults] 16 | arch = "riscv64gc-unknown-none-elf" 17 | quality = "debug" 18 | outfile = "src/mkdmfs/target/dmfs.img" 19 | 20 | # define where to find the welcome banners 21 | [banners] 22 | path = "boot/banners" 23 | welcome = "boot/banners/welcome.txt" 24 | 25 | # define system services to include 26 | [services] 27 | include = [ "gooey" ] 28 | 29 | # define each individual service 30 | 31 | # this is the console usre-interface. 
it is granted permission to access the system console and 32 | # also other capsules' console buffers to route input and output text between the user and guests 33 | [service.gooey] 34 | path = "src/services" 35 | description = "console interface" 36 | properties = [ "auto_crash_restart", "service_console", "console_write", "console_read", "hv_log_read" ] 37 | ram = 64 38 | cpus = 2 39 | 40 | # define guests that may join us during boot 41 | 42 | # a mildly useful Linux with busybox, micropython, zsh, and less 43 | [guest.riscv64-linux-busybox-asciiinvaders] 44 | path = "boot/guests" 45 | url = "https://github.com/diodesign/diosix/raw/binaries/buildroot-guests/riscv64-linux-busybox-asciiinvaders" 46 | description = "64-bit RISC-V Linux with asciiinvaders and more" 47 | ram = 256 48 | cpus = 2 49 | 50 | # a mildly useful Linux with busybox, micropython, zsh, and less 51 | [guest.riscv64-linux-busybox-micropython] 52 | path = "boot/guests" 53 | url = "https://github.com/diodesign/diosix/raw/binaries/buildroot-guests/riscv64-linux-busybox-micropython" 54 | description = "64-bit RISC-V Linux with Busybox, Micropython, zsh, less" 55 | ram = 128 56 | cpus = 2 57 | 58 | # a bare-bones Linux and Busybox 59 | [guest.riscv64-linux-busybox] 60 | path = "boot/guests" 61 | url = "https://github.com/diodesign/diosix/raw/binaries/buildroot-guests/riscv64-linux-busybox" 62 | description = "64-bit RISC-V Linux with Busybox" 63 | ram = 128 64 | cpus = 2 65 | 66 | # select the guests to include for a given target 67 | 68 | # To include and boot more than one guest, add more than one entry to the guests array 69 | # for the architecture you wish to target. 
For example, to run three guests, 70 | # a Linux OS with Micropython installed, and two smaller Linux OSes, populate the guests array 71 | # for the default target as follows: 72 | # 73 | # [target.riscv64gc-unknown-none-elf] 74 | # guests = [ "riscv64-linux-busybox-micropython", "riscv64-linux-busybox", "riscv64-linux-busybox-asciiinvaders" ] 75 | # 76 | 77 | # RV64GC: this is the default target, loading one guest only by default 78 | [target.riscv64gc-unknown-none-elf] 79 | guests = [ "riscv64-linux-busybox-micropython" ] 80 | 81 | # RV64IMAC: alternative target, loading one guest only by default 82 | [target.riscv64imac-unknown-none-elf] 83 | guests = [ "riscv64-linux-busybox-micropython" ] 84 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Diosix coding style 2 | # 3 | # Pretty much follows Rust style, but with braces and blocks always on newlines 4 | 5 | control_brace_style = "AlwaysNextLine" 6 | brace_style = "AlwaysNextLine" 7 | ignore = [ "src/contrib" ] 8 | -------------------------------------------------------------------------------- /src/hypervisor/.cargo/config: -------------------------------------------------------------------------------- 1 | # 2 | # diosix hypervisor platform-specific linker settings 3 | # 4 | # (c) Chris Williams, 2019-2021. 5 | # See LICENSE for usage and copying. 
6 | # 7 | 8 | # 9 | # set the default build triple 10 | # 11 | [build] 12 | target = "riscv64gc-unknown-none-elf" 13 | 14 | # Find the linker for 64-bit RISC-V (IMAC) targets 15 | [target.riscv64imac-unknown-none-elf] 16 | rustflags = [ "-Z", "pre-link-arg=-nostartfiles", "-C", "link-arg=-Tsrc/platform-riscv/link.ld", "-C", "link-arg=--no-eh-frame-hdr" ] 17 | linker = "riscv64-linux-gnu-ld" 18 | ar = "riscv64-linux-gnu-ar" 19 | 20 | # Find the linker for 64-bit RISC-V (GC) targets 21 | [target.riscv64gc-unknown-none-elf] 22 | rustflags = [ "-Z", "pre-link-arg=-nostartfiles", "-C", "link-arg=-Tsrc/platform-riscv/link.ld", "-C", "link-arg=--no-eh-frame-hdr" ] 23 | linker = "riscv64-linux-gnu-ld" 24 | ar = "riscv64-linux-gnu-ar" 25 | -------------------------------------------------------------------------------- /src/hypervisor/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hypervisor" 3 | version = "0.0.2" 4 | authors = ["Chris Williams "] 5 | license = "MIT" 6 | build = "../mason/build.rs" 7 | publish = false 8 | edition = "2018" 9 | 10 | [[bin]] 11 | name = "hypervisor" 12 | path = "src/main.rs" 13 | 14 | [build-dependencies] 15 | regex = "1.4.2" 16 | toml = "0.5.8" 17 | serde = "1.0.118" 18 | serde_derive = "1.0.118" 19 | 20 | [features] 21 | qemuprint = [] # enable to force debug text through Qemu's serial port 22 | sifiveprint = [] # enable to force debug text through SiFive's standard serial port 23 | htifprint = [] # enable to force debug text through Spike's HTIF 24 | integritychecks = [] # enable to check integrity of per-CPU structures from overwrites */ 25 | 26 | # local and special dependencies 27 | [dependencies] 28 | devicetree = { path = "src/devicetree" } 29 | dmfs = { path = "../mkdmfs/dmfs" } 30 | xmas-elf = { git = "https://github.com/nrc/xmas-elf.git" } 31 | 32 | # external dependencies 33 | [dependencies.hashbrown] 34 | version = "0.9.1" 35 | features = [ "nightly" ] 36 | 37 
| [dependencies.lazy_static] 38 | version = "1.4.0" 39 | features = [ "spin_no_std" ] 40 | 41 | # supported build targets - don't forget to update .cargo with details for the linker and runner when adding new ports 42 | [target.riscv64imac-unknown-none-elf.dependencies] 43 | platform = { path = "src/platform-riscv" } 44 | 45 | [target.riscv64gc-unknown-none-elf.dependencies] 46 | platform = { path = "src/platform-riscv" } 47 | -------------------------------------------------------------------------------- /src/hypervisor/mason.toml: -------------------------------------------------------------------------------- 1 | # Configure Mason to build non-Rust portions of the diosix hypervisor 2 | # 3 | # Directory paths are relative to this manifest.toml file 4 | 5 | [defaults] 6 | # include_files is a colon-separated list of arbitrary files 7 | # to include in the hypervisor's executable 8 | include_files = [ "../mkdmfs/target/dmfs.img" ] 9 | 10 | # Set up assembly code directories for supported architectures 11 | # asm_dirs is a colon-separated list of directories to scan 12 | # for assembly code to build and link to the hypervisor's executable 13 | [target.riscv64imac-unknown-none-elf] 14 | asm_dirs = [ "src/platform-riscv/asm" ] 15 | 16 | [target.riscv64gc-unknown-none-elf] 17 | asm_dirs = [ "src/platform-riscv/asm" ] -------------------------------------------------------------------------------- /src/hypervisor/src/debug.rs: -------------------------------------------------------------------------------- 1 | /* diosix debug console output code 2 | * 3 | * (c) Chris Williams, 2019-2021. 4 | * 5 | * See LICENSE for usage and copying. 
6 | */ 7 | 8 | /* to avoid warnings about super::hardware when qemuprint is active */ 9 | #![allow(unused_imports)] 10 | 11 | use super::error::Cause; 12 | use core::fmt; 13 | use super::lock::Mutex; 14 | use alloc::vec::Vec; 15 | use alloc::string::String; 16 | use super::hardware; 17 | use super::service; 18 | use super::message; 19 | 20 | /* here's the logic for the hypervisor's debug queues 21 | * all the hvprint macros feed into DEBUG_QUEUE 22 | * the hypervisor will select a physical CPU core in between workloads to drain DEBUG_QUEUE 23 | * DEBUG_QUEUE will be drained into two channels: DEBUG_LOG, and the system debug output port 24 | (typically a serial port) if a user interface capsule isn't running 25 | * the user interface capsule will drain DEBUG_LOG 26 | * DEBUG_LOG will have a fixed limit to avoid it chewing up too much RAM 27 | * if the qemuprint feature is active, the system debug output port will always be the 28 | Qemu virt serial port regardless of what's in the host hardware's device tree 29 | */ 30 | 31 | const DEBUG_LOG_MAX_LEN: usize = 64 * 1024; /* 64KB max length for debug log buffer */ 32 | 33 | lazy_static! 34 | { 35 | pub static ref DEBUG_LOCK: Mutex = Mutex::new("primary debug lock", false); 36 | static ref DEBUG_QUEUE: Mutex = Mutex::new("debug output queue", String::new()); 37 | static ref DEBUG_LOG: Mutex> = Mutex::new("debug log buffer", Vec::new()); 38 | } 39 | 40 | /* top level debug macros */ 41 | /* bad news: bug detection, failures, etc. */ 42 | #[macro_export] 43 | macro_rules! hvalert 44 | { 45 | ($fmt:expr) => (hvprintln!("[!] CPU {}: {}", $crate::pcore::PhysicalCore::get_id(), $fmt)); 46 | ($fmt:expr, $($arg:tt)*) => (hvprintln!(concat!("[!] CPU {}: ", $fmt), $crate::pcore::PhysicalCore::get_id(), $($arg)*)); 47 | } 48 | 49 | /* only output if debug build is enabled */ 50 | #[macro_export] 51 | #[cfg(debug_assertions)] 52 | macro_rules! hvdebug 53 | { 54 | ($fmt:expr) => (hvprintln!("[?] 
CPU {}: {}", $crate::pcore::PhysicalCore::get_id(), $fmt)); 55 | ($fmt:expr, $($arg:tt)*) => (hvprintln!(concat!("[?] CPU {}: ", $fmt), $crate::pcore::PhysicalCore::get_id(), $($arg)*)); 56 | } 57 | 58 | /* silence debug if disabled */ 59 | #[macro_export] 60 | #[cfg(not(debug_assertions))] 61 | macro_rules! hvdebug 62 | { 63 | ($fmt:expr) => ({}); 64 | ($fmt:expr, $($arg:tt)*) => ({}); 65 | } 66 | 67 | /* don't include any metadata nor add a newline */ 68 | #[macro_export] 69 | #[cfg(debug_assertions)] 70 | macro_rules! hvdebugraw 71 | { 72 | ($fmt:expr) => (hvprint!("{}", $fmt)); 73 | ($fmt:expr, $($arg:tt)*) => (hvprint!(concat!($fmt), $($arg)*)); 74 | } 75 | 76 | /* silence debug if disabled */ 77 | #[macro_export] 78 | #[cfg(not(debug_assertions))] 79 | macro_rules! hvdebugraw 80 | { 81 | ($fmt:expr) => ({}); 82 | ($fmt:expr, $($arg:tt)*) => ({}); 83 | } 84 | 85 | /* low-level macros for hypervisor-only hvprintln and hvprint debug output routines */ 86 | macro_rules! hvprintln 87 | { 88 | ($fmt:expr) => (hvprint!(concat!($fmt, "\r\n"))); 89 | ($fmt:expr, $($arg:tt)*) => (hvprint!(concat!($fmt, "\r\n"), $($arg)*)); 90 | } 91 | 92 | macro_rules! hvprint 93 | { 94 | ($($arg:tt)*) => 95 | ({ 96 | use core::fmt::Write; 97 | { 98 | let mut hvprint_lock = $crate::debug::DEBUG_LOCK.lock(); 99 | *hvprint_lock = true; 100 | 101 | unsafe { $crate::debug::CONSOLE.write_fmt(format_args!($($arg)*)).unwrap(); } 102 | } 103 | }); 104 | } 105 | 106 | macro_rules! 
debughousekeeper 107 | { 108 | () => ($crate::debug::drain_queue()); 109 | } 110 | 111 | /* create a generic debug console writer */ 112 | pub struct ConsoleWriter; 113 | pub static mut CONSOLE: ConsoleWriter = ConsoleWriter {}; 114 | 115 | impl fmt::Write for ConsoleWriter 116 | { 117 | /* write the given string either to the debug queue, which will 118 | be outputted as normal by the hypervisor or the user interface service, 119 | or force output through a build-time-selected interface */ 120 | fn write_str(&mut self, s: &str) -> core::fmt::Result 121 | { 122 | /* check if we're forcing output to a particular hardware port */ 123 | if cfg!(feature = "qemuprint") 124 | { 125 | for c in s.as_bytes() 126 | { 127 | if cfg!(target_arch = "riscv64") 128 | { 129 | let tx_register = 0x10000000; /* qemu's RV64 virt UART data register in memory */ 130 | unsafe { *(tx_register as *mut u8) = *c }; 131 | } 132 | } 133 | } 134 | else if cfg!(feature = "sifiveprint") 135 | { 136 | let tx_register = 0x10010000; /* sifive's UART tx register in memory */ 137 | for c in s.as_bytes() 138 | { 139 | /* when reading the word-length tx write register, it's zero if we're OK to write to it */ 140 | while unsafe { *(tx_register as *mut u32) } != 0 {} 141 | unsafe { *(tx_register as *mut u32) = *c as u32 }; 142 | } 143 | } 144 | else if cfg!(feature = "htifprint") 145 | { 146 | extern "C" { fn platform_write_to_htif(byte: u8); } 147 | for c in s.as_bytes() 148 | { 149 | unsafe { platform_write_to_htif(*c) } 150 | } 151 | } 152 | else 153 | { 154 | /* queue the output for printing out later when ready */ 155 | DEBUG_QUEUE.lock().push_str(s); 156 | } 157 | Ok(()) 158 | } 159 | } 160 | 161 | /* if no user interface is available yet, copy the queue into the system debug output port. 162 | then regardless of the UI service, drain the debug queue into the debug logging buffer. 
163 | 164 | if output is being forced to a particular port (eg, using qemuprint or sifiveprint) 165 | then this function shouldn't have anything to do. a side effect of this is that 166 | the UI service is then disconnected from the hypervisor's debug output, which means 167 | there may be conflicts. forcing hypervisor output to a particular interface should 168 | be used for early debugging, before any capsules are started */ 169 | pub fn drain_queue() 170 | { 171 | /* avoid blocking if we can't write at this time */ 172 | if DEBUG_LOCK.is_locked() == false 173 | { 174 | /* acquire main debug lock and pretend to do something to it 175 | to keep the toolchain happy */ 176 | let mut debug_lock = DEBUG_LOCK.lock(); 177 | *debug_lock = true; 178 | 179 | let mut debug_queue = DEBUG_QUEUE.lock(); 180 | let mut debug_log = DEBUG_LOG.lock(); 181 | 182 | /* copy the debug queue out to the system debug output port ourselves if there's no user interface yet */ 183 | if service::is_registered(service::ServiceType::ConsoleInterface) == false 184 | { 185 | if hardware::write_debug_string(&debug_queue) == false 186 | { 187 | /* we may not even know what hardware is available yet, 188 | so bail out and try again later */ 189 | return; 190 | } 191 | } 192 | 193 | /* drain the debug queue to the log buffer so it can be fetched later by the 194 | user interface service */ 195 | for c in debug_queue.as_str().chars() 196 | { 197 | debug_log.push(c); 198 | } 199 | debug_queue.clear(); 200 | 201 | /* truncate the log buffer if it's too long */ 202 | if debug_log.len() > DEBUG_LOG_MAX_LEN 203 | { 204 | let to_truncate = debug_log.len() - DEBUG_LOG_MAX_LEN; 205 | debug_log.drain(0..to_truncate); 206 | } 207 | } 208 | } 209 | 210 | /* pick off the next character in the hypervisor log output buffer, 211 | or None if the buffer is empty */ 212 | pub fn get_log_char() -> Option 213 | { 214 | let mut debug_log = DEBUG_LOG.lock(); 215 | if debug_log.len() > 0 216 | { 217 | return 
Some(debug_log.remove(0)); 218 | } 219 | None 220 | } -------------------------------------------------------------------------------- /src/hypervisor/src/error.rs: -------------------------------------------------------------------------------- 1 | /* diosix error codes 2 | * 3 | * (c) Chris Williams, 2019-2021. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | /* how things can go wrong */ 9 | #[derive(Debug)] 10 | pub enum Cause 11 | { 12 | /* misc */ 13 | NotImplemented, 14 | 15 | /* debug */ 16 | DebugInitFailed, 17 | 18 | /* devices */ 19 | DeviceTreeBad, 20 | CantCloneDevices, 21 | BootDeviceTreeBad, 22 | 23 | /* physical CPU cores */ 24 | PhysicalCoreBadID, 25 | PhysicalCoreCountUnknown, 26 | 27 | /* capsule services */ 28 | ServiceAlreadyRegistered, 29 | ServiceAlreadyOwner, 30 | ServiceNotAllowed, 31 | ServiceNotFound, 32 | 33 | /* messages */ 34 | MessageBadType, 35 | 36 | /* heap */ 37 | HeapNotInUse, 38 | HeapBadBlock, 39 | HeapNoFreeMem, 40 | HeapBadSize, 41 | HeapBadMagic, 42 | 43 | /* virtual core management */ 44 | VirtualCoreBadID, 45 | VirtualCoreAWOL, 46 | 47 | /* host physical memory */ 48 | PhysNoRAMFound, 49 | PhysNotEnoughFreeRAM, 50 | PhysRegionTooSmall, 51 | PhysRegionCollision, 52 | PhysRegionNoMatch, 53 | PhysRegionSplitOutOfBounds, 54 | PhysRegionRegionAlignmentFailure, 55 | PhysRegionSmallNotMultiple, 56 | PhysRegionLargeNotMultiple, 57 | 58 | /* capsule virtual memory */ 59 | VirtMemPhysNotSet, 60 | 61 | /* capsules */ 62 | CapsuleIDExhaustion, 63 | CapsuleBadID, 64 | CapsuleCannotRestart, 65 | CapsuleCantDie, 66 | CapsuleCantRestart, 67 | CapsuleBufferEmpty, 68 | CapsuleBufferWriteFailed, 69 | CapsuleMaxVCores, 70 | CapsuleBadPermissions, 71 | CapsulePropertyNotFound, 72 | 73 | /* scheduler and timer */ 74 | SchedNoTimer, 75 | 76 | /* supervisor binary loading */ 77 | LoaderUnrecognizedCPUArch, 78 | LoaderSupervisorTooLarge, 79 | LoaderSupervisorFileSizeTooLarge, 80 | LoaderSupervisorEntryOutOfRange, 81 | 
LoaderUnrecognizedSupervisor, 82 | LoaderSupervisorBadImageOffset, 83 | LoaderSupervisorBadPhysOffset, 84 | LoaderSupervisorBadDynamicArea, 85 | LoaderSupervisorBadRelaEntrySize, 86 | LoaderSupervisorRelaTableTooBig, 87 | LoaderSupervisorBadRelaTblEntry, 88 | LoaderSupervisorUnknownRelaType, 89 | LoaderBadEntry, 90 | 91 | /* manifest errors */ 92 | ManifestBadFS, 93 | ManifestNoSuchAsset 94 | } 95 | -------------------------------------------------------------------------------- /src/hypervisor/src/hardware.rs: -------------------------------------------------------------------------------- 1 | /* diosix abstracted hardware manager 2 | * 3 | * (c) Chris Williams, 2019-2021. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use alloc::vec::Vec; 9 | use super::lock::Mutex; 10 | use platform::devices::Devices; 11 | use platform::physmem::{PhysMemBase, PhysMemSize}; 12 | use platform::timer; 13 | use super::error::Cause; 14 | 15 | lazy_static! 16 | { 17 | /* acquire HARDWARE before accessing any system hardware */ 18 | static ref HARDWARE: Mutex> = Mutex::new("hardware management", None); 19 | } 20 | 21 | /* parse_and_init 22 | Parse a device tree structure to create a base set of hardware devices. 23 | also initialize the devices so they can be used. 24 | call before using acquire_hardware_lock() to access HARDWARE. 25 | => device_tree = byte slice containing the device tree in physical memory 26 | <= return Ok for success, or error code on failure 27 | */ 28 | pub fn parse_and_init(dtb: &[u8]) -> Result<(), Cause> 29 | { 30 | match Devices::new(dtb) 31 | { 32 | Ok(dt) => 33 | { 34 | *(HARDWARE.lock()) = Some(dt); 35 | return Ok(()) 36 | }, 37 | Err(e) => 38 | { 39 | hvalert!("Unable to parse system Device Tree ({:?})", e); 40 | return Err(Cause::DeviceTreeBad); 41 | } 42 | } 43 | } 44 | 45 | /* routines to interact with the system's base devices */ 46 | 47 | /* write the string msg out to the debug logging console. 
48 | if the system is busy, return 49 | => msg = string to write out (not necessarily zero term'd) 50 | <= true if able to write, false if not */ 51 | pub fn write_debug_string(msg: &str) -> bool 52 | { 53 | /* avoid blocking if we can */ 54 | if HARDWARE.is_locked() == true 55 | { 56 | return false; 57 | } 58 | 59 | match &*(HARDWARE.lock()) 60 | { 61 | Some(d) => 62 | { 63 | d.write_debug_string(msg); 64 | true 65 | }, 66 | None => false 67 | } 68 | } 69 | 70 | /* read a single character from the debuging console, or None if none. 71 | this does not block */ 72 | pub fn read_debug_char() -> Option 73 | { 74 | /* avoid blocking on a lock if we can */ 75 | if HARDWARE.is_locked() == true 76 | { 77 | return None; 78 | } 79 | 80 | match &*(HARDWARE.lock()) 81 | { 82 | Some(d) => d.read_debug_char(), 83 | None => None 84 | } 85 | } 86 | 87 | /* return number of discovered logical CPU cores, or None if value unavailable */ 88 | pub fn get_nr_cpu_cores() -> Option 89 | { 90 | match &*(HARDWARE.lock()) 91 | { 92 | Some(d) => Some(d.get_nr_cpu_cores()), 93 | None => None 94 | } 95 | } 96 | 97 | /* return a list of the physical RAM chunks present in the system, 98 | or None if we can't read the available memory */ 99 | pub fn get_phys_ram_chunks() -> Option> 100 | { 101 | match &*(HARDWARE.lock()) 102 | { 103 | Some(d) => Some(d.get_phys_ram_areas()), 104 | None => None 105 | } 106 | } 107 | 108 | /* return total amount of physical RAM present in the system */ 109 | pub fn get_phys_ram_total() -> Option 110 | { 111 | if let Some(areas) = get_phys_ram_chunks() 112 | { 113 | let mut total = 0; 114 | for area in areas 115 | { 116 | total = total + area.size; 117 | } 118 | 119 | return Some(total); 120 | } 121 | 122 | None 123 | } 124 | 125 | /* for this CPU core, enable scheduler timer interrupt */ 126 | pub fn scheduler_timer_start() 127 | { 128 | match &*(HARDWARE.lock()) 129 | { 130 | Some(d) => d.scheduler_timer_start(), 131 | None => () 132 | }; 133 | } 134 | 135 | /* 
tell the scheduler to interrupt this core 136 | when duration number of timer ticks or sub-seconds passes */ 137 | pub fn scheduler_timer_next_in(duration: timer::TimerValue) 138 | { 139 | match &*(HARDWARE.lock()) 140 | { 141 | Some(d) => d.scheduler_timer_next_in(duration), 142 | None => () 143 | }; 144 | } 145 | 146 | /* tell the scheduler to interrupt this core when the system clock equals 147 | target value in ticks or sub-seconds as its current value */ 148 | pub fn scheduler_timer_at(target: timer::TimerValue) 149 | { 150 | match &*(HARDWARE.lock()) 151 | { 152 | Some(d) => d.scheduler_timer_at(target), 153 | None => () 154 | }; 155 | } 156 | 157 | 158 | /* get when the scheduler timer IRQ is next set to fire on this core. 159 | this is a clock-on-the-wall value: it's a number of ticks or 160 | sub-seconds since the timer started, not the duration to the next IRQ */ 161 | pub fn scheduler_get_timer_next_at() -> Option 162 | { 163 | match &*(HARDWARE.lock()) 164 | { 165 | Some(d) => d.scheduler_get_timer_next_at(), 166 | None => None 167 | } 168 | } 169 | 170 | /* get the CPU's timer frequency in Hz */ 171 | pub fn scheduler_get_timer_frequency() -> Option 172 | { 173 | match &*(HARDWARE.lock()) 174 | { 175 | Some(d) => d.scheduler_get_timer_frequency(), 176 | None => None 177 | } 178 | } 179 | 180 | /* return the timer's current value in microseconds, or None for no timer 181 | this is a clock-on-the-wall value in that it always incremements and does 182 | not reset. the underlying platform code can do what it needs to implement this */ 183 | pub fn scheduler_get_timer_now() -> Option 184 | { 185 | match &*(HARDWARE.lock()) 186 | { 187 | Some(d) => d.scheduler_get_timer_now(), 188 | None => None 189 | } 190 | } 191 | 192 | /* clone the system's base device tree blob structure so it can be passed 193 | to guest capsules. the platform code should customize the tree to ensure 194 | peripherals are virtualized. 
the platform code therefore controls what 195 | hardware is provided. the hypervisor sets how many CPUs and RAM are available. 196 | the rest is decided by the platform code. 197 | => cpus = number of virtual CPU cores in this capsule 198 | boot_cpu_id = ID of system's boot CPU (typically 0) 199 | mem_base = base physical address of the contiguous system RAM 200 | mem_size = number of bytes available in the system RAM 201 | <= returns dtb as a byte array, or an error code 202 | */ 203 | pub fn clone_dtb_for_capsule(cpus: usize, boot_cpu_id: u32, mem_base: PhysMemBase, mem_size: PhysMemSize) -> Result, Cause> 204 | { 205 | match &*(HARDWARE.lock()) 206 | { 207 | Some(d) => match d.spawn_virtual_environment(cpus, boot_cpu_id, mem_base, mem_size) 208 | { 209 | Some(v) => return Ok(v), 210 | None => return Err(Cause::DeviceTreeBad) 211 | }, 212 | None => Err(Cause::CantCloneDevices) 213 | } 214 | } -------------------------------------------------------------------------------- /src/hypervisor/src/heap.rs: -------------------------------------------------------------------------------- 1 | /* diosix heap management 2 | * 3 | * Simple heap manager. A CPU can allocate only from its own 4 | * heap pool, though it can share these pointers with any CPU. 5 | * Any CPU can free them back to the owner's heap pool when 6 | * they are done with these allocations. 7 | * 8 | * Thus this code is *single threaded* per individual CPU core 9 | * and also lock-free. 10 | * 11 | * Each CPU heap is primed with a small amount of fixed 12 | * physical RAM, defined by the platform code. When this 13 | * fixed pool runs low, the heap code requests a temporary 14 | * block of memory from the physical memory manager. 15 | * this block is added as a free block to the heap and 16 | * subsequently allocated from. 17 | * 18 | * We use Rust's memory safety features to prevent any 19 | * use-after-free(). Blocks are free()'d atomically 20 | * preventing any races. 
21 | * 22 | * This code interfaces with Rust's global allocator API 23 | * so things like vec! and Box just work. Heap is 24 | * the underlying engine for HVallocator. 25 | * 26 | * (c) Chris Williams, 2019-2021. 27 | * 28 | * See LICENSE for usage and copying. 29 | */ 30 | 31 | use core::alloc::{GlobalAlloc, Layout}; 32 | use core::ptr::null_mut; 33 | use core::mem; 34 | use core::fmt; 35 | use core::result::Result; 36 | use core::sync::atomic::{AtomicUsize, Ordering}; 37 | use platform::physmem::{PhysMemSize, PhysMemBase}; 38 | use super::physmem::{self, alloc_region, RegionHygiene}; 39 | use super::error::Cause; 40 | 41 | /* different states each recognized heap block can be in */ 42 | #[derive(PartialEq, Debug, Clone, Copy)] 43 | enum HeapBlockMagic 44 | { 45 | Free = 0x0deadded, 46 | InUse = 0x0d10c0de, 47 | BadMagic = 0xabad1dea 48 | } 49 | 50 | impl HeapBlockMagic 51 | { 52 | pub fn from_usize(value: usize) -> Self 53 | { 54 | match value 55 | { 56 | 0x0deadded => Self::Free, 57 | 0x0d10c0de => Self::InUse, 58 | _ => Self::BadMagic 59 | } 60 | } 61 | } 62 | 63 | /* source of a heap block */ 64 | #[derive(PartialEq, Debug, Clone, Copy)] 65 | enum HeapSource 66 | { 67 | Fixed, /* allocated during startup by platform code */ 68 | Temporary /* allocated dynamically from physical memory pool */ 69 | } 70 | 71 | /* to avoid fragmentation, allocate in block sizes of this multiple, including header */ 72 | const HEAP_BLOCK_SIZE: usize = 128; 73 | 74 | /* follow Rust's heap allocator API so we can drop our per-CPU allocator in and use things 75 | like Box. We allow the Rust toolchain to track and check pointers and object lifetimes, 76 | while we'll manage the underlying physical memory used by the heap. 
*/ 77 | pub struct HVallocator; 78 | 79 | unsafe impl GlobalAlloc for HVallocator 80 | { 81 | unsafe fn alloc(&self, layout: Layout) -> *mut u8 82 | { 83 | let bytes = layout.size(); 84 | 85 | match (*::this()).heap.alloc::(bytes) 86 | { 87 | Ok(p) => p, 88 | Err(e) => 89 | { 90 | hvalert!("HVallocator: request for {} bytes failed ({:?})", bytes, e); 91 | null_mut() /* yeesh */ 92 | } 93 | } 94 | } 95 | 96 | unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) 97 | { 98 | match (*::this()).heap.free::(ptr) 99 | { 100 | Err(e) => 101 | { 102 | hvalert!("HVallocator: request to free {} bytes at {:p} failed ({:?})", layout.size(), ptr, e) 103 | }, 104 | _ => () 105 | } 106 | } 107 | } 108 | 109 | /* describe the layout of a per-CPU heap block */ 110 | #[repr(C)] 111 | pub struct HeapBlock 112 | { 113 | /* heap is a single-link-list to keep it simple and safe */ 114 | next: Option<*mut HeapBlock>, 115 | /* size of this block *including* header */ 116 | size: PhysMemSize, 117 | /* define block state using magic words */ 118 | magic: AtomicUsize, 119 | /* define the source of the memory */ 120 | source: HeapSource 121 | /* block contents follows... */ 122 | } 123 | 124 | /* used to perform integrity checks */ 125 | const HEAP_MAGIC: usize = 0xcafed00d; 126 | 127 | /* this is our own internal API for the per-CPU hypervisor heap. use high-level abstractions, such as Box, 128 | rather than this directly, so we get all the safety measures and lifetime checking. think of kallocator 129 | as the API and Heap as the engine. kallocator is built on top of Heap, and each CPU core has its own Heap. 
*/ 130 | #[repr(C)] 131 | pub struct Heap 132 | { 133 | /* magic to ensure heap structure hasn't been overwritten */ 134 | magic: usize, 135 | /* pointer to list of in-use and freed blocks */ 136 | block_list_head: *mut HeapBlock, 137 | /* stash a copy of the block header size here */ 138 | block_header_size: PhysMemSize, 139 | } 140 | 141 | /* describe a heap by its totals */ 142 | pub struct HeapStats 143 | { 144 | pub free_total: usize, /* total free space in bytes */ 145 | pub alloc_total: usize, /* total bytes allocated */ 146 | pub largest_free: usize, /* largest single free block in bytes */ 147 | pub largest_alloc: usize /* largest allocated block in bytes */ 148 | } 149 | 150 | /* pretty print the heap's stats */ 151 | impl fmt::Debug for Heap 152 | { 153 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result 154 | { 155 | let stats = self.calculate_stats(); 156 | 157 | write!(f, "size: {} alloc'd {} free {} largest alloc'd {} largest free {} magic 0x{:x}", 158 | stats.alloc_total + stats.free_total, 159 | stats.alloc_total, stats.free_total, 160 | stats.largest_alloc, stats.largest_free, self.magic) 161 | } 162 | } 163 | 164 | /* clean up heap list by returning chunks of free temporary physical RAM */ 165 | macro_rules! heaphousekeeper 166 | { 167 | () => ((*::this()).heap.return_unused();) 168 | } 169 | 170 | impl Heap 171 | { 172 | /* initialize this heap area. start off with one giant block 173 | covering all of free space, from which other blocks will be carved. 174 | this initial block is assuemd to be a fixed platform-allocated area 175 | of physical memory. 
176 | => start = pointer to start of heap area 177 | size = number of available bytes in heap */ 178 | pub fn init(&mut self, start: *mut HeapBlock, size: PhysMemSize) 179 | { 180 | /* start with a free block covering the available space */ 181 | unsafe 182 | { 183 | let block = start; 184 | (*block).size = size; 185 | (*block).next = None; 186 | (*block).magic = AtomicUsize::new(HeapBlockMagic::Free as usize); 187 | (*block).source = HeapSource::Fixed; 188 | 189 | self.magic = HEAP_MAGIC; 190 | self.block_header_size = mem::size_of::(); 191 | self.block_list_head = block; 192 | } 193 | } 194 | 195 | /* insert a free physical memory block at the head of the list 196 | => base = base address of the memory block to add 197 | size = total size of the block, including header that will be automatically added 198 | <= OK or error code */ 199 | pub fn insert_free(&mut self, base: PhysMemBase, size: PhysMemSize) -> Result<(), Cause> 200 | { 201 | unsafe 202 | { 203 | /* craft free block from scratch */ 204 | let block = base as *mut HeapBlock; 205 | (*block).size = size; 206 | (*block).next = Some(self.block_list_head); 207 | (*block).magic = AtomicUsize::new(HeapBlockMagic::Free as usize); 208 | (*block).source = HeapSource::Temporary; 209 | 210 | /* add the free block to the start of the list */ 211 | self.block_list_head = block; 212 | } 213 | 214 | Ok(()) 215 | } 216 | 217 | /* free a previously allocated block 218 | => to_free = pointer previously returned by alloc() 219 | <= OK or failure code */ 220 | pub fn free(&mut self, to_free: *mut T) -> Result<(), Cause> 221 | { 222 | /* convert this into a raw pointer so we can find the heap block header */ 223 | let mut ptr = to_free as usize; 224 | ptr = ptr - self.block_header_size; 225 | let block = ptr as *mut HeapBlock; 226 | 227 | unsafe 228 | { 229 | /* we should be the only one writing to this metadata, though there 230 | will be readers, hence the split in reading and writing */ 231 | match 
HeapBlockMagic::from_usize((*block).magic.load(Ordering::SeqCst)) 232 | { 233 | HeapBlockMagic::InUse => 234 | { 235 | (*block).magic.store(HeapBlockMagic::Free as usize, Ordering::SeqCst); 236 | Ok(()) 237 | }, 238 | /* if it's not in use, or bad magic, then bail out */ 239 | HeapBlockMagic::Free => Err(Cause::HeapNotInUse), 240 | HeapBlockMagic::BadMagic => Err(Cause::HeapBadMagic) 241 | } 242 | } 243 | } 244 | 245 | /* allocate memory for the given object type. the returned pointer skips 246 | the heap block header, pointing to the available space, 247 | just like malloc() on other platforms. 248 | => T = type of object to allocate memory for 249 | num = number of objects to allocate for 250 | <= pointer to memory, or error code */ 251 | pub fn alloc(&mut self, num: usize) -> Result<*mut T, Cause> 252 | { 253 | if num == 0 254 | { 255 | return Err(Cause::HeapBadSize); 256 | } 257 | 258 | /* perform integrity check */ 259 | #[cfg(feature = "integritychecks")] 260 | { 261 | if self.magic != HEAP_MAGIC 262 | { 263 | hvalert!("CPU private heap overwritten (0x{:x}). Halting!", self.magic); 264 | loop {} 265 | } 266 | } 267 | 268 | let mut done = false; 269 | let mut extended = false; 270 | 271 | /* calculate size of block required, including header, rounded up to 272 | nearest whole heap block multiple */ 273 | let mut size_req = (mem::size_of::() * num) + self.block_header_size; 274 | size_req = ((size_req / HEAP_BLOCK_SIZE) + 1) * HEAP_BLOCK_SIZE; 275 | 276 | /* scan all blocks for first free fit */ 277 | let mut search_block = self.block_list_head; 278 | unsafe 279 | { 280 | while !done 281 | { 282 | if HeapBlockMagic::from_usize((*search_block).magic.load(Ordering::SeqCst)) == HeapBlockMagic::Free && (*search_block).size >= size_req 283 | { 284 | /* we've got a winner. 
if the found block is equal size, or only a few bytes 285 | larger than the required size, then take the whole block */ 286 | if ((*search_block).size - size_req) < HEAP_BLOCK_SIZE 287 | { 288 | (*search_block).magic.store(HeapBlockMagic::InUse as usize, Ordering::SeqCst); 289 | let found_ptr = (search_block as usize) + self.block_header_size; 290 | return Result::Ok(found_ptr as *mut T); 291 | } 292 | else 293 | { 294 | /* carve the end of a large-enough free block off to make a new block. 295 | then add this new block to the start of the list */ 296 | (*search_block).size = (*search_block).size - size_req; 297 | 298 | /* skip to the new (shorter) end of the free block */ 299 | let mut found_ptr = (search_block as usize) + (*search_block).size; 300 | 301 | /* set metadata for newly allocated block */ 302 | let alloc_block = found_ptr as *mut HeapBlock; 303 | (*alloc_block).next = Some(self.block_list_head); 304 | (*alloc_block).magic.store(HeapBlockMagic::InUse as usize, Ordering::SeqCst); 305 | (*alloc_block).size = size_req; 306 | 307 | /* point the head of the list at new block */ 308 | self.block_list_head = alloc_block; 309 | 310 | /* adjust pointer to skip the header of our new block, and we're done */ 311 | found_ptr = found_ptr + self.block_header_size; 312 | return Result::Ok(found_ptr as *mut T); 313 | } 314 | } 315 | 316 | /* make sure we don't run off the end of the list. 317 | also, attempt to consolidate neighboring blocks to make 318 | more bytes available and reduce fragmentation. 
do this 319 | after we've tried searching for available blocks */ 320 | match (*search_block).next 321 | { 322 | None => if self.consolidate() < HEAP_BLOCK_SIZE 323 | { 324 | if extended == false 325 | { 326 | /* if we can't squeeze any more bytes out of the list 327 | then grab a chunk of available RAM from the physical 328 | memory manager and add it to the free list */ 329 | let region = match alloc_region(size_req) 330 | { 331 | Ok(r) => r, 332 | Err(_e) => 333 | { 334 | /* give up and bail out if there's no more physical memory */ 335 | hvdebug!("Failed to extend heap by {} bytes: {:?}", size_req, _e); 336 | return Result::Err(Cause::HeapNoFreeMem); 337 | } 338 | }; 339 | 340 | if self.insert_free(region.base(), region.size()).is_ok() 341 | { 342 | extended = true; 343 | 344 | /* start the search over, starting with the new block */ 345 | search_block = self.block_list_head; 346 | } 347 | else 348 | { 349 | /* if we couldn't insert free block, give up */ 350 | done = true; 351 | } 352 | } 353 | else 354 | { 355 | /* can't squeeze any more out of list and we've tried allocating more 356 | physical memory. 
give up at this point, though we shouldn't really 357 | end up here */ 358 | hvdebug!("Giving up allocating {} bytes", size_req); 359 | done = true; 360 | } 361 | } 362 | else 363 | { 364 | /* start the search over */ 365 | search_block = self.block_list_head; 366 | }, 367 | Some(n) => search_block = n 368 | }; 369 | } 370 | } 371 | 372 | return Result::Err(Cause::HeapNoFreeMem); 373 | } 374 | 375 | /* deallocate any free temporary physical memory regions that are no longer needed */ 376 | pub fn return_unused(&mut self) 377 | { 378 | /* ensure all blocks are gathered up */ 379 | loop 380 | { 381 | if self.consolidate() < HEAP_BLOCK_SIZE 382 | { 383 | break; 384 | } 385 | } 386 | 387 | /* search for unused physical memory blocks to return */ 388 | let mut block = self.block_list_head; 389 | let mut prev_block: Option<*mut HeapBlock> = None; 390 | unsafe 391 | { 392 | loop 393 | { 394 | match ((*block).source, HeapBlockMagic::from_usize((*block).magic.load(Ordering::SeqCst))) 395 | { 396 | /* remove physical region from single-linked list if successfully deallocated. 397 | the physical memory manager will avoid fragmentation by rejecting regions that 398 | are not multiples of prefered region sizes */ 399 | (HeapSource::Temporary, HeapBlockMagic::Free) => 400 | { 401 | let region = physmem::Region::new(block as PhysMemBase, (*block).size, RegionHygiene::CanClean); 402 | if physmem::dealloc_region(region).is_ok() 403 | { 404 | hvdebug!("Returning heap block {:p} size {} to physical memory pool", 405 | block, (*block).size); 406 | 407 | /* delink the block - do not touch the contents of the 408 | deallocated block: it's back in the pool and another CPU core 409 | could grab it at any time. After dealloc_region() returns Ok, 410 | it's gone as far as this core is concerned. 
*/ 411 | match prev_block 412 | { 413 | Some(b) => (*b).next = (*block).next, 414 | None => () 415 | }; 416 | } 417 | }, 418 | 419 | (_, _) => () 420 | } 421 | 422 | match (*block).next 423 | { 424 | Some(n) => 425 | { 426 | prev_block = Some(block); 427 | block = n; 428 | } 429 | None => break 430 | }; 431 | } 432 | } 433 | } 434 | 435 | /* pass once over the heap and try to merge adjacent free blocks 436 | <= size of the largest block seen, in bytes including header */ 437 | fn consolidate(&mut self) -> PhysMemSize 438 | { 439 | let mut largest_merged_block: PhysMemSize = 0; 440 | 441 | let mut block = self.block_list_head; 442 | unsafe 443 | { 444 | /* can't merge if we're the last block in the list */ 445 | while (*block).next.is_some() 446 | { 447 | let next = (*block).next.unwrap(); 448 | if HeapBlockMagic::from_usize((*block).magic.load(Ordering::SeqCst)) == HeapBlockMagic::Free && 449 | HeapBlockMagic::from_usize((*next).magic.load(Ordering::SeqCst)) == HeapBlockMagic::Free 450 | { 451 | let target_ptr = (block as usize) + (*block).size; 452 | if target_ptr == next as usize 453 | { 454 | /* we're adjacent, we're both free, and we can merge */ 455 | let merged_size = (*block).size + (*next).size; 456 | if merged_size > largest_merged_block 457 | { 458 | largest_merged_block = merged_size; 459 | } 460 | (*block).size = merged_size; 461 | (*block).next = (*next).next; 462 | } 463 | } 464 | match (*block).next 465 | { 466 | Some(n) => block = n, 467 | None => break, 468 | }; 469 | } 470 | 471 | /* catch corner case of there being two free blocks: the first on the 472 | list is higher than the last block on the list, and they are both free */ 473 | if HeapBlockMagic::from_usize((*self.block_list_head).magic.load(Ordering::SeqCst)) == HeapBlockMagic::Free 474 | { 475 | match (*self.block_list_head).next 476 | { 477 | Some(next) => 478 | { 479 | if HeapBlockMagic::from_usize((*next).magic.load(Ordering::SeqCst)) == HeapBlockMagic::Free 480 | { 481 | if (next as 
usize) + (*next).size == self.block_list_head as usize 482 | { 483 | (*next).size = (*next).size + (*self.block_list_head).size; 484 | self.block_list_head = next; 485 | if (*next).size > largest_merged_block 486 | { 487 | largest_merged_block = (*next).size; 488 | } 489 | } 490 | } 491 | }, 492 | _ => () 493 | } 494 | } 495 | } 496 | 497 | return largest_merged_block; 498 | } 499 | 500 | /* generate a block of statistics describing the heap */ 501 | pub fn calculate_stats(&self) -> HeapStats 502 | { 503 | let mut free_total = 0; 504 | let mut alloc_total = 0; 505 | let mut largest_free = 0; 506 | let mut largest_alloc = 0; 507 | 508 | let mut done = false; 509 | let mut block = self.block_list_head; 510 | unsafe 511 | { 512 | while !done 513 | { 514 | let size = (*block).size; 515 | match HeapBlockMagic::from_usize((*block).magic.load(Ordering::SeqCst)) 516 | { 517 | HeapBlockMagic::InUse => 518 | { 519 | alloc_total = alloc_total + size; 520 | if size > largest_alloc 521 | { 522 | largest_alloc = size; 523 | } 524 | }, 525 | HeapBlockMagic::Free => 526 | { 527 | free_total = free_total + size; 528 | if size > largest_free 529 | { 530 | largest_free = size; 531 | } 532 | }, 533 | HeapBlockMagic::BadMagic => hvdebug!("Bad magic for heap block {:p} during audit", block) 534 | }; 535 | 536 | match (*block).next 537 | { 538 | None => done = true, 539 | Some(b) => block = b 540 | }; 541 | } 542 | } 543 | 544 | HeapStats 545 | { 546 | free_total, 547 | alloc_total, 548 | largest_alloc, 549 | largest_free 550 | } 551 | } 552 | } 553 | -------------------------------------------------------------------------------- /src/hypervisor/src/irq.rs: -------------------------------------------------------------------------------- 1 | /* diosix hypervisor code for handling hardware interrupts and software exceptions 2 | * 3 | * (c) Chris Williams, 2019-2021. 4 | * 5 | * See LICENSE for usage and copying. 
6 | */ 7 | 8 | use super::scheduler; 9 | use super::capsule; 10 | use super::pcore; 11 | use super::hardware; 12 | use super::service; 13 | use super::error::Cause; 14 | 15 | /* platform-specific code must implement all this */ 16 | use platform; 17 | use platform::irq::{IRQContext, IRQType, IRQCause, IRQSeverity, IRQ}; 18 | use platform::cpu::PrivilegeMode; 19 | use platform::instructions::{self, EmulationResult}; 20 | use platform::syscalls; 21 | use platform::timer; 22 | 23 | /* hypervisor_irq_handler 24 | entry point for hardware interrupts and software exceptions, collectively known as IRQs. 25 | call down into platform-specific handlers 26 | => context = platform-specific context of the IRQ, which may be modified depending 27 | on the IRQ raised. 28 | */ 29 | #[no_mangle] 30 | pub extern "C" fn hypervisor_irq_handler(mut context: IRQContext) 31 | { 32 | /* if dispatch() returns an IRQ context then we need to handle it here 33 | at the high level. if it returns None, the platform-specific code handled it. 
34 | note: the platform library should take care of hardware specfic things like 35 | catching illegal instructions that can be fixed up and handled transparently */ 36 | if let Some(irq) = platform::irq::dispatch(context) 37 | { 38 | match irq.irq_type 39 | { 40 | IRQType::Exception => exception(irq, &mut context), 41 | IRQType::Interrupt => interrupt(irq, &mut context), 42 | }; 43 | } 44 | } 45 | 46 | /* handle software exception */ 47 | fn exception(irq: IRQ, context: &mut IRQContext) 48 | { 49 | match (irq.severity, irq.privilege_mode, irq.cause) 50 | { 51 | /* catch illegal instructions we may be able to emulate */ 52 | (_, PrivilegeMode::User, IRQCause::IllegalInstruction) | 53 | (_, PrivilegeMode::Supervisor, IRQCause::IllegalInstruction) => 54 | { 55 | match instructions::emulate(irq.privilege_mode, context) 56 | { 57 | EmulationResult::Success => (), /* nothing more to do, return */ 58 | EmulationResult::Yield => 59 | { 60 | /* instruction was some kind of sleep or pause operation. 61 | try to find something else to run in the meantime */ 62 | scheduler::ping(); 63 | }, 64 | 65 | /* if we can't handle the instruction, 66 | kill the capsule and force a context switch. 67 | TODO: is killing the whole capsule a little extreme? 
*/ 68 | _ => fatal_exception(&irq) 69 | } 70 | }, 71 | 72 | /* catch environment calls from supervisor mode */ 73 | (_, PrivilegeMode::Supervisor, IRQCause::SupervisorEnvironmentCall) => 74 | { 75 | /* determine what we need to do from the platform code's decoding */ 76 | if let Some(action) = syscalls::handler(context) 77 | { 78 | match action 79 | { 80 | syscalls::Action::Yield => scheduler::ping(), 81 | 82 | syscalls::Action::Terminate => if let Err(_e) = capsule::destroy_current() 83 | { 84 | hvalert!("BUG: Failed to terminate currently running capsule ({:?})", _e); 85 | syscalls::failed(context, syscalls::ActionResult::Failed); 86 | } 87 | else 88 | { 89 | /* find something else to run, this virtual core is dead */ 90 | scheduler::ping(); 91 | }, 92 | 93 | syscalls::Action::Restart => if let Err(_e) = capsule::restart_current() 94 | { 95 | hvalert!("BUG: Failed to restart currently running capsule ({:?})", _e); 96 | syscalls::failed(context, syscalls::ActionResult::Failed); 97 | } 98 | else 99 | { 100 | /* find something else to run, this virtual core is being replaced */ 101 | scheduler::ping(); 102 | }, 103 | 104 | syscalls::Action::TimerIRQAt(target) => 105 | { 106 | /* mark this virtual core as awaiting a timer IRQ and 107 | schedule a timer interrupt in anticipation */ 108 | pcore::PhysicalCore::set_virtualcore_timer_target(Some(target)); 109 | hardware::scheduler_timer_at(target); 110 | }, 111 | 112 | /* output a character to the user from this capsule 113 | when a console_write capsule calls this, it writes to the console. 114 | when a non-console_write capsule calls this, it writes to its console buffer */ 115 | syscalls::Action::OutputChar(character) => if let Err(_) = capsule::putc(character) 116 | { 117 | syscalls::failed(context, syscalls::ActionResult::Failed); 118 | }, 119 | 120 | /* get a character from the user for this capsule 121 | when a console_read capsule calls this, it reads from the console. 
122 | when a non-console_read capsule calls this, it reads from its console buffer */ 123 | syscalls::Action::InputChar => match capsule::getc() 124 | { 125 | /* Linux expects getc()'s value (a character value, or -1 for none available) in 126 | the error field of the RISC-V SBI and not in the value field. FIXME: Non-portable. 127 | Ref: https://github.com/torvalds/linux/blob/master/arch/riscv/kernel/sbi.c#L92 */ 128 | Ok(c) => syscalls::result_as_error(context, c as usize), 129 | Err(Cause::CapsuleBufferEmpty) => syscalls::result_as_error(context, usize::MAX), /* -1 == nothing to read */ 130 | Err(_) => syscalls::failed(context, syscalls::ActionResult::Failed) 131 | }, 132 | 133 | /* write a character to the given capsule's console buffer. 134 | only console_write capsules can call this */ 135 | syscalls::Action::ConsoleBufferWriteChar(character, capsule_id) => match capsule::console_putc(character, capsule_id) 136 | { 137 | Ok(_) => (), 138 | Err(e) => syscalls::failed(context, match e 139 | { 140 | Cause::CapsuleBadPermissions => syscalls::ActionResult::Denied, 141 | _ => syscalls::ActionResult::Failed 142 | }) 143 | }, 144 | 145 | /* get the next available character from any capsule's console buffer 146 | only console_read capsules can call this */ 147 | syscalls::Action::ConsoleBufferReadChar => match capsule::console_getc() 148 | { 149 | Ok((character, capsule_id)) => syscalls::result_1extra(context, character as usize, capsule_id), 150 | Err(Cause::CapsuleBufferEmpty) => syscalls::result(context, usize::MAX), /* -1 == nothing to read */ 151 | Err(e) => syscalls::failed(context, match e 152 | { 153 | Cause::CapsuleBadPermissions => syscalls::ActionResult::Denied, 154 | _ => syscalls::ActionResult::Failed 155 | }) 156 | }, 157 | 158 | /* get the next available character from the hypervisor's console/log buffer 159 | only console_read capsules can call this */ 160 | syscalls::Action::HypervisorBufferReadChar => match capsule::hypervisor_getc() 161 | { 162 | 
Ok(character) => syscalls::result(context, character as usize), 163 | Err(Cause::CapsuleBufferEmpty) => syscalls::result(context, usize::MAX), /* -1 == nothing to read */ 164 | Err(e) => syscalls::failed(context, match e 165 | { 166 | Cause::CapsuleBadPermissions => syscalls::ActionResult::Denied, 167 | _ => syscalls::ActionResult::Failed 168 | }) 169 | }, 170 | 171 | /* currently running capsule wants to register itself as a service so it can receive 172 | and proces requests from other capsules */ 173 | syscalls::Action::RegisterService(stype_nr) => if let Some(cid) = pcore::PhysicalCore::get_capsule_id() 174 | { 175 | match service::usize_to_service_type(stype_nr) 176 | { 177 | Ok(stype) => match service::register(stype, cid) 178 | { 179 | Ok(_) => (), 180 | Err(e) => syscalls::failed(context, match e 181 | { 182 | Cause::CapsuleBadPermissions => syscalls::ActionResult::Denied, 183 | _ => syscalls::ActionResult::Failed 184 | }) 185 | }, 186 | Err(e) => syscalls::failed(context, match e 187 | { 188 | Cause::ServiceNotFound => syscalls::ActionResult::BadParams, 189 | _ => syscalls::ActionResult::Failed 190 | }) 191 | } 192 | } 193 | else 194 | { 195 | /* how is this possible? can't find capsule running on this physical core 196 | but we're going to try returning to it anyway? */ 197 | syscalls::failed(context, syscalls::ActionResult::Failed); 198 | }, 199 | 200 | _ => if let Some(c) = pcore::PhysicalCore::get_capsule_id() 201 | { 202 | hvalert!("Capsule {}: Unhandled syscall: {:x?} at 0x{:x}", c, action, irq.pc); 203 | } 204 | else 205 | { 206 | hvdebug!("Unhandled syscall: {:x?} at 0x{:x} in unknown capsule", action, irq.pc); 207 | } 208 | } 209 | } 210 | }, 211 | 212 | /* catch everything else, halting if fatal */ 213 | (severity, privilege, cause) => 214 | { 215 | /* if an unhandled fatal exception reaches us here from the supervisor or user mode, 216 | kill the capsule. 
if the hypervisor can't handle its own fatal exception, give up */ 217 | match privilege 218 | { 219 | PrivilegeMode::Supervisor | PrivilegeMode::User => if severity == IRQSeverity::Fatal 220 | { 221 | /* TODO: is it wise to blow away the whole capsule for a user exception? 222 | the supervisor should really catch its user-level faults */ 223 | fatal_exception(&irq); 224 | }, 225 | PrivilegeMode::Machine => 226 | { 227 | if severity == IRQSeverity::Fatal 228 | { 229 | hvalert!("Halting physical CPU core for {:?} at 0x{:x}, stack 0x{:x} integrity {:?}", 230 | cause, irq.pc, irq.sp, pcore::PhysicalCore::integrity_check()); 231 | debughousekeeper!(); // flush the debug output 232 | loop {} 233 | } 234 | } 235 | } 236 | } 237 | } 238 | } 239 | 240 | /* handle hardware interrupt */ 241 | fn interrupt(irq: IRQ, _: &mut IRQContext) 242 | { 243 | match irq.cause 244 | { 245 | IRQCause::MachineTimer => 246 | { 247 | /* make a scheduling decision and raise any supervior-level timer IRQs*/ 248 | scheduler::ping(); 249 | check_supervisor_timer_irq(); 250 | }, 251 | _ => hvdebug!("Unhandled hardware interrupt: {:?}", irq.cause) 252 | } 253 | 254 | /* clear the interrupt condition */ 255 | platform::irq::acknowledge(irq); 256 | } 257 | 258 | /* is the virtual core we're about to run awaiting a timer IRQ? 
259 | if so, and if its timer target value has been passed, generate a pending timer IRQ */ 260 | fn check_supervisor_timer_irq() 261 | { 262 | if let Some(target) = pcore::PhysicalCore::get_virtualcore_timer_target() 263 | { 264 | match (hardware::scheduler_get_timer_now(), hardware::scheduler_get_timer_frequency()) 265 | { 266 | (Some(time), Some(freq)) => 267 | { 268 | let current = time.to_exact(freq); 269 | if current >= target.to_exact(freq) 270 | { 271 | /* create a pending timer IRQ for the supervisor kernel and clear the target */ 272 | timer::trigger_supervisor_irq(); 273 | pcore::PhysicalCore::set_virtualcore_timer_target(None); 274 | } 275 | }, 276 | (_, _) => () 277 | } 278 | } 279 | } 280 | 281 | /* kill the running capsule, alert the user, and then find something else to run. 282 | if the capsule is important enough to auto-restart-on-crash, try to revive it */ 283 | fn fatal_exception(irq: &IRQ) 284 | { 285 | hvalert!("Terminating running capsule {} for {:?} at 0x{:x}, stack 0x{:x}", 286 | match pcore::PhysicalCore::this().get_virtualcore_id() 287 | { 288 | Some(id) => format!("{}.{}", id.capsuleid, id.vcoreid), 289 | None => format!("[unknown!]") 290 | }, irq.cause, irq.pc, irq.sp); 291 | 292 | let mut terminate = false; // when true, destroy the current capsule 293 | let mut reschedule = false; // when true, we must find another vcore to run 294 | 295 | match capsule::is_current_autorestart() 296 | { 297 | Some(true) => 298 | { 299 | hvalert!("Restarting capsule due to auto-restart-on-crash flag"); 300 | if let Err(err) = capsule::restart_current() 301 | { 302 | hvalert!("Can't restart capsule ({:?}), letting it die instead", err); 303 | terminate = true; 304 | } 305 | else 306 | { 307 | /* the current vcore is no longer running due to restart */ 308 | reschedule = true; 309 | } 310 | }, 311 | Some(false) => terminate = true, 312 | None => 313 | { 314 | hvalert!("BUG: fatal_exception() can't find the running capsule to kill"); 315 | return; 316 | 
}, 317 | } 318 | 319 | if terminate == true 320 | { 321 | match capsule::destroy_current() 322 | { 323 | Err(e) => hvalert!("BUG: Failed to kill running capsule ({:?})", e), 324 | _ => 325 | { 326 | hvdebug!("Terminated running capsule"); 327 | 328 | /* the current vcore is no longer running due to restart */ 329 | reschedule = true; 330 | } 331 | } 332 | } 333 | 334 | if reschedule == true 335 | { 336 | /* force a context switch to find another virtual core to run 337 | because this virtual core no longer exists */ 338 | scheduler::ping(); 339 | } 340 | } -------------------------------------------------------------------------------- /src/hypervisor/src/loader.rs: -------------------------------------------------------------------------------- 1 | /* diosix high-level hypervisor's loader code for supervisor binaries 2 | * 3 | * Parses and loads supervisor-level binaries. It can perform basic 4 | * dynamic relocation, though not dynamic linking (yet). This 5 | * means guest kernels and system services 6 | * It supports ELF and may support other formats in future. 7 | * 8 | * (c) Chris Williams, 2019-2021. 9 | * 10 | * See LICENSE for usage and copying. 11 | */ 12 | 13 | #![allow(non_camel_case_types)] 14 | 15 | use super::error::Cause; 16 | use platform::cpu::Entry; 17 | use super::physmem::Region; 18 | use core::mem::size_of; 19 | use xmas_elf; 20 | 21 | /* supported CPU architectures */ 22 | #[derive(Debug)] 23 | enum CPUArch 24 | { 25 | /* see https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#elf-object-file */ 26 | RISC_V 27 | } 28 | 29 | /* supported ELF dynamic relocation types */ 30 | const R_RISCV_RELATIVE: u8 = 3; 31 | 32 | /* xmas-elf is great but it doesn't help you out when you want to access Dynamic 33 | structs without duplicating a load of code for P32 and P64, hence this macro 34 | to wrap it up in one place */ 35 | macro_rules! 
get_abs_reloc_table
{
    ($dynstructs:ident) => {
        {
            /* scan the ELF Dynamic structs for the absolute relocation (RELA)
               table's base offset, total byte size, and per-entry byte size */
            let mut base = None;
            let mut size = None;
            let mut entry_size = None;

            for dynstruct in $dynstructs
            {
                if let Ok(tag) = &dynstruct.get_tag()
                {
                    match tag
                    {
                        /* defines the base offset of the absolute relocation table */
                        xmas_elf::dynamic::Tag::Rela => if let Ok(ptr) = &dynstruct.get_ptr()
                        {
                            base = Some(*ptr as usize);
                        },
                        /* defines the total size of the absolute relocation table */
                        xmas_elf::dynamic::Tag::RelaSize => if let Ok(val) = &dynstruct.get_val()
                        {
                            size = Some(*val as usize);
                        },
                        /* defines the size of each absolute relocation table entry */
                        xmas_elf::dynamic::Tag::RelaEnt => if let Ok(val) = &dynstruct.get_val()
                        {
                            entry_size = Some(*val as usize);
                        },
                        _ => ()
                    }
                }
            }

            (base, size, entry_size)
        }};
}

/* load a supervisor binary into memory as required
   => target = region of RAM to write into
      source = slice containing supervisor binary image to parse
   <= entry point in physical RAM if successful, or error code
   NOTE(review): extraction stripped this file's generic parameters; the return
   type is restored as Result<Entry, Cause> with Entry (platform::cpu) assumed
   to be a usize-wide physical address — TODO confirm against platform code */
pub fn load(target: Region, source: &[u8]) -> Result<Entry, Cause>
{
    /* parse the ELF header, rejecting unreadable binaries */
    let elf = match xmas_elf::ElfFile::new(source)
    {
        Ok(elf) => elf,
        Err(s) =>
        {
            hvalert!("Failed to parse supervisor ELF (source physical RAM base {:p}, size {} MiB): {}",
                source, source.len() / 1024 / 1024, s);

            return Err(Cause::LoaderUnrecognizedSupervisor);
        }
    };

    /* get the processor target */
    let cpu = match elf.header.pt2.machine().as_machine()
    {
        xmas_elf::header::Machine::RISC_V => CPUArch::RISC_V,
        _ => return Err(Cause::LoaderUnrecognizedCPUArch)
    };

    /* the ELF binary defines the entry point as a virtual address. we'll be loading the ELF
       somewhere in physical RAM. we have to translate that address to a physical one */
    let mut entry_physical: Option<Entry> = None;
    let entry_virtual = elf.header.pt2.entry_point();

    /* we need to copy parts of the supervisor from the source to the target location in physical RAM.
       turn the region into a set of variables we can use */
    let target_base = target.base() as u64;
    let target_end = target.end() as u64;
    let target_size = target.size() as u64;
    let target_as_bytes = target.as_u8_slice();
    let target_as_words = target.as_usize_slice();

    /* loop through program headers in the binary */
    for ph_index in 0..elf.header.pt2.ph_count()
    {
        match &elf.program_header(ph_index)
        {
            Ok(ph) =>
            {
                match ph.get_type()
                {
                    /* copy an area in the binary from the source to the target RAM region */
                    Ok(xmas_elf::program::Type::Load) =>
                    {
                        /* reject binaries with load area file sizes greater than their mem sizes */
                        if ph.file_size() > ph.mem_size()
                        {
                            return Err(Cause::LoaderSupervisorFileSizeTooLarge);
                        }

                        /* we're loading the header into an arbitrary-located block of physical RAM.
                           we can't use the virtual address. we'll use the physical address as an offset
                           from target_base. FIXME: is this correct? what else can we use? */
                        let offset_into_image = ph.offset();
                        let offset_into_target = ph.physical_addr();
                        let copy_size = ph.file_size();

                        /* reject wild offsets and physical addresses */
                        if (offset_into_image + copy_size) > source.len() as u64
                        {
                            return Err(Cause::LoaderSupervisorBadImageOffset);
                        }
                        if (offset_into_target + copy_size) > target_size
                        {
                            return Err(Cause::LoaderSupervisorBadPhysOffset);
                        }

                        /* is this program header home to the entry point? if so, calculate the
                           physical RAM address. assumes the entry point is a virtual address.
                           FIXME: is there a better way of handling this? */
                        if entry_virtual >= ph.virtual_addr() && entry_virtual < ph.virtual_addr() + ph.mem_size()
                        {
                            let addr = (entry_virtual - ph.virtual_addr()) + target_base + offset_into_target;
                            if addr >= target_end
                            {
                                /* reject wild entry points */
                                return Err(Cause::LoaderSupervisorEntryOutOfRange);
                            }
                            entry_physical = Some(addr as usize);
                        }

                        /* do the copy */
                        target_as_bytes[offset_into_target as usize..(offset_into_target + copy_size) as usize].copy_from_slice
                        (
                            &source[(offset_into_image as usize)..(offset_into_image + copy_size) as usize]
                        );
                    },

                    /* support basic PIC ELFs by fixing up values in memory as instructed */
                    Ok(xmas_elf::program::Type::Dynamic) =>
                    {
                        /* support absolute relocation tables -- tables of memory locations to patch up
                           based on where the ELF is loaded */
                        let (rela_tbl_base, rela_tbl_size, rela_tbl_entry_size) = match ph.get_data(&elf)
                        {
                            Ok(d) => match d
                            {
                                xmas_elf::program::SegmentData::Dynamic32(dynstructs) => get_abs_reloc_table!(dynstructs),
                                xmas_elf::program::SegmentData::Dynamic64(dynstructs) => get_abs_reloc_table!(dynstructs),
                                _ => (None, None, None)
                            },
                            /* fail binaries with bad metadata */
                            Err(_) => return Err(Cause::LoaderSupervisorBadDynamicArea)
                        };

                        /* if present, parse the absolute relocation table */
                        if let (Some(rela_tbl_base), Some(rela_tbl_size), Some(rela_tbl_entry_size)) =
                            (rela_tbl_base, rela_tbl_size, rela_tbl_entry_size)
                        {
                            /* fail binaries with bad metadata */
                            if (rela_tbl_base + rela_tbl_size) as u64 > target_size
                            {
                                return Err(Cause::LoaderSupervisorRelaTableTooBig);
                            }
                            if rela_tbl_entry_size == 0
                            {
                                return Err(Cause::LoaderSupervisorBadRelaEntrySize);
                            }

                            /* if these values are not word-aligned, loading will eventually gracefully fail */
                            let rela_tbl_nr_entries = rela_tbl_size / rela_tbl_entry_size;
                            let rela_tbl_words_per_entry = rela_tbl_entry_size / size_of::<usize>();
                            let rela_tbl_index_into_target = rela_tbl_base / size_of::<usize>();

                            /* read each absolute relocation table entry. layout is three machine words:
                               [0] = offset into the target to alter
                               [1] = type of relocation to apply
                               [2] = value needed to compute the final relocation value */
                            for entry_nr in 0..rela_tbl_nr_entries
                            {
                                let index = rela_tbl_index_into_target + (entry_nr * rela_tbl_words_per_entry);
                                let offset = target_as_words.get(index + 0);
                                let info = target_as_words.get(index + 1);
                                let addend = target_as_words.get(index + 2);

                                match (offset, info, addend)
                                {
                                    (Some(&o), Some(&i), Some(&a)) =>
                                    {
                                        /* different CPU architectures have different relocation rules.
                                           relocation type is in the lower byte of the info word */
                                        match (&cpu, (i & 0xff) as u8)
                                        {
                                            /* absolute value relocation: patch the word with addend + load base */
                                            (CPUArch::RISC_V, R_RISCV_RELATIVE) =>
                                            {
                                                let word_to_alter = o / size_of::<usize>();
                                                if let Some(word) = target_as_words.get_mut(word_to_alter)
                                                {
                                                    *word = a + target.base();
                                                }
                                                else
                                                {
                                                    /* give up on malformed binaries */
                                                    return Err(Cause::LoaderSupervisorBadRelaTblEntry);
                                                }
                                            },
                                            (_, _) =>
                                            {
                                                hvdebug!("Unknown {:?} ELF relocation type {:x}", &cpu, i);
                                                return Err(Cause::LoaderSupervisorUnknownRelaType);
                                            }
                                        }
                                    },
                                    (_, _, _) => return Err(Cause::LoaderSupervisorBadRelaTblEntry)
                                }
                            }
                        }
                    },
                    _ => ()
                }
            },
            _ => break
        };
    }

    /* the entry point must have fallen inside a loaded area or the binary is unusable */
    match entry_physical
    {
        None => Err(Cause::LoaderBadEntry),
        Some(entry) => Ok(entry)
    }
}
--------------------------------------------------------------------------------
/src/hypervisor/src/lock.rs:
--------------------------------------------------------------------------------
/* diosix high-level hypervisor's locking primitives
 *
 * Provides a standard spin lock and a mutex
 *
 * The mutex is reentrant, which means when a physical
 * core holds a mutex and then tries to acquire it
 * again, this operation will succeed.
 *
 * this is so that, eg, if a core holds a mutex
 * and is interrupted, it can regain access
 * to the locked data, use it, and release it.
 *
 * use lock() to acquire a mutex.
 * it is unlocked when it goes out of scope.
 * the mutex also maintains accounting stats
 * and is named to aid debugging.
 *
 * (c) Chris Williams, 2021.
 *
 * See LICENSE for usage and copying.
21 | */ 22 | 23 | use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 24 | use core::cell::UnsafeCell; 25 | use core::ops::{Deref, DerefMut}; 26 | use super::pcore::PhysicalCore; 27 | 28 | /* if a lock() call spins more than DEADLOCK_THRESHOLD times 29 | then it's considered a deadlocked mutex */ 30 | const DEADLOCK_THRESHOLD: usize = 1000000; 31 | 32 | /* define a snip lock primitive */ 33 | pub struct SpinLock 34 | { 35 | lock: AtomicBool 36 | } 37 | 38 | impl SpinLock 39 | { 40 | pub fn new() -> SpinLock 41 | { 42 | SpinLock { lock: AtomicBool::new(false) } 43 | } 44 | 45 | /* spin until the lock value == must_equal, and then atomically do lock value = new_value */ 46 | fn spin(&self, must_equal: bool, new_value: bool) 47 | { 48 | loop 49 | { 50 | if self.lock.compare_exchange(must_equal, new_value, Ordering::Acquire, Ordering::Relaxed) == Ok(must_equal) 51 | { 52 | return; 53 | } 54 | } 55 | } 56 | 57 | /* acquire the lock, and block until successful */ 58 | pub fn lock(&self) 59 | { 60 | self.spin(false, true); 61 | } 62 | 63 | /* release the lock */ 64 | pub fn unlock(&self) 65 | { 66 | self.spin(true, false); 67 | } 68 | } 69 | 70 | pub struct Mutex 71 | { 72 | /* the data we're protecting */ 73 | content: UnsafeCell, 74 | 75 | /* owner_lock protects owned and owner. 76 | if the owned is false, then the mutex is considered free. 77 | if the owned is true, the mutex is considered held by a physical core whose ID == owner */ 78 | owner_lock: SpinLock, 79 | owned: AtomicBool, 80 | owner: AtomicUsize, 81 | 82 | /* accounting */ 83 | lock_attempts: AtomicUsize, 84 | lock_count: AtomicUsize, 85 | description: &'static str 86 | } 87 | 88 | /* Mutex uses the same API as std's Mutex. Create a Mutex using new() and then 89 | call lock() to block until mutex successfully acquired. 
Drop the mutex guard to release */
impl<T> Mutex<T>
{
    /* create a new unheld mutex protecting the given data
       => description = short name used in debug/deadlock messages
          data = value to place under the lock */
    pub fn new(description: &'static str, data: T) -> Mutex<T>
    {
        Mutex
        {
            content: UnsafeCell::new(data),
            owner_lock: SpinLock::new(),
            owned: AtomicBool::new(false),
            owner: AtomicUsize::new(0),
            lock_attempts: AtomicUsize::new(0),
            lock_count: AtomicUsize::new(0),
            description
        }
    }

    /* spin until ready to return reference to protected data.
       reentrant: if this physical core already owns the mutex, lock() succeeds immediately.
       NOTE(review): extraction stripped this file's generic parameters; the
       impl/guard type parameters are restored here — confirm against upstream */
    pub fn lock(&self) -> MutexGuard<'_, T>
    {
        let mut attempts = 0;

        let this_pcore_id = PhysicalCore::get_id();
        loop
        {
            /* hold the spin lock while checking the metadata */
            self.owner_lock.lock();
            self.lock_attempts.fetch_add(1, Ordering::Relaxed);
            attempts = attempts + 1;
            if attempts == DEADLOCK_THRESHOLD
            {
                /* report (once) that we've spun suspiciously long */
                hvdebug!("BUG: {} mutex ({:p}) may be deadlocked", self.description, &self.content);
            }

            /* determine if the mutex is available, or may even
               already be held by this physical core */
            if self.owned.load(Ordering::SeqCst) == false
            {
                /* lock is available so claim it */
                self.owned.store(true, Ordering::SeqCst);
                self.owner.store(this_pcore_id, Ordering::SeqCst);
                break;
            }
            else
            {
                /* mutex is already held though this pcore may own it anyway */
                if self.owner.load(Ordering::SeqCst) == this_pcore_id
                {
                    break;
                }
            }

            /* give another core a chance to acquire the mutex */
            self.owner_lock.unlock();
        }

        /* don't forget to unlock the metadata
           before returning a reference to the content */
        self.lock_count.fetch_add(1, Ordering::Relaxed);
        self.owner_lock.unlock();
        MutexGuard { mutex: self }
    }

    /* unlock the mutex (called from the guard's drop handler) */
    fn unlock(&self)
    {
        self.owner_lock.lock();
        self.owned.store(false, Ordering::SeqCst);
        self.owner_lock.unlock();
    }

    /* return true if the mutex is locked, or false if not */
    pub fn is_locked(&self) -> bool
    {
        self.owner_lock.lock();
        let locked = self.owned.load(Ordering::SeqCst);
        self.owner_lock.unlock();
        locked
    }
}

/* pretty print a mutex's stats */
impl<T> core::fmt::Debug for MutexGuard<'_, T>
{
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result
    {
        write!(f, "{} attempts to acquire {}, {} succeeded",
            self.mutex.lock_attempts.load(Ordering::Relaxed),
            self.mutex.description,
            self.mutex.lock_count.load(Ordering::Relaxed))
    }
}

/* RAII guard: holds the mutex until dropped */
pub struct MutexGuard<'a, T>
{
    mutex: &'a Mutex<T>,
}

impl<T> Deref for MutexGuard<'_, T>
{
    type Target = T;

    fn deref(&self) -> &Self::Target
    {
        unsafe { &*self.mutex.content.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T>
{
    fn deref_mut(&mut self) -> &mut Self::Target
    {
        unsafe { &mut *self.mutex.content.get() }
    }
}

impl<T> Drop for MutexGuard<'_, T>
{
    fn drop(&mut self)
    {
        self.mutex.unlock()
    }
}

/* keep rustc happy */
unsafe impl<T> Send for Mutex<T> where T: Send {}
unsafe impl<T> Sync for Mutex<T> where T: Send {}
unsafe impl<T> Send for MutexGuard<'_, T> where T: Send {}
unsafe impl<T> Sync for MutexGuard<'_, T> where T: Send + Sync {}
--------------------------------------------------------------------------------
/src/hypervisor/src/main.rs:
--------------------------------------------------------------------------------
/* diosix hypervisor main entry code
 *
 * (c) Chris Williams, 2019-2021.
 *
 * See LICENSE for usage and copying.
6 | */ 7 | 8 | /* let the compiler know we're on our own here in bare-metal world */ 9 | #![no_std] 10 | #![no_main] 11 | #![feature(asm)] 12 | 13 | #![allow(dead_code)] 14 | #![allow(unused_unsafe)] 15 | #![allow(improper_ctypes)] 16 | #![feature(type_ascription)] 17 | 18 | /* provide a framework for unit testing */ 19 | #![feature(custom_test_frameworks)] 20 | #![test_runner(crate::run_tests)] 21 | #![reexport_test_harness_main = "hvtests"] /* entry point for tests */ 22 | 23 | /* plug our custom heap allocator into the Rust language: Box, etc */ 24 | #![feature(alloc_error_handler)] 25 | #![feature(box_syntax)] 26 | #[allow(unused_imports)] 27 | #[macro_use] 28 | extern crate alloc; 29 | 30 | /* needed to convert raw dtb pointer into a slice */ 31 | use core::slice; 32 | 33 | /* needed for fast lookup tables of stuff */ 34 | extern crate hashbrown; 35 | 36 | /* needed for elf parsing */ 37 | extern crate xmas_elf; 38 | 39 | /* needed for device tree parsing and manipulation */ 40 | extern crate devicetree; 41 | 42 | /* needed for parsing diosix manifest file-system (DMFS) images bundled with the hypervisor */ 43 | extern crate dmfs; 44 | 45 | /* needed for lazyily-allocated static variables */ 46 | #[macro_use] 47 | extern crate lazy_static; 48 | 49 | /* this will bring in all the hardware-specific code */ 50 | extern crate platform; 51 | 52 | /* and now for all our non-hw specific code */ 53 | #[macro_use] 54 | mod debug; /* get us some kind of debug output, typically to a serial port */ 55 | #[macro_use] 56 | mod capsule; /* manage capsules */ 57 | #[macro_use] 58 | mod heap; /* per-CPU private heap management */ 59 | #[macro_use] 60 | mod physmem; /* manage host physical memory */ 61 | mod hardware; /* parse device trees into hardware objects */ 62 | mod panic; /* implement panic() handlers */ 63 | mod irq; /* handle hw interrupts and sw exceptions, collectively known as IRQs */ 64 | mod virtmem; /* manage capsule virtual memory */ 65 | mod pcore; /* manage 
CPU cores */ 66 | mod vcore; /* virtual CPU core management... */ 67 | mod scheduler; /* ...and scheduling */ 68 | mod loader; /* parse and load supervisor binaries */ 69 | mod message; /* send messages between physical cores */ 70 | mod service; /* allow capsules to register services */ 71 | mod manifest; /* manage capsules loaded with the hypervisor */ 72 | 73 | /* needed for exclusive locks */ 74 | mod lock; 75 | use lock::Mutex; 76 | 77 | /* list of error codes */ 78 | mod error; 79 | use error::Cause; 80 | 81 | use pcore::{PhysicalCoreID, BOOT_PCORE_ID}; 82 | 83 | /* tell Rust to use our HVallocator to allocate and free heap memory. 84 | although we'll keep track of physical memory, we'll let Rust perform essential 85 | tasks, such as dropping objects when it's no longer needed, borrow checking, etc */ 86 | #[global_allocator] 87 | static HV_HEAP: heap::HVallocator = heap::HVallocator; 88 | 89 | lazy_static! 90 | { 91 | /* set to true to allow physical CPU cores to start running supervisor code */ 92 | static ref INIT_DONE: Mutex = Mutex::new("system bring-up", false); 93 | 94 | /* a physical CPU core obtaining this lock when it is false must walk the DMFS, create 95 | capsules required to run at boot time, and set the flag to true. any other core 96 | obtaining it as true must release the lock and move on */ 97 | static ref MANIFEST_UNPACKED: Mutex = Mutex::new("dmfs unpacked", false); 98 | 99 | /* set to true if individual cores can sound off their presence and capabilities */ 100 | static ref ROLL_CALL: Mutex = Mutex::new("CPU roll call", false); 101 | } 102 | 103 | /* pointer sizes: stick to usize as much as possible: don't always assume it's a 64-bit machine */ 104 | 105 | /* hventry 106 | This is the official entry point of the Rust-level hypervisor. 107 | Call hvmain, which is where all the real work happens, and catch any errors. 
=> cpu_nr = this boot-assigned CPU ID number
      dtb_ptr = pointer to start of device tree blob structure
      dtb_len = 32-bit big-endian length of the device tree blob
   <= return to infinite loop, awaiting interrupts */
#[no_mangle]
pub extern "C" fn hventry(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32)
{
    /* carry out tests if that's what we're here for */
    #[cfg(test)]
    hvtests();

    /* if not performing tests, start the system as normal. any startup failure
       is reported; either way we fall through and wait for an IRQ to come in */
    if let Err(e) = hvmain(cpu_nr, dtb_ptr, dtb_len)
    {
        hvalert!("Hypervisor failed to start. Reason: {:?}", e);
        debughousekeeper!(); /* attempt to flush queued debug to output */
    }
}

/* hvmain
   This code runs at the hypervisor level, with full physical memory access.
   Its job is to initialize physical CPU cores and other resources so that capsules can be
   created in which supervisors run that manage their own user spaces, in which
   applications run. The hypervisor ensures capsules are kept apart using
   hardware protections.

   Assumes all physical CPU cores enter this function during startup.
   The boot CPU is chosen to initialize the system in pre-SMP mode.
   If we're on a single CPU core then everything should still run OK.
   Assumes hardware and exception interrupts are enabled and handlers
   installed.

   Also assumes all CPU cores are compatible ISA-wise. There is provision
   for marking some cores as more powerful than others for systems with
   a mix of performance and efficiency CPU cores.

   => cpu_nr = arbitrary CPU core ID number assigned by boot code,
      separate from hardware ID number.
      BOOT_PCORE_ID = boot CPU core.
151 | dtb_ptr = pointer to device tree in memory from bootlaoder 152 | dtb_len = 32-bit big endian size of the device tree 153 | <= return to infinite loop, waiting for interrupts 154 | */ 155 | fn hvmain(cpu_nr: PhysicalCoreID, dtb_ptr: *const u8, dtb_len: u32) -> Result<(), Cause> 156 | { 157 | /* set up each physical processor core with its own private heap pool and any other resources. 158 | each private pool uses physical memory assigned by the pre-hvmain boot code. init() should be called 159 | first thing to set up each processor core, including the boot CPU, which then sets up the global 160 | resources. all non-boot CPUs should wait until global resources are ready. */ 161 | pcore::PhysicalCore::init(cpu_nr); 162 | 163 | /* note that pre-physmem::init(), CPU cores rely on their pre-hventry()-assigned 164 | heap space. after physmem::init(), CPU cores can extend their heaps using physical memory. 165 | the hypervisor will become stuck pre-physmem::init() if it goes beyond its assigned heap space. */ 166 | 167 | match cpu_nr 168 | { 169 | /* delegate to boot CPU the welcome banner and set up global resources. 170 | note: the platform code should ensure whichever CPU core is assigned 171 | BOOT_PCORE_ID as its cpu_nr can initialize the hypervisor */ 172 | BOOT_PCORE_ID => 173 | { 174 | /* convert the dtb pointer into a rust byte slice. assumes dtb_len is valid */ 175 | let dtb = unsafe { slice::from_raw_parts(dtb_ptr, u32::from_be(dtb_len) as usize) }; 176 | 177 | /* process device tree to create data structures representing system hardware, 178 | allowing these peripherals to be accessed by subsequent routines. 
this should 179 | also initialize any found hardware */ 180 | hardware::parse_and_init(dtb)?; 181 | 182 | /* register all the available physical RAM */ 183 | physmem::init()?; 184 | describe_system(); 185 | 186 | /* allow other cores to continue */ 187 | *(INIT_DONE.lock()) = true; 188 | }, 189 | 190 | /* non-boot cores must wait here for early initialization to complete */ 191 | _ => while *(INIT_DONE.lock()) != true {} 192 | } 193 | 194 | /* Create capsules to run from the bundled DMFS image. 195 | the hypervisor can't make any assumptions about the underlying hardware. 196 | the device tree for these early capsules is derived from the host's device tree, 197 | modified to virtualize peripherals. the virtual CPU cores will based on the 198 | physical CPU core that creates it. this is more straightforward than the hypervisor 199 | trying to specify a hypothetical CPU core 200 | 201 | as such, only allow supervisor-mode capable CPU cores to build capasules */ 202 | if pcore::PhysicalCore::smode_supported() == true 203 | { 204 | /* only allow one core to do the unpacking */ 205 | let mut flag = MANIFEST_UNPACKED.lock(); 206 | 207 | if *flag == false 208 | { 209 | /* process the manifest and mark it as handled */ 210 | manifest::unpack_at_boot()?; 211 | *flag = true; 212 | 213 | /* allow all working cores to join the roll call */ 214 | *(ROLL_CALL.lock()) = true; 215 | } 216 | } 217 | 218 | /* once ROLL_CALL is set to true, acknowledge we're alive and well, and report CPU core features */ 219 | while *(ROLL_CALL.lock()) != true {} 220 | hvdebug!("Physical CPU core {:?} ready to roll", pcore::PhysicalCore::describe()); 221 | 222 | /* enable timer on this physical CPU core to start scheduling and running virtual cores */ 223 | scheduler::start()?; 224 | 225 | /* initialization complete. fall through to infinite loop waiting for a timer interrupt 226 | to come in. 
when it does fire, this stack will be flattened, a virtual CPU loaded up to run,
       and this boot thread will disappear. thus, the call to start() should be the last thing
       this boot thread does */
    Ok(())
}

/* dump system information to the user: version, CPU core count, and total RAM,
   formatted via the debug port. purely informational, no side effects beyond output */
fn describe_system()
{
    const KILOBYTE: usize = 1024;
    const MEGABYTE: usize = KILOBYTE * KILOBYTE;
    const GIGABYTE: usize = KILOBYTE * MEGABYTE;

    /* say hello via the debug port with some information */
    hvdebug!("Diosix {} :: Debug enabled. {} and {} RAM found",

        /* build version number */
        env!("CARGO_PKG_VERSION"),

        /* report number of CPU cores found */
        match hardware::get_nr_cpu_cores()
        {
            None | Some(0) => format!("no CPU cores"),
            Some(1) => format!("1 CPU core"),
            Some(c) => format!("{} CPU cores", c)
        },

        /* count up total system RAM using GiB / MiB / KiB */
        match hardware::get_phys_ram_total()
        {
            Some(t) => if t >= GIGABYTE
            {
                format!("{} GiB", t / GIGABYTE)
            }
            else if t >= MEGABYTE
            {
                format!("{} MiB", t / MEGABYTE)
            }
            else
            {
                format!("{} KiB", t / KILOBYTE)
            },

            None => format!("no")
        });
}

/* mandatory error handler for memory allocations */
#[alloc_error_handler]
fn hvalloc_error(attempt: core::alloc::Layout) -> !
{
    /* NOTE(review): extraction stripped an angle-bracketed path here; restored as a
       dereference of this physical core's descriptor to report its heap state —
       TODO confirm against upstream source */
    let heap = &(*pcore::PhysicalCore::this()).heap;
    hvalert!("hvalloc_error: Failed to allocate/free {} bytes. 
Heap: {:?}", attempt.size(), heap); 279 | debughousekeeper!(); 280 | loop {} /* it would be nice to be able to not die here :( */ 281 | } 282 | 283 | /* perform all unit tests required */ 284 | #[cfg(test)] 285 | fn run_tests(unit_tests: &[&dyn Fn()]) 286 | { 287 | /* run each test one by one */ 288 | for test in unit_tests 289 | { 290 | test(); 291 | } 292 | 293 | /* exit cleanly once tests are complete */ 294 | platform::test::end(Ok(0)); 295 | } 296 | 297 | #[test_case] 298 | fn test_assertion() 299 | { 300 | assert_eq!(42, 42); 301 | } -------------------------------------------------------------------------------- /src/hypervisor/src/manifest.rs: -------------------------------------------------------------------------------- 1 | /* diosix hypervisor manifest file-system management 2 | * 3 | * (c) Chris Williams, 2020-2021. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use super::physmem; 9 | use super::error::Cause; 10 | use super::capsule; 11 | use super::hardware; 12 | use super::loader; 13 | use super::virtmem::Mapping; 14 | use super::vcore::Priority; 15 | use dmfs::{ManifestImageIter, ManifestObject, ManifestObjectType, ManifestObjectData}; 16 | use alloc::string::String; 17 | use alloc::vec::Vec; 18 | 19 | /* bring in the built-in dmfs image */ 20 | use core::slice; 21 | use core::intrinsics::transmute; 22 | extern "C" 23 | { 24 | static _binary_dmfs_img_start: u8; 25 | static _binary_dmfs_img_size: u8; 26 | } 27 | 28 | /* convert the included dmfs image into a byte slice */ 29 | macro_rules! 
get_dmfs_image 30 | { 31 | () => 32 | { 33 | unsafe 34 | { 35 | slice::from_raw_parts 36 | ( 37 | transmute(&_binary_dmfs_img_start), 38 | transmute(&_binary_dmfs_img_size) 39 | ) 40 | } 41 | } 42 | } 43 | 44 | /* return a list of a DMFS image's asset names and descriptions 45 | <= array of (names, descriptions) of image's assets */ 46 | pub fn list_assets() -> Result, Cause> 47 | { 48 | let image = get_dmfs_image!(); 49 | let manifest = match ManifestImageIter::from_slice(image) 50 | { 51 | Ok(m) => m, 52 | Err(_) => return Err(Cause::ManifestBadFS) 53 | }; 54 | 55 | let mut list: Vec<(String, String)> = Vec::new(); 56 | for asset in manifest 57 | { 58 | list.push((asset.get_name(), asset.get_description())); 59 | } 60 | 61 | Ok(list) 62 | } 63 | 64 | /* look up an asset from the given DMFS image by its name */ 65 | pub fn get_named_asset(name: &str) -> Result 66 | { 67 | let image = get_dmfs_image!(); 68 | let manifest = match ManifestImageIter::from_slice(image) 69 | { 70 | Ok(m) => m, 71 | Err(_) => return Err(Cause::ManifestBadFS) 72 | }; 73 | 74 | for asset in manifest 75 | { 76 | /* sadly no simple strcmp() in rust? 
*/ 77 | if asset.get_name().as_str().starts_with(name) == true && asset.get_name().len() == name.len() 78 | { 79 | return Ok(asset); 80 | } 81 | } 82 | 83 | Err(Cause::ManifestNoSuchAsset) 84 | } 85 | 86 | /* parse the hypervisor's bundled manifest, creating services and capsules as required, 87 | and output any included boot banner messages, during system start up */ 88 | pub fn unpack_at_boot() -> Result<(), Cause> 89 | { 90 | let image = get_dmfs_image!(); 91 | let manifest = match ManifestImageIter::from_slice(image) 92 | { 93 | Ok(m) => m, 94 | Err(_) => return Err(Cause::ManifestBadFS) 95 | }; 96 | 97 | for asset in manifest 98 | { 99 | match asset.get_type() 100 | { 101 | /* only unpack and process boot messages and system services at startup */ 102 | ManifestObjectType::BootMsg => load_asset(asset)?, 103 | ManifestObjectType::SystemService => load_asset(asset)?, 104 | ManifestObjectType::GuestOS => load_asset(asset)?, 105 | _ => () 106 | } 107 | } 108 | 109 | Ok(()) 110 | } 111 | 112 | /* process the given asset, such as printing it to the debug output stream if it's a boot message 113 | or parsing it and running it if it's an executable, from the given DMFS image 114 | => asset = manifest asset to parse and process into memory 115 | */ 116 | pub fn load_asset(asset: ManifestObject) -> Result<(), Cause> 117 | { 118 | let image = get_dmfs_image!(); 119 | let properties = asset.get_properties(); 120 | let content = match asset.get_contents() 121 | { 122 | ManifestObjectData::Bytes(b) => b.as_slice(), 123 | ManifestObjectData::Region(r) => &image[r.start..r.end] 124 | }; 125 | 126 | match asset.get_type() 127 | { 128 | /* print the included boot message */ 129 | ManifestObjectType::BootMsg => 130 | { 131 | hvdebugraw!("\r\n{}\r\n\r\n", String::from_utf8_lossy(content)); 132 | debughousekeeper!(); /* ensure the message is seen */ 133 | }, 134 | 135 | /* create and run a system service */ 136 | ManifestObjectType::SystemService => match 
create_capsule_from_exec(content, Some(properties)) 137 | { 138 | Ok(cid) => hvdebug!("Created system service {} ({}) {} bytes (capsule {})", 139 | asset.get_name(), asset.get_description(), asset.get_contents_size(), cid), 140 | Err(_e) => hvdebug!("Failed to create capsule for system service {}: {:?}", asset.get_name(), _e) 141 | }, 142 | 143 | /* create an included guest OS (which does not have any special permissions) */ 144 | ManifestObjectType::GuestOS => match create_capsule_from_exec(content, None) 145 | { 146 | Ok(cid) => hvdebug!("Created guest OS {} ({}) {} bytes (capsule {})", 147 | asset.get_name(), asset.get_description(), asset.get_contents_size(), cid), 148 | Err(_e) => hvdebug!("Failed to create capsule for system service {}: {:?}", asset.get_name(), _e) 149 | }, 150 | 151 | t => hvdebug!("Found manifest object type {:?}", t) 152 | } 153 | 154 | Ok(()) 155 | } 156 | 157 | /* create a capsule from an executable in a DMFS image 158 | => binary = slice containing the executable to parse and load 159 | properties = permissions and other properties to grant the capsule, or None 160 | <= Ok with capsule ID, or an error code 161 | */ 162 | fn create_capsule_from_exec(binary: &[u8], properties: Option>) -> Result 163 | { 164 | /* assign one virtual CPU core to the capsule */ 165 | let cpus = 1; 166 | 167 | /* create capsule with the given properties */ 168 | let capid = capsule::create(properties, cpus)?; 169 | 170 | /* reserve 256MB of physical RAM for the capsule */ 171 | let size = 256 * 1024 * 1024; 172 | let ram = physmem::alloc_region(size)?; 173 | 174 | /* create device tree blob for the virtual hardware available to the guest 175 | capsule and copy into the end of the region's physical RAM.
176 | a zero-length DTB indicates something went wrong */ 177 | let guest_dtb = hardware::clone_dtb_for_capsule(cpus, 0, ram.base(), ram.size())?; 178 | if guest_dtb.len() == 0 179 | { 180 | return Err(Cause::BootDeviceTreeBad); 181 | } 182 | let guest_dtb_base = ram.fill_end(guest_dtb)?; 183 | 184 | /* map that physical RAM into the capsule */ 185 | let mut mapping = Mapping::new(); 186 | mapping.set_physical(ram); 187 | mapping.identity_mapping()?; 188 | capsule::map_memory(capid, mapping)?; 189 | 190 | /* parse + copy the capsule's binary into its physical RAM */ 191 | let entry = loader::load(ram, binary)?; 192 | 193 | /* create virtual CPU cores for the capsule as required */ 194 | for vcoreid in 0..cpus 195 | { 196 | capsule::add_vcore(capid, vcoreid, entry, guest_dtb_base, Priority::High)?; 197 | } 198 | 199 | Ok(capid) 200 | } -------------------------------------------------------------------------------- /src/hypervisor/src/message.rs: -------------------------------------------------------------------------------- 1 | /* diosix hypervisor's system for passing messages between physical CPU cores and services 2 | * 3 | * (c) Chris Williams, 2019-2020. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use super::lock::Mutex; 9 | use alloc::collections::vec_deque::VecDeque; 10 | use alloc::string::String; 11 | use hashbrown::hash_map::HashMap; 12 | use super::error::Cause; 13 | use super::service::{self, ServiceType}; 14 | use super::capsule::CapsuleID; 15 | use super::pcore::{PhysicalCoreID, PhysicalCore}; 16 | 17 | /* here's how message passing works, depending on the target: 18 | * To an individual physical core: 19 | 1. locate the physical core's message queue in MAILBOXES 20 | 2. insert the message at the end of the queue 21 | 3. interrupt the physical CPU core to check its mailbox 22 | * To all physical cores: 23 | 1. iterate over each physical core in MAILBOXES 24 | 2. 
insert a copy of the message in the message queue of each physical CPU 25 | 3. interrupt each physical CPU core to check its mailbox 26 | * To a service registered by a capsule: 27 | 1. locate the service's mailbox 28 | 2. insert the message into the mailbox 29 | 3. raise an interrupt or wait for the capsule to poll the mailbox 30 | */ 31 | 32 | /* maintain a mailbox of messages per physical CPU core */ 33 | lazy_static! 34 | { 35 | static ref MAILBOXES: Mutex>> = Mutex::new("mailbox", HashMap::new()); 36 | } 37 | 38 | /* create a mailbox for physical CPU core coreid */ 39 | pub fn create_mailbox(coreid: PhysicalCoreID) 40 | { 41 | MAILBOXES.lock().insert(coreid, VecDeque::::new()); 42 | } 43 | 44 | #[derive(Clone)] 45 | pub enum Sender 46 | { 47 | PhysicalCore(PhysicalCoreID), 48 | Capsule(CapsuleID), 49 | Hypervisor 50 | } 51 | 52 | #[derive(Clone, Copy)] 53 | pub enum Recipient 54 | { 55 | Broadcast, /* send to all physical CPU cores */ 56 | PhysicalCore(PhysicalCoreID), /* send to a single physical CPU core */ 57 | Service(ServiceType) /* send to a single registered service */ 58 | } 59 | 60 | impl Recipient 61 | { 62 | /* broadcast message to all physical cores */ 63 | pub fn send_to_all() -> Recipient { Recipient::Broadcast } 64 | 65 | /* send to a particular physical core */ 66 | pub fn send_to_pcore(id: PhysicalCoreID) -> Recipient 67 | { 68 | Recipient::PhysicalCore(id) 69 | } 70 | 71 | /* send to a particular capsule-hosted service */ 72 | pub fn send_to_service(stype: ServiceType) -> Recipient 73 | { 74 | Recipient::Service(stype) 75 | } 76 | } 77 | 78 | #[derive(Clone, Debug)] 79 | pub enum MessageContent 80 | { 81 | HypervisorDebugStr(String), 82 | CapsuleConsoleStr(String), 83 | DisownQueuedVirtualCore 84 | } 85 | 86 | #[derive(Clone)] 87 | pub struct Message 88 | { 89 | sender: Sender, 90 | receiver: Recipient, 91 | data: MessageContent 92 | } 93 | 94 | impl Message 95 | { 96 | /* create a new message 97 | => recv = end point to send the message to 
98 | data = message to send to the recipient 99 | <= returns message structure 100 | */ 101 | pub fn new(recv: Recipient, data: MessageContent) -> Result 102 | { 103 | Ok(Message 104 | { 105 | receiver: recv, 106 | 107 | /* determine sender from message type */ 108 | sender: match data 109 | { 110 | MessageContent::HypervisorDebugStr(_) => Sender::Hypervisor, 111 | MessageContent::CapsuleConsoleStr(_) => match PhysicalCore::get_capsule_id() 112 | { 113 | Some(id) => Sender::Capsule(id), 114 | None => 115 | { 116 | hvdebug!("BUG: Sending {:?} from non-existent capsule", data); 117 | return Err(Cause::CapsuleBadID); 118 | } 119 | }, 120 | MessageContent::DisownQueuedVirtualCore => Sender::PhysicalCore(PhysicalCore::get_id()) 121 | }, 122 | 123 | data 124 | }) 125 | } 126 | 127 | pub fn get_receiver(&self) -> Recipient 128 | { 129 | self.receiver 130 | } 131 | } 132 | 133 | /* send the given message msg, consuming it so it can't be reused or resent */ 134 | pub fn send(msg: Message) -> Result<(), Cause> 135 | { 136 | let receiver = msg.receiver; 137 | match receiver 138 | { 139 | /* iterate over all physical CPU cores */ 140 | Recipient::Broadcast => 141 | { 142 | for (_, mailbox) in MAILBOXES.lock().iter_mut() 143 | { 144 | mailbox.push_back(msg.clone()) 145 | } 146 | }, 147 | 148 | /* send to a particular physical CPU core */ 149 | Recipient::PhysicalCore(pid) => 150 | { 151 | if let Some(mailbox) = MAILBOXES.lock().get_mut(&pid) 152 | { 153 | mailbox.push_back(msg); 154 | } 155 | else 156 | { 157 | return Err(Cause::PhysicalCoreBadID); 158 | } 159 | }, 160 | 161 | /* send to a service */ 162 | Recipient::Service(_) => 163 | { 164 | return service::send(msg); 165 | } 166 | }; 167 | 168 | Ok(()) 169 | } 170 | -------------------------------------------------------------------------------- /src/hypervisor/src/panic.rs: -------------------------------------------------------------------------------- 1 | /* diosix high-level hypervisor panic code 2 | * 3 | * (c) Chris 
Williams, 2019-2020. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use core::panic::PanicInfo; 9 | 10 | /* we need to provide these */ 11 | #[panic_handler] 12 | pub fn panic(info: &PanicInfo) -> ! 13 | { 14 | if cfg!(test) 15 | { 16 | /* signal to test environment we failed */ 17 | platform::test::end(Err(1)); 18 | } 19 | else 20 | { 21 | /* try to inform the user what went wrong */ 22 | hvalert!("Rust runtime panicked unexpectedly"); 23 | match info.location() 24 | { 25 | Some(location) => 26 | { 27 | hvalert!("... crashed in {}: {}", location.file(), location.line()) 28 | }, 29 | None => hvalert!("... crash location unknown") 30 | }; 31 | } 32 | 33 | /* just halt here */ 34 | debughousekeeper!(); 35 | loop {} 36 | } 37 | -------------------------------------------------------------------------------- /src/hypervisor/src/pcore.rs: -------------------------------------------------------------------------------- 1 | /* diosix hypervisor's physical CPU core management 2 | * 3 | * (c) Chris Williams, 2019-2021. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | /* Physical CPUs get their own private heaps to manage. Crucially, allocated memory 9 | blocks can be shared by other CPUs. Any CPU can free any block, returning 10 | it to its owner's heap pool. When allocating, a CPU can only draw from 11 | its own heap, reusing any blocks freed by itself or other cores. 12 | 13 | The hypervisor layer is unlikely to do much active allocation 14 | so it's OK to keep it really simple for now. 
*/ 15 | 16 | use super::lock::Mutex; 17 | use hashbrown::hash_map::HashMap; 18 | use platform::physmem::PhysMemSize; 19 | use platform::cpu::{SupervisorState, CPUFeatures}; 20 | use platform::timer; 21 | use super::vcore::{VirtualCore, VirtualCoreCanonicalID}; 22 | use super::scheduler::ScheduleQueues; 23 | use super::capsule::{self, CapsuleID}; 24 | use super::message; 25 | use super::heap; 26 | 27 | /* physical CPU core IDs and count */ 28 | pub type PhysicalCoreID = usize; 29 | pub type PhysicalCoreCount = PhysicalCoreID; 30 | 31 | pub const BOOT_PCORE_ID: PhysicalCoreID = 0; 32 | const PCORE_MAGIC: usize = 0xc001c0de; 33 | 34 | /* require some help from the underlying platform */ 35 | extern "C" 36 | { 37 | fn platform_cpu_private_variables() -> &'static mut PhysicalCore; 38 | fn platform_cpu_heap_base() -> *mut heap::HeapBlock; 39 | fn platform_cpu_heap_size() -> PhysMemSize; 40 | fn platform_save_supervisor_state(state: &SupervisorState); 41 | fn platform_load_supervisor_state(state: &SupervisorState); 42 | } 43 | 44 | lazy_static! 45 | { 46 | /* map running virtual CPU cores to physical CPU cores, and vice-versa 47 | we can't store these in Core structs because it upsets Rust's borrow checker. 48 | note: PCORES keeps track of the last physical CPU core to run a given virtual 49 | core. this is more of a hint than a concrete guarantee: the virtual core 50 | may have been scheduled away, though it should be in the last physical 51 | CPU core's scheduling queue. */ 52 | static ref VCORES: Mutex> = Mutex::new("physical-virtual core table", HashMap::new()); 53 | static ref PCORES: Mutex> = Mutex::new("physical-virtual core ID table", HashMap::new()); 54 | } 55 | 56 | /* describe a physical CPU core - this structure is stored in the per-CPU private variable space. 
57 | this is below the per-CPU machine-level stack */ 58 | #[repr(C)] 59 | pub struct PhysicalCore 60 | { 61 | /* this magic word is used to make sure the CPU's stack hasn't overflowed 62 | and corrupted this adjacent structure */ 63 | magic: usize, 64 | 65 | /* every physical CPU core has a hardware-assigned ID number that may be non-linear, 66 | while the startup code assigns each core a linear ID number from zero. we keep a copy of that 67 | linear runtime-assigned ID here. the hardware-assigned ID is not used in the portable code */ 68 | id: PhysicalCoreID, 69 | 70 | /* platform-defined bitmask of ISA features this core provides. if a virtual core has a features bit set that 71 | is unset in a physical core's feature bitmask, the virtual core will not be allowed to run on that physical core */ 72 | features: CPUFeatures, 73 | 74 | /* each physical CPU core gets its own heap that it can share, but it must manage its own */ 75 | pub heap: heap::Heap, 76 | 77 | /* each physical CPU gets its own set of queues of virtual CPU cores to schedule */ 78 | queues: ScheduleQueues, 79 | 80 | /* can this run guest operating systems? or is it a system management core? true if it can run 81 | supervisor-mode code, false if not */ 82 | smode: bool, 83 | 84 | /* set when this physical core CPU core last ran a scheduling decision */ 85 | timer_sched_last: Option, 86 | 87 | /* set to true when the vcore running on this physical core is doomed. 88 | that means it's in a capsule that was restarted or killed and 89 | must not be saved after a context switch */ 90 | vcore_doomed: bool 91 | } 92 | 93 | impl PhysicalCore 94 | { 95 | /* intiialize a physical CPU core. Prepare it for running supervisor code. 96 | => id = diosix-assigned CPU core ID at boot time. this is separate from the hardware-assigned 97 | ID number, which may be non-linear. 
the runtime-generated core ID will 98 | run from zero to N-1 where N is the number of available cores */ 99 | pub fn init(id: PhysicalCoreID) 100 | { 101 | /* the pre-hvmain startup code has allocated space for per-CPU core variables. 102 | this function returns a pointer to that structure */ 103 | let mut cpu = PhysicalCore::this(); 104 | 105 | cpu.magic = PCORE_MAGIC; 106 | cpu.id = id; 107 | cpu.features = platform::cpu::features(); 108 | cpu.smode = platform::cpu::features_priv_check(platform::cpu::PrivilegeMode::Supervisor); 109 | cpu.timer_sched_last = None; 110 | cpu.vcore_doomed = false; 111 | 112 | let (heap_ptr, heap_size) = PhysicalCore::get_heap_config(); 113 | cpu.heap.init(heap_ptr, heap_size); 114 | 115 | cpu.queues = ScheduleQueues::new(); 116 | message::create_mailbox(id); 117 | } 118 | 119 | /* return pointer to the calling CPU core's fixed private data structure */ 120 | pub fn this() -> &'static mut PhysicalCore 121 | { 122 | unsafe { platform_cpu_private_variables() } 123 | } 124 | 125 | /* return Ok if magic hasn't been overwritten, or the overwrite value as an error code */ 126 | pub fn integrity_check() -> Result<(), usize> 127 | { 128 | match PhysicalCore::this().magic 129 | { 130 | PCORE_MAGIC => Ok(()), 131 | other => Err(other) 132 | } 133 | } 134 | 135 | /* return CPU heap base and size set aside by the pre-hvmain boot code */ 136 | fn get_heap_config() -> (*mut heap::HeapBlock, PhysMemSize) 137 | { 138 | unsafe { (platform_cpu_heap_base(), platform_cpu_heap_size()) } 139 | } 140 | 141 | /* return boot-assigned ID number */ 142 | pub fn get_id() -> PhysicalCoreID { PhysicalCore::this().id } 143 | 144 | /* return features bitmask */ 145 | pub fn get_features() -> CPUFeatures { PhysicalCore::this().features } 146 | 147 | /* return a structure describing this core */ 148 | pub fn describe() -> platform::cpu::CPUDescription { platform::cpu::CPUDescription } 149 | 150 | /* return a virtual CPU core awaiting to run on this physical CPU core 
*/ 151 | pub fn dequeue() -> Option 152 | { 153 | PhysicalCore::this().queues.dequeue() 154 | } 155 | 156 | /* move a virtual CPU core onto this physical CPU's queue of virtual cores to run */ 157 | pub fn queue(to_queue: VirtualCore) 158 | { 159 | PhysicalCore::this().queues.queue(to_queue) 160 | } 161 | 162 | /* return true if able to run supervisor code. a system management core 163 | that cannot or is not expected to run guest workloads should return false */ 164 | pub fn smode_supported() -> bool 165 | { 166 | PhysicalCore::this().smode 167 | } 168 | 169 | /* return ID of capsule of the virtual CPU core this physical CPU core is running, or None for none */ 170 | pub fn get_capsule_id() -> Option 171 | { 172 | if let Some(vcore) = VCORES.lock().get(&PhysicalCore::get_id()) 173 | { 174 | Some(vcore.get_capsule_id()) 175 | } 176 | else 177 | { 178 | None 179 | } 180 | } 181 | 182 | /* mark the running vcore as doomed, meaning after it's context switched out, 183 | drop it. this is useful when killing or restarting capsules, and 184 | the current set of vcores needs to be flushed from the scheduling system */ 185 | pub fn doom_vcore(&mut self) { self.vcore_doomed = true; } 186 | 187 | /* ensure the running vcore is not doomed */ 188 | pub fn approve_vcore(&mut self) { self.vcore_doomed = false; } 189 | 190 | /* return true if vcore is doomed, ie: must be discarded */ 191 | pub fn is_vcore_doomed(&self) -> bool { self.vcore_doomed } 192 | 193 | /* update the running virtual core's timer IRQ target. we have to do this here because 194 | the virtual core is held in a locked data structure. leaving this function relocks 195 | the structure. 
it's unsafe to access the vcore struct */ 196 | pub fn set_virtualcore_timer_target(target: Option) 197 | { 198 | if let Some(vcore) = VCORES.lock().get_mut(&(PhysicalCore::get_id())) 199 | { 200 | vcore.set_timer_irq_at(target); 201 | } 202 | } 203 | 204 | /* get the virtual core's timer IRQ target */ 205 | pub fn get_virtualcore_timer_target() -> Option 206 | { 207 | if let Some(vcore) = VCORES.lock().get_mut(&(PhysicalCore::get_id())) 208 | { 209 | return vcore.get_timer_irq_at(); 210 | } 211 | None 212 | } 213 | 214 | /* return canonical ID for the virtual core running in the capsule on this CPU, if any */ 215 | pub fn get_virtualcore_id(&self) -> Option 216 | { 217 | let cid = match PhysicalCore::get_capsule_id() 218 | { 219 | Some(id) => id, 220 | None => return None 221 | }; 222 | 223 | let vid = match VCORES.lock().get(&PhysicalCore::get_id()) 224 | { 225 | Some(vcore) => vcore.get_id(), 226 | None => return None 227 | }; 228 | 229 | Some(VirtualCoreCanonicalID 230 | { 231 | capsuleid: cid, 232 | vcoreid: vid 233 | }) 234 | } 235 | 236 | /* set the exact per-CPU timer value of the last time this physical core make a scheduling decision */ 237 | pub fn set_timer_sched_last(&mut self, value: Option) 238 | { 239 | self.timer_sched_last = value; 240 | } 241 | 242 | /* get the exact per-CPU timer value of the last time this physical core make a scheduling decision */ 243 | pub fn get_timer_sched_last(&mut self) -> Option 244 | { 245 | self.timer_sched_last 246 | } 247 | } 248 | 249 | /* save current virtual CPU core's context, if we're running one, and load next virtual core's context. 
250 | this should be called from an IRQ context as it preserves the interrupted code's context 251 | and overwrites the context with the next virtual core's context, so returning to supervisor 252 | mode will land us in the new context */ 253 | pub fn context_switch(next: VirtualCore) 254 | { 255 | let next_capsule = next.get_capsule_id(); 256 | let pcore_id = PhysicalCore::get_id(); 257 | 258 | /* find what this physical core was running, if anything */ 259 | match VCORES.lock().remove(&pcore_id) 260 | { 261 | Some(mut current_vcore) => 262 | { 263 | let current_capsule = current_vcore.get_capsule_id(); 264 | 265 | /* if we're switching to a virtual CPU core in another capsule then replace the 266 | current hardware access permissions so that we're only allowing access to the RAM assigned 267 | to the next capsule to run */ 268 | if current_capsule != next_capsule 269 | { 270 | capsule::enforce(next_capsule); 271 | } 272 | 273 | /* if the current virtual core isn't doomed, queue the vcore 274 | on the waiting list. if it is doomed, drop it */ 275 | if PhysicalCore::this().is_vcore_doomed() == false 276 | { 277 | /* handle core and FP registers separately to keep rust borrow checker happy with current_vcore */ 278 | platform::cpu::save_supervisor_cpu_state(current_vcore.state_as_mut_ref()); 279 | platform::cpu::save_supervisor_fp_state(current_vcore.fp_state_as_mut_ref()); 280 | PhysicalCore::queue(current_vcore); 281 | } 282 | else 283 | { 284 | drop(current_vcore); 285 | } 286 | }, 287 | None => 288 | { 289 | /* if we were not running a virtual CPU core then ensure we return to supervisor mode 290 | rather than hypervisor mode */ 291 | platform::cpu::prep_supervisor_return(); 292 | /* and enforce its hardware access permissions */ 293 | capsule::enforce(next_capsule); 294 | } 295 | } 296 | 297 | /* prepare next virtual core to run when we leave this IRQ context. 
298 | this takes care of core registers and FP registers in one */ 299 | platform::cpu::load_supervisor_cpu_fp_state 300 | ( 301 | next.state_as_ref(), 302 | next.fp_state_as_ref() 303 | ); 304 | 305 | /* link next virtual core and capsule to this physical CPU */ 306 | PCORES.lock().insert(VirtualCoreCanonicalID 307 | { 308 | vcoreid: next.get_id(), 309 | capsuleid: next_capsule 310 | }, 311 | pcore_id); 312 | 313 | /* and add the virtual core to the running virtual cores set. 314 | the previous vcore entry will be dropped */ 315 | VCORES.lock().insert(pcore_id, next); 316 | 317 | /* and ensure this switched-in vcore is not doomed */ 318 | PhysicalCore::this().approve_vcore(); 319 | } -------------------------------------------------------------------------------- /src/hypervisor/src/physmem.rs: -------------------------------------------------------------------------------- 1 | /* diosix hypervisor physical memory management 2 | * 3 | * allocate/free contiguous regions of physical memory. 4 | * these regions are categorized into two groups, 5 | * depending on the region size. 6 | * 7 | * large: >= PHYS_RAM_LARGE_REGION_MIN_SIZE 8 | * large regions are sized in multiples of 9 | * PHYS_RAM_LARGE_REGION_MIN_SIZE and are allocated 10 | * from the top of free region blocks, descending. 11 | * these are aimed at large blocks of contiguous 12 | * memory for guest supervisor OSes. 13 | * 14 | * small: < PHYS_RAM_LARGE_REGION_MIN_SIZE 15 | * small regions are sized in multiples of 16 | * PHYS_RAM_SMALL_REGION_MIN_SIZE and are allocated 17 | * from the bottom of free region blocks, ascending. 18 | * these are aimed at small blocks of memory 19 | * for the hypervisor's private per-CPU heaps. 20 | * 21 | * this arrangement is to avoid large and small 22 | * allocations fragmenting free region blocks 23 | * 24 | * (c) Chris Williams, 2019-2021. 25 | * 26 | * See LICENSE for usage and copying. 
27 | */ 28 | 29 | use platform; 30 | use super::lock::Mutex; 31 | use alloc::vec::Vec; 32 | use platform::physmem::{PhysMemBase, PhysMemEnd, PhysMemSize, AccessPermissions, validate_ram}; 33 | use super::error::Cause; 34 | use super::hardware; 35 | 36 | /* needed to convert a region into a slice */ 37 | use core::slice; 38 | 39 | /* to avoid fragmentation, round up physical memory region allocations into multiples of these totals, 40 | depending on the region type. this only applies when creating regions with alloc_region() */ 41 | const PHYS_RAM_LARGE_REGION_MIN_SIZE: PhysMemSize = 64 * 1024 * 1024; /* 64MB ought to be enough for anyone */ 42 | const PHYS_RAM_SMALL_REGION_MIN_SIZE: PhysMemSize = 1 * 1024 * 1024; /* smaller blocks are multiples of 1MB in size */ 43 | 44 | /* ensure large region bases are aligned down to multiples of this value 45 | note: region minimum size must be a non-zero multiple of region base alignment */ 46 | const PHYS_RAM_LARGE_REGION_ALIGNMENT: PhysMemSize = 4 * 1024 * 1024; /* 4MB alignment */ 47 | 48 | /* define whether to split a region N bytes from the top or from the bottom */ 49 | #[derive(Clone, Copy, Debug)] 50 | pub enum RegionSplit 51 | { 52 | FromBottom, 53 | FromTop 54 | } 55 | 56 | /* define whether a region is dirty or clean */ 57 | #[derive(Clone, Copy, Debug)] 58 | pub enum RegionHygiene 59 | { 60 | DontClean, /* don't zero this region */ 61 | CanClean 62 | } 63 | 64 | /* describe a physical memory region */ 65 | #[derive(Copy, Clone)] 66 | pub struct Region 67 | { 68 | base: PhysMemBase, 69 | size: PhysMemSize, 70 | hygiene: RegionHygiene 71 | } 72 | 73 | impl Region 74 | { 75 | /* create a new region */ 76 | pub fn new(base: PhysMemBase, size: PhysMemSize, hygiene: RegionHygiene) -> Region 77 | { 78 | Region 79 | { 80 | base, 81 | size, 82 | hygiene 83 | } 84 | } 85 | 86 | /* scrub a whole region. FIXME: make this fast and efficient! 
87 | Note: this only zeroes the region in release mode to avoid delays 88 | in debugging/development with slow region zeroing */ 89 | pub fn clean(&mut self) 90 | { 91 | match self.hygiene 92 | { 93 | RegionHygiene::DontClean => 94 | { 95 | hvalert!("BUG: Tried to scrub don't-clean region 0x{:x}", self.base); 96 | return; 97 | }, 98 | RegionHygiene::CanClean => 99 | { 100 | #[cfg(not(debug_assertions))] 101 | self.as_u8_slice().fill(0x0); 102 | } 103 | } 104 | } 105 | 106 | /* fill the end of a region with an array of bytes. thus if the array is 10 bytes long, 107 | the final 10 bytes of the region will be filled from that array, ascending 108 | => bytes = array to write into the region 109 | <= physical address for start of array in the region, or error code */ 110 | pub fn fill_end(&self, bytes: Vec) -> Result 111 | { 112 | let array_size = bytes.len(); 113 | 114 | /* ensure we've got enough space to accommodate the array */ 115 | if self.size < array_size 116 | { 117 | return Err(Cause::PhysRegionTooSmall) 118 | } 119 | 120 | for index in 0..array_size 121 | { 122 | unsafe 123 | { 124 | *(((self.base + self.size) - (array_size - index)) as *mut u8) = bytes[index]; 125 | } 126 | } 127 | 128 | Ok((self.base + self.size) - array_size) 129 | } 130 | 131 | /* allow the currently running supervisor kernel to access this region of physical memory */ 132 | pub fn grant_access(&self) 133 | { 134 | platform::physmem::protect(self.base, self.base + self.size, AccessPermissions::ReadWriteExecute); 135 | } 136 | 137 | /* return or change attributes */ 138 | pub fn base(&self) -> PhysMemBase { self.base } 139 | pub fn end(&self) -> PhysMemEnd { self.base + self.size } 140 | pub fn size(&self) -> PhysMemSize { self.size } 141 | 142 | /* represent the region as a word-size or byte-size slice 143 | **use carefully** don't hold a slice over an IRQ, for example */ 144 | pub fn as_usize_slice(&self) -> &mut [usize] 145 | { 146 | unsafe { slice::from_raw_parts_mut(self.base as *mut
usize, self.size) } 147 | } 148 | pub fn as_u8_slice(&self) -> &mut [u8] 149 | { 150 | unsafe { slice::from_raw_parts_mut(self.base as *mut u8, self.size) } 151 | } 152 | 153 | /* split the region into two portions, lower and upper, and return the two portions. 154 | maintain the region's hygiene. 155 | => count = split the region this number of bytes into the block 156 | measure_from = FromBottom: count is number of bytes from bottom of the block, ascending 157 | FromTop: count is number of bytes from the top of the block, descending 158 | <= return two portions as regions, lower and upper, or a failure code */ 159 | pub fn split(&self, count: PhysMemSize, measure_from: RegionSplit) -> Result<(Region, Region), Cause> 160 | { 161 | /* check the split mark is within bounds */ 162 | if count > self.size 163 | { 164 | return Err(Cause::PhysRegionSplitOutOfBounds); 165 | } 166 | 167 | /* return (lower, upper) */ 168 | Ok(match measure_from 169 | { 170 | RegionSplit::FromBottom => 171 | ( 172 | Region::new(self.base, count, self.hygiene), 173 | Region::new(self.base + count, self.size - count, self.hygiene) 174 | ), 175 | 176 | RegionSplit::FromTop => 177 | ( 178 | Region::new(self.base, self.size - count, self.hygiene), 179 | Region::new(self.base + self.size - count, count, self.hygiene) 180 | ), 181 | }) 182 | } 183 | } 184 | 185 | /* gather up all physical RAM areas from which future capsule and heap physical 186 | RAM allocations will be drawn into the REGIONS list. this list is built from 187 | available, free physical RAM: it must *not* include any RAM areas already in use by 188 | the hypervisor, boot supervisor image, peripherals, etc. the underlying 189 | platform code needs to exclude those off-limits areas. 190 | 191 | this list must also be sorted, by base address, lowest first. this is so that 192 | adjoining regions can be merged into one. this list also contains only free 193 | and available regions. 
if a region is in use, it must be removed from the list. */ 194 | lazy_static! 195 | { 196 | /* acquire REGIONS lock before accessing any physical RAM regions */ 197 | static ref REGIONS: Mutex = Mutex::new("RAM regions", SortedRegions::new()); 198 | } 199 | 200 | /* implement a sorted list of regions */ 201 | struct SortedRegions 202 | { 203 | regions: Vec 204 | } 205 | 206 | impl SortedRegions 207 | { 208 | /* create an empty list */ 209 | pub fn new() -> SortedRegions 210 | { 211 | SortedRegions 212 | { 213 | regions: Vec::new() 214 | } 215 | } 216 | 217 | /* find a region that has a size equal to or greater than the required size. 218 | if one is found, remove the region and return it. if one can't be found, 219 | return an error code. */ 220 | pub fn find(&mut self, required_size: PhysMemSize) -> Result 221 | { 222 | for index in 0..self.regions.len() 223 | { 224 | if self.regions[index].size() >= required_size 225 | { 226 | /* remove from the list and return */ 227 | return Ok(self.regions.remove(index)); 228 | } 229 | } 230 | 231 | Err(Cause::PhysRegionNoMatch) /* can't find a region large enough */ 232 | } 233 | 234 | /* insert a region into the list, sorted by base addresses, lowest first */ 235 | pub fn insert(&mut self, to_insert: Region) -> Result<(), Cause> 236 | { 237 | /* ignore zero-size inserts */ 238 | if to_insert.size() == 0 239 | { 240 | return Ok(()) 241 | } 242 | 243 | for index in 0..self.regions.len() 244 | { 245 | if to_insert.end() <= self.regions[index].base() 246 | { 247 | self.regions.insert(index, to_insert); 248 | return Ok(()) 249 | } 250 | 251 | /* check to make sure we're not adding a region that will collide with another */ 252 | if to_insert.base() >= self.regions[index].base() && to_insert.base() < self.regions[index].end() 253 | { 254 | return Err(Cause::PhysRegionCollision); 255 | } 256 | } 257 | 258 | /* insert at the end: region greater than all others */ 259 | self.regions.push(to_insert); 260 | Ok(()) 261 | } 262 | 263 | 
/* merge all adjoining free regions. this requires the list to be sorted by base address ascending */ 264 | pub fn merge(&mut self) 265 | { 266 | let mut cursor = 0; 267 | loop 268 | { 269 | /* prevent search from going out of bounds */ 270 | if (cursor + 1) >= self.regions.len() 271 | { 272 | break; 273 | } 274 | 275 | if self.regions[cursor].end() == self.regions[cursor + 1].base() 276 | { 277 | /* absorb the next region's size into this region */ 278 | self.regions[cursor].size = self.regions[cursor].size() + self.regions.remove(cursor + 1).size(); 279 | } 280 | else 281 | { 282 | /* move onto next region */ 283 | cursor = cursor + 1; 284 | } 285 | } 286 | } 287 | } 288 | 289 | /* initialize the physical memory system by registering all physical RAM available for use as allocatable regions */ 290 | pub fn init() -> Result<(), Cause> 291 | { 292 | /* we need to know the CPU count so that any memory preallocated or reserved for the cores can be skipped */ 293 | let nr_cpu_cores = match hardware::get_nr_cpu_cores() 294 | { 295 | Some(c) => c, 296 | None => return Err(Cause::PhysicalCoreCountUnknown) 297 | }; 298 | 299 | /* the device tree defines chunks of memory that may or may not be entirely available for use */ 300 | let chunks = match hardware::get_phys_ram_chunks() 301 | { 302 | Some(c) => c, 303 | None => return Err(Cause::PhysNoRAMFound) 304 | }; 305 | 306 | /* iterate over the physical memory chunks... */ 307 | let mut regions = REGIONS.lock(); 308 | for chunk in chunks 309 | { 310 | /* ...and let validate_ram break each chunk in sections we can safely use. 
311 | assume the RAM is clean: the firmware or boot code should have wiped it, 312 | or it should contain random values */ 313 | for section in validate_ram(nr_cpu_cores, chunk) 314 | { 315 | regions.insert(Region::new(section.base, section.size, RegionHygiene::CanClean))?; 316 | } 317 | } 318 | 319 | Ok(()) 320 | } 321 | 322 | /* perform housekeeping duties on idle physical CPU cores */ 323 | macro_rules! physmemhousekeeper 324 | { 325 | () => ($crate::physmem::coalesce_regions()); 326 | } 327 | 328 | pub fn coalesce_regions() 329 | { 330 | REGIONS.lock().merge(); 331 | } 332 | 333 | /* allocate a region of available physical memory for guest capsule or hypervisor heap use. 334 | capsules should use large regions, and the heap should use small, ideally. 335 | => size = number of bytes for the region, which will be rounded up to next multiple of: 336 | PHYS_RAM_LARGE_REGION_MIN_SIZE if the size >= PHYS_RAM_LARGE_REGION_MIN_SIZE (large type) 337 | PHYS_RAM_SMALL_REGION_MIN_SIZE if the size < PHYS_RAM_LARGE_REGION_MIN_SIZE (small type) 338 | 339 | note, large type regions will have a base address aligned down to PHYS_RAM_LARGE_REGION_ALIGNMENT 340 | this is so that guests that require 2MB or 4MB kernel alignment (eg RV64GC Linux) work as expected 341 | see: https://patchwork.kernel.org/patch/10868465/ 342 | this code assumes the top of physically available RAM is aligned to PHYS_RAM_LARGE_REGION_ALIGNMENT 343 | 344 | <= Region structure for the space, or an error code */ 345 | pub fn alloc_region(size: PhysMemSize) -> Result 346 | { 347 | /* determine where to split the free region block, and the region type */ 348 | let (split_from, region_multiple) = if size >= PHYS_RAM_LARGE_REGION_MIN_SIZE 349 | { 350 | (RegionSplit::FromTop, PHYS_RAM_LARGE_REGION_MIN_SIZE) 351 | } 352 | else 353 | { 354 | (RegionSplit::FromBottom, PHYS_RAM_SMALL_REGION_MIN_SIZE) 355 | }; 356 | 357 | /* round up to a multiple of the minimum size of a region type to avoid fragmentation */ 358 | 
let adjusted_size = match size % region_multiple 359 | { 360 | 0 => size, 361 | d => (size - d) + region_multiple 362 | }; 363 | 364 | let mut regions = REGIONS.lock(); 365 | match regions.find(adjusted_size) // find will remove found region from free list if successful 366 | { 367 | Ok(found) => 368 | { 369 | /* split the found region into two parts: one portion for the newly 370 | allocated region, and the remaining portion is returned to the free list. 371 | adjusted_size defines whwre in the region the split point occurs. 372 | split_from defines whether adjusted_size is measured from the top or 373 | bottom of the region block */ 374 | match (found.split(adjusted_size, split_from), split_from) 375 | { 376 | /* split so that the lower portion is allocated, and the upper portion is returned to the free list */ 377 | (Ok((mut lower, upper)), RegionSplit::FromBottom) => 378 | { 379 | regions.insert(upper)?; 380 | lower.clean(); 381 | Ok(lower) 382 | }, 383 | 384 | /* split so that the upper portion is allocated, and the lower portion is returned to the free list */ 385 | (Ok((lower, upper)), RegionSplit::FromTop) => 386 | { 387 | /* bring the base of the upper portion down to alignment mark */ 388 | let mut aligned_upper = match upper.base % PHYS_RAM_LARGE_REGION_ALIGNMENT 389 | { 390 | 0 => Region::new(upper.base, upper.size, found.hygiene), 391 | d => Region::new(upper.base - d, upper.size + d, found.hygiene) 392 | }; 393 | 394 | /* fail out if upper portion crashes through the lower portion base after alignment */ 395 | if lower.size < aligned_upper.size - upper.size 396 | { 397 | return Err(Cause::PhysRegionRegionAlignmentFailure) 398 | } 399 | 400 | /* adjust the size of the lower portion if the upper portion was aligned down */ 401 | let adjusted_lower = match aligned_upper.size - upper.size 402 | { 403 | 0 => lower, 404 | d => Region::new(lower.base, lower.size - d, found.hygiene) 405 | }; 406 | 407 | regions.insert(adjusted_lower)?; 408 | 
aligned_upper.clean(); 409 | Ok(aligned_upper) 410 | }, 411 | 412 | (Err(e), _) => Err(e) 413 | } 414 | }, 415 | Err(_) => Err(Cause::PhysNotEnoughFreeRAM) 416 | } 417 | } 418 | 419 | /* deallocate a region so that its physical RAM can be reallocated. 420 | only accept samll regions that are multiples of PHYS_RAM_SMALL_REGION_MIN_SIZE 421 | and large regions that are multiples of PHYS_RAM_LARGE_REGION_MIN_SIZE 422 | => to_free = region to deallocate 423 | <= Ok for success, or an error code for failure */ 424 | pub fn dealloc_region(to_free: Region) -> Result<(), Cause> 425 | { 426 | let size = to_free.size(); 427 | 428 | /* police the size of the region */ 429 | if size < PHYS_RAM_LARGE_REGION_MIN_SIZE 430 | { 431 | if size % PHYS_RAM_SMALL_REGION_MIN_SIZE != 0 432 | { 433 | return Err(Cause::PhysRegionSmallNotMultiple); 434 | } 435 | } 436 | else 437 | { 438 | if size % PHYS_RAM_LARGE_REGION_MIN_SIZE != 0 439 | { 440 | return Err(Cause::PhysRegionLargeNotMultiple); 441 | } 442 | } 443 | 444 | REGIONS.lock().insert(to_free) 445 | } 446 | -------------------------------------------------------------------------------- /src/hypervisor/src/scheduler.rs: -------------------------------------------------------------------------------- 1 | /* diosix virtual CPU scheduler 2 | * 3 | * This is, for now, really really simple. 4 | * Making it fairer and adaptive to workloads is the ultimate goal. 5 | * 6 | * (c) Chris Williams, 2018-2021. 7 | * 8 | * See LICENSE for usage and copying. 
9 | */ 10 | 11 | use super::lock::Mutex; 12 | use alloc::collections::vec_deque::VecDeque; 13 | use hashbrown::hash_map::HashMap; 14 | use platform::timer::TimerValue; 15 | use super::error::Cause; 16 | use super::vcore::{VirtualCore, Priority}; 17 | use super::pcore::{self, PhysicalCore, PhysicalCoreID}; 18 | use super::hardware; 19 | use super::message; 20 | use super::capsule::{self, CapsuleState}; 21 | 22 | pub type TimesliceCount = u64; 23 | 24 | /* prevent physical CPU time starvation: allow a normal virtual core to run after this number of timeslices 25 | have been spent running high priority virtual cores */ 26 | const HIGH_PRIO_TIMESLICES_MAX: TimesliceCount = 10; 27 | 28 | /* max how long a virtual core is allowed to run before a scheduling decision is made */ 29 | const TIMESLICE_LENGTH: TimerValue = TimerValue::Milliseconds(50); 30 | 31 | /* define the shortest time between now and another interrupt and rescheduling decision. 32 | this is to stop supervisor kernels spamming the scheduling system with lots of short reschedulings */ 33 | const TIMESLICE_MIN_LENGTH: TimerValue = TimerValue::Milliseconds(5); 34 | 35 | /* duration a system maintence core (one that can't run supervisor code) must wait 36 | before looking for fixed work to do. also the length in between application cores can 37 | attempt to perform housekeeping */ 38 | const MAINTENANCE_LENGTH: TimerValue = TimerValue::Seconds(5); 39 | 40 | /* these are the global wait queues. while each physical CPU core gets its own pair 41 | of high-normal wait queues, virtual cores waiting to be assigned to a physical CPU sit in these global queues. 42 | when a physical CPU runs out of queued virtual cores, it pulls one from these global queues. 43 | a physical CPU core can ask fellow CPUs to push virtual cores onto the global queues via messages */ 44 | lazy_static! 
45 | { 46 | static ref GLOBAL_QUEUES: Mutex = Mutex::new("global scheduler queue", ScheduleQueues::new()); 47 | static ref WORKLOAD: Mutex> = Mutex::new("workload balancer", HashMap::new()); 48 | static ref LAST_HOUSEKEEP_CHECK: Mutex = Mutex::new("housekeeper tracking", TimerValue::Exact(0)); 49 | } 50 | 51 | #[derive(PartialEq, Clone, Copy, Debug)] 52 | pub enum SearchMode 53 | { 54 | MustFind, /* when searching for something to run, keep looping until successful */ 55 | CheckOnce /* search just once for something else to run, return to environment otherwise */ 56 | } 57 | 58 | /* queue a virtual core in global wait list */ 59 | pub fn queue(to_queue: VirtualCore) 60 | { 61 | GLOBAL_QUEUES.lock().queue(to_queue); 62 | } 63 | 64 | /* activate preemptive multitasking. each physical CPU core should call this 65 | to start running workloads - be them user/supervisor or management tasks 66 | <= returns OK, or error code on failure */ 67 | pub fn start() -> Result<(), Cause> 68 | { 69 | hardware::scheduler_timer_start(); 70 | Ok(()) 71 | } 72 | 73 | /* make a decision on whether to run another virtual core, 74 | or return to the currently running core (if possible). 75 | ping() is called when a scheduler timer IRQ comes in */ 76 | pub fn ping() 77 | { 78 | let time_now = hardware::scheduler_get_timer_now(); 79 | let frequency = hardware::scheduler_get_timer_frequency(); 80 | if time_now.is_none() || frequency.is_none() 81 | { 82 | /* check to see if anything needs to run and bail out if 83 | no timer hardware can be found (and yet we're still getting IRQs?) 
*/ 84 | run_next(SearchMode::CheckOnce); 85 | return; 86 | } 87 | 88 | /* get down to the exact timer values */ 89 | let frequency = frequency.unwrap(); 90 | let time_now = time_now.unwrap().to_exact(frequency); 91 | 92 | /* if the virtual core we're running is doomed, skip straight 93 | to forcing a reschedule of another vcore */ 94 | match (pcore::PhysicalCore::this().get_timer_sched_last(), 95 | pcore::PhysicalCore::this().is_vcore_doomed()) 96 | { 97 | (Some(v), false) => 98 | { 99 | let timeslice_length = TIMESLICE_LENGTH.to_exact(frequency); 100 | let mut last_scheduled_at = v.to_exact(frequency); 101 | 102 | /* if the capsule we're running in is valid then perform a time slice check. 103 | if it's not valid, ensure the capsule is torn down or restarted for this 104 | virtual core. when all vcores are removed from the capsule, it will either 105 | be deleted or restarted, depending on its state */ 106 | let capsule_state = capsule::get_current_state(); 107 | match capsule_state 108 | { 109 | Some(CapsuleState::Valid) => 110 | { 111 | /* check to see if we've reached the end of this physical CPU core's 112 | time slice. 
a virtual core has the pcore for TIMESLICE_LENGTH of time
*/ 147 | if let Some(timer_target) = hardware::scheduler_get_timer_next_at() 148 | { 149 | let mut timer_target = timer_target.to_exact(frequency); 150 | 151 | /* avoid skipping over any pending supervisor timer IRQ: reduce latency between 152 | capsule timer interrupts being raised and capsule cores scheduled to pick up said IRQs */ 153 | if let Some(supervisor_target) = pcore::PhysicalCore::get_virtualcore_timer_target() 154 | { 155 | timer_target = supervisor_target.to_exact(frequency); 156 | } 157 | 158 | /* if the target is already behind us, discard it and interrupt at end of this timeslice. 159 | if the target is too far ahead, curtail it to the end of this timeslice */ 160 | if timer_target <= time_now || timer_target > last_scheduled_at + timeslice_length 161 | { 162 | timer_target = last_scheduled_at + timeslice_length; 163 | } 164 | 165 | hardware::scheduler_timer_at(TimerValue::Exact(timer_target)); 166 | } 167 | }, 168 | 169 | /* if not we've not scheduled anything yet, or whatever we were running 170 | is now invalid, we must find something (else) to run */ 171 | (None, _) | (_, true) => 172 | { 173 | run_next(SearchMode::MustFind); 174 | pcore::PhysicalCore::this().set_timer_sched_last(Some(TimerValue::Exact(time_now))); 175 | } 176 | } 177 | } 178 | 179 | /* find something else to run, or return to whatever we were running if allowed. 180 | call this function when a virtual core's timeslice has expired, or it has crashed 181 | or stopped running and we can't return to it. this function will return regardless 182 | if this physical CPU core is unable to run virtual cores. 
183 | => search_mode = define whether or not to continue searching for another 184 | virtual core to run, or check once to see if something else is waiting */ 185 | fn run_next(search_mode: SearchMode) 186 | { 187 | /* check for housekeeping */ 188 | housekeeping(); 189 | 190 | /* don't bother scheduling if we can't run the code-to-schedule 191 | because there's no supervisor mode support */ 192 | if pcore::PhysicalCore::smode_supported() == true 193 | { 194 | /* check for something to do */ 195 | loop 196 | { 197 | let mut something_found = true; 198 | 199 | /* check to see if there's anything waiting to be picked up for this 200 | physical CPU from a global queue. if so, then adopt it so it can get a chance to run */ 201 | match GLOBAL_QUEUES.lock().dequeue() 202 | { 203 | /* we've found a virtual CPU core to run, so switch to that */ 204 | Some(orphan) => 205 | { 206 | let mut workloads = WORKLOAD.lock(); 207 | let pcore_id = PhysicalCore::get_id(); 208 | 209 | /* increment counter of how many virtual cores this physical CPU core 210 | has taken from the global queue */ 211 | if let Some(count) = workloads.get_mut(&pcore_id) 212 | { 213 | *count = *count + 1; 214 | } 215 | else 216 | { 217 | workloads.insert(pcore_id, 1); 218 | } 219 | 220 | pcore::context_switch(orphan); 221 | }, 222 | 223 | /* otherwise, try to take a virtual CPU core waiting for this physical CPU core and run it */ 224 | _ => match PhysicalCore::dequeue() 225 | { 226 | Some(virtcore) => pcore::context_switch(virtcore), /* waiting virtual CPU core found, queuing now */ 227 | _ => something_found = false /* nothing else to run */ 228 | } 229 | } 230 | 231 | /* if we've found something, or only searching once, exit the search loop */ 232 | if something_found == true || search_mode == SearchMode::CheckOnce 233 | { 234 | break; 235 | } 236 | 237 | /* still here? 
see if there's a capsule waiting to be restarted and give us something to do */ 238 | capsulehousekeeper!(); 239 | } 240 | 241 | /* at this point, we've got a virtual core to run. tell the timer system to call us back soon */ 242 | hardware::scheduler_timer_next_in(TIMESLICE_LENGTH); 243 | } 244 | else 245 | { 246 | hardware::scheduler_timer_next_in(MAINTENANCE_LENGTH); /* we'll be back some time later */ 247 | } 248 | } 249 | 250 | /* perform any housekeeping duties defined by the various parts of the system */ 251 | fn housekeeping() 252 | { 253 | /* perform integrity checks */ 254 | #[cfg(feature = "integritychecks")] 255 | { 256 | if let Err(val) = pcore::PhysicalCore::integrity_check() 257 | { 258 | hvalert!("CPU private stack overflowed (0x{:x}). Halting!", val); 259 | loop {} 260 | } 261 | } 262 | 263 | /* avoid blocking on the house keeping lock */ 264 | if LAST_HOUSEKEEP_CHECK.is_locked() == true 265 | { 266 | return; 267 | } 268 | 269 | let mut last_check = LAST_HOUSEKEEP_CHECK.lock(); 270 | 271 | /* only perform housekeeping once every MAINTENANCE_LENGTH-long period */ 272 | match (hardware::scheduler_get_timer_now(), hardware::scheduler_get_timer_frequency()) 273 | { 274 | (Some(time_now), Some(frequency)) => 275 | { 276 | let time_now = time_now.to_exact(frequency); 277 | let last_check_value = (*last_check).to_exact(frequency); 278 | let maintence_length = MAINTENANCE_LENGTH.to_exact(frequency); 279 | 280 | /* wait until we're at least MAINTENANCE_LENGTH into boot */ 281 | if time_now > maintence_length 282 | { 283 | if time_now - last_check_value < maintence_length 284 | { 285 | /* not enough MAINTENANCE_LENGTH time has passed */ 286 | return; 287 | } 288 | /* mark when we last performed housekeeping */ 289 | *last_check = TimerValue::Exact(time_now); 290 | } 291 | else 292 | { 293 | /* flush debug and bail out */ 294 | debughousekeeper!(); 295 | return; 296 | } 297 | }, 298 | (_, _) => 299 | { 300 | /* no timer. 
output debug and bail out */ 301 | debughousekeeper!(); 302 | return; 303 | } 304 | } 305 | 306 | debughousekeeper!(); /* drain the debug logs to the debug hardware port */ 307 | heaphousekeeper!(); /* return any unused regions of physical memory */ 308 | physmemhousekeeper!(); /* tidy up any physical memory structures */ 309 | capsulehousekeeper!(); /* restart capsules that crashed or rebooted */ 310 | 311 | /* if the global queues are empty then work out which physical CPU core 312 | has the most number of virtual cores and is therefore the busiest */ 313 | let global_queue_lock = GLOBAL_QUEUES.lock(); 314 | if global_queue_lock.total_queued() > 0 315 | { 316 | let mut highest_count = 0; 317 | let mut busiest_pcore: Option = None; 318 | let workloads = WORKLOAD.lock(); 319 | for (&pcoreid, &vcore_count) in workloads.iter() 320 | { 321 | if vcore_count > highest_count 322 | { 323 | highest_count = vcore_count; 324 | busiest_pcore = Some(pcoreid); 325 | } 326 | } 327 | 328 | /* ask the busiest core to send one virtual core back to the global queue 329 | but only if it has enough to share: it must have more than one virtual core */ 330 | if highest_count > 1 331 | { 332 | if let Some(pid) = busiest_pcore 333 | { 334 | if let Ok(m) = message::Message::new(message::Recipient::send_to_pcore(pid), 335 | message::MessageContent::DisownQueuedVirtualCore) 336 | { 337 | match message::send(m) 338 | { 339 | Err(e) => hvalert!("Failed to message physical CPU {} during load balancing: {:?}", pid, e), 340 | Ok(()) => () 341 | }; 342 | } 343 | } 344 | } 345 | } 346 | } 347 | 348 | /* maintain a simple two-level round-robin scheduler per physical CPU core. we can make it more fancy later. 349 | the hypervisor tries to dish out physical CPU time fairly among capsules, and let the 350 | capsule supervisors work out how best to allocate their time to userspace code. 351 | picking the next virtual CPU core to run should be O(1) or as close as possible to it. 
*/ 352 | pub struct ScheduleQueues 353 | { 354 | high: VecDeque, 355 | low: VecDeque, 356 | high_timeslices: TimesliceCount 357 | } 358 | 359 | impl ScheduleQueues 360 | { 361 | /* initialize a new set of scheduler queues */ 362 | pub fn new() -> ScheduleQueues 363 | { 364 | ScheduleQueues 365 | { 366 | high: VecDeque::::new(), 367 | low: VecDeque::::new(), 368 | high_timeslices: 0 369 | } 370 | } 371 | 372 | /* run the given virtual core by switching to its supervisor context. 373 | this also updates NORM_PRIO_TICKS. if the current physical CPU was already running a 374 | virtual core, that virtual core is queued up in the waiting list by context_switch() */ 375 | pub fn run(&mut self, to_run: VirtualCore) 376 | { 377 | /* if we're about to run a normal virtual core, then reset counter since a normal virtual core ran. 378 | if we're running a non-normal virtual core, then increase the count. */ 379 | match to_run.get_priority() 380 | { 381 | Priority::Normal => self.high_timeslices = 0, 382 | Priority::High => self.high_timeslices = self.high_timeslices + 1 383 | }; 384 | 385 | pcore::context_switch(to_run); 386 | } 387 | 388 | /* add the given virtual core to the appropriate waiting queue. put it to the back 389 | so that other virtual cores get a chance to run */ 390 | pub fn queue(&mut self, to_queue: VirtualCore) 391 | { 392 | match to_queue.get_priority() 393 | { 394 | Priority::High => self.high.push_back(to_queue), 395 | Priority::Normal => self.low.push_back(to_queue) 396 | } 397 | } 398 | 399 | /* remove a virtual core from the waiting list queues, selected by priority with safeguards to 400 | prevent CPU time starvation. Returns selected virtual core or None for no other virtual cores waiting */ 401 | pub fn dequeue(&mut self) -> Option 402 | { 403 | /* has a normal virtual core been waiting for ages? 
*/ 404 | if self.high_timeslices > HIGH_PRIO_TIMESLICES_MAX 405 | { 406 | match self.low.pop_front() 407 | { 408 | Some(t) => return Some(t), 409 | None => () 410 | }; 411 | } 412 | 413 | /* check the high priority queue for anything waiting. 414 | if not, then try the normal priority queue */ 415 | match self.high.pop_front() 416 | { 417 | Some(t) => Some(t), 418 | None => self.low.pop_front() 419 | } 420 | } 421 | 422 | /* return the total number of virtual cores queued */ 423 | pub fn total_queued(&self) -> usize 424 | { 425 | self.high.len() + self.low.len() 426 | } 427 | } 428 | -------------------------------------------------------------------------------- /src/hypervisor/src/service.rs: -------------------------------------------------------------------------------- 1 | /* diosix capsule-provided service management 2 | 3 | * (c) Chris Williams, 2019-2020. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use super::lock::Mutex; 9 | use hashbrown::hash_map::{HashMap, Entry}; 10 | use alloc::collections::vec_deque::VecDeque; 11 | use alloc::vec::Vec; 12 | use super::message; 13 | use super::error::Cause; 14 | use super::capsule::{self, CapsuleID}; 15 | 16 | /* available type of services that can be offered by a capsule */ 17 | #[derive(Clone, Copy, PartialEq, Eq, Hash)] 18 | pub enum ServiceType 19 | { 20 | ConsoleInterface = 0 /* act as the console interface manager */ 21 | } 22 | 23 | pub fn usize_to_service_type(stype: usize) -> Result 24 | { 25 | match stype 26 | { 27 | 0 => Ok(ServiceType::ConsoleInterface), 28 | _ => Err(Cause::ServiceNotFound) 29 | } 30 | } 31 | 32 | /* select either a particular service or all services */ 33 | pub enum SelectService 34 | { 35 | AllServices, 36 | SingleService(ServiceType) 37 | } 38 | 39 | /* todo: a fixed list of known system services, 40 | such as video, sound, serial, network, etc 41 | that privileged / trusted capsules can register. 
42 | then other capsules can message those services 43 | to access those underlying resources. */ 44 | 45 | /* maintain a table of registered services */ 46 | lazy_static! 47 | { 48 | static ref SERVICES: Mutex> = Mutex::new("system service table", HashMap::new()); 49 | } 50 | 51 | /* return true if the given service type is registered */ 52 | pub fn is_registered(stype: ServiceType) -> bool 53 | { 54 | let tbl = SERVICES.lock(); 55 | tbl.contains_key(&stype) 56 | } 57 | 58 | /* describe an individual service */ 59 | struct Service 60 | { 61 | capsuleid: CapsuleID, /* capsule that's registered this service */ 62 | msgs: VecDeque /* queue of messages to deliver to service */ 63 | } 64 | 65 | impl Service 66 | { 67 | pub fn queue(&mut self, msg: message::Message) 68 | { 69 | self.msgs.push_front(msg); 70 | } 71 | 72 | pub fn get_capsule_id(&self) -> CapsuleID { self.capsuleid } 73 | } 74 | 75 | /* register a service for a capsule. this will fail if the 76 | capsule has no right to run the service, or if the capsule doesn't exist, 77 | or if another capsule has already claimed the service type. 78 | be aware if the capsule has already claimed the service, it will 79 | return Err(Cause::ServiceAlreadyOwner). this will be the result if a 80 | restarted capsule registers its service(s) again. services aren't released 81 | during a restart to provide a non-stop continuation of services. 82 | => stype = type of service to register 83 | cid = ID of capsule to handle this service 84 | <= return Ok for success, or a failure code */ 85 | pub fn register(stype: ServiceType, cid: CapsuleID) -> Result<(), Cause> 86 | { 87 | if capsule::is_service_allowed(cid, stype)? 
== false 88 | { 89 | return Err(Cause::ServiceNotAllowed); 90 | } 91 | 92 | let service = Service 93 | { 94 | capsuleid: cid, 95 | msgs: VecDeque::new() 96 | }; 97 | 98 | match SERVICES.lock().entry(stype) 99 | { 100 | Entry::Vacant(v) => 101 | { 102 | v.insert(service); 103 | }, 104 | Entry::Occupied(o) => if o.get().get_capsule_id() != cid 105 | { 106 | /* another capsule owns this service */ 107 | return Err(Cause::ServiceAlreadyRegistered); 108 | } 109 | else 110 | { 111 | /* this capsule already owns this service */ 112 | return Err(Cause::ServiceAlreadyOwner) 113 | } 114 | } 115 | 116 | Ok(()) 117 | } 118 | 119 | /* deregister one or all services belonding to a capsule 120 | so that it is no longer responsible for them 121 | => stype = service to deregister, or None for all of them 122 | cid = ID of capsule to strip of its services 123 | <= Ok for success, or an error code for failure */ 124 | pub fn deregister(stype: SelectService, cid: CapsuleID) -> Result<(), Cause> 125 | { 126 | let mut tbl = SERVICES.lock(); 127 | let mut to_remove = Vec::new(); 128 | 129 | for (registered, owner) in (&tbl).iter() 130 | { 131 | /* remove either everything that matches the capsule ID, or a particular service */ 132 | match stype 133 | { 134 | SelectService::AllServices => if owner.get_capsule_id() == cid 135 | { 136 | to_remove.push(*registered); 137 | }, 138 | SelectService::SingleService(s) => if owner.get_capsule_id() == cid && s == *registered 139 | { 140 | to_remove.push(*registered); 141 | } 142 | } 143 | } 144 | 145 | /* now remove the vicims */ 146 | for victim in to_remove 147 | { 148 | tbl.remove(&victim); 149 | } 150 | 151 | Ok(()) 152 | } 153 | 154 | /* send the given message msg to a registered service */ 155 | pub fn send(msg: message::Message) -> Result<(), Cause> 156 | { 157 | let stype = match msg.get_receiver() 158 | { 159 | message::Recipient::Service(stype) => stype, 160 | _ => return Err(Cause::MessageBadType) 161 | }; 162 | 163 | if let Some(service) 
= SERVICES.lock().get_mut(&stype) 164 | { 165 | service.queue(msg); 166 | Ok(()) 167 | } 168 | else 169 | { 170 | return Err(Cause::ServiceNotAllowed) 171 | } 172 | } -------------------------------------------------------------------------------- /src/hypervisor/src/vcore.rs: -------------------------------------------------------------------------------- 1 | /* diosix virtual CPU core management 2 | * 3 | * (c) Chris Williams, 2018-2019. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use super::error::Cause; 9 | use super::capsule::{self, CapsuleID}; 10 | use super::scheduler; 11 | use platform::cpu::{SupervisorState, SupervisorFPState, Entry}; 12 | use platform::physmem::PhysMemBase; 13 | use platform::timer; 14 | 15 | #[derive(Copy, Clone, Debug)] 16 | pub enum Priority 17 | { 18 | High, 19 | Normal 20 | } 21 | 22 | /* virtual core ID unique to its capsule */ 23 | pub type VirtualCoreID = usize; 24 | 25 | /* pair a virtual core with its parent capsule using their ID numbers */ 26 | #[derive(PartialEq, Eq, Hash)] 27 | pub struct VirtualCoreCanonicalID 28 | { 29 | pub capsuleid: CapsuleID, 30 | pub vcoreid: VirtualCoreID 31 | } 32 | 33 | /* a virtual core is either in a waiting queue awaiting physical CPU time, or is running and held in a physical CPU core struct. 34 | if you remove a virtual core object from the queue and don't place it back in a queue or Core structure, 35 | then the vcpu will be dropped, deallocated and destroyed. */ 36 | pub struct VirtualCore 37 | { 38 | id: VirtualCoreCanonicalID, 39 | priority: Priority, 40 | state: SupervisorState, 41 | fp_state: SupervisorFPState, 42 | timer_irq_at: Option 43 | } 44 | 45 | impl VirtualCore 46 | { 47 | /* create a virtual CPU core for a supervisor capsule. this virtual CPU is derived from 48 | the physical CPU core we're running on. 
49 | => capsule = ID of the capsule 50 | core = virtual core ID within the capsule 51 | entry = pointer to where to begin execution 52 | dtb = physical address of the device tree blob 53 | describing the virtual CPU's hardware environment 54 | priority = virtual core's priority 55 | <= OK for success, or error code */ 56 | pub fn create(capsuleid: CapsuleID, core: VirtualCoreID, entry: Entry, dtb: PhysMemBase, priority: Priority) -> Result<(), Cause> 57 | { 58 | let max_vcores = capsule::get_max_vcores(capsuleid)?; 59 | 60 | let new_vcore = VirtualCore 61 | { 62 | id: VirtualCoreCanonicalID 63 | { 64 | capsuleid, 65 | vcoreid: core 66 | }, 67 | priority, 68 | state: platform::cpu::init_supervisor_cpu_state(core, max_vcores, entry, dtb), 69 | fp_state: platform::cpu::init_supervisor_fp_state(), 70 | timer_irq_at: None 71 | }; 72 | 73 | /* add virtual CPU core to the global waiting list queue */ 74 | scheduler::queue(new_vcore); 75 | Ok(()) 76 | } 77 | 78 | /* return reference to virtual CPU core's physical CPU state */ 79 | pub fn state_as_ref(&self) -> &SupervisorState { &self.state } 80 | 81 | /* return reference to virtual CPU core's floating-point register state */ 82 | pub fn fp_state_as_ref(&self) -> &SupervisorFPState { &self.fp_state } 83 | 84 | /* return mutable reference to virtual CPU core's physical CPU state */ 85 | pub fn state_as_mut_ref(&mut self) -> &mut SupervisorState { &mut self.state } 86 | 87 | /* return mutable reference to virtual CPU core's floating-point register state */ 88 | pub fn fp_state_as_mut_ref(&mut self) -> &mut SupervisorFPState { &mut self.fp_state } 89 | 90 | /* return this virtual core's ID within its capsule */ 91 | pub fn get_id(&self) -> VirtualCoreID { self.id.vcoreid } 92 | 93 | /* return virtual CPU core capsule's ID */ 94 | pub fn get_capsule_id(&self) -> CapsuleID { self.id.capsuleid } 95 | 96 | /* return virtual CPU core's priority */ 97 | pub fn get_priority(&self) -> Priority { self.priority } 98 | 99 | /* define 
value the next timer IRQ should fire for this core. 100 | measured as value of the clock-on-the-wall for the system, or None for no IRQ */ 101 | pub fn set_timer_irq_at(&mut self, target: Option<timer::TimerValue>) 102 | { 103 | self.timer_irq_at = target; 104 | } 105 | 106 | /* return timer value after which a per-CPU timer IRQ will fire for this core, or None for no IRQ */ 107 | pub fn get_timer_irq_at(&mut self) -> Option<timer::TimerValue> 108 | { 109 | self.timer_irq_at 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/hypervisor/src/virtmem.rs: -------------------------------------------------------------------------------- 1 | /* diosix capsule virtual memory management 2 | * 3 | * (c) Chris Williams, 2019-2020. 4 | * 5 | * See LICENSE for usage and copying. 6 | */ 7 | 8 | use platform::physmem::PhysMemBase; 9 | use platform::virtmem::VirtMemBase; 10 | use super::physmem::Region; 11 | use super::error::Cause; 12 | 13 | /* map a capsule's virtual memory to a host physical memory region */ 14 | #[derive(Clone, Copy)] 15 | pub struct Mapping 16 | { 17 | virtual_base: Option<VirtMemBase>, 18 | physical_region: Option<Region> 19 | } 20 | 21 | impl Mapping 22 | { 23 | /* create an empty mapping */ 24 | pub fn new() -> Mapping 25 | { 26 | Mapping 27 | { 28 | virtual_base: None, 29 | physical_region: None 30 | } 31 | } 32 | 33 | /* define the virtual base address and corresponding physical RAM region */ 34 | pub fn set_virtual(&mut self, vbase: VirtMemBase) { self.virtual_base = Some(vbase); } 35 | pub fn set_physical(&mut self, region: Region) { self.physical_region = Some(region); } 36 | pub fn get_physical(&self) -> Option<Region> { self.physical_region } 37 | 38 | /* set 1:1 mapping of virtual to physical addresses.
requires physical region to be defined */ 39 | pub fn identity_mapping(&mut self) -> Result<(), Cause> 40 | { 41 | match self.physical_region 42 | { 43 | Some(region) => self.virtual_base = Some(region.base()), 44 | None => return Err(Cause::VirtMemPhysNotSet) 45 | } 46 | Ok(()) 47 | } 48 | 49 | /* translate host physical address to capsule virtual address using this mapping, or None for outside mapping 50 | or None if translation not possible as mapping is not configured */ 51 | pub fn physical_to_virtual(&self, physaddr: PhysMemBase) -> Option<VirtMemBase> 52 | { 53 | match(self.virtual_base, self.physical_region) 54 | { 55 | (Some(virtbase), Some(region)) => if physaddr >= region.base() && physaddr < region.end() 56 | { 57 | Some((physaddr - region.base()) + virtbase) 58 | } 59 | else 60 | { 61 | None 62 | }, 63 | (_, _ ) => None 64 | } 65 | } 66 | 67 | /* translate capsule virtual address to host physical address using this mapping, or None for outside mapping 68 | or None if translation not possible as mapping is not configured */ 69 | pub fn virtual_to_physical(&self, virtaddr: VirtMemBase) -> Option<PhysMemBase> 70 | { 71 | match(self.virtual_base, self.physical_region) 72 | { 73 | (Some(virtbase), Some(region)) => if virtaddr >= virtbase && virtaddr < virtbase + region.size() 74 | { 75 | Some((virtaddr - virtbase) + region.base()) 76 | } 77 | else 78 | { 79 | None 80 | }, 81 | (_, _ ) => None 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/services/.cargo/config: -------------------------------------------------------------------------------- 1 | # 2 | # diosix supervisor platform-specific linker settings 3 | # 4 | # (c) Chris Williams, 2020. 5 | # See LICENSE for usage and copying.
6 | # 7 | 8 | # 9 | # set the default build triple 10 | # 11 | [build] 12 | target = "riscv64gc-unknown-none-elf" 13 | 14 | # Find the linker for 64-bit RISC-V (IMAC) targets 15 | [target.riscv64imac-unknown-none-elf] 16 | rustflags = [ 17 | "-Z", "pre-link-arg=-nostartfiles", 18 | "-C", "link-arg=-Tsrc/supervisor-riscv/link.ld", 19 | "-C", "link-arg=-pie", 20 | "-C", "link-arg=--no-dynamic-linker", 21 | "-C", "relocation-model=pic" ] 22 | linker = "riscv64-linux-gnu-ld" 23 | ar = "riscv64-linux-gnu-ar" 24 | 25 | # Find the linker for 64-bit RISC-V (GC) targets 26 | [target.riscv64gc-unknown-none-elf] 27 | rustflags = [ 28 | "-Z", "pre-link-arg=-nostartfiles", 29 | "-C", "link-arg=-Tsrc/supervisor-riscv/link.ld", 30 | "-C", "link-arg=-pie", 31 | "-C", "link-arg=--no-dynamic-linker", 32 | "-C", "relocation-model=pic" ] 33 | linker = "riscv64-linux-gnu-ld" 34 | ar = "riscv64-linux-gnu-ar" 35 | -------------------------------------------------------------------------------- /src/services/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "diosix-services" 3 | version = "1.0.0" 4 | authors = ["Chris Williams "] 5 | license = "MIT" 6 | build = "../mason/build.rs" 7 | publish = false 8 | edition = "2018" 9 | 10 | [[bin]] 11 | name = "gooey" 12 | path = "src/gooey/src/main.rs" 13 | 14 | [build-dependencies] 15 | regex = "1.4.2" 16 | toml = "0.5.8" 17 | serde = "1.0.118" 18 | serde_derive = "1.0.118" 19 | 20 | [dependencies.lazy_static] 21 | version = "1.4.0" 22 | features = [ "spin_no_std" ] 23 | 24 | [dependencies.spin] 25 | version = "0.7.0" 26 | 27 | [target.riscv64imac-unknown-none-elf.dependencies] 28 | supervisor = { path = "src/supervisor-riscv" } 29 | 30 | [target.riscv64gc-unknown-none-elf.dependencies] 31 | supervisor = { path = "src/supervisor-riscv" } 32 | -------------------------------------------------------------------------------- /src/services/mason.toml: 
-------------------------------------------------------------------------------- 1 | # Configure Mason to build non-Rust portions of hypervisor services 2 | # 3 | # Directory paths are relative to this mason.toml file 4 | 5 | # Set up assembly code directories for supported architectures 6 | # asm_dirs is a list of directories to scan 7 | # for assembly code to build and link into the service executables 8 | 9 | [target.riscv64imac-unknown-none-elf] 10 | asm_dirs = [ "src/supervisor-riscv/asm" ] 11 | 12 | [target.riscv64gc-unknown-none-elf] 13 | asm_dirs = [ "src/supervisor-riscv/asm" ] 14 | --------------------------------------------------------------------------------