├── .cargo └── config.toml ├── .github └── workflows │ ├── format.yml │ ├── license.yml │ ├── merge.yml │ └── tests.yml ├── .gitignore ├── .gitmodules ├── .rustfmt.toml ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── external ├── libcrt │ ├── Makefile │ ├── README.md │ ├── include │ │ ├── assert.h │ │ ├── atomic.h │ │ ├── bits │ │ │ ├── alltypes.h │ │ │ └── atomic_arch.h │ │ ├── ctype.h │ │ ├── dirent.h │ │ ├── endian.h │ │ ├── errno.h │ │ ├── fcntl.h │ │ ├── inttypes.h │ │ ├── libcrt.h │ │ ├── limits.h │ │ ├── memory.h │ │ ├── stdarg.h │ │ ├── stdatomic.h │ │ ├── stdbool.h │ │ ├── stddef.h │ │ ├── stdint.h │ │ ├── stdio.h │ │ ├── stdlib.h │ │ ├── string.h │ │ ├── strings.h │ │ ├── sys │ │ │ ├── shm.h │ │ │ ├── stat.h │ │ │ ├── syscall.h │ │ │ ├── time.h │ │ │ ├── types.h │ │ │ └── utsname.h │ │ ├── time.h │ │ └── unistd.h │ └── src │ │ ├── ctype │ │ ├── isascii.c │ │ ├── isdigit.c │ │ ├── islower.c │ │ ├── isspace.c │ │ ├── isupper.c │ │ ├── tolower.c │ │ └── toupper.c │ │ ├── exit │ │ └── assert.c │ │ ├── prng │ │ └── rand.c │ │ ├── setjmp │ │ └── x86_64 │ │ │ ├── longjmp.s │ │ │ └── setjmp.s │ │ ├── stdio │ │ ├── asprintf.c │ │ ├── fprintf.c │ │ ├── printf.c │ │ ├── printf_wrapper.c │ │ ├── snprintf.c │ │ ├── sprintf.c │ │ ├── vasprintf.c │ │ ├── vsnprintf.c │ │ └── vsprintf.c │ │ ├── stdlib │ │ ├── atoi.c │ │ ├── qsort.c │ │ └── qsort_nr.c │ │ ├── string │ │ ├── memchr.c │ │ ├── memcmp.c │ │ ├── memcpy.c │ │ ├── memmove.c │ │ ├── memrchr.c │ │ ├── memset.c │ │ ├── stpcpy.c │ │ ├── stpncpy.c │ │ ├── strcasecmp.c │ │ ├── strcat.c │ │ ├── strchr.c │ │ ├── strchrnul.c │ │ ├── strcmp.c │ │ ├── strcpy.c │ │ ├── strcspn.c │ │ ├── strdup.c │ │ ├── strlen.c │ │ ├── strncasecmp.c │ │ ├── strncat.c │ │ ├── strncmp.c │ │ ├── strncpy.c │ │ ├── strrchr.c │ │ ├── strspn.c │ │ └── strstr.c │ │ ├── stub.c │ │ └── time │ │ ├── __secs_to_tm.c │ │ ├── gmtime_r.c │ │ └── time.c └── openssl_svsm.conf ├── rust-toolchain.toml ├── scripts ├── build.sh ├── common.sh ├── crates.sh ├── install.sh ├── launch-qemu.sh └── stable-commits ├── src ├── bios.rs ├── cpu │ ├── cpuid.rs │ ├── idt.rs │ ├── mod.rs │ ├── percpu.rs │ ├── smp.rs │ ├── sys.rs │ ├── tss.rs │ ├── vc.rs │ └── vmsa.rs ├── globals.rs ├── lib.rs ├── mem │ ├── alloc.rs │ ├── ca.rs │ ├── fwcfg.rs │ ├── ghcb.rs │ ├── map_guard.rs │ ├── mod.rs │ └── pgtable.rs ├── protocols │ ├── core.rs │ ├── error_codes.rs │ └── mod.rs ├── start │ ├── start.S │ ├── svsm.h │ └── svsm.lds.S ├── svsm_request.rs ├── util │ ├── locking.rs │ ├── mod.rs │ ├── serial.rs │ └── util.rs ├── vmsa_list.rs └── wrapper.rs └── svsm-target.json /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | # Needed to recompile Rust core and compiler_builtins libraries 2 | # also use the mem{set,cmp,cpy} implementations 3 | [unstable] 4 | build-std-features = ["compiler-builtins-mem"] 5 | build-std = ["core", "compiler_builtins", "alloc"] 6 | 7 | # This avoids writting cargo build --target svsm-target.json 8 | # but don't change the README instructions as someone might not 9 | # have .cargo/ 10 | [build] 11 | target = "svsm-target.json" 12 | 13 | [target.svsm-target] 14 | rustflags = [ 15 | "-C", "link-arg=-Tsrc/start/svsm.lds", 16 | ] 17 | -------------------------------------------------------------------------------- /.github/workflows/format.yml: -------------------------------------------------------------------------------- 1 | name: Format 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | 
pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | format_check: 11 | runs-on: ubuntu-latest 12 | timeout-minutes: 2 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v3 16 | - name: Install Rust toolchain 17 | uses: actions-rs/toolchain@v1 18 | with: 19 | toolchain: nightly 20 | override: true 21 | profile: minimal 22 | components: rustfmt 23 | - name: Check format 24 | uses: actions-rs/cargo@v1 25 | with: 26 | command: fmt 27 | args: --all -- --check 28 | -------------------------------------------------------------------------------- /.github/workflows/license.yml: -------------------------------------------------------------------------------- 1 | name: License 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | license_check: 11 | runs-on: ubuntu-latest 12 | timeout-minutes: 2 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v3 16 | - name: Run licenses script 17 | run: bash ${GITHUB_WORKSPACE}/scripts/crates.sh --check 18 | -------------------------------------------------------------------------------- /.github/workflows/merge.yml: -------------------------------------------------------------------------------- 1 | name: Merge 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | merge_check: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: squalrus/merge-bot@v0.1.0 14 | if: ${{ github.actor == 'dependabot[bot]' }} 15 | with: 16 | github-token: ${{ secrets.GITHUB_TOKEN }} 17 | labels: dependencies 18 | delete_source_branch: true 19 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v2 15 | - name: Install Rust toolchain 16 | uses: actions-rs/toolchain@v1 17 | with: 18 | toolchain: nightly 19 | - name: Prepare tests 20 | run: rustup component add rust-src --toolchain nightly 21 | - name: Run tests 22 | run: cargo test --target=x86_64-unknown-linux-gnu -Z build-std 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | *.swp 9 | 10 | # Prerequisites 11 | *.d 12 | 13 | # Object files 14 | *.o 15 | *.ko 16 | *.obj 17 | *.elf 18 | 19 | # Linker output 20 | *.ilk 21 | *.map 22 | *.exp 23 | 24 | # Precompiled Headers 25 | *.gch 26 | *.pch 27 | 28 | # Libraries 29 | *.lib 30 | *.a 31 | *.la 32 | *.lo 33 | 34 | # Shared objects (inc. 
Windows DLLs) 35 | *.dll 36 | *.so 37 | *.so.* 38 | *.dylib 39 | 40 | # Executables 41 | *.exe 42 | *.out 43 | *.app 44 | *.i*86 45 | *.x86_64 46 | *.hex 47 | svsm.bin 48 | svsm.bin.elf 49 | 50 | # Debug files 51 | *.dSYM/ 52 | *.su 53 | *.idb 54 | *.pdb 55 | 56 | # Kernel Module Compile Results 57 | *.mod* 58 | *.cmd 59 | .tmp_versions/ 60 | modules.order 61 | Module.symvers 62 | Mkfile.old 63 | dkms.conf 64 | 65 | # Others 66 | *.lds 67 | .prereq 68 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "external/openssl"] 2 | path = external/openssl 3 | url = https://github.com/openssl/openssl.git 4 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | newline_style = "Unix" 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution and style 2 | 3 | Contributions are expected in the form of GitHub pull requests. They will 4 | need to be reviewed and accepted by a maintainer. 5 | 6 | Contributions must include a "Signed-off-by" line containing the 7 | contributor's name and e-mail to every commit message. The addition of this 8 | line attests that the contributor has read and agrees with the [Developer 9 | Certificate of Origin](https://developercertificate.org/). 10 | 11 | If significant changes must be made before accepting a pull request, it 12 | will be preferable to open a new pull request with clean commits. 13 | 14 | If your patch fixes a bug in a specific commit, please use the ‘Fixes:’ tag 15 | with the first 7 characters of the commit hash, and the one line summary. 16 | For example: 17 | 18 | ``` 19 | Fixes: 3541fad ("IDT: include new vector entry #HV") 20 | ``` 21 | 22 | If the patch was created with the help of other developer(s), the tag 23 | "Co-Developed-by:" can be included. The co-developers will also need to 24 | have a Signed-off-by line. 25 | 26 | Commits will be tagged when the maintainers consider there is something 27 | worth tagging. Similarly, new branches will be created if needed. 28 | 29 | ## Code style 30 | 31 | Code contributions should adhere to rustfmt, like in the Rust for Linux 32 | kernel project. You can check with: 33 | 34 | ``` 35 | # rustfmt --check 36 | ``` 37 | Besides that, we only ask for: 38 | 39 | 1. Variable definitions to be explicit on type. For example: 40 | 41 | ```rust 42 | let a: u64 = 4; 43 | ``` 44 | 45 | 2. Constant definitions to include a comment with their value (for 46 | documentation purposes). For example: 47 | 48 | ```rust 49 | /// 16 50 | const GUID_SIZE: u64 = 16; 51 | ``` 52 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "autocfg" 7 | version = "1.1.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" 10 | 11 | [[package]] 12 | name = "bit_field" 13 | version = "0.10.1" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" 16 | 17 | [[package]] 18 | name = "bitflags" 19 | version = "1.3.2" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 22 | 23 | [[package]] 24 | name = "cty" 25 | version = "0.2.2" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35" 28 | 29 | [[package]] 30 | name = "lazy_static" 31 | version = "1.4.0" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 34 | dependencies = [ 35 | "spin", 36 | ] 37 | 38 | [[package]] 39 | name = "linux_svsm" 40 | version = "0.1.0" 41 | dependencies = [ 42 | "cty", 43 | "lazy_static", 44 | "memchr", 45 | "memoffset", 46 | "paste", 47 | "uuid", 48 | "x86_64", 49 | ] 50 | 51 | [[package]] 52 | name = "memchr" 53 | version = "2.5.0" 54 | source = "registry+https://github.com/rust-lang/crates.io-index" 55 | checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" 56 | 57 | [[package]] 58 | name = "memoffset" 59 | version = "0.6.5" 60 | source = "registry+https://github.com/rust-lang/crates.io-index" 61 | checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" 62 | dependencies = [ 63 | "autocfg", 64 | ] 65 | 66 | [[package]] 67 | name = "paste" 68 | version = "1.0.7" 69 | source = "registry+https://github.com/rust-lang/crates.io-index" 70 | checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" 71 | 72 | [[package]] 73 | name = "rustversion" 74 | version = "1.0.8" 75 | source = "registry+https://github.com/rust-lang/crates.io-index" 76 | checksum = "24c8ad4f0c00e1eb5bc7614d236a7f1300e3dbd76b68cac8e06fb00b015ad8d8" 77 | 78 | [[package]] 79 | name = "spin" 80 | version = "0.5.2" 81 | source = "registry+https://github.com/rust-lang/crates.io-index" 82 | checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" 83 | 84 | [[package]] 85 | name = "uuid" 86 | version = "1.1.2" 87 | source = "registry+https://github.com/rust-lang/crates.io-index" 88 | checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" 89 | 90 | [[package]] 91 | name = "volatile" 92 | version = "0.4.5" 93 | source = "registry+https://github.com/rust-lang/crates.io-index" 94 | checksum = "e3ca98349dda8a60ae74e04fd90c7fb4d6a4fbe01e6d3be095478aa0b76f6c0c" 95 | 96 | [[package]] 97 | name = "x86_64" 98 | version = "0.14.10" 99 | source = "registry+https://github.com/rust-lang/crates.io-index" 100 | checksum = "100555a863c0092238c2e0e814c1096c1e5cf066a309c696a87e907b5f8c5d69" 101 | dependencies = [ 102 | "bit_field", 103 | "bitflags", 104 | "rustversion", 105 | "volatile", 106 | ] 107 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [lib] 2 | crate-type = ["staticlib"] 3 | 4 | [features] 5 | 6 | verbose = [] # Print updates on 
serial (demo and debug purposes) 7 | 8 | [package] 9 | name = "linux_svsm" 10 | authors = ["Tom Lendacky ", "Carlos Bilbao "] 11 | version = "0.1.0" 12 | edition = "2021" 13 | rust-version = "1.71" 14 | 15 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 16 | 17 | [dependencies] 18 | x86_64 = "0.14.2" 19 | memoffset = "0.6" 20 | paste = "1.0" 21 | memchr = { version = "2", default-features = false } 22 | uuid = { version = "1", default-features = false } 23 | cty = "0.2.2" 24 | 25 | [dependencies.lazy_static] 26 | version = "1.0" 27 | features = ["spin_no_std"] 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: MIT 2 | 3 | GCC = gcc 4 | 5 | SHELL := /bin/bash 6 | 7 | A_FLAGS := -D__ASSEMBLY__ 8 | 9 | C_FLAGS := -g -O2 10 | C_FLAGS += -m64 -march=x86-64 -mno-sse2 11 | C_FLAGS += -fno-stack-protector 12 | C_FLAGS += -ffreestanding 13 | C_FLAGS += -Wall -Wstrict-prototypes -Wno-address-of-packed-member 14 | 15 | LD_FLAGS := -m64 16 | LD_FLAGS += -nostdlib 17 | LD_FLAGS += -Wl,-Tsrc/start/svsm.lds -Wl,--build-id=none 18 | 19 | TARGET_DIR := target 20 | TARGET := $(TARGET_DIR)/svsm-target/debug 21 | 22 | OBJS := src/start/start.o 23 | OBJS += $(TARGET)/liblinux_svsm.a 24 | 25 | FEATURES := "" 26 | 27 | ## Memory layout 28 | 29 | SVSM_GPA := 0x8000000000 30 | SVSM_MEM := 0x10000000 31 | LDS_FLAGS += -DSVSM_GPA="$(SVSM_GPA)" 32 | LDS_FLAGS += -DSVSM_MEM="$(SVSM_MEM)" 33 | 34 | EXT_LIBS := external/libcrt/libcrt.a 35 | EXT_LIBS += external/openssl/libcrypto.a 36 | 37 | .PHONY: all doc prereq clean clean_all superclean libcrt libcrypto 38 | 39 | all: .prereq libcrt libcrypto svsm.bin 40 | 41 | doc: .prereq 42 | cargo doc --open 43 | 44 | external/libcrt/libcrt.a: libcrt 45 | 46 | libcrt: 47 | $(MAKE) -C external/libcrt 48 | 49 | external/openssl/libcrypto.a: libcrypto 50 | 51 | libcrypto: external/openssl/Makefile libcrt 52 | $(MAKE) -C external/openssl -j$$(nproc) 53 | 54 | svsm.bin: svsm.bin.elf 55 | objcopy -g -O binary $< $@ 56 | 57 | # "-Wl,-u,malloc" prevents the linker from removing the wrapper.rs symbols 58 | svsm.bin.elf: $(EXT_LIBS) $(OBJS) src/start/svsm.lds 59 | $(GCC) $(LD_FLAGS) -o $@ $(OBJS) -Wl,-u,malloc -Wl,--start-group $(EXT_LIBS) -Wl,--end-group 60 | 61 | %.a: src/*.rs src/cpu/*.rs src/mem/*.rs src/protocols/*.rs src/util/*.rs 62 | @xargo build --features $(FEATURES) 63 | 64 | %.o: %.S src/start/svsm.h 65 | $(GCC) $(C_FLAGS) $(LDS_FLAGS) $(A_FLAGS) -c -o $@ $< 66 | 67 | %.lds: %.lds.S src/start/svsm.h 68 | $(GCC) $(A_FLAGS) $(LDS_FLAGS) -E -P -o $@ $< 69 | 70 | test: 71 | cargo test --features $(FEATURES) --target=x86_64-unknown-linux-gnu -Z build-std 72 | 73 | prereq: .prereq 74 | 75 | .prereq: 76 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 77 | source $(HOME)/.cargo/env 78 | echo "source $(HOME)/.cargo/env" >> ~/.bashrc 79 | rustup component add rust-src 80 | rustup component add llvm-tools-preview 81 | cargo install xargo 82 | cargo install bootimage 83 | touch .prereq 84 | 85 | external/openssl/Makefile: 86 | git submodule update --init 87 | (cd external/openssl && git checkout OpenSSL_1_1_1q && \ 88 | ./Configure \ 89 | --config=../openssl_svsm.conf \ 90 | SVSM \ 91 | no-afalgeng \ 92 | no-async \ 93 | no-autoerrinit \ 94 | no-autoload-config \ 95 | no-bf \ 96 | no-blake2 \ 97 | no-capieng \ 98 | no-cast \ 99 | no-chacha \ 100 | no-cms \ 101 | no-ct \ 102 | no-deprecated \ 103 | no-des \ 104 | no-dgram \ 105 | no-dsa \ 106 | no-dynamic-engine \ 107 | no-ec2m \ 108 | no-engine \ 109 | no-err \ 110 | no-filenames \ 111 | no-gost \ 112 | no-hw \ 113 | no-idea \ 114 | no-md4 \ 115 | no-mdc2 \ 116 | no-pic \ 117 | no-ocb \ 118 | no-poly1305 \ 119 | no-posix-io \ 120 | no-rc2 \ 121 | no-rc4 \ 122 | no-rfc3779 \ 123 | no-rmd160 \ 124 | no-scrypt \ 125 | no-seed \ 126 | no-sock \ 127 | no-srp \ 128 | no-ssl \ 129 | no-stdio \ 130 | no-threads \ 131 | no-ts \ 132 | no-whirlpool \ 133 | no-shared \ 134 | no-sse2 \ 135 | no-ui-console \ 136 | no-asm \ 137 | 
--with-rand-seed=none \ 138 | -I../libcrt/include \ 139 | -Wl,rpath=../libcrt -lcrt ) 140 | 141 | clean: 142 | @xargo clean 143 | rm -f svsm.bin svsm.bin.elf $(OBJS) 144 | rm -rf $(TARGET_DIR) 145 | rm -f src/start/svsm.lds 146 | 147 | clean_all: clean 148 | $(MAKE) -C external/libcrt clean 149 | $(MAKE) -C external/openssl clean 150 | 151 | superclean: clean_all 152 | rm -f .prereq 153 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Linux SVSM (Secure VM Service Module) 2 | 3 | \*\* The Linux SVSM project is no longer being actively developed. 4 | Users of Linux SVSM should switch over to the 5 | [COCONUT SVSM project](https://github.com/coconut-svsm/svsm). \*\* 6 | 7 | ## Table of contents 8 | 9 | 1. [What is this magic?](#introduction) 10 | 2. [Preparing the host](#host) 11 | 3. [Installation](#install) 12 | 4. [Running Linux SVSM](#run) 13 | 5. [Contribution](#contribute) 14 | 6. [Linux SVSM userspace](#cpl3) 15 | 7. [Authors and License](#authors) 16 | 17 | ## What is this magic? 18 | 19 | Linux SVSM (Secure VM Service Module) implements a guest communication 20 | interface so that VM guests can offload sensitive operations (for example, 21 | updating access permissions on protected pages) onto a privileged\* guest 22 | acting as service module. Linux SVSM relies on AMD's Secure Nested Paging 23 | (SNP) and prior Secure Encrypted Virtualization technologies (See 24 | [SEV documentation](https://developer.amd.com/sev/)). 25 | 26 | The idea is that Linux SVSM will not only offload security operations, 27 | but will also be able to provide other services such as live VM migration; 28 | the privilege separation model of SVSM permits the existence of a virtual 29 | Trusted Platform Module (virtual TPM). 30 | 31 | \* AMD SNP introduces the Virtual Machine Privilege Level (VMPLs) for 32 | enhanced security control. VMPL0 is the highest level of privilege. 33 | Linux SVSM runs at VMPL 0, as opposed to other guests running under 34 | VMPL >=1. Certain operations become architecturally impossible to guests 35 | running at lower privilege levels (e.g. use of the PVALIDATE instruction 36 | and certain forms of RMPADJUST). 37 | 38 | Generate and read source code documentation with: 39 | 40 | ``` 41 | # make doc 42 | ``` 43 | 44 | which will also install necessary prerequisites. 45 | 46 | ## Preparing the host 47 | 48 | Linux SVSM assumes a host with support for AMD's SEV-SNP, as well as 49 | compatible guest, Qemu and OVMF BIOS. We provide bash scripts to automate 50 | the installation process of these prerequisites. The remainder of these 51 | instructions were tested on Ubuntu 22.04 server, installed with kernel 52 | 5.15.0-46-generic. 53 | 54 | Start by verifying that the following BIOS settings are enabled. The 55 | settings may vary depending on the vendor BIOS. The menu options below are 56 | from AMD's BIOS. 57 | 58 | ``` 59 | CBS -> CPU Common -> 60 | SEV-ES ASID space Limit Control -> Manual 61 | SEV-ES ASID space limit -> 100 62 | SNP Memory Coverage -> Enabled 63 | SMEE -> Enabled 64 | -> NBIO common -> 65 | SEV-SNP -> Enabled 66 | ``` 67 | 68 | We now need to build the host and guest kernels, Qemu and OVMF BIOS used for 69 | launching the SEV-SNP guest. 70 | 71 | ``` 72 | $ cd scripts/ 73 | $ ./build.sh --package 74 | ``` 75 | 76 | If build fails, read subsection [Build troubleshooting](#trouble-build). 
On 77 | successful build, the binaries will be available in `snp-release-`. 78 | 79 | Now we need to install the Linux kernel on the host machine: 80 | 81 | ``` 82 | $ cd snp-release- 83 | $ sudo ./install.sh 84 | ``` 85 | 86 | Reboot the machine and choose SNP Host kernel from the grub menu. You can 87 | check you have a kernel with the proper SNP support with: 88 | 89 | ``` 90 | $ sudo dmesg | grep SEV 91 | [ 7.393321] SEV-SNP: RMP table physical address 0x0000000088a00000 - 0x00000000a8ffffff 92 | [ 18.958687] ccp 0000:22:00.1: SEV firmware update successful 93 | [ 21.081484] ccp 0000:22:00.1: SEV-SNP API:1.51 build:3 94 | [ 21.286378] SEV supported: 255 ASIDs 95 | [ 21.290367] SEV-ES and SEV-SNP supported: 254 ASIDs 96 | ``` 97 | 98 | ### Build troubleshooting 99 | 100 | The most likely source of build errors is missing a tool. Try installing 101 | the following: 102 | 103 | ``` 104 | $ sudo apt install make ninja-build libglib2.0-dev libpixman-1-dev python3 105 | $ sudo apt install nasm iasl flex bison libelf-dev libssl-dev 106 | ``` 107 | 108 | If your error is during OVMF's compilation, you can try getting a verbose 109 | form of the error, running manually with -v. In our case: 110 | 111 | ``` 112 | $ cd ovmf 113 | $ source edksetup.sh 114 | $ nice build -v -q --cmd-len=64436 -DDEBUG_ON_SERIAL_PORT -n 32 -t GCC5 -a X64 -p OvmfPkg/OvmfPkgX64.dsc 115 | ``` 116 | 117 | If your error involves still not finding Python, you may try to replace `python` 118 | with `python3` in the file `BaseTools/Tests/GNUmakefile` of the `ovmf` folder 119 | that you have just cloned. 120 | 121 | ## Installation 122 | 123 | Linux SVSM requires the Rust nightly tool-chain, as well as components that 124 | can be downloaded from rustup. The process can be automated with: 125 | 126 | ``` 127 | # make prereq 128 | ``` 129 | 130 | You can select default installation for rustup. After that, make sure rust-lld 131 | can be found in your PATH. You can edit your ~/.bashrc with: 132 | 133 | ``` 134 | export PATH="/(YOUR PATH)/rustlib/x86_64-unknown-linux-gnu/bin/:$PATH" 135 | ``` 136 | 137 | To build: 138 | 139 | ``` 140 | # make 141 | ``` 142 | 143 | To build with serial output progress information, for debugging: 144 | 145 | ``` 146 | # make FEATURES=verbose 147 | ``` 148 | 149 | You should NEVER have to specify the cargo target, as we have 150 | .cargo/config.toml. The Makefile includes a basic clean target. To 151 | force prerequisites re-installation on the next execution of make do: 152 | 153 | ``` 154 | # make superclean 155 | ``` 156 | 157 | To run the unit tests: 158 | 159 | ``` 160 | # make test 161 | ``` 162 | 163 | ## Running Linux SVSM 164 | 165 | The building process will generate svsm.bin that can be passed to Qemu (svsm 166 | parameter). Inside directory scripts/ we provide launch-qemu.sh to ease the 167 | execution of the Qemu virtual machine. First, we need an empty virtual disk 168 | image and distribution (in our example, Ubuntu): 169 | 170 | ``` 171 | # qemu-img create -f qcow2 guest.qcow2 30G 172 | # wget ubuntu.iso 173 | ``` 174 | 175 | Once we have an image prepared, we can boot with the command below. In the 176 | Grub option of installation, you can edit the linux kernel command adding 177 | 'console=tty0 console=ttyS0,115200n8' and then Ctr+X. 178 | 179 | ``` 180 | # ./launch-qemu.sh -hda guest.qcow2 -cdrom ubuntu.iso 181 | ``` 182 | 183 | after that, we can simply boot and install the kernel \*.debs/\*.rpms from 184 | within the guest VM. 
185 | 186 | ``` 187 | [host@snp-host ~]# ./launch-qemu.sh -hda guest.qcow2 188 | [guest@snp-guest ~]# scp host@ip://scripts/linux/*guest*.deb . 189 | [guest@snp-guest ~]# chmod +x *.deb && dpkg -i *.deb 190 | [guest@snp-guest ~]# reboot 191 | ``` 192 | 193 | Finally, we will have to execute the script again, this time providing the 194 | SVSM binary. Once the SVSM guest is up, you can check it is running on 195 | VMPL1 (lower privilege level) with: 196 | 197 | ``` 198 | [host@snp-host ~]# ./launch-qemu.sh -hda guest.qcow2 -sev-snp -svsm svsm.bin 199 | [guest@snp-guest ~]# dmesg | grep VMPL 200 | [ 1.264552] SEV: SNP running at VMPL1. 201 | ``` 202 | 203 | Note: The launch-qemu.sh script was updated to support the newer UPM-based 204 | SEV-SNP support. If you are running on an older SEV-SNP host kernel that 205 | doesn't support UPM, please add the -noupm parameter to the launch command: 206 | 207 | ``` 208 | [host@snp-host ~]# ./launch-qemu.sh -hda guest.qcow2 -sev-snp -svsm svsm.bin -noupm 209 | ``` 210 | 211 | By default, SVSM lives at 512 GB (SVSM\_GPA), and has 256 MB of memory 212 | (SVSM\_MEM). This can be changed at compilation. For example: 213 | 214 | ``` 215 | # make SVSM_GPA=0x90000000 SVSM_MEM=0x20000000 216 | ``` 217 | 218 | The SVSM page table applies an offset to its virtual addresses. 219 | 220 | ## Linux SVSM userspace 221 | 222 | Linux SVSM's main branch does not contain support for userspace (CPL3). 223 | If interested, checkout branch cpl-support. 224 | 225 | ## Contribution 226 | 227 | Please read CONTRIBUTING.md for instructions on contribution and style. 228 | 229 | ## Authors and License 230 | 231 | The original authors and maintainers of this software are: 232 | 233 | - [Thomas Lendacky](https://github.com/tlendacky) 234 | - [Carlos Bilbao](https://github.com/Zildj1an) 235 | 236 | and they will act as reviewers for future contributions. 237 | 238 | Other developers have made substantial contributions to this project, to 239 | obtain the full list, please refer to the [Contributors](https://github.com/AMDESE/linux-svsm/graphs/contributors) 240 | page or review the authorship information in the project's source code 241 | headers. 242 | 243 | Linux SVSM is distributed under the MIT license. For more information, read 244 | file LICENSE. 
To obtain information about the crates that Linux SVSM 245 | depends on, you can run: 246 | 247 | ``` 248 | $./scripts/crates.sh 249 | ``` 250 | -------------------------------------------------------------------------------- /external/libcrt/Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: MIT 2 | 3 | CFLAGS = -I./include -O3 -nostdinc -nostdlib 4 | CFLAGS += -m64 -march=x86-64 -mno-sse2 -fPIE 5 | CFLAGS += -fno-stack-protector 6 | CFLAGS += -ffreestanding 7 | 8 | CC := gcc 9 | 10 | PREFIX := /usr/local/ 11 | 12 | # Functions we stub out 13 | OBJS := src/stub.o 14 | 15 | # ctype 16 | OBJS += $(addprefix src/ctype/, \ 17 | isdigit.o \ 18 | islower.o \ 19 | isspace.o \ 20 | isupper.o \ 21 | tolower.o \ 22 | toupper.o \ 23 | ) 24 | # exit 25 | OBJS += src/exit/assert.o 26 | 27 | # prng 28 | OBJS += src/prng/rand.o 29 | 30 | # setjmp 31 | OBJS += $(addprefix src/setjmp/x86_64/, \ 32 | longjmp.o \ 33 | setjmp.o \ 34 | ) 35 | # stdio 36 | OBJS += $(addprefix src/stdio/, \ 37 | asprintf.o \ 38 | fprintf.o \ 39 | printf.o \ 40 | printf_wrapper.o \ 41 | snprintf.o \ 42 | sprintf.o \ 43 | vasprintf.o \ 44 | vsnprintf.o \ 45 | vsprintf.o \ 46 | ) 47 | # stdlib 48 | OBJS += $(addprefix src/stdlib/, \ 49 | atoi.o \ 50 | qsort.o \ 51 | qsort_nr.o \ 52 | ) 53 | # string 54 | OBJS += $(addprefix src/string/, \ 55 | memchr.o \ 56 | memcmp.o \ 57 | memcpy.o \ 58 | memmove.o \ 59 | memrchr.o \ 60 | memset.o \ 61 | stpcpy.o \ 62 | stpncpy.o \ 63 | strcasecmp.o \ 64 | strcat.o \ 65 | strchr.o \ 66 | strchrnul.o \ 67 | strcmp.o \ 68 | strcspn.o \ 69 | strcpy.o \ 70 | strdup.o \ 71 | strlen.o \ 72 | strncasecmp.o \ 73 | strncat.o \ 74 | strncmp.o \ 75 | strncpy.o \ 76 | strrchr.o \ 77 | strspn.o \ 78 | strstr.o \ 79 | ) 80 | # time 81 | OBJS += $(addprefix src/time/, \ 82 | __secs_to_tm.o \ 83 | gmtime_r.o \ 84 | time.o \ 85 | ) 86 | 87 | all: libcrt.a 88 | 89 | libcrt.a: $(OBJS) 90 | ar rcs $@ $(OBJS) 91 | 92 | %.o : %.c 93 | $(CC) $(CFLAGS) -c -o $@ $< 94 | 95 | %.o : %.s 96 | $(CC) $(CFLAGS) -c -o $@ $< 97 | 98 | clean: 99 | rm -f libcrt.a 100 | rm -f $(OBJS) 101 | -------------------------------------------------------------------------------- /external/libcrt/README.md: -------------------------------------------------------------------------------- 1 | # C Run-Time (CRT) Library 2 | 3 | The libcrt is a subset of the [musl libc](https://musl.libc.org/). It provides 4 | only the libc functions required to build the SVSM external dependencies. 5 | 6 | # Code organization 7 | 8 | The `libcrt.h` header centralizes all definitions, the other header files are 9 | just a proxy for the `libcrt.h`. Hence, when we include a header in a source 10 | file, we are actually including the entire `libcrt.h`. That allow us to build 11 | openssl without having to patch it to include missing headers. 12 | 13 | In order to build the SVSM dependencies, some functions are required to be 14 | defined at build time, however, not all of them are executed at runtime. For 15 | those cases, we just stub out the function by printing a message and returning 16 | an error. For easy tracking, all the function we stub out can be found in 17 | `src/stub.c`. 
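
As a rough illustration of that pattern, a stub of this kind typically prints a diagnostic and returns a failure value. The sketch below only shows the style: the choice of `fopen` as the stubbed function and the exact message are assumptions made here for illustration, not the actual contents of `src/stub.c`.

```c
/* Illustrative sketch of the stub pattern described above -- not taken
 * from src/stub.c. A function like fopen() may be needed at link time
 * by the external dependencies but is never expected to run, so the
 * stub just reports the call and fails. */
#include <stdio.h>

FILE *fopen(const char *restrict path, const char *restrict mode)
{
	printf("%s: not supported (%s)\n", __func__, path);
	return NULL;
}
```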
18 | -------------------------------------------------------------------------------- /external/libcrt/include/assert.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/atomic.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #ifndef _ATOMIC_H 4 | #define _ATOMIC_H 5 | 6 | #include 7 | 8 | #include 9 | 10 | #ifdef a_ll 11 | 12 | #ifndef a_pre_llsc 13 | #define a_pre_llsc() 14 | #endif 15 | 16 | #ifndef a_post_llsc 17 | #define a_post_llsc() 18 | #endif 19 | 20 | #ifndef a_cas 21 | #define a_cas a_cas 22 | static inline int a_cas(volatile int *p, int t, int s) 23 | { 24 | int old; 25 | a_pre_llsc(); 26 | do old = a_ll(p); 27 | while (old==t && !a_sc(p, s)); 28 | a_post_llsc(); 29 | return old; 30 | } 31 | #endif 32 | 33 | #ifndef a_swap 34 | #define a_swap a_swap 35 | static inline int a_swap(volatile int *p, int v) 36 | { 37 | int old; 38 | a_pre_llsc(); 39 | do old = a_ll(p); 40 | while (!a_sc(p, v)); 41 | a_post_llsc(); 42 | return old; 43 | } 44 | #endif 45 | 46 | #ifndef a_fetch_add 47 | #define a_fetch_add a_fetch_add 48 | static inline int a_fetch_add(volatile int *p, int v) 49 | { 50 | int old; 51 | a_pre_llsc(); 52 | do old = a_ll(p); 53 | while (!a_sc(p, (unsigned)old + v)); 54 | a_post_llsc(); 55 | return old; 56 | } 57 | #endif 58 | 59 | #ifndef a_fetch_and 60 | #define a_fetch_and a_fetch_and 61 | static inline int a_fetch_and(volatile int *p, int v) 62 | { 63 | int old; 64 | a_pre_llsc(); 65 | do old = a_ll(p); 66 | while (!a_sc(p, old & v)); 67 | a_post_llsc(); 68 | return old; 69 | } 70 | #endif 71 | 72 | #ifndef a_fetch_or 73 | #define a_fetch_or a_fetch_or 74 | static inline int a_fetch_or(volatile int *p, int v) 75 | { 76 | int old; 77 | a_pre_llsc(); 78 | do old = a_ll(p); 79 | while (!a_sc(p, old | v)); 80 | a_post_llsc(); 81 | return old; 82 | } 83 | #endif 84 | 85 | #endif 86 | 87 | #ifdef a_ll_p 88 | 89 | #ifndef a_cas_p 90 | #define a_cas_p a_cas_p 91 | static inline void *a_cas_p(volatile void *p, void *t, void *s) 92 | { 93 | void *old; 94 | a_pre_llsc(); 95 | do old = a_ll_p(p); 96 | while (old==t && !a_sc_p(p, s)); 97 | a_post_llsc(); 98 | return old; 99 | } 100 | #endif 101 | 102 | #endif 103 | 104 | #ifndef a_cas 105 | #error missing definition of a_cas 106 | #endif 107 | 108 | #ifndef a_swap 109 | #define a_swap a_swap 110 | static inline int a_swap(volatile int *p, int v) 111 | { 112 | int old; 113 | do old = *p; 114 | while (a_cas(p, old, v) != old); 115 | return old; 116 | } 117 | #endif 118 | 119 | #ifndef a_fetch_add 120 | #define a_fetch_add a_fetch_add 121 | static inline int a_fetch_add(volatile int *p, int v) 122 | { 123 | int old; 124 | do old = *p; 125 | while (a_cas(p, old, (unsigned)old+v) != old); 126 | return old; 127 | } 128 | #endif 129 | 130 | #ifndef a_fetch_and 131 | #define a_fetch_and a_fetch_and 132 | static inline int a_fetch_and(volatile int *p, int v) 133 | { 134 | int old; 135 | do old = *p; 136 | while (a_cas(p, old, old&v) != old); 137 | return old; 138 | } 139 | #endif 140 | #ifndef a_fetch_or 141 | #define a_fetch_or a_fetch_or 142 | static inline int a_fetch_or(volatile int *p, int v) 143 | { 144 | int old; 145 | do old = *p; 146 | while (a_cas(p, old, old|v) != old); 147 | return old; 148 | } 149 | #endif 150 | 151 | #ifndef a_and 152 | #define a_and 
a_and 153 | static inline void a_and(volatile int *p, int v) 154 | { 155 | a_fetch_and(p, v); 156 | } 157 | #endif 158 | 159 | #ifndef a_or 160 | #define a_or a_or 161 | static inline void a_or(volatile int *p, int v) 162 | { 163 | a_fetch_or(p, v); 164 | } 165 | #endif 166 | 167 | #ifndef a_inc 168 | #define a_inc a_inc 169 | static inline void a_inc(volatile int *p) 170 | { 171 | a_fetch_add(p, 1); 172 | } 173 | #endif 174 | 175 | #ifndef a_dec 176 | #define a_dec a_dec 177 | static inline void a_dec(volatile int *p) 178 | { 179 | a_fetch_add(p, -1); 180 | } 181 | #endif 182 | 183 | #ifndef a_store 184 | #define a_store a_store 185 | static inline void a_store(volatile int *p, int v) 186 | { 187 | #ifdef a_barrier 188 | a_barrier(); 189 | *p = v; 190 | a_barrier(); 191 | #else 192 | a_swap(p, v); 193 | #endif 194 | } 195 | #endif 196 | 197 | #ifndef a_barrier 198 | #define a_barrier a_barrier 199 | static void a_barrier() 200 | { 201 | volatile int tmp = 0; 202 | a_cas(&tmp, 0, 0); 203 | } 204 | #endif 205 | 206 | #ifndef a_spin 207 | #define a_spin a_barrier 208 | #endif 209 | 210 | #ifndef a_and_64 211 | #define a_and_64 a_and_64 212 | static inline void a_and_64(volatile uint64_t *p, uint64_t v) 213 | { 214 | union { uint64_t v; uint32_t r[2]; } u = { v }; 215 | if (u.r[0]+1) a_and((int *)p, u.r[0]); 216 | if (u.r[1]+1) a_and((int *)p+1, u.r[1]); 217 | } 218 | #endif 219 | 220 | #ifndef a_or_64 221 | #define a_or_64 a_or_64 222 | static inline void a_or_64(volatile uint64_t *p, uint64_t v) 223 | { 224 | union { uint64_t v; uint32_t r[2]; } u = { v }; 225 | if (u.r[0]) a_or((int *)p, u.r[0]); 226 | if (u.r[1]) a_or((int *)p+1, u.r[1]); 227 | } 228 | #endif 229 | 230 | #ifndef a_cas_p 231 | typedef char a_cas_p_undefined_but_pointer_not_32bit[-sizeof(char) == 0xffffffff ? 
1 : -1]; 232 | #define a_cas_p a_cas_p 233 | static inline void *a_cas_p(volatile void *p, void *t, void *s) 234 | { 235 | return (void *)a_cas((volatile int *)p, (int)t, (int)s); 236 | } 237 | #endif 238 | 239 | #ifndef a_or_l 240 | #define a_or_l a_or_l 241 | static inline void a_or_l(volatile void *p, long v) 242 | { 243 | if (sizeof(long) == sizeof(int)) a_or(p, v); 244 | else a_or_64(p, v); 245 | } 246 | #endif 247 | 248 | #ifndef a_crash 249 | #define a_crash a_crash 250 | static inline void a_crash() 251 | { 252 | *(volatile char *)0=0; 253 | } 254 | #endif 255 | 256 | #ifndef a_ctz_32 257 | #define a_ctz_32 a_ctz_32 258 | static inline int a_ctz_32(uint32_t x) 259 | { 260 | #ifdef a_clz_32 261 | return 31-a_clz_32(x&-x); 262 | #else 263 | static const char debruijn32[32] = { 264 | 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13, 265 | 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14 266 | }; 267 | return debruijn32[(x&-x)*0x076be629 >> 27]; 268 | #endif 269 | } 270 | #endif 271 | 272 | #ifndef a_ctz_64 273 | #define a_ctz_64 a_ctz_64 274 | static inline int a_ctz_64(uint64_t x) 275 | { 276 | static const char debruijn64[64] = { 277 | 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, 278 | 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, 279 | 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, 280 | 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 281 | }; 282 | if (sizeof(long) < 8) { 283 | uint32_t y = x; 284 | if (!y) { 285 | y = x>>32; 286 | return 32 + a_ctz_32(y); 287 | } 288 | return a_ctz_32(y); 289 | } 290 | return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58]; 291 | } 292 | #endif 293 | 294 | static inline int a_ctz_l(unsigned long x) 295 | { 296 | return (sizeof(long) < 8) ? 
a_ctz_32(x) : a_ctz_64(x); 297 | } 298 | 299 | #ifndef a_clz_64 300 | #define a_clz_64 a_clz_64 301 | static inline int a_clz_64(uint64_t x) 302 | { 303 | #ifdef a_clz_32 304 | if (x>>32) 305 | return a_clz_32(x>>32); 306 | return a_clz_32(x) + 32; 307 | #else 308 | uint32_t y; 309 | int r; 310 | if (x>>32) y=x>>32, r=0; else y=x, r=32; 311 | if (y>>16) y>>=16; else r |= 16; 312 | if (y>>8) y>>=8; else r |= 8; 313 | if (y>>4) y>>=4; else r |= 4; 314 | if (y>>2) y>>=2; else r |= 2; 315 | return r | !(y>>1); 316 | #endif 317 | } 318 | #endif 319 | 320 | #ifndef a_clz_32 321 | #define a_clz_32 a_clz_32 322 | static inline int a_clz_32(uint32_t x) 323 | { 324 | x >>= 1; 325 | x |= x >> 1; 326 | x |= x >> 2; 327 | x |= x >> 4; 328 | x |= x >> 8; 329 | x |= x >> 16; 330 | x++; 331 | return 31-a_ctz_32(x); 332 | } 333 | #endif 334 | 335 | #endif 336 | -------------------------------------------------------------------------------- /external/libcrt/include/bits/atomic_arch.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #define a_cas a_cas 4 | static inline int a_cas(volatile int *p, int t, int s) 5 | { 6 | __asm__ __volatile__ ( 7 | "lock ; cmpxchg %3, %1" 8 | : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" ); 9 | return t; 10 | } 11 | 12 | #define a_cas_p a_cas_p 13 | static inline void *a_cas_p(volatile void *p, void *t, void *s) 14 | { 15 | __asm__( "lock ; cmpxchg %3, %1" 16 | : "=a"(t), "=m"(*(void *volatile *)p) 17 | : "a"(t), "r"(s) : "memory" ); 18 | return t; 19 | } 20 | 21 | #define a_swap a_swap 22 | static inline int a_swap(volatile int *p, int v) 23 | { 24 | __asm__ __volatile__( 25 | "xchg %0, %1" 26 | : "=r"(v), "=m"(*p) : "0"(v) : "memory" ); 27 | return v; 28 | } 29 | 30 | #define a_fetch_add a_fetch_add 31 | static inline int a_fetch_add(volatile int *p, int v) 32 | { 33 | __asm__ __volatile__( 34 | "lock ; xadd %0, %1" 35 | : "=r"(v), "=m"(*p) : "0"(v) : "memory" ); 36 | return v; 37 | } 38 | 39 | #define a_and a_and 40 | static inline void a_and(volatile int *p, int v) 41 | { 42 | __asm__ __volatile__( 43 | "lock ; and %1, %0" 44 | : "=m"(*p) : "r"(v) : "memory" ); 45 | } 46 | 47 | #define a_or a_or 48 | static inline void a_or(volatile int *p, int v) 49 | { 50 | __asm__ __volatile__( 51 | "lock ; or %1, %0" 52 | : "=m"(*p) : "r"(v) : "memory" ); 53 | } 54 | 55 | #define a_and_64 a_and_64 56 | static inline void a_and_64(volatile uint64_t *p, uint64_t v) 57 | { 58 | __asm__ __volatile( 59 | "lock ; and %1, %0" 60 | : "=m"(*p) : "r"(v) : "memory" ); 61 | } 62 | 63 | #define a_or_64 a_or_64 64 | static inline void a_or_64(volatile uint64_t *p, uint64_t v) 65 | { 66 | __asm__ __volatile__( 67 | "lock ; or %1, %0" 68 | : "=m"(*p) : "r"(v) : "memory" ); 69 | } 70 | 71 | #define a_inc a_inc 72 | static inline void a_inc(volatile int *p) 73 | { 74 | __asm__ __volatile__( 75 | "lock ; incl %0" 76 | : "=m"(*p) : "m"(*p) : "memory" ); 77 | } 78 | 79 | #define a_dec a_dec 80 | static inline void a_dec(volatile int *p) 81 | { 82 | __asm__ __volatile__( 83 | "lock ; decl %0" 84 | : "=m"(*p) : "m"(*p) : "memory" ); 85 | } 86 | 87 | #define a_store a_store 88 | static inline void a_store(volatile int *p, int x) 89 | { 90 | __asm__ __volatile__( 91 | "mov %1, %0 ; lock ; orl $0,(%%rsp)" 92 | : "=m"(*p) : "r"(x) : "memory" ); 93 | } 94 | 95 | #define a_barrier a_barrier 96 | static inline void a_barrier() 97 | { 98 | __asm__ __volatile__( "" : : : "memory" ); 99 | } 100 | 101 | #define a_spin a_spin 102 | static 
inline void a_spin() 103 | { 104 | __asm__ __volatile__( "pause" : : : "memory" ); 105 | } 106 | 107 | #define a_crash a_crash 108 | static inline void a_crash() 109 | { 110 | __asm__ __volatile__( "hlt" : : : "memory" ); 111 | } 112 | 113 | #define a_ctz_64 a_ctz_64 114 | static inline int a_ctz_64(uint64_t x) 115 | { 116 | __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) ); 117 | return x; 118 | } 119 | 120 | #define a_clz_64 a_clz_64 121 | static inline int a_clz_64(uint64_t x) 122 | { 123 | __asm__( "bsr %1,%0 ; xor $63,%0" : "=r"(x) : "r"(x) ); 124 | return x; 125 | } 126 | -------------------------------------------------------------------------------- /external/libcrt/include/ctype.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/dirent.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/endian.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/errno.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/fcntl.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/inttypes.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/limits.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/memory.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdarg.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdatomic.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdbool.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stddef.h: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdint.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdio.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/stdlib.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/string.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/strings.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/shm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/stat.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/syscall.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/time.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/types.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/sys/utsname.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/time.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | -------------------------------------------------------------------------------- /external/libcrt/include/unistd.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 
-------------------------------------------------------------------------------- /external/libcrt/src/ctype/isascii.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int isascii(int c) 6 | { 7 | return !(c&~0x7f); 8 | } 9 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/isdigit.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int isdigit(int c) 4 | { 5 | return (unsigned)c-'0' < 10; 6 | } 7 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/islower.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int islower(int c) 6 | { 7 | return (unsigned)c-'a' < 26; 8 | } 9 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/isspace.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int isspace(int c) 6 | { 7 | return c == ' ' || (unsigned)c-'\t' < 5; 8 | } 9 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/isupper.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int isupper(int c) 6 | { 7 | return (unsigned)c-'A' < 26; 8 | } 9 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/tolower.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int tolower(int c) 6 | { 7 | if (isupper(c)) return c | 32; 8 | return c; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/ctype/toupper.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int toupper(int c) 6 | { 7 | if (islower(c)) return c & 0x5f; 8 | return c; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/exit/assert.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | void __assert_fail(const char *expr, const char *file, int line, const char *func) 7 | { 8 | printf("Assertion failed: %s (%s: %s: %d)\n", expr, file, func, line); 9 | abort(); 10 | } 11 | -------------------------------------------------------------------------------- /external/libcrt/src/prng/rand.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | static uint64_t seed; 8 | 9 | void srand(unsigned s) 10 | { 11 | seed = s - 1; 12 | } 13 | 14 | /* return 0 on success */ 15 | static inline int rdrand64(uint64_t *rnd) 16 | { 17 | unsigned char ok; 18 | 19 | __asm__ volatile("rdrand %0; setc %1":"=r"(*rnd), "=qm"(ok)); 20 | 21 | return (ok) ? 
0 : -1; 22 | } 23 | 24 | int rand(void) 25 | { 26 | uint64_t r = 0; 27 | 28 | if (rdrand64(&r)) { 29 | printf("%s, RDRAND failed\n", __func__); 30 | } 31 | 32 | return r; 33 | } 34 | -------------------------------------------------------------------------------- /external/libcrt/src/setjmp/x86_64/longjmp.s: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* Copyright 2011-2012 Nicholas J. Kain, licensed under standard MIT license */ 3 | 4 | .global _longjmp 5 | .global longjmp 6 | .type _longjmp,@function 7 | .type longjmp,@function 8 | _longjmp: 9 | longjmp: 10 | xor %eax,%eax 11 | cmp $1,%esi /* CF = val ? 0 : 1 */ 12 | adc %esi,%eax /* eax = val + !val */ 13 | mov (%rdi),%rbx /* rdi is the jmp_buf, restore regs from it */ 14 | mov 8(%rdi),%rbp 15 | mov 16(%rdi),%r12 16 | mov 24(%rdi),%r13 17 | mov 32(%rdi),%r14 18 | mov 40(%rdi),%r15 19 | mov 48(%rdi),%rsp 20 | jmp *56(%rdi) /* goto saved address without altering rsp */ 21 | -------------------------------------------------------------------------------- /external/libcrt/src/setjmp/x86_64/setjmp.s: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* Copyright 2011-2012 Nicholas J. Kain, licensed under standard MIT license */ 3 | 4 | .global __setjmp 5 | .global _setjmp 6 | .global setjmp 7 | .type __setjmp,@function 8 | .type _setjmp,@function 9 | .type setjmp,@function 10 | __setjmp: 11 | _setjmp: 12 | setjmp: 13 | mov %rbx,(%rdi) /* rdi is jmp_buf, move registers onto it */ 14 | mov %rbp,8(%rdi) 15 | mov %r12,16(%rdi) 16 | mov %r13,24(%rdi) 17 | mov %r14,32(%rdi) 18 | mov %r15,40(%rdi) 19 | lea 8(%rsp),%rdx /* this is our rsp WITHOUT current ret addr */ 20 | mov %rdx,48(%rdi) 21 | mov (%rsp),%rdx /* save return addr ptr for new rip */ 22 | mov %rdx,56(%rdi) 23 | xor %eax,%eax /* always return 0 */ 24 | ret 25 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/asprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int asprintf(char **s, const char *fmt, ...) 7 | { 8 | int ret; 9 | va_list ap; 10 | va_start(ap, fmt); 11 | ret = vasprintf(s, fmt, ap); 12 | va_end(ap); 13 | return ret; 14 | } 15 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/fprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int fprintf(FILE *restrict f, const char *restrict fmt, ...) 7 | { 8 | int ret; 9 | va_list ap; 10 | va_start(ap, fmt); 11 | ret = vprints(fmt, ap); 12 | va_end(ap); 13 | return ret; 14 | } 15 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/printf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int printf(const char *restrict fmt, ...) 7 | { 8 | int ret; 9 | va_list ap; 10 | va_start(ap, fmt); 11 | ret = vprints(fmt, ap); 12 | va_end(ap); 13 | return ret; 14 | } 15 | 16 | int dprintf(int fd, const char *__restrict fmt, ...) 
17 | { 18 | int ret; 19 | va_list ap; 20 | va_start(ap, fmt); 21 | ret = vprints(fmt, ap); 22 | va_end(ap); 23 | return ret; 24 | } 25 | 26 | int vdprintf(int fd, const char *restrict fmt, va_list ap) 27 | { 28 | return vprints(fmt, ap); 29 | } 30 | 31 | int puts(const char *s) 32 | { 33 | return printf("%s\n", s); 34 | } 35 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/printf_wrapper.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | enum { 8 | LEN_MOD_INVALID = 0, 9 | LEN_MOD_HALF_HALF, 10 | LEN_MOD_HALF, 11 | LEN_MOD_INT, 12 | LEN_MOD_LONG, 13 | LEN_MOD_LONG_LONG, 14 | LEN_MOD_MAX 15 | }; 16 | 17 | static char *format_string(char *cur, char *end, va_list ap) 18 | { 19 | char *tmp = va_arg(ap, char *); 20 | 21 | while (*tmp) { 22 | if (cur < end) 23 | *cur = *tmp; 24 | cur++; 25 | tmp++; 26 | } 27 | 28 | return cur; 29 | } 30 | 31 | static char *format_base16(char *cur, char *end, int width, va_list ap) 32 | { 33 | unsigned long long num; 34 | char *tmp, buffer[32]; 35 | unsigned int size; 36 | char *map; 37 | 38 | map = "0123456789abcdef"; 39 | 40 | if (cur < end) 41 | *cur++ = '0'; 42 | if (cur < end) 43 | *cur++ = 'x'; 44 | 45 | if (width == LEN_MOD_HALF_HALF) { 46 | unsigned char n = (unsigned char) va_arg(ap, int); 47 | num = (unsigned long long)n; 48 | size = 1; 49 | } else if (width == LEN_MOD_HALF) { 50 | unsigned short n = (unsigned short) va_arg(ap, int); 51 | num = (unsigned long long)n; 52 | size = 2; 53 | } else if (width == LEN_MOD_INT) { 54 | unsigned int n = (unsigned int) va_arg(ap, int); 55 | num = (unsigned long long)n; 56 | size = 4; 57 | } else if (width == LEN_MOD_LONG) { 58 | unsigned long n = (unsigned long) va_arg(ap, long); 59 | num = (unsigned long long)n; 60 | size = 8; 61 | } else { 62 | unsigned long long n = (unsigned long long) va_arg(ap, long long); 63 | num = (unsigned long long)n; 64 | size = 8; 65 | } 66 | 67 | tmp = buffer + sizeof(buffer); 68 | 69 | tmp--; 70 | *tmp = '\0'; 71 | while (size--) { 72 | tmp--; 73 | *tmp = map[num & 0xf]; 74 | num >>= 4; 75 | 76 | tmp--; 77 | *tmp = map[num & 0xf]; 78 | num >>= 4; 79 | } 80 | 81 | while (*tmp) { 82 | if (cur < end) 83 | *cur = *tmp; 84 | cur++; 85 | tmp++; 86 | } 87 | 88 | return cur; 89 | } 90 | 91 | static char *format_base10(char *cur, char *end, int width, bool signed_format, va_list ap) 92 | { 93 | unsigned long long num; 94 | char *tmp, buffer[32]; 95 | 96 | if (width == LEN_MOD_HALF_HALF) { 97 | unsigned char n = (unsigned char) va_arg(ap, int); 98 | if (signed_format && (char)n < 0) { 99 | if (cur < end) 100 | *cur++ = '-'; 101 | n = -(char)n; 102 | } 103 | num = (unsigned long long)n; 104 | } else if (width == LEN_MOD_HALF) { 105 | unsigned short n = (unsigned short) va_arg(ap, int); 106 | if (signed_format && (short)n < 0) { 107 | if (cur < end) 108 | *cur++ = '-'; 109 | n = -(short)n; 110 | } 111 | num = (unsigned long long)n; 112 | } else if (width == LEN_MOD_INT) { 113 | unsigned int n = (unsigned int) va_arg(ap, int); 114 | if (signed_format && (int)n < 0) { 115 | if (cur < end) 116 | *cur++ = '-'; 117 | n = -(int)n; 118 | } 119 | num = (unsigned long long)n; 120 | } else if (width == LEN_MOD_LONG) { 121 | unsigned long n = (unsigned long) va_arg(ap, long); 122 | if (signed_format && (long)n < 0) { 123 | if (cur < end) 124 | *cur++ = '-'; 125 | n = -(long)n; 126 | } 127 | num = (unsigned long long)n; 128 | 
} else { 129 | unsigned long long n = (unsigned long long) va_arg(ap, long long); 130 | if (signed_format && (long long)n < 0) { 131 | if (cur < end) 132 | *cur++ = '-'; 133 | n = -(long long)n; 134 | } 135 | num = (unsigned long long)n; 136 | } 137 | 138 | tmp = buffer + sizeof(buffer); 139 | 140 | tmp--; 141 | *tmp = '\0'; 142 | do { 143 | tmp--; 144 | *tmp = (char)('0' + num % 10); 145 | 146 | num /= 10; 147 | } while (num); 148 | 149 | while (*tmp) { 150 | if (cur < end) 151 | *cur = *tmp; 152 | 153 | cur++; 154 | tmp++; 155 | } 156 | 157 | return cur; 158 | } 159 | 160 | int vsnprints(char *str, size_t size, const char *format, va_list ap) 161 | { 162 | unsigned int width; 163 | char *cur, *end; 164 | bool convert; 165 | 166 | cur = str; 167 | end = cur + size; 168 | if (end < cur) { 169 | end = (void *)-1; 170 | size = end - cur; 171 | } 172 | 173 | convert = false; 174 | while (*format) { 175 | if (!convert) { 176 | if (*format != '%') { 177 | if (cur < end) 178 | *cur = *format; 179 | 180 | cur++; 181 | format++; 182 | continue; 183 | } 184 | 185 | format++; 186 | if (*format == '%') { 187 | if (cur < end) 188 | *cur = '%'; 189 | 190 | cur++; 191 | format++; 192 | continue; 193 | } 194 | 195 | convert = true; 196 | width = LEN_MOD_INT; 197 | } else { 198 | bool signed_format = false; 199 | 200 | switch (*format) { 201 | case 'h': 202 | width--; 203 | if (width == LEN_MOD_INVALID) 204 | convert = false; 205 | break; 206 | case 'l': 207 | width++; 208 | if (width == LEN_MOD_MAX) 209 | convert = false; 210 | break; 211 | 212 | case 'd': 213 | signed_format = true; 214 | case 'u': 215 | cur = format_base10(cur, end, width, signed_format, ap); 216 | convert = false; 217 | break; 218 | 219 | case 'p': 220 | width = LEN_MOD_LONG; 221 | case 'x': 222 | cur = format_base16(cur, end, width, ap); 223 | convert = false; 224 | break; 225 | 226 | case 's': { 227 | cur = format_string(cur, end, ap); 228 | convert = false; 229 | break; 230 | } 231 | 232 | default: 233 | convert = false; 234 | } 235 | 236 | format++; 237 | } 238 | } 239 | 240 | if (size) { 241 | if (cur < end) 242 | *cur = '\0'; 243 | else 244 | *(end - 1) = '\0'; 245 | } 246 | 247 | return cur - str; 248 | } 249 | 250 | // A format string is converted to the actual string, which is then 251 | // truncated to PRINT_STR_MAX before printing. 252 | #define PRINT_STR_MAX 256 253 | 254 | int vprints(const char *format, va_list ap) 255 | { 256 | char buffer[PRINT_STR_MAX]; 257 | int ret; 258 | 259 | ret = vsnprints(buffer, sizeof(buffer), format, ap); 260 | 261 | if (ret >= sizeof(buffer)) { 262 | ret = PRINT_STR_MAX; 263 | buffer[PRINT_STR_MAX - 1] = '\0'; 264 | } 265 | 266 | serial_out(buffer, ret); 267 | 268 | return ret; 269 | } 270 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/snprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int snprintf(char *restrict s, size_t n, const char *restrict fmt, ...) 
7 | { 8 | int ret; 9 | va_list ap; 10 | va_start(ap, fmt); 11 | ret = vsnprintf(s, n, fmt, ap); 12 | va_end(ap); 13 | return ret; 14 | } 15 | 16 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/sprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int sprintf(char *restrict s, const char *restrict fmt, ...) 7 | { 8 | int ret; 9 | va_list ap; 10 | va_start(ap, fmt); 11 | ret = vsprintf(s, fmt, ap); 12 | va_end(ap); 13 | return ret; 14 | } 15 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/vasprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | int vasprintf(char **s, const char *fmt, va_list ap) 8 | { 9 | va_list ap2; 10 | va_copy(ap2, ap); 11 | int l = vsnprintf(0, 0, fmt, ap2); 12 | va_end(ap2); 13 | 14 | if (l<0 || !(*s=malloc(l+1U))) return -1; 15 | return vsnprintf(*s, l+1U, fmt, ap); 16 | } 17 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/vsnprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int vsnprintf(char *restrict s, size_t n, const char *restrict fmt, va_list ap) 7 | { 8 | return vsnprints(s, n, fmt, ap); 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/stdio/vsprintf.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int vsprintf(char *restrict s, const char *restrict fmt, va_list ap) 7 | { 8 | return vsnprintf(s, INT_MAX, fmt, ap); 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/stdlib/atoi.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int atoi(const char *s) 7 | { 8 | int n=0, neg=0; 9 | while (isspace(*s)) s++; 10 | switch (*s) { 11 | case '-': neg=1; 12 | case '+': s++; 13 | } 14 | /* Compute n as a negative number to avoid overflow on INT_MIN */ 15 | while (isdigit(*s)) 16 | n = 10*n - (*s++ - '0'); 17 | return neg ? n : -n; 18 | } 19 | -------------------------------------------------------------------------------- /external/libcrt/src/stdlib/qsort.c: -------------------------------------------------------------------------------- 1 | /* Copyright (C) 2011 by Valentin Ochs 2 | * 3 | * Permission is hereby granted, free of charge, to any person obtaining a copy 4 | * of this software and associated documentation files (the "Software"), to 5 | * deal in the Software without restriction, including without limitation the 6 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 | * sell copies of the Software, and to permit persons to whom the Software is 8 | * furnished to do so, subject to the following conditions: 9 | * 10 | * The above copyright notice and this permission notice shall be included in 11 | * all copies or substantial portions of the Software. 
12 | * 13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 | * IN THE SOFTWARE. 20 | */ 21 | 22 | /* Minor changes by Rich Felker for integration in musl, 2011-04-27. */ 23 | 24 | /* Smoothsort, an adaptive variant of Heapsort. Memory usage: O(1). 25 | Run time: Worst case O(n log n), close to O(n) in the mostly-sorted case. */ 26 | 27 | #define _BSD_SOURCE 28 | #include 29 | #include 30 | #include 31 | 32 | #include "atomic.h" 33 | #define ntz(x) a_ctz_l((x)) 34 | 35 | typedef int (*cmpfun)(const void *, const void *, void *); 36 | 37 | static inline int pntz(size_t p[2]) { 38 | int r = ntz(p[0] - 1); 39 | if(r != 0 || (r = 8*sizeof(size_t) + ntz(p[1])) != 8*sizeof(size_t)) { 40 | return r; 41 | } 42 | return 0; 43 | } 44 | 45 | static void cycle(size_t width, unsigned char* ar[], int n) 46 | { 47 | unsigned char tmp[256]; 48 | size_t l; 49 | int i; 50 | 51 | if(n < 2) { 52 | return; 53 | } 54 | 55 | ar[n] = tmp; 56 | while(width) { 57 | l = sizeof(tmp) < width ? sizeof(tmp) : width; 58 | memcpy(ar[n], ar[0], l); 59 | for(i = 0; i < n; i++) { 60 | memcpy(ar[i], ar[i + 1], l); 61 | ar[i] += l; 62 | } 63 | width -= l; 64 | } 65 | } 66 | 67 | /* shl() and shr() need n > 0 */ 68 | static inline void shl(size_t p[2], int n) 69 | { 70 | if(n >= 8 * sizeof(size_t)) { 71 | n -= 8 * sizeof(size_t); 72 | p[1] = p[0]; 73 | p[0] = 0; 74 | } 75 | p[1] <<= n; 76 | p[1] |= p[0] >> (sizeof(size_t) * 8 - n); 77 | p[0] <<= n; 78 | } 79 | 80 | static inline void shr(size_t p[2], int n) 81 | { 82 | if(n >= 8 * sizeof(size_t)) { 83 | n -= 8 * sizeof(size_t); 84 | p[0] = p[1]; 85 | p[1] = 0; 86 | } 87 | p[0] >>= n; 88 | p[0] |= p[1] << (sizeof(size_t) * 8 - n); 89 | p[1] >>= n; 90 | } 91 | 92 | static void sift(unsigned char *head, size_t width, cmpfun cmp, void *arg, int pshift, size_t lp[]) 93 | { 94 | unsigned char *rt, *lf; 95 | unsigned char *ar[14 * sizeof(size_t) + 1]; 96 | int i = 1; 97 | 98 | ar[0] = head; 99 | while(pshift > 1) { 100 | rt = head - width; 101 | lf = head - width - lp[pshift - 2]; 102 | 103 | if(cmp(ar[0], lf, arg) >= 0 && cmp(ar[0], rt, arg) >= 0) { 104 | break; 105 | } 106 | if(cmp(lf, rt, arg) >= 0) { 107 | ar[i++] = lf; 108 | head = lf; 109 | pshift -= 1; 110 | } else { 111 | ar[i++] = rt; 112 | head = rt; 113 | pshift -= 2; 114 | } 115 | } 116 | cycle(width, ar, i); 117 | } 118 | 119 | static void trinkle(unsigned char *head, size_t width, cmpfun cmp, void *arg, size_t pp[2], int pshift, int trusty, size_t lp[]) 120 | { 121 | unsigned char *stepson, 122 | *rt, *lf; 123 | size_t p[2]; 124 | unsigned char *ar[14 * sizeof(size_t) + 1]; 125 | int i = 1; 126 | int trail; 127 | 128 | p[0] = pp[0]; 129 | p[1] = pp[1]; 130 | 131 | ar[0] = head; 132 | while(p[0] != 1 || p[1] != 0) { 133 | stepson = head - lp[pshift]; 134 | if(cmp(stepson, ar[0], arg) <= 0) { 135 | break; 136 | } 137 | if(!trusty && pshift > 1) { 138 | rt = head - width; 139 | lf = head - width - lp[pshift - 2]; 140 | if(cmp(rt, stepson, arg) >= 0 || cmp(lf, stepson, arg) >= 0) { 141 | break; 142 | } 143 | } 144 | 145 | ar[i++] = stepson; 146 | head = stepson; 147 | trail = pntz(p); 148 | shr(p, 
trail); 149 | pshift += trail; 150 | trusty = 0; 151 | } 152 | if(!trusty) { 153 | cycle(width, ar, i); 154 | sift(head, width, cmp, arg, pshift, lp); 155 | } 156 | } 157 | 158 | void __qsort_r(void *base, size_t nel, size_t width, cmpfun cmp, void *arg) 159 | { 160 | size_t lp[12*sizeof(size_t)]; 161 | size_t i, size = width * nel; 162 | unsigned char *head, *high; 163 | size_t p[2] = {1, 0}; 164 | int pshift = 1; 165 | int trail; 166 | 167 | if (!size) return; 168 | 169 | head = base; 170 | high = head + size - width; 171 | 172 | /* Precompute Leonardo numbers, scaled by element width */ 173 | for(lp[0]=lp[1]=width, i=2; (lp[i]=lp[i-2]+lp[i-1]+width) < size; i++); 174 | 175 | while(head < high) { 176 | if((p[0] & 3) == 3) { 177 | sift(head, width, cmp, arg, pshift, lp); 178 | shr(p, 2); 179 | pshift += 2; 180 | } else { 181 | if(lp[pshift - 1] >= high - head) { 182 | trinkle(head, width, cmp, arg, p, pshift, 0, lp); 183 | } else { 184 | sift(head, width, cmp, arg, pshift, lp); 185 | } 186 | 187 | if(pshift == 1) { 188 | shl(p, 1); 189 | pshift = 0; 190 | } else { 191 | shl(p, pshift - 1); 192 | pshift = 1; 193 | } 194 | } 195 | 196 | p[0] |= 1; 197 | head += width; 198 | } 199 | 200 | trinkle(head, width, cmp, arg, p, pshift, 0, lp); 201 | 202 | while(pshift != 1 || p[0] != 1 || p[1] != 0) { 203 | if(pshift <= 1) { 204 | trail = pntz(p); 205 | shr(p, trail); 206 | pshift += trail; 207 | } else { 208 | shl(p, 2); 209 | pshift -= 2; 210 | p[0] ^= 7; 211 | shr(p, 1); 212 | trinkle(head - lp[pshift] - width, width, cmp, arg, p, pshift + 1, 1, lp); 213 | shl(p, 1); 214 | p[0] |= 1; 215 | trinkle(head - width, width, cmp, arg, p, pshift, 1, lp); 216 | } 217 | head -= width; 218 | } 219 | } 220 | 221 | weak_alias(__qsort_r, qsort_r); 222 | -------------------------------------------------------------------------------- /external/libcrt/src/stdlib/qsort_nr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #define _BSD_SOURCE 4 | #include 5 | 6 | typedef int (*cmpfun)(const void *, const void *); 7 | 8 | static int wrapper_cmp(const void *v1, const void *v2, void *cmp) 9 | { 10 | return ((cmpfun)cmp)(v1, v2); 11 | } 12 | 13 | void qsort(void *base, size_t nel, size_t width, cmpfun cmp) 14 | { 15 | __qsort_r(base, nel, width, wrapper_cmp, (void *)cmp); 16 | } 17 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memchr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #define SS (sizeof(size_t)) 8 | #define ALIGN (sizeof(size_t)-1) 9 | #define ONES ((size_t)-1/UCHAR_MAX) 10 | #define HIGHS (ONES * (UCHAR_MAX/2+1)) 11 | #define HASZERO(x) ((x)-ONES & ~(x) & HIGHS) 12 | 13 | void *memchr(const void *src, int c, size_t n) 14 | { 15 | const unsigned char *s = src; 16 | c = (unsigned char)c; 17 | #ifdef __GNUC__ 18 | for (; ((uintptr_t)s & ALIGN) && n && *s != c; s++, n--); 19 | if (n && *s != c) { 20 | typedef size_t __attribute__((__may_alias__)) word; 21 | const word *w; 22 | size_t k = ONES * c; 23 | for (w = (const void *)s; n>=SS && !HASZERO(*w^k); w++, n-=SS); 24 | s = (const void *)w; 25 | } 26 | #endif 27 | for (; n && *s != c; s++, n--); 28 | return n ? 
(void *)s : 0; 29 | } 30 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memcmp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int memcmp(const void *vl, const void *vr, size_t n) 6 | { 7 | const unsigned char *l=vl, *r=vr; 8 | for (; n && *l == *r; n--, l++, r++); 9 | return n ? *l-*r : 0; 10 | } 11 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memcpy.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | void *memcpy(void *restrict dest, const void *restrict src, size_t n) 8 | { 9 | unsigned char *d = dest; 10 | const unsigned char *s = src; 11 | 12 | #ifdef __GNUC__ 13 | 14 | #if __BYTE_ORDER == __LITTLE_ENDIAN 15 | #define LS >> 16 | #define RS << 17 | #else 18 | #define LS << 19 | #define RS >> 20 | #endif 21 | 22 | typedef uint32_t __attribute__((__may_alias__)) u32; 23 | uint32_t w, x; 24 | 25 | for (; (uintptr_t)s % 4 && n; n--) *d++ = *s++; 26 | 27 | if ((uintptr_t)d % 4 == 0) { 28 | for (; n>=16; s+=16, d+=16, n-=16) { 29 | *(u32 *)(d+0) = *(u32 *)(s+0); 30 | *(u32 *)(d+4) = *(u32 *)(s+4); 31 | *(u32 *)(d+8) = *(u32 *)(s+8); 32 | *(u32 *)(d+12) = *(u32 *)(s+12); 33 | } 34 | if (n&8) { 35 | *(u32 *)(d+0) = *(u32 *)(s+0); 36 | *(u32 *)(d+4) = *(u32 *)(s+4); 37 | d += 8; s += 8; 38 | } 39 | if (n&4) { 40 | *(u32 *)(d+0) = *(u32 *)(s+0); 41 | d += 4; s += 4; 42 | } 43 | if (n&2) { 44 | *d++ = *s++; *d++ = *s++; 45 | } 46 | if (n&1) { 47 | *d = *s; 48 | } 49 | return dest; 50 | } 51 | 52 | if (n >= 32) switch ((uintptr_t)d % 4) { 53 | case 1: 54 | w = *(u32 *)s; 55 | *d++ = *s++; 56 | *d++ = *s++; 57 | *d++ = *s++; 58 | n -= 3; 59 | for (; n>=17; s+=16, d+=16, n-=16) { 60 | x = *(u32 *)(s+1); 61 | *(u32 *)(d+0) = (w LS 24) | (x RS 8); 62 | w = *(u32 *)(s+5); 63 | *(u32 *)(d+4) = (x LS 24) | (w RS 8); 64 | x = *(u32 *)(s+9); 65 | *(u32 *)(d+8) = (w LS 24) | (x RS 8); 66 | w = *(u32 *)(s+13); 67 | *(u32 *)(d+12) = (x LS 24) | (w RS 8); 68 | } 69 | break; 70 | case 2: 71 | w = *(u32 *)s; 72 | *d++ = *s++; 73 | *d++ = *s++; 74 | n -= 2; 75 | for (; n>=18; s+=16, d+=16, n-=16) { 76 | x = *(u32 *)(s+2); 77 | *(u32 *)(d+0) = (w LS 16) | (x RS 16); 78 | w = *(u32 *)(s+6); 79 | *(u32 *)(d+4) = (x LS 16) | (w RS 16); 80 | x = *(u32 *)(s+10); 81 | *(u32 *)(d+8) = (w LS 16) | (x RS 16); 82 | w = *(u32 *)(s+14); 83 | *(u32 *)(d+12) = (x LS 16) | (w RS 16); 84 | } 85 | break; 86 | case 3: 87 | w = *(u32 *)s; 88 | *d++ = *s++; 89 | n -= 1; 90 | for (; n>=19; s+=16, d+=16, n-=16) { 91 | x = *(u32 *)(s+3); 92 | *(u32 *)(d+0) = (w LS 8) | (x RS 24); 93 | w = *(u32 *)(s+7); 94 | *(u32 *)(d+4) = (x LS 8) | (w RS 24); 95 | x = *(u32 *)(s+11); 96 | *(u32 *)(d+8) = (w LS 8) | (x RS 24); 97 | w = *(u32 *)(s+15); 98 | *(u32 *)(d+12) = (x LS 8) | (w RS 24); 99 | } 100 | break; 101 | } 102 | if (n&16) { 103 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 104 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 105 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 106 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 107 | } 108 | if (n&8) { 109 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 110 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 111 | } 112 | if (n&4) { 113 | *d++ = *s++; *d++ = *s++; *d++ = *s++; *d++ = *s++; 114 | } 115 | if 
(n&2) { 116 | *d++ = *s++; *d++ = *s++; 117 | } 118 | if (n&1) { 119 | *d = *s; 120 | } 121 | return dest; 122 | #endif 123 | 124 | for (; n; n--) *d++ = *s++; 125 | return dest; 126 | } 127 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memmove.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | #ifdef __GNUC__ 7 | typedef __attribute__((__may_alias__)) size_t WT; 8 | #define WS (sizeof(WT)) 9 | #endif 10 | 11 | void *memmove(void *dest, const void *src, size_t n) 12 | { 13 | char *d = dest; 14 | const char *s = src; 15 | 16 | if (d==s) return d; 17 | if ((uintptr_t)s-(uintptr_t)d-n <= -2*n) return memcpy(d, s, n); 18 | 19 | if (d<s) { 20 | #ifdef __GNUC__ 21 | if ((uintptr_t)s % WS == (uintptr_t)d % WS) { 22 | while ((uintptr_t)d % WS) { 23 | if (!n--) return dest; 24 | *d++ = *s++; 25 | } 26 | for (; n>=WS; n-=WS, d+=WS, s+=WS) *(WT *)d = *(WT *)s; 27 | } 28 | #endif 29 | for (; n; n--) *d++ = *s++; 30 | } else { 31 | #ifdef __GNUC__ 32 | if ((uintptr_t)s % WS == (uintptr_t)d % WS) { 33 | while ((uintptr_t)(d+n) % WS) { 34 | if (!n--) return dest; 35 | d[n] = s[n]; 36 | } 37 | while (n>=WS) n-=WS, *(WT *)(d+n) = *(WT *)(s+n); 38 | } 39 | #endif 40 | while (n) n--, d[n] = s[n]; 41 | } 42 | 43 | return dest; 44 | } 45 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memrchr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | void *__memrchr(const void *m, int c, size_t n) 6 | { 7 | const unsigned char *s = m; 8 | c = (unsigned char)c; 9 | while (n--) if (s[n]==c) return (void *)(s+n); 10 | return 0; 11 | } 12 | 13 | weak_alias(__memrchr, memrchr); 14 | -------------------------------------------------------------------------------- /external/libcrt/src/string/memset.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | void *memset(void *dest, int c, size_t n) 7 | { 8 | unsigned char *s = dest; 9 | size_t k; 10 | 11 | /* Fill head and tail with minimal branching. Each 12 | * conditional ensures that all the subsequently used 13 | * offsets are well-defined and in the dest region. */ 14 | 15 | if (!n) return dest; 16 | s[0] = c; 17 | s[n-1] = c; 18 | if (n <= 2) return dest; 19 | s[1] = c; 20 | s[2] = c; 21 | s[n-2] = c; 22 | s[n-3] = c; 23 | if (n <= 6) return dest; 24 | s[3] = c; 25 | s[n-4] = c; 26 | if (n <= 8) return dest; 27 | 28 | /* Advance pointer to align it at a 4-byte boundary, 29 | * and truncate n to a multiple of 4. The previous code 30 | * already took care of any head/tail that get cut off 31 | * by the alignment. */ 32 | 33 | k = -(uintptr_t)s & 3; 34 | s += k; 35 | n -= k; 36 | n &= -4; 37 | 38 | #ifdef __GNUC__ 39 | typedef uint32_t __attribute__((__may_alias__)) u32; 40 | typedef uint64_t __attribute__((__may_alias__)) u64; 41 | 42 | u32 c32 = ((u32)-1)/255 * (unsigned char)c; 43 | 44 | /* In preparation to copy 32 bytes at a time, aligned on 45 | * an 8-byte boundary, fill head/tail up to 28 bytes each. 46 | * As in the initial byte-based head/tail fill, each 47 | * conditional below ensures that the subsequent offsets 48 | * are valid (e.g. !(n<=24) implies n>=28).
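 * (Likewise, since n has been truncated to a multiple of 4, !(n<=8) implies n>=12, which covers the next round of four-byte stores.)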
*/ 49 | 50 | *(u32 *)(s+0) = c32; 51 | *(u32 *)(s+n-4) = c32; 52 | if (n <= 8) return dest; 53 | *(u32 *)(s+4) = c32; 54 | *(u32 *)(s+8) = c32; 55 | *(u32 *)(s+n-12) = c32; 56 | *(u32 *)(s+n-8) = c32; 57 | if (n <= 24) return dest; 58 | *(u32 *)(s+12) = c32; 59 | *(u32 *)(s+16) = c32; 60 | *(u32 *)(s+20) = c32; 61 | *(u32 *)(s+24) = c32; 62 | *(u32 *)(s+n-28) = c32; 63 | *(u32 *)(s+n-24) = c32; 64 | *(u32 *)(s+n-20) = c32; 65 | *(u32 *)(s+n-16) = c32; 66 | 67 | /* Align to a multiple of 8 so we can fill 64 bits at a time, 68 | * and avoid writing the same bytes twice as much as is 69 | * practical without introducing additional branching. */ 70 | 71 | k = 24 + ((uintptr_t)s & 4); 72 | s += k; 73 | n -= k; 74 | 75 | /* If this loop is reached, 28 tail bytes have already been 76 | * filled, so any remainder when n drops below 32 can be 77 | * safely ignored. */ 78 | 79 | u64 c64 = c32 | ((u64)c32 << 32); 80 | for (; n >= 32; n-=32, s+=32) { 81 | *(u64 *)(s+0) = c64; 82 | *(u64 *)(s+8) = c64; 83 | *(u64 *)(s+16) = c64; 84 | *(u64 *)(s+24) = c64; 85 | } 86 | #else 87 | /* Pure C fallback with no aliasing violations. */ 88 | for (; n; n--, s++) *s = c; 89 | #endif 90 | 91 | return dest; 92 | } 93 | -------------------------------------------------------------------------------- /external/libcrt/src/string/stpcpy.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #define ALIGN (sizeof(size_t)) 8 | #define ONES ((size_t)-1/UCHAR_MAX) 9 | #define HIGHS (ONES * (UCHAR_MAX/2+1)) 10 | #define HASZERO(x) ((x)-ONES & ~(x) & HIGHS) 11 | 12 | char *__stpcpy(char *restrict d, const char *restrict s) 13 | { 14 | #ifdef __GNUC__ 15 | typedef size_t __attribute__((__may_alias__)) word; 16 | word *wd; 17 | const word *ws; 18 | if ((uintptr_t)s % ALIGN == (uintptr_t)d % ALIGN) { 19 | for (; (uintptr_t)s % ALIGN; s++, d++) 20 | if (!(*d=*s)) return d; 21 | wd=(void *)d; ws=(const void *)s; 22 | for (; !HASZERO(*ws); *wd++ = *ws++); 23 | d=(void *)wd; s=(const void *)ws; 24 | } 25 | #endif 26 | for (; (*d=*s); s++, d++); 27 | 28 | return d; 29 | } 30 | 31 | weak_alias(__stpcpy, stpcpy); 32 | -------------------------------------------------------------------------------- /external/libcrt/src/string/stpncpy.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #define ALIGN (sizeof(size_t)-1) 8 | #define ONES ((size_t)-1/UCHAR_MAX) 9 | #define HIGHS (ONES * (UCHAR_MAX/2+1)) 10 | #define HASZERO(x) ((x)-ONES & ~(x) & HIGHS) 11 | 12 | char *__stpncpy(char *restrict d, const char *restrict s, size_t n) 13 | { 14 | #ifdef __GNUC__ 15 | typedef size_t __attribute__((__may_alias__)) word; 16 | word *wd; 17 | const word *ws; 18 | if (((uintptr_t)s & ALIGN) == ((uintptr_t)d & ALIGN)) { 19 | for (; ((uintptr_t)s & ALIGN) && n && (*d=*s); n--, s++, d++); 20 | if (!n || !*s) goto tail; 21 | wd=(void *)d; ws=(const void *)s; 22 | for (; n>=sizeof(size_t) && !HASZERO(*ws); 23 | n-=sizeof(size_t), ws++, wd++) *wd = *ws; 24 | d=(void *)wd; s=(const void *)ws; 25 | } 26 | #endif 27 | for (; n && (*d=*s); n--, s++, d++); 28 | tail: 29 | memset(d, 0, n); 30 | return d; 31 | } 32 | 33 | weak_alias(__stpncpy, stpncpy); 34 | 35 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strcasecmp.c: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int strcasecmp(const char *_l, const char *_r) 7 | { 8 | const unsigned char *l=(void *)_l, *r=(void *)_r; 9 | for (; *l && *r && (*l == *r || tolower(*l) == tolower(*r)); l++, r++); 10 | return tolower(*l) - tolower(*r); 11 | } 12 | 13 | #if 0 14 | int __strcasecmp_l(const char *l, const char *r, locale_t loc) 15 | { 16 | return strcasecmp(l, r); 17 | } 18 | 19 | weak_alias(__strcasecmp_l, strcasecmp_l); 20 | #endif 21 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strcat.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strcat(char *restrict dest, const char *restrict src) 6 | { 7 | strcpy(dest + strlen(dest), src); 8 | return dest; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strchr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strchr(const char *s, int c) 6 | { 7 | char *r = __strchrnul(s, c); 8 | return *(unsigned char *)r == (unsigned char)c ? r : 0; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strchrnul.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #define ALIGN (sizeof(size_t)) 8 | #define ONES ((size_t)-1/UCHAR_MAX) 9 | #define HIGHS (ONES * (UCHAR_MAX/2+1)) 10 | #define HASZERO(x) ((x)-ONES & ~(x) & HIGHS) 11 | 12 | char *__strchrnul(const char *s, int c) 13 | { 14 | c = (unsigned char)c; 15 | if (!c) return (char *)s + strlen(s); 16 | 17 | #ifdef __GNUC__ 18 | typedef size_t __attribute__((__may_alias__)) word; 19 | const word *w; 20 | for (; (uintptr_t)s % ALIGN; s++) 21 | if (!*s || *(unsigned char *)s == c) return (char *)s; 22 | size_t k = ONES * c; 23 | for (w = (void *)s; !HASZERO(*w) && !HASZERO(*w^k); w++); 24 | s = (void *)w; 25 | #endif 26 | for (; *s && *(unsigned char *)s != c; s++); 27 | return (char *)s; 28 | } 29 | 30 | weak_alias(__strchrnul, strchrnul); 31 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strcmp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int strcmp(const char *l, const char *r) 6 | { 7 | for (; *l==*r && *l; l++, r++); 8 | return *(unsigned char *)l - *(unsigned char *)r; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strcpy.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strcpy(char *restrict dest, const char *restrict src) 6 | { 7 | __stpcpy(dest, src); 8 | return dest; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strcspn.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | #define BITOP(a,b,op) \ 6 | ((a)[(size_t)(b)/(8*sizeof *(a))] op 
(size_t)1<<((size_t)(b)%(8*sizeof *(a)))) 7 | 8 | size_t strcspn(const char *s, const char *c) 9 | { 10 | const char *a = s; 11 | size_t byteset[32/sizeof(size_t)]; 12 | 13 | if (!c[0] || !c[1]) return __strchrnul(s, *c)-a; 14 | 15 | memset(byteset, 0, sizeof byteset); 16 | for (; *c && BITOP(byteset, *(unsigned char *)c, |=); c++); 17 | for (; *s && !BITOP(byteset, *(unsigned char *)s, &); s++); 18 | return s-a; 19 | } 20 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strdup.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | char *strdup(const char *s) 8 | { 9 | size_t l = strlen(s); 10 | char *d = malloc(l+1); 11 | if (!d) return NULL; 12 | return memcpy(d, s, l+1); 13 | } 14 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strlen.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #define ALIGN (sizeof(size_t)) 8 | #define ONES ((size_t)-1/UCHAR_MAX) 9 | #define HIGHS (ONES * (UCHAR_MAX/2+1)) 10 | #define HASZERO(x) ((x)-ONES & ~(x) & HIGHS) 11 | 12 | size_t strlen(const char *s) 13 | { 14 | const char *a = s; 15 | #ifdef __GNUC__ 16 | typedef size_t __attribute__((__may_alias__)) word; 17 | const word *w; 18 | for (; (uintptr_t)s % ALIGN; s++) if (!*s) return s-a; 19 | for (w = (const void *)s; !HASZERO(*w); w++); 20 | s = (const void *)w; 21 | #endif 22 | for (; *s; s++); 23 | return s-a; 24 | } 25 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strncasecmp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | int strncasecmp(const char *_l, const char *_r, size_t n) 7 | { 8 | const unsigned char *l=(void *)_l, *r=(void *)_r; 9 | if (!n--) return 0; 10 | for (; *l && *r && n && (*l == *r || tolower(*l) == tolower(*r)); l++, r++, n--); 11 | return tolower(*l) - tolower(*r); 12 | } 13 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strncat.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strncat(char *restrict d, const char *restrict s, size_t n) 6 | { 7 | char *a = d; 8 | d += strlen(d); 9 | while (n && *s) n--, *d++ = *s++; 10 | *d++ = 0; 11 | return a; 12 | } 13 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strncmp.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | int strncmp(const char *_l, const char *_r, size_t n) 6 | { 7 | const unsigned char *l=(void *)_l, *r=(void *)_r; 8 | if (!n--) return 0; 9 | for (; *l && *r && n && *l == *r ; l++, r++, n--); 10 | return *l - *r; 11 | } 12 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strncpy.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strncpy(char *restrict d, const char *restrict s, size_t n) 6 | { 7 | 
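/* __stpncpy() copies at most n bytes and zero-fills the remainder of the n-byte buffer, which is exactly the padding strncpy() requires. */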
__stpncpy(d, s, n); 8 | return d; 9 | } 10 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strrchr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | char *strrchr(const char *s, int c) 6 | { 7 | return __memrchr(s, c, strlen(s) + 1); 8 | } 9 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strspn.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | #define BITOP(a,b,op) \ 6 | ((a)[(size_t)(b)/(8*sizeof *(a))] op (size_t)1<<((size_t)(b)%(8*sizeof *(a)))) 7 | 8 | size_t strspn(const char *s, const char *c) 9 | { 10 | const char *a = s; 11 | size_t byteset[32/sizeof(size_t)] = { 0 }; 12 | 13 | if (!c[0]) return 0; 14 | if (!c[1]) { 15 | for (; *s == *c; s++); 16 | return s-a; 17 | } 18 | 19 | for (; *c && BITOP(byteset, *(unsigned char *)c, |=); c++); 20 | for (; *s && BITOP(byteset, *(unsigned char *)s, &); s++); 21 | return s-a; 22 | } 23 | -------------------------------------------------------------------------------- /external/libcrt/src/string/strstr.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | static char *twobyte_strstr(const unsigned char *h, const unsigned char *n) 7 | { 8 | uint16_t nw = n[0]<<8 | n[1], hw = h[0]<<8 | h[1]; 9 | for (h++; *h && hw != nw; hw = hw<<8 | *++h); 10 | return *h ? (char *)h-1 : 0; 11 | } 12 | 13 | static char *threebyte_strstr(const unsigned char *h, const unsigned char *n) 14 | { 15 | uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8; 16 | uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8; 17 | for (h+=2; *h && hw != nw; hw = (hw|*++h)<<8); 18 | return *h ? (char *)h-2 : 0; 19 | } 20 | 21 | static char *fourbyte_strstr(const unsigned char *h, const unsigned char *n) 22 | { 23 | uint32_t nw = (uint32_t)n[0]<<24 | n[1]<<16 | n[2]<<8 | n[3]; 24 | uint32_t hw = (uint32_t)h[0]<<24 | h[1]<<16 | h[2]<<8 | h[3]; 25 | for (h+=3; *h && hw != nw; hw = hw<<8 | *++h); 26 | return *h ? (char *)h-3 : 0; 27 | } 28 | 29 | #define MAX(a,b) ((a)>(b)?(a):(b)) 30 | #define MIN(a,b) ((a)<(b)?(a):(b)) 31 | 32 | #define BITOP(a,b,op) \ 33 | ((a)[(size_t)(b)/(8*sizeof *(a))] op (size_t)1<<((size_t)(b)%(8*sizeof *(a)))) 34 | 35 | static char *twoway_strstr(const unsigned char *h, const unsigned char *n) 36 | { 37 | const unsigned char *z; 38 | size_t l, ip, jp, k, p, ms, p0, mem, mem0; 39 | size_t byteset[32 / sizeof(size_t)] = { 0 }; 40 | size_t shift[256]; 41 | 42 | /* Computing length of needle and fill shift table */ 43 | for (l=0; n[l] && h[l]; l++) 44 | BITOP(byteset, n[l], |=), shift[n[l]] = l+1; 45 | if (n[l]) return 0; /* hit the end of h */ 46 | 47 | /* Compute maximal suffix */ 48 | ip = -1; jp = 0; k = p = 1; 49 | while (jp+k<l) { 50 | if (n[ip+k] == n[jp+k]) { 51 | if (k == p) { 52 | jp += p; 53 | k = 1; 54 | } else k++; 55 | } else if (n[ip+k] > n[jp+k]) { 56 | jp += k; 57 | k = 1; 58 | p = jp - ip; 59 | } else { 60 | ip = jp++; 61 | k = p = 1; 62 | } 63 | } 64 | ms = ip; 65 | p0 = p; 66 | 67 | /* And with the opposite comparison */ 68 | ip = -1; jp = 0; k = p = 1; 69 | while (jp+k<l) { 70 | if (n[ip+k] == n[jp+k]) { 71 | if (k == p) { 72 | jp += p; 73 | k = 1; 74 | } else k++; 75 | } else if (n[ip+k] < n[jp+k]) { 76 | jp += k; 77 | k = 1; 78 | p = jp - ip; 79 | } else { 80 | ip = jp++; 81 | k = p = 1; 82 | } 83 | } 84 | if (ip+1 > ms+1) ms = ip; 85 | else p = p0; 86 | 87 | /* Periodic needle?
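If the first ms+1 bytes of n recur at offset p, the needle is periodic and the match memory mem0 = l-p can be carried between attempts; otherwise a larger shift of MAX(ms, l-ms-1)+1 is safe and no memory is kept.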
*/ 88 | if (memcmp(n, n+p, ms+1)) { 89 | mem0 = 0; 90 | p = MAX(ms, l-ms-1) + 1; 91 | } else mem0 = l-p; 92 | mem = 0; 93 | 94 | /* Initialize incremental end-of-haystack pointer */ 95 | z = h; 96 | 97 | /* Search loop */ 98 | for (;;) { 99 | /* Update incremental end-of-haystack pointer */ 100 | if (z-h < l) { 101 | /* Fast estimate for MAX(l,63) */ 102 | size_t grow = l | 63; 103 | const unsigned char *z2 = memchr(z, 0, grow); 104 | if (z2) { 105 | z = z2; 106 | if (z-h < l) return 0; 107 | } else z += grow; 108 | } 109 | 110 | /* Check last byte first; advance by shift on mismatch */ 111 | if (BITOP(byteset, h[l-1], &)) { 112 | k = l-shift[h[l-1]]; 113 | if (k) { 114 | if (k < mem) k = mem; 115 | h += k; 116 | mem = 0; 117 | continue; 118 | } 119 | } else { 120 | h += l; 121 | mem = 0; 122 | continue; 123 | } 124 | 125 | /* Compare right half */ 126 | for (k=MAX(ms+1,mem); n[k] && n[k] == h[k]; k++); 127 | if (n[k]) { 128 | h += k-ms; 129 | mem = 0; 130 | continue; 131 | } 132 | /* Compare left half */ 133 | for (k=ms+1; k>mem && n[k-1] == h[k-1]; k--); 134 | if (k <= mem) return (char *)h; 135 | h += p; 136 | mem = mem0; 137 | } 138 | } 139 | 140 | char *strstr(const char *h, const char *n) 141 | { 142 | /* Return immediately on empty needle */ 143 | if (!n[0]) return (char *)h; 144 | 145 | /* Use faster algorithms for short needles */ 146 | h = strchr(h, *n); 147 | if (!h || !n[1]) return (char *)h; 148 | if (!h[1]) return 0; 149 | if (!n[2]) return twobyte_strstr((void *)h, (void *)n); 150 | if (!h[2]) return 0; 151 | if (!n[3]) return threebyte_strstr((void *)h, (void *)n); 152 | if (!h[3]) return 0; 153 | if (!n[4]) return fourbyte_strstr((void *)h, (void *)n); 154 | 155 | return twoway_strstr((void *)h, (void *)n); 156 | } 157 | -------------------------------------------------------------------------------- /external/libcrt/src/stub.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | 5 | #define NOT_IMPLEMENTED printf("BUG: %s not implemented\n", __func__) 6 | 7 | // errno.h 8 | 9 | int errno = 0; 10 | FILE *stderr = NULL; 11 | FILE *stdin = NULL; 12 | FILE *stdout = NULL; 13 | 14 | char *strerror(int errnum) 15 | { 16 | NOT_IMPLEMENTED; 17 | return NULL; 18 | } 19 | 20 | // stdio.h 21 | 22 | int sscanf(const char *buffer, const char *format, ...) 
23 | { 24 | NOT_IMPLEMENTED; 25 | return 0; 26 | } 27 | 28 | size_t fwrite(const void *buffer, size_t size, size_t count, FILE *stream) 29 | { 30 | NOT_IMPLEMENTED; 31 | return 0; 32 | } 33 | 34 | size_t fread(void *b, size_t c, size_t i, FILE *f) 35 | { 36 | NOT_IMPLEMENTED; 37 | return 0; 38 | } 39 | 40 | int fclose(FILE *f) 41 | { 42 | NOT_IMPLEMENTED; 43 | return EOF; 44 | } 45 | 46 | FILE *fopen(const char *c, const char *m) 47 | { 48 | NOT_IMPLEMENTED; 49 | return NULL; 50 | } 51 | 52 | void setbuf(FILE *stream, char *buf) 53 | { 54 | NOT_IMPLEMENTED; 55 | } 56 | 57 | // stdlib.h 58 | 59 | long strtol(const char *nptr, char **endptr, int base) 60 | { 61 | NOT_IMPLEMENTED; 62 | return LONG_MIN; 63 | } 64 | 65 | unsigned long strtoul(const char *nptr, char **endptr, int base) 66 | { 67 | NOT_IMPLEMENTED; 68 | return ULONG_MAX; 69 | } 70 | 71 | char *getenv(const char *varname) 72 | { 73 | NOT_IMPLEMENTED; 74 | return NULL; 75 | } 76 | 77 | int atexit(void (*func)(void)) 78 | { 79 | NOT_IMPLEMENTED; 80 | return -1; 81 | } 82 | 83 | // unistd.h 84 | 85 | static pid_t monotonic_counter; 86 | 87 | pid_t getpid(void) 88 | { 89 | NOT_IMPLEMENTED; 90 | return ++monotonic_counter; 91 | } 92 | 93 | uid_t getuid(void) 94 | { 95 | NOT_IMPLEMENTED; 96 | return 0; 97 | } 98 | 99 | uid_t geteuid(void) 100 | { 101 | NOT_IMPLEMENTED; 102 | return 0; 103 | } 104 | 105 | gid_t getgid(void) 106 | { 107 | NOT_IMPLEMENTED; 108 | return 0; 109 | } 110 | 111 | gid_t getegid(void) 112 | { 113 | NOT_IMPLEMENTED; 114 | return 0; 115 | } 116 | 117 | // dirent.h 118 | 119 | DIR *opendir(const char *name) 120 | { 121 | NOT_IMPLEMENTED; 122 | return NULL; 123 | } 124 | 125 | int closedir(DIR *name) 126 | { 127 | NOT_IMPLEMENTED; 128 | return -1; 129 | } 130 | 131 | struct dirent *readdir(DIR *name) 132 | { 133 | NOT_IMPLEMENTED; 134 | return NULL; 135 | } 136 | 137 | // sys/stat.h 138 | 139 | int stat(const char *__restrict path, struct stat *restrict buf) 140 | { 141 | NOT_IMPLEMENTED; 142 | return -1; 143 | } 144 | -------------------------------------------------------------------------------- /external/libcrt/src/time/__secs_to_tm.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | /* 2000-03-01 (mod 400 year, immediately after feb29 */ 7 | #define LEAPOCH (946684800LL + 86400*(31+29)) 8 | 9 | #define DAYS_PER_400Y (365*400 + 97) 10 | #define DAYS_PER_100Y (365*100 + 24) 11 | #define DAYS_PER_4Y (365*4 + 1) 12 | 13 | int __secs_to_tm(long long t, struct tm *tm) 14 | { 15 | long long days, secs, years; 16 | int remdays, remsecs, remyears; 17 | int qc_cycles, c_cycles, q_cycles; 18 | int months; 19 | int wday, yday, leap; 20 | static const char days_in_month[] = {31,30,31,30,31,31,30,31,30,31,31,29}; 21 | 22 | /* Reject time_t values whose year would overflow int */ 23 | if (t < INT_MIN * 31622400LL || t > INT_MAX * 31622400LL) 24 | return -1; 25 | 26 | secs = t - LEAPOCH; 27 | days = secs / 86400; 28 | remsecs = secs % 86400; 29 | if (remsecs < 0) { 30 | remsecs += 86400; 31 | days--; 32 | } 33 | 34 | wday = (3+days)%7; 35 | if (wday < 0) wday += 7; 36 | 37 | qc_cycles = days / DAYS_PER_400Y; 38 | remdays = days % DAYS_PER_400Y; 39 | if (remdays < 0) { 40 | remdays += DAYS_PER_400Y; 41 | qc_cycles--; 42 | } 43 | 44 | c_cycles = remdays / DAYS_PER_100Y; 45 | if (c_cycles == 4) c_cycles--; 46 | remdays -= c_cycles * DAYS_PER_100Y; 47 | 48 | q_cycles = remdays / DAYS_PER_4Y; 49 | if (q_cycles == 25) 
q_cycles--; 50 | remdays -= q_cycles * DAYS_PER_4Y; 51 | 52 | remyears = remdays / 365; 53 | if (remyears == 4) remyears--; 54 | remdays -= remyears * 365; 55 | 56 | leap = !remyears && (q_cycles || !c_cycles); 57 | yday = remdays + 31 + 28 + leap; 58 | if (yday >= 365+leap) yday -= 365+leap; 59 | 60 | years = remyears + 4*q_cycles + 100*c_cycles + 400LL*qc_cycles; 61 | 62 | for (months=0; days_in_month[months] <= remdays; months++) 63 | remdays -= days_in_month[months]; 64 | 65 | if (months >= 10) { 66 | months -= 12; 67 | years++; 68 | } 69 | 70 | if (years+100 > INT_MAX || years+100 < INT_MIN) 71 | return -1; 72 | 73 | tm->tm_year = years + 100; 74 | tm->tm_mon = months + 2; 75 | tm->tm_mday = remdays + 1; 76 | tm->tm_wday = wday; 77 | tm->tm_yday = yday; 78 | 79 | tm->tm_hour = remsecs / 3600; 80 | tm->tm_min = remsecs / 60 % 60; 81 | tm->tm_sec = remsecs % 60; 82 | 83 | return 0; 84 | } 85 | -------------------------------------------------------------------------------- /external/libcrt/src/time/gmtime_r.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | 6 | char *__utc = "UTC"; 7 | 8 | struct tm *gmtime_r(const time_t *restrict t, struct tm *restrict tm) 9 | { 10 | if (__secs_to_tm(*t, tm) < 0) { 11 | errno = EOVERFLOW; 12 | return 0; 13 | } 14 | tm->tm_isdst = 0; 15 | tm->tm_gmtoff = 0; 16 | tm->tm_zone = __utc; 17 | return tm; 18 | } 19 | 20 | 21 | struct tm *gmtime(const time_t *t) 22 | { 23 | static struct tm tm; 24 | return gmtime_r(t, &tm); 25 | } 26 | -------------------------------------------------------------------------------- /external/libcrt/src/time/time.c: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | // Calls the rdtsc instruction to read the current value of the processor's 8 | // time-stamp counter (a 64-bit MSR) 9 | clock_t clock(void) { 10 | return rdtsc(); 11 | } 12 | 13 | uint64_t secs; 14 | 15 | int clock_gettime(clockid_t clk_id, struct timespec *tp) { 16 | (void) clk_id; 17 | if (tp) { 18 | tp->tv_nsec = rdtsc(); 19 | tp->tv_sec = secs++; 20 | } 21 | return 0; 22 | } 23 | 24 | // time() returns the time as the number of seconds since the Epoch 25 | time_t time(time_t *tloc) { 26 | (void)tloc; 27 | time_t t = rdtsc(); 28 | return t; 29 | } 30 | 31 | int gettimeofday(struct timeval *restrict tv, void *restrict tz) 32 | { 33 | if (!tv) return 0; 34 | tv->tv_sec = clock(); 35 | tv->tv_usec = 0; 36 | return 0; 37 | } 38 | -------------------------------------------------------------------------------- /external/openssl_svsm.conf: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: MIT 2 | # 3 | # Copyright (C) 2023 IBM Corporation 4 | # 5 | # Authors: Claudio Carvalho 6 | # 7 | 8 | my %targets = ( 9 | "SVSM" => { 10 | inherit_from => [ "BASE_unix" ], 11 | perlasm_scheme => "elf", 12 | CC => "gcc", 13 | CFLAGS => add("-O2 -fPIE -m64 -nostdinc -nostdlib -static -fno-stack-protector"), 14 | bn_ops => "SIXTY_FOUR_BIT_LONG", 15 | lib_cppflags => add("-DL_ENDIAN -DNO_SYSLOG -DOPENSSL_SMALL_FOOTPRINT -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE"), 16 | sys_id => "SVSM" 17 | }, 18 | ); 19 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | 
[toolchain] 2 | channel = "nightly" 3 | targets = ["x86_64-unknown-none"] 4 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: MIT 3 | 4 | SCRIPT_DIR="$(dirname $0)" 5 | . ${SCRIPT_DIR}/common.sh 6 | . ${SCRIPT_DIR}/stable-commits 7 | [ -e /etc/os-release ] && . /etc/os-release 8 | 9 | function usage() 10 | { 11 | echo "Usage: $0 [OPTIONS] [COMPONENT]" 12 | echo " where COMPONENT is an individual component to build:" 13 | echo " qemu, ovmf, kernel [host|guest]" 14 | echo " (default is to build all components)" 15 | echo " where OPTIONS are:" 16 | echo " --install PATH Installation path (default $INSTALL_DIR)" 17 | echo " --package Create a tarball containing built components" 18 | echo " -h|--help Usage information" 19 | 20 | exit 1 21 | } 22 | 23 | INSTALL_DIR="`pwd`/usr/local" 24 | 25 | while [ -n "$1" ]; do 26 | case "$1" in 27 | --install) 28 | [ -z "$2" ] && usage 29 | INSTALL_DIR="$2" 30 | shift; shift 31 | ;; 32 | --package) 33 | BUILD_PACKAGE="1" 34 | shift 35 | ;; 36 | -h|--help) 37 | usage 38 | ;; 39 | -*|--*) 40 | echo "Unsupported option: [$1]" 41 | usage 42 | ;; 43 | *) 44 | break 45 | ;; 46 | esac 47 | done 48 | 49 | mkdir -p $INSTALL_DIR 50 | IDIR=$INSTALL_DIR 51 | INSTALL_DIR=$(readlink -e $INSTALL_DIR) 52 | [ -n "$INSTALL_DIR" -a -d "$INSTALL_DIR" ] || { 53 | echo "Installation directory [$IDIR] does not exist, exiting" 54 | exit 1 55 | } 56 | 57 | if [ -z "$1" ]; then 58 | build_install_qemu "$INSTALL_DIR" 59 | build_install_ovmf "$INSTALL_DIR/share/qemu" 60 | build_kernel 61 | else 62 | case "$1" in 63 | qemu) 64 | build_install_qemu "$INSTALL_DIR" 65 | ;; 66 | ovmf) 67 | build_install_ovmf "$INSTALL_DIR/share/qemu" 68 | ;; 69 | kernel) 70 | # additional argument of "host" or "guest" can be added to limit build to that type 71 | build_kernel $2 72 | ;; 73 | esac 74 | fi 75 | 76 | if [[ "$BUILD_PACKAGE" = "1" ]]; then 77 | OUTPUT_DIR="snp-release-`date "+%F"`" 78 | rm -rf $OUTPUT_DIR 79 | mkdir -p $OUTPUT_DIR/linux/guest 80 | mkdir -p $OUTPUT_DIR/linux/host 81 | mkdir -p $OUTPUT_DIR/usr 82 | cp -dpR $INSTALL_DIR $OUTPUT_DIR/usr/ 83 | 84 | if [ "$ID" = "debian" ] || [ "$ID_LIKE" = "debian" ]; then 85 | cp linux/linux-*-guest-*.deb $OUTPUT_DIR/linux/guest -v 86 | cp linux/linux-*-host-*.deb $OUTPUT_DIR/linux/host -v 87 | else 88 | cp kernel-*.rpm $OUTPUT_DIR/linux -v 89 | fi 90 | 91 | cp launch-qemu.sh ${OUTPUT_DIR} -v 92 | cp install.sh ${OUTPUT_DIR} -v 93 | tar zcvf ${OUTPUT_DIR}.tar.gz ${OUTPUT_DIR} 94 | fi 95 | -------------------------------------------------------------------------------- /scripts/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: MIT 3 | 4 | run_cmd() 5 | { 6 | echo "$*" 7 | 8 | eval "$*" || { 9 | echo "ERROR: $*" 10 | exit 1 11 | } 12 | } 13 | 14 | build_kernel() 15 | { 16 | KERNEL_TYPE=$1 17 | 18 | mkdir -p linux 19 | pushd linux >/dev/null 20 | if [ ! -d guest ]; then 21 | run_cmd git clone ${KERNEL_GIT_URL} guest 22 | pushd guest >/dev/null 23 | run_cmd git remote add current ${KERNEL_GIT_URL} 24 | popd >/dev/null 25 | fi 26 | 27 | if [ ! 
-d host ]; then 28 | # use a copy of guest repo as the host repo 29 | run_cmd cp -r guest host 30 | fi 31 | 32 | for V in guest host; do 33 | # Check if only a "guest" or "host" or kernel build is requested 34 | if [ "$KERNEL_TYPE" != "" ]; then 35 | if [ "$KERNEL_TYPE" != "$V" ]; then 36 | continue 37 | fi 38 | fi 39 | 40 | if [ "${V}" = "guest" ]; then 41 | BRANCH="${KERNEL_GUEST_BRANCH}" 42 | else 43 | BRANCH="${KERNEL_HOST_BRANCH}" 44 | fi 45 | 46 | # Nuke any previously built packages so they don't end up in new tarballs 47 | # when ./build.sh --package is specified 48 | rm -f linux-*-snp-${V}* 49 | 50 | VER="-snp-${V}" 51 | 52 | MAKE="make -C ${V} -j $(getconf _NPROCESSORS_ONLN) LOCALVERSION=" 53 | 54 | run_cmd $MAKE distclean 55 | 56 | pushd ${V} >/dev/null 57 | # If ${KERNEL_GIT_URL} is ever changed, 'current' remote will be out 58 | # of date, so always update the remote URL first 59 | run_cmd git remote set-url current ${KERNEL_GIT_URL} 60 | run_cmd git fetch current 61 | run_cmd git checkout current/${BRANCH} 62 | COMMIT=$(git log --format="%h" -1 HEAD) 63 | 64 | run_cmd "cp /boot/config-$(uname -r) .config" 65 | run_cmd ./scripts/config --set-str LOCALVERSION "$VER-$COMMIT" 66 | run_cmd ./scripts/config --disable LOCALVERSION_AUTO 67 | run_cmd ./scripts/config --enable DEBUG_INFO 68 | run_cmd ./scripts/config --enable DEBUG_INFO_REDUCED 69 | run_cmd ./scripts/config --enable EXPERT 70 | run_cmd ./scripts/config --enable AMD_MEM_ENCRYPT 71 | run_cmd ./scripts/config --disable AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT 72 | run_cmd ./scripts/config --enable KVM_AMD_SEV 73 | run_cmd ./scripts/config --module CRYPTO_DEV_CCP_DD 74 | run_cmd ./scripts/config --disable SYSTEM_TRUSTED_KEYS 75 | run_cmd ./scripts/config --disable SYSTEM_REVOCATION_KEYS 76 | run_cmd ./scripts/config --module SEV_GUEST 77 | run_cmd ./scripts/config --disable IOMMU_DEFAULT_PASSTHROUGH 78 | popd >/dev/null 79 | 80 | yes "" | $MAKE olddefconfig 81 | 82 | # Build 83 | run_cmd $MAKE >/dev/null 84 | 85 | if [ "$ID" = "debian" ] || [ "$ID_LIKE" = "debian" ]; then 86 | run_cmd $MAKE bindeb-pkg 87 | else 88 | run_cmd $MAKE "RPMOPTS='--define \"_rpmdir .\"'" binrpm-pkg 89 | run_cmd mv ${V}/x86_64/*.rpm . 90 | fi 91 | done 92 | popd >/dev/null 93 | } 94 | 95 | build_install_ovmf() 96 | { 97 | DEST="$1" 98 | 99 | GCC_VERSION=$(gcc -v 2>&1 | tail -1 | awk '{print $3}') 100 | GCC_MAJOR=$(echo $GCC_VERSION | awk -F . '{print $1}') 101 | GCC_MINOR=$(echo $GCC_VERSION | awk -F . '{print $2}') 102 | if [ "$GCC_MAJOR" == "4" ]; then 103 | GCCVERS="GCC${GCC_MAJOR}${GCC_MINOR}" 104 | else 105 | GCCVERS="GCC5" 106 | fi 107 | 108 | # captures all the OVMF debug messages on qemu serial log. remove -DDEBUG_ON_SERIAL_PORT to disable it. 109 | BUILD_CMD="nice build -q --cmd-len=64436 -DDEBUG_ON_SERIAL_PORT -n $(getconf _NPROCESSORS_ONLN) ${GCCVERS:+-t $GCCVERS} -a X64 -p OvmfPkg/OvmfPkgX64.dsc" 110 | 111 | [ -d ovmf ] || { 112 | run_cmd git clone --single-branch -b ${OVMF_BRANCH} ${OVMF_GIT_URL} ovmf 113 | 114 | pushd ovmf >/dev/null 115 | run_cmd git submodule update --init --recursive 116 | popd >/dev/null 117 | } 118 | 119 | pushd ovmf >/dev/null 120 | run_cmd make -C BaseTools 121 | . 
./edksetup.sh --reconfig 122 | run_cmd $BUILD_CMD 123 | 124 | mkdir -p $DEST 125 | run_cmd cp -f Build/OvmfX64/DEBUG_$GCCVERS/FV/OVMF_CODE.fd $DEST 126 | run_cmd cp -f Build/OvmfX64/DEBUG_$GCCVERS/FV/OVMF_VARS.fd $DEST 127 | popd >/dev/null 128 | } 129 | 130 | build_install_qemu() 131 | { 132 | DEST="$1" 133 | 134 | [ -d qemu ] || run_cmd git clone --single-branch -b ${QEMU_BRANCH} ${QEMU_GIT_URL} qemu 135 | 136 | MAKE="make -j $(getconf _NPROCESSORS_ONLN) LOCALVERSION=" 137 | 138 | pushd qemu >/dev/null 139 | run_cmd ./configure --target-list=x86_64-softmmu --prefix=$DEST --disable-werror 140 | run_cmd $MAKE 141 | run_cmd $MAKE install 142 | popd >/dev/null 143 | } 144 | -------------------------------------------------------------------------------- /scripts/crates.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: MIT 3 | # This script helps keep track of crates we depend on 4 | # Author Carlos Bilbao 5 | 6 | CHECK_LICENSE=0 7 | 8 | function usage() 9 | { 10 | echo "$0 prints information of crates included in our dependencies" 11 | echo "Usage: $0 [OPTIONS]" 12 | echo " where OPTIONS are:" 13 | echo " --list Default behavior, list licenses and versions" 14 | echo " --check Return 0 if all licenses have MIT, error otherwise" 15 | echo " -h|--help Usage information" 16 | 17 | exit 1 18 | } 19 | 20 | while [ -n "$1" ] 21 | do 22 | case "$1" in 23 | --check) 24 | CHECK_LICENSE="1" 25 | ;; 26 | -h|--help) 27 | usage 28 | ;; 29 | --list) 30 | ;; 31 | *|-*|--*) 32 | echo "Unsupported option: [$1]" 33 | usage 34 | ;; 35 | esac 36 | shift 37 | done 38 | 39 | if [ "$CHECK_LICENSE" -eq 0 ] 40 | then 41 | SEPARATE="-------------" 42 | printf "%-20s %-20s %-20s\n" "Crate" "Version" "License" 43 | printf "%-20s %-20s %-20s\n" $SEPARATE $SEPARATE $SEPARATE 44 | fi 45 | 46 | # Iterate over each crate 47 | cargo tree --prefix none --format "{p} {l}" | while read CRATE VERSION LICENSE 48 | do 49 | if [ "$CRATE" == "linux_svsm" ] 50 | then 51 | continue 52 | fi 53 | 54 | LICENSE=${LICENSE//(proc-macro) /} 55 | 56 | if [ "$CHECK_LICENSE" -eq 0 ] 57 | then 58 | # Print the name, version and license of the crate 59 | printf "%-20s %-20s %-20s\n" "$CRATE" "$VERSION" "$LICENSE" 60 | 61 | elif [ "$LICENSE" != "${LICENSE//MIT/}" ] 62 | then 63 | exit 1 64 | fi 65 | done 66 | 67 | exit 0 68 | -------------------------------------------------------------------------------- /scripts/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # SPDX-License-Identifier: MIT 3 | 4 | [ -e /etc/os-release ] && . /etc/os-release 5 | 6 | # This will install all the dependent packages for qemu and ovmf to run 7 | if [ "$ID" = "debian" ] || [ "$ID_LIKE" = "debian" ]; then 8 | apt-get -y install qemu ovmf 9 | else 10 | dnf install qemu edk2-ovmf 11 | fi 12 | 13 | if [ "$ID" = "debian" ] || [ "$ID_LIKE" = "debian" ]; then 14 | dpkg -i linux/host/linux-image-*.deb 15 | else 16 | rpm -ivh linux/kernel-*.rpm 17 | fi 18 | 19 | echo 20 | echo "Reboot the host and select the SNP Host kernel" 21 | echo 22 | -------------------------------------------------------------------------------- /scripts/stable-commits: -------------------------------------------------------------------------------- 1 | # SPDX-License-Identifier: MIT 2 | # 3 | # Repos/Branches for SVSM support 4 | # -which also means SNP support. 
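# (sourced by scripts/build.sh to select the repositories and branches used below)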
5 | # 6 | 7 | # Hypervisor commit 8 | KERNEL_GIT_URL="https://github.com/AMDESE/linux.git" 9 | KERNEL_HOST_BRANCH="svsm-preview-hv-v4" 10 | KERNEL_GUEST_BRANCH="svsm-preview-guest-v4" 11 | 12 | # qemu commit 13 | QEMU_GIT_URL="https://github.com/AMDESE/qemu.git" 14 | QEMU_BRANCH="svsm-preview-v4" 15 | 16 | # Guest BIOS (OVMF) 17 | OVMF_GIT_URL="https://github.com/AMDESE/ovmf" 18 | OVMF_BRANCH="svsm-preview-v4" 19 | -------------------------------------------------------------------------------- /src/bios.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::smp_prepare_bios_vmpl; 10 | use crate::cpu::smp_run_bios_vmpl; 11 | use crate::cpu::vc::*; 12 | use crate::*; 13 | 14 | use core::cmp::min; 15 | use core::mem::size_of; 16 | use core::ptr; 17 | use core::slice; 18 | use uuid::Bytes; 19 | use uuid::{uuid, Uuid}; 20 | use x86_64::{PhysAddr, VirtAddr}; 21 | 22 | /// 2 23 | const BIOS_TABLE_LEN_FIELD: u64 = 2; 24 | /// 32 25 | const BIOS_TABLE_END: u64 = 32; 26 | /// 16 27 | const GUID_SIZE: u64 = 16; 28 | 29 | #[repr(C)] 30 | #[derive(Clone, Copy, Debug)] 31 | struct GuidTable { 32 | begin: u64, 33 | end: u64, 34 | len: u16, 35 | } 36 | 37 | #[allow(dead_code)] 38 | impl GuidTable { 39 | pub const fn new() -> Self { 40 | GuidTable { 41 | begin: 0, 42 | end: 0, 43 | len: 0, 44 | } 45 | } 46 | funcs!(begin, u64); 47 | funcs!(end, u64); 48 | funcs!(len, u16); 49 | } 50 | 51 | struct BiosInfo { 52 | va: u64, 53 | size: u64, 54 | 55 | guid_table: GuidTable, 56 | } 57 | 58 | #[allow(dead_code)] 59 | impl BiosInfo { 60 | pub const fn new(va: VirtAddr, size: u64) -> Self { 61 | let g: GuidTable = GuidTable::new(); 62 | 63 | BiosInfo { 64 | va: va.as_u64(), 65 | size: size, 66 | guid_table: g, 67 | } 68 | } 69 | funcs!(va, u64); 70 | funcs!(size, u64); 71 | } 72 | 73 | #[repr(C, packed)] 74 | #[derive(Clone, Copy, Debug)] 75 | struct SnpMetaData { 76 | signature: u32, 77 | len: u32, 78 | version: u32, 79 | section_count: u32, 80 | } 81 | 82 | #[allow(dead_code)] 83 | impl SnpMetaData { 84 | funcs!(signature, u32); 85 | funcs!(len, u32); 86 | funcs!(version, u32); 87 | funcs!(section_count, u32); 88 | } 89 | 90 | #[derive(Clone, Copy, Debug)] 91 | #[repr(C, packed)] 92 | struct SnpSection { 93 | address: u32, 94 | size: u32, 95 | stype: u32, 96 | } 97 | 98 | #[allow(dead_code)] 99 | impl SnpSection { 100 | funcs!(address, u32); 101 | funcs!(size, u32); 102 | funcs!(stype, u32); 103 | 104 | pub fn address_u64(&self) -> u64 { 105 | self.address as u64 106 | } 107 | pub fn size_u64(&self) -> u64 { 108 | self.size as u64 109 | } 110 | } 111 | 112 | #[derive(Clone, Copy, Debug)] 113 | #[repr(C, packed)] 114 | struct SnpSecrets { 115 | version: u32, 116 | flags: u32, 117 | fms: u32, 118 | reserved1: [u8; 4], 119 | 120 | gosvw: [u8; 16], 121 | 122 | vmpck0: [u8; 32], 123 | vmpck1: [u8; 32], 124 | vmpck2: [u8; 32], 125 | vmpck3: [u8; 32], 126 | 127 | os_reserved: [u8; 96], 128 | 129 | reserved2: [u8; 64], 130 | 131 | // SVSM fields start at offset 0x140 into the secrets page 132 | svsm_base: u64, 133 | svsm_size: u64, 134 | svsm_caa: u64, 135 | svsm_max_version: u32, 136 | svsm_guest_vmpl: u8, 137 | reserved3: [u8; 3], 138 | } 139 | 140 | #[allow(dead_code)] 141 | impl SnpSecrets { 142 | pub fn clear_vmpck0(&mut self) { 143 | self.vmpck0.iter_mut().for_each(|e| *e = 0); 144 | } 145 | 146 | 
funcs!(svsm_base, u64); 147 | funcs!(svsm_size, u64); 148 | funcs!(svsm_caa, u64); 149 | funcs!(svsm_max_version, u32); 150 | funcs!(svsm_guest_vmpl, u8); 151 | } 152 | 153 | /// 96b582de-1fb2-45f7-baea-a366c55a082d 154 | const OVMF_TABLE_GUID: Uuid = uuid!("96b582de-1fb2-45f7-baea-a366c55a082d"); 155 | /// dc886566-984a-4798-A75e-5585a7bf67cc 156 | const OVMF_SNP_ENTRY_GUID: Uuid = uuid!("dc886566-984a-4798-A75e-5585a7bf67cc"); 157 | 158 | /// 0x56455341 ("ASEV" in little endian integer) 159 | const SNP_METADATA_SIGNATURE: u32 = 0x56455341; /* "A", "S", "E", "V" in little endian integer */ 160 | 161 | //const SNP_SECT_MEM: u32 = 1; 162 | /// 2 163 | const SNP_SECT_SECRETS: u32 = 2; 164 | /// 3 165 | const SNP_SECT_CPUID: u32 = 3; 166 | /// 4 167 | const SNP_SECT_SVSM_CAA: u32 = 4; 168 | //const SNP_SECT_BSP_VMSA: u32 = 5; 169 | 170 | unsafe fn __find_bios_guid_entry( 171 | bios_info: &mut BiosInfo, 172 | target_guid: Uuid, 173 | avail_len: &mut u64, 174 | p: &mut u64, 175 | ) -> Option { 176 | /* Search is in reverse order */ 177 | while *p > bios_info.guid_table.begin() { 178 | let len: u64 = *((*p - GUID_SIZE - BIOS_TABLE_LEN_FIELD) as *const u16) as u64; 179 | if (len < (GUID_SIZE + BIOS_TABLE_LEN_FIELD)) || (len > *avail_len) { 180 | return None; 181 | } 182 | 183 | let bytes: *const Bytes = (*p - GUID_SIZE) as *const Bytes; 184 | let entry_guid: Uuid = Uuid::from_bytes_le(*bytes); 185 | if entry_guid == target_guid { 186 | return Some(*p - len as u64); 187 | } 188 | 189 | *avail_len -= len; 190 | *p -= len; 191 | } 192 | 193 | return None; 194 | } 195 | 196 | fn find_bios_guid_entry(bios_info: &mut BiosInfo, target_guid: Uuid) -> Option { 197 | let mut avail_len: u64 = bios_info.guid_table.len() as u64; 198 | let mut p: u64 = bios_info.guid_table.end(); 199 | 200 | unsafe { __find_bios_guid_entry(bios_info, target_guid, &mut avail_len, &mut p) } 201 | } 202 | 203 | unsafe fn __find_snp_section(bios_info: &mut BiosInfo, stype: u32, p: u64) -> Option { 204 | let offset: u64 = ptr::read_unaligned(p as *const u32) as u64; 205 | if offset > bios_info.size() { 206 | return None; 207 | } 208 | 209 | let metadata: *const SnpMetaData = 210 | (bios_info.va() + bios_info.size() - offset) as *const SnpMetaData; 211 | if (*metadata).signature() != SNP_METADATA_SIGNATURE { 212 | return None; 213 | } 214 | 215 | let defined_len: u64 = (*metadata).len() as u64; 216 | let expected_len: u64 = (*metadata).section_count() as u64 * size_of::() as u64; 217 | if defined_len < expected_len { 218 | return None; 219 | } 220 | 221 | let mut section: *const SnpSection = 222 | (metadata as u64 + size_of::() as u64) as *const SnpSection; 223 | for _i in 0..(*metadata).section_count() { 224 | if (*section).stype() == stype { 225 | return Some(*section); 226 | } 227 | 228 | section = (section as u64 + size_of::() as u64) as *const SnpSection; 229 | } 230 | 231 | return None; 232 | } 233 | 234 | fn find_snp_section(bios_info: &mut BiosInfo, stype: u32) -> Option { 235 | let p: u64 = match find_bios_guid_entry(bios_info, OVMF_SNP_ENTRY_GUID) { 236 | Some(p) => p, 237 | None => vc_terminate_svsm_bios(), 238 | }; 239 | 240 | unsafe { __find_snp_section(bios_info, stype, p) } 241 | } 242 | 243 | unsafe fn advertise_svsm_presence(bios_info: &mut BiosInfo, caa: PhysAddr) -> bool { 244 | let section: SnpSection = match find_snp_section(bios_info, SNP_SECT_SECRETS) { 245 | Some(p) => p, 246 | None => return false, 247 | }; 248 | 249 | if (section.size() as usize) < size_of::() { 250 | return false; 251 | } 252 | 253 | let 
bios_secrets_pa: PhysAddr = PhysAddr::new(section.address_u64()); 254 | let mut bios_secrets_map: MapGuard = 255 | match MapGuard::new_private(bios_secrets_pa, section.size_u64()) { 256 | Ok(m) => m, 257 | Err(_e) => return false, 258 | }; 259 | let svsm_secrets_va: VirtAddr = get_svsm_secrets_page(); 260 | 261 | // Copy the Secrets page to the BIOS Secrets page location 262 | let bios_secrets: &mut SnpSecrets = bios_secrets_map.as_object_mut(); 263 | let svsm_secrets: *const SnpSecrets = svsm_secrets_va.as_ptr(); 264 | *bios_secrets = *svsm_secrets; 265 | 266 | // Clear the VMPCK0 key 267 | bios_secrets.clear_vmpck0(); 268 | 269 | // Advertise the SVSM 270 | bios_secrets.set_svsm_base(pgtable_va_to_pa(get_svsm_begin()).as_u64()); 271 | bios_secrets.set_svsm_size(get_svsm_end().as_u64() - get_svsm_begin().as_u64()); 272 | bios_secrets.set_svsm_caa(caa.as_u64()); 273 | bios_secrets.set_svsm_max_version(1); 274 | bios_secrets.set_svsm_guest_vmpl(1); 275 | 276 | let section: SnpSection = match find_snp_section(bios_info, SNP_SECT_CPUID) { 277 | Some(p) => p, 278 | None => return false, 279 | }; 280 | 281 | let bios_cpuid_pa: PhysAddr = PhysAddr::new(section.address_u64()); 282 | let size: u64 = min(section.size_u64(), get_svsm_cpuid_page_size()); 283 | 284 | let mut bios_cpuid_map: MapGuard = match MapGuard::new_private(bios_cpuid_pa, size) { 285 | Ok(m) => m, 286 | Err(_e) => return false, 287 | }; 288 | let bios_cpuid: &mut [u8] = bios_cpuid_map.as_bytes_mut(); 289 | 290 | let svsm_cpuid_va: VirtAddr = get_svsm_cpuid_page(); 291 | let svsm_cpuid_ptr: *const u8 = svsm_cpuid_va.as_ptr(); 292 | let svsm_cpuid: &[u8] = unsafe { slice::from_raw_parts(svsm_cpuid_ptr, size as usize) }; 293 | 294 | // Copy the CPUID page to the BIOS Secrets page location 295 | bios_cpuid.copy_from_slice(svsm_cpuid); 296 | 297 | true 298 | } 299 | 300 | fn locate_bios_ca_page(bios_info: &mut BiosInfo) -> Option { 301 | let section: SnpSection = match find_snp_section(bios_info, SNP_SECT_SVSM_CAA) { 302 | Some(p) => p, 303 | None => return None, 304 | }; 305 | 306 | if (section.size() as usize) < size_of::() { 307 | return None; 308 | } 309 | 310 | return Some(PhysAddr::new(section.address_u64())); 311 | } 312 | 313 | fn parse_bios_guid_table(bios_info: &mut BiosInfo) -> bool { 314 | if bios_info.size() < (BIOS_TABLE_END + GUID_SIZE + BIOS_TABLE_LEN_FIELD) { 315 | return false; 316 | } 317 | 318 | unsafe { 319 | let bios: *const u8 = (bios_info.va() + bios_info.size() - BIOS_TABLE_END) as *const u8; 320 | let bytes: *const Bytes = (bios as u64 - GUID_SIZE) as *const Bytes; 321 | 322 | let guid: Uuid = Uuid::from_bytes_le(*bytes); 323 | if guid != OVMF_TABLE_GUID { 324 | return false; 325 | } 326 | 327 | let len: *const u16 = (bios as u64 - GUID_SIZE - BIOS_TABLE_LEN_FIELD) as *const u16; 328 | if (*len as u64) > bios_info.size() { 329 | return false; 330 | } 331 | 332 | bios_info.guid_table.set_begin(bios as u64 - *len as u64); 333 | bios_info.guid_table.set_end(len as u64); 334 | bios_info.guid_table.set_len(*len); 335 | } 336 | 337 | true 338 | } 339 | 340 | fn prepare_bios() { 341 | let (bios_pa, bios_size) = match fwcfg_get_bios_area() { 342 | Some(r) => r, 343 | None => vc_terminate_svsm_fwcfg(), 344 | }; 345 | 346 | let bios_map: MapGuard = match MapGuard::new_private(bios_pa, bios_size) { 347 | Ok(m) => m, 348 | Err(_e) => vc_terminate_svsm_fwcfg(), 349 | }; 350 | 351 | let mut bios_info: BiosInfo = BiosInfo::new(bios_map.va(), bios_size); 352 | if !parse_bios_guid_table(&mut bios_info) { 353 | 
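        // Without a valid OVMF GUID table the SNP metadata sections (secrets,
        // CPUID, SVSM CAA) cannot be located, so treat this as fatal.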
vc_terminate_svsm_bios(); 354 | } 355 | 356 | let caa: PhysAddr = match locate_bios_ca_page(&mut bios_info) { 357 | Some(p) => p, 358 | None => vc_terminate_svsm_bios(), 359 | }; 360 | 361 | unsafe { 362 | if !advertise_svsm_presence(&mut bios_info, caa) { 363 | vc_terminate_svsm_bios(); 364 | } 365 | } 366 | 367 | if !smp_prepare_bios_vmpl(caa) { 368 | vc_terminate_svsm_general(); 369 | } 370 | } 371 | 372 | /// Locate BIOS, prepare it, advertise SVSM presence and run BIOS 373 | pub fn start_bios() { 374 | prepare_bios(); 375 | 376 | if !smp_run_bios_vmpl() { 377 | vc_terminate_svsm_general(); 378 | } 379 | } 380 | -------------------------------------------------------------------------------- /src/cpu/cpuid.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use paste::paste; 10 | 11 | macro_rules! cpuid_fns { 12 | ($name: ident, $type: ty) => { 13 | paste! { 14 | pub fn [<$name>](&self) -> $type { 15 | self.$name 16 | } 17 | } 18 | }; 19 | } 20 | 21 | /// 64 22 | pub const CPUID_COUNT_MAX: usize = 64; 23 | 24 | #[repr(C, packed)] 25 | #[derive(Copy, Clone, Debug)] 26 | pub struct CpuidPageEntry { 27 | eax_in: u32, 28 | ecx_in: u32, 29 | xcr0_in: u64, 30 | xss_in: u64, 31 | 32 | eax: u32, 33 | ebx: u32, 34 | ecx: u32, 35 | edx: u32, 36 | reserved1: [u8; 8], 37 | } 38 | 39 | impl CpuidPageEntry { 40 | cpuid_fns!(eax_in, u32); 41 | cpuid_fns!(ecx_in, u32); 42 | cpuid_fns!(xcr0_in, u64); 43 | cpuid_fns!(xss_in, u64); 44 | 45 | cpuid_fns!(eax, u32); 46 | cpuid_fns!(ebx, u32); 47 | cpuid_fns!(ecx, u32); 48 | cpuid_fns!(edx, u32); 49 | } 50 | 51 | #[repr(C, packed)] 52 | #[derive(Copy, Clone, Debug)] 53 | pub struct CpuidPage { 54 | count: u32, 55 | reserved1: [u8; 12], 56 | 57 | entries: [CpuidPageEntry; CPUID_COUNT_MAX], 58 | } 59 | 60 | impl CpuidPage { 61 | pub fn count(&self) -> u32 { 62 | self.count 63 | } 64 | 65 | pub fn entry(&self, index: usize) -> CpuidPageEntry { 66 | assert!(index < CPUID_COUNT_MAX); 67 | assert!(index < self.count as usize); 68 | 69 | self.entries[index] 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/cpu/idt.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::vc_handler; 10 | use crate::DOUBLE_FAULT_IST; 11 | 12 | use alloc::string::String; 13 | use lazy_static::lazy_static; 14 | use x86_64::structures::idt::InterruptDescriptorTable; 15 | use x86_64::structures::idt::InterruptStackFrame; 16 | use x86_64::structures::idt::PageFaultErrorCode; 17 | 18 | lazy_static! { 19 | static ref IDT: InterruptDescriptorTable = { 20 | let mut idt: InterruptDescriptorTable = InterruptDescriptorTable::new(); 21 | 22 | unsafe { 23 | idt.double_fault 24 | .set_handler_fn(df_handler) 25 | .set_stack_index(DOUBLE_FAULT_IST as u16); 26 | } 27 | idt.general_protection_fault.set_handler_fn(gp_handler); 28 | idt.page_fault.set_handler_fn(pf_handler); 29 | idt.vmm_communication_exception.set_handler_fn(vc_handler); 30 | 31 | idt 32 | }; 33 | } 34 | 35 | fn do_panic(stack_frame: InterruptStackFrame, name: &str, error_code: u64) -> ! 
{ 36 | let rip: u64 = stack_frame.instruction_pointer.as_u64(); 37 | let msg: String = alloc::format!( 38 | "#{} at RIP {:#0x} with error code {:#0x}", 39 | name, 40 | rip, 41 | error_code 42 | ); 43 | 44 | panic!("{}", msg); 45 | } 46 | 47 | /// Double fault handler 48 | /// Every interruption except for #PF, #VC and #GP will end up here 49 | extern "x86-interrupt" fn df_handler(stack_frame: InterruptStackFrame, _error_code: u64) -> ! { 50 | do_panic(stack_frame, "DF", 0) 51 | } 52 | 53 | /// General protection fault handler 54 | extern "x86-interrupt" fn gp_handler(stack_frame: InterruptStackFrame, error_code: u64) { 55 | do_panic(stack_frame, "GP", error_code) 56 | } 57 | 58 | /// Page fault handler 59 | extern "x86-interrupt" fn pf_handler( 60 | stack_frame: InterruptStackFrame, 61 | error_code: PageFaultErrorCode, 62 | ) { 63 | do_panic(stack_frame, "PF", error_code.bits()) 64 | } 65 | 66 | /// Load IDT with function handlers for each exception 67 | pub fn idt_init() { 68 | IDT.load(); 69 | } 70 | -------------------------------------------------------------------------------- /src/cpu/mod.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | /// Handle CpuidPages and their entries. 10 | pub mod cpuid; 11 | /// Create IDT and handle exceptions 12 | pub mod idt; 13 | /// Handle per-vCPU information (Vmsa and Caa) 14 | pub mod percpu; 15 | /// Initialize and start SMP 16 | pub mod smp; 17 | /// Auxiliary assembly functions 18 | pub mod sys; 19 | /// Per-CPU TSS support 20 | pub mod tss; 21 | /// VC functions 22 | pub mod vc; 23 | /// Vmsa (Virtual Machine Saving Area) support 24 | pub mod vmsa; 25 | 26 | pub use crate::cpu::idt::*; 27 | pub use crate::cpu::percpu::*; 28 | pub use crate::cpu::smp::*; 29 | pub use crate::cpu::sys::*; 30 | pub use crate::cpu::tss::*; 31 | pub use crate::cpu::vc::*; 32 | -------------------------------------------------------------------------------- /src/cpu/smp.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 
4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::percpu::PERCPU; 10 | use crate::cpu::vc::*; 11 | use crate::cpu::vmsa::*; 12 | use crate::cpu::*; 13 | use crate::globals::*; 14 | use crate::mem::*; 15 | use crate::svsm_request::*; 16 | use crate::*; 17 | 18 | use core::mem::size_of; 19 | use x86_64::addr::{PhysAddr, VirtAddr}; 20 | use x86_64::instructions::tables::{sgdt, sidt}; 21 | use x86_64::registers::control::Cr3; 22 | use x86_64::structures::paging::frame::PhysFrame; 23 | use x86_64::structures::tss::TaskStateSegment; 24 | use x86_64::structures::DescriptorTablePointer; 25 | 26 | /// Bit 4 27 | const SEGMENT_TYPE_SUPERVISOR: u16 = BIT!(4); 28 | /// Bit 7 29 | const SEGMENT_TYPE_PRESENT: u16 = BIT!(7); 30 | /// Bit 9 31 | const SEGMENT_TYPE_LONGMODE: u16 = BIT!(9); 32 | 33 | /// 0x029a 34 | const SVSM_CS_TYPE: u16 = 35 | 0x0a | SEGMENT_TYPE_PRESENT | SEGMENT_TYPE_SUPERVISOR | SEGMENT_TYPE_LONGMODE; 36 | /// 0xffffffff 37 | const SVSM_CS_LIMIT: u32 = 0xffffffff; 38 | /// 0 39 | const SVSM_CS_BASE: u64 = 0; 40 | 41 | /// 0x0089 42 | pub const SVSM_TSS_TYPE: u16 = 0x9 | SEGMENT_TYPE_PRESENT; 43 | 44 | /// 0x80010033 45 | const SVSM_CR0: u64 = 0x80010033; /* PG, WP, NE, ET, MP, PE */ 46 | /// 0x668 47 | const SVSM_CR4: u64 = 0x668; /* OSXMMEXCPT, OSFXSR, MCE, PAE, DE */ 48 | /// 0xffff0ff0 49 | const SVSM_DR6: u64 = 0xffff0ff0; 50 | /// 0x400 51 | const SVSM_DR7: u64 = 0x400; 52 | /// 0x1d00 53 | const SVSM_EFER: u64 = 0x1d00; /* SVME, NXE, LMA, LME */ 54 | /// 0x0007040600070406 55 | const SVSM_GPAT: u64 = 0x0007040600070406; 56 | /// 0x1 57 | const SVSM_XCR0: u64 = 0x1; 58 | /// 0x1f80 59 | const SVSM_MXCSR: u32 = 0x1f80; 60 | /// 0x2 61 | const SVSM_RFLAGS: u64 = 0x2; 62 | /// 0x5555 63 | const SVSM_X87_FTW: u16 = 0x5555; 64 | /// 0x40 65 | const SVSM_X87_FCW: u16 = 0x40; 66 | 67 | /// 5 68 | const SVSM_STACK_PAGES: u64 = 5; /* 4 stack pages and one guard page */ 69 | 70 | static mut AP_SYNC: u8 = 0; 71 | /// 1 72 | const AP_STARTING: u8 = 1; 73 | /// 2 74 | const AP_STARTED: u8 = 2; 75 | 76 | /// Function executed for each AP when booted 77 | pub extern "C" fn ap_entry() -> ! { 78 | unsafe { 79 | vc_register_ghcb(pgtable_va_to_pa(PERCPU.ghcb())); 80 | BARRIER!(); 81 | AP_SYNC = AP_STARTED; 82 | } 83 | 84 | halt(); 85 | svsm_request_loop(); 86 | 87 | loop { 88 | halt() 89 | } 90 | } 91 | 92 | fn alloc_vmsa() -> PhysFrame { 93 | // Allocate one frame 94 | let mut frame: PhysFrame = match mem_allocate_frames(1) { 95 | Some(f) => f, 96 | None => vc_terminate_svsm_enomem(), 97 | }; 98 | 99 | // VMSA pages must not be 2MB aligned, check for that 100 | if frame.start_address().is_aligned(PAGE_2MB_SIZE) { 101 | // Free aligned frame 102 | mem_free_frame(frame); 103 | 104 | // Allocate two frames and ... 105 | frame = match mem_allocate_frames(2) { 106 | Some(f) => f, 107 | None => vc_terminate_svsm_enomem(), 108 | }; 109 | 110 | // ... 
chose a frame which is not 2MB aligned 111 | if frame.start_address().is_aligned(PAGE_2MB_SIZE) { 112 | frame += 1; 113 | } 114 | } 115 | 116 | return frame; 117 | } 118 | 119 | unsafe fn __create_bios_vmsa(vmsa_va: VirtAddr) { 120 | let bsp_page_va: VirtAddr = get_bios_vmsa_page(); 121 | 122 | let vmsa: *mut Vmsa = vmsa_va.as_mut_ptr(); 123 | let bsp_page: *const Vmsa = bsp_page_va.as_ptr(); 124 | 125 | // Copy the measured BIOS BSP VMSA page 126 | *vmsa = *bsp_page; 127 | 128 | if (*vmsa).vmpl() != VMPL::Vmpl1 as u8 { 129 | vc_terminate_svsm_incorrect_vmpl(); 130 | } 131 | 132 | // Check the SEV-SNP VMSA SEV features to make sure guest will 133 | // execute with supported SEV features. It is better to not fix 134 | // the SEV features ourselves, since this could indicate an issue 135 | // on the hypervisor side. 136 | 137 | if (*vmsa).sev_features() & VMPL1_REQUIRED_SEV_FEATS != VMPL1_REQUIRED_SEV_FEATS { 138 | vc_terminate_vmpl1_sev_features(); 139 | } 140 | 141 | if (*vmsa).sev_features() & VMPL1_UNSUPPORTED_SEV_FEATS != 0 { 142 | vc_terminate_vmpl1_sev_features(); 143 | } 144 | } 145 | 146 | /// Create VMSA (execution context information) for an AP 147 | fn create_svsm_vmsa(for_id: usize) -> VirtAddr { 148 | let frame: PhysFrame = alloc_vmsa(); 149 | let vmsa_va: VirtAddr = pgtable_pa_to_va(frame.start_address()); 150 | let vmsa: *mut Vmsa = vmsa_va.as_mut_ptr(); 151 | 152 | let gdtr: DescriptorTablePointer = sgdt(); 153 | let idtr: DescriptorTablePointer = sidt(); 154 | let gs: VirtAddr = percpu_address(for_id); 155 | let tss: VirtAddr = tss_init_for(for_id); 156 | 157 | unsafe { 158 | (*vmsa).set_cs_selector(get_gdt64_kernel_cs() as u16); 159 | (*vmsa).set_cs_rtype(SVSM_CS_TYPE); 160 | (*vmsa).set_cs_limit(SVSM_CS_LIMIT); 161 | (*vmsa).set_cs_base(SVSM_CS_BASE); 162 | 163 | (*vmsa).set_tr_selector(get_gdt64_tss() as u16); 164 | (*vmsa).set_tr_rtype(SVSM_TSS_TYPE); 165 | (*vmsa).set_tr_limit(size_of::() as u32 - 1); 166 | (*vmsa).set_tr_base(tss.as_u64()); 167 | 168 | (*vmsa).set_gs_base(gs.as_u64()); 169 | 170 | (*vmsa).set_rip(get_cpu_start()); 171 | 172 | (*vmsa).set_gdtr_limit(gdtr.limit as u32); 173 | (*vmsa).set_gdtr_base(gdtr.base.as_u64()); 174 | 175 | (*vmsa).set_idtr_limit(idtr.limit as u32); 176 | (*vmsa).set_idtr_base(idtr.base.as_u64()); 177 | 178 | (*vmsa).set_cr0(SVSM_CR0); 179 | (*vmsa).set_cr3(Cr3::read().0.start_address().as_u64()); 180 | (*vmsa).set_cr4(SVSM_CR4); 181 | (*vmsa).set_efer(SVSM_EFER); 182 | (*vmsa).set_rflags(SVSM_RFLAGS); 183 | (*vmsa).set_dr6(SVSM_DR6); 184 | (*vmsa).set_dr7(SVSM_DR7); 185 | (*vmsa).set_gpat(SVSM_GPAT); 186 | (*vmsa).set_xcr0(SVSM_XCR0); 187 | (*vmsa).set_mxcsr(SVSM_MXCSR); 188 | (*vmsa).set_x87_ftw(SVSM_X87_FTW); 189 | (*vmsa).set_x87_fcw(SVSM_X87_FCW); 190 | 191 | (*vmsa).set_vmpl(VMPL::Vmpl0 as u8); 192 | (*vmsa).set_sev_features(rdmsr(MSR_SEV_STATUS) >> 2); 193 | } 194 | 195 | vmsa_va 196 | } 197 | 198 | /// Start a given AP, which includes creating a Stack and Vmsa 199 | fn ap_start(cpu_id: usize) -> bool { 200 | let apic_id: u32 = unsafe { PERCPU.apic_id_for(cpu_id) }; 201 | 202 | let vmsa: VirtAddr = create_svsm_vmsa(cpu_id); 203 | 204 | let ret: u32 = rmpadjust(vmsa.as_u64(), RMP_4K, VMSA_PAGE | VMPL::Vmpl1 as u64); 205 | if ret != 0 { 206 | vc_terminate_svsm_general(); 207 | } 208 | 209 | let stack: VirtAddr = mem_create_stack(SVSM_STACK_PAGES, false); 210 | set_cpu_stack(stack.as_u64()); 211 | 212 | unsafe { 213 | PERCPU.set_vmsa_for(pgtable_va_to_pa(vmsa), VMPL::Vmpl0, cpu_id); 214 | 215 | AP_SYNC = AP_STARTING; 216 | 
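        // The barrier below orders this store before the AP is created;
        // ap_entry() on the new AP registers its GHCB and then sets AP_SYNC to
        // AP_STARTED, which the wait loop further down spins on.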
BARRIER!(); 217 | 218 | vc_ap_create(vmsa, apic_id); 219 | 220 | while AP_SYNC != AP_STARTED { 221 | pause(); 222 | } 223 | } 224 | 225 | true 226 | } 227 | 228 | /// Retrieve Vmpl1 Vmsa and start it 229 | pub fn smp_run_bios_vmpl() -> bool { 230 | unsafe { 231 | // Retrieve VMPL1 VMSA and start it 232 | let vmsa_pa: PhysAddr = PERCPU.vmsa(VMPL::Vmpl1); 233 | if vmsa_pa == PhysAddr::zero() { 234 | return false; 235 | } 236 | 237 | let vmsa_map: MapGuard = match MapGuard::new_private(vmsa_pa, PAGE_SIZE) { 238 | Ok(r) => r, 239 | Err(_e) => return false, 240 | }; 241 | 242 | vc_ap_create(vmsa_map.va(), PERCPU.apic_id()); 243 | } 244 | 245 | true 246 | } 247 | 248 | /// Create a Vmsa and Caa and prepare them 249 | pub fn smp_prepare_bios_vmpl(caa_pa: PhysAddr) -> bool { 250 | let vmsa_pa: PhysAddr = alloc_vmsa().start_address(); 251 | let vmsa: MapGuard = match MapGuard::new_private(vmsa_pa, VMSA_MAP_SIZE) { 252 | Ok(v) => v, 253 | Err(_e) => return false, 254 | }; 255 | unsafe { __create_bios_vmsa(vmsa.va()) } 256 | 257 | let caa: MapGuard = match MapGuard::new_private(caa_pa, CAA_MAP_SIZE) { 258 | Ok(c) => c, 259 | Err(_e) => return false, 260 | }; 261 | 262 | unsafe { 263 | PERCPU.set_vmsa(vmsa_pa, VMPL::Vmpl1); 264 | PERCPU.set_caa(caa_pa, VMPL::Vmpl1); 265 | } 266 | 267 | // Update the permissions for the CAA and VMSA page. 268 | // 269 | // For the VMSA page, restrict it to read-only (at most) to prevent a guest 270 | // from attempting to alter the VMPL level within the VMSA. 271 | // 272 | // On error, do not try to reset the VMPL permission state for the pages, 273 | // just leak them. 274 | // 275 | // The lower VMPL has not been run, yet, so no TLB flushing is needed. 276 | // 277 | let ret: u32 = rmpadjust(caa.va().as_u64(), RMP_4K, VMPL_RWX | VMPL::Vmpl1 as u64); 278 | if ret != 0 { 279 | return false; 280 | } 281 | 282 | let ret: u32 = rmpadjust(vmsa.va().as_u64(), RMP_4K, VMPL_R | VMPL::Vmpl1 as u64); 283 | if ret != 0 { 284 | return false; 285 | } 286 | 287 | let vmin: u64 = VMPL::Vmpl2 as u64; 288 | let vmax: u64 = VMPL::VmplMax as u64; 289 | for i in vmin..vmax { 290 | let ret: u32 = rmpadjust(caa.va().as_u64(), RMP_4K, i); 291 | if ret != 0 { 292 | return false; 293 | } 294 | 295 | let ret: u32 = rmpadjust(vmsa.va().as_u64(), RMP_4K, i); 296 | if ret != 0 { 297 | return false; 298 | } 299 | } 300 | 301 | let ret: u32 = rmpadjust(vmsa.va().as_u64(), RMP_4K, VMPL_VMSA | VMPL::Vmpl1 as u64); 302 | if ret != 0 { 303 | return false; 304 | } 305 | 306 | unsafe { 307 | svsm_request_add_init_vmsa(vmsa_pa, PERCPU.apic_id()); 308 | } 309 | 310 | true 311 | } 312 | 313 | /// Get CPU id for a given Apic Id 314 | pub fn smp_get_cpu_id(apic_id: u32) -> Option { 315 | unsafe { 316 | for i in 0..percpu_count() { 317 | if PERCPU.apic_id_for(i) == apic_id { 318 | return Some(i); 319 | } 320 | } 321 | } 322 | 323 | return None; 324 | } 325 | 326 | unsafe fn __smp_init() { 327 | set_hl_main(ap_entry as u64); 328 | set_cpu_mode(1); 329 | 330 | let count: usize = percpu_count(); 331 | let aux: usize = count - 1; 332 | 333 | prints!("> Starting SMP for {aux} APs:\n"); 334 | 335 | for i in 1..count { 336 | if !ap_start(i) { 337 | vc_terminate_svsm_general(); 338 | } 339 | prints!("-- AP {i}/{aux} initialized.\n"); 340 | } 341 | } 342 | 343 | /// Boot other CPUs (APs) 344 | pub fn smp_init() { 345 | unsafe { 346 | __smp_init(); 347 | } 348 | } 349 | -------------------------------------------------------------------------------- /src/cpu/sys.rs: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::BIT; 10 | use core::arch::asm; 11 | 12 | /// Bit 12 13 | pub const EFER_SVME: u64 = BIT!(12); 14 | 15 | /// Read Cr2 16 | pub fn read_cr2() -> u64 { 17 | let cr2: u64; 18 | 19 | unsafe { 20 | asm!("mov {0}, cr2", 21 | out(reg) cr2, 22 | options(nostack)); 23 | } 24 | 25 | cr2 26 | } 27 | 28 | /// Read MSR 29 | pub fn rdmsr(msr: u32) -> u64 { 30 | let lo: u32; 31 | let hi: u32; 32 | 33 | unsafe { 34 | asm!("rdmsr", 35 | in("rcx") msr, out("rax") lo, out("rdx") hi, 36 | options(nostack)); 37 | } 38 | 39 | ((hi as u64) << 32) | lo as u64 40 | } 41 | 42 | /// Write to MSR a given value 43 | pub fn wrmsr(msr: u32, value: u64) { 44 | let lo: u32 = value as u32; 45 | let hi: u32 = (value >> 32) as u32; 46 | 47 | unsafe { 48 | asm!("wrmsr", 49 | in("rcx") msr, in("rax") lo, in("rdx") hi, 50 | options(nostack)); 51 | } 52 | } 53 | 54 | /// Execute assembly pause instruction 55 | pub fn pause() { 56 | unsafe { 57 | asm!("pause", options(nostack)); 58 | } 59 | } 60 | 61 | /// Execute assembly hlt instruction (yielding) 62 | pub fn halt() { 63 | unsafe { 64 | asm!("hlt", options(nostack)); 65 | } 66 | } 67 | 68 | /// 1 69 | pub const PVALIDATE_FAIL_INPUT: u32 = 1; 70 | /// 6 71 | pub const PVALIDATE_FAIL_SIZE_MISMATCH: u32 = 6; 72 | 73 | /// 15 74 | pub const PVALIDATE_RET_MAX: u32 = 15; 75 | /// 16 76 | pub const PVALIDATE_CF_SET: u32 = 16; 77 | /// 17 78 | pub const PVALIDATE_RET_ERR: u32 = 17; 79 | 80 | /// Pvalidate a given memory region 81 | pub fn pvalidate(va: u64, page_size: u32, validation: u32) -> u32 { 82 | let mut ret: u32; 83 | let mut carry: u32; 84 | 85 | unsafe { 86 | asm!("xor r8, r8", 87 | ".byte 0xf2,0x0f,0x01,0xff", 88 | "jnc 1f", 89 | "inc r8", 90 | "1:", 91 | in("rax") va, in("rcx") page_size, in("rdx") validation, 92 | lateout("rax") ret, lateout("r8") carry, 93 | options(nostack)); 94 | } 95 | 96 | if ret > PVALIDATE_RET_MAX { 97 | ret = PVALIDATE_RET_ERR; 98 | } else if ret == 0 && carry > 0 { 99 | ret = PVALIDATE_CF_SET; 100 | } 101 | 102 | ret 103 | } 104 | 105 | /// 1 106 | pub const RMPADJUST_FAIL_INPUT: u32 = 1; 107 | /// 2 108 | pub const RMPADJUST_FAIL_PERMISSION: u32 = 2; 109 | /// 6 110 | pub const RMPADJUST_FAIL_SIZE_MISMATCH: u32 = 6; 111 | 112 | /// Update RMP (Reverse Map Table) with new attributes 113 | pub fn rmpadjust(va: u64, page_size: u32, attrs: u64) -> u32 { 114 | let ret: u32; 115 | 116 | unsafe { 117 | asm!(".byte 0xf3,0x0f,0x01,0xfe", 118 | in("rax") va, in("rcx") page_size, in("rdx") attrs, 119 | lateout("rax") ret, 120 | options(nostack)); 121 | } 122 | 123 | ret 124 | } 125 | 126 | /// Flush everything for the ASID, including Global entries 127 | pub fn invlpgb_all() { 128 | let rax: u32 = BIT!(3); 129 | 130 | unsafe { 131 | asm!(".byte 0x0f,0x01,0xfe", 132 | in("rax") rax, in("rcx") 0, in("rdx") 0, 133 | options(nostack)); 134 | } 135 | } 136 | 137 | pub fn tlbsync() { 138 | unsafe { 139 | asm!(".byte 0x0f,0x01,0xff", options(nostack)); 140 | } 141 | } 142 | 143 | /// Compare and exchange 144 | pub fn cmpxchg(cmpval: u64, newval: u64, va: u64) -> u64 { 145 | let ret: u64; 146 | 147 | unsafe { 148 | asm!("lock cmpxchg [{0}], {1}", 149 | in(reg) va, in(reg) newval, in("rax") cmpval, 150 | lateout("rax") ret, 151 | options(nostack)); 152 | } 153 | 154 | ret 155 | } 156 | 
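// cmpxchg() returns the value that was in memory at `va` before the exchange,
// so callers detect success by comparing that return value against `cmpval`.
// A minimal sketch, mirroring how update_vmsa_efer_svme() in src/cpu/vmsa.rs
// uses it:
//
//     let old: u64 = cmpxchg(cur_efer, new_efer, efer_va);
//     let swapped: bool = old == cur_efer; // true iff the store took effect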
-------------------------------------------------------------------------------- /src/cpu/tss.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::*; 10 | use core::mem::size_of; 11 | use x86_64::instructions::tables::load_tss; 12 | use x86_64::registers::segmentation::SegmentSelector; 13 | use x86_64::structures::tss::TaskStateSegment; 14 | use x86_64::VirtAddr; 15 | 16 | // Index of the IST for #DF 17 | /// 0 18 | pub const DOUBLE_FAULT_IST: usize = 0; 19 | 20 | // 3 stack pages 21 | /// 3 22 | const IST_STACK_PAGES: u64 = 3; 23 | 24 | unsafe fn create_tss() -> VirtAddr { 25 | let tss_va: VirtAddr = match mem_allocate(size_of::()) { 26 | Ok(f) => f, 27 | Err(()) => vc_terminate_svsm_enomem(), 28 | }; 29 | 30 | let tss: *mut TaskStateSegment = tss_va.as_mut_ptr(); 31 | let tss_template: TaskStateSegment = TaskStateSegment::new(); 32 | 33 | // Make sure we have correct initial values 34 | *tss = tss_template; 35 | 36 | let ist_stack: VirtAddr = mem_create_stack(IST_STACK_PAGES, false); 37 | 38 | (*tss).interrupt_stack_table[DOUBLE_FAULT_IST] = ist_stack; 39 | 40 | tss_va 41 | } 42 | 43 | unsafe fn __tss_init() { 44 | let tss: VirtAddr = create_tss(); 45 | let tss_base: u64 = tss.as_u64(); 46 | let tss_limit: u64 = (size_of::() - 1) as u64; 47 | 48 | let gdt_tss0: *mut u64 = get_early_tss().as_u64() as *mut u64; 49 | let gdt_tss1: *mut u64 = (get_early_tss().as_u64() + 8) as *mut u64; 50 | 51 | // Update existing TSS entry in the GDT. 52 | 53 | *gdt_tss0 = (SVSM_TSS_TYPE as u64) << 40; 54 | *gdt_tss0 |= (tss_base & 0xff000000) << 32; 55 | *gdt_tss0 |= (tss_base & 0x00ffffff) << 16; 56 | *gdt_tss0 |= tss_limit; 57 | 58 | *gdt_tss1 = tss_base >> 32; 59 | 60 | PERCPU.set_tss(tss); 61 | 62 | load_tss(SegmentSelector(get_gdt64_tss() as u16)); 63 | } 64 | 65 | /// 66 | /// Create new TSS for a given CPU, but don't load it. 67 | /// Used by AP creation where the VMSA can be used to pre-set the 68 | /// task register (TR) with the TSS values 69 | /// 70 | pub fn tss_init_for(cpu_id: usize) -> VirtAddr { 71 | let tss: VirtAddr; 72 | 73 | unsafe { 74 | tss = create_tss(); 75 | PERCPU.set_tss_for(tss, cpu_id); 76 | } 77 | 78 | tss 79 | } 80 | 81 | /// Create and load TSS. 82 | /// Only used by the BSP, since APs can use tss_init_for() 83 | pub fn tss_init() { 84 | unsafe { 85 | __tss_init(); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/cpu/vmsa.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 
4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::sys::EFER_SVME; 10 | use crate::globals::*; 11 | use crate::STATIC_ASSERT; 12 | use crate::{cmpxchg, funcs, BARRIER, BIT}; 13 | 14 | use core::mem::size_of; 15 | use memoffset::offset_of; 16 | use paste::paste; 17 | use x86_64::addr::VirtAddr; 18 | use x86_64::instructions::tlb::flush; 19 | 20 | // Sev Features for guest 21 | // Secure Nested Paging is active 22 | /// Bit 0 23 | pub const SEV_FEAT_SNP_ACTIVE: u64 = BIT!(0); 24 | 25 | // Virtual TOM feature is enabled 26 | /// Bit 1 27 | pub const SEV_FEAT_VIRTUAL_TOM: u64 = BIT!(1); 28 | 29 | // Reflect #VC is enabled 30 | /// Bit 2 31 | pub const SEV_FEAT_REFLECT_VC: u64 = BIT!(2); 32 | 33 | // Restricted Injection is enabled 34 | /// Bit 3 35 | pub const SEV_FEAT_RESTRICTED_INJ: u64 = BIT!(3); 36 | 37 | // Alternate Injection is enabled 38 | /// Bit 4 39 | pub const SEV_FEAT_ALTERNATE_INJ: u64 = BIT!(4); 40 | 41 | // Extra debug registers are swapped 42 | /// Bit 5 43 | pub const SEV_FEAT_DEBUG_SWAP: u64 = BIT!(5); 44 | 45 | // Prevent Host IBS is enabled 46 | /// Bit 6 47 | pub const SEV_FEAT_PREVENT_HOST_IBS: u64 = BIT!(6); 48 | 49 | // BTB predictor isolation is enabled 50 | /// Bit 7 51 | pub const SEV_FEAT_SNP_BTB_ISOLATION: u64 = BIT!(7); 52 | 53 | // VMPL SSS is enabled 54 | /// Bit 8 55 | pub const SEV_FEAT_VMPL_SSS: u64 = BIT!(8); 56 | 57 | // Secure TSC feature is enabled 58 | /// Bit 9 59 | pub const SEV_FEAT_SECURE_TSC: u64 = BIT!(9); 60 | 61 | // Reserved 62 | /// Bits 10 to 13 63 | pub const SEV_FEAT_RESERVED_1: u64 = 0b1111 << 10; 64 | 65 | // VMSA Register Protection is enabled 66 | /// Bit 14 67 | pub const SEV_FEAT_VMSA_REG_PROTECTION: u64 = BIT!(14); 68 | 69 | // SMT protection is enabled 70 | /// Bit 15 71 | pub const SEV_FEAT_SMT_PROTECTION: u64 = BIT!(15); 72 | 73 | // Reserved 74 | /// Bits 16 to 63 75 | pub const SEV_FEAT_RESERVED_2: u64 = !(BIT!(16) - 1); 76 | 77 | // 78 | // Different VMPL levels may have distinct SEV features, 79 | // but some of them should be enabled or not, depending on 80 | // currently supported features and security considerations. 
81 | // This is the info on what should be checked or ignored: 82 | 83 | // MB1 - Must be 1 84 | // MBZ - Must be 0 85 | // DC - Don't care 86 | // 87 | // BITS VMPL0 VMPL1 88 | // 89 | // 0 - SNPAactive MB1 MB1 90 | // 1 - VirtualTOM MBZ MBZ 91 | // 2 - ReflectVC MBZ MBZ 92 | // 3 - RestrictInjection MB1 DC 93 | // 4 - AlternateInjection MBZ MBZ 94 | // 5 - DebugSwapSupport DC DC 95 | // 6 - PreventHostIbs DC DC 96 | // 7 - SNPBTBIsolation DC DC 97 | // 8 - VMPLSSS DC DC 98 | // 9 - SecureTSC MBZ MBZ 99 | // 10 - 13 Reserved_1 MBZ MBZ 100 | // 14 - VmsaRegisterProtection MBZ MBZ 101 | // 15 - SmtProtection MBZ MBZ 102 | // 103 | // 16 - 63 Reserved_2 MBZ MBZ 104 | // 105 | 106 | /// These are the features that must be one for VMPL1 107 | pub const VMPL1_REQUIRED_SEV_FEATS: u64 = SEV_FEAT_SNP_ACTIVE; 108 | 109 | /// These are the features that must be zero for VMPL1 110 | pub const VMPL1_UNSUPPORTED_SEV_FEATS: u64 = SEV_FEAT_VIRTUAL_TOM 111 | | SEV_FEAT_REFLECT_VC 112 | | SEV_FEAT_ALTERNATE_INJ 113 | | SEV_FEAT_SECURE_TSC 114 | | SEV_FEAT_RESERVED_1 115 | | SEV_FEAT_VMSA_REG_PROTECTION 116 | | SEV_FEAT_SMT_PROTECTION 117 | | SEV_FEAT_RESERVED_2; 118 | 119 | #[repr(C, packed)] 120 | #[derive(Copy, Clone, Debug)] 121 | pub struct VmsaSegmentRegister { 122 | selector: u16, 123 | rtype: u16, 124 | limit: u32, 125 | base: u64, 126 | } 127 | 128 | /// These are the features that must be one for VMPL0 129 | pub const VMPL0_REQUIRED_SEV_FEATS: u64 = SEV_FEAT_SNP_ACTIVE | SEV_FEAT_RESTRICTED_INJ; 130 | 131 | /// These are the features that must be zero for VMPL0 132 | pub const VMPL0_UNSUPPORTED_SEV_FEATS: u64 = SEV_FEAT_VIRTUAL_TOM 133 | | SEV_FEAT_REFLECT_VC 134 | | SEV_FEAT_ALTERNATE_INJ 135 | | SEV_FEAT_SECURE_TSC 136 | | SEV_FEAT_RESERVED_1 137 | | SEV_FEAT_VMSA_REG_PROTECTION 138 | | SEV_FEAT_RESERVED_2; 139 | 140 | #[repr(C, packed)] 141 | #[derive(Copy, Clone, Debug)] 142 | /// Virtual Machine Saving Area for world switches 143 | pub struct Vmsa { 144 | es: VmsaSegmentRegister, 145 | cs: VmsaSegmentRegister, 146 | ss: VmsaSegmentRegister, 147 | ds: VmsaSegmentRegister, 148 | fs: VmsaSegmentRegister, 149 | gs: VmsaSegmentRegister, 150 | gdtr: VmsaSegmentRegister, 151 | ldtr: VmsaSegmentRegister, 152 | idtr: VmsaSegmentRegister, 153 | tr: VmsaSegmentRegister, 154 | 155 | reserved1: [u8; 42], 156 | 157 | vmpl: u8, 158 | cpl: u8, 159 | 160 | reserved2: [u8; 4], 161 | 162 | efer: u64, 163 | 164 | reserved3: [u8; 104], 165 | 166 | xss: u64, 167 | cr4: u64, 168 | cr3: u64, 169 | cr0: u64, 170 | dr7: u64, 171 | dr6: u64, 172 | rflags: u64, 173 | rip: u64, 174 | 175 | reserved4: [u8; 88], 176 | 177 | rsp: u64, 178 | 179 | reserved5: [u8; 24], 180 | 181 | rax: u64, 182 | 183 | reserved6: [u8; 104], 184 | 185 | gpat: u64, 186 | 187 | reserved7: [u8; 152], 188 | 189 | rcx: u64, 190 | rdx: u64, 191 | rbx: u64, 192 | 193 | reserved8: [u8; 8], 194 | 195 | rbp: u64, 196 | rsi: u64, 197 | rdi: u64, 198 | r8: u64, 199 | r9: u64, 200 | r10: u64, 201 | r11: u64, 202 | r12: u64, 203 | r13: u64, 204 | r14: u64, 205 | r15: u64, 206 | 207 | reserved9: [u8; 48], 208 | 209 | sev_features: u64, 210 | 211 | reserved10: [u8; 8], 212 | 213 | guest_exitcode: u64, 214 | 215 | virtual_tom: u64, 216 | 217 | reserved11: [u8; 24], 218 | 219 | xcr0: u64, 220 | 221 | reserved12: [u8; 16], 222 | 223 | x87_dp: u64, 224 | mxcsr: u32, 225 | x87_ftw: u16, 226 | x87_fsw: u16, 227 | x87_fcw: u16, 228 | x87_fop: u16, 229 | x87_ds: u16, 230 | x87_cs: u16, 231 | x87_rip: u64, 232 | fpreg_x87: [u8; 80], 233 | fpreg_xmm: [u8; 256], 234 | 
fpreg_ymm: [u8; 256], 235 | 236 | reserved13: [u8; 2448], 237 | } 238 | 239 | macro_rules! vmsa_seg_fns { 240 | ($name: ident) => { 241 | paste! { 242 | pub fn [<$name _selector>](&self) -> u16 { 243 | self.$name.selector 244 | } 245 | pub fn [](&mut self, value: u16) { 246 | self.$name.selector = value; 247 | } 248 | pub fn [<$name _rtype>](&self) -> u16 { 249 | self.$name.rtype 250 | } 251 | pub fn [](&mut self, value: u16) { 252 | self.$name.rtype = value; 253 | } 254 | pub fn [<$name _limit>](&self) -> u32 { 255 | self.$name.limit 256 | } 257 | pub fn [](&mut self, value: u32) { 258 | self.$name.limit = value; 259 | } 260 | pub fn [<$name _base>](&self) -> u64 { 261 | self.$name.base 262 | } 263 | pub fn [](&mut self, value: u64) { 264 | self.$name.base = value; 265 | } 266 | } 267 | }; 268 | } 269 | 270 | impl Vmsa { 271 | vmsa_seg_fns!(cs); 272 | vmsa_seg_fns!(ds); 273 | vmsa_seg_fns!(es); 274 | vmsa_seg_fns!(fs); 275 | vmsa_seg_fns!(gs); 276 | vmsa_seg_fns!(ss); 277 | 278 | vmsa_seg_fns!(gdtr); 279 | vmsa_seg_fns!(idtr); 280 | vmsa_seg_fns!(ldtr); 281 | vmsa_seg_fns!(tr); 282 | 283 | funcs!(cr0, u64); 284 | funcs!(cr3, u64); 285 | funcs!(cr4, u64); 286 | funcs!(dr6, u64); 287 | funcs!(dr7, u64); 288 | funcs!(efer, u64); 289 | funcs!(gpat, u64); 290 | funcs!(mxcsr, u32); 291 | funcs!(vmpl, u8); 292 | funcs!(rax, u64); 293 | funcs!(rbx, u64); 294 | funcs!(rcx, u64); 295 | funcs!(rdx, u64); 296 | funcs!(rsi, u64); 297 | funcs!(rdi, u64); 298 | funcs!(r8, u64); 299 | funcs!(r9, u64); 300 | funcs!(r10, u64); 301 | funcs!(r11, u64); 302 | funcs!(r12, u64); 303 | funcs!(r13, u64); 304 | funcs!(r14, u64); 305 | funcs!(r15, u64); 306 | funcs!(rip, u64); 307 | funcs!(rflags, u64); 308 | funcs!(sev_features, u64); 309 | funcs!(guest_exitcode, u64); 310 | funcs!(xcr0, u64); 311 | funcs!(xss, u64); 312 | funcs!(x87_fcw, u16); 313 | funcs!(x87_ftw, u16); 314 | 315 | pub fn efer_offset(&self) -> u64 { 316 | offset_of!(Vmsa, efer) as u64 317 | } 318 | } 319 | 320 | #[inline] 321 | #[allow(dead_code)] 322 | fn vmsa_size_check() { 323 | STATIC_ASSERT!(size_of::() == PAGE_SIZE as usize); 324 | } 325 | 326 | unsafe fn update_vmsa_efer_svme(va: VirtAddr, svme: bool) -> bool { 327 | flush(va); 328 | BARRIER!(); 329 | 330 | let vmsa: *mut Vmsa = va.as_mut_ptr(); 331 | let efer_va: u64 = va.as_u64() + (*vmsa).efer_offset(); 332 | 333 | let cur_efer: u64 = (*vmsa).efer(); 334 | let new_efer: u64 = match svme { 335 | true => cur_efer | EFER_SVME, 336 | false => cur_efer & !EFER_SVME, 337 | }; 338 | 339 | let xchg_efer: u64 = cmpxchg(cur_efer, new_efer, efer_va); 340 | BARRIER!(); 341 | 342 | // If the cmpxchg() succeeds, xchg_efer will have the cur_efer value, 343 | // otherwise, it will have the new_efer value. 344 | xchg_efer == cur_efer 345 | } 346 | 347 | pub fn vmsa_clear_efer_svme(va: VirtAddr) -> bool { 348 | unsafe { update_vmsa_efer_svme(va, false) } 349 | } 350 | 351 | pub fn vmsa_set_efer_svme(va: VirtAddr) -> bool { 352 | unsafe { update_vmsa_efer_svme(va, true) } 353 | } 354 | -------------------------------------------------------------------------------- /src/globals.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 
4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::BIT; 10 | use x86_64::addr::VirtAddr; 11 | 12 | // GHCB standard termination constants 13 | /// 0 14 | pub const GHCB_REASON_CODE_SET: u64 = 0; 15 | /// 0 16 | pub const GHCB_TERM_GENERAL: u64 = 0; 17 | /// 1 18 | pub const GHCB_TERM_UNSUPPORTED_PROTOCOL: u64 = 1; 19 | /// 2 20 | pub const GHCB_TERM_FEATURE_SUPPORT: u64 = 2; 21 | 22 | // SVSM termination constants 23 | /// 15 24 | pub const SVSM_REASON_CODE_SET: u64 = 15; 25 | /// 0 26 | pub const SVSM_TERM_GENERAL: u64 = 0; 27 | /// 1 28 | pub const SVSM_TERM_ENOMEM: u64 = 1; 29 | /// 2 30 | pub const SVSM_TERM_UNHANDLED_VC: u64 = 2; 31 | /// 3 32 | pub const SVSM_TERM_PSC_ERROR: u64 = 3; 33 | /// 4 34 | pub const SVSM_TERM_SET_PAGE_ERROR: u64 = 4; 35 | /// 5 36 | pub const SVSM_TERM_NO_GHCB: u64 = 5; 37 | /// 6 38 | pub const SVSM_TERM_GHCB_RESP_INVALID: u64 = 6; 39 | /// 7 40 | pub const SVSM_TERM_FW_CFG_ERROR: u64 = 7; 41 | /// 8 42 | pub const SVSM_TERM_BIOS_FORMAT: u64 = 8; 43 | /// 9 44 | pub const SVSM_TERM_NOT_VMPL0: u64 = 9; 45 | /// 10 46 | pub const SVSM_TERM_VMPL0_SEV_FEATURES: u64 = 10; 47 | /// 11 48 | pub const SVSM_TERM_INCORRECT_VMPL: u64 = 11; 49 | /// 12 50 | pub const SVSM_TERM_VMPL1_SEV_FEATURES: u64 = 12; 51 | 52 | /// 12 53 | pub const PAGE_SHIFT: u64 = 12; 54 | /// BIT 12 55 | pub const PAGE_SIZE: u64 = BIT!(PAGE_SHIFT); 56 | /// Page Mask (the opposite of page size minus 1) 57 | pub const PAGE_MASK: u64 = !(PAGE_SIZE - 1); 58 | 59 | /// 21 60 | pub const PAGE_2MB_SHIFT: u64 = 21; 61 | /// Bit 21 62 | pub const PAGE_2MB_SIZE: u64 = BIT!(PAGE_2MB_SHIFT); 63 | /// Page Mask for 2MB (the opposite of 2MB page size minus 1) 64 | pub const PAGE_2MB_MASK: u64 = !(PAGE_2MB_SIZE - 1); 65 | 66 | // CPUID 67 | /// 0x0 68 | pub const CPUID_VENDOR_INFO: u32 = 0x00000000; 69 | /// 0xb 70 | pub const CPUID_EXTENDED_TOPO: u32 = 0x0000000b; 71 | /// 0xd 72 | pub const CPUID_EXTENDED_STATE: u32 = 0x0000000d; 73 | 74 | // MSRs 75 | /// 0xc0000101 76 | pub const MSR_GS_BASE: u32 = 0xc0000101; 77 | /// 0xc0010130 78 | pub const MSR_GHCB: u32 = 0xc0010130; 79 | /// 0xc0010131 80 | pub const MSR_SEV_STATUS: u32 = 0xc0010131; 81 | 82 | // PVALIDATE and RMPADJUST related 83 | /// 0 84 | pub const RMP_4K: u32 = 0; 85 | /// 1 86 | pub const RMP_2M: u32 = 1; 87 | 88 | /// Bit 8 89 | pub const VMPL_R: u64 = BIT!(8); 90 | /// Bit 9 91 | pub const VMPL_W: u64 = BIT!(9); 92 | /// Bit 10 93 | pub const VMPL_X_USER: u64 = BIT!(10); 94 | /// Bit 11 95 | pub const VMPL_X_SUPER: u64 = BIT!(11); 96 | /// Bit 16 97 | pub const VMSA_PAGE: u64 = BIT!(16); 98 | 99 | /// VMPL_R | VMPL_W | VMPL_X_USER | VMPL_X_SUPER 100 | pub const VMPL_RWX: u64 = VMPL_R | VMPL_W | VMPL_X_USER | VMPL_X_SUPER; 101 | /// VMPL_R | VMSA_PAGE 102 | pub const VMPL_VMSA: u64 = VMPL_R | VMSA_PAGE; 103 | 104 | #[derive(Copy, Clone, Debug)] 105 | /// Vmpl levels 106 | pub enum VMPL { 107 | Vmpl0, 108 | Vmpl1, 109 | Vmpl2, 110 | Vmpl3, 111 | 112 | VmplMax, 113 | } 114 | 115 | /// 8 116 | pub const CAA_MAP_SIZE: u64 = 8; 117 | 118 | /// PAGE_SIZE 119 | pub const VMSA_MAP_SIZE: u64 = PAGE_SIZE; 120 | 121 | // 122 | // External symbol support: 123 | // To better control the expected type of value in the external symbol, 124 | // create getter and, optionally, setter functions for accessing the 125 | // sysmbols. 126 | // 127 | macro_rules! extern_symbol_u64_ro { 128 | ($name: ident, $T: ty) => { 129 | paste::paste! 
{ 130 | extern "C" { 131 | static $name: $T; 132 | } 133 | pub fn []() -> u64 { 134 | unsafe { 135 | $name as u64 136 | } 137 | } 138 | } 139 | }; 140 | } 141 | 142 | macro_rules! extern_symbol_virtaddr_ro { 143 | ($name: ident, $T: ty) => { 144 | paste::paste! { 145 | extern "C" { 146 | static $name: $T; 147 | } 148 | pub fn []() -> VirtAddr { 149 | unsafe { 150 | VirtAddr::new($name as u64) 151 | } 152 | } 153 | } 154 | }; 155 | } 156 | 157 | macro_rules! extern_symbol_u64_rw { 158 | ($name: ident, $T1: ty) => { 159 | paste::paste! { 160 | extern "C" { 161 | static mut $name: $T1; 162 | } 163 | pub fn []() -> u64 { 164 | unsafe { 165 | $name as u64 166 | } 167 | } 168 | pub fn [](value: u64) { 169 | unsafe { 170 | $name = value; 171 | } 172 | } 173 | } 174 | }; 175 | } 176 | 177 | extern_symbol_u64_ro!(sev_encryption_mask, u64); 178 | extern_symbol_virtaddr_ro!(svsm_begin, u64); 179 | extern_symbol_virtaddr_ro!(svsm_end, u64); 180 | extern_symbol_virtaddr_ro!(svsm_sbss, u64); 181 | extern_symbol_virtaddr_ro!(svsm_ebss, u64); 182 | extern_symbol_virtaddr_ro!(svsm_sdata, u64); 183 | extern_symbol_virtaddr_ro!(svsm_edata, u64); 184 | extern_symbol_virtaddr_ro!(svsm_secrets_page, u64); 185 | extern_symbol_virtaddr_ro!(svsm_cpuid_page, u64); 186 | extern_symbol_u64_ro!(svsm_cpuid_page_size, u64); 187 | extern_symbol_virtaddr_ro!(bios_vmsa_page, u64); 188 | extern_symbol_virtaddr_ro!(guard_page, u64); 189 | extern_symbol_virtaddr_ro!(early_ghcb, u64); 190 | extern_symbol_virtaddr_ro!(early_tss, u64); 191 | extern_symbol_u64_ro!(gdt64_tss, u64); 192 | extern_symbol_u64_ro!(gdt64_kernel_cs, u64); 193 | extern_symbol_virtaddr_ro!(dyn_mem_begin, u64); 194 | extern_symbol_virtaddr_ro!(dyn_mem_end, u64); 195 | extern_symbol_u64_rw!(hl_main, u64); 196 | extern_symbol_u64_rw!(cpu_mode, u64); 197 | extern_symbol_u64_rw!(cpu_stack, u64); 198 | extern_symbol_u64_ro!(cpu_start, u64); 199 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | #![feature(type_ascription)] 10 | #![feature(abi_x86_interrupt)] 11 | // Disable the (implicitly-linked) standard library. #! defines behavior of the current module; as 12 | // we are in root, the entire crate is affected. 13 | #![no_std] 14 | // We cannot use the Rust runtime, Hence we don't have the C start point (C run time 0 aka crt0). 15 | // Tell the compiler we don't want to use the normal entry chain. Nobody calls main(), since we 16 | // overwrite _start(). 
17 | #![cfg_attr(not(test), no_main)] 18 | 19 | /// Initialize BIOS for the guest 20 | pub mod bios; 21 | /// Prepare and start SMP 22 | pub mod cpu; 23 | /// Global constants 24 | pub mod globals; 25 | /// Prepare page table, handle memory (de)allocations 26 | pub mod mem; 27 | /// Implementation of SVSM protocols and calls 28 | pub mod protocols; 29 | /// Handle requests from the SVSM guest 30 | pub mod svsm_request; 31 | /// Auxiliary functions and macros 32 | pub mod util; 33 | /// Handle the list of VMSA pages 34 | pub mod vmsa_list; 35 | /// Wrappers for external dependencies 36 | pub mod wrapper; 37 | 38 | extern crate alloc; 39 | 40 | use crate::bios::start_bios; 41 | use crate::cpu::rmpadjust; 42 | use crate::cpu::*; 43 | use crate::globals::*; 44 | use crate::mem::*; 45 | use crate::svsm_request::svsm_request_loop; 46 | use crate::util::*; 47 | use crate::vmsa::*; 48 | 49 | #[cfg(not(test))] 50 | use core::panic::PanicInfo; 51 | 52 | #[cfg(not(test))] 53 | #[panic_handler] 54 | fn panic(panic_info: &PanicInfo) -> ! { 55 | prints!("PANIC!\n{}\nPANIC!\n", panic_info); 56 | loop {} 57 | } 58 | 59 | /// Use the RMPADJUST instruction to determine if the SVSM is executing at VMPL0 60 | fn check_vmpl_level() { 61 | // Use the RMPADJUST instruction to determine if the SVSM is executing 62 | // at VMPL0. The RMPADJUST instruction can only update the attributes 63 | // of a lower VMPL-level (e.g.: VMPL0 can change VMPL1, VMPL2 or VMPL3). 64 | // By attempting to change the VMPL1 attributes of a page, it can be 65 | // determined if the SVSM is executing at VMPL0. 66 | // 67 | // Attempt to clear the VMPL1 attributes of the early GHCB page. 68 | 69 | let ret: u32 = rmpadjust(get_svsm_begin().as_u64(), RMP_4K, VMPL::Vmpl1 as u64); 70 | if ret != 0 { 71 | vc_terminate(SVSM_REASON_CODE_SET, SVSM_TERM_NOT_VMPL0); 72 | } 73 | } 74 | 75 | /// Check addresses are appropriately aligned and within boundaries 76 | fn check_svsm_address() { 77 | let total_size: u64 = get_svsm_end().as_u64() - get_svsm_begin().as_u64(); 78 | if !PAGE_2MB_ALIGNED!(get_svsm_begin().as_u64()) || !PAGE_2MB_ALIGNED!(total_size) { 79 | vc_terminate_svsm_general(); 80 | } 81 | // svsm_end is SVSM_GVA + SVSM_MEM. dyn_mem_begin is calculated based on 82 | // edata, so make sure it is within boundaries 83 | if get_svsm_end() < get_dyn_mem_begin() { 84 | vc_terminate_svsm_general(); 85 | } 86 | } 87 | 88 | /// Check SVSM is running with adequate SEV features 89 | fn check_vmpl0_features() { 90 | let features: u64 = rdmsr(MSR_SEV_STATUS) >> 2; 91 | 92 | if features & VMPL0_REQUIRED_SEV_FEATS != VMPL0_REQUIRED_SEV_FEATS { 93 | vc_terminate_vmpl0_sev_features(); 94 | } 95 | 96 | if features & VMPL0_UNSUPPORTED_SEV_FEATS != 0 { 97 | vc_terminate_vmpl0_sev_features(); 98 | } 99 | } 100 | 101 | /// Perform initial checkings to ensure adequate execution. 102 | /// This means checking SVSM runs on VMPL0, with proper addresses 103 | /// and sizes, and proper SEV features activate 104 | fn initial_checks() { 105 | // Ensure execution at VMPL0 106 | check_vmpl_level(); 107 | 108 | // Ensure we are running with proper SEV features 109 | check_vmpl0_features(); 110 | 111 | // Ensure SVSM addresses and sizes are appropiate 112 | check_svsm_address(); 113 | } 114 | 115 | /// Main function. Initialize everything and start request loop. 116 | /// This function never returns. 117 | #[no_mangle] 118 | pub extern "C" fn svsm_main() -> ! 
{ 119 | // Ensure valid SVSM execution environment 120 | initial_checks(); 121 | 122 | // Initialize exception/interrupt handling 123 | idt_init(); 124 | 125 | // Prepare VC handler 126 | vc_init(); 127 | 128 | mem_init(); 129 | 130 | // Create 4-level page table and load it 131 | pgtable_init(); 132 | 133 | // Allocate per-CPU data (pointed to by GS register) 134 | percpu_init(); 135 | 136 | // Set up the TSS 137 | tss_init(); 138 | 139 | ghcb_init(); 140 | 141 | serial_init(); 142 | 143 | fwcfg_init(); 144 | 145 | // Initialize and start APs 146 | smp_init(); 147 | 148 | // Load BIOS 149 | start_bios(); 150 | 151 | // Start taking requests from guest in this vCPU 152 | svsm_request_loop(); 153 | 154 | // We should never reach this point 155 | loop { 156 | halt() 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/mem/ca.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::*; 10 | 11 | #[repr(C, packed)] 12 | pub struct Ca { 13 | call_pending: u8, 14 | mem_available: u8, 15 | reserved1: [u8; 6], 16 | } 17 | 18 | impl Ca { 19 | funcs!(call_pending, u8); 20 | funcs!(mem_available, u8); 21 | } 22 | -------------------------------------------------------------------------------- /src/mem/fwcfg.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::pause; 10 | use crate::cpu::vc::vc_terminate_svsm_fwcfg; 11 | use crate::cpu::*; 12 | use crate::globals::*; 13 | use crate::mem::mem_allocate_frame; 14 | use crate::mem::pgtable::*; 15 | use crate::util::locking::{LockGuard, SpinLock}; 16 | use crate::util::util::memset; 17 | use crate::*; 18 | 19 | use alloc::vec::Vec; 20 | use core::mem::size_of; 21 | use core::ptr::copy_nonoverlapping; 22 | use lazy_static::lazy_static; 23 | use memchr::memchr; 24 | use x86_64::addr::{PhysAddr, VirtAddr}; 25 | use x86_64::structures::paging::PhysFrame; 26 | 27 | /// 0x510 28 | const FW_CFG_SELECTOR: u16 = 0x510; 29 | /// 0x511 30 | const FW_CFG_DATA: u16 = 0x511; 31 | /// 0x514 32 | const FW_CFG_DMA_HI: u16 = 0x514; 33 | /// 0x518 34 | const FW_CFG_DMA_LO: u16 = 0x518; 35 | 36 | /// 0x0000 37 | const FW_CFG_SIGNATURE: u16 = 0x0000; 38 | /// 0x554d4551 39 | const FW_SIGNATURE: u32 = 0x554d4551; 40 | 41 | /// 0x0001 42 | const FW_CFG_ID: u16 = 0x0001; 43 | //const FW_FEATURE_TRADITIONAL: u32 = BIT!(0); 44 | /// Bit 1 45 | const FW_FEATURE_DMA: u32 = BIT!(1); 46 | 47 | /// 0x0019 48 | const FW_CFG_FILE_DIR: u16 = 0x0019; 49 | 50 | /// Bit 0 51 | const FW_CFG_DMA_ERROR: u32 = BIT!(0); 52 | /// Bit 1 53 | const FW_CFG_DMA_READ: u32 = BIT!(1); 54 | //const FW_CFG_DMA_SKIP: u32 = BIT!(2); 55 | /// Bit 3 56 | const FW_CFG_DMA_SELECT: u32 = BIT!(3); 57 | //const FW_CFG_DMA_WRITE: u32 = BIT!(4); 58 | const FW_CFG_DMA_CLEAR_SELECTOR: u32 = !((0xffff << 16) | FW_CFG_DMA_SELECT); 59 | 60 | /// etc/bios_gpa 61 | const FW_CFG_BIOS_GPA: &str = "etc/bios_gpa"; 62 | /// etc/bios_size 63 | const FW_CFG_BIOS_SIZE: &str = "etc/bios_size"; 64 | 65 | #[repr(C)] 66 | #[derive(Copy, Clone, Debug)] 67 | struct FwCfgDmaDesc { 68 | control: u32, 69 | length: u32, 70 | address: u64, 71 | } 72 | 73 | /// 
PAGE_SIZE minus size of FwCfgDmaDesc 74 | const DMA_DATA_SIZE: usize = PAGE_SIZE as usize - size_of::(); 75 | 76 | #[repr(C)] 77 | #[derive(Copy, Clone, Debug)] 78 | struct FwCfgDma { 79 | desc: FwCfgDmaDesc, 80 | data: [u8; DMA_DATA_SIZE], 81 | } 82 | 83 | #[allow(dead_code)] 84 | impl FwCfgDma { 85 | funcs!(desc, FwCfgDmaDesc); 86 | funcs!(data, [u8; DMA_DATA_SIZE]); 87 | } 88 | 89 | #[repr(C)] 90 | #[derive(Copy, Clone, Debug)] 91 | struct FwCfgFile { 92 | size: u32, 93 | select: u16, 94 | reserved: u16, 95 | name: [u8; 56], 96 | } 97 | 98 | #[allow(dead_code)] 99 | impl FwCfgFile { 100 | pub const fn new() -> Self { 101 | FwCfgFile { 102 | size: 0, 103 | select: 0, 104 | reserved: 0, 105 | name: [0; 56], 106 | } 107 | } 108 | funcs!(size, u32); 109 | funcs!(select, u16); 110 | funcs!(name, [u8; 56]); 111 | } 112 | 113 | static mut FILE_COUNT: usize = 0; 114 | 115 | lazy_static! { 116 | static ref FW_CFG_DMA: SpinLock<&'static mut FwCfgDma> = { 117 | let frame: PhysFrame = match mem_allocate_frame() { 118 | Some(f) => f, 119 | None => vc_terminate_svsm_enomem(), 120 | }; 121 | let va: VirtAddr = pgtable_pa_to_va(frame.start_address()); 122 | 123 | pgtable_make_pages_shared(va, PAGE_SIZE); 124 | memset(va.as_mut_ptr(), 0, PAGE_SIZE as usize); 125 | 126 | let dma: &mut FwCfgDma; 127 | unsafe { 128 | dma = &mut *va.as_mut_ptr() as &mut FwCfgDma; 129 | } 130 | 131 | SpinLock::new(dma) 132 | }; 133 | static ref FW_CFG_FILES: SpinLock> = { 134 | let mut files: Vec; 135 | 136 | unsafe { 137 | files = Vec::with_capacity(FILE_COUNT); 138 | for _i in 0..FILE_COUNT { 139 | let f: FwCfgFile = FwCfgFile::new(); 140 | files.push(f); 141 | } 142 | } 143 | 144 | SpinLock::new(files) 145 | }; 146 | } 147 | 148 | fn read32_data_be() -> u32 { 149 | let mut value: u32 = (vc_inb(FW_CFG_DATA) as u32) << 24; 150 | 151 | value |= (vc_inb(FW_CFG_DATA) as u32) << 16; 152 | value |= (vc_inb(FW_CFG_DATA) as u32) << 8; 153 | value |= vc_inb(FW_CFG_DATA) as u32; 154 | 155 | value 156 | } 157 | 158 | fn read32_data_le() -> u32 { 159 | let mut value: u32 = vc_inb(FW_CFG_DATA) as u32; 160 | 161 | value |= (vc_inb(FW_CFG_DATA) as u32) << 8; 162 | value |= (vc_inb(FW_CFG_DATA) as u32) << 16; 163 | value |= (vc_inb(FW_CFG_DATA) as u32) << 24; 164 | 165 | value 166 | } 167 | 168 | fn read64_data_le() -> u64 { 169 | let mut value: u64 = vc_inb(FW_CFG_DATA) as u64; 170 | 171 | value |= (vc_inb(FW_CFG_DATA) as u64) << 8; 172 | value |= (vc_inb(FW_CFG_DATA) as u64) << 16; 173 | value |= (vc_inb(FW_CFG_DATA) as u64) << 24; 174 | value |= (vc_inb(FW_CFG_DATA) as u64) << 32; 175 | value |= (vc_inb(FW_CFG_DATA) as u64) << 40; 176 | value |= (vc_inb(FW_CFG_DATA) as u64) << 48; 177 | value |= (vc_inb(FW_CFG_DATA) as u64) << 56; 178 | 179 | value 180 | } 181 | 182 | fn perform_dma(dma: &mut FwCfgDma, data: *const u8, control: u32, size: usize) { 183 | let dma_pa = pgtable_va_to_pa(VirtAddr::new(dma as *mut FwCfgDma as u64)); 184 | let dma_data_pa = pgtable_va_to_pa(VirtAddr::new(&dma.data as *const u8 as u64)); 185 | 186 | assert!(size <= DMA_DATA_SIZE); 187 | 188 | dma.desc.control = u32::swap_bytes(control); 189 | dma.desc.length = u32::swap_bytes(size as u32); 190 | dma.desc.address = u64::swap_bytes(dma_data_pa.as_u64()); 191 | 192 | let lo: u32 = LOWER_32BITS!(dma_pa.as_u64()) as u32; 193 | let hi: u32 = UPPER_32BITS!(dma_pa.as_u64()) as u32; 194 | vc_outl(FW_CFG_DMA_HI, u32::swap_bytes(hi)); 195 | vc_outl(FW_CFG_DMA_LO, u32::swap_bytes(lo)); 196 | 197 | let mut c: u32; 198 | loop { 199 | c = u32::swap_bytes(dma.desc.control); 
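            // The fw_cfg device clears the control word when the transfer
            // completes, leaving at most the error bit set, so keep polling
            // until every bit other than FW_CFG_DMA_ERROR is zero.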
200 | if (c & !FW_CFG_DMA_ERROR) == 0 { 201 | break; 202 | } 203 | pause(); 204 | } 205 | 206 | if (c & FW_CFG_DMA_ERROR) != 0 { 207 | vc_terminate_svsm_fwcfg(); 208 | } 209 | 210 | unsafe { 211 | let p: *mut u8 = data as *mut u8; 212 | copy_nonoverlapping(&dma.data as *const u8, p, size); 213 | } 214 | } 215 | 216 | #[inline] 217 | fn select_cfg_item(item: u16) { 218 | vc_outw(FW_CFG_SELECTOR, item); 219 | } 220 | 221 | fn find_file_selector(fname: &str) -> Option { 222 | let files: LockGuard> = FW_CFG_FILES.lock(); 223 | 224 | for f in files.iter() { 225 | let nul: usize = match memchr(0, &f.name) { 226 | Some(n) => n, 227 | None => vc_terminate_svsm_fwcfg(), 228 | }; 229 | let n: &str = match core::str::from_utf8(&f.name[0..nul]) { 230 | Ok(n) => n, 231 | Err(_e) => vc_terminate_svsm_fwcfg(), 232 | }; 233 | 234 | if n.eq(fname) { 235 | return Some(f.select); 236 | } 237 | } 238 | 239 | return None; 240 | } 241 | 242 | /// Returns the GPA and size of the area in which the bios is loaded 243 | pub fn fwcfg_get_bios_area() -> Option<(PhysAddr, u64)> { 244 | let bios_pa: u64; 245 | let bios_size: u64; 246 | 247 | let selector: u16 = match find_file_selector(FW_CFG_BIOS_GPA) { 248 | Some(f) => f, 249 | None => return None, 250 | }; 251 | select_cfg_item(selector); 252 | bios_pa = read64_data_le(); 253 | 254 | let selector: u16 = match find_file_selector(FW_CFG_BIOS_SIZE) { 255 | Some(f) => f, 256 | None => return None, 257 | }; 258 | select_cfg_item(selector); 259 | bios_size = read64_data_le(); 260 | 261 | // Check for possible buffer overflow 262 | match bios_pa.checked_add(bios_size) { 263 | Some(_v) => (), 264 | None => return None, 265 | }; 266 | 267 | Some((PhysAddr::new(bios_pa), bios_size)) 268 | } 269 | 270 | /// Perform DMA to read firmware configuration files 271 | pub fn fwcfg_init() { 272 | STATIC_ASSERT!(size_of::() == PAGE_SIZE as usize); 273 | 274 | lazy_static::initialize(&FW_CFG_DMA); 275 | 276 | /* Validate the signature */ 277 | select_cfg_item(FW_CFG_SIGNATURE); 278 | let signature: u32 = read32_data_le(); 279 | if signature != FW_SIGNATURE { 280 | vc_terminate_svsm_fwcfg(); 281 | } 282 | 283 | /* Validate DMA support */ 284 | select_cfg_item(FW_CFG_ID); 285 | let features: u32 = read32_data_le(); 286 | if (features & FW_FEATURE_DMA) == 0 { 287 | vc_terminate_svsm_fwcfg(); 288 | } 289 | 290 | select_cfg_item(FW_CFG_FILE_DIR); 291 | let file_count: u32 = read32_data_be(); 292 | if file_count == 0 { 293 | vc_terminate_svsm_fwcfg(); 294 | } 295 | 296 | unsafe { 297 | FILE_COUNT = file_count as usize; 298 | } 299 | 300 | lazy_static::initialize(&FW_CFG_FILES); 301 | 302 | unsafe { 303 | let f: FwCfgFile = FwCfgFile { 304 | size: 0, 305 | select: 0, 306 | reserved: 0, 307 | name: [0; 56], 308 | }; 309 | 310 | let mut files: LockGuard> = FW_CFG_FILES.lock(); 311 | 312 | let size: usize = size_of::(); 313 | let mut control = FW_CFG_DMA_READ; 314 | let mut dma: LockGuard<&'static mut FwCfgDma> = FW_CFG_DMA.lock(); 315 | for i in 0..FILE_COUNT { 316 | let bytes: *const u8 = &f as *const FwCfgFile as *const u8; 317 | perform_dma(&mut dma, bytes, control, size); 318 | 319 | files[i].size = u32::swap_bytes(f.size); 320 | files[i].select = u16::swap_bytes(f.select); 321 | files[i].name = f.name; 322 | 323 | /* Stay on the same item */ 324 | control &= FW_CFG_DMA_CLEAR_SELECTOR; 325 | } 326 | 327 | prints!("> All {FILE_COUNT} firmware config files read.\n"); 328 | } 329 | } 330 | -------------------------------------------------------------------------------- /src/mem/ghcb.rs: 
-------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::percpu::PERCPU; 10 | use crate::cpu::percpu_count; 11 | use crate::cpu::vc_register_ghcb; 12 | use crate::cpu::vc_terminate_svsm_enomem; 13 | use crate::globals::*; 14 | use crate::mem::mem_allocate_frame; 15 | use crate::mem::pgtable_make_pages_shared; 16 | use crate::mem::pgtable_pa_to_va; 17 | use crate::mem::pgtable_va_to_pa; 18 | use crate::util::util::memset; 19 | use crate::BIT; 20 | use crate::STATIC_ASSERT; 21 | 22 | use core::mem::size_of; 23 | use core::ptr::copy_nonoverlapping; 24 | use memoffset::offset_of; 25 | use paste::paste; 26 | use x86_64::structures::paging::PhysFrame; 27 | use x86_64::VirtAddr; 28 | 29 | /// 1 30 | pub const GHCB_VERSION_1: u16 = 1; 31 | /// 0 32 | pub const GHCB_USAGE: u32 = 0; 33 | 34 | /// 2032 35 | pub const SHARED_BUFFER_SIZE: usize = 2032; 36 | 37 | #[repr(C, packed)] 38 | #[derive(Copy, Clone, Debug)] 39 | pub struct Ghcb { 40 | reserved1: [u8; 203], 41 | cpl: u8, 42 | reserved2: [u8; 300], 43 | rax: u64, 44 | reserved3: [u8; 264], 45 | rcx: u64, 46 | rdx: u64, 47 | rbx: u64, 48 | reserved4: [u8; 112], 49 | sw_exit_code: u64, 50 | sw_exit_info_1: u64, 51 | sw_exit_info_2: u64, 52 | sw_scratch: u64, 53 | reserved5: [u8; 56], 54 | xcr0: u64, 55 | valid_bitmap: [u8; 16], 56 | reserved6: [u8; 1024], 57 | shared_buffer: [u8; SHARED_BUFFER_SIZE], 58 | reserved7: [u8; 10], 59 | version: u16, 60 | usage: u32, 61 | } 62 | 63 | macro_rules! ghcb_fns { 64 | ($name: ident) => { 65 | paste! { 66 | pub fn [<$name>](&self) -> u64 { 67 | self.$name 68 | } 69 | pub fn [](&mut self, value: u64) { 70 | self.$name = value; 71 | self.set_offset_valid(offset_of!(Ghcb, $name)); 72 | } 73 | pub fn [](&self) -> bool { 74 | self.is_offset_valid(offset_of!(Ghcb, $name)) 75 | } 76 | } 77 | }; 78 | } 79 | 80 | impl Ghcb { 81 | ghcb_fns!(rax); 82 | ghcb_fns!(rbx); 83 | ghcb_fns!(rcx); 84 | ghcb_fns!(rdx); 85 | ghcb_fns!(xcr0); 86 | ghcb_fns!(sw_exit_code); 87 | ghcb_fns!(sw_exit_info_1); 88 | ghcb_fns!(sw_exit_info_2); 89 | ghcb_fns!(sw_scratch); 90 | 91 | pub fn shared_buffer(&mut self, data: *mut u8, len: usize) { 92 | assert!(len <= SHARED_BUFFER_SIZE); 93 | 94 | unsafe { 95 | copy_nonoverlapping(&self.shared_buffer as *const u8, data, len); 96 | } 97 | } 98 | 99 | pub fn set_shared_buffer(&mut self, data: *const u8, len: usize) { 100 | assert!(len <= SHARED_BUFFER_SIZE); 101 | 102 | unsafe { 103 | copy_nonoverlapping(data, &mut self.shared_buffer as *mut u8, len); 104 | } 105 | 106 | let va: VirtAddr = VirtAddr::new_truncate(&self.shared_buffer as *const u8 as u64); 107 | self.set_sw_scratch(pgtable_va_to_pa(va).as_u64()); 108 | } 109 | 110 | pub fn version(&mut self) -> u16 { 111 | self.version 112 | } 113 | 114 | pub fn set_version(&mut self, version: u16) { 115 | self.version = version; 116 | } 117 | 118 | pub fn usage(&mut self) -> u32 { 119 | self.usage 120 | } 121 | 122 | pub fn set_usage(&mut self, usage: u32) { 123 | self.usage = usage; 124 | } 125 | 126 | pub fn clear(&mut self) { 127 | self.sw_exit_code = 0; 128 | self.valid_bitmap.iter_mut().for_each(|i| *i = 0); 129 | } 130 | 131 | fn set_offset_valid(&mut self, offset: usize) { 132 | let idx: usize = (offset / 8) / 8; 133 | let bit: usize = (offset / 8) % 8; 134 | 135 | self.valid_bitmap[idx] |= BIT!(bit); 136 | } 137 | 138 | fn 
is_offset_valid(&self, offset: usize) -> bool { 139 | let idx: usize = (offset / 8) / 8; 140 | let bit: usize = (offset / 8) % 8; 141 | 142 | (self.valid_bitmap[idx] & BIT!(bit)) != 0 143 | } 144 | } 145 | 146 | pub fn ghcb_init() { 147 | STATIC_ASSERT!(size_of::() == PAGE_SIZE as usize); 148 | 149 | // 150 | // Perform GHCB allocation in a loop to avoid allocation order failures 151 | // for large vCPU counts. 152 | // 153 | let count: usize = percpu_count(); 154 | for i in 0..count { 155 | let frame: PhysFrame = match mem_allocate_frame() { 156 | Some(f) => f, 157 | None => vc_terminate_svsm_enomem(), 158 | }; 159 | let va: VirtAddr = pgtable_pa_to_va(frame.start_address()); 160 | 161 | pgtable_make_pages_shared(va, PAGE_SIZE); 162 | memset(va.as_mut_ptr(), 0, PAGE_SIZE as usize); 163 | 164 | unsafe { 165 | PERCPU.set_ghcb_for(va, i); 166 | 167 | if i == 0 { 168 | // Register the BSPs GHCB 169 | vc_register_ghcb(frame.start_address()); 170 | } 171 | } 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/mem/map_guard.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2023 IBM Corporation 4 | * 5 | * Authors: Dov Murik 6 | */ 7 | 8 | use crate::getter_func; 9 | use crate::mem::pgtable::{ 10 | pgtable_map_pages_private, pgtable_map_pages_shared, pgtable_unmap_pages, 11 | }; 12 | 13 | use core::slice; 14 | use x86_64::structures::paging::mapper::MapToError; 15 | use x86_64::structures::paging::page::Size4KiB; 16 | use x86_64::{PhysAddr, VirtAddr}; 17 | 18 | /// An area mapped into virtual memory. If `unmap_on_drop` is true, the 19 | /// area is unmapped when the `MapGuard` is dropped (out of scope). 20 | /// 21 | /// # Examples 22 | /// 23 | /// ``` 24 | /// fn work() -> Result<()> { 25 | /// let map1 = MapGuard::new_private(gpa, size)?; 26 | /// // view the memory as a C struct 27 | /// let req: &MyRequestStruct = map1.as_object(); 28 | /// let map2 = MapGuard::new_private(gpa2, size)?; // <--- an error here will cause map1 to unmap 29 | /// // view the memory as a slice of bytes with the correct size (the entire mapped area) 30 | /// let buf: &[u8] = map2.as_bytes(); 31 | /// 32 | /// // read from the mapped memory 33 | /// if some_condition { 34 | /// return Err(...); // here both areas are unmapped 35 | /// } 36 | /// 37 | /// Ok(()) 38 | /// // here both areas are unmapped 39 | /// } 40 | /// ``` 41 | 42 | pub struct MapGuard { 43 | pa: PhysAddr, 44 | va: VirtAddr, 45 | len: u64, 46 | unmap_on_drop: bool, 47 | } 48 | 49 | impl MapGuard { 50 | /// Map an area to virtual memory as private (encrypted) pages; when 51 | /// the MapGuard is dropped, the area will be unmapped. 52 | pub fn new_private(pa: PhysAddr, len: u64) -> Result> { 53 | let va: VirtAddr = pgtable_map_pages_private(pa, len)?; 54 | Ok(Self { 55 | pa, 56 | va, 57 | len, 58 | unmap_on_drop: true, 59 | }) 60 | } 61 | 62 | /// Map an area to virtual memory as private (encrypted) pages but 63 | /// don't unmap it when the MapGuard is dropped. 64 | pub fn new_private_persistent(pa: PhysAddr, len: u64) -> Result> { 65 | let va: VirtAddr = pgtable_map_pages_private(pa, len)?; 66 | Ok(Self { 67 | pa, 68 | va, 69 | len, 70 | unmap_on_drop: false, 71 | }) 72 | } 73 | 74 | /// Map an area to virtual memory as shared (plaintext) pages; when 75 | /// the MapGuard is dropped, the area will be unmapped. 
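/// A minimal usage sketch (assuming `gpa` and `len` describe a valid guest region): `let m = MapGuard::new_shared(gpa, len)?; let bytes: &[u8] = m.as_bytes();` The pages are unmapped again when `m` goes out of scope.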
76 | pub fn new_shared(pa: PhysAddr, len: u64) -> Result> { 77 | let va: VirtAddr = pgtable_map_pages_shared(pa, len)?; 78 | Ok(Self { 79 | pa, 80 | va, 81 | len, 82 | unmap_on_drop: true, 83 | }) 84 | } 85 | 86 | pub fn as_bytes(&self) -> &[u8] { 87 | unsafe { slice::from_raw_parts(self.va.as_ptr(), self.len as usize) } 88 | } 89 | 90 | pub fn as_bytes_mut(&mut self) -> &mut [u8] { 91 | unsafe { slice::from_raw_parts_mut(self.va.as_mut_ptr(), self.len as usize) } 92 | } 93 | 94 | pub fn as_object(&self) -> &T { 95 | unsafe { &slice::from_raw_parts::(self.va.as_ptr() as *const _, 1)[0] } 96 | } 97 | 98 | pub fn as_object_mut(&mut self) -> &mut T { 99 | unsafe { &mut slice::from_raw_parts_mut::(self.va.as_mut_ptr() as *mut _, 1)[0] } 100 | } 101 | 102 | getter_func!(pa, PhysAddr); 103 | getter_func!(va, VirtAddr); 104 | getter_func!(len, u64); 105 | } 106 | 107 | impl Drop for MapGuard { 108 | fn drop(&mut self) { 109 | if self.unmap_on_drop { 110 | pgtable_unmap_pages(self.va, self.len); 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /src/mem/mod.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | /// Dynamic memory allocation handling 10 | mod alloc; 11 | /// Calling area (for guest requests) 12 | pub mod ca; 13 | /// Firmware configuration 14 | pub mod fwcfg; 15 | /// Guest Host Communication Block support 16 | pub mod ghcb; 17 | /// MapGuard 18 | pub mod map_guard; 19 | /// Page Table and its related operations 20 | pub mod pgtable; 21 | 22 | pub use crate::mem::alloc::{ 23 | mem_allocate, mem_allocate_frame, mem_allocate_frames, mem_callocate, mem_create_stack, 24 | mem_free, mem_free_frame, mem_free_frames, mem_init, mem_reallocate, 25 | }; 26 | 27 | pub use crate::mem::pgtable::{ 28 | pgtable_init, pgtable_make_pages_np, pgtable_make_pages_nx, pgtable_make_pages_private, 29 | pgtable_make_pages_shared, pgtable_pa_to_va, pgtable_print_pte_pa, pgtable_print_pte_va, 30 | pgtable_va_to_pa, 31 | }; 32 | 33 | pub use crate::mem::map_guard::MapGuard; 34 | 35 | pub use crate::mem::ghcb::ghcb_init; 36 | 37 | pub use crate::mem::fwcfg::{fwcfg_get_bios_area, fwcfg_init}; 38 | -------------------------------------------------------------------------------- /src/protocols/error_codes.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 
4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | /// 0 10 | pub const SVSM_SUCCESS: u64 = 0; 11 | //pub const SVSM_ERR_INCOMPLETE: u64 = 0x80000000; 12 | /// 0x80000001 13 | pub const SVSM_ERR_UNSUPPORTED_PROTOCOL: u64 = 0x80000001; 14 | /// 0x80000002 15 | pub const SVSM_ERR_UNSUPPORTED_CALLID: u64 = 0x80000002; 16 | /// 0x80000003 17 | pub const SVSM_ERR_INVALID_ADDRESS: u64 = 0x80000003; 18 | //pub const SVSM_ERR_INVALID_FORMAT: u64 = 0x80000004; 19 | /// 0x80000005 20 | pub const SVSM_ERR_INVALID_PARAMETER: u64 = 0x80000005; 21 | /// 0x80000006 22 | pub const SVSM_ERR_INVALID_REQUEST: u64 = 0x80000006; 23 | 24 | /// 0x80001000 25 | pub const SVSM_ERR_PROTOCOL_BASE: u64 = 0x80001000; 26 | /// 0x80001003 27 | pub const SVSM_ERR_PROTOCOL_FAIL_INUSE: u64 = 0x80001003; 28 | -------------------------------------------------------------------------------- /src/protocols/mod.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2023 IBM Corporation 4 | * 5 | * Authors: Dov Murik 6 | */ 7 | 8 | /// Implementation of the core protocol (0) 9 | pub mod core; 10 | /// Error codes returned from the SVSM calls 11 | pub mod error_codes; 12 | 13 | pub use crate::protocols::core::*; 14 | 15 | /// 0 16 | pub const SVSM_CORE_PROTOCOL: u32 = 0; 17 | -------------------------------------------------------------------------------- /src/start/start.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Author: Tom Lendacky 6 | */ 7 | 8 | #include "svsm.h" 9 | 10 | #define CBIT(x) (BIT(51) + x) 11 | #define GPA(x) (x - SVSM_GVA_OFFSET_ASM) 12 | 13 | #define SVSM_PGD_ENTRY(x) (CBIT(x) + 0x03) 14 | #define SVSM_P4D_ENTRY(x) (CBIT(x) + 0x03) 15 | #define SVSM_PUD_ENTRY(x) (CBIT(x) + 0x03) 16 | #define SVSM_PMD_ENTRY(x) (CBIT(x) + 0x83) 17 | #define SVSM_PTE_ENTRY(x) (CBIT(x) + 0x03) 18 | 19 | .code64 20 | GLOBAL(code_64) 21 | cli 22 | 23 | xorq %rax, %rax 24 | movq %rax, %ds 25 | movq %rax, %es 26 | movq %rax, %fs 27 | movq %rax, %ss 28 | 29 | /* Setup a stack */ 30 | movq cpu_stack(%rip), %rsp 31 | 32 | /* 33 | * Jump to main high-level language code now for APs 34 | */ 35 | cmpl $0, cpu_mode(%rip) 36 | jne hl 37 | 38 | /* Load RBX with the virtual address offset for use throughout boot */ 39 | movq $SVSM_GVA_OFFSET_ASM, %rbx 40 | 41 | /* GS is set for APs, only clear it after the AP check */ 42 | movq %rax, %gs 43 | 44 | /* 45 | * SEV mitigation test to verify encryption bit position: 46 | * Use the CMP instruction, with RIP-relative addressing, to compare 47 | * the first four bytes of the CMP instruction itself (which will be 48 | * read decrypted if the encryption bit is in the proper location) 49 | * against the immediate value within the instruction itself 50 | * (instruction fetches are always decrypted by hardware). 51 | */ 52 | movq $0x1f100, %rsi 53 | insn: 54 | cmpl $0xfff63d81, insn(%rip) 55 | jne terminate_64 56 | 57 | /* Validate that the build load address matches the actual load address */ 58 | movq $0x2f100, %rsi 59 | leaq code_64(%rip), %rax 60 | subq %rbx, %rax 61 | movq $SVSM_GPA_ASM, %rcx 62 | cmpq %rax, %rcx 63 | jne terminate_64 64 | 65 | /* 66 | * Make the early GHCB shared: 67 | * - Since there is only one PGD/P4D/PUD entry, operate on just 68 | * the PMD entry that holds the early GHCB. 
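 * The sequence below first rescinds the page's validation with PVALIDATE, then asks the hypervisor to flip the page to shared in the RMP using the GHCB MSR protocol's Page State Change request, and finally rebuilds the page tables so the 4KB page holding the early GHCB is mapped without the encryption bit.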
69 | */ 70 | leaq ghcb(%rip), %rsi 71 | movq %rsi, early_ghcb(%rip) 72 | 73 | /* 74 | * Rescind the page validation from LAUNCH. 75 | */ 76 | movq %rsi, %rax 77 | movq $0, %rcx 78 | movq $0, %rdx 79 | .byte 0xf2,0x0f,0x01,0xff /* pvalidate */ 80 | jc terminate_64 81 | 82 | /* 83 | * Issue the Page State Change to make shared in the RMP. 84 | */ 85 | psc: 86 | movq $0xc0010130, %rcx 87 | rdmsr 88 | pushq %rax 89 | pushq %rdx 90 | 91 | subq %rbx, %rsi 92 | movq $2, %rax 93 | shlq $52, %rax 94 | addq $0x14, %rax 95 | addq %rsi, %rax 96 | movq %rax, %rdx 97 | shrq $32, %rdx 98 | wrmsr 99 | rep; vmmcall 100 | rdmsr 101 | cmpq $0x15, %rax 102 | jne terminate_64 103 | cmpq $0, %rdx 104 | jne terminate_64 105 | 106 | popq %rdx 107 | popq %rax 108 | wrmsr 109 | 110 | /* 111 | * Build the PTE entries. Use the address of the early GHCB to 112 | * obtain the start and end of the 2MB page in which it lives. 113 | */ 114 | leaq ghcb(%rip), %rsi 115 | subq %rbx, %rsi 116 | andq $PAGE_2MB_MASK, %rsi 117 | addq $PAGE_2MB_SIZE, %rsi 118 | movq $SVSM_PTE_ENTRY(0), %rax 119 | addq %rax, %rsi 120 | 121 | leaq pte(%rip), %rax 122 | addq $PAGE_SIZE, %rax 123 | 124 | movq $PAGE_TABLE_ENTRY_COUNT, %rcx 125 | set_pte: 126 | subq $PAGE_SIZE, %rsi 127 | subq $PAGE_TABLE_ENTRY_SIZE, %rax 128 | movq %rsi, (%rax) 129 | loop set_pte 130 | 131 | /* Make GHCB page shared */ 132 | leaq ghcb(%rip), %rsi 133 | movq %rsi, %rax 134 | shrq $PAGE_SHIFT, %rax 135 | andq $PAGE_TABLE_INDEX_MASK, %rax 136 | shlq $3, %rax 137 | leaq pte(%rip), %rcx 138 | addq %rcx, %rax 139 | subq %rbx, %rsi 140 | addq $0x03, %rsi 141 | movq %rsi, (%rax) 142 | 143 | /* Replace the huge PMD entry with the new PTE */ 144 | leaq ghcb(%rip), %rsi 145 | movq %rsi, %rax 146 | shrq $PAGE_2MB_SHIFT, %rax 147 | andq $PAGE_TABLE_INDEX_MASK, %rax 148 | shlq $3, %rax 149 | leaq pmd(%rip), %rcx 150 | addq %rcx, %rax 151 | 152 | leaq pte(%rip), %rdx 153 | subq %rbx, %rdx 154 | movq $SVSM_PTE_ENTRY(0), %rcx 155 | addq %rcx, %rdx 156 | movq %rdx, (%rax) 157 | 158 | /* Flush the TLB - no globals, so CR3 update is enough */ 159 | mov %cr3, %rax 160 | mov %rax, %cr3 161 | 162 | /* Zero out the early GHCB */ 163 | cld 164 | leaq ghcb(%rip), %rdi 165 | movq $PAGE_SIZE, %rcx 166 | xorq %rax, %rax 167 | rep stosb 168 | 169 | /* Zero out the BSS memory */ 170 | cld 171 | leaq sbss(%rip), %rdi 172 | leaq ebss(%rip), %rcx 173 | subq %rdi, %rcx 174 | xorq %rax, %rax 175 | rep stosb 176 | 177 | /* Save the start and end of the SVSM and dynamic memory */ 178 | movq $SVSM_GVA_ASM, %rax 179 | movq %rax, svsm_begin(%rip) 180 | addq $SVSM_MEM_ASM, %rax 181 | movq %rax, svsm_end(%rip) 182 | 183 | movq %rax, dyn_mem_end(%rip) 184 | leaq SVSM_DYN_MEM_BEGIN(%rip), %rax 185 | movq %rax, dyn_mem_begin(%rip) 186 | 187 | hl: 188 | xorq %rdi, %rdi 189 | xorq %rsi, %rsi 190 | xorq %rdx, %rdx 191 | xorq %rcx, %rcx 192 | xorq %r8, %r8 193 | xorq %r9, %r9 194 | 195 | movq hl_main(%rip), %rax 196 | call *%rax 197 | 198 | movq $0x3f100, %rsi 199 | jmp terminate_64 200 | 201 | /* 202 | * 64-bit termination MSR protocol termination and HLT loop 203 | */ 204 | terminate_64: 205 | movq %rsi, %rax 206 | movq $0, %rdx 207 | movq $0xc0010130, %rcx 208 | wrmsr 209 | rep; vmmcall 210 | terminate_hlt: 211 | hlt 212 | jmp terminate_hlt 213 | 214 | .section .data 215 | /* 216 | * Four zeroed stack pages with associated guard page. 
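 * The guard page sits just below the stack so that a stack overflow runs into it (it can later be marked not-present via the page-table code) instead of silently corrupting adjacent data.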
217 | */ 218 | .balign 4096 219 | GLOBAL(bsp_guard_page) 220 | .fill 512, 8, 0 221 | bsp_stack_start: 222 | .fill 512, 8, 0 223 | .fill 512, 8, 0 224 | .fill 512, 8, 0 225 | .fill 512, 8, 0 226 | bsp_stack_end: 227 | 228 | /* 229 | * 64-bit GDT. 230 | */ 231 | .balign 8 232 | GLOBAL(gdt64) 233 | .quad 0 /* Reserved */ 234 | kernel_cs: 235 | .quad SVSM_KERNEL_CS_ATTR /* 64-bit code segment (CPL0) */ 236 | kernel_ds: 237 | .quad SVSM_KERNEL_DS_ATTR /* 64-bit data segment (CPL0) */ 238 | 239 | tss: 240 | .quad SVSM_TSS_ATTR0 /* 64-bit TSS */ 241 | .quad SVSM_TSS_ATTR1 /* TSS (Second half) */ 242 | GLOBAL(gdt64_end) 243 | 244 | GLOBAL(gdt64_kernel_cs) 245 | .quad SVSM_KERNEL_CS_SELECTOR 246 | 247 | GLOBAL(gdt64_kernel_ds) 248 | .quad SVSM_KERNEL_DS_SELECTOR 249 | 250 | GLOBAL(gdt64_tss) 251 | .quad SVSM_TSS_SELECTOR 252 | 253 | GLOBAL(early_tss) 254 | .quad tss 255 | 256 | /* 257 | * 64-bit IDT - 256 16-byte entries 258 | */ 259 | .balign 8 260 | GLOBAL(idt64) 261 | .fill 2 * 256, 8, 0 262 | GLOBAL(idt64_end) 263 | 264 | /* 265 | * BSP/AP support: 266 | * SMP support will update these values when starting an AP to provide 267 | * information and unique values to each AP. This requires serialized 268 | * AP startup. 269 | */ 270 | GLOBAL(cpu_mode) 271 | .long 0 272 | GLOBAL(cpu_stack) 273 | .quad bsp_stack_end 274 | GLOBAL(cpu_start) 275 | .quad code_64 276 | 277 | /* 278 | * 64-bit identity-mapped pagetables: 279 | * Maps only the size of the working memory of the SVSM. 280 | * (e.g. 0x8000000000 - 0x800fffffff for 256MB) 281 | */ 282 | .balign 4096 283 | pgtables_start: 284 | pgd: 285 | .fill SVSM_PGD_INDEX, 8, 0 286 | .quad SVSM_PGD_ENTRY(GPA(p4d)) 287 | .fill 511 - SVSM_PGD_INDEX, 8, 0 288 | p4d: 289 | .fill SVSM_P4D_INDEX, 8, 0 290 | .quad SVSM_P4D_ENTRY(GPA(pud)) 291 | .fill 511 - SVSM_P4D_INDEX, 8, 0 292 | pud: 293 | .fill SVSM_PUD_INDEX, 8, 0 294 | .quad SVSM_PUD_ENTRY(GPA(pmd)) 295 | .fill 511 - SVSM_PUD_INDEX, 8, 0 296 | pmd: 297 | .fill SVSM_PMD_INDEX, 8, 0 298 | i = 0 299 | .rept SVSM_PMD_COUNT 300 | .quad SVSM_PMD_ENTRY(SVSM_GPA_ASM + i) 301 | i = i + SVSM_PMD_SIZE 302 | .endr 303 | .fill 511 - SVSM_PMD_INDEX - SVSM_PMD_COUNT + 1, 8, 0 304 | 305 | /* 306 | * Reserve one extra page to split the 2MB private page that holds the 307 | * early GHCB so that a GHCB can be used for early page validation. 308 | */ 309 | pte: 310 | .fill 512, 8, 0 311 | pgtables_end: 312 | 313 | /* 314 | * Reserved an area for an early-usage GHCB, needed for fast validation 315 | * of memory. 
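 * This is a single page kept in .data, so it is usable before the memory allocator and the per-vCPU GHCBs set up by ghcb_init() are available.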
316 | */ 317 | .balign 4096 318 | ghcb: 319 | .fill 512, 8, 0 320 | 321 | /* 322 | * Main high-level language function to call 323 | */ 324 | GLOBAL(hl_main) 325 | .quad svsm_main 326 | 327 | /* 328 | * SEV related information 329 | */ 330 | GLOBAL(early_ghcb) 331 | .quad 0 332 | 333 | GLOBAL(sev_encryption_mask) 334 | .quad CBIT(0) 335 | 336 | GLOBAL(sev_status) 337 | .quad 0 338 | 339 | GLOBAL(svsm_begin) 340 | .quad 0 341 | 342 | GLOBAL(svsm_end) 343 | .quad 0 344 | 345 | GLOBAL(dyn_mem_begin) 346 | .quad 0 347 | 348 | GLOBAL(dyn_mem_end) 349 | .quad 0 350 | 351 | GLOBAL(svsm_sbss) 352 | .quad sbss 353 | 354 | GLOBAL(svsm_ebss) 355 | .quad ebss 356 | 357 | GLOBAL(svsm_sdata) 358 | .quad sdata 359 | 360 | GLOBAL(svsm_edata) 361 | .quad edata 362 | 363 | GLOBAL(guard_page) 364 | .quad bsp_guard_page 365 | 366 | GLOBAL(svsm_secrets_page) 367 | .quad SVSM_SNP_SECRETS_PAGE_BASE 368 | 369 | GLOBAL(svsm_secrets_page_size) 370 | .quad SVSM_SNP_SECRETS_PAGE_SIZE 371 | 372 | GLOBAL(svsm_cpuid_page) 373 | .quad SVSM_SNP_CPUID_PAGE_BASE 374 | 375 | GLOBAL(svsm_cpuid_page_size) 376 | .quad SVSM_SNP_CPUID_PAGE_SIZE 377 | 378 | GLOBAL(bios_vmsa_page) 379 | .quad SVSM_SNP_BIOS_BSP_PAGE_BASE 380 | 381 | /* 382 | * SVSM GUID Table 383 | */ 384 | .section .data.guids 385 | 386 | /* Place the GUIDs at the end of the page */ 387 | .balign 4096 388 | .fill 4096 - ((svsm_guids_end - svsm_fill_end) % 4096), 1, 0 389 | svsm_fill_end: 390 | 391 | /* 392 | * SVSM SEV SNP MetaData 393 | * (similar to OVMF format, but addresses expanded to 8 bytes) 394 | */ 395 | svsm_snp_metadata: 396 | .byte 'S', 'V', 'S', 'M' /* Signature */ 397 | .long svsm_snp_metadata_end - svsm_snp_metadata /* Length */ 398 | .long 1 /* Version */ 399 | .long (svsm_snp_metadata_end - svsm_snp_sections ) / 16 /* Section Count */ 400 | 401 | svsm_snp_sections: 402 | /* SEV SNP Secrets Page */ 403 | .quad GPA(SVSM_SNP_SECRETS_PAGE_BASE) 404 | .long SVSM_SNP_SECRETS_PAGE_SIZE 405 | .long 2 406 | 407 | /* SEV SNP CPUID Page */ 408 | .quad GPA(SVSM_SNP_CPUID_PAGE_BASE) 409 | .long SVSM_SNP_CPUID_PAGE_SIZE 410 | .long 3 411 | 412 | /* BIOS BSP VMSA Page */ 413 | .quad GPA(SVSM_SNP_BIOS_BSP_PAGE_BASE) 414 | .long SVSM_SNP_BIOS_BSP_PAGE_SIZE 415 | .long 5 416 | svsm_snp_metadata_end: 417 | 418 | /* 419 | * SVSM GUID Envelope: 81384fea-ad48-4eb6-af4f-6ac49316df2b 420 | */ 421 | svsm_guids_start: 422 | 423 | /* SVSM SEV SNP MetaData GUID: be30189b-ab44-4a97-82dd-ea813941047e */ 424 | svsm_guid_snp: 425 | .long svsm_guids_end - svsm_snp_metadata /* Offset to metadata */ 426 | .word svsm_guid_snp_end - svsm_guid_snp 427 | .byte 0x9b, 0x18, 0x30, 0xbe, 0x44, 0xab, 0x97, 0x4a 428 | .byte 0x82, 0xdd, 0xea, 0x81, 0x39, 0x41, 0x04, 0x7e 429 | svsm_guid_snp_end: 430 | 431 | /* SVSM INFO GUID: a789a612-0597-4c4b-a49f-cbb1fe9d1ddd */ 432 | svsm_guid_info: 433 | .quad SVSM_GPA_ASM /* SVSM load address */ 434 | .quad SVSM_MEM_ASM /* SVSM memory footprint */ 435 | .quad CBIT(GPA(p4d)) /* SVSM pagetable (4-level) */ 436 | .quad gdt64 /* SVSM GDT */ 437 | .word SVSM_GDT_LIMIT /* SVSM GDT limit */ 438 | .quad idt64 /* SVSM IDT */ 439 | .word SVSM_IDT_LIMIT /* SVSM IDT limit */ 440 | .word SVSM_KERNEL_CS_SELECTOR /* SVSM 64-bit CS slot */ 441 | .quad SVSM_KERNEL_CS_ATTR /* SVSM 64-bit CS attributes */ 442 | .quad code_64 /* BSP start RIP */ 443 | .quad SVSM_EFER /* SVSM EFER value */ 444 | .quad SVSM_CR0 /* SVSM CR0 value */ 445 | .quad SVSM_CR4 /* SVSM CR4 value */ 446 | .word svsm_guid_info_end - svsm_guid_info 447 | .byte 0x12, 0xa6, 0x89, 0xa7, 0x97, 0x05, 0x4b, 
0x4c 448 | .byte 0xa4, 0x9f, 0xcb, 0xb1, 0xfe, 0x9d, 0x1d, 0xdd 449 | svsm_guid_info_end: 450 | 451 | .word svsm_guids_end - svsm_guids_start 452 | .byte 0xea, 0x4f, 0x38, 0x81, 0x48, 0xad, 0xb6, 0x4e 453 | .byte 0xaf, 0x4f, 0x6a, 0xc4, 0x93, 0x16, 0xdf, 0x2b 454 | svsm_guids_end: 455 | -------------------------------------------------------------------------------- /src/start/svsm.h: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Author: Tom Lendacky 6 | */ 7 | 8 | #ifndef __SVSM_H__ 9 | #define __SVSM_H__ 10 | 11 | #define GLOBAL(_name) \ 12 | .global _name; \ 13 | _name: 14 | 15 | #define BIT(x) (1ULL << (x)) 16 | 17 | #define LOWER_8BITS(x) ((u8)((x) & 0xff)) 18 | #define LOWER_16BITS(x) ((u16)((x) & 0xffff)) 19 | #define LOWER_32BITS(x) ((u32)((x) & 0xffffffff)) 20 | #define UPPER_32BITS(x) ((u32)(x >> 32)) 21 | 22 | #define MIN(x, y) ((x) < (y) ? (x) : (y)) 23 | #define MAX(x, y) ((x) > (y) ? (x) : (y)) 24 | 25 | #define PAGE_TABLE_ENTRY_SIZE 8 26 | #define PAGE_TABLE_ENTRY_COUNT 512 27 | #define PAGE_TABLE_INDEX_MASK (PAGE_TABLE_ENTRY_COUNT - 1) 28 | 29 | #define PAGE_SHIFT 12 30 | #define PAGE_SIZE BIT(PAGE_SHIFT) 31 | #define PAGE_MASK ~(PAGE_SIZE - 1) 32 | #define PAGE_ADDR(x) ((u64)(x) & PAGE_MASK) 33 | #define PAGE_ALIGN(x) (((u64)(x) + PAGE_SIZE - 1) & PAGE_MASK) 34 | #define PAGE_ALIGNED(x) ALIGNED((u64)(x), PAGE_SIZE) 35 | #define PAGE_COUNT(x) (PAGE_ALIGN(x) >> PAGE_SHIFT) 36 | #define PA(x) ((u64)(x)) 37 | #define PFN(x) (PA(x) >> PAGE_SHIFT) 38 | #define PFN_TO_PA(x) ((u64)(x) << PAGE_SHIFT) 39 | 40 | #define PAGE_2MB_SHIFT 21 41 | #define PAGE_2MB_SIZE BIT(PAGE_2MB_SHIFT) 42 | #define PAGE_2MB_MASK ~(PAGE_2MB_SIZE - 1) 43 | #define PAGE_2MB_ALIGNED(x) ALIGNED((u64)(x), PAGE_2MB_SIZE) 44 | 45 | #ifndef SVSM_GPA 46 | #define SVSM_GPA 0x8000000000 /* 512 GB start */ 47 | #endif /* SVSM_GPA */ 48 | 49 | #ifndef SVSM_MEM 50 | #define SVSM_MEM 0x10000000 /* 256 MB of memory */ 51 | #endif /* SVSM_MEM */ 52 | 53 | #define __ASM_ULL(x) x ## ULL 54 | #define ASM_ULL(x) __ASM_ULL(x) 55 | 56 | #define SVSM_GPA_ASM ASM_ULL(SVSM_GPA) 57 | #define SVSM_MEM_ASM ASM_ULL(SVSM_MEM) 58 | 59 | #define SVSM_GVA_OFFSET 0xffff800000000000 60 | #define SVSM_GVA_OFFSET_ASM ASM_ULL(SVSM_GVA_OFFSET) 61 | 62 | #define SVSM_GVA_ASM (SVSM_GPA_ASM + SVSM_GVA_OFFSET_ASM) 63 | #define SVSM_GVA_LDS (SVSM_GPA + SVSM_GVA_OFFSET) 64 | 65 | #define SVSM_PAGES (SVSM_MEM_ASM / PAGE_SIZE) 66 | 67 | #define SVSM_EFER 0x00001d00 /* SVME, NXE, LMA, LME */ 68 | #define SVSM_CR0 0x80010033 /* PG, WP, NE, ET, MP, PE */ 69 | #define SVSM_CR4 0x00000668 /* OSXMMEXCPT, OSFXSR, MCE, PAE, DE */ 70 | 71 | #define SVSM_PGD_INDEX ((SVSM_GVA_ASM >> 48) & 511) 72 | #define SVSM_P4D_INDEX ((SVSM_GVA_ASM >> 39) & 511) 73 | #define SVSM_PUD_INDEX ((SVSM_GVA_ASM >> 30) & 511) 74 | #define SVSM_PMD_INDEX ((SVSM_GVA_ASM >> 21) & 511) 75 | #define SVSM_PTE_INDEX ((SVSM_GVA_ASM >> 12) & 511) 76 | 77 | #define SVSM_PGD_SIZE 0x1000000000000ULL 78 | #define SVSM_PGD_COUNT (SVSM_MEM_ASM / SVSM_PGD_SIZE) 79 | #define SVSM_P4D_SIZE 0x8000000000ULL 80 | #define SVSM_P4D_COUNT (SVSM_MEM_ASM / SVSM_P4D_SIZE) 81 | #define SVSM_PUD_SIZE 0x40000000ULL 82 | #define SVSM_PUD_COUNT (SVSM_MEM_ASM / SVSM_P4D_SIZE) 83 | #define SVSM_PMD_SIZE 0x200000ULL 84 | #define SVSM_PMD_COUNT (SVSM_MEM_ASM / SVSM_PMD_SIZE) 85 | 86 | #define SVSM_GDT_LIMIT gdt64_end - gdt64 - 1 87 | #define SVSM_IDT_LIMIT idt64_end - 
idt64 - 1 88 | 89 | #define SVSM_KERNEL_CS_SELECTOR (kernel_cs - gdt64) 90 | #define SVSM_KERNEL_CS_ATTR 0x00af9a000000ffff 91 | #define SVSM_KERNEL_DS_SELECTOR (kernel_ds - gdt64) 92 | #define SVSM_KERNEL_DS_ATTR 0x00cf92000000ffff 93 | 94 | #define SVSM_TSS_SELECTOR (tss - gdt64) 95 | #define SVSM_TSS_ATTR0 0x0080890000000000 96 | #define SVSM_TSS_ATTR1 0x0000000000000000 97 | 98 | #define SVSM_SNP_MEASURED_PAGES_BASE edata 99 | 100 | #define SVSM_SNP_SECRETS_PAGE_BASE SVSM_SNP_MEASURED_PAGES_BASE 101 | #define SVSM_SNP_SECRETS_PAGE_SIZE 4096 102 | #define SVSM_SNP_SECRETS_PAGE_END SVSM_SNP_SECRETS_PAGE_BASE + SVSM_SNP_SECRETS_PAGE_SIZE 103 | 104 | #define SVSM_SNP_CPUID_PAGE_BASE SVSM_SNP_SECRETS_PAGE_END 105 | #define SVSM_SNP_CPUID_PAGE_SIZE 4096 106 | #define SVSM_SNP_CPUID_PAGE_END SVSM_SNP_CPUID_PAGE_BASE + SVSM_SNP_CPUID_PAGE_SIZE 107 | 108 | #define SVSM_SNP_BIOS_BSP_PAGE_BASE SVSM_SNP_CPUID_PAGE_END 109 | #define SVSM_SNP_BIOS_BSP_PAGE_SIZE 4096 110 | #define SVSM_SNP_BIOS_BSP_PAGE_END SVSM_SNP_BIOS_BSP_PAGE_BASE + SVSM_SNP_BIOS_BSP_PAGE_SIZE 111 | 112 | #define SVSM_DYN_MEM_BEGIN SVSM_SNP_BIOS_BSP_PAGE_END 113 | 114 | #define PVALIDATE_RET_MAX 0x0f 115 | #define PVALIDATE_CF_SET 0x10 116 | #define PVALIDATE_RET_RANGE_ERR 0x11 117 | 118 | #define VMPL_R BIT(8) 119 | #define VMPL_W BIT(9) 120 | #define VMPL_X_USER BIT(10) 121 | #define VMPL_X_SUPER BIT(11) 122 | #define VMSA_PAGE BIT(16) 123 | 124 | #define VMPL_RWX (VMPL_R | VMPL_W | VMPL_X_USER | VMPL_X_SUPER) 125 | #define VMPL_VMSA (VMPL_R | VMSA_PAGE) 126 | 127 | #define CPUID_VENDOR_INFO 0x00000000 128 | #define CPUID_PROCESSOR_INFO 0x00000001 129 | #define CPUID_EXTENDED_TOPO 0x0000000b 130 | 131 | #define SVSM_SECRETS_PAGE_OFFSET 0x140 132 | 133 | #ifndef __ASSEMBLY__ 134 | 135 | #define ALIGN(x, y) (((x) + (y) - 1) & ~(y - 1)) 136 | #define ALIGNED(x, y) ((x) == ALIGN((x), (y))) 137 | 138 | #include 139 | #include 140 | #include 141 | #include 142 | #include 143 | #include 144 | 145 | typedef unsigned char u8; 146 | typedef unsigned short u16; 147 | typedef unsigned int u32; 148 | typedef unsigned long long u64; 149 | 150 | enum { 151 | RMP_4K = 0, 152 | RMP_2M, 153 | }; 154 | 155 | enum { 156 | PVALIDATE_RESCIND = 0, 157 | PVALIDATE_VALIDATE, 158 | }; 159 | 160 | enum { 161 | VMPL0 = 0, 162 | VMPL1, 163 | VMPL2, 164 | VMPL3, 165 | 166 | VMPL_MAX 167 | }; 168 | 169 | struct ca { 170 | u8 call_pending; 171 | u8 mem_available; 172 | u8 reserved_1[6]; 173 | } __attribute__((packed)); 174 | 175 | struct vmpl_info { 176 | struct vmsa *vmsa; 177 | struct ca *ca; 178 | }; 179 | 180 | struct sev_snp_secrets_page { 181 | u32 version; 182 | u32 flags; 183 | u32 fms; 184 | u8 reserved_1[4]; 185 | 186 | u8 gosvw[16]; 187 | 188 | u8 vmpck0[32]; 189 | u8 vmpck1[32]; 190 | u8 vmpck2[32]; 191 | u8 vmpck3[32]; 192 | 193 | u8 os_reserved[96]; 194 | 195 | u8 reserved_2[64]; 196 | 197 | /* SVSM fields start at offset 0x140 into the secrets page */ 198 | u64 svsm_base; 199 | u64 svsm_size; 200 | u64 svsm_caa; 201 | u32 svsm_max_version; 202 | u8 svsm_guest_vmpl; 203 | u8 reserved_3[3]; 204 | } __attribute__((packed)); 205 | 206 | extern u64 sev_encryption_mask; 207 | extern u64 sev_status; 208 | 209 | extern u8 code_64[]; 210 | 211 | extern u8 stext[]; 212 | extern u8 etext[]; 213 | 214 | extern u8 sbss[]; 215 | extern u8 ebss[]; 216 | 217 | extern u8 sdata[]; 218 | extern u8 edata[]; 219 | 220 | extern u8 bsp_guard_page[]; 221 | 222 | extern u32 cpu_mode; 223 | extern u64 cpu_stack; 224 | 225 | extern u64 hl_main; 226 | void svsm_ap(void); 
227 | 228 | int prints(const char *format, ...); 229 | 230 | #endif /* __ASSEMBLY__ */ 231 | 232 | #endif 233 | -------------------------------------------------------------------------------- /src/start/svsm.lds.S: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Author: Tom Lendacky and 6 | * Carlos Bilbao 7 | */ 8 | 9 | #include "svsm.h" 10 | 11 | OUTPUT_FORMAT("elf64-x86-64") 12 | OUTPUT_ARCH(i386:x86-64) 13 | ENTRY(code_64) 14 | 15 | SECTIONS 16 | { 17 | . = SVSM_GVA_LDS; 18 | 19 | .text : { 20 | stext = .; 21 | *(.text.reset) 22 | *(.text) 23 | *(.text.*) 24 | } 25 | . = ALIGN(4096); 26 | etext = .; 27 | 28 | .bss : { 29 | sbss = .; 30 | *(.bss) 31 | *(.bss._*) 32 | } 33 | . = ALIGN(4096); 34 | ebss = .; 35 | 36 | .data : { 37 | sdata = .; 38 | *(.data) 39 | *(.data._*) 40 | *(.data.rel*) 41 | *(.data.guids) 42 | } 43 | edata = .; 44 | 45 | /DISCARD/ : { 46 | *(.comment) 47 | *(.discard) 48 | *(.discard.*) 49 | *(.dynamic) 50 | *(.eh_frame) 51 | *(.note.GNU-stack) 52 | *(.note.gnu.build-id) 53 | *(.debug) 54 | *(.debug_*) 55 | } 56 | 57 | . = ASSERT(!(SVSM_GVA_LDS & 0xfffff), "SVSM_GPA not 2MB aligned! make clean and try again..."); 58 | } 59 | -------------------------------------------------------------------------------- /src/svsm_request.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::cpu::percpu::PERCPU; 10 | use crate::cpu::vc_run_vmpl; 11 | use crate::cpu::vmsa::Vmsa; 12 | use crate::globals::*; 13 | use crate::mem::ca::Ca; 14 | use crate::protocols::error_codes::*; 15 | use crate::protocols::*; 16 | use crate::vmsa_list::*; 17 | use crate::*; 18 | 19 | use alloc::string::String; 20 | use x86_64::addr::PhysAddr; 21 | 22 | /// 0x403 23 | const VMEXIT_VMGEXIT: u64 = 0x403; 24 | 25 | unsafe fn handle_request(vmsa: *mut Vmsa) { 26 | let protocol: u32 = UPPER_32BITS!((*vmsa).rax()) as u32; 27 | let callid: u32 = LOWER_32BITS!((*vmsa).rax()) as u32; 28 | 29 | match protocol { 30 | SVSM_CORE_PROTOCOL => core_handle_request(callid, vmsa), 31 | _ => (*vmsa).set_rax(SVSM_ERR_UNSUPPORTED_PROTOCOL), 32 | } 33 | } 34 | 35 | pub fn svsm_request_add_init_vmsa(vmsa_pa: PhysAddr, apic_id: u32) { 36 | VMSA_LIST.push(vmsa_pa, apic_id); 37 | } 38 | 39 | fn process_one_request(vmpl: VMPL) -> Result<(), String> { 40 | // 41 | // Limit the mapping of guest memory to only what is needed to process 42 | // the request. 
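// The MapGuard mappings created below are dropped (and the pages unmapped) when this function returns, including on the early error returns, so the guest CA and VMSA are only mapped for the duration of a single request.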
43 | // 44 | let caa_gpa: PhysAddr = unsafe { PERCPU.caa(vmpl) }; 45 | let mut ca_map: MapGuard = match MapGuard::new_private(caa_gpa, CAA_MAP_SIZE) { 46 | Ok(m) => m, 47 | Err(e) => return Err(alloc::format!("Error mapping guest calling area: {e:?}")), 48 | }; 49 | 50 | let vmsa_gpa: PhysAddr = unsafe { PERCPU.vmsa(vmpl) }; 51 | let mut vmsa_map: MapGuard = match MapGuard::new_private(vmsa_gpa, VMSA_MAP_SIZE) { 52 | Ok(m) => m, 53 | Err(e) => return Err(alloc::format!("Error mapping guest VMSA: {e:?}")), 54 | }; 55 | 56 | if !vmsa_clear_efer_svme(vmsa_map.va()) { 57 | let msg: String = alloc::format!("map_guest_input: vmsa_clear_efer_svme() failed"); 58 | return Err(msg); 59 | } 60 | 61 | let vmsa: &mut Vmsa = vmsa_map.as_object_mut(); 62 | let ca: &mut Ca = ca_map.as_object_mut(); 63 | 64 | if vmsa.guest_exitcode() == VMEXIT_VMGEXIT && ca.call_pending() == 1 { 65 | unsafe { handle_request(&mut *vmsa) }; 66 | ca.set_call_pending(0); 67 | } 68 | 69 | // 70 | // Set EFER.SVME to 1 to allow the VMSA to be run by the hypervisor. 71 | // 72 | vmsa_set_efer_svme(vmsa_map.va()); 73 | 74 | Ok(()) 75 | } 76 | 77 | /// Process SVSM requests 78 | pub fn svsm_request_loop() { 79 | loop { 80 | match process_one_request(VMPL::Vmpl1) { 81 | Ok(()) => (), 82 | Err(e) => prints!("{}", e), 83 | }; 84 | vc_run_vmpl(VMPL::Vmpl1); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/util/locking.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 SUSE LLC. 4 | * Authors: Jörg Rödel (jroedel at suse.de) 5 | * 6 | */ 7 | 8 | use core::arch::asm; 9 | use core::cell::UnsafeCell; 10 | use core::ops::{Deref, DerefMut}; 11 | use core::sync::atomic::{AtomicU64, Ordering}; 12 | 13 | pub struct LockGuard<'a, T> { 14 | holder: &'a AtomicU64, 15 | data: &'a mut T, 16 | } 17 | 18 | impl<'a, T> Deref for LockGuard<'a, T> { 19 | type Target = T; 20 | 21 | fn deref(&self) -> &T { 22 | self.data 23 | } 24 | } 25 | 26 | impl<'a, T> DerefMut for LockGuard<'a, T> { 27 | fn deref_mut(&mut self) -> &mut T { 28 | self.data 29 | } 30 | } 31 | 32 | impl<'a, T> Drop for LockGuard<'a, T> { 33 | fn drop(&mut self) { 34 | self.holder.fetch_add(1, Ordering::Release); 35 | } 36 | } 37 | 38 | pub struct SpinLock<T> { 39 | current: AtomicU64, 40 | holder: AtomicU64, 41 | data: UnsafeCell<T>, 42 | testmode: AtomicU64, 43 | } 44 | 45 | unsafe impl<T> Sync for SpinLock<T> {} 46 | 47 | impl<T> SpinLock<T> { 48 | pub const fn new(data: T) -> Self { 49 | SpinLock { 50 | current: AtomicU64::new(1), 51 | holder: AtomicU64::new(1), 52 | data: UnsafeCell::new(data), 53 | testmode: AtomicU64::new(0), 54 | } 55 | } 56 | 57 | pub fn test_mode(&self) { 58 | self.testmode.swap(1, Ordering::Relaxed); 59 | } 60 | 61 | pub fn lock(&self) -> LockGuard<T> { 62 | let ticket: u64 = self.current.fetch_add(1, Ordering::Relaxed); 63 | 64 | loop { 65 | let h: u64 = self.holder.load(Ordering::Acquire); 66 | if h == ticket { 67 | break; 68 | } 69 | } 70 | 71 | let res: LockGuard<T> = LockGuard { 72 | holder: &self.holder, 73 | data: unsafe { &mut *self.data.get() }, 74 | }; 75 | 76 | if self.testmode.fetch_add(0, Ordering::SeqCst) == 1 { 77 | unsafe { 78 | asm!("2: jmp 2b", in("rsi") 0xdead256); 79 | } 80 | } 81 | 82 | return res; 83 | } 84 | 85 | pub fn unlock(&mut self) { 86 | self.holder.fetch_add(1, Ordering::Release); 87 | } 88 | } 89 | 90 | #[cfg(test)] 91 | mod tests { 92 | 93 | use super::*; 94 | 95 | #[test] 96 | fn
test_spin_lock() { 97 | let lock = SpinLock::new(0); 98 | 99 | // Acquire the lock and modify the data 100 | { 101 | let mut guard = lock.lock(); 102 | *guard += 1; 103 | } 104 | 105 | // Verify that the data was correctly modified 106 | { 107 | let guard = lock.lock(); 108 | assert_eq!(*guard, 1); 109 | } 110 | 111 | // Unlock the lock 112 | drop(lock); 113 | 114 | // Verify that the lock is now available after 115 | // it has been dropped 116 | let lock = SpinLock::new(0); 117 | { 118 | let mut guard = lock.lock(); 119 | *guard += 1; 120 | } 121 | 122 | // Verify that the data was correctly modified 123 | { 124 | let guard = lock.lock(); 125 | assert_eq!(*guard, 1); 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | /// Lock implementation for mutual exclusion 10 | pub mod locking; 11 | /// Serial output support 12 | pub mod serial; 13 | /// Auxiliary functions and macros 14 | pub mod util; 15 | 16 | pub use crate::util::serial::{serial_init, serial_out}; 17 | -------------------------------------------------------------------------------- /src/util/serial.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | #[cfg(feature = "verbose")] 10 | pub mod verbose_serial { 11 | 12 | use crate::BIT; 13 | 14 | /// 0x3f8 15 | pub const TTYS0: u16 = 0x3f8; 16 | 17 | /// 115200 18 | pub const DIV_BASE: u64 = 115200; 19 | /// Bit 7 20 | pub const DLAB_BIT: u8 = BIT!(7); 21 | 22 | /// 1 23 | pub const IER: u16 = 1; 24 | /// 2 25 | pub const FCR: u16 = 2; 26 | /// 3 27 | pub const LCR: u16 = 3; 28 | /// 4 29 | pub const MCR: u16 = 4; 30 | 31 | /// 0 32 | pub const DLL: u16 = 0; 33 | /// 1 34 | pub const DLM: u16 = 1; 35 | 36 | pub static PORT: u16 = TTYS0; 37 | 38 | pub static mut SERIAL_READY: bool = false; 39 | } 40 | 41 | #[cfg(not(feature = "verbose"))] 42 | pub fn serial_out(_string: &str) {} 43 | #[cfg(not(feature = "verbose"))] 44 | pub fn serial_init() {} 45 | 46 | /// Print with format to the serial output 47 | #[macro_export] 48 | macro_rules! 
prints { 49 | ($($args:tt),*) => {{ 50 | use crate::util::serial::serial_out; 51 | serial_out(&alloc::format!($($args),*)) 52 | }}; 53 | } 54 | 55 | #[inline] 56 | #[cfg(feature = "verbose")] 57 | pub fn serial_out(string: &str) { 58 | use crate::cpu::vc_outb; 59 | use crate::serial::verbose_serial::{PORT, SERIAL_READY}; 60 | unsafe { 61 | if !SERIAL_READY { 62 | return; 63 | } 64 | } 65 | 66 | for b in string.as_bytes() { 67 | vc_outb(PORT, *b); 68 | } 69 | } 70 | 71 | /// Initialize serial port 72 | #[cfg(feature = "verbose")] 73 | pub fn serial_init() { 74 | use crate::cpu::{vc_inb, vc_outb}; 75 | use crate::serial::verbose_serial::*; 76 | vc_outb(PORT + IER, 0); /* Disable all interrupts */ 77 | vc_outb(PORT + FCR, 0); /* Disable all FIFOs */ 78 | vc_outb(PORT + LCR, 3); /* 8n1 */ 79 | vc_outb(PORT + MCR, 3); /* DTR and RTS */ 80 | 81 | let div: u16 = (DIV_BASE / 115200) as u16; 82 | let div_lo: u8 = (div & 0xff) as u8; 83 | let div_hi: u8 = ((div >> 8) & 0xff) as u8; 84 | 85 | let c: u8 = vc_inb(PORT + LCR); 86 | vc_outb(PORT + LCR, c | DLAB_BIT); 87 | vc_outb(PORT + DLL, div_lo); 88 | vc_outb(PORT + DLM, div_hi); 89 | vc_outb(PORT + LCR, c); 90 | 91 | unsafe { 92 | SERIAL_READY = true; 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/util/util.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::prints; 10 | use core::arch::asm; 11 | 12 | /// Generate set/get methods for a given struct field and type 13 | #[macro_export] 14 | macro_rules! funcs { 15 | ($name: ident, $T: ty) => { 16 | paste::paste! { 17 | pub fn [<$name>](&self) -> $T { 18 | self.$name 19 | } 20 | pub fn [](&mut self, value: $T) { 21 | self.$name = value; 22 | } 23 | } 24 | }; 25 | } 26 | 27 | /// Generate get method for a given struct field and type 28 | #[macro_export] 29 | macro_rules! getter_func { 30 | ($name: ident, $T: ty) => { 31 | paste::paste! { 32 | pub fn [<$name>](&self) -> $T { 33 | self.$name 34 | } 35 | } 36 | }; 37 | } 38 | 39 | /// Statically check for a condition 40 | #[macro_export] 41 | macro_rules! STATIC_ASSERT { 42 | ($x: expr) => { 43 | const _: () = core::assert!($x); 44 | }; 45 | } 46 | 47 | /// Obtain bit for a given position 48 | #[macro_export] 49 | macro_rules! BIT { 50 | ($x: expr) => { 51 | (1 << ($x)) 52 | }; 53 | } 54 | 55 | /// Retrieve 8 least significant bits 56 | #[macro_export] 57 | macro_rules! LOWER_8BITS { 58 | ($x: expr) => { 59 | (($x) as u8 & 0xff) 60 | }; 61 | } 62 | 63 | /// Retrieve 16 least significant bits 64 | #[macro_export] 65 | macro_rules! LOWER_16BITS { 66 | ($x: expr) => { 67 | (($x) as u16 & 0xffff) 68 | }; 69 | } 70 | 71 | /// Retrieve 32 least significant bits 72 | #[macro_export] 73 | macro_rules! LOWER_32BITS { 74 | ($x: expr) => { 75 | (($x) as u32 & 0xffffffff) 76 | }; 77 | } 78 | 79 | /// Retrieve 32 most significant bits 80 | #[macro_export] 81 | macro_rules! UPPER_32BITS { 82 | ($x: expr) => { 83 | (($x >> 32) as u32 & 0xffffffff) 84 | }; 85 | } 86 | 87 | /// Align value to a given size 88 | #[macro_export] 89 | macro_rules! ALIGN { 90 | ($x: expr, $y: expr) => { 91 | ((($x) + ($y) - 1) & !(($y) - 1)) 92 | }; 93 | } 94 | 95 | /// Check if x is aligned to y 96 | #[macro_export] 97 | macro_rules! 
ALIGNED { 98 | ($x: expr, $y: expr) => { 99 | ($x == ALIGN!(($x), ($y))) 100 | }; 101 | } 102 | 103 | /// Check if address is 2MB aligned 104 | #[macro_export] 105 | macro_rules! PAGE_2MB_ALIGNED { 106 | ($x: expr) => { 107 | ALIGNED!($x, PAGE_2MB_SIZE) 108 | }; 109 | } 110 | 111 | /// Retrieve number of pages that a given value contains 112 | #[macro_export] 113 | macro_rules! PAGE_COUNT { 114 | ($x: expr) => { 115 | (ALIGN!(($x), PAGE_SIZE) >> PAGE_SHIFT) 116 | }; 117 | } 118 | 119 | /// Make sure threads are sequentially consistent 120 | #[macro_export] 121 | macro_rules! BARRIER { 122 | () => { 123 | core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst) 124 | }; 125 | } 126 | 127 | pub fn memset(dst: *mut u8, val: u8, len: usize) { 128 | unsafe { 129 | core::intrinsics::write_bytes(dst, val, len); 130 | } 131 | } 132 | 133 | /// Infinite loop that updates rsi (debugging purposes) 134 | #[inline] 135 | pub fn loop_rsi(val: u64) { 136 | unsafe { 137 | asm!("2: jmp 2b", in("rsi") val); 138 | } 139 | } 140 | 141 | #[inline] 142 | pub fn breakpoint() { 143 | prints!("\nDebug breakpoint\n"); 144 | loop_rsi(0xdeb); 145 | } 146 | -------------------------------------------------------------------------------- /src/vmsa_list.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2022 Advanced Micro Devices, Inc. 4 | * 5 | * Authors: Carlos Bilbao and 6 | * Tom Lendacky 7 | */ 8 | 9 | use crate::funcs; 10 | use crate::locking::LockGuard; 11 | use crate::locking::SpinLock; 12 | 13 | use alloc::vec::Vec; 14 | use lazy_static::lazy_static; 15 | use x86_64::addr::PhysAddr; 16 | 17 | #[derive(Clone, Copy, Debug)] 18 | struct VmsaInfo { 19 | gpa: u64, 20 | apic_id: u32, 21 | } 22 | 23 | #[allow(dead_code)] 24 | impl VmsaInfo { 25 | funcs!(gpa, u64); 26 | funcs!(apic_id, u32); 27 | } 28 | 29 | /// List of VMSAs (their GPAs and APIC IDs), with methods to inspect 30 | /// and modify the list in a safe (locked) manner. 31 | pub struct VmsaList { 32 | list: SpinLock<Vec<VmsaInfo>>, 33 | } 34 | 35 | impl Default for VmsaList { 36 | fn default() -> Self { 37 | Self { 38 | list: SpinLock::new(Vec::with_capacity(512)), 39 | } 40 | } 41 | } 42 | 43 | impl VmsaList { 44 | pub fn remove(&self, gpa: PhysAddr) -> bool { 45 | let mut vmsa_list: LockGuard<Vec<VmsaInfo>> = self.list.lock(); 46 | match vmsa_list.iter().position(|&vi| vi.gpa() == gpa.as_u64()) { 47 | Some(i) => { 48 | vmsa_list.swap_remove(i); 49 | true 50 | } 51 | None => false, 52 | } 53 | } 54 | 55 | #[inline] 56 | pub fn push(&self, gpa: PhysAddr, apic_id: u32) { 57 | let mut vmsa_list: LockGuard<Vec<VmsaInfo>> = self.list.lock(); 58 | vmsa_list.push(VmsaInfo { 59 | gpa: gpa.as_u64(), 60 | apic_id: apic_id, 61 | }); 62 | } 63 | 64 | pub fn get_apic_id(&self, gpa: PhysAddr) -> Option<u32> { 65 | let vmsa_list: LockGuard<Vec<VmsaInfo>> = self.list.lock(); 66 | vmsa_list 67 | .iter() 68 | .find(|&vi| vi.gpa() == gpa.as_u64()) 69 | .map(|&vi| vi.apic_id()) 70 | } 71 | 72 | pub fn contains(&self, gpa: PhysAddr) -> bool { 73 | let vmsa_list: LockGuard<Vec<VmsaInfo>> = self.list.lock(); 74 | vmsa_list.iter().any(|&vi| vi.gpa() == gpa.as_u64()) 75 | } 76 | } 77 | 78 | lazy_static!
{ 79 | /// Global list of VMSAs 80 | pub static ref VMSA_LIST: VmsaList = VmsaList::default(); 81 | } 82 | 83 | #[cfg(test)] 84 | mod tests { 85 | use super::*; 86 | 87 | #[test] 88 | fn test_get_apic_id() { 89 | let list: VmsaList = VmsaList::default(); 90 | list.push(PhysAddr::new(0x1000), 2); 91 | list.push(PhysAddr::new(0x5000), 1); 92 | assert_eq!(list.get_apic_id(PhysAddr::new(0x1000)), Some(2)); 93 | assert_eq!(list.get_apic_id(PhysAddr::new(0x5000)), Some(1)); 94 | assert_eq!(list.get_apic_id(PhysAddr::new(0x3000)), None); 95 | } 96 | 97 | #[test] 98 | fn test_contains() { 99 | let list: VmsaList = VmsaList::default(); 100 | list.push(PhysAddr::new(0x1000), 2); 101 | list.push(PhysAddr::new(0x5000), 1); 102 | assert_eq!(list.contains(PhysAddr::new(0x1000)), true); 103 | assert_eq!(list.contains(PhysAddr::new(0x5000)), true); 104 | assert_eq!(list.contains(PhysAddr::new(0x3000)), false); 105 | } 106 | 107 | #[test] 108 | fn test_remove() { 109 | let list: VmsaList = VmsaList::default(); 110 | list.push(PhysAddr::new(0x1000), 2); 111 | list.push(PhysAddr::new(0x5000), 1); 112 | assert_eq!(list.remove(PhysAddr::new(0x1000)), true); 113 | assert_eq!(list.contains(PhysAddr::new(0x1000)), false); 114 | assert_eq!(list.contains(PhysAddr::new(0x5000)), true); 115 | 116 | // Test remove of non-existing vmsa 117 | assert_eq!(list.remove(PhysAddr::new(0x3000)), false); 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/wrapper.rs: -------------------------------------------------------------------------------- 1 | /* SPDX-License-Identifier: MIT */ 2 | /* 3 | * Copyright (C) 2023 IBM 4 | * 5 | * Authors: 6 | * Claudio Carvalho 7 | * Vikram Narayanan 8 | */ 9 | 10 | #![allow(non_camel_case_types)] 11 | 12 | #[cfg(not(test))] 13 | mod wrappers { 14 | use crate::mem::{mem_allocate, mem_callocate, mem_free, mem_reallocate}; 15 | use crate::prints; 16 | 17 | use core::{ptr, slice, str}; 18 | use x86_64::VirtAddr; 19 | 20 | #[no_mangle] 21 | pub extern "C" fn malloc(size: cty::c_ulong) -> *mut cty::c_void { 22 | if let Ok(va) = mem_allocate(size as usize) { 23 | return va.as_mut_ptr(); 24 | }; 25 | ptr::null_mut() 26 | } 27 | 28 | #[no_mangle] 29 | pub extern "C" fn calloc(items: cty::c_ulong, size: cty::c_ulong) -> *mut cty::c_void { 30 | if let Some(num_bytes) = items.checked_mul(size as u64) { 31 | if let Ok(va) = mem_callocate(num_bytes as usize) { 32 | return va.as_mut_ptr(); 33 | } 34 | } 35 | ptr::null_mut() 36 | } 37 | 38 | #[no_mangle] 39 | pub extern "C" fn realloc(p: *mut cty::c_void, size: cty::c_ulong) -> *mut cty::c_void { 40 | if let Ok(va) = mem_reallocate(VirtAddr::new(p as u64), size as usize) { 41 | return va.as_mut_ptr(); 42 | } 43 | ptr::null_mut() 44 | } 45 | 46 | #[no_mangle] 47 | #[cfg(not(test))] 48 | pub extern "C" fn free(p: *mut cty::c_void) { 49 | if p.is_null() { 50 | return; 51 | } 52 | mem_free(VirtAddr::new(p as u64)); 53 | } 54 | 55 | #[no_mangle] 56 | pub extern "C" fn serial_out(s: *const cty::c_char, size: cty::c_int) { 57 | let str_slice: &[u8] = unsafe { slice::from_raw_parts(s as *const u8, size as usize) }; 58 | if let Ok(rust_str) = str::from_utf8(str_slice) { 59 | prints!("{}", rust_str); 60 | } else { 61 | prints!("ERR: BUG: serial_out arg1 is not a valid utf8 string\n"); 62 | } 63 | } 64 | } 65 | 66 | #[cfg(test)] 67 | #[allow(dead_code)] 68 | mod test_wrappers { 69 | 70 | extern "C" { 71 | fn malloc(size: cty::c_ulong) -> *mut cty::c_void; 72 | fn calloc(items: cty::c_ulong, size: cty::c_ulong) -> 
*mut cty::c_void; 73 | fn realloc(p: *mut cty::c_void, size: cty::c_ulong) -> *mut cty::c_void; 74 | fn free(ptr: *mut cty::c_void); 75 | } 76 | } 77 | 78 | #[no_mangle] 79 | pub extern "C" fn abort() -> ! { 80 | use crate::vc_terminate_svsm_general; 81 | vc_terminate_svsm_general(); 82 | } 83 | -------------------------------------------------------------------------------- /svsm-target.json: -------------------------------------------------------------------------------- 1 | { 2 | "llvm-target": "x86_64-unknown-none", 3 | "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128", 4 | "arch": "x86_64", 5 | "target-endian": "little", 6 | "target-pointer-width": "64", 7 | "target-c-int-width": "32", 8 | "os": "none", 9 | "executables": true, 10 | "linker-flavor": "ld.lld", 11 | "linker": "rust-lld", 12 | "panic-strategy": "abort", 13 | "disable-redzone": true, 14 | "features": "-mmx,-sse,+soft-float" 15 | } 16 | --------------------------------------------------------------------------------