├── .github ├── dependabot.yml └── workflows │ └── build-pdf.yml ├── .gitignore ├── .gitmodules ├── LICENSE ├── dependencies ├── Gemfile ├── README.md ├── apt_packages.txt └── package.json ├── readme.adoc └── specification ├── Makefile ├── conversion ├── Copy of RISC-V Platform Security Model.adoc-20220217T212124Z-001.zip └── Copy of RISC-V Platform Security Model.adoc │ ├── Copy of RISC-V Platform Security Model.adoc │ ├── img_0.png │ ├── img_1.png │ ├── img_2.png │ ├── img_3.png │ ├── img_4.png │ ├── img_5.png │ └── img_6.png ├── images ├── img_0.png ├── img_1.png ├── img_2.png ├── img_3.png ├── img_4.png ├── img_5.png ├── img_6.png ├── img_ch2_reference-model.png ├── img_ch2_security-lifecycle.png ├── img_ch4_cove.png ├── img_ch4_gp-tee.png ├── img_ch4_priv.png └── risc-v_logo.svg └── src ├── README.adoc ├── archive ├── bibliography.adoc ├── chapter2.adoc ├── contributors.adoc ├── discovery.adoc ├── header.adoc ├── index.adoc ├── intro.adoc ├── references.adoc ├── riscv-platform-security-model.pdf ├── security_ecosystem.adoc ├── security_model.adoc ├── standards.adoc ├── threat_model.adoc ├── zero_trust.adoc └── zero_trust_principles.adoc ├── bibliography.adoc ├── chapter1.adoc ├── chapter2.adoc ├── chapter3.adoc ├── chapter4.adoc ├── chapter5.adoc ├── contributors.adoc ├── example.bib ├── header.adoc ├── index.adoc └── references.adoc /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 3 | version: 2 4 | updates: 5 | - package-ecosystem: gitsubmodule 6 | directory: / 7 | schedule: 8 | interval: daily 9 | -------------------------------------------------------------------------------- /.github/workflows/build-pdf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Create Specification Document 3 | 4 
| # The workflow is triggered by pull request, push to main, and manual dispatch. 5 | on: 6 | workflow_dispatch: 7 | inputs: 8 | version: 9 | description: 'Release version, e.g. X.Y.Z:' 10 | required: true 11 | type: string 12 | revision_mark: 13 | description: 'Set revision mark as Draft, Release or Stable:' 14 | required: true 15 | type: choice 16 | options: 17 | - Draft 18 | - Release 19 | - Stable 20 | default: Draft 21 | prerelease: 22 | description: Tag as a pre-release? 23 | required: false 24 | type: boolean 25 | default: true 26 | draft: 27 | description: Create release as a draft? 28 | required: false 29 | type: boolean 30 | default: false 31 | pull_request: 32 | push: 33 | branches: 34 | - main 35 | 36 | jobs: 37 | build: 38 | runs-on: ubuntu-latest 39 | 40 | steps: 41 | # Checkout the repository 42 | - name: Checkout repository 43 | uses: actions/checkout@v4 44 | with: 45 | submodules: recursive 46 | 47 | # Pull the latest RISC-V Docs container image 48 | - name: Pull Container 49 | run: docker pull riscvintl/riscv-docs-base-container-image:latest 50 | 51 | # Override VERSION and REVMARK for manual workflow dispatch 52 | - name: Update environment variables 53 | run: | 54 | echo "VERSION=v${{ github.event.inputs.version }}" >> "$GITHUB_ENV" 55 | echo "REVMARK=${{ github.event.inputs.revision_mark }}" >> "$GITHUB_ENV" 56 | if: github.event_name == 'workflow_dispatch' 57 | 58 | # Build Files 59 | - name: Build Files 60 | run: cd ./specification && make 61 | 62 | # Upload the built PDF files as a single artifact 63 | - name: Upload Build Artifacts 64 | uses: actions/upload-artifact@v4 65 | with: 66 | name: Build Artifacts 67 | path: ${{ github.workspace }}/specification/*.pdf 68 | retention-days: 30 69 | 70 | # Create Release 71 | - name: Create Release 72 | uses: softprops/action-gh-release@v2 73 | with: 74 | files: ${{ github.workspace }}/specification/*.pdf 75 | tag_name: v${{ github.event.inputs.version }} 76 | name: Release ${{ 
github.event.inputs.version }} 77 | draft: ${{ github.event.inputs.draft }} 78 | prerelease: ${{ github.event.inputs.prerelease }} 79 | env: 80 | GITHUB_TOKEN: ${{ secrets.GHTOKEN }} 81 | if: github.event_name == 'workflow_dispatch' 82 | # This condition ensures this step only runs for workflow_dispatch events. -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | .DS_Store 3 | desktop.ini 4 | 5 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "specification/docs-resources"] 2 | path = specification/docs-resources 3 | url = https://github.com/riscv/docs-resources.git 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 
14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. 
A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution 4.0 International Public License 58 | 59 | By exercising the Licensed Rights (defined below), You accept and agree 60 | to be bound by the terms and conditions of this Creative Commons 61 | Attribution 4.0 International Public License ("Public License"). To the 62 | extent this Public License may be interpreted as a contract, You are 63 | granted the Licensed Rights in consideration of Your acceptance of 64 | these terms and conditions, and the Licensor grants You such rights in 65 | consideration of benefits the Licensor receives from making the 66 | Licensed Material available under these terms and conditions. 67 | 68 | 69 | Section 1 -- Definitions. 70 | 71 | a. Adapted Material means material subject to Copyright and Similar 72 | Rights that is derived from or based upon the Licensed Material 73 | and in which the Licensed Material is translated, altered, 74 | arranged, transformed, or otherwise modified in a manner requiring 75 | permission under the Copyright and Similar Rights held by the 76 | Licensor. For purposes of this Public License, where the Licensed 77 | Material is a musical work, performance, or sound recording, 78 | Adapted Material is always produced where the Licensed Material is 79 | synched in timed relation with a moving image. 80 | 81 | b. Adapter's License means the license You apply to Your Copyright 82 | and Similar Rights in Your contributions to Adapted Material in 83 | accordance with the terms and conditions of this Public License. 84 | 85 | c. 
Copyright and Similar Rights means copyright and/or similar rights 86 | closely related to copyright including, without limitation, 87 | performance, broadcast, sound recording, and Sui Generis Database 88 | Rights, without regard to how the rights are labeled or 89 | categorized. For purposes of this Public License, the rights 90 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 91 | Rights. 92 | 93 | d. Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. Share means to provide material to the public by any means or 116 | process that requires permission under the Licensed Rights, such 117 | as reproduction, public display, public performance, distribution, 118 | dissemination, communication, or importation, and to make material 119 | available to the public including in ways that members of the 120 | public may access the material from a place and at a time 121 | individually chosen by them. 122 | 123 | j. 
Sui Generis Database Rights means rights other than copyright 124 | resulting from Directive 96/9/EC of the European Parliament and of 125 | the Council of 11 March 1996 on the legal protection of databases, 126 | as amended and/or succeeded, as well as other essentially 127 | equivalent rights anywhere in the world. 128 | 129 | k. You means the individual or entity exercising the Licensed Rights 130 | under this Public License. Your has a corresponding meaning. 131 | 132 | 133 | Section 2 -- Scope. 134 | 135 | a. License grant. 136 | 137 | 1. Subject to the terms and conditions of this Public License, 138 | the Licensor hereby grants You a worldwide, royalty-free, 139 | non-sublicensable, non-exclusive, irrevocable license to 140 | exercise the Licensed Rights in the Licensed Material to: 141 | 142 | a. reproduce and Share the Licensed Material, in whole or 143 | in part; and 144 | 145 | b. produce, reproduce, and Share Adapted Material. 146 | 147 | 2. Exceptions and Limitations. For the avoidance of doubt, where 148 | Exceptions and Limitations apply to Your use, this Public 149 | License does not apply, and You do not need to comply with 150 | its terms and conditions. 151 | 152 | 3. Term. The term of this Public License is specified in Section 153 | 6(a). 154 | 155 | 4. Media and formats; technical modifications allowed. The 156 | Licensor authorizes You to exercise the Licensed Rights in 157 | all media and formats whether now known or hereafter created, 158 | and to make technical modifications necessary to do so. The 159 | Licensor waives and/or agrees not to assert any right or 160 | authority to forbid You from making technical modifications 161 | necessary to exercise the Licensed Rights, including 162 | technical modifications necessary to circumvent Effective 163 | Technological Measures. For purposes of this Public License, 164 | simply making modifications authorized by this Section 2(a) 165 | (4) never produces Adapted Material. 166 | 167 | 5. 
Downstream recipients. 168 | 169 | a. Offer from the Licensor -- Licensed Material. Every 170 | recipient of the Licensed Material automatically 171 | receives an offer from the Licensor to exercise the 172 | Licensed Rights under the terms and conditions of this 173 | Public License. 174 | 175 | b. No downstream restrictions. You may not offer or impose 176 | any additional or different terms or conditions on, or 177 | apply any Effective Technological Measures to, the 178 | Licensed Material if doing so restricts exercise of the 179 | Licensed Rights by any recipient of the Licensed 180 | Material. 181 | 182 | 6. No endorsement. Nothing in this Public License constitutes or 183 | may be construed as permission to assert or imply that You 184 | are, or that Your use of the Licensed Material is, connected 185 | with, or sponsored, endorsed, or granted official status by, 186 | the Licensor or others designated to receive attribution as 187 | provided in Section 3(a)(1)(A)(i). 188 | 189 | b. Other rights. 190 | 191 | 1. Moral rights, such as the right of integrity, are not 192 | licensed under this Public License, nor are publicity, 193 | privacy, and/or other similar personality rights; however, to 194 | the extent possible, the Licensor waives and/or agrees not to 195 | assert any such rights held by the Licensor to the limited 196 | extent necessary to allow You to exercise the Licensed 197 | Rights, but not otherwise. 198 | 199 | 2. Patent and trademark rights are not licensed under this 200 | Public License. 201 | 202 | 3. To the extent possible, the Licensor waives any right to 203 | collect royalties from You for the exercise of the Licensed 204 | Rights, whether directly or through a collecting society 205 | under any voluntary or waivable statutory or compulsory 206 | licensing scheme. In all other cases the Licensor expressly 207 | reserves any right to collect such royalties. 208 | 209 | 210 | Section 3 -- License Conditions. 
211 | 212 | Your exercise of the Licensed Rights is expressly made subject to the 213 | following conditions. 214 | 215 | a. Attribution. 216 | 217 | 1. If You Share the Licensed Material (including in modified 218 | form), You must: 219 | 220 | a. retain the following if it is supplied by the Licensor 221 | with the Licensed Material: 222 | 223 | i. identification of the creator(s) of the Licensed 224 | Material and any others designated to receive 225 | attribution, in any reasonable manner requested by 226 | the Licensor (including by pseudonym if 227 | designated); 228 | 229 | ii. a copyright notice; 230 | 231 | iii. a notice that refers to this Public License; 232 | 233 | iv. a notice that refers to the disclaimer of 234 | warranties; 235 | 236 | v. a URI or hyperlink to the Licensed Material to the 237 | extent reasonably practicable; 238 | 239 | b. indicate if You modified the Licensed Material and 240 | retain an indication of any previous modifications; and 241 | 242 | c. indicate the Licensed Material is licensed under this 243 | Public License, and include the text of, or the URI or 244 | hyperlink to, this Public License. 245 | 246 | 2. You may satisfy the conditions in Section 3(a)(1) in any 247 | reasonable manner based on the medium, means, and context in 248 | which You Share the Licensed Material. For example, it may be 249 | reasonable to satisfy the conditions by providing a URI or 250 | hyperlink to a resource that includes the required 251 | information. 252 | 253 | 3. If requested by the Licensor, You must remove any of the 254 | information required by Section 3(a)(1)(A) to the extent 255 | reasonably practicable. 256 | 257 | 4. If You Share Adapted Material You produce, the Adapter's 258 | License You apply must not prevent recipients of the Adapted 259 | Material from complying with this Public License. 260 | 261 | 262 | Section 4 -- Sui Generis Database Rights. 
263 | 264 | Where the Licensed Rights include Sui Generis Database Rights that 265 | apply to Your use of the Licensed Material: 266 | 267 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 268 | to extract, reuse, reproduce, and Share all or a substantial 269 | portion of the contents of the database; 270 | 271 | b. if You include all or a substantial portion of the database 272 | contents in a database in which You have Sui Generis Database 273 | Rights, then the database in which You have Sui Generis Database 274 | Rights (but not its individual contents) is Adapted Material; and 275 | 276 | c. You must comply with the conditions in Section 3(a) if You Share 277 | all or a substantial portion of the contents of the database. 278 | 279 | For the avoidance of doubt, this Section 4 supplements and does not 280 | replace Your obligations under this Public License where the Licensed 281 | Rights include other Copyright and Similar Rights. 282 | 283 | 284 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 285 | 286 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 287 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 288 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 289 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 290 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 291 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 292 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 293 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 294 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 295 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 296 | 297 | b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 298 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 299 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 300 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 301 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 302 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 303 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 304 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 305 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 306 | 307 | c. The disclaimer of warranties and limitation of liability provided 308 | above shall be interpreted in a manner that, to the extent 309 | possible, most closely approximates an absolute disclaimer and 310 | waiver of all liability. 311 | 312 | 313 | Section 6 -- Term and Termination. 314 | 315 | a. This Public License applies for the term of the Copyright and 316 | Similar Rights licensed here. However, if You fail to comply with 317 | this Public License, then Your rights under this Public License 318 | terminate automatically. 319 | 320 | b. Where Your right to use the Licensed Material has terminated under 321 | Section 6(a), it reinstates: 322 | 323 | 1. automatically as of the date the violation is cured, provided 324 | it is cured within 30 days of Your discovery of the 325 | violation; or 326 | 327 | 2. upon express reinstatement by the Licensor. 328 | 329 | For the avoidance of doubt, this Section 6(b) does not affect any 330 | right the Licensor may have to seek remedies for Your violations 331 | of this Public License. 332 | 333 | c. For the avoidance of doubt, the Licensor may also offer the 334 | Licensed Material under separate terms or conditions or stop 335 | distributing the Licensed Material at any time; however, doing so 336 | will not terminate this Public License. 337 | 338 | d. 
Sections 1, 5, 6, 7, and 8 survive termination of this Public 339 | License. 340 | 341 | 342 | Section 7 -- Other Terms and Conditions. 343 | 344 | a. The Licensor shall not be bound by any additional or different 345 | terms or conditions communicated by You unless expressly agreed. 346 | 347 | b. Any arrangements, understandings, or agreements regarding the 348 | Licensed Material not stated herein are separate from and 349 | independent of the terms and conditions of this Public License. 350 | 351 | 352 | Section 8 -- Interpretation. 353 | 354 | a. For the avoidance of doubt, this Public License does not, and 355 | shall not be interpreted to, reduce, limit, restrict, or impose 356 | conditions on any use of the Licensed Material that could lawfully 357 | be made without permission under this Public License. 358 | 359 | b. To the extent possible, if any provision of this Public License is 360 | deemed unenforceable, it shall be automatically reformed to the 361 | minimum extent necessary to make it enforceable. If the provision 362 | cannot be reformed, it shall be severed from this Public License 363 | without affecting the enforceability of the remaining terms and 364 | conditions. 365 | 366 | c. No term or condition of this Public License will be waived and no 367 | failure to comply consented to unless expressly agreed to by the 368 | Licensor. 369 | 370 | d. Nothing in this Public License constitutes or may be interpreted 371 | as a limitation upon, or waiver of, any privileges and immunities 372 | that apply to the Licensor or You, including from the legal 373 | processes of any jurisdiction or authority. 374 | 375 | 376 | ======================================================================= 377 | 378 | Creative Commons is not a party to its public 379 | licenses. 
Notwithstanding, Creative Commons may elect to apply one of 380 | its public licenses to material it publishes and in those instances 381 | will be considered the “Licensor.” The text of the Creative Commons 382 | public licenses is dedicated to the public domain under the CC0 Public 383 | Domain Dedication. Except for the limited purpose of indicating that 384 | material is shared under a Creative Commons public license or as 385 | otherwise permitted by the Creative Commons policies published at 386 | creativecommons.org/policies, Creative Commons does not authorize the 387 | use of the trademark "Creative Commons" or any other trademark or logo 388 | of Creative Commons without its prior written consent including, 389 | without limitation, in connection with any unauthorized modifications 390 | to any of its public licenses or any other arrangements, 391 | understandings, or agreements concerning use of licensed material. For 392 | the avoidance of doubt, this paragraph does not form part of the 393 | public licenses. 394 | 395 | Creative Commons may be contacted at creativecommons.org. 396 | 397 | -------------------------------------------------------------------------------- /dependencies/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | gem 'asciidoctor' 3 | gem 'asciidoctor-bibtex' 4 | gem 'asciidoctor-diagram' 5 | gem 'asciidoctor-mathematical' 6 | gem 'asciidoctor-pdf' 7 | gem 'citeproc-ruby' 8 | gem 'coderay' 9 | gem 'csl-styles' 10 | gem 'json' 11 | gem 'pygments.rb' 12 | gem 'rghost' 13 | gem 'rouge' 14 | gem 'ruby_dev' 15 | -------------------------------------------------------------------------------- /dependencies/README.md: -------------------------------------------------------------------------------- 1 | Dependencies for the build environment for various package managers. Used in 2 | `.github/workflows/`. 
3 | 4 | -------------------------------------------------------------------------------- /dependencies/apt_packages.txt: -------------------------------------------------------------------------------- 1 | bison 2 | build-essential 3 | cmake 4 | curl 5 | flex 6 | fonts-lyx 7 | git 8 | graphviz 9 | # For wavedrom 10 | default-jre 11 | libcairo2-dev 12 | libffi-dev 13 | libgdk-pixbuf2.0-dev 14 | libpango1.0-dev 15 | libxml2-dev 16 | make 17 | pkg-config 18 | ruby 19 | ruby-dev 20 | libwebp-dev 21 | libzstd-dev 22 | -------------------------------------------------------------------------------- /dependencies/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "local", 3 | "version": "0.0.1", 4 | "dependencies": { 5 | "wavedrom-cli": "^2.6.8" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /readme.adoc: -------------------------------------------------------------------------------- 1 | = RISC-V Platform Security Model Specification 2 | 3 | This repository holds specifications which define a platform security model for RISC-V platforms. 4 | 5 | = License 6 | 7 | This work is licensed under a Creative Commons Attribution 4.0 International License (CC-BY-4.0). 8 | See the https://github.com/riscv/docs-spec-template/blob/main/LICENSE[LICENSE] file for details. 9 | 10 | = Contributors 11 | 12 | Contributors to this specification are listed link:specification/src/contributors.adoc[here]. 13 | 14 | = Dependencies 15 | 16 | This project is built using AsciiDoctor (Ruby). The project's repository has been setup to automatically build a PDF 17 | version on a commit using GitHub actions. Workflow dependencies are located in the `dependencies` directory. 18 | 19 | For more information on AsciiDoctor, specification guidelines, or building locally, see the 20 | https://github.com/riscv/docs-dev-guide[RISC-V Documentation Developer Guide]. 
21 | 22 | = Cloning the project 23 | 24 | This project uses https://git-scm.com/book/en/v2/Git-Tools-Submodules[GitHub Submodules] 25 | to include the https://github.com/riscv/docs-resources[RISC-V docs-resources project] 26 | to achieve a common look and feel. 27 | 28 | The project repository uses git submodules which must be cloned explicitly. This can be done when cloning the repository 29 | the first time by executing either: 30 | 31 | `git clone --recurse-submodules` 32 | 33 | or: 34 | 35 | `git submodule init` followed by `git submodule update` 36 | 37 | Failure to do so will result in an error while building the pdf: 38 | 39 | ``` 40 | $ make 41 | asciidoctor-pdf \ 42 | -a toc \ 43 | -a compress \ 44 | -a pdf-style=docs-resources/themes/riscv-pdf.yml \ 45 | -a pdf-fontsdir=docs-resources/fonts \ 46 | --failure-level=ERROR \ 47 | -o profiles.pdf profiles.adoc 48 | 49 | asciidoctor: ERROR: could not locate or load the built-in pdf theme 50 | docs-resources/themes/riscv-pdf.yml; reverting to default theme 51 | 52 | No such file or directory - notoserif-regular-subset.ttf not found in docs-resources/fonts 53 | Use --trace for backtrace 54 | make: *** [Makefile:7: profiles.pdf] Error 1 55 | ``` 56 | 57 | = Building the document 58 | 59 | The final specification can be generated in PDF form using the `make` command at the project root. -------------------------------------------------------------------------------- /specification/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for RISC-V Doc Template 2 | # 3 | # This work is licensed under the Creative Commons Attribution-ShareAlike 4.0 4 | # International License. To view a copy of this license, visit 5 | # http://creativecommons.org/licenses/by-sa/4.0/ or send a letter to 6 | # Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
7 | # 8 | # SPDX-License-Identifier: CC-BY-SA-4.0 9 | # 10 | # Description: 11 | # 12 | # This Makefile is designed to automate the process of building and packaging 13 | # the Doc Template for RISC-V Extensions. 14 | 15 | DOCKER_RUN := docker run --rm -v ${PWD}:/build -w /build \ 16 | riscvintl/riscv-docs-base-container-image:latest 17 | 18 | HEADER_SOURCE := ./src/header.adoc 19 | PDF_RESULT := riscv-platform-security-model.pdf 20 | 21 | ASCIIDOCTOR_PDF := asciidoctor-pdf 22 | OPTIONS := --trace \ 23 | -a compress \ 24 | -a mathematical-format=svg \ 25 | -a pdf-fontsdir=docs-resources/fonts \ 26 | -a pdf-theme=docs-resources/themes/riscv-pdf.yml \ 27 | --failure-level=ERROR 28 | REQUIRES := --require=asciidoctor-bibtex \ 29 | --require=asciidoctor-diagram \ 30 | --require=asciidoctor-mathematical 31 | 32 | .PHONY: all build clean build-container build-no-container 33 | 34 | all: build 35 | 36 | build: 37 | @echo "Checking if Docker is available..." 38 | @if command -v docker >/dev/null 2>&1 ; then \ 39 | echo "Docker is available, building inside Docker container..."; \ 40 | $(MAKE) build-container; \ 41 | else \ 42 | echo "Docker is not available, building without Docker..."; \ 43 | $(MAKE) build-no-container; \ 44 | fi 45 | 46 | build-container: 47 | @echo "Starting build inside Docker container..." 48 | $(DOCKER_RUN) /bin/sh -c "$(ASCIIDOCTOR_PDF) $(OPTIONS) $(REQUIRES) --out-file=$(PDF_RESULT) $(HEADER_SOURCE)" 49 | @echo "Build completed successfully inside Docker container." 50 | 51 | build-no-container: 52 | @echo "Starting build..." 53 | $(ASCIIDOCTOR_PDF) $(OPTIONS) $(REQUIRES) --out-file=$(PDF_RESULT) $(HEADER_SOURCE) 54 | @echo "Build completed successfully." 55 | 56 | clean: 57 | @echo "Cleaning up generated files..." 58 | rm -f $(PDF_RESULT) 59 | @echo "Cleanup completed." 
-------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc-20220217T212124Z-001.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc-20220217T212124Z-001.zip -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_0.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_1.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_2.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_3.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_4.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_5.png -------------------------------------------------------------------------------- /specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/conversion/Copy of RISC-V Platform Security Model.adoc/img_6.png -------------------------------------------------------------------------------- /specification/images/img_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_0.png -------------------------------------------------------------------------------- 
/specification/images/img_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_1.png -------------------------------------------------------------------------------- /specification/images/img_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_2.png -------------------------------------------------------------------------------- /specification/images/img_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_3.png -------------------------------------------------------------------------------- /specification/images/img_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_4.png -------------------------------------------------------------------------------- /specification/images/img_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_5.png -------------------------------------------------------------------------------- /specification/images/img_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_6.png 
-------------------------------------------------------------------------------- /specification/images/img_ch2_reference-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_ch2_reference-model.png -------------------------------------------------------------------------------- /specification/images/img_ch2_security-lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_ch2_security-lifecycle.png -------------------------------------------------------------------------------- /specification/images/img_ch4_cove.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_ch4_cove.png -------------------------------------------------------------------------------- /specification/images/img_ch4_gp-tee.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_ch4_gp-tee.png -------------------------------------------------------------------------------- /specification/images/img_ch4_priv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/images/img_ch4_priv.png -------------------------------------------------------------------------------- /specification/images/risc-v_logo.svg: 
-------------------------------------------------------------------------------- 1 | ../docs-resources/images/risc-v_logo.svg -------------------------------------------------------------------------------- /specification/src/README.adoc: -------------------------------------------------------------------------------- 1 | = RISC-V Specification 2 | 3 | This document describes the RISC-V Security Model specification. 4 | 5 | = License 6 | 7 | This work is licensed under a Creative Commons Attribution 4.0 International 8 | License (CC-BY-4.0). 9 | See the link:LICENSE[LICENSE] file for details. 10 | 11 | = Contributors 12 | 13 | Contributors to this specification are contained in the 14 | link:contributors.adoc[contributors] file. 15 | 16 | = Dependencies 17 | 18 | To build the document, you'll need the following tools installed on your system: 19 | 20 | ``` 21 | Make 22 | asciiDoctor-pdf, asciidoctor-bibtex, asciidoctor-diagram and 23 | asciidoctor-mathematical 24 | Docker 25 | ``` 26 | 27 | = Cloning and Building the Document 28 | 29 | This project uses submodules to include the RISC-V documentation toolchain. 30 | 31 | ``` 32 | git clone --recurse-submodules 33 | cd ./ 34 | make [VERSION=] [REVMARK=Draft] 35 | ``` 36 | 37 | `VERSION`: Represents the version of the specification being built. By default, 38 | this is set to 'v0.0.0'. You can change this to a different value, like 39 | 'v1.0.0', 'v1.1.0', etc., based on the current version of your specification. 40 | 41 | `REVMARK`: This represents a revision marker for the project. Its default value 42 | is 'Draft'. You may want to change this to something like 'Release', 'Stable' 43 | or 'Ratified'. 
44 | -------------------------------------------------------------------------------- /specification/src/archive/bibliography.adoc: -------------------------------------------------------------------------------- 1 | [bibliography] 2 | == Bibliography 3 | 4 | bibliography::[] 5 | -------------------------------------------------------------------------------- /specification/src/archive/chapter2.adoc: -------------------------------------------------------------------------------- 1 | [[chapter2]] 2 | == The Second Chapter 3 | 4 | . The first item. 5 | 6 | . The second item. 7 | + 8 | .. The first sub item. 9 | 10 | .. The second sub item. 11 | + 12 | [CAUTION] 13 | ==== 14 | A moment of caution is required for this block of text must be read and apreciated for its importance. 15 | ==== 16 | 17 | . Yet another item. 18 | 19 | . Again, an item. 20 | 21 | .. A multi-line item. 22 | + 23 | This item has multiple lines. 24 | + 25 | By multiple lines, this is what we mean. 26 | + 27 | Seriously, multiple. 28 | 29 | === An example table 30 | 31 | [cols="^1,^1,^1,^1,^3,^3",stripes=even,options="header"] 32 | |=== 33 | 4+|Letters _and_ bits {set:cellbgcolor:green} 2+|A much longer area 34 | |L|R|W|X|Quarter 1|Quarter 2 35 | |{set:cellbgcolor:!} 0|0|0|0 2+|Rows alternate colors 36 | |0|0|0|1|Thing 1|Thing 2 37 | |1|0|0|0|Thing 3|Thing 4 38 | |1|1|1|1 2+|Span Thing 1 and 2 39 | |=== 40 | 41 | === Sub section 42 | 43 | Diam donec adipiscing tristique risus indexterm:[risus]. Nisl rhoncus mattis rhoncus urna. Egestas egestas fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate. Porta non pulvinar neque laoreet suspendisse interdum consectetur libero id. Massa vitae tortor condimentum lacinia quis vel. Donec ac odio tempor orci. Mi sit amet mauris commodo quis imperdiet massa tincidunt. Quis enim lobortis scelerisque fermentum dui. Lacus viverra vitae congue eu. Sed faucibus turpis in eu mi bibendum neque. Sit amet porttitor eget dolor. 
Aliquet eget sit amet tellus cras adipiscing enim. Id cursus metus aliquam eleifend mi. Vestibulum lorem sed risus ultricies tristique nulla aliquet. 44 | 45 | === Yet another subsection 46 | 47 | Quam lacus suspendisse faucibus interdum posuere lorem ipsum. Nulla aliquet enim tortor at auctor urna nunc id cursus. Massa massa ultricies mi quis hendrerit dolor magna. Integer enim neque volutpat ac tincidunt. Dolor magna eget est lorem ipsum dolor. Urna neque viverra justo nec. Neque gravida in fermentum et. Fringilla ut morbi tincidunt augue interdum velit euismod. Dolor sit amet consectetur adipiscing elit. Eu facilisis sed odio morbi. In cursus turpis massa tincidunt dui. Orci indexterm:[orci] phasellus egestas tellus rutrum tellus. Semper eget duis at tellus at urna condimentum. Orci porta non pulvinar neque laoreet suspendisse interdum consectetur. 48 | -------------------------------------------------------------------------------- /specification/src/archive/contributors.adoc: -------------------------------------------------------------------------------- 1 | == Contributors 2 | 3 | This RISC-V specification has been contributed to directly or indirectly by: 4 | 5 | [%hardbreaks] 6 | * Andrew Dellow 7 | * Dong Du 8 | * Colin O'Flynn 9 | * Munir Geden 10 | * Yann Loisel 11 | * Manuel Offenberg 12 | * Ravi Sahita 13 | * Suresh Sugumar 14 | * Steve Wallach 15 | -------------------------------------------------------------------------------- /specification/src/archive/discovery.adoc: -------------------------------------------------------------------------------- 1 | 2 | == Discovery & Config Schema 3 | 4 | Placeholder. 
5 | -------------------------------------------------------------------------------- /specification/src/archive/header.adoc: -------------------------------------------------------------------------------- 1 | :description: RISC-V Platform Security Model Specification 2 | :company: RISC-V.org 3 | :revdate: 2/2022 4 | :revnumber: 0.1 5 | :revremark: This document is in development. Assume everything can change. 6 | :url-riscv: http://riscv.org 7 | :doctype: book 8 | :preface-title: Preamble 9 | :colophon: 10 | :chapter-signifier: Section 11 | :appendix-caption: Appendix 12 | :imagesdir: ../images 13 | :title-logo-image: image:risc-v_logo.svg[pdfwidth=3.25in,align=center] 14 | // Settings: 15 | :experimental: 16 | :reproducible: 17 | // needs to be changed? bug discussion started 18 | //:WaveDromEditorApp: app/wavedrom-editor.app 19 | :imagesoutdir: ../images 20 | :bibtex-file: src/example.bib 21 | :bibtex-order: alphabetical 22 | :bibtex-style: apa 23 | :icons: font 24 | :lang: en 25 | :listing-caption: Listing 26 | :sectnums: 27 | //:toc: left 28 | :toc: macro 29 | :toclevels: 4 30 | :source-highlighter: pygments 31 | ifdef::backend-pdf[] 32 | :source-highlighter: coderay 33 | endif::[] 34 | :data-uri: 35 | :hide-uri-scheme: 36 | :stem: latexmath 37 | :footnote: 38 | :xrefstyle: short 39 | 40 | = RISC-V Platform Security Model Specification 41 | :author: RISC-V Security Committee 42 | 43 | // Preamble 44 | [WARNING] 45 | .This document is in the link:http://riscv.org/spec-state[Development state] 46 | ==== 47 | Assume everything can change. This draft specification will change before 48 | being accepted as standard, so implementations made to this draft 49 | specification will likely not conform to the future standard. 50 | ==== 51 | 52 | [preface] 53 | == Copyright and license information 54 | This specification is licensed under the Creative Commons 55 | Attribution 4.0 International License (CC-BY 4.0). 
The full 56 | license text is available at 57 | https://creativecommons.org/licenses/by/4.0/. 58 | 59 | Copyright 2022 by RISC-V International. 60 | 61 | [preface] 62 | include::contributors.adoc[] 63 | 64 | toc::[] 65 | 66 | include::intro.adoc[] 67 | 68 | include::zero_trust.adoc[] 69 | include::threat_model.adoc[] 70 | include::security_model.adoc[] 71 | include::security_ecosystem.adoc[] 72 | include::discovery.adoc[] 73 | include::standards.adoc[] 74 | //include::chapter2.adoc[] 75 | 76 | //Appendix 77 | include::zero_trust_principles.adoc[] 78 | include::references.adoc[] 79 | 80 | //the index must precede the bibliography 81 | include::index.adoc[] 82 | 83 | include::bibliography.adoc[] 84 | -------------------------------------------------------------------------------- /specification/src/archive/index.adoc: -------------------------------------------------------------------------------- 1 | [index] 2 | == Index 3 | -------------------------------------------------------------------------------- /specification/src/archive/intro.adoc: -------------------------------------------------------------------------------- 1 | :imagesdir: ../../images 2 | 3 | == Introduction 4 | 5 | Security needs to be an intrinsic part of the hardware, software, and firmware and no longer an afterthought or add-on feature. This document provides a holistic view of the threat model, security analysis, security requirements/ recommendations enabling anyone building a RISC-V platform. 6 | 7 | 8 | === Guiding Principles 9 | ==== Intrinsic Security 10 | 11 | Security has often been a late consideration in the development of systems, hardware, and software. The emergence of exploits such as malware, trojans, the recent Spectre, Meltdown, RAMbleed attacks has resulted in serious financial and reputational losses. This illustrates the need to consider security as an essential component, directly built into a system rather than layered on top. 
12 | 13 | Unlike other commercial architectures such as X86 and ARM which carry a lot of legacies, RISC-V is a clean slate architecture that can invite a whole lot of new features and solutions both in hardware and software. Below are some key rationales for improving and accelerating RISC-V security 14 | 15 | * Clean slate architecture with no legacy support complexity 16 | * Open security model accelerates hardware security innovation 17 | * Opportunity to incorporate industry learnings & best practices for security 18 | * Open governance facilitates collaboration on the best security approach 19 | * Royalty-free model enables wide access to new hardware security solutions thereby democratizing innovation. 20 | 21 | ==== Zero Trust Model 22 | 23 | The Zero Trust model follows the principle "never trust, always verify." The term Zero Trust does not mean "Zero" Trust, but it's quite the opposite. It improves the trust of the whole platform by not having any implicit trust assumptions among various components that are interconnected, but by establishing trust by verifying/ authenticating before trusting. 24 | 25 | For further details please refer to <<zero_trust>>. 26 | 27 | 28 | === Device Model 29 | 30 | This diagram below represents a generic device model having an application processor, other IPs, etc. on the left side and root-of-trust on the right side that is isolated from each other. The only communication between the two domains is via a security mailbox. 31 | 32 | .Device Model 33 | image::img_0.png[] 34 | 35 | 36 | The root-of-trust consists of the following elements: 37 | 38 | . An immutable code and data that are typically programmed into ROM/ OTP and root-keys either provisioned into OTP or generated every time from the PUF (physical unclonable function) hardware 39 | . A mutable code and data that are updated onto the flash to fix bugs or enhance features, which are authenticated and verified before consumption 40 | .
A secure storage mechanism to store secrets/ assets bound to the platform, ex: root-key 41 | . A secure mailbox is the only mechanism to communicate between both the domains 42 | 43 | 44 | === Security Goals 45 | 46 | The security goals are high-level essential security features for a platform to implement, for the product to be secure and trustworthy. Note that depending on the final usecase, some or all of these goals may not be required and that this will be defined in the mapping to platform specifications (TBD - point to the mapping section later). 47 | 48 | [cols="1,5,5",stripes=even,options="header"] 49 | |=== 50 | | # | Goal | Description 51 | | 1 | A platform is uniquely identifiable | A platform shall have an immutable identity that is both verifiable and attestable 52 | | 2 | A platform shall only execute authorized software | A platform shall verify and authenticate any software before execution 53 | | 3 | A platform shall support device bound storage | A platform shall support a secure storage mechanism to store keys/ secrets that are tied to a particular platform (confidentiality & integrity protection of secret keys, integrity protection for public keys) 54 | | 4 | A platform shall support secure TCB update | A platform shall verify and authenticate any software image updates before loading/ storing/ executing them 55 | | 5 | A platform shall prevent software version rollback | A platform shall prevent software version rollback with anti-rollback mechanism using techniques like monotonic counters 56 | | 6 | A platform shall support security through its lifecycle | A platform shall support security through various lifecycle stages such as development, deployed, returned, end-of-life, etc 57 | | 7 | A platform shall support isolation for code & data | A platform shall support both temporal and spatial isolation for security sensitive code and data 58 | | 8 | A platform shall implement all zero-trust principles | See section 3 59 | | 9 | A platform
shall offer crypto services | A platform shall offer classical crypto & optionally post-quantum crypto operations 60 | | 10 | A platform shall protect customer sensitive data | A Platform shall offer confidentiality, integrity, and authenticity protection to sensitive data 61 | | 11 | A platform shall establish its trustworthiness remotely | A platform shall be remotely attestable to prove its trustworthiness and security properties 62 | | 12 | Security guidelines matched to product segment/ profile | A platform shall follow the security guidelines below to incorporate the right level of security for its product segment/ profile needs 63 | | 13 | Software toolchains shall enforce security properties | A platform toolchain shall provide security, ex: C compiler when compiling to WebAssembly certain security features like stack canary are being omitted, and so the hardware cannot also protect 64 | | 14 | | 65 | |=== 66 | 67 | 68 | 69 | === Adversary Model 70 | 71 | The following are the adversary models we consider for this document: 72 | 73 | [cols="1,5,5",stripes=even,options="header"] 74 | |=== 75 | | # | Adversary | Description 76 | | 1 | Unprivileged Software Adversary | This includes software executing in U-mode. Application workloads are typically being managed by S/M-mode system software. This adversary can access U-mode CSRs, process/task memory, CPU registers in the process context. 77 | | 2 | System Software Adversary | This includes system software executing in S and HS-modes. Such an adversary can access privileged CSRs, all of the system memory, CPU registers, and IO devices. 78 | | 3 | Startup Code Adversary | This includes system software executing in early/boot phases of the system, including BIOS, memory configuration code, device option ROM/firmware that can access system memory, CPU registers, IO devices, and IOMMU, etc. 
79 | | 4 | Simple Hardware Adversary | This includes adversaries that can use hardware attacks such as bus interposers to snoop on memory/device interfaces, which may give the adversary the ability to tamper with data in memory. 80 | | 5 | Advanced Hardware Adversary | This includes adversaries that can use advanced hardware attacks, with unlimited physical access to the devices, and use mechanisms to tamper with the hardware TCB e.g., extract keys from hardware, using capabilities such as scanning electron microscopes, fib attacks, glitching attacks, etc. 81 | | 6 | Side/ Covert Channel Adversary | This includes adversaries that may leverage any explicit/implicit shared state (architectural or micro-architectural) to leak information across privilege boundaries via inference of characteristics from the shared resources (e.g. caches, branch prediction state, internal micro-architectural buffers, queues). Some attacks may require the use of high-precision timers to leak information. A combination of system software and hardware adversarial approaches may be utilized by this adversary. 82 | |=== 83 | -------------------------------------------------------------------------------- /specification/src/archive/references.adoc: -------------------------------------------------------------------------------- 1 | [appendix] 2 | 3 | == References 4 | 5 | 6 | . https://www.intel.com/content/www/us/en/newsroom/opinion/zero-trust-approach-architecting-silicon.html[https://www.intel.com/content/www/us/en/newsroom/opinion/zero-trust-approach-architecting-silicon.html] 7 | . https://www.forrester.com/blogs/tag/zero-trust/[https://www.forrester.com/blogs/tag/zero-trust/] 8 | . https://docs.microsoft.com/en-us/security/zero-trust/[https://docs.microsoft.com/en-us/security/zero-trust/] 9 | . https://github.com/riscv/riscv-crypto/releases[https://github.com/riscv/riscv-crypto/releases] 10 | . 
https://github.com/riscv/riscv-platform-specs/blob/main/riscv-platform-spec.adoc[https://github.com/riscv/riscv-platform-specs/blob/main/riscv-platform-spec.adoc] 11 | . https://www.commoncriteriaportal.org/files/ppfiles/pp0084b_pdf.pdf[https://www.commoncriteriaportal.org/files/ppfiles/pp0084b_pdf.pdf] 12 | . https://docs.opentitan.org/doc/security/specs/device_life_cycle/[https://docs.opentitan.org/doc/security/specs/device_life_cycle/] 13 | . https://nvlpubs.nist.gov/nistpubs/ir/2021/NIST.IR.8320-draft.pdf[https://nvlpubs.nist.gov/nistpubs/ir/2021/NIST.IR.8320-draft.pdf] 14 | . https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-193.pdf[https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-193.pdf] 15 | . https://www.rambus.com/security/root-of-trust/rt-630/[https://www.rambus.com/security/root-of-trust/rt-630/] 16 | . https://docs.opentitan.org/doc/security/specs/[https://docs.opentitan.org/doc/security/specs/] 17 | . https://trustedcomputinggroup.org/work-groups/dice-architectures/[https://trustedcomputinggroup.org/work-groups/dice-architectures/] 18 | . https://ieeexplore.ieee.org/iel7/8168766/8203442/08203496.pdf[https://ieeexplore.ieee.org/iel7/8168766/8203442/08203496.pdf] 19 | . https://dl.acm.org/doi/10.1145/168619.168635[https://dl.acm.org/doi/10.1145/168619.168635] 20 | . https://dl.acm.org/doi/abs/10.1145/3342195.3387532[https://dl.acm.org/doi/abs/10.1145/3342195.3387532] 21 | . https://github.com/riscv/riscv-debug-spec/blob/master/riscv-debug-stable.pdf[https://github.com/riscv/riscv-debug-spec/blob/master/riscv-debug-stable.pdf] 22 | . https://csrc.nist.gov/csrc/media/events/non-invasive-attack-testing-workshop/documents/08_goodwill.pdf[https://csrc.nist.gov/csrc/media/events/non-invasive-attack-testing-workshop/documents/08_goodwill.pdf] 23 | . https://www.iso.org/standard/60612.html[https://www.iso.org/standard/60612.html] 24 | . 
https://ieeexplore.ieee.org/document/6176671[https://ieeexplore.ieee.org/document/6176671] 25 | . https://tches.iacr.org/index.php/TCHES/article/view/8988[https://tches.iacr.org/index.php/TCHES/article/view/8988] 26 | . https://ieeexplore.ieee.org/abstract/document/1401864[https://ieeexplore.ieee.org/abstract/document/1401864] 27 | . https://www.electronicspecifier.com/products/design-automation/increasingly-connected-world-needs-greater-security[https://www.electronicspecifier.com/products/design-automation/increasingly-connected-world-needs-greater-security] 28 | . https://www.samsungknox.com/es-419/blog/knox-e-fota-and-sequential-updates[https://www.samsungknox.com/es-419/blog/knox-e-fota-and-sequential-updates] 29 | . https://docs.microsoft.com/en-us/windows/security/threat-protection/intelligence/supply-chain-malware[https://docs.microsoft.com/en-us/windows/security/threat-protection/intelligence/supply-chain-malware] 30 | . https://dl.acm.org/doi/10.1145/3466752.3480068[https://dl.acm.org/doi/10.1145/3466752.3480068] 31 | . https://arxiv.org/abs/2111.01421[https://arxiv.org/abs/2111.01421] 32 | . https://www.nap.edu/catalog/24676/foundational-cybersecurity-research-improving-science-engineering-and-institutions[https://www.nap.edu/catalog/24676/foundational-cybersecurity-research-improving-science-engineering-and-institutions] 33 | . 
https://trustedcomputinggroup.org/work-groups/dice-architectures/[https://trustedcomputinggroup.org/work-groups/dice-architectures/] 34 | -------------------------------------------------------------------------------- /specification/src/archive/riscv-platform-security-model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/riscv-non-isa/riscv-security-model/99a182d56441ef730522a1486d7716756f9eca19/specification/src/archive/riscv-platform-security-model.pdf -------------------------------------------------------------------------------- /specification/src/archive/security_ecosystem.adoc: -------------------------------------------------------------------------------- 1 | [[security_ecosystem]] 2 | 3 | == Security Ecosystem 4 | 5 | text 6 | -------------------------------------------------------------------------------- /specification/src/archive/security_model.adoc: -------------------------------------------------------------------------------- 1 | :imagesdir: ../../images 2 | 3 | [[security_model]] 4 | 5 | == Platform Security Model 6 | 7 | In this chapter, we shall discuss the various components of the Zero Trust Platform Security Model along with guidance for architecting them effectively. 8 | 9 | === Platform Unique Identity 10 | 11 | This is typically done by provisioning a Hardware Unique Key (HUK) or root-key into on-chip immutable storage such as One Time Programmable (OTP) memory/ fuses OR by using hardware or software Physical Unclonable Functions (PUFs) that can regenerate unique identities for a platform based on the device characteristics. Once a platform is uniquely identifiable, then that identity shall be verifiable and attested as proof of identity. 
DICE ^[12]^ attestation architecture from TCG for example uses hardware unique key to establish a cryptographically strong device identity, attest software and security policy, and assist in safely deploying and verifying software updates. 12 | 13 | 14 | 15 | === Platform Root-of-Trust 16 | 17 | A hardware Root-of-Trust (RoT) is the foundation on which all security operations of a computing system depend. It contains the root keys used for cryptographic functions and enables a secure boot process. It is inherently trusted, and therefore must be secure by design. The most secure implementation of a root of trust utilizes hardware to make it immune from malware attacks. As such, it can be a stand-alone security module or implemented as a security module within a processor or System on Chip (SoC). The RoT consists of both an immutable firmware that is trusted which never changes on a production platform such as ROM and an updatable firmware that is verified every time before trusting that is anchored to the hardware. The RoT shall provide trusted services such as verified boot as a chain of trust, key provisioning, and management, security lifecycle management, sealed storage, device management, crypto services, attestation, etc. There are 2 types of RoT available to choose 18 | 19 | * Hardware-based RoT are of two types - fixed-function and programmable, where the fixed-function RoT is specifically targeted for resource-constrained IoT product segments while the fully reprogrammable RoT is for all other product segments where a more complex set of security functions are required that can evolve over time to meet new attacks/ vulnerabilities. 
20 | 21 | Hardware RoTs can be further classified into two types 22 | 23 | ** Integrated RoT, where RoT IP is integrated into an SoC 24 | ** Discrete RoT (platform RoT), where RoT is added to the platform/ PCB such as TPM module or secure elements chip 25 | 26 | 27 | 28 | .Hardware Root-of-Trust _[10]_ 29 | image::img_2.png[] 30 | 31 | 32 | 33 | 34 | === Device Lifecycle 35 | 36 | A security lifecycle defines the security state of a device through its lifetime, where each state of the device defines the security properties to be in effect in that state. The below security life cycle model is borrowed from OpenTitan's life cycle model and acts as a good reference for implementing product-specific security life cycle whether we use an integrated RoT or discrete RoT such as OpenTitan or TPM module or Secure Elements. 37 | 38 | .Device Security Lifecycle _[11]_ 39 | image::img_3.png[] 40 | 41 | 42 | 43 | ==== Device Provisioning 44 | 45 | 46 | ==== Secure Boot 47 | 48 | 49 | .Secure Boot _[22]_ 50 | image::img_4.png[] 51 | 52 | 53 | 54 | ==== Ownership Transfer 55 | 56 | 57 | 58 | ==== Device Attestation 59 | 60 | The health of the platform shall be remotely verifiable by measuring the firmware's integrity. 61 | 62 | 63 | 64 | ==== Software Update 65 | 66 | .Software Update Overview _[23]_ 67 | image::img_5.png[] 68 | 69 | 70 | 71 | === Firmware Version Anti-rollback 72 | 73 | Anti-rollback mechanisms shall be implemented to prevent unauthorized firmware version rollback as described in chapter Firmware Upgrade. Typically this is done using monotonic counters in hardware. 74 | 75 | 76 | 77 | === Sealed Storage 78 | 79 | 80 | 81 | 82 | 83 | === Authorized Software Execution 84 | 85 | Before any software/ firmware is executed, the platform shall verify its authenticity with digital signature checks.
86 | 87 | 88 | 89 | === Trusted Execution Capability 90 | 91 | A trusted execution environment (TEE) is a context of execution protected by a system processor with a Hardware-attested TCB. Sensitive secrets like cryptographic keys, authentication strings, or data with intellectual property and privacy concerns can be preserved within a TEE, and operations involving these secrets can be performed within the TEE, thereby mitigating against loss of confidentiality and integrity. A TEE also helps ensure that operations performed within it and the associated data cannot be viewed from outside, not even by privileged software or debuggers. Communication with the TEE is designed to only be possible through designated interfaces, and it is the responsibility of the TEE designer/ developer to define these interfaces appropriately. 92 | 93 | * Memory Isolation with Encryption 94 | * Application Isolation via TEEs 95 | * VM Isolation with Encryption 96 | * Integrity Protection using Merkle Trees 97 | 98 | 99 | 100 | Multiple TEEs are becoming popular as gathering assets from various stakeholders in a single TEE could be limiting and risky from a business perspective ^[15]^ . 101 | 102 | 103 | === Cryptographically-Secure Entropy Source 104 | 105 | 106 | 107 | 108 | 109 | === Cryptographic ISA Extensions/ Accelerators 110 | 111 | … 112 | 113 | 114 | 115 | === Confidential Computing 116 | 117 | The Confidential Computing Consortium has defined confidential computing as “the protection of data in use by performing computation in a hardware-based Trusted Execution Environment”, and identified three primary attributes for what constitutes a Trusted Execution Environment: data integrity, data confidentiality, and code integrity. 
As described in https://confidentialcomputing.io/whitepaper-01-latest/[Confidential Computing: Hardware-Based Trusted Execution for Applications and Data], four additional attributes may be present (code confidentiality, programmability, recoverability, and attestability) but are not strictly necessary for a computational environment to be classified as confidential computing. 118 | 119 | 120 | 121 | === Control Flow Integrity 122 | 123 | … 124 | 125 | 126 | 127 | === Software Fault Isolation 128 | 129 | … Refer [14] 130 | 131 | 132 | 133 | === Memory Safety 134 | 135 | … 136 | 137 | 138 | 139 | === Side-channel Attack Resistance 140 | 141 | Side-channel attacks (the most recent popular ones being Spectre & Meltdown) exploit unintended information leakage, such as the execution time of an algorithm depending on the secret key being processed. On embedded systems, common observations which leak side-channel information include timing and power (including electromagnetic emissions from power). These leaks are coupled to the physical design of the device, including microarchitectural design choices. Such attacks may require intimate physical measurements taken from the device or maybe possibly from external interfaces or even across networks. Microarchitectural covert channels exploit changes in execution timing resulting from competing for access to limited hardware resources such as caches, TLBs, branch predictors, and prefetchers. An example microarchitectural attack is: a Spy & Trojan process exploits the Victim process and leaks secrets, ex: the cache lines used by the Trojan create a footprint that can be sensed by the Spy and similarly, TLB, prefetchers also can be utilized for the sensing. 142 | 143 | Measuring the “leakage” from a device to understand if the vulnerability is present is typically done with a statistical measurement showing if a device has differing characteristics when processing different secret values. 
In power and timing side-channels this commonly uses the Test Vector Leakage Assessment (TVLA) method ^[17]^ which relies on Welch’s T-Test, which has been standardized as ISO 17825 ^[18]^ . 144 | 145 | Countermeasures may include algorithmic-level and physical level changes, and the choice of countermeasure depends on required protection along with interoperability requirements. Modifying standard algorithms to include a mask means the algorithm processes a random value (unknown to the attacker), complicating side-channel attacks. For example, AES can be masked such that it produces the same input and output as the standard AES modes, but with different intermediate states than unmasked AES ^[19]^ . The encryption can also be adjusted to be used in a leakage-resistant fashion, which is incompatible with “standard” AES modes ^[20]^ . Physical changes such as dual-rail logic can suppress the leakage, and do not require changes to the algorithms ^[21]^ . As another example, the recent fence.T new ISA extension proposed for RISC-V for temporal partitioning prevents any interference between security domains, each such microarchitectural state must be reset to a state that is independent of execution history before a context switch to a different process. 146 | 147 | Speculative execution attacks put a dangerous new twist on information leakage through microarchitectural side channels. Ordinarily, programmers can reason about leakage based on the program’s semantics, and prevent said leakage by carefully writing the program to not pass secrets to covert channel-creating “transmitter” instructions, such as branches and loads. Speculative execution breaks this defense because a transmitter might miss-speculatively execute with a secret operand even if it can never execute with said operand invalid executions. 
There has been much research around the microarchitecture of RISC-V designs
165 | 166 | 167 | * Software Supply Chain Mitigation: 168 | + 169 | Software supply chain attacks ^[24]^ are an emerging kind of threat that targets software developers and suppliers. The goal is to access source codes, build processes, or update mechanisms by infecting legitimate apps to distribute malware. Because software is built and released by trusted vendors, these apps and updates are signed and certified. In software supply chain attacks, vendors are likely unaware that their apps or updates are infected with malicious code when they’re released to the public. The malicious code then runs with the same trust and permissions as the app. The following types of attacks exist 170 | 171 | ** Compromised software building tools or updated infrastructure 172 | ** Stolen code-sign certificates or signed malicious apps using the stolen identity 173 | ** Compromised specialized code shipped into hardware or firmware components 174 | ** Pre-installed malware on devices 175 | 176 | + 177 | Deploying strong code integrity policies to allow only authorized apps to run on the device and using on-device detection of suspicious activities to take remedial action is a promising way to mitigate against such attacks. 
178 | -------------------------------------------------------------------------------- /specification/src/archive/standards.adoc: -------------------------------------------------------------------------------- 1 | [[standards]] 2 | 3 | == Standards & Certification 4 | 5 | text 6 | -------------------------------------------------------------------------------- /specification/src/archive/threat_model.adoc: -------------------------------------------------------------------------------- 1 | [[threat_model]] 2 | 3 | == Threat Model 4 | 5 | === Pointer Safety 6 | 7 | [cols="1,3", grid=none, frame=none] 8 | |=== 9 | |Asset: | Pointers 10 | | 11 | Location: | Memory 12 | | 13 | Description: | Pointers stored in programs to store addresses 14 | | 15 | Security Property: | Integrity 16 | | 17 | Threat: | Tamper 18 | | 19 | Entry Point of Threat: | Misusing pointers to access unauthorized memory, manipulating stack, heap regions, executing data pointers, use after free, out of range access, etc 20 | | 21 | Impact of Vulnerability: | Memory misuse 22 | | 23 | Severity CVSS v3 Rating: | HIGH: 7.5 24 | 25 | CVSS v3.1 Vector 26 | 27 | AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N 28 | 29 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 30 | | 31 | Mitigation/Security Requirement: | Extending pointer virtual address width or using unused bits if any of pointer virtual address to hold type, permissions, and tag inserted by malloc function and checked during page, walk to prevent memory misuse 32 | 33 | |=== 34 | 35 | === Stack Safety 36 | 37 | [cols="1,3", grid=none, frame=none] 38 | |=== 39 | 40 | |Asset: | Stack 41 | | 42 | Location: | Memory/ CPU Registers 43 | | 44 | Description: | System Stack 45 | | 46 | Security Property: | Integrity 47 | | 48 | Threat: | Tamper 49 | | 50 | Entry Point of Threat: | Return Oriented Programming (ROP) attack using stack smashing by either buffer overrun or injecting code into the stack 51 | | 52 | Impact of Vulnerability: | Program 
control-flow hijack 53 | | 54 | Severity CVSS v3 Rating:| HIGH: 7.5 55 | 56 | CVSS v3.1 Vector 57 | 58 | AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N 59 | 60 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 61 | | 62 | Mitigation/Security Requirement: | Use shadow stack to compare return addresses for control-flow transfer instructions if a mismatch is detected then raise an exception to the kernel to handle it 63 | 64 | |=== 65 | 66 | === Call/ Jump Safety 67 | 68 | [cols="1,3", grid=none, frame=none] 69 | |=== 70 | 71 | |Asset: | Call/ Jump Targets 72 | | 73 | Location: | Memory/ CPU Registers 74 | | 75 | Description: | Indirect call/ jump target addresses 76 | | 77 | Security Property: | Integrity 78 | | 79 | Threat: | Tamper 80 | | 81 | Entry Point of Threat: | Call/ Jump Oriented Programming (COP/ JOP) attack using ata tampering to perform indirect call/ jump to invalid locations 82 | | 83 | Impact of Vulnerability:| Program control-flow hijack 84 | | 85 | Severity CVSS v3 Rating: | HIGH: 7.5 86 | 87 | CVSS v3.1 Vector 88 | 89 | AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N 90 | 91 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 92 | | 93 | Mitigation/Security Requirement: | Track indirect call/jump instructions and permit only valid call/jump locations of the code 94 | 95 | |=== 96 | 97 | === Code/ Data Confidentiality 98 | 99 | [cols="1,3", grid=none, frame=none] 100 | |=== 101 | 102 | |Asset: | Code/ Data 103 | | 104 | Location: | Memory/ CPU Registers 105 | | 106 | Description: | Software Code and Data 107 | | 108 | Security Property: | Confidentiality 109 | | 110 | Threat: | Disclosure 111 | | 112 | Entry Point of Threat: | Vulnerable OS/ VMM can be exploited with privilege escalation o tamper code/ data of an application or hosted software 113 | | 114 | Impact of Vulnerability: | Compromised confidentiality of secrets 115 | | 116 | Severity CVSS v3 Rating: | HIGH: 7.5 117 | 118 | CVSS v3.1 Vector 119 | 120 | AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N 121 | 122 | 
(https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 123 | | 124 | Mitigation/Security Requirement: | Encrypt code/ data via hardware mechanisms with hardware enerated keys invisible to OS/ VMM 125 | 126 | |=== 127 | 128 | === Code/ Data Integrity 129 | 130 | [cols="1,3", grid=none, frame=none] 131 | |=== 132 | 133 | |Asset: | Code/ Data 134 | | 135 | Location: | Memory/ CPU Registers 136 | | 137 | Description: | Software Code and Data 138 | | 139 | Security Property: | Integrity 140 | | 141 | Threat: | Tamper 142 | | 143 | Entry Point of Threat: | Vulnerable OS/ VMM can be exploited with privilege escalation to tamper code/ data of an application or hosted software 144 | | 145 | Impact of Vulnerability: | Compromised integrity of interesting assets, eg: code 146 | | 147 | Severity CVSS v3 Rating: | HIGH: 7.5 148 | 149 | CVSS v3.1 Vector 150 | 151 | AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:L/A:N 152 | 153 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 154 | | 155 | Mitigation/Security Requirement: | Integrity check (is a threat protection mechanism that checks the drivers and system files on your device for signs of corruption) of code/ data by hardware that is attested by the hardware which can be verified locally/ remotely. Integrity checking should/shall be a permanently running mechanism. 
156 | 157 | |=== 158 | 159 | === Timing Side-Channel Safety 160 | 161 | [cols="1,3", grid=none, frame=none] 162 | |=== 163 | 164 | |Asset: | Any secret (see section 5.14) 165 | | 166 | Location: | Cache, TLB, Memory 167 | | 168 | Description: | Leakage 169 | | 170 | Security Property: | Confidentiality 171 | | 172 | Threat: | Disclosure 173 | | 174 | Entry Point of Threat: | Covert channel - Spy & Trojan attacking the victim 175 | | 176 | Impact of Vulnerability: | Disclosure of secret 177 | | 178 | Severity CVSS v3 Rating: | HIGH: 6.2 179 | 180 | CVSS v3.1 Vector 181 | 182 | AV:L/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N 183 | 184 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 185 | | 186 | Mitigation/Security Requirement: | Timing protection (temporal partitioning) to prevent interference that affects observable timing behavior. The new fence.T ISA extension proposed for RISC-V for temporal partitioning prevents any interference between security domains, each such microarchitectural state must be reset to a state that is independent of execution history before a context switch to a different thread/ process. 
187 | 188 | |=== 189 | 190 | 191 | === Hardware Supply Chain Safety 192 | 193 | [cols="1,3", grid=none, frame=none] 194 | |=== 195 | 196 | |Asset: | Hardware IP 197 | | 198 | Location: | Design (GDSII) 199 | | 200 | Description: | IP theft, Counterfeiting, Overproduction 201 | | 202 | Security Property: | Confidentiality 203 | | 204 | Threat: | Disclosure 205 | | 206 | Entry Point of Threat: | Design in GDSII form 207 | | 208 | Impact of Vulnerability: | Loss of IP, Loss of revenue 209 | | 210 | Severity CVSS v3 Rating: | HIGH: 4.6 211 | 212 | CVSS v3.1 Vector 213 | 214 | https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?vector=AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N&version=3.1[AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N] 215 | 216 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 217 | | 218 | Mitigation/Security Requirement: | Logic locking is one of the new emerging technology that enables the hardware to lock the IP/ SoC using a password only known to the design house and can only be unlocked after the parts come back to the design house. Without this password, the IP/ SoC is literally defunct or unusable. 
219 | 220 | |=== 221 | 222 | 223 | 224 | === Software Supply Chain Safety 225 | 226 | [cols="1,3", grid=none, frame=none] 227 | |=== 228 | 229 | |Asset: | Software IP 230 | | 231 | Location: | Software/ Application binary 232 | | 233 | Description: | Cloning, Tampering 234 | | 235 | Security Property: | Confidentiality, Integrity 236 | | 237 | Threat: | Disclosure, Tamper 238 | | 239 | Entry Point of Threat: | Build tools, build servers, release servers, etc 240 | | 241 | Impact of Vulnerability: | Loss of IP, Loss of revenue 242 | | 243 | Severity CVSS v3 Rating: | HIGH: 4.6 244 | 245 | CVSS v3.1 Vector 246 | 247 | https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?vector=AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N&version=3.1[AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N] 248 | 249 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 250 | | 251 | Mitigation/Security Requirement: | Encryption, Attestation, and protection of code signing certificates, build tool attestation, etc 252 | 253 | |=== 254 | 255 | 256 | 257 | === Peripheral/ IP Authentication 258 | 259 | [cols="1,3", grid=none, frame=none] 260 | |=== 261 | 262 | |Asset: | Peripherals/ IPs 263 | | 264 | Location: | SoC/ Platform 265 | | 266 | Description: | Fake/ rogue Peripheral/ IP communicating with the victim 267 | | 268 | Security Property: | Integrity, Availability 269 | | 270 | Threat: | Disclosure, Tamper 271 | | 272 | Entry Point of Threat: | Procurement channels 273 | | 274 | Impact of Vulnerability: | Insecure products 275 | | 276 | Severity CVSS v3 Rating: | HIGH: 5.2 277 | 278 | CVSS v3.1 Vector 279 | 280 | https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?vector=AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N&version=3.1[AV:P/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:]L 281 | 282 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 283 | | 284 | Mitigation/Security Requirement: | Peripheral/ IP mutual authentication. 
Recent developments in the industry to address this concern include opencompute.org, dmtf.org, and pce.org where they propose peripheral extensions to enable mutual authentication and encrypted communication among N parties on the platform. This could be extended to even to the IP level inside the SoC, which needs careful evaluation to make sure the trade-offs for PPA$ are worth the additional security it offers for the particular product. 285 | 286 | |=== 287 | 288 | 289 | 290 | === Non-CPU IPs/ Peripherals outside TEE 291 | 292 | [cols="1,3", grid=none, frame=none] 293 | |=== 294 | 295 | |Asset: | Peripherals/ IPs 296 | | 297 | Location: | SoC/ Platform 298 | | 299 | Description: | Non-CPU IPs & Peripherals are outside scopes of TEE and hence the code & data do not get any security guarantees from the TEE, and so are unprotected 300 | | 301 | Security Property: | Confidentiality, Integrity, Availability 302 | | 303 | Threat: | Disclosure, Tamper, DoS 304 | | 305 | Entry Point of Threat: | Untrusted OS/ VMM 306 | | 307 | Impact of Vulnerability: | Weak security to code/ data 308 | | 309 | Severity CVSS v3 Rating: | HIGH: 7.2 310 | 311 | CVSS v3.1 Vector 312 | 313 | https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?vector=AV:P/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N&version=3.1[AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:]H 314 | 315 | (https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator) 316 | | 317 | Mitigation/Security Requirement: | TEEs need to be extended to include non-CPU IPs such as GPU, etc., and peripheral devices into the enclave. 318 | 319 | |=== 320 | -------------------------------------------------------------------------------- /specification/src/archive/zero_trust.adoc: -------------------------------------------------------------------------------- 1 | :imagesdir: ../../images 2 | 3 | [[zero_trust]] 4 | 5 | == Zero Trust 6 | 7 | === What is Zero Trust? 
8 | 9 | Zero Trust, the term coined in 2010 by Forrester Research ^[2]^ , refers to a proactive and pervasive approach to network security designed to minimize uncertainty. It shifts the paradigm from trust-based on physical connectivity or proximity to a new model that involves always authenticating and verifying every access. This new model assumes breach and verifies each request as though it originated from an uncontrolled network. Regardless of where the request originates or what resource it accesses, the Zero Trust model teaches us to "never trust, always verify." This model for an enterprise spans from device/ user identity, devices, data, and applications to the network infrastructure that needs to be secured end-to-end to attain Zero Trust. 10 | 11 | .Zero Trust Security for an Enterprise _[3]_ 12 | image::img_1.png[] 13 | 14 | 15 | === Principles of Zero Trust 16 | 17 | We can draw an analogy between the networking world of things to the platforms/ chipsets (SoC) that are developed for mobile phones, laptops, wearables/ IoT, and data centers, such that in the first case endpoint devices are communicating with each other while in the latter case various platform components and chipset components are communicating with each other to perform a computation and similarly various software layers are communicating with each other to execute a function/ application for the user. 18 | 19 | . Verify explicitly 20 | ** Hardware: The source of a data packet from a peripheral bus on the platform or an internal bus/ NoC within a chip needs to be authenticated first before consuming it. We are used to the paradigm of trusting anything that is attached to a hardware bus, and recent sophisticated physical attacks are proving us wrong. Data protection with authentication, confidentiality, integrity, and replay protection shall be provided. 
21 | ** Software: An inter-VM message coming from one VM to another needs to be verified for authenticity of the source before granting access to any resources. 22 | . Use least-privilege access 23 | ** Hardware: Minimize the privileges any hardware block has. In hardware, it’s often appealing to give additional privileges to agents just in case. For example, the PMU firmware has no need to access TRNG/ PUF registers, but granting access to PMU and a compromised firmware might steal secrets. Hardware programming must be reportable to be able to validate that the least privileges are enforced. Alternate modes of access to the same resource should be avoided when possible, for example special modes should be visibly reported. 24 | ** Software: Limit the access of a software module to what it needs to perform its task at a given time. Paradigms such as Just-In-Time (JIT - not the JIT language model) and Just-Enough-Access (JEA) shall help mitigate risks. 25 | . Assume breach 26 | ** Hardware: Never assume security, but assume breach of trust is imminent and build defenses to mitigate it. A peripheral with compromised firmware could be plugged into the system anytime, and before it hijacks control, detection, and quarantine actions need to be taken. 27 | ** Software: Function calls and API invocations are key methods to execute gadgets to deviate program control flow. Attacks such as ROP/ JOP, control flow bending are classic cases of not verifying the parameters passed in function calls or memory (stack/ heap) corruption, etc. Hardware support for protecting the control-flow integrity can enforce deterministic properties on code execution. 28 | . Fail safely 29 | ** Hardware: Ensure that error conditions don't leave secrets around. Configuration or runtime error conditions or unexpected configuration changes must not leave sensitive data unprotected or disclose sensitive information via error management. 
Hardware elements should provide privileged software with appropriate mechanisms to manage state changes to enforce safe failures without data leakage. The classic anti-pattern in hardware is the so-called cold-boot attack, where secrets are left in memory after a reboot. Certain new memory devices have been invented to detect temperature variations and erase secrets automatically. ML algorithms also detect such attacks to take preventive measures, for example on a drone that was just shot down, will detect freefall and then erase critical secrets such as keys from the root-of-trust engine. 30 | ** Software: Borrowing concepts from functional safety (IEC 61508, ISO 26262, ISO 21434), a failure or fault could be detected and then bring the system to a safe/ secure state and avoid catastrophe. 31 | . Complete mediation 32 | ** Hardware: Check every single access to confirm legitimacy. In hardware, this might mean making memory accesses go through appropriate memory management checks from the path from application to memory and back. Some CPUs and bus-masters even have unique IDs or fingerprints or tags that are sent along with every transaction on the bus for anyone to check legitimacy and non-repudiation. 33 | ** Software: The use of software isolation mechanisms in hardware offer mediation of access from software entities (guest OSes and applications) to peripherals, as well as from additional bus masters to the memory of other components. 34 | . Separation of duty 35 | ** Hardware: Every agent on the system has a single purpose, for example, a PMU only does power management and a debug controller only manages to debug ports. Since hardware real-estate is expensive, it is often appealing to overload an agent with multiple duties. This complicates validating and reasoning about the security posture. Isolation techniques can help to guarantee strict separation while saving real estate. 
36 | ** Software: The addition of a new privileged mode (hypervisor) enables the software (VMMs) to use privilege levels to separate duties and isolate failures for VMs. 37 | . Least common mechanism 38 | ** Hardware: Separate out security functions from others. It is a common design pattern to have a shared bus that transports sideband messages across designs given the expense of on-die wires. If that same bus carries non-secrets and secrets, it is an attack point. All shared resources should logically be separated to avoid sharing mechanisms from being misused as covert or side channels. 39 | ** Software: The addition of a new privileged mode (hypervisor) is an example of one way to enable the separation and segregation of security-critical tasks from non-critical ones. 40 | . Protect the weakest link 41 | ** Hardware: Protect the design’s weakest part. Hardware debug and monitoring features often require access to all assets of the design, and can hence access sensitive data. Often debug functionality is at odds with security mechanisms that try to restrict or minimize access to the data. Appropriate security mechanisms must be employed to debug available in isolated portions when activated. 42 | ** Software: The D-mode (debug mode) software is the component with the highest level of privileges in a RISC-V platform. We could consider the debug mode to be restricted and not have the highest level of privilege ( refer to debug spec requirement from security section 3.12). 43 | . Defense-in-depth 44 | ** Hardware: Provide appropriate checkpoints in the system that can contain attacks when a component is found vulnerable and have a system-level approach for security. This can mean blocking access to a resource even if it seems like it should be open. For example, secrets shall be protected by requiring additional authentication and authorization to validate access modes e.g. debug. Validation of parameters even if the operation was protected (e.g. 
via authentication mechanisms). 45 | ** Software: A few new RISC-V features implement multiple walls inside the CPU and from the CPU to the SoC/platform - 1) Smepmp blocks access from Machine-mode software to unprivileged/ less-privileged components. 2) New privileged hypervisor mode adds a new privileged component that manages and restricts access from VMs. 3) Software isolation frameworks. 4) Mechanisms for uncore blocks (e.g. other masters), e.g. the usage of/access to a DMA by the software. 46 | . Simplicity 47 | ** Invent simpler architectures. Simpler mechanisms are harder to come up with, but easier to implement, validate and secure. Example: reduction of the size of Trusted Computing Base (TCB), which is an important security objective. 48 | -------------------------------------------------------------------------------- /specification/src/archive/zero_trust_principles.adoc: -------------------------------------------------------------------------------- 1 | [appendix] 2 | == Zero Trust Principles 3 | 4 | [cols="1,3,5,5",stripes=even,options="header"] 5 | |=== 6 | | *#* | *Principle* | *Description* | *Example* 7 | | 1 | Verify Explicitly | Verify every access explicitly without any trust assumptions | Data packet from any sender needs to be authenticated before using it to ensure non-repudiation 8 | | 9 | 10 | 2 | Least Privilege | A subject should be given only those privileges that it needs to complete its task | An entity having access to unauthorized resources, can lead to security vulnerabilities such as data leakage 11 | | 12 | 13 | 3 | Assume Breach | Assume everything eventually gets broken | A rogue peripheral attached, or a malware installed in a system can lead to system compromise 14 | | 15 | 16 | 4 | Fail Securely | Ensure that error conditions don’t leave secrets around | Any secrets left in the memory after a reboot can lead to data leakage 17 | | 18 | 19 | 5 | Complete Mediation | Unless a subject is given explicit access to an object, it 
should be denied access to that object | Caching of access privilege information, when reused without being updated dynamically can lead to unauthorized access to resources 20 | | 21 | 22 | 6 | Separation of Duty | Every agent in a system has only a single purpose | Augmenting the functionality of an entity with unrelated features or bug fixes can lead to creation of backdoors 23 | | 24 | 25 | 7 | Least Common | Access mechanisms should not be shared  | Every entity should have their own custom access permissions to avoid unauthorized access 26 | | 27 | 28 | 8 | Secure Weak Link | Protect the weakest link in the chain | A decryption key stored in unprotected memory can lead to loss of data confidentiality and availability 29 | | 30 | 31 | 9 | Defense in Depth | Build multiple layers/ walls of security | If a system has only a single layer of defense mechanism, and if it is bypassed, it can lead to system compromise 32 | | 33 | 34 | 10 | Simplicity | Keep it simple | Complexity can lead to vulnerabilities 35 | 36 | |=== 37 | -------------------------------------------------------------------------------- /specification/src/bibliography.adoc: -------------------------------------------------------------------------------- 1 | [bibliography] 2 | == Bibliography 3 | 4 | * [[[R1,1]]] The RISC-V Instruction Set Manual Volume II: Privileged 5 | Architecture Document Version 20211203 6 | (https://drive.google.com/file/d/1EMip5dZlnypTk7pt4WWUKmtjUKTOkBqh/view[link]) 7 | 8 | * [[[R2,2]]] RISC-V Security Model mapping of building blocks to best practices 9 | (https://docs.google.com/spreadsheets/d/1u56I03hxHCUKuPjByB2J2fPmVgPLKUnPcsemDyVh_T0/edit?gid=1395632845#gid=1395632845[link]) 10 | -------------------------------------------------------------------------------- /specification/src/chapter1.adoc: -------------------------------------------------------------------------------- 1 | 2 | [[chapter1]] 3 | 4 | == Introduction 5 | 6 | This specification provides a superset of 
normative requirements for building 7 | secure RISC-V systems using RISC-V security building blocks. It is aimed at 8 | developers of RISC-V technical specifications, as well as at designers of 9 | secure RISC-V systems. It is intended to be referenced by other documentation 10 | that will define a specific subset of requirements applicable for a given use 11 | case, certification regime, platform or profile. 12 | 13 | 14 | A few non-normative example use cases are provided, based on commonly used 15 | security deployment models. 16 | These are not intended to be exhaustive but are common enough to represent a 17 | wide range of deployments of secure products. They are accompanied by an 18 | appropriate set of use case specific security 19 | guidelines which are intended to help readers implement secure products for 20 | their specific use cases. 21 | 22 | The examples may be extended over time as required. 23 | 24 | RISC-V is currently not intending to create a security certification programme. 25 | This specification is provided as guidance for developing secure 26 | RISC-V systems which are certifiable within existing third party security 27 | certification programmes. 28 | 29 | This specification does not define any new RISC-V ISA or non-ISA extensions. 30 | Instead it refers to existing RISC-V extensions, as well as commonly used 31 | non-RVI architecture agnostic security features and processes. It aims to show 32 | how those can be combined, in commonly used examples, to create systems which 33 | are certifiabe within commonly used existing security certification programmes. 34 | 35 | All existing RISC-V extensions are associated with an RVI _proof of concept 36 | (PoC)_, providing a viable example implementation. Any non-RVI security feature 37 | or process referred to in this document has existing commonly used sample 38 | implementations equivalent to an RVI PoC. 
39 | 40 | The examples are not definitions of formal Protection Profiles (See: 41 | https://csrc.nist.gov/glossary/term/protection_profile). 42 | Formal protection profiles are typically provided by third party certification 43 | bodies for different ecosystems. The guidelines provided within the examples in 44 | this specification are intended to help readers adapt RISC-V security features 45 | to meet security requirements of commonly used third party protection profiles. 46 | Mapping documentation or protection profiles may also directly reference this 47 | document. 48 | 49 | This specification does not contain threat modelling or security assessment of 50 | individual RISC-V technical specifications. Individual RISC-V technical 51 | specifications are expected to use the Security Model as a guide to develop 52 | their own specific security analysis, including formal threat modeling where 53 | appropriate. For this purpose, all guidelines in this document are labelled to 54 | enable referencing from other specifications. Specific security analysis in the 55 | context of a RISC-V technical specification may require testing and a proof of 56 | concept as per normal RISC-V development processes for RISC-V technical 57 | specifications. 58 | 59 | Security is an evolving area where new use cases and new threats can emerge at 60 | any time. This specification represents the RISC-V security model and best 61 | practice as of the date of publication of this document. 62 | 63 | New versions of this document may be developed and released as and when 64 | required. 65 | 66 | === Requirements and tracking 67 | 68 | This is a normative specification. However, the specification does not mandate 69 | the adoption of any requirements. 
These requirements are intended to be 70 | referenced by current and future specifications, both RISC-V and external 71 | specifications, where a set, specific to the use case, certification regime, 72 | platform or profile under consideration will be documented. A set of in-scope 73 | security threats are also defined in this specification. These threats are 74 | intended to be referenced by the threat models of current and future 75 | specifications, both RISC-V and external specifications. 76 | This specification expresses trackable requirements and threats using the 77 | following format: 78 | 79 | [width=100%] 80 | [%header, cols="5,20"] 81 | |=== 82 | | ID# 83 | | Requirement 84 | 85 | | SR_CAT_NNN 86 | | The `SR_CAT` is a "Security Requirement CATegory" prefix that logically groups 87 | the requirements (e.g. SR_UPD denotes security requirements related to updates, 88 | and SR_ATT denotes security requirements related to attestation) and is followed 89 | by 3 digits - `NNN` - assigning a numeric ID to the requirement. 90 | 91 | The requirements use the key words "MUST", "MUST NOT", "REQUIRED", "SHALL", 92 | "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", 93 | and "OPTIONAL" that are to be interpreted as described in 94 | https://www.ietf.org/rfc/rfc2119.txt[RFC 2119] when, and only when, they appear 95 | in all capitals, as shown here. When these words are not capitalized, they have 96 | their normal English meanings. 97 | 98 | | T_CAT_NNN 99 | | The `T_CAT` is a "Threat CATegory" prefix that logically groups 100 | the threats considered in scope (e.g. T_LGC denotes threats categorised by 101 | logical attacks) and is followed 102 | by 3 digits - `NNN` - assigning a numeric ID to the threat. 103 | 104 | |=== 105 | 106 | A requirement or a group of requirements may be followed by non-normative text 107 | providing context or justification for the requirement. 
The non-normative text 108 | may also be used to reference sources that are the origin of the requirement. 109 | 110 | NOTE: Some sections may include duplicate requirements, both recommending and 111 | mandating the same feature. A specific use case must define if the feature is 112 | recommended or required. 113 | 114 | Trackable requirements are intended for ease of reference across dependent 115 | specifications. 116 | 117 | === Relationship with external protection profiles 118 | 119 | To assist with articulating security guidelines relevant to the included 120 | examples, this specification references external 121 | protection profiles as appropriate. Such references are not intended to mandate 122 | any specific implementations, but to provide guidelines on how RISC-V security 123 | building blocks may be used to comply with those protection profiles. 124 | 125 | Typically, protection profiles cover some or all of: 126 | 127 | * Security reference architectures and taxonomy 128 | * Hardware and software security requirements 129 | * Interfaces and programming models 130 | * Reference firmware/software 131 | * Certification programs 132 | * Miscellaneous processes and methodology 133 | 134 | The following are examples of some external protection profiles that are 135 | referenced by this specification: 136 | 137 | [width=100%] 138 | [%header, cols="5,15"] 139 | |=== 140 | | Profile 141 | | Description 142 | 143 | | Global Platforms (GP) 144 | | Trusted execution environments (TEE) and trusted firmware for mobile, 145 | connected clients, and IoT. + 146 | Secure element (SE) for tamper resistant storage of and operations on 147 | cryptographic secrets. + 148 | SESIP certification. + 149 | https://globalplatform.org/ 150 | 151 | | Platform Security Architecture (PSA) 152 | | Platform security requirements for connected devices. + 153 | PSA Certified. 
+ 154 | https://www.psacertified.org/ 155 | 156 | | Trusted computing group (TCG) 157 | | Trusted platform module (TPM) and Device identifier composition engine (DICE) 158 | for trusted platforms. + 159 | TCG certification. + 160 | https://trustedcomputinggroup.org/ 161 | 162 | | Confidential computing consortium 163 | | Common principles and protocols for protecting data in use (confidential 164 | computing). + 165 | https://confidentialcomputing.io/ 166 | 167 | | NIST 168 | | Widely used US standards for security processes, protocols and algorithms. 169 | Examples for the purposes of this specification: + 170 | NISTIR 8259 - IoT device cybersecurity capability + 171 | SP800-207 - Zero Trust Architecture + 172 | https://www.nist.gov/ 173 | |=== 174 | 175 | This is not an exhaustive list, more examples can be found in the reference 176 | section of this specification. 177 | 178 | -------------------------------------------------------------------------------- /specification/src/chapter2.adoc: -------------------------------------------------------------------------------- 1 | :imagesdir: ../images 2 | 3 | [[chapter2]] 4 | 5 | == RISC-V security model overview 6 | 7 | The aim of this chapter is to define common taxonomies and principles for 8 | secure RISC-V systems as used in the rest of this specification as well as other RISC-V specifications. It 9 | is divided into the following sections: 10 | 11 | * Reference model + 12 | Defines a set of generic hardware and software subsystems used in examples and 13 | use cases to describe secure systems. 14 | 15 | * Adversarial model + 16 | Defines common attack types on secure systems, and identifies RISC-V extensions 17 | which can aid mitigation. 18 | 19 | * Ecosystem security objectives + 20 | Defines common security features and functional guidelines, used to deploy 21 | trustworthy devices in an ecosystem. 
22 | 23 | === Reference model 24 | 25 | [caption="Figure {counter:image}: ", reftext="Figure {image}"] 26 | [title= "Generic security reference model"] 27 | image::img_ch2_reference-model.png[] 28 | 29 | The figure above outlines a generic security reference model and taxonomy. This specification 30 | is oriented around this reference model. The model is not tied to any particular implementation. 31 | 32 | Most systems are built with multiple software components each managing _assets_ 33 | that need to be protected. These components are often sourced from multiple 34 | different supply chains. The security reference model uses the generic term 35 | _domain_ to identify a logically isolated region, with at least some private 36 | resources and execution state not accessible to at least some other domain(s). 37 | The figure above uses two domains for illustrative purposes but some 38 | use-cases can require more. This specification does not imply or limit the number 39 | of domains or the type of use cases a RISC-V system can support. See 40 | xref:chapter4.adoc[Use case examples] for more comprehensive and specific examples. 41 | 42 | Logically isolated domains at the software level are typically reflected in logically isolated domains at the system hardware level, where hardware resources can be assigned or restricted to specific software domains. For example, device DMA transfers can be restricted to memory assigned to a particular domain. 43 | 44 | ==== Assets 45 | 46 | Examples of assets include: 47 | 48 | * Cryptographic keys and credentials 49 | * User data 50 | * Proprietary models 51 | * Secret algorithms 52 | 53 | In this specification, a _hardware provisioned asset_ is an immutable asset 54 | provisioned in hardware by a security provisioning process, before a device is 55 | used in a production environment. For example, hardware provisioned keys or 56 | identities. 
57 | 58 | ==== Trusted Computing Base (TCB) 59 | 60 | The _Trusted Computing Base (TCB)_ of any system function is the totality of 61 | protection mechanisms within a computer system, including hardware, 62 | firmware, and software - the combination responsible for enforcing a security 63 | policy. 64 | 65 | The following are examples of software components that can be a part of some function's TCB: 66 | 67 | * An operating system 68 | * A hypervisor and a guest operating system 69 | * A TEE security manager 70 | * Hosting services such as orchestration and server provisioning software 71 | 72 | ==== Root of trust 73 | 74 | A _root of trust (RoT)_ is the foundation on which all secure operations of a system depend. A RoT is typically a combination of a minimal amount of hardware and software that has to be implicitly trusted by all system components. 75 | 76 | A RoT supports fundamental security services, for example: 77 | 78 | * Boot and attestation 79 | * Security life cycle management 80 | * Key derivations and sealing (sealing is defined in a later section) 81 | * Security provisioning 82 | 83 | Depending on use case and ecosystem requirements, a RISC-V RoT can be: 84 | 85 | * Hart firmware (FW RoT) 86 | * A dedicated trusted subsystem (HW RoT) supporting a FW RoT 87 | 88 | Using a HW RoT moves critical functions and assets off a Hart to a dedicated and possibly isolated trusted subsystem, which can 89 | provide stronger protection against physical and logical attacks. 90 | 91 | The HW RoT acts as a _primary root of trust_ on the system. 92 | 93 | NOTE: It is common for secure systems to support multiple trust chains with 94 | their own root of trust. For example, a TPM can be a root of trust for UEFI 95 | boot flows within a runtime environment while a SIM can be a root of trust for 96 | user identity management. + 97 | + 98 | For the purpose of this document, these should be treated as _secondary roots of 99 | trust_. 
+ 100 | + 101 | The HW RoT can manage the security life cycle of a secondary root of trust (booting etc). 102 | 103 | [#cat_sr_sub_rot] 104 | [width=100%] 105 | [%header, cols="5,20"] 106 | |=== 107 | | ID# 108 | | Requirement 109 | 110 | | SR_ROT_001 111 | | A complex secure system MUST implement a HW RoT 112 | 113 | | SR_ROT_002 114 | | A simpler system SHOULD implement a HW RoT 115 | 116 | |=== 117 | 118 | An example of a complex system is one with multiple, out-of-order, cache-coherent harts. 119 | 120 | An example of a simpler system is one with a single, in-order, hart such as in a microcontroller. 121 | 122 | NOTE: In this document, the terms "FW RoT" and "HW RoT" will be used as defined 123 | above. The term "RoT" on its own can be used where a rule or a rationale applies 124 | to either model. 125 | 126 | ==== Isolation 127 | 128 | Assets can be protected by _isolation_. Isolation reduces dependencies between 129 | components, and reduces the amount of software that needs to be trusted. 130 | 131 | Isolation protects _resources_: 132 | 133 | * Memory and memory mapped devices 134 | * _Execution state_, including Hart register state 135 | 136 | Examples of isolation mechanisms include: 137 | 138 | * Privilege based isolation + 139 | More privileged software is able to enforce security guarantees for less 140 | privileged software. 141 | * Physical memory isolation + 142 | More privileged software controls memory access for less privileged software. 143 | * Domain isolation + 144 | Software in one domain cannot access or modify resources assigned to a different 145 | domain (without consent), regardless of privilege level. + 146 | (Higher privileged software in one domain cannot access resources assigned to a 147 | lower privileged software in a different domain) 148 | * Virtualization + 149 | Virtualization creates and manages _virtual resources_ - compute, memory, 150 | devices - independent of actual physical hardware. 
A system, or individual 151 | domains, can be virtualized. 152 | 153 | On complex systems the TCB can grow large and become difficult to certify and 154 | attest. 155 | 156 | Domain isolation enables confidential workloads to be separated from complex 157 | hosting software, including other workloads. The TCB of a confidential workload 158 | can be reduced to a domain security manager in a confidential domain, and the 159 | RoT, while allowing the main runtime environment in a separate hosting domain 160 | to remain in control of resource management. 161 | 162 | Examples of confidential workloads include: 163 | 164 | * Platform security services - for example: secure storage, user identity 165 | management, payment clients, DRM clients 166 | * Hosted confidential third party workloads 167 | 168 | RISC-V has a range of isolation mechanisms available and in development. 169 | 170 | [#cat_sr_sub_ism] 171 | [width=100%] 172 | [%header, cols="10,25,5,5,5,10"] 173 | |=== 174 | | Technology 175 | | Use Case 176 | | Privilege level 177 | | Memory 178 | | Granularity 179 | | Limitations 180 | 181 | | PMP, ePMP 182 | | Boot code isolation, code and data isolation by privilege level. + 183 | Building block for simple trusted execution isolation using high privilege security monitor 184 | | M 185 | | Physical 186 | | Fine Grained 187 | | Switching overhead, limited resource 188 | 189 | | SPMP 190 | | OS managed code and data isolation by privilege level. 
+ 191 | Building block to allow multiple OS to manage U mode isolation 192 | | S 193 | | Physical 194 | | Fine Grained 195 | | Switching overhead, limited resource 196 | 197 | | Virtual Memory 198 | MMU 199 | | S - U, U - U isolation + 200 | Guest – Guest isolation (VS–VS) + 201 | Host – Guest isolation (HS-VS) 202 | | S + 203 | HS/VS 204 | | Virtual 205 | | Page Based 206 | | 207 | 208 | | IOPMP 209 | | System Level PMP 210 | | n/a 211 | | Physical 212 | | Page Based 213 | | 214 | 215 | | Pointer Masking 216 | | Simple SW based memory tagging, memory range restriction 217 | | S U 218 | | Both 219 | | Coarse 220 | | 221 | 222 | | Smmpt, SDID 223 | | Supervisor domains and memory protection tables, building block for confidential computing, trusted execution. + 224 | S-S isolation 225 | | S 226 | | Physical 227 | | Page or larger 228 | | 229 | 230 | | Hardware Fault Isolation 231 | | Simple memory range based task isolation. Accelerates isolation of containers for webasm etc. 232 | | U 233 | | Virtual 234 | | Fine Grained 235 | | 236 | 237 | | Memory Tagging 238 | | Faults on access to an incorrect TAG. 239 | used for debug, garbage collection, security isolation 240 | | S U 241 | | Virtual 242 | | tbd 243 | | Probabilistic, performance impact, + 244 | tag storage overhead 245 | 246 | | CHERI 247 | | Full Capability based access for memory safety and isolation 248 | | M S U 249 | | Both 250 | | Fine Grained 251 | | HW/SW impact 252 | 253 | |=== 254 | 255 | ==== Device assignment 256 | 257 | Isolation policy needs to extend to device assignment: 258 | 259 | * Physical memory access control for device initiated transactions 260 | * Virtual memory translation for virtualized device transactions 261 | * Interrupt management across privilege and domain boundaries 262 | 263 | These policies can be enforced by system level hardware, controlled by Hart 264 | firmware. 
265 | 266 | ==== Invasive subsystems 267 | 268 | _Invasive subsystems_ include any system or Hart feature which could 269 | break security guarantees, either directly or indirectly. For example: 270 | 271 | * External debug 272 | * Power and timing management 273 | * RAS (_reliability, accessibility, serviceability_) 274 | 275 | [#cat_sr_sub_inv] 276 | [width=100%] 277 | [%header, cols="5,20"] 278 | |=== 279 | | ID# 280 | | Requirement 281 | 282 | | SR_INV_001 283 | | Invasive subsystems MUST be controlled, or moderated, by a RoT. 284 | 285 | | SR_INV_002 286 | | Invasive subsystems SHOULD be enabled separately for M-mode & 287 | non-M-mode software. 288 | 289 | | SR_INV_003 290 | | Invasive subsystems SHOULD be enabled separately for individual domains 291 | 292 | |=== 293 | 294 | ==== Event counters 295 | 296 | Event counters are commonly used for performance management and resource 297 | allocation on systems. 298 | 299 | However, they can pose a security risk. For example, a workload can maliciously attempt to infer another workload's 300 | secrets by monitoring that other workload's operation. The victim workload can be at the same, lower, or higher 301 | privilege than the malicious workload. 302 | 303 | [#cat_sr_sub_pmu] 304 | [width=100%] 305 | [%header, cols="5,20"] 306 | |=== 307 | | ID# 308 | | Requirement 309 | 310 | | SR_PMU_001 311 | | Lower privileged software MUST NOT be able to monitor higher privileged 312 | software. 313 | 314 | | SR_PMU_002 315 | | Software in one domain MUST NOT be able to monitor software in a different 316 | domain, without consent. 317 | 318 | |=== 319 | 320 | ==== Platform quality of service 321 | 322 | More complex systems, such as server platforms, can provide _platform quality of service (QoS)_ features beyond the capabilities of basic event counters. 
Platform QoS features include any Hart and system hardware and firmware aimed at managing access to 323 | shared physical resources across workloads while minimizing contention. 324 | 325 | For example: 326 | 327 | * Memory bandwidth management 328 | * Cache allocation policies across workloads, including workload prioritization 329 | * Hart allocation policies across workloads 330 | 331 | These types of features rely on monitoring the resource utilization of workloads, 332 | similar to event counters, and on the optimization of resource allocation policies. 333 | 334 | [#cat_sr_sub_qos] 335 | [width=100%] 336 | [%header, cols="5,20"] 337 | |=== 338 | | ID# 339 | | Requirement 340 | 341 | | SR_QOS_001 342 | | Lower privileged software MUST NOT be able to observe QoS events or attributes concerning higher privileged 343 | software. 344 | 345 | | SR_QOS_002 346 | | Software in one domain MUST NOT be able to observe QoS events or attributes concerning a different 347 | domain, without consent. 348 | 349 | |=== 350 | 351 | ==== Denial of service 352 | 353 | The RISC-V security model is primarily concerned with protection of assets. It is not concerned with providing service 354 | guarantees. 355 | 356 | For example, a hosting environment is free to apply its own resource allocation policy to relevant workloads. This can 357 | include denying service to some workloads. 358 | 359 | [#cat_sr_sub_dos] 360 | [width=100%] 361 | [%header, cols="5,20"] 362 | |=== 363 | | ID# 364 | | Requirement 365 | 366 | | SR_DOS_001 367 | | Lower privileged software MUST NOT be able to deny service to higher 368 | privileged software, or other isolated workloads at the same privilege level. 
369 | 370 | | SR_DOS_002 371 | | Software in one domain SHOULD NOT be able to deny service to software in a different domain 372 | 373 | |=== 374 | 375 | Higher privileged software must always be able to enforce its own resource 376 | management policy without interference, including scheduling, resource 377 | assignment and revocation policies. 378 | 379 | Similarly, a hosting domain owning resource allocation and host management across a system normally has to be able to enforce its own policies across domains. Including denying service. But other domains should not be able to deny service to the hosting domain, or to other domains. 380 | 381 | === Adversarial model 382 | 383 | For the purpose of this specification, the main goal of an adversary is to gain 384 | unauthorized access to _resources_ - memory, memory mapped devices, and 385 | execution state. For example, to access sensitive assets, to gain privileges, 386 | or to affect the control flow of a victim. 387 | 388 | In general, adversaries capable of mounting the following broad classes of 389 | attacks should be considered by system designers: 390 | 391 | * Logical + 392 | The attacker and the victim are both processes on the same system. 393 | 394 | * Physical + 395 | The victim is a process on a system, and the attacker has physical access to 396 | the same system. For example: probing, interposers, glitching, and disassembly. 397 | 398 | * Remote + 399 | The victim is a process on a system, and the attacker does not have physical or 400 | logical access to the system. For example, radiation or power fluctuations, or 401 | protocol level attacks on connected services. 402 | 403 | At an implementation level there can be further distinctions, for example the degree of proximity required to execute a remote or a physical attack as defined above. However, this document does not make any finer grained distinctions other than logical, physical and remote. 
404 | 405 | Attacks can be direct, indirect or chained: 406 | 407 | * Direct + 408 | An adversary gains direct access to a resource belonging to the victim. For 409 | example: direct access to the victim's memory or execution state, or direct 410 | control of the victim's control flow. 411 | 412 | * Indirect + 413 | An adversary can use a side channel to access or modify the content of a resource owned by the victim. 414 | For example: by analyzing timing patterns of an operation by a victim to reveal 415 | information about data used in that operation, or launching row-hammer style 416 | memory attacks to affect the contents of memory owned by the victim. 417 | 418 | * Chained + 419 | An adversary is able to chain together multiple direct and indirect attacks to 420 | achieve a goal. For example, using a software interface exploit to affect the call 421 | stack such that control flow is redirected to the adversary's code. 422 | 423 | The threats considered in-scope and the required level of protection will vary depending on use case. For example, a HW RoT would likely have a large set of threats that are considered applicable. Mitigating these threats may require protection against complex or advanced physical attacks. A Software based TEE may limit the threats considered applicable, and therefore the required mitigations. 424 | 425 | This specification is primarily concerned with ISA level mitigations against logical attacks. 426 | 427 | Physical or remote attacks in general need to be addressed at system, protocol or governance level, and can require additional non-ISA mitigations. However, some ISA level mitigations can also help provide some mitigation against physical or remote attacks and this is indicated in the tables below. 428 | 429 | Finally, this specification does not attempt to rate attacks by severity, or by adversary skill level. Ratings tend to depend on use case specific threat models and requirements. 
430 | 431 | 432 | ==== Logical 433 | 434 | [#cat_sr_sub_lgc] 435 | [width=100%] 436 | [%header, cols="5,5,5,10,15,10"] 437 | |=== 438 | | ID# 439 | | Threat 440 | | Type 441 | | Description 442 | | Current RISC-V mitigations 443 | | Planned RISC-V mitigations 444 | 445 | | T_LGC_001 446 | | Unrestricted access 447 | | Direct + 448 | Logical 449 | | Unauthorized direct access to resources in normal operation. 450 | a| * RISC-V privilege levels 451 | * RISC-V isolation (for example: PMP/ePMP, sPMP, MTT, supervisor domains) 452 | * RISC-V hardware enforced virtualization (H extension, MMU) 453 | | CHERI 454 | 455 | | T_LGC_002 456 | | Transient execution attacks 457 | | Chained + 458 | Logical 459 | | Attacks on speculative execution implementations. 460 | | Known (documented) attacks, except Spectre v1, are specific to particular 461 | micro-architectures. Micro-architecture for RISC-V systems is implementation 462 | specific, but must not introduce such vulnerabilities. + 463 | + 464 | This is an evolving area of research. + 465 | + 466 | For example: + 467 | https://meltdownattack.com/[Spectre and meltdown papers] + 468 | https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html[Intel 469 | security guidance] + 470 | https://developer.arm.com/documentation/#cf-navigationhierarchiesproducts=Arm%20Security%20Center,Speculative%20Processor%20Vulnerability[Arm speculative 471 | vulnerability] 472 | | Fence.t, or similar future extensions, may at least partially mitigate against Spectre v1. 473 | 474 | | T_LGC_003 475 | | Interface abuse 476 | | Chained + 477 | Logical 478 | | Abusing interfaces across privilege or isolation boundaries, for example to 479 | elevate privilege or to gain unauthorized access to resources. 
a| * RISC-V privilege levels 481 | * RISC-V isolation 482 | | High assurance cryptography 483 | 484 | | T_LGC_004 485 | | Event counting 486 | | Direct + 487 | Logical 488 | | For example, timing processes across privilege or isolation boundaries to 489 | derive information about confidential assets. 490 | a| * Data-independent timing instructions 491 | * Performance counters restricted by privilege and isolation boundaries 492 | (sscofpmf, smcntrpmf) 493 | | 494 | 495 | | T_LGC_005 496 | | Redirect control flow 497 | | Chained + 498 | Logical 499 | | Unauthorized manipulation of call stacks and jump targets to redirect a 500 | control flow to code controlled by an attacker. 501 | a| * Shadow stacks (Zicfiss) 502 | * Landing pads (Zicfilp) 503 | | CHERI, + 504 | Memory Tagging 505 | 506 | | T_LGC_006 507 | | Memory safety 508 | | Logical 509 | | Unauthorized access to resources within an isolated component. For example, pointer or allocation errors (temporal memory safety), or buffer overflows (spatial memory safety). 510 | a| * RISC-V pointer masking (J-extension) + 511 | * Shadow stacks (Zicfiss) + 512 | * Landing pads (Zicfilp) + 513 | + 514 | Memory safe programming, for example: + 515 | https://www.cisa.gov/sites/default/files/2023-12/CSAC_TAC_Recommendations-Memory-Safety_Final_20231205_508.pdf + 516 | | Architectural sandboxing, such as HFI. + 517 | Capability based architecture, such as CHERI. 
518 | 519 | | T_LGC_007 520 | | Architectural Covert Channel 521 | | Logical 522 | | Execution environment is unaware of, or doesn't swap/sanitize CSRs on context switch, creating a covert communication channel between user threads or guest OSs 523 | a| * Smstateen + 524 | * Ssstateen 525 | | 526 | 527 | |=== 528 | 529 | ==== Physical and remote 530 | 531 | [#cat_sr_sub_phy] 532 | [width=100%] 533 | [%header, cols="5,10,10,15,15"] 534 | |=== 535 | | ID# 536 | | Threat 537 | | Type 538 | | Description 539 | | RISC-V recommendations 540 | 541 | | T_PHY_001 542 | | Analysis of physical leakage 543 | | Direct or indirect + 544 | Physical or remote 545 | | For example, observing radiation, power line patterns, or temperature. 546 | a| * Implement robust power management and radiation control 547 | * Data Independent Execution Latency (Zkt, Zvkt) 548 | 549 | 550 | | T_PHY_002 551 | | Physical memory manipulation 552 | | Direct + 553 | Logical or physical 554 | a| * Using NVDIMM, interposers, or physical probing to read, record, or replay 555 | physical memory. 556 | a| * Implement robust memory error detection, cryptographic memory protection, 557 | or physical tamper resistance. 558 | * Supervisor domain ID, privilege level, or MPT attributes, may be used to 559 | derive memory encryption contexts at domain or workload granularity 560 | * Provide a degree of tamper resistance. 561 | 562 | // * Physical attacks on hardware shielded locations to extract hardware 563 | // provisioned assets 564 | 565 | 566 | | T_PHY_003 567 | | Boot attacks 568 | | Chained + 569 | Logical or physical 570 | a| * Glitching to bypass secure boot 571 | * Retrieving residual confidential memory after a system reset 572 | a| Implement robust power management, and adopt glitch-safe software techniques. + 573 | + 574 | Industry best practice should be followed. 
For example: ensuring un-initialized variables are not used; implementing integrity checking of critical data and hardware provisioned parameters; implementing redundancy in encoding, verification, branching, and critical logic. + 575 | + 576 | Adopt randomization techniques between boot sessions. For example: cryptographic memory protection with at least boot freshness; register randomization. 577 | 578 | | T_PHY_004 579 | | Subverting supply chains 580 | | Remote 581 | | Infiltration or collusion to subvert security provisioning chains, software 582 | supply chains and signing processes, hardware supply chains, attestation 583 | processes, development processes (for example, unfused development hardware or 584 | debug authorizations) 585 | | Deploy appropriate governance, accreditation, and certification processes for 586 | an ecosystem. 587 | 588 | | T_PHY_005 589 | | Fault Injection 590 | | Direct or remote + 591 | Logical or physical 592 | a| * Rowhammer-type software attacks to manipulate nearby memory cells. 593 | * Fault injection attacks (glitching, laser, electromagnetic, etc.) to extract hardware-provisioned assets, modify the control flow, circumvent countermeasures, etc. 594 | a| * Implement robust memory error detection, cryptographic memory protection, or physical tamper resistance. 595 | * Provide a level of tamper resistance, e.g., through RoT attestation, redundancy during execution, etc. 596 | * Enforce proper access control for the DVFS configuration. 597 | * Employ lockstep processors for security-critical devices. 598 | * Employ physical sensors to detect attack 599 | 600 | |=== 601 | 602 | === Ecosystem security objectives 603 | 604 | Ecosystem security objectives identify a set of common features and mechanisms 605 | that can be used to enforce and establish trust in an ecosystem. 606 | 607 | These features are defined here at a functional level only. 
Technical 608 | requirements are typically use case specific and defined by external 609 | certification programs. 610 | 611 | In some cases RISC-V non-ISA specifications can provide guidance or protocols. 612 | This is discussed more in use case examples later in this specification. 613 | 614 | ==== Secure identity 615 | 616 | [cat_sr_sub_idn] 617 | [width=100%] 618 | [%header, cols="5,20"] 619 | |=== 620 | | ID# 621 | | Requirement 622 | 623 | | SR_IDN_001 624 | | A secure platform MUST be securely identifiable 625 | |=== 626 | 627 | Identifies the immutable part of the secure platform - immutable hardware, 628 | configurations, and firmware. Immutable components cannot change after 629 | the completion of security provisioning (see also security life cycle management). 630 | 631 | A _secure identity_ is an element capable of generating a cryptographic signature 632 | which can be verified by a remote party. This is usually an asymmetric key pair, but 633 | symmetric signing schemes can also be used. Secure identities are typically used as part 634 | of an attestation process. 635 | 636 | A secure identity's scope and uniqueness is use case dependent. For example, a secure identity can be: 637 | 638 | * Unique to a system 639 | * Shared among multiple systems with the same immutable security properties 640 | (group based anonymization) 641 | * Anonymized using an attestation protocol supporting a third party 642 | anonymization service 643 | 644 | A secure identity can be directly hardware provisioned, or derived from other hardware 645 | provisioned assets. 646 | 647 | ==== Security life cycle 648 | 649 | [#cat_sr_sub_lfc] 650 | [width=100%] 651 | [%header, cols="5,20"] 652 | |=== 653 | | ID# 654 | | Requirement 655 | 656 | | SR_LFC_001 657 | | A secure system MUST manage a security life cycle. 
658 | |=== 659 | 660 | [caption="Figure {counter:image}: ", reftext="Figure {image}"] 661 | [title= "Generic security life cycle"] 662 | image::img_ch2_security-lifecycle.png[] 663 | 664 | [#security-lifecycle] 665 | A security life cycle reflects the trustworthiness of a system during its 666 | lifetime and reflects the life cycle state of hardware provisioned assets. 667 | 668 | It can be extended as indicated below to cover additional security provisioning 669 | steps such as device onboarding, device activation, user management, and RMA (Return Merchandize Authorization) 670 | processes. These are use case or ecosystem specific and out of scope of this 671 | specification. 672 | 673 | For the purpose of this specification, _revealing debug_ includes any HW or FW 674 | debug capability which: 675 | 676 | * Could break security guarantees or could expose assets 677 | * Is not part of an attested trust contract with a relying party 678 | 679 | Examples of revealing debug include revealing logging, external debug or 680 | boundary scans, dedicated debug builds of software components, or enabling 681 | self-hosted debug for a component. 682 | 683 | 684 | 685 | Depending on use case, an attested software component can include debug 686 | capabilities managed through an ecosystem defined governance process 687 | - _trusted debug_. For example, self-hosted debug or external debug enabled following an ecosystem 688 | specific authorization process. In this case the debug capability, and the 689 | associated governance, is part of the trust contract with a relying party. 
690 | 691 | For the purpose of this specification, a minimum security life cycle includes at 692 | least the following states: 693 | 694 | * Manufacture - The system may not yet be locked down and has no hardware 695 | provisioned assets 696 | * Security provisioning - The process of provisioning hardware provisioned 697 | assets + 698 | Depending on ecosystem requirement, security provisioning may be performed in 699 | multiple stages through a supply chain and may require additional sub-states. 700 | These types of application specific extensions are out of scope of this 701 | specification. 702 | * Secured - hardware provisioned assets are locked (immutable), only authorized 703 | software can be used, and revealing debug is not enabled. + 704 | Additional specific provisioning stages can take place in this 705 | state - for example network onboarding and device activation, App/Device 706 | attestation or user identity management. This is out of scope of this 707 | specification. 708 | * Recoverable debug - part of the system is in a revealing debug state + 709 | At least the RoT is not compromised and hardware provisioned secrets remain 710 | protected. + 711 | This state is both attestable and recoverable. For example, revealing debug is 712 | enabled for a domain without compromising another domain or any RoT services. 713 | * Terminated - any system change which could expose hardware provisioned assets 714 | + 715 | Typically hardware provisioned assets are made permanently inaccessible and 716 | revoked before entering this state. This also protects any derived assets such 717 | as attestation and sealing keys. 718 | 719 | A system may support re-provisioning from a terminated state, for example 720 | following repair/RMA. This can be viewed as equivalent to starting over from the 721 | security provisioning state, and creates a new instance with a new secure 722 | identifier. 
723 | 724 | [width=100%] 725 | [%header, cols="5,20"] 726 | |=== 727 | | ID# 728 | | Requirement 729 | 730 | | SR_LFC_002 731 | | Hardware provisioned assets MUST only be accessible while the system is in 732 | secured state, or a recoverable debug state (with the recoverable debug state in 733 | attestation evidence). 734 | 735 | | SR_LFC_003 736 | | Derived assets MUST only be available if a component is in secured state. 737 | |=== 738 | 739 | For example, returning garbage or some known test and debug value when attempting to read a hardware provisioned asset, unless the system is in a secured state, or a recoverable debug state. Derived assets would then also become unavailable in these states, though test and debug versions may be available. 740 | 741 | A derived asset in this context is any asset derived from hardware provisioned 742 | assets. For example attestation keys, or sealing keys for a supervisor domain. 743 | 744 | [width=100%] 745 | [%header, cols="5,20"] 746 | |=== 747 | | ID# 748 | | Requirement 749 | 750 | | SR_LFC_004 751 | | Revealing debug MUST be reflected in attestation. 752 | 753 | |=== 754 | 755 | _Attestable states_ are ones where the RoT and hardware provisioned assets are 756 | not compromised by debug and a valid attestation can be generated reflecting 757 | that state: 758 | 759 | * Secured 760 | * Recoverable debug 761 | 762 | In other states the system is not able to generate a valid attestation key. It 763 | is still _indirectly attestable_ as any generated attestation will not be signed 764 | correctly and can be rejected by a relying party. 765 | 766 | Trusted debug is part of a trust contract with a relying party and is application 767 | specific. The presence of trusted debug can be determined indirectly by a 768 | relying party through other attested properties, for example measurements.
769 | 770 | ==== Attestable services 771 | 772 | For the purpose of this specification a confidential service can be any 773 | isolated component on a system. For example, a hosted confidential workload, or 774 | an isolated application security service. 775 | 776 | [#cat_sr_sub_att] 777 | [width=100%] 778 | [%header, cols="5,20"] 779 | |=== 780 | | ID# 781 | | Requirement 782 | 783 | | SR_ATT_001 784 | | A confidential service, and all software and hardware components it depends 785 | on, MUST be attestable. 786 | |=== 787 | 788 | Attestation allows a remote relying party to determine the trustworthiness of a 789 | confidential service before submitting assets to it. Attestation aims to: 790 | 791 | * Verify the security state of a confidential service 792 | * Verify the security state of all software and hardware a confidential service 793 | depends on 794 | * Establish an attested secure connection to a confidential service 795 | 796 | Attestation can be direct or layered: 797 | 798 | * Direct + 799 | The whole system can be defined by a single security platform attestation. Eg : vertically integrated connected IoT 800 | devices and edge devices. 801 | * Layered + 802 | Enables parts of the attestation process to be delegated to lower privileged 803 | components. 804 | 805 | Direct and layered attestation are discussed in more detail in use case 806 | examples later in this specification. 
807 | 808 | [width=100%] 809 | [%header, cols="5,20"] 810 | |=== 811 | | ID# 812 | | Requirement 813 | 814 | | SR_ATT_002 815 | | A secure platform attestation MUST be signed by a HW RoT, if present, or 816 | else by a FW RoT 817 | 818 | | SR_ATT_003 819 | | A secure platform attestation MUST be signed using a hardware provisioned 820 | (directly or derived) secure identity 821 | 822 | | SR_ATT_004 823 | | A layered attestation MAY be signed by lower privileged software, itself 824 | attested by a security platform attestation 825 | 826 | | SR_ATT_005 827 | a| Layered attestations MUST be cryptographically bound such that a relying 828 | party can determine that they: 829 | 830 | * Were generated on the same system 831 | * Are fresh. 832 | 833 | |=== 834 | 835 | NOTE: Software interfaces should only support either direct attestation or 836 | layered attestation workflows, never both, to prevent impersonation attacks. 837 | 838 | ==== Authorized software 839 | 840 | Running unauthorized software can compromise the security state of the system. 841 | 842 | [#cat_sr_sub_aut] 843 | [width=100%] 844 | [%header, cols="5,20"] 845 | |=== 846 | | ID# 847 | | Requirement 848 | 849 | | SR_AUT_001 850 | | A system in secured or recoverable debug states MUST only load authorized 851 | software. 852 | 853 | | SR_AUT_002 854 | | A system in security provisioning state SHOULD only load authorized software. 855 | 856 | |=== 857 | 858 | Two complementary processes can be used to authorize software: 859 | 860 | * Measurement + 861 | In the context of this document, a measurement is a record of a present state of the system, which can be used by a remote party to verify the security state of the system. It is typically a cryptographic fingerprint, such as a running hash of memory combined with security lifecycle state and other attributes. Although depending on use case other kinds of measurements can be used. 
862 | * Verification + 863 | Verification is a process of establishing that a measurement is correct 864 | (expected) 865 | 866 | When a system in a security provisioning state doesn't restrict loading to only authorized software, other protection measures such as physical access protection, or device registration would be required. 867 | 868 | A boot process is typically layered, allowing software to be measured and 869 | verified in stages. Different measurement and verification policies can be 870 | employed at different stages. This is discussed further in use case examples 871 | later in this specification. The properties discussed below still apply to each 872 | stage. 873 | 874 | NOTE: Measurements can be calculated at boot (_boot state_), and sometimes also 875 | dynamically at runtime (_runtime state_). Measuring runtime state can be used as 876 | a robustness feature to mitigate against unauthorized runtime changes of static 877 | code segments. It is out of scope of this specification, though the principles 878 | discussed below can still be applied. 879 | 880 | Verification can be: 881 | 882 | * Local + 883 | A measurement is verified locally on the device. 884 | * Remote + 885 | A measurement is verified by a remote provisioning service, or a remote relying 886 | party. 887 | 888 | Verification can be: 889 | 890 | * Direct + 891 | The measurement is directly compared with an expected measurement from a signed 892 | authorization. 893 | * Indirect + 894 | The measurement is included in derivations of other assets, for example sealing 895 | keys, binding assets to a measured state. 896 | 897 | [#cat_sr_sub_msm] 898 | [width=100%] 899 | [%header, cols="5,20"] 900 | |=== 901 | | ID# 902 | | Requirement 903 | 904 | | SR_MSM_001 905 | | A secure platform MUST be measured. 906 | 907 | | SR_MSM_002 908 | | A secure platform MUST be verified, either directly or indirectly, before 909 | launching services which depend on the security platform.
910 | 911 | |=== 912 | 913 | Verification ensures the system has loaded authorized software 914 | 915 | [width=100%] 916 | [%header, cols="5,20"] 917 | |=== 918 | | ID# 919 | | Requirement 920 | 921 | | SR_MSM_003 922 | | A system MUST only use authorizations from trusted authority. 923 | |=== 924 | 925 | * Direct verification requires a signed image authorization from a trusted 926 | authority before loading an image + 927 | For example, a signed image, or a separately signed authorization 928 | message. 929 | * Indirect verification requires a signed authorization from a trusted authority 930 | for migrating assets bound to a previously measured state + 931 | For example, a signed provisioning message. 932 | 933 | Either way, only authorizations from trusted authorities should be used. For 934 | example, from a list of hardware provisioned or securely discovered trusted 935 | authorities. 936 | 937 | [width=100%] 938 | [%header, cols="5,20"] 939 | |=== 940 | | ID# 941 | | Requirement 942 | 943 | | SR_MSM_004 944 | | Local verification MUST be rooted in immutable boot code. 945 | |=== 946 | 947 | For example, ROM or locked flash, or rooted in a HW RoT itself rooted in 948 | immutable boot code. 949 | 950 | ==== System updates 951 | 952 | Over time, any mutable component may need updates to address 953 | vulnerabilities or functionality improvements. A system update can concern 954 | software, firmware, microcode, or any other updatable component on a system. 955 | 956 | [#cat_sr_sub_upd] 957 | [width=100%] 958 | [%header, cols="5,20"] 959 | |=== 960 | | ID# 961 | | Requirement 962 | 963 | | SR_UPD_001 964 | | All components on a system which are not immutable MUST be updatable. 965 | |=== 966 | 967 | Immutable components include at least immutable boot code. Some trusted 968 | subsystems can also include immutable software to meet specific security 969 | certification requirements. 
970 | 971 | System updates are typically layered so that updates can target only parts of a 972 | system and not a whole system. The properties discussed below still apply to 973 | any system update. 974 | 975 | [width=100%] 976 | [%header, cols="5,20"] 977 | |=== 978 | | ID# 979 | | Requirement 980 | 981 | | SR_UPD_002 982 | | A system update MUST be measured and verified before launch. 983 | |=== 984 | 985 | See <<_authorized_software>>. 986 | 987 | A system update can be: 988 | 989 | * Deferred + 990 | The update can only be effected after a restart of at least the affected 991 | component, and all of its dependents. 992 | * Live + 993 | The update can be effected without restarting any dependent components. 994 | 995 | [width=100%] 996 | [%header, cols="5,20"] 997 | |=== 998 | | ID# 999 | | Requirement 1000 | 1001 | | SR_UPD_003 1002 | | Updates affecting a security platform SHOULD be deferred. 1003 | 1004 | | SR_UPD_004 1005 | | Updates MAY be live if live update capability, and suitable governance, is 1006 | part of an already attested trust contract between a relying party and the 1007 | system. 1008 | |=== 1009 | 1010 | A system update changes the attested security state of the affected 1011 | component(s), as well as that of all other components that depend on it. It can 1012 | affect whether a dependent confidential service is still considered trustworthy 1013 | or not, as well as affect any derived assets such as sealing keys. 1014 | 1015 | [width=100%] 1016 | [%header, cols="5,20"] 1017 | |=== 1018 | | ID# 1019 | | Requirement 1020 | 1021 | | SR_UPD_005 1022 | | System updates MUST be monotonic 1023 | 1024 | | SR_UPD_006 1025 | | System updates SHOULD be robust against update failures 1026 | |=== 1027 | 1028 | Earlier versions could be carrying known vulnerabilities, or could be able to affect the safe 1029 | operation of a system in other ways. 
1030 | 1031 | For example, using derived anti-rollback counters (counter tree) rooted in a 1032 | hardware monotonic counter. 1033 | 1034 | A system can still support recovery mechanisms, with suitable governance, in 1035 | the case of update failures. For example, a fallback process or a dedicated 1036 | recovery loader. 1037 | 1038 | Success criteria for a system update are typically use case or ecosystem 1039 | specific and out of scope of this specification. Examples include local 1040 | watchdog or checkpoints, and network control through a secure update protocol, 1041 | and a dedicated recovery loader. 1042 | 1043 | [width=100%] 1044 | [%header, cols="5,20"] 1045 | |=== 1046 | | ID# 1047 | | Requirement 1048 | 1049 | | SR_UPD_007 1050 | | System updates, and authorization messages, SHOULD only be received from 1051 | trusted sources. 1052 | 1053 | |=== 1054 | 1055 | A system update is itself always verified before being launched. Verifying the 1056 | source as well can mitigate against attempts to inject adversary controlled 1057 | data into a local update process. Including into protected memory regions. 1058 | 1059 | ==== Isolation 1060 | Complex systems include software components from different supply chains, and 1061 | complex integration chains with different roles and actors. These supply chains 1062 | and integration actors often share mutual distrust: 1063 | 1064 | * Developed, certified, deployed and attested independently 1065 | * Protected from errors in, or abuse from, other components 1066 | * Protected from debugging of other components 1067 | * Contain assets which should not be available to other components 1068 | 1069 | Use cases later in this specification provide examples of RISC-V isolation 1070 | models. 
1071 | 1072 | [#cat_sr_sub_iso] 1073 | [width=100%] 1074 | [%header, cols="5,20"] 1075 | |=== 1076 | | ID# 1077 | | Requirement 1078 | 1079 | | SR_ISO_001 1080 | | Isolated software components SHOULD be supported 1081 | |=== 1082 | 1083 | An isolated component has private memory and private execution contexts not 1084 | accessible to other components. 1085 | 1086 | [width=100%] 1087 | [%header, cols="5,20"] 1088 | |=== 1089 | | ID# 1090 | | Requirement 1091 | 1092 | | SR_ISO_002 1093 | | Devices MUST not access memory belonging to an isolated component without 1094 | permission 1095 | |=== 1096 | 1097 | Isolation can also extend to other features, such as interrupts and debug. 1098 | 1099 | ==== Sealing 1100 | 1101 | Sealing is the process of protecting confidential assets on a system, typically 1102 | using sealing keys derived in different ways for different use cases as 1103 | discussed in this section. For example, from a hardware provisioned root key, 1104 | from a boot state (measurements, security life cycle state), or provisioned at 1105 | runtime by a remote provisioning system. 1106 | 1107 | Sealing can be: 1108 | 1109 | * Local + 1110 | Local sealing binds assets to a local device (hardware unique sealing) or to a 1111 | measured boot state. 1112 | * Remote + 1113 | Remote sealing binds assets to credentials provided by a remote provisioning 1114 | service following successful attestation. 1115 | 1116 | [#cat_sr_sub_slg] 1117 | [width=100%] 1118 | [%header, cols="5,20"] 1119 | |=== 1120 | | ID# 1121 | | Requirement 1122 | 1123 | | SR_SLG_001 1124 | | Sealed assets SHOULD only be possible to unseal in a secured state 1125 | 1126 | |=== 1127 | 1128 | For example, local sealing key derivations should take the security life cycle 1129 | state of the system into account. And remote sealing key provisioning should 1130 | always attest the system before releasing unsealing credentials or keys. 
1131 | 1132 | Local sealing can be: 1133 | 1134 | * Direct + 1135 | Direct sealing binds assets to sealing keys derived by a RoT. 1136 | * Layered + 1137 | Layered sealing enables delegation of some sealing key derivations to lower 1138 | privileged software. 1139 | 1140 | [width=100%] 1141 | [%header, cols="5,20"] 1142 | |=== 1143 | | ID# 1144 | | Requirement 1145 | 1146 | | SR_SLG_002 1147 | | Locally sealed assets MUST only be possible to unseal on the same physical 1148 | instance of a system that they were sealed on. 1149 | 1150 | |=== 1151 | 1152 | For example, using sealing keys derived from a hardware provisioned _hardware 1153 | unique key (HUK)_. 1154 | 1155 | [width=100%] 1156 | [%header, cols="5,20"] 1157 | |=== 1158 | | ID# 1159 | | Requirement 1160 | 1161 | | SR_SLG_003 1162 | | Locally sealed assets bound to a boot measurement MUST only be possible to 1163 | unseal if that measurement has not changed, or the system has received an 1164 | authorized update. 1165 | 1166 | |=== 1167 | 1168 | See <<_system_updates, system updates>> 1169 | 1170 | Sealing is discussed further in use case examples later in this document. 1171 | -------------------------------------------------------------------------------- /specification/src/chapter3.adoc: -------------------------------------------------------------------------------- 1 | [[chapter3]] 2 | 3 | == RISC-V security building blocks 4 | 5 | This chapter outlines brief descriptions of RISC-V security building blocks 6 | discussed in this specification, together with general guidelines and links to 7 | technical specifications. The requirement to use a specific mechanism is 8 | use case dependent. RISC-V has multiple mechanisms that can be used to achieve the 9 | same goal; the notes associated with SRs indicate where this is the case. 10 | 11 | See also the reference use cases chapter of this specification for common 12 | examples of how RISC-V security building blocks can be combined.
13 | 14 | === Isolation 15 | 16 | Isolation enables access restrictions on software components executing on a hart, as well 17 | as on device accesses. RISC-V enables: 18 | 19 | * Privilege based isolation 20 | * Physical memory access control (hart and device-initiated accesses) 21 | * Virtual memory management (hart and device virtualization) 22 | * Hypervisor extension 23 | * Supervisor domains 24 | 25 | ==== Privilege levels 26 | 27 | *See section 1.2 (Privilege Levels) of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 28 | ISA] specification.* 29 | 30 | Standard privilege levels - Machine mode (M), Supervisor mode (S), and User 31 | mode (U) - enable separation of more privileged software from less privileged 32 | software. 33 | 34 | ==== Hypervisor extension 35 | 36 | *See chapter 8 (Hypervisor Extension) of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 37 | ISA] specification.* 38 | 39 | The Hypervisor extension supports standard supervisor level hypervisors. It extends 40 | S mode into Hypervisor-extended supervisor mode (HS), and a virtual supervisor 41 | mode (VS) for guests. It also extends U mode into standard user mode (U) and 42 | virtual user mode (VU). 43 | 44 | Isolation of guests is enforced using two-stage address translation and 45 | protection. Two-stage address translation and protection is in effect in VS 46 | and VU modes. 47 | 48 | [#cat_sr_sub_hyp] 49 | [width=100%] 50 | [%header, cols="5,20"] 51 | |=== 52 | | ID# 53 | | Requirement 54 | 55 | | SR_HYP_001 56 | | For virtualised environments, Hypervisor extension MUST be supported 57 | 58 | |=== 59 | 60 | 61 | Alternatively sPMP can be used instead of MMU to support static partitioning 62 | hypervisors, for example on systems with hard and deterministic real time 63 | requirements [Note - The sPMP for Hypervisor extension has not been specified 64 | yet].
65 | 66 | MMU, PMP/Smepmp, and sPMP are discussed later in this chapter. 67 | 68 | ==== PMA 69 | 70 | *See section 3.6 (Physical Memory Attributes) of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 71 | ISA] specification.* 72 | 73 | _Physical memory attributes (PMA)_ are intended to capture inherent properties 74 | of the underlying hardware. For example, read-only ROM regions, or non-cachable 75 | device regions. Often PMA can be fixed at design time or at boot, but sometimes 76 | runtime PMA can be required. 77 | 78 | A separate hardware checker - _PMA checker_ - enforces PMA rules at runtime once 79 | a physical address is known. PMA rules are always checked on every physical 80 | access, and typically configured by region. 81 | 82 | ==== PMP 83 | 84 | *See section 3.7 (Physical Memory Protection) of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 85 | ISA] specification.* 86 | 87 | _Physical memory protection (PMP)_ enables M-mode to access-control physical 88 | memory for supervisor and U modes (with or without H-extension). 89 | 90 | [#cat_sr_sub_pmp] 91 | [width=100%] 92 | [%header, cols="5,20"] 93 | |=== 94 | | ID# 95 | | Requirement 96 | 97 | | SR_PMP_001 98 | | PMP configurations MUST only be directly accessible to machine mode 99 | | SR_PMP_002 100 | | PMP/smepmp OR MPT MUST be used to isolate M-mode from lower privilege levels 101 | |=== 102 | 103 | NOTE: Individual access controlled regions can be locked until the next system reset 104 | to create temporal isolation boundaries, such as protecting immutable boot code. 
105 | Depending on the use case, MPT may be an acceptable additional or alternative mechanism to the PMP/smepmp 106 | 107 | ==== Smepmp 108 | 109 | *See the https://github.com/riscv/riscv-tee/blob/main/Smepmp/Smepmp.pdf[PMP Enhancements for memory access and execution prevention on Machine mode] specification.* 110 | 111 | Smepmp extends PMP protection by allowing machine mode to restrict its own access to memory allocated to lower privilege levels. This can be 112 | used to mitigate against privilege escalation attacks. 113 | 114 | [#cat_sr_sub_smepmp] 115 | [width=100%] 116 | [%header, cols="5,20"] 117 | |=== 118 | | ID# 119 | | Requirement 120 | 121 | | SR_PMP_003 122 | | If PMP is supported then Smepmp MUST be supported. 123 | |=== 124 | 125 | ==== sPMP 126 | 127 | *See the https://github.com/riscv/riscv-spmp[RISC-V S-mode Physical Memory Protection (SPMP)] specification.* 128 | 129 | _Supervisor PMP (sPMP)_ enables supervisor mode to control physical memory 130 | access for U mode. 131 | 132 | sPMP allows supervisor mode to restrict its own access to memory allocated to 133 | lower privilege levels. This can be used to mitigate against privilege 134 | escalation attacks, for example. 135 | 136 | When combined with H-extension, sPMP can be nested so that the hypervisor can 137 | control memory allocations to its guests, and each guest can control its own 138 | memory allocations to its workloads. 139 | 140 | [width=100%] 141 | [%header, cols="5,20"] 142 | |=== 143 | | ID# 144 | | Requirement 145 | 146 | | SR_PMP_004 147 | | sPMP MUST be used to protect S-mode from lower privilege levels. 148 | 149 | | SR_PMP_005 150 | | sPMP MUST be used to protect U-mode workloads from other U-mode workloads. 
151 | 152 | | SR_PMP_006 153 | | sPMP configurations MUST only be directly accessible to machine mode and supervisor mode 154 | |=== 155 | 156 | NOTE: Dependent on the use case, it may be recommended or mandatory to use sPMP to protect supervisor mode from lower privilege levels (with or without the H-extension). The MMU may be an additional or alternative mechanism. 157 | 158 | NOTE: Dependent on the use case, Trusted execution environments may require or recommend the use of sPMP to isolate trusted applications from each other. 159 | The MMU may be an additional or alternative mechanism 160 | 161 | 162 | ==== MMU 163 | 164 | *See sections 4.3 to 4.6 (Page-Based Virtual-Memory Systems) of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 165 | ISA] specification.* 166 | 167 | _Memory management unit (MMU)_ enables address translation and protection for: 168 | 169 | * Isolating an OS from workloads, and workloads from each other on a system without H-extension (one-stage 170 | translation) 171 | * Isolating a hypervisor from a guest, on a system with H-extension (two-stage 172 | translation) 173 | 174 | [#cat_sr_sub_mmu] 175 | [width=100%] 176 | [%header, cols="5,20"] 177 | |=== 178 | | ID# 179 | | Requirement 180 | 181 | | SR_MMU_001 182 | | If the Sv extension is supported then 1st-stage page tables MUST be used to protect 183 | the S-Mode Supervisor domain from accesses made by U-Mode. 184 | 185 | | SR_MMU_002 186 | | If the H extension is supported, 1st-stage and/or G-stage page tables MUST be used to protect 187 | Supervisor domain H/S-mode from lower privilege levels. 188 | 189 | | SR_MMU_003 190 | | MMU MUST be used to protect resources assigned to one workload from other workloads 191 | |=== 192 | 193 | NOTE: Dependent on the use case, it may be recommended or mandatory to use MMU to protect supervisor mode from lower privilege levels (with or without the H-extension). The sPMP and PMP may be an additional or alternative mechanism.
194 | 195 | NOTE: Dependent on the use case, Trusted execution environments may require or recommend the use of MMU to isolate trusted applications from each other. The sPMP and PMP may be an additional or alternative mechanism. 196 | 197 | ==== Supervisor domains and MPT 198 | 199 | *See the https://github.com/riscv/riscv-smmtt[RISC-V Supervisor Domains Access 200 | Protection] specification.* 201 | 202 | Supervisor domains allow software components on the same hart to be developed, 203 | certified, deployed and attested independently of each other. 204 | 205 | A supervisor domain is an S-Mode compartment that is physically isolated from other supervisor domains. The memory, 206 | execution state and devices belonging to a supervisor domain are isolated from other supervisor domains. 207 | This isolation of supervisor domains and the context switching between them is managed by M-mode firmware. 208 | 209 | A supervisor domain is identified at an architecture level by a _supervisor domain 210 | id (SDID)_ held in the _mmpt_ CSR, managed by M-mode firmware. In addition to 211 | the SDID, the mmpt CSR may specify a root PPN for a Memory Protection Table (MPT). 212 | 213 | The _memory protection table (MPT)_ is a memory structure managed by machine 214 | mode that is used to manage physical memory permissions across supervisor domains. 215 | It is designed to enable page-based dynamic memory management across supervisor 216 | domain boundaries, under the control of the machine mode software 217 | responsible for resource management. 218 | 219 | [#cat_sr_sub_mtt] 220 | [width=100%] 221 | [%header, cols="5,20"] 222 | |=== 223 | | ID# 224 | | Requirement 225 | 226 | | SR_MPT_001 227 | | MPT MUST be used to protect M-mode from lower privilege 228 | levels 229 | 230 | | SR_MPT_002 231 | | MPT configurations SHOULD only be directly accessible to machine mode.
232 | 233 | |=== 234 | 235 | 236 | 237 | NOTE: The M-Mode resident software responsible for managing context switches and communication between supervisor 238 | domains is called the Root Domain. Depending on the use case, MPT can 239 | be sufficient for protecting the Root Domain by enabling M-mode 240 | to ensure that its own resources are never assigned to any other domain. 241 | PMP/Smepmp may be an additional or alternative protection for M-mode, enabling the ability to 242 | implement temporal isolation boundaries within M-mode (to protect 243 | early boot code, for example), or to prevent itself from accessing or executing from memory 244 | assigned to lower privilege levels (privilege escalation). 245 | 246 | [#cat_sr_sub_sud] 247 | [width=100%] 248 | [%header, cols="5,20"] 249 | |=== 250 | | ID# 251 | | Requirement 252 | 253 | | SR_SUD_001 254 | | PMP/Smepmp or MPT MUST be used to enforce physical memory isolation 255 | boundaries for supervisor domains, and to protect machine mode from any 256 | supervisor domain. 257 | 258 | |=== 259 | 260 | PMP can be used for more static and deterministic use cases. 261 | MPT can be used where more fine grained (page-based) dynamic resource management across 262 | supervisor domain boundaries is required. 263 | 264 | ===== Supervisor Domain Resource Access and Sharing 265 | 266 | [width=100%] 267 | [%header, cols="5,20"] 268 | |=== 269 | | ID# 270 | | Requirement 271 | 272 | | SR_SUD_002 273 | | MPT MUST be used to protect resources assigned to one supervisor domain from other supervisor domains 274 | 275 | | SR_SUD_003 276 | | Resources assigned to a higher trust level supervisor domain MUST NOT be accessible 277 | to a lower trust level supervisor domain. The trust levels/policies are specified by the 278 | security system designer.
279 | 280 | | SR_SUD_004 281 | | Resources assigned to an untrusted supervisor domain MUST be accessible to a trusted supervisor domain 282 | |=== 283 | 284 | Supervisor domains allow resource isolation and sharing between domains under the control of M-mode firmware. Trusted Execution environments can require asymmetric sharing models, where one trusted domain has X/W/R access to other domains' resources. 285 | 286 | ===== Supervisor Domain Debug 287 | 288 | [width=100%] 289 | [%header, cols="5,20"] 290 | |=== 291 | 292 | | ID# 293 | | Requirement 294 | 295 | | SR_SUD_005 296 | | A system supporting supervisor domains MUST support supervisor domain 297 | extensions for interrupts (Smsdia) and SHOULD support supervisor domain 298 | extensions for external debug (TBD). 299 | 300 | |=== 301 | 302 | *See chapter 6 (Smsdia) of the https://github.com/riscv/riscv-smmtt[RISC-V Supervisor 303 | Domains Access Protection] specification.* 304 | 305 | ==== External debug and Performance counters 306 | 307 | *See the https://github.com/riscv-non-isa/riscv-external-debug-security[RISC-V External Debug Security Extension] 308 | specification.* 309 | 310 | [#cat_sr_sub_dbg] 311 | [width=100%] 312 | [%header, cols="5,20"] 313 | |=== 314 | | ID# 315 | | Requirement 316 | 317 | | SR_DBG_001 318 | | External debug MUST only be enabled by HW RoT (M-mode external debug) or by FW 319 | RoT (non M-mode external debug). 320 | 321 | | SR_DBG_002 322 | | External debug SHOULD be enabled separately for M-mode & non-M-mode software. 323 | 324 | | SR_DBG_003 325 | | Self-hosted debug MAY be used for debug of non M-mode software. 326 | 327 | | SR_DBG_004 328 | | Self-hosted debug MUST only be enabled by a higher privileged component. 329 | 330 | |=== 331 | 332 | For example, external debug can be enabled for non-M-mode software without affecting M-mode (recoverable debug). And an S-mode OS can enable self-hosted debug for a user application without affecting other applications or S-mode itself.
333 | 334 | [width=100%] 335 | [%header, cols="5,20"] 336 | |=== 337 | | ID# 338 | | Requirement 339 | 340 | | SR_DBG_005 341 | | FW RoT MAY disable self-hosted debug for all non M-mode software. 342 | 343 | |=== 344 | 345 | For example, disable self-hosted debug in a production system for certification 346 | reasons. 347 | 348 | [width=100%] 349 | [%header, cols="5,20"] 350 | |=== 351 | | ID# 352 | | Requirement 353 | 354 | | SR_DBG_006 355 | | External debug MUST only be enabled following system reset (part of measuring) 356 | of the affected component, moderated by a RoT. 357 | 358 | | SR_DBG_007 359 | | Revealing self-hosted debug MUST only be enabled following reboot (part of 360 | measuring) of the affected component. 361 | 362 | | SR_DBG_008 363 | | Trusted self-hosted debug MAY be enabled at runtime (after measuring) of the 364 | affected component, to an application specific governance process. 365 | 366 | |=== 367 | 368 | Enabling debug after measurement ensures the system remains attestable. 369 | 370 | *See chapters 7 and 9 of the https://github.com/riscv/riscv-isa-manual/releases/tag/Priv-v1.12[Privileged 371 | ISA] specification on performance counters.* 372 | 373 | These extensions enable management of interrupts, external debug, and 374 | performance counters across supervisor domain boundaries. 375 | 376 | ==== IOPMP 377 | 378 | *See the https://github.com/riscv-non-isa/iopmp-spec[RISC-V IOPMP] specification.* 379 | 380 | IOPMP is a system level component providing physical memory access control for 381 | device-initiated transactions, complementing PMP and sPMP rules. 382 | 383 | [#cat_sr_sub_iop] 384 | [width=100%] 385 | [%header, cols="5,20"] 386 | |=== 387 | | ID# 388 | | Requirement 389 | 390 | | SR_IOP_001 391 | | A system which supports PMP/Smepmp, or sPMP, MUST implement either IOPMP or IOMPT for device 392 | access control. 393 | 394 | Depending on system design, IOMPT can enforce the same access control policies as IOPMP. 
395 | 396 | | SR_IOP_002 397 | | IOPMP configurations MUST only be directly accessible to machine mode. 398 | 399 | | SR_IOP_003 400 | | IOPMP MUST be used to guarantee that devices assigned to lower privilege levels cannot access resources assigned to M-mode. 401 | 402 | | SR_IOP_004 403 | | IOPMP MUST be used to guarantee that devices assigned to a domain cannot be accessed by other domains. 404 | 405 | |=== 406 | 407 | NOTE: IOPMP defines multiple "models" for different system configurations. 408 | Unless specified differently in the use cases in this specification, system 409 | designers are free to choose any IOPMP model. Depending on the use case, IOMPT may be an alternative or addition to IOPMP 410 | 411 | ==== IOMPT 412 | 413 | *See the https://github.com/riscv/riscv-smmtt[RISC-V Supervisor Domains Access 414 | Protection] specification.* 415 | 416 | IOMPT is a system level component providing physical memory access control for 417 | device-initiated transactions, by mapping transactions to IOMMU translation and MPT permissions. 418 | 419 | [width=100%] 420 | [%header, cols="5,20"] 421 | |=== 422 | | ID# 423 | | Requirement 424 | 425 | | SR_IOM_001 426 | | A system which supports MPT MUST implement IOMPT for access-control for 427 | device-initiated memory accesses. 428 | 429 | | SR_IOM_002 430 | | IOMPT configurations MUST only be directly accessible to machine mode. 431 | 432 | | SR_IOM_003 433 | | IOMPT MUST be used to guarantee that devices assigned to lower privilege levels cannot access resources assigned to M-mode. 434 | 435 | | SR_IOM_004 436 | | IOMPT MUST be used to guarantee that devices assigned to a domain cannot be accessed by other domains. 437 | 438 | | SR_IOM_005 439 | | A system which implements IOMPT MAY also implement IOPMP to access-control 440 | device-initiated access to M-mode memory. 
441 | 442 | |=== 443 | 444 | NOTE: IOMPT can be sufficient for protecting Root devices as M-mode can enforce that its own resources are never assigned to another domain. 445 | Depending on use case, IOPMP may be used an addition or alternative. For example, a system 446 | may require that Root devices are not able to access memory assigned to TEE domain. 447 | 448 | ==== IOMMU 449 | 450 | *See the https://github.com/riscv-non-isa/riscv-iommu[RISC-V IOMMU] specification.* 451 | 452 | IOMMU is a system level component performing memory address translation from IO 453 | Virtual Addresses to Physical Addresses thereby allowing devices to access virtual memory 454 | locations. It complements the MMU. 455 | 456 | [width=100%] 457 | [%header, cols="5,20"] 458 | |=== 459 | | ID# 460 | | Requirement 461 | 462 | | SR_IOM_006 463 | | Systems supporting MMU SHOULD also support IOMMU 464 | 465 | | SR_IOM_007 466 | | Systems supporting IOMMU MUST also enforce physical memory access control for 467 | M-mode memory against device-initiated transactions using IOMPT or IOPMP 468 | 469 | |=== 470 | 471 | === Software enforced memory tagging 472 | 473 | *See the https://github.com/riscv/riscv-j-extension[RISC-V Pointer Masking] specification.* 474 | 475 | _Memory tagging (MT)_, is a technique which can improve the memory safety of an 476 | application. A part of the effective address of a pointer can be masked off 477 | and used as a tag indicating the intended ownership or state of a pointer. The tag 478 | can be used to track accesses across different regions as well as protecting 479 | against pointer misuse such as "use-after-free". Pointer masking implementations should use 480 | the proposed RISC-V pointer masking extension (Smmpm, Smnpm, Ssnpm). 481 | 482 | With software based memory tagging the access rules encoded in tags are 483 | enforced by software, such as the compiler and the application runtime. 484 | 485 | See also hardware enforced memory tagging below. 
486 | 487 | === Control flow integrity 488 | 489 | *See the https://github.com/riscv/riscv-cfi[RISC-V Control Flow Integrity] specification.* 490 | 491 | Control-flow Integrity (CFI) capabilities help defend against Return-Oriented 492 | Programming (ROP) and Call/Jump-Oriented Programming (COP/JOP) style of 493 | control-flow subversion attacks. Here an attacker attempts to modify return 494 | addresses or call/jump address to redirect a victim to code used by the 495 | attacker. 496 | 497 | These attack methodologies use code sequences in authorized modules, with at 498 | least one instruction in the sequence being a control transfer instruction that 499 | depends on attacker-controlled data either in the return stack or in memory 500 | used to obtain the target address for a call or jump. 501 | 502 | Attackers stitch these 503 | sequences together by diverting the control flow instructions (e.g., JALR, 504 | C.JR, C.JALR), from their original target address to a new target via 505 | modification in the return stack or in the memory used to obtain the jump/call 506 | target address. 507 | 508 | RISC-V provides two defenses: 509 | 510 | * Shadow stacks (Zicfiss) - protect return addresses on call stacks 511 | * Labeled Landing pads (Zicfilp) - protect target addresses in jumps and 512 | branches 513 | 514 | === Cryptography 515 | 516 | *See the https://github.com/riscv/riscv-crypto[RISC-V Cryptography Extension] specification.* 517 | 518 | RISC-V includes ISA extensions in the following cryptographic areas: 519 | 520 | * Scalar cryptography 521 | * Vector cryptography 522 | * Entropy source (scalar) 523 | 524 | RISC-V cryptographic extensions are aimed at supporting efficient acceleration 525 | of cryptographic operations at the ISA level. This can both help reduce the TCB of 526 | an isolated component and also avoid hardware bottlenecks (for example, system 527 | level cryptographic subsystems). 
528 | 529 | The entropy source extension provides an ISA level interface to a hardware 530 | entropy source. Entropy source requirements can depend on use case or ecosystem 531 | specific requirements and RISC-V does not provide any entropy source technical 532 | specification. However, the entropy source ISA specification does contain general 533 | recommendations and references. 534 | 535 | [width=100%] 536 | [%header, cols="5,20"] 537 | |=== 538 | | ID# 539 | | Requirement 540 | 541 | | SR_CPT_001 542 | | RISC-V systems SHOULD support either scalar or vector cryptographic ISA 543 | extensions 544 | 545 | | SR_CPT_002 546 | | The entropy source ISA extension MUST be supported if either scalar or vector 547 | cryptographic ISA extensions are supported. 548 | 549 | |=== 550 | 551 | It is not necessary to support both scalar and vector operations, as a scalar 552 | operation can be viewed as a vector of size 1. 553 | 554 | ==== Post quantum cryptography 555 | 556 | *See the https://github.com/riscv/riscv-pqc[RISC-V Specification for Post-quantum Cryptography] specification.* 557 | 558 | The _RISC-V Post Quantum Cryptography_ initiative aims to specify ISA extensions that enhance performance and 559 | implementation efficiency for contemporary public-key cryptography, with a focus on standard Post-Quantum Cryptography 560 | algorithms like ML-KEM (FIPS-203), ML-DSA (FIPS-204), SLH-DSA (FIPS-205) and others. The ISA design and evaluation prioritize the requirements of real-world 561 | networked devices, ensuring that the Post-Quantum Cryptography (PQC) extensions effectively complement existing scalar 562 | and vector cryptography extensions. 
563 | 564 | ==== High assurance cryptography 565 | 566 | *See the https://github.com/riscv/riscv-hac[RISC-V Specification for High Assurance Cryptography] 567 | 568 | The High Assurance Cryptography task group will create instruction set extensions (ISEs) that facilitate higher levels of assurance than the existing Scalar and Vector Crypto ISEs. One initial focus will be on full-rounds vector AES extensions that allow (do not prevent) effective side-channel resistant implementations and that may perform better than the existing round-based instructions, with future work on other algorithms. A second intimately related focus area will be ISEs that manage secret keys -- not restricted to just AES keys -- in ways that better protect them from unauthorized users and from side-channel analysis. 569 | 570 | === Architectural metadata storage 571 | 572 | In the context of this document, _architectural metadata_ refers to any data that is implicitly trusted by the architecture. Storage of such data is referred to as _architectural metadata storage_. 573 | 574 | Examples (not exhaustive) where architectural metadata is required in the RISC-V architecture include: MTT, memory tagging, and CHERI. 575 | 576 | Architectural metadata storage is implementation defined, but the following rules should be considered by any implementation. 577 | 578 | [width=100%] 579 | [%header, cols="5,20"] 580 | |=== 581 | | ID# 582 | | Requirement 583 | 584 | | SR_AMS_001 585 | | Architectural metadata storage MUST be protected against logical attacks 586 | 587 | | SR_AMS_002 588 | | Architectural metadata storage MUST be protected against physical attacks 589 | 590 | | SR_AMS_003 591 | | Architectural metadata storage MUST be protected against direct attacks 592 | 593 | |=== 594 | 595 | See xref:chapter2.adoc#_adversarial_model[adversarial model] 596 | 597 | For example, architectural metadata storage may be implemented in on-chip memory, or in cryptographically protected external DDR. 
598 | 599 | [width=100%] 600 | [%header, cols="5,20"] 601 | |=== 602 | | ID# 603 | | Requirement 604 | 605 | | SR_AMS_004 606 | | Architectural metadata MUST be isolated by privilege level, and within supervisor domain boundaries 607 | 608 | |=== 609 | 610 | Depending on use case, architectural metadata may be visible to or managed by, for example, a supervisor level kernel or hypervisor, a kernel or a hypervisor within a supervisor domain, or by a machine mode monitor. But it should be considered private within an isolation boundary and not accessible or guessable by lower privilege levels, or by code in a different supervisor domain. 611 | 612 | [width=100%] 613 | [%header, cols="5,20"] 614 | |=== 615 | | ID# 616 | | Requirement 617 | 618 | | SR_AMS_005 619 | | Cryptographically protected architectural metadata storage MUST provide privacy protection, including at least location freshness and boot freshness 620 | 621 | | SR_AMS_006 622 | | Cryptographically protected architectural metadata storage MUST provide at least error detection, and SHOULD provide integrity protection 623 | 624 | | SR_AMS_007 625 | | Cryptographically protected architectural metadata storage SHOULD provide replay protection or temporal freshness 626 | 627 | |=== 628 | 629 | Architectural metadata needs to be protected against both unauthorized access (read or modify), boot attacks, relocation attacks, and errors (accidental or malicious). 630 | 631 | On systems where architectural metadata is stored in external memory, and external memory attacks are in scope (for example, directly accessible or replaceable external memory), then cryptographic protection with replay protection or temporal freshness is strongly recommended. 
632 | 633 | [width=100%] 634 | [%header, cols="5,20"] 635 | |=== 636 | | ID# 637 | | Requirement 638 | 639 | | SR_AMS_008 640 | | Architectural metadata storage SHOULD be protected against indirect attacks 641 | 642 | |=== 643 | 644 | In general, protection against indirect attacks is a system implementation problem not specific to architectural metadata storage. For example, systems supporting speculative execution should also implement appropriate mitigations against speculation based attacks. Any such mitigations should also be applied to the implementation of architectural metadata storage. 645 | 646 | === Capability based architecture 647 | 648 | ==== CHERI 649 | 650 | *See the https://github.com/riscv/riscv-cheri[RISC-V Specification for CHERI Extensions] specification.* 651 | 652 | CHERI - an ISA technique that uses capability-based memory protection for spatial and temporal memory safety, compartmentalization, and control-flow enforcement. Source code has to be recompiled to capture memory safety properties inherent in the source language. 653 | 654 | 655 | -------------------------------------------------------------------------------- /specification/src/chapter4.adoc: -------------------------------------------------------------------------------- 1 | :imagesdir: ../images 2 | 3 | [[chapter4]] 4 | 5 | == Use Case Examples - Non Normative 6 | 7 | This chapter provides a selection of non-exhaustive example security use cases based on commonly used 8 | deployment security models. The examples may be extended over time as required. This section is non normative 9 | guidance only, detailing the recommended combination of security requirements for the associated use case. 10 | 11 | For ease of reference, for each use case, <> lists the mapping of the RISC-V 12 | ISA and non-ISA building blocks used along with links to the building block 13 | specification, POCs, and code repositories available.
14 | 15 | === Generic system without supervisor domains 16 | 17 | ==== Overview 18 | 19 | [caption="Figure {counter:image}: ", reftext="Figure {image}"] 20 | [title= "Generic vertically integrated system"] 21 | image::img_ch4_priv.png[] 22 | 23 | A generic vertically integrated system can be either virtualized or 24 | non-virtualized. 25 | 26 | 27 | M-mode hosts a FW RoT. An OS or a Hypervisor in S or HS mode controls 28 | applications or guests. Guests and applications execute in U or VS/VU modes and 29 | trust the OS or Hypervisor to provide isolation guarantees. 30 | 31 | 32 | NOTE: The RISC-V architecture also caters for systems with just M and U modes, 33 | commonly used in embedded systems, helper cores, and similar use cases. On 34 | secure systems not supporting S-mode a FW RoT has to share M-mode with an OS. 35 | RISC-V does not exclude such implementations - for example, implementations 36 | using a certified OS and FW RoT, or using a HW RoT to isolate sensitive code 37 | and assets (physical isolation). There is no current mechanism in RISC-V for 38 | isolation within M-mode itself other than temporal boundaries + 39 | + 40 | To minimize the TCB of the FW RoT RISC-V recommends that secure systems 41 | implement S mode, and deprivilege non-RoT firmware such as an OS or 42 | non-security services. 43 | 44 | ==== Isolation model 45 | 46 | [width=100%] 47 | [%header, cols="5,20"] 48 | |=== 49 | | Requirement 50 | | Reference 51 | 52 | | Protect M-mode from lower privilege levels. 53 | | SR_PMP_001, SR_PMP_002 54 | 55 | | Protect Supervisor mode from lower privilege levels (with or without H-extension) 56 | | SR_PMP_004 57 | 58 | |=== 59 | 60 | See xref:chapter3.adoc#_pmp_and_epmp[PMP and Smepmp] 61 | 62 | See xref:chapter3.adoc#_spmp[sPMP] 63 | 64 | See xref:chapter3.adoc#_mmu[MMU] 65 | 66 | The sPMP is typically used in relatively static deployments such as determinism sensitive use cases like automotives. 
67 | 68 | The MMU is typically required for Linux based systems and for virtualized systems. 69 | 70 | Either MMU or sPMP can be used both with or without the hypervisor extension. For 71 | example, the hypervisor extension with sPMP can support static partitioning 72 | hypervisors commonly used in automotive. A single stage MMU can be used 73 | without the hypervisor extension for full Linux support. 74 | 75 | ==== Root of Trust 76 | 77 | See xref:chapter2.adoc#_reference_model[reference model]. 78 | 79 | [width=100%] 80 | [%header, cols="5,20"] 81 | |=== 82 | | Requirement 83 | | Reference 84 | 85 | | A system level HW RoT is recommended 86 | | SR_ROT_001, 87 | SR_ROT_002 88 | 89 | |=== 90 | 91 | 92 | ==== Authorized Boot 93 | 94 | Multiple models can be used to ensure a secure system can only run authorized 95 | software. 96 | 97 | See xref:chapter2.adoc#_authorized_software[authorized software]. 98 | 99 | ==== Attestation 100 | 101 | Multiple models can be used to prove to a relying party that a secure system is 102 | in a trustworthy state. 103 | 104 | See xref:chapter2.adoc#_attestable_services[attestable services]. 105 | 106 | ==== Sealing 107 | 108 | Multiple models can be used to protect assets if a system is not in a 109 | trustworthy state. 110 | 111 | See xref:chapter2.adoc#_sealing[sealing]. 112 | 113 | ==== Device access control 114 | 115 | For the purpose of this specification, a device can be a logical device. A 116 | physical device can present one or more logical devices, each with its own 117 | (logical) control interface. 118 | 119 | Isolation guarantees provided to software also apply to device initiated 120 | transaction. 121 | 122 | [width=100%] 123 | [%header, cols="1,^1"] 124 | |=== 125 | | Requirement | Reference 126 | 127 | | Guarantee that devices assigned to lower 128 | privilege levels cannot access resources 129 | assigned to M-mode. 
130 | | SR_IOM_005, + 131 | (SR_IOP_001, SR_IOP_002, SR_IOP_003) + 132 | OR + 133 | (SR_IOM_001, SR_IOM_002, SR_IOM_003) + 134 | 135 | 136 | | Enforce access rules for devices assigned 137 | to user applications or guests on a virtualized 138 | system. 139 | | SR_IOP_004 OR SR_IOM_004 140 | 141 | |=== 142 | 143 | On a non-virtualized system, user devices can be managed by the OS which can 144 | enforce access rules for user applications. 145 | 146 | On a virtualized system, devices can be virtualized and assigned to guests by 147 | the hypervisor configuring MMU and IOMMU translation rules. 148 | 149 | NOTE: IOMTT can also be sufficient for protecting Root devices in the sense that 150 | M-mode can enforce that its own resources are never assigned to another domain. 151 | Use of IOPMP or similar could still add further protections. For example, a system 152 | may require that Root devices cannot access memory assigned to Confidential 153 | domain. 154 | 155 | ==== Debug and performance management 156 | 157 | See xref:chapter2.adoc#_security_lifecycle[security life cycle]. + 158 | See https://github.com/riscv-non-isa/riscv-external-debug-security[RISC-V external debug security] 159 | 160 | [width=100%] 161 | [%header, cols="5,20"] 162 | |=== 163 | | Requirement 164 | | Reference 165 | 166 | | Securely control debug access 167 | | SR_DBG_001 168 | 169 | | Allow Debug of non-M-mode software while blocking debug of higher privilege code 170 | | SR_DBG_002 171 | 172 | | Allow Self-hosted Debug of non M-mode software 173 | | SR_DBG_003, SR_DBG_004 174 | 175 | |=== 176 | 177 | For example, external debug can be enabled for non-M-mode software without affecting M-mode (recoverable debug). And an S-mode OS can enable self-hosted debug for a user application without affecting other applications or S-mode itself. 
178 | 179 | [width=100%] 180 | [%header, cols="5,20"] 181 | |=== 182 | | Requirement 183 | | Reference 184 | 185 | | Allow a FW RoT to prevent debug of a production system 186 | | SR_DBG_005 187 | 188 | |=== 189 | 190 | For example, disable self-hosted debug in a production system for certification 191 | reasons. 192 | 193 | [width=100%] 194 | [%header, cols="5,20"] 195 | |=== 196 | | Requirement 197 | | Reference 198 | 199 | | Include debug controls in boot time measurement for attestation purpose. 200 | | SR_DBG_006, SR_DBG_007, SR_DBG_008, SR_LFC_004 201 | 202 | |=== 203 | 204 | Guarantees the system remains attestable. 205 | 206 | [width=100%] 207 | [%header, cols="5,20"] 208 | |=== 209 | | Requirement 210 | | Reference 211 | 212 | | Protect an application or domain against monitoring without consent or DOS by other applications or domains 213 | | SR_PMU_001, SR_PMU_002, SR_QOS_001, SR_QOS_002, SR_DOS_001, SR_DOS_002 214 | 215 | |=== 216 | 217 | Prevents using event counters to monitor across application or privilege 218 | boundaries. Event counters can be managed by higher privileged software as part 219 | of context switching across boundaries. 220 | 221 | === Global Platform TEE 222 | 223 | ==== Overview 224 | 225 | [caption="Figure {counter:image}: ", reftext="Figure {image}"] 226 | [title= "Global platform TEE use cases"] 227 | image::img_ch4_gp-tee.png[] 228 | 229 | https://globalplatform.org/[Global platform] defines technical standards, 230 | interface specifications and programming models, open source firmware, and 231 | certification programs for _trusted execution environments (TEE)_. 232 | 233 | A TEE is an isolated environment providing security services. TEE services can 234 | be available to software on multiple Harts. 
For example: 235 | 236 | * Payment clients 237 | * DRM clients and content protection 238 | * Secure storage 239 | * User identity management 240 | * Attestation services 241 | 242 | The TEE model divides software into physically isolated domains: 243 | 244 | * Normal domain + 245 | Typically hosting a _rich OS_ (for example, RTOS or Linux), and user 246 | applications. 247 | * TEE domain + 248 | Hosts a _TEE OS_ (domain security manager) and _trusted applications (TA)_. 249 | * Root domain + 250 | Hosts RoT firmware, including a secure monitor. 251 | 252 | The TEE OS is primarily responsible for isolation of TA, and for providing root 253 | of trust services, within the TEE domain. 254 | 255 | The OS in Normal domain typically controls scheduling on the system, across all 256 | Harts available to it. To interact with TA services in TEE domain, the OS in 257 | Normal domain interacts with a TEE OS through a secure monitor in Root domain. 258 | 259 | The secure monitor is responsible for context switching and isolation across 260 | domain boundaries, including event management. 261 | 262 | For the purpose of this specification, TEE deployment models can be separated 263 | as: 264 | 265 | * Static partitioning TEE + 266 | A single TEE provides security services to Normal domain. TA are typically 267 | installed at boot by RoT FW and TEE OS, though Global Platform does also define 268 | protocols for installation of TA at runtime. System configuration and resource 269 | allocation can be mostly static, making the system more deterministic. + 270 | + 271 | _Use case examples:_ edge devices and IoT, automation, and automotive. 272 | * Virtualized TEE + 273 | On a virtualized system, TEE can also be virtualized. In this case a _secure 274 | partition manager_ (SPM) in TEE domain is responsible for isolation of multiple TEE 275 | guests (for example, an OEM TEE and separate third party TEE). This model can 276 | also support more dynamic resource allocation. 
+ 277 | + 278 | _Use case examples:_ mobile clients, and automotive. 279 | 280 | ==== Isolation model 281 | 282 | A Global Platform TEE requires the following isolation guarantees: 283 | 284 | [width=100%] 285 | [%header, cols="5,20"] 286 | |=== 287 | | Requirement 288 | | Reference 289 | 290 | | Allow Root domain to access resources assigned to any domain, while preventing 291 | itself from unintended access to resources assigned to a different domain 292 | (privilege escalation). 293 | | SR_PMP_003 294 | 295 | | Prevent other domains from accessing resources assigned to Root domain 296 | | (SR_PMP_001, SR_PMP_002) OR (SR_MPT_001, SR_MPT_002), + 297 | SR_SUD_001 298 | 299 | 300 | | Block resources assigned to TEE domain from access by Normal domain 301 | | SR_SUD_001, SR_SUD_002, SR_SUD_003 302 | 303 | | Allow resources assigned to Normal domain to be accessible to Normal domain 304 | (r/w/x), and to TEE domain (r/w) (default sharing rule) 305 | | SR_SUD_004 306 | 307 | | Ensure resources assigned to a single TA, or a guest TEE, are not accessible by a 308 | different TA, or guest TEE, without consent. 309 | | SR_PMP_005 OR SR_MMU_003 310 | 311 | |=== 312 | 313 | In the standard GP TEE model, each TA is expected to be a self-contained unit 314 | providing a specific security service, either to Normal domain or to other TA. 315 | All communications are implemented through secure channels managed by the TEE OS 316 | or SPM. 317 | 318 | Sharing of memory between TA is generally discouraged. But there are mechanisms 319 | to do so in specific use cases. For example, sharing media buffers in a secure 320 | media path. Such policies are enforced by SPM or TEE OS. 321 | 322 | Processes in Normal domain can share memory assigned to Normal domain when 323 | interacting with a TA in TEE world (default sharing rule). Such shared memory 324 | can be cached when context switching between Normal and TEE domains.
325 | 326 | RISC-V hardware enforced isolation mechanisms can be used as follows to meet 327 | those guarantees: 328 | 329 | See xref:chapter3.adoc#_supervisor_domains[supervisor domains]. 330 | See xref:chapter3.adoc#_pmp_and_epmp[PMP and Smepmp] 331 | See xref:chapter3.adoc#_spmp[sPMP] 332 | See xref:chapter3.adoc#_mmu[MMU] 333 | See xref:chapter3.adoc#_mtt[MTT] 334 | 335 | [width=100%] 336 | [%header, cols="5,20"] 337 | |=== 338 | | Requirement 339 | | Reference 340 | 341 | | Use Supervisor domains to enforce isolation between Normal and TEE domains, and to protect machine mode from other domains 342 | | SR_SUD_001, SR_MPT_001, SR_MPT_002 343 | 344 | | For a static partition TEE, use PMP, sPMP, MMU or MPT to enforce isolation 345 | between TA in TEE domain. 346 | | SR_PMP_005 OR SR_MMU_003 347 | 348 | | For a virtualized TEE, use hypervisor extension 349 | | SR_HYP_001, SR_MMU_001, SR_MMU_002 350 | 351 | | For a virtualized TEE, sPMP or MMU MUST be used to enforce isolation between guest 352 | TEE, and between TA within a TEE. 353 | | SR_PMP_005 OR SR_MMU_003 354 | 355 | |=== 356 | 357 | ==== Root of Trust 358 | 359 | See xref:chapter2.adoc#_reference_model[reference model]. 360 | 361 | [width=100%] 362 | [%header, cols="5,20"] 363 | |=== 364 | | Requirement 365 | | Reference 366 | 367 | | It is recommended that a TEE based system implement a HW RoT 368 | | SR_ROT_001, SR_ROT_002 369 | 370 | |=== 371 | 372 | ==== Authorized boot 373 | 374 | See xref:chapter2.adoc#_authorized_software[authorized software]. 375 | 376 | TEE boot is typically based on: 377 | 378 | * Measured and verified local boot (direct or indirect) 379 | * Sealing, to protect TEE production assets 380 | 381 | The process can involve multiple stages (layered boot).
382 | 383 | [width=100%] 384 | [%header, cols="5,20"] 385 | |=== 386 | | Requirement 387 | | Reference 388 | 389 | | Direct or indirect measurement of a system verifies the software is authorized 390 | | SR_MSM_001, SR_MSM_002, SR_MSM_003 391 | 392 | | Immutable code ensures a trusted starting point 393 | | SR_MSM_004 394 | 395 | | Systems allow secure updates to all mutable components 396 | | SR_UPD_001, SR_UPD_002, SR_UPD_005, SR_UPD_006, SR_UPD_007 397 | 398 | |=== 399 | 400 | 401 | ==== Attestation 402 | 403 | See xref:chapter2.adoc#_attestable_services[attestable services]. 404 | 405 | Static partition TEE attestation is typically based on a direct security 406 | platform attestation. 407 | 408 | [width=100%] 409 | [%header, cols="5,20"] 410 | |=== 411 | | Requirement 412 | | Reference 413 | 414 | | Attestation is used to determine trustworthiness across all components 415 | * TEE domain 416 | * Root domain 417 | * Boot state of all trusted subsystems 418 | | SR_ATT_001, SR_ATT_002, SR_ATT_003 419 | 420 | |=== 421 | 422 | Virtualized TEE attestation can be layered, for performance or separation of 423 | concern. For example: 424 | 425 | * A security platform attestation, signed by a RoT, covering trusted subsystems, 426 | Root domain, and SPM 427 | * Separate guest TEE attestation(s) signed by SPM 428 | 429 | [width=100%] 430 | [%header, cols="5,20"] 431 | |=== 432 | | Requirement 433 | | Reference 434 | 435 | | Layered attestation allows delegation in complex systems 436 | | SR_ATT_004, SR_ATT_005 437 | |=== 438 | 439 | ==== Sealing 440 | 441 | See xref:chapter2.adoc#_sealing[sealing]. 442 | 443 | In the Global Platform security model, SPM or TEE OS typically provide local 444 | trusted storage, key management, and cryptographic services to TA and guest TEE. 445 | These services support local sealing of TA or guest TEE assets, and minimize 446 | exposure of cryptographic materials.
447 | 448 | [width=100%] 449 | [%header, cols="5,20"] 450 | |=== 451 | | Requirement 452 | | Reference 453 | 454 | | Local sealing for a TA, or a TEE guest, must be unique to TEE domain and to a 455 | physical instance of a system. 456 | 457 | | SR_SUD_002, SR_SLG_002 458 | 459 | | Local sealing for a TA, or a TEE guest, should also be unique to the TEE guest 460 | or the TA. Local sealing MAY be layered 461 | 462 | | SR_MMU_003 OR SR_PMP_005 463 | |=== 464 | 465 | For example: 466 | 467 | * TEE domain unique sealing keys derived by a RoT from a hardware unique key 468 | * TA, or guest TEE, unique sealing keys derived by TEE OS or SPM from a TEE 469 | domain unique sealing key 470 | 471 | ==== Device access control 472 | 473 | For the purpose of this specification, a device can be a logical device. A 474 | physical device can present one or more logical devices, each with its own 475 | (logical) control interface. 476 | 477 | The security guarantees also apply to device initiated accesses, for example DMA 478 | and interrupts. 479 | 480 | [width=100%] 481 | [%header, cols="5,20"] 482 | |=== 483 | | Requirement 484 | | Reference 485 | 486 | | A static partition TEE must use IOPMP to enforce access rules for devices. 487 | | SR_IOP_004 488 | 489 | | A virtualized TEE must use IOMPT and IOMMU to enforce access rules for devices 490 | assigned to Normal or TEE domains, and should use IOPMP to enforce access rules 491 | for Root devices. 492 | | SR_IOM_001, SR_IOM_002, SR_IOM_003, SR_IOM_004, SR_IOM_005 493 | |=== 494 | 495 | For a static partition TEE, domain level granularity can be sufficient as device 496 | access within TEE and Normal domains is governed by TEE OS and the rich OS 497 | respectively. It can be implemented using IOPMP. Policy can be controlled by 498 | boot configuration, by a HW or FW RoT. 499 | 500 | For a virtualized TEE, IOMTT enforces supervisor domain level access rules 501 | (physical isolation).
IOMMU enforces guest and TA level access rules 502 | (virtualization), supporting device assignment to a guest TEE or a TA. 503 | 504 | NOTE: IOMTT can also be sufficient for protecting Root devices in the sense that 505 | M-mode can enforce that its own resources are never assigned to another domain. 506 | Use of IOPMP or similar could still add further protections. For example, a system 507 | may require that Root devices cannot be used to access memory assigned to 508 | Confidential domain. 509 | 510 | ==== System integration 511 | 512 | In the case of a Global Platform TEE system a rich OS in Normal domain is free 513 | to schedule services, including TEE services, on any Hart available to it. The 514 | number and make-up of supervisor domains can be known, and a simple convention 515 | can be used for common identification (SDID value, see 516 | xref:chapter3.adoc#_supervisor_domains[supervisor domains]) of Normal, TEE, and 517 | Root domains across multiple Harts in a system. 518 | 519 | System integration in this context involves providing _security attributes_ on 520 | a system interconnect, tagging all transactions (CPU or system agent initiated) 521 | to either Root, Normal, or TEE domains. 522 | 523 | Possible use cases include: 524 | 525 | * Tweaking cryptographic memory protection (uniqueness) 526 | * Tagging interrupts, debug accesses, or coherent memory accesses 527 | * Device assignment (IOPMP/IOMTT integration), static or dynamic 528 | 529 | The attributes can be derived, for example, from SDID and privilege level, or from 530 | PMA. 531 | 532 | For some use cases security attributes can be extended to reflect finer 533 | granularity, for example for cryptographic memory protection with TA 534 | granularity. 535 | 536 | ==== Debug and performance management 537 | 538 | See xref:chapter2.adoc#_security_lifecycle[security life cycle]. 
+ 539 | See https://github.com/riscv-non-isa/riscv-external-debug-security[enhanced RISC-V external debug security] 540 | 541 | [width=100%] 542 | [%header, cols="5,20"] 543 | |=== 544 | | Requirement 545 | | Reference 546 | 547 | | External debug must be enabled separately for Root domain. 548 | | SR_DBG_001, SR_DBG_002 549 | 550 | | External debug must be enabled separately for each supervisor domain. 551 | | SR_SUD_005 552 | 553 | | External debug must only be enabled by a HW RoT (Root domain external debug) 554 | or by Root domain (supervisor domain external debug). 555 | | SR_DBG_001, SR_SUD_005 556 | 557 | | Self-hosted debug may be used for debug within a supervisor domain. 558 | | SR_DBG_003 559 | 560 | | Self-hosted debug must only be enabled by a higher privileged component. 561 | | SR_DBG_004 562 | |=== 563 | 564 | For example, within normal domain an S-mode or VS-mode OS can enable 565 | self-hosted debug for a user application. Or an HS-mode hypervisor can enable 566 | self-hosted debug for a VS-mode guest. Only Root domain should enable 567 | self-hosted debug for an S-mode OS or an HS mode hypervisor. 568 | 569 | Within TEE domain a TEE OS can enable self-hosted debug for a TA. An SPM can 570 | enable self-hosted debug for guest TEE. Only Root domain should enable 571 | self-hosted debug of SPM (virtualized) or TEE OS (non-virtualized). 572 | 573 | A machine mode monitor can enable external debug of individual supervisor domains without affecting M-mode, or any other supervisor domain. 574 | 575 | [width=100%] 576 | [%header, cols="5,20"] 577 | |=== 578 | | Requirement 579 | | Reference 580 | 581 | | Root domain may disable self-hosted debug for a whole domain. 582 | | SR_DBG_005 583 | |=== 584 | 585 | For example, for all of TEE domain on a production system, for certification 586 | reasons. 
587 | 588 | [width=100%] 589 | [%header, cols="5,20"] 590 | |=== 591 | | Requirement 592 | | Reference 593 | 594 | | External debug MUST only be enabled following system reset (part of measuring) 595 | of the affected component. 596 | |SR_DBG_006 597 | 598 | | Revealing self-hosted debug MUST only be enabled following reboot (part of 599 | measuring) of the affected component. 600 | |SR_DBG_007 601 | 602 | | Trusted self-hosted debug MAY be enabled at runtime (after measuring) of the 603 | affected component, to an application specific governance process. 604 | |SR_DBG_008 605 | |=== 606 | 607 | Guarantees the system remains attestable. 608 | 609 | See xref:chapter2.adoc#_event_counters[event counters] 610 | 611 | === Confidential computing on RISC-V (CoVE) 612 | ==== Overview 613 | [caption="Figure {counter:image}: ", reftext="Figure {image}"] 614 | [title= "Confidential compute use case"] 615 | image::img_ch4_cove.png[] 616 | 617 | In hosting environments, tenant workloads rely on isolation primitives that are 618 | managed by host privileged software. This can lead to a large TCB for tenants 619 | which may include, for example, a hypervisor, orchestration services, and 620 | host management services. It may also include other tenants exploiting 621 | vulnerabilities in complex hosting software. 622 | 623 | Confidential compute aims to achieve a minimal and certifiable TCB for 624 | _confidential workloads_. 625 | 626 | _CoVE (Confidential VM Extensions)_ 627 | https://github.com/riscv-non-isa/riscv-ap-tee/tree/main/specification[specification] 628 | defines a confidential compute platform for RISC-V systems, including 629 | interfaces and programming models, covering life cycle management, attestation, 630 | resource management and devices assignment, for confidential workloads. It is 631 | based on principles defined by 632 | https://confidentialcomputing.io/[Confidential Computing Consortium]. 
633 | Reference firmware for CoVE is being developed as part of the
634 | https://riseproject.dev/[RISC-V Software Ecosystem] project.
635 |
636 | CoVE is primarily aimed at cloud hosting of confidential workloads. In this deployment model
637 | CoVE divides software into physically isolated domains:
638 |
639 | * Normal domain +
640 | Typically hosting a hypervisor, and Normal guests and services.
641 | * Confidential domain +
642 | Hosts a domain security manager (_trusted security manager, TSM_) and confidential guests.
643 | * Root domain +
644 | Hosts RoT firmware, including a secure monitor.
645 |
646 | The TSM is primarily responsible for isolation of confidential workloads, and
647 | for providing RoT services, within the Confidential domain.
648 |
649 | A hypervisor in Normal domain typically controls scheduling and resource
650 | assignment on the system across all Harts available to it, including for
651 | confidential workloads. It interacts with the TSM through the secure monitor in
652 | Root domain to manage confidential workloads.
653 |
654 | The secure monitor is responsible for context switching and isolation across
655 | domain boundaries, including event management.
656 |
657 | More details including a threat model and the security requirements to address that threat
658 | model can be found in the _CoVE (Confidential VM Extensions)_
659 | https://github.com/riscv-non-isa/riscv-ap-tee/tree/main/specification[specification]
660 |
661 | The information below adds cross references to the security model normative security requirements.
662 |
663 | The underlying isolation mechanisms may be used in other deployment models, such
664 | as some mobile clients or edge devices whose design may be constrained by real
665 | time and formal verification requirements. The TSM and secure monitor function are
666 | then combined into a single TEE security manager in Root domain.
667 |
668 |
669 | ==== Isolation model
670 |
671 | Confidential workloads require isolation guarantees. RISC-V hardware enforced isolation mechanisms can be used as follows to meet those requirements:
672 |
673 | See xref:chapter3.adoc#_supervisor_domains[supervisor domains].
674 | See xref:chapter3.adoc#_pmp_and_epmp[PMP and Smepmp]
675 | See xref:chapter3.adoc#_spmp[sPMP]
676 | See xref:chapter3.adoc#_mmu[MMU]
677 | See xref:chapter3.adoc#_mtt[MTT]
678 |
679 | [width=100%]
680 | [%header, cols="5,20"]
681 | |===
682 | | Requirement
683 | | Reference
684 |
685 |
686 | | Allow Root domain to access resources assigned to any domain, while preventing
687 | itself from unintended access to resources assigned to a different domain
688 | (privilege escalation).
689 | | SR_PMP_003
690 |
691 | | Prevent other domains from accessing resources assigned to Root domain
692 | | (SR_PMP_001, SR_PMP_002) OR (SR_MPT_001, SR_MPT_002), +
693 | SR_SUD_001
694 |
695 | | Block resources assigned to Confidential domains from access by Normal domain
696 | | SR_SUD_001, SR_SUD_002, SR_SUD_003, SR_MMU_001, SR_MMU_002
697 |
698 | | Block resources assigned to Normal domain from access by Confidential domain
699 | | SR_SUD_001, SR_SUD_002, SR_SUD_003, SR_MMU_001, SR_MMU_002
700 |
701 | | Allow resources to be assigned to both Normal domain and Confidential domain (sharing by consent)
702 | | SR_SUD_001, SR_SUD_002, SR_SUD_003
703 |
704 | | Ensure resources assigned to a confidential workload are not accessible by other confidential workloads
705 | without consent.
706 | | SR_MMU_003
707 |
708 | | Implement hypervisor functionality for resource management
709 | | SR_HYP_001, SR_MMU_001, SR_MMU_002
710 |
711 |
712 | |===
713 |
714 |
715 | ==== Root of trust
716 |
717 | See xref:chapter2.adoc#_reference_model[reference model].
718 | 719 | [width=100%] 720 | [%header, cols="5,20"] 721 | |=== 722 | | Requirement 723 | | Reference 724 | 725 | | Implement a HW RoT 726 | | SR_TOT_001, SR_ROT_002 727 | 728 | |=== 729 | 730 | ==== Authorized Boot 731 | 732 | See xref:chapter2.adoc#_authorized_software[authorized software]. 733 | 734 | Boot in a cloud hosting context is typically based on: 735 | 736 | * Measured boot of a hosting platform, including Root domain and TSM 737 | * Platform attestation and security provisioning (unsealing) by a remote 738 | provisioning system 739 | * Launch and measurement of confidential workloads, only once the system has 740 | been unsealed 741 | 742 | A _trusted platform module_ (TPM) can be used to measure the security platform. 743 | 744 | Measuring confidential guests can be done by TSM in Confidential domain. 745 | 746 | The process can involve multiple stages (layered boot). 747 | 748 | [width=100%] 749 | [%header, cols="5,20"] 750 | |=== 751 | | Requirement 752 | | Reference 753 | 754 | a| Confidential guests must not boot until at least the security platform has 755 | been verified: 756 | 757 | * TSM in Confidential domain 758 | * Root domain 759 | * Boot state of all trusted subsystems 760 | 761 | Direct or indirect measurement of a system verifies the software is authorised 762 | | SR_MSM_001, SR_MSM_002, SR_MSM_003 763 | 764 | | Immutable code ensures a trusted starting point 765 | | SR_MSM_004 766 | 767 | | Systems allow secure updates to all mutable components 768 | | SR_UPD_001, SR_UPD_002, SR_UPD_005, SR_UPD_006, SR_UPD_007 769 | 770 | |=== 771 | 772 | ==== Attestation 773 | 774 | See xref:chapter2.adoc#_attestable_services[attestable services]. 775 | 776 | Virtualized TEE attestation can be layered, for performance or separation of 777 | concern. 
For example:
778 |
779 | * A security platform attestation, signed by a RoT, covering trusted subsystems,
780 | Root domain, and SPM
781 | * Separate guest TEE attestation(s) signed by SPM
782 |
783 |
784 | See xref:chapter2.adoc#_attestable_services[attestable services].
785 |
786 | Attestation of confidential workloads is typically layered, for performance and
787 | separation of concern:
788 |
789 | * A security platform attestation, signed by a hardware root of trust
790 | * A confidential workload attestation, signed by TSM
791 |
792 | [width=100%]
793 | [%header, cols="5,20"]
794 | |===
795 | | Requirement
796 | | Reference
797 |
798 | a| A security platform attestation is used, covering at least:
799 |
800 | * HW RoT
801 | * TSM
802 | * Root domain
803 | * Boot state of all trusted subsystems
804 | | SR_ATT_001, SR_ATT_002, SR_ATT_003
805 |
806 | |===
807 |
808 | ==== Sealing
809 |
810 | See xref:chapter2.adoc#_sealing[sealing].
811 |
812 | Sealing of confidential workloads is typically based on remote sealing,
813 | unsealing assets for a confidential workload following successful attestation
814 | by a remote provisioning system. This enables use cases such as:
815 |
816 | * Shared assets across multiple instances of a confidential workload (scale or
817 | redundancy)
818 | * Unsealing different sets of assets for different users of a service
819 |
820 | TSM itself is typically stateless across reset and does not require any sealed
821 | assets of its own.
822 |
823 | [#_cove_device_access_control]
824 | ==== Device access control
825 |
826 | For the purpose of this specification, a device can be a logical device. A
827 | physical device can present more than one logical device, each with its own
828 | (logical) control interface.
829 |
830 | The security guarantees also apply to device initiated accesses, for example
831 | DMA and interrupts.
832 |
833 | [width=100%]
834 | [%header, cols="5,20"]
835 | |===
836 | | Requirement
837 | | Reference
838 |
839 | | IOMTT and IOMMU are used to enforce access rules for devices assigned to
840 | Normal or Confidential domains, and IOPMP is used to enforce access rules
841 | for Root devices.
842 | |SR_IOM_001, SR_IOM_002, SR_IOM_003, SR_IOM_004, SR_IOM_005
843 |
844 | | IOPMP and IOMTT configurations are only directly accessible by
845 | Root domain.
846 | | SR_PMP_001, SR_IOM_002
847 |
848 | |===
849 |
850 | IOMTT enforces supervisor domain level access rules (physical isolation).
851 | IOMMU enforces guest and TA level access rules (virtualization), supporting
852 | device assignment to a Confidential guest.
853 |
854 | NOTE: IOMTT can also be sufficient for protecting Root devices in the sense
855 | that M-mode can enforce that its own resources are never assigned to another
856 | domain. Use of IOPMP or similar could still add further protections. For example,
857 | a system may require that Root devices cannot be used to access memory assigned
858 | to Confidential domain.
859 |
860 | ==== System integration
861 |
862 | In the case of a confidential compute system, hypervisor in Normal domain
863 | typically controls scheduling and resource assignment on the system across all
864 | Harts available to it. The number and make-up of supervisor domains can be
865 | known, and a simple convention can be used for common identification of Normal,
866 | Confidential, and Root domains across multiple Harts in a system.
867 |
868 | System integration in this context involves providing _security attributes_ on
869 | the interconnect, tagging all transactions (CPU or system agent initiated) to
870 | either Root, Normal, or Confidential domains.
871 |
872 | Possible use cases include:
873 |
874 | * Tweaking cryptographic memory protection (uniqueness)
875 | * Tagging interrupts, debug accesses, or coherent memory accesses
876 | * Device assignment (IOPMP/IOMTT integration), static or dynamic
877 |
878 | The attributes can be derived, for example, from SDID and privilege mode.
879 |
880 | For some use cases security attributes can be extended to reflect finer
881 | granularity, for example for cryptographic memory protection with confidential
882 | workload granularity.
883 |
884 | ==== Trusted device assignment
885 |
886 | The goal of confidential compute is to provide a minimum TCB for a confidential
887 | service, and CPU isolation mechanisms discussed so far do that on a Hart.
888 |
889 | But most confidential services also make use of devices, both on-chip and
890 | external. <<_cove_device_access_control, Device virtualization>> can guarantee
891 | exclusivity for devices assigned to a confidential workload - TSM can guarantee
892 | that a device assigned to a confidential workload cannot be accessed by:
893 |
894 | * Any other confidential workload
895 | * Any software in Normal domain
896 |
897 | But the confidential workload still has to trust all intermediaries between the
898 | workload and the device, both physical and software. For example:
899 |
900 | * Drivers
901 | * Physical interconnects and device hardware interfaces
902 |
903 | Secure access to devices is important in a number of use cases where a device
904 | performs work on assets owned by a confidential workload, such as accelerators.
905 |
906 | The _TEE device interface security protocol (TDISP)_ defined by PCIe provides a
907 | security architecture and protocols allowing a confidential workload to
908 | securely attest, manage and exchange data with a trusted device.
909 |
910 | CoVE defines RISC-V support for TDISP.
See: 911 | 912 | https://pcisig.com/specifications/ 913 | https://github.com/riscv-non-isa/riscv-ap-tee-io 914 | 915 | ==== Debug and performance management 916 | 917 | See xref:chapter2.adoc#_security_lifecycle[security life cycle]. + 918 | See https://github.com/riscv-non-isa/riscv-external-debug-security[enhanced RISC-V external debug security] 919 | 920 | [width=100%] 921 | [%header, cols="5,20"] 922 | |=== 923 | | Requirement 924 | | Reference 925 | 926 | | External debug must be enabled separately for Root domain. 927 | | SR_DBG_001, SR_DBG_002 928 | 929 | | External debug must be enabled separately for each supervisor domain. 930 | | SR_SUD_005 931 | 932 | | External debug must only be enabled by a HW RoT (Root domain external debug) 933 | or by Root domain (supervisor domain external debug). 934 | | SR_DBG_001, SR_SUD_005 935 | 936 | | Self-hosted debug may be used for debug within a supervisor domain. 937 | | SR_DBG_003 938 | 939 | | Self-hosted debug must only be enabled by a higher privileged component. 940 | | SR_DBG_004 941 | 942 | |=== 943 | 944 | For example, within normal domain an HS-mode hypervisor can enable self-hosted 945 | debug for a VS-mode guest. Only Root domain should enable self-hosted debug for 946 | the HS mode hypervisor. 947 | 948 | Within Confidential domain the TSM can enable self-hosted debug for a 949 | confidential guest. Only Root domain should enable self-hosted debug of TSM. 950 | 951 | A machine mode monitor can enable external debug of individual supervisor domains without affecting M-mode, or any other supervisor domain. 952 | 953 | [width=100%] 954 | [%header, cols="5,20"] 955 | |=== 956 | | Requirement 957 | | Reference 958 | 959 | | External debug must only be enabled following system reset (part of measuring) 960 | of the affected component. 961 | | SR_DBG_006 962 | 963 | | Revealing self-hosted debug must only be enabled following reboot (part of 964 | measuring) of the affected component. 
965 | | SR_DBG_007 966 | 967 | | Trusted self-hosted debug may be enabled at runtime (after measuring) of the 968 | affected component, to an application specific governance process. 969 | | SR_DBG_008 970 | 971 | |=== 972 | 973 | Guarantees the system remains attestable. 974 | 975 | See xref:chapter2.adoc#_event_counters[event counters] 976 | 977 | ==== Platform QoS 978 | 979 | See xref:chapter2.adoc#_platform_quality_of_service[platform quality of service]. 980 | -------------------------------------------------------------------------------- /specification/src/chapter5.adoc: -------------------------------------------------------------------------------- 1 | [[chapter5]] 2 | 3 | == Cryptography 4 | 5 | RISC-V supports a number of ISA-level extensions aimed at improving performance 6 | for cryptographic operations (scalar and vector). They also include an 7 | ISA-level entropy source, and guidelines for data independent execution latency. 8 | 9 | See xref:chapter3.adoc#_cryptography[cryptography] + 10 | See https://github.com/riscv/riscv-crypto 11 | 12 | Current ISA level cryptographic extensions work at round level. With the data 13 | independent execution latency properties, they can provide some mitigation 14 | against some side-channel attacks, such as cache timing attacks. They may not 15 | defend fully against some differential power analysis, for example. 16 | 17 | Work is on-going to define ISA-level _high assurance cryptography (HAC)_. This 18 | work includes defining full-round operations to increase side-channel 19 | resistance; adding operations supporting _post-quantum cryptography (PQC)_; and 20 | adding ISA-level privilege-based key management. 21 | 22 | Cryptographic requirements depend on target ecosystem, as well as on varying 23 | regulatory requirements in different geographic regions. 
This chapter
24 | summarizes commonly used cryptographic guidance for secure systems, provided as
25 | guidance for development of RISC-V specifications and RISC-V based secure
26 | systems.
27 |
28 | === PQC readiness
29 |
30 | Quantum safe cryptography is an evolving area of research. For example, see:
31 | https://csrc.nist.gov/projects/post-quantum-cryptography.
32 |
33 | NIST has standardized ML-KEM (FIPS-203), ML-DSA (FIPS-204), and SLH-DSA (FIPS-205). ML-KEM defines a
34 | key-encapsulation mechanism used to establish a shared secret key over a public
35 | channel. ML-DSA and SLH-DSA define digital signature schemes.
36 |
37 | RISC-V systems and specifications must at least support a migration path
38 | towards use of PQC.
39 |
40 | [width=100%]
41 | [%header, cols="5,20"]
42 | |===
43 | | ID#
44 | | Requirement
45 |
46 | | SR_CPT_003
47 | | In applications which require a migration path to PQC algorithms, all
48 | immutable components SHOULD support PQC alternatives.
49 |
50 | | SR_CPT_004
51 | | All mutable components MUST at least have a migration path to quantum safe
52 | cryptography.
53 |
54 | |===
55 |
56 | Immutable components, in particular immutable boot code, cannot be updated. To
57 | provide a full migration path for a system, immutable components need to
58 | support PQC alternatives.
59 |
60 | Mutable stages can be updated, and can provide a migration path to quantum safe
61 | cryptography. For example, system designers should consider protocols,
62 | governance, and storage requirements for upgrading hardware provisioned assets
63 | to PQC versions.
64 |
65 | === Cryptographic algorithms and guidelines
66 |
67 | The following resources provide general cryptographic guidance applicable to
68 | most western jurisdictions:
69 | https://csrc.nist.gov/Projects/Cryptographic-Standards-and-Guidelines
70 | https://www.cnss.gov/CNSS/issuances/Memoranda.cfm
71 |
72 | In particular, for most new systems:
73 |
74 | * Public identifier: 512 bits (for example, hash of a public key)
75 | * Counter used as identifier: 64 bits
76 | * Block cipher: AES-256
77 | * Hash function: SHA-512, or SHA-3
78 | * Message authentication: HMAC-SHA-512
79 | * Asymmetric signing/encryption: RSA-3072, or ECC-384 (see <<_pqc_readiness,
80 | PQC readiness>>)
81 |
82 | Some legacy use cases may require use of other algorithms, such as SHA-256 or
83 | AES-128. In these cases, wherever possible, an upgrade path should be
84 | supported. For example, allocating sufficient storage to accommodate larger
85 | sizes in future updates.
86 |
87 | Some use cases, such as cryptographic memory protection, may sometimes use
88 | specialized algorithms for performance in a constrained use case. These are not
89 | discussed here but should have similar properties to the ones listed above, but
90 | with different trade-offs.
91 |
92 | For Chinese markets, equivalent _ShangMi (SM)_ algorithm support is required.
93 | In particular:
94 |
95 | * SM2: Authentication (ECC based)
96 | * SM3: Hash function (256-bit)
97 | * SM4: Block cipher
98 |
99 | See http://gmbz.org.cn/main/index.html
100 |
101 | RISC-V cryptographic ISA extensions also include support for ShangMi algorithms
102 | (SM3 and SM4)
103 |
104 | Some ShangMi algorithms are also described in ISO specifications.
105 |
106 | Other specific markets also require regional cryptographic algorithms, for
107 | example Russian GOST. RISC-V cryptographic ISA extensions currently do not
108 | directly support Russia specific algorithms.
109 | -------------------------------------------------------------------------------- /specification/src/contributors.adoc: -------------------------------------------------------------------------------- 1 | == Contributors 2 | 3 | This RISC-V specification has been contributed to directly or indirectly by (in 4 | alphabetical order): Ali Zhang, Andy Dellow, Carl Shaw, Colin O'Flynn, Dean 5 | Liberty, Dong Du, Deepak Gupta, Fabrice Marinet, Guerney Hunt, Luis Fiolhais, 6 | Manuel Offenberg, Markku Juhani Saarinen, Munir Geden, Mark Hill, Nicholas Wood 7 | (editor), Paul Elliott, Ravi Sahita (co-editor), Robin Randhawa, Samuel Ortiz, 8 | Steve Wallach, Suresh Sugumar, Terry Wang, Victor Lu, Ved Shanbhogue, Yann Loisel 9 | -------------------------------------------------------------------------------- /specification/src/example.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{riscI-isca1981, 2 | title = {{RISC I}: {A} Reduced Instruction Set {VLSI} Computer}, 3 | author = {David A. Patterson and Carlo H. S\'{e}quin}, 4 | booktitle = {ISCA}, 5 | location = {Minneapolis, Minnesota, USA}, 6 | pages = {443-458}, 7 | year = {1981} 8 | } 9 | 10 | @InProceedings{Katevenis:1983, 11 | author = {Katevenis, Manolis G.H. and Sherburne,Jr., Robert W. and Patterson, David A. and S{\'e}quin, Carlo H.}, 12 | title = {The {RISC II} micro-architecture}, 13 | booktitle = {Proceedings VLSI 83 Conference}, 14 | year = 1983, 15 | month = {August}} 16 | 17 | @inproceedings{Ungar:1984, 18 | author = {David Ungar and Ricki Blau and Peter Foley and Dain Samples 19 | and David Patterson}, 20 | title = {Architecture of {SOAR}: {Smalltalk} on a {RISC}}, 21 | booktitle = {ISCA}, 22 | address = {Ann Arbor, MI}, 23 | year = {1984}, 24 | pages = {188--197} 25 | } 26 | 27 | @Article{spur-jsscc1989, 28 | author = {David D. Lee and Shing I. Kong and Mark D. Hill and 29 | George S. Taylor and David A. Hodges and Randy 30 | H. Katz and David A. 
Patterson}, 31 | title = {A {VLSI} Chip Set for a Multiprocessor 32 | Workstation--{Part I}: An {RISC} Microprocessor with 33 | Coprocessor Interface and Support for Symbolic 34 | Processing}, 35 | journal = {IEEE JSSC}, 36 | year = 1989, 37 | volume = 24, 38 | number = 6, 39 | pages = {1688--1698}, 40 | month = {December}} 41 | -------------------------------------------------------------------------------- /specification/src/header.adoc: -------------------------------------------------------------------------------- 1 | [[header]] 2 | :description: RISC-V Security Model 3 | :company: RISC-V.org 4 | :revdate: 03/2025 5 | :revnumber: 0.4 6 | :revremark: This document is in development. Assume everything can change. See http://riscv.org/spec-state for details. 7 | :url-riscv: http://riscv.org 8 | :doctype: book 9 | :preface-title: Preamble 10 | :colophon: 11 | :appendix-caption: Appendix 12 | :imagesdir: ../images 13 | :title-logo-image: image:risc-v_logo.svg[pdfwidth=3.25in,align=center] 14 | // Settings: 15 | :experimental: 16 | :reproducible: 17 | // needs to be changed? bug discussion started 18 | //:WaveDromEditorApp: app/wavedrom-editor.app 19 | :imagesoutdir: images 20 | :bibtex-file: src/example.bib 21 | :bibtex-order: alphabetical 22 | :bibtex-style: apa 23 | :icons: font 24 | :lang: en 25 | :listing-caption: Listing 26 | :sectnums: 27 | :rimage: 28 | :toc: left 29 | :toclevels: 4 30 | :source-highlighter: pygments 31 | ifdef::backend-pdf[] 32 | :source-highlighter: coderay 33 | endif::[] 34 | :data-uri: 35 | :hide-uri-scheme: 36 | :stem: latexmath 37 | :footnote: 38 | :xrefstyle: short 39 | 40 | = RISC-V Security Model 41 | RISC-V Security Model Task Group 42 | 43 | // Preamble 44 | [WARNING] 45 | .This document is in the link:http://riscv.org/spec-state[Development state] 46 | ==== 47 | Assume everything can change. 
This draft specification will change before 48 | being accepted as informative, so implementations made to this draft 49 | specification will likely not follow the future informative specification. 50 | ==== 51 | 52 | [preface] 53 | == Copyright and license information 54 | This specification is licensed under the Creative Commons 55 | Attribution 4.0 International License (CC-BY 4.0). The full 56 | license text is available at 57 | https://creativecommons.org/licenses/by/4.0/. 58 | 59 | Copyright 2023 by RISC-V International. 60 | 61 | [preface] 62 | include::contributors.adoc[] 63 | include::chapter1.adoc[] 64 | include::chapter2.adoc[] 65 | include::chapter3.adoc[] 66 | include::chapter4.adoc[] 67 | include::chapter5.adoc[] 68 | include::references.adoc[] 69 | 70 | //the index must precede the bibliography 71 | //include::index.adoc[] 72 | include::bibliography.adoc[] 73 | -------------------------------------------------------------------------------- /specification/src/index.adoc: -------------------------------------------------------------------------------- 1 | [index] 2 | == Index 3 | -------------------------------------------------------------------------------- /specification/src/references.adoc: -------------------------------------------------------------------------------- 1 | [appendix] 2 | 3 | == References 4 | 5 | 6 | . https://www.intel.com/content/www/us/en/newsroom/opinion/zero-trust-approach-architecting-silicon.html[https://www.intel.com/content/www/us/en/newsroom/opinion/zero-trust-approach-architecting-silicon.html] 7 | . https://www.forrester.com/blogs/tag/zero-trust/[https://www.forrester.com/blogs/tag/zero-trust/] 8 | . https://docs.microsoft.com/en-us/security/zero-trust/[https://docs.microsoft.com/en-us/security/zero-trust/] 9 | . https://github.com/riscv/riscv-crypto/releases[https://github.com/riscv/riscv-crypto/releases] 10 | . 
https://github.com/riscv/riscv-platform-specs/blob/main/riscv-platform-spec.adoc[https://github.com/riscv/riscv-platform-specs/blob/main/riscv-platform-spec.adoc] 11 | . https://www.commoncriteriaportal.org/files/ppfiles/pp0084b_pdf.pdf[https://www.commoncriteriaportal.org/files/ppfiles/pp0084b_pdf.pdf] 12 | . https://docs.opentitan.org/doc/security/specs/device_life_cycle/[https://docs.opentitan.org/doc/security/specs/device_life_cycle/] 13 | . https://nvlpubs.nist.gov/nistpubs/ir/2021/NIST.IR.8320-draft.pdf[https://nvlpubs.nist.gov/nistpubs/ir/2021/NIST.IR.8320-draft.pdf] 14 | . https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-193.pdf[https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-193.pdf] 15 | . https://www.rambus.com/security/root-of-trust/rt-630/[https://www.rambus.com/security/root-of-trust/rt-630/] 16 | . https://docs.opentitan.org/doc/security/specs/[https://docs.opentitan.org/doc/security/specs/] 17 | . https://trustedcomputinggroup.org/work-groups/dice-architectures/[https://trustedcomputinggroup.org/work-groups/dice-architectures/] 18 | . https://ieeexplore.ieee.org/iel7/8168766/8203442/08203496.pdf[https://ieeexplore.ieee.org/iel7/8168766/8203442/08203496.pdf] 19 | . https://dl.acm.org/doi/10.1145/168619.168635[https://dl.acm.org/doi/10.1145/168619.168635] 20 | . https://dl.acm.org/doi/abs/10.1145/3342195.3387532[https://dl.acm.org/doi/abs/10.1145/3342195.3387532] 21 | . https://github.com/riscv/riscv-debug-spec/blob/master/riscv-debug-stable.pdf[https://github.com/riscv/riscv-debug-spec/blob/master/riscv-debug-stable.pdf] 22 | . https://csrc.nist.gov/csrc/media/events/non-invasive-attack-testing-workshop/documents/08_goodwill.pdf[https://csrc.nist.gov/csrc/media/events/non-invasive-attack-testing-workshop/documents/08_goodwill.pdf] 23 | . https://www.iso.org/standard/60612.html[https://www.iso.org/standard/60612.html] 24 | . 
https://ieeexplore.ieee.org/document/6176671[https://ieeexplore.ieee.org/document/6176671] 25 | . https://tches.iacr.org/index.php/TCHES/article/view/8988[https://tches.iacr.org/index.php/TCHES/article/view/8988] 26 | . https://ieeexplore.ieee.org/abstract/document/1401864[https://ieeexplore.ieee.org/abstract/document/1401864] 27 | . https://www.electronicspecifier.com/products/design-automation/increasingly-connected-world-needs-greater-security[https://www.electronicspecifier.com/products/design-automation/increasingly-connected-world-needs-greater-security] 28 | . https://www.samsungknox.com/es-419/blog/knox-e-fota-and-sequential-updates[https://www.samsungknox.com/es-419/blog/knox-e-fota-and-sequential-updates] 29 | . https://docs.microsoft.com/en-us/windows/security/threat-protection/intelligence/supply-chain-malware[https://docs.microsoft.com/en-us/windows/security/threat-protection/intelligence/supply-chain-malware] 30 | . https://dl.acm.org/doi/10.1145/3466752.3480068[https://dl.acm.org/doi/10.1145/3466752.3480068] 31 | . https://arxiv.org/abs/2111.01421[https://arxiv.org/abs/2111.01421] 32 | . https://www.nap.edu/catalog/24676/foundational-cybersecurity-research-improving-science-engineering-and-institutions[https://www.nap.edu/catalog/24676/foundational-cybersecurity-research-improving-science-engineering-and-institutions] 33 | . https://trustedcomputinggroup.org/work-groups/dice-architectures/[https://trustedcomputinggroup.org/work-groups/dice-architectures/] 34 | --------------------------------------------------------------------------------