├── .github ├── dependabot.yml └── workflows │ └── ci.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── NOTICE ├── README.md ├── eif_build ├── Cargo.toml ├── LICENSE ├── README.md └── src │ └── main.rs └── src ├── defs ├── eif_hasher.rs └── mod.rs ├── lib.rs └── utils ├── eif_reader.rs ├── eif_signer.rs ├── identity.rs └── mod.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | rust: [1.71.1, stable, nightly] 18 | steps: 19 | - uses: actions/checkout@v3 20 | - run: rustup install ${{ matrix.rust }} 21 | - run: cargo +${{ matrix.rust }} build --workspace --verbose 22 | - run: | 23 | cargo +${{ matrix.rust }} test --workspace --verbose \ 24 | -- --skip utils::tests::kms # Requires AWS creds 25 | clippy: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - uses: actions/checkout@v3 29 | - run: rustup component add clippy 30 | - run: cargo clippy --workspace 31 | rustfmt: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: 
actions/checkout@v3 35 | - run: rustup component add rustfmt 36 | - run: cargo fmt --all --check 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */.* 2 | target 3 | **/*.rs.bk 4 | .idea 5 | */debug 6 | */release 7 | Cargo.lock 8 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "aws-nitro-enclaves-image-format" 3 | version = "0.5.0" 4 | authors = ["The AWS Nitro Enclaves Team "] 5 | edition = "2018" 6 | license = "Apache-2.0" 7 | description = "This library provides the definition of the enclave image format (EIF) file used in AWS Nitro Enclaves." 8 | repository = "https://github.com/aws/aws-nitro-enclaves-image-format" 9 | readme = "README.md" 10 | keywords = ["Nitro", "Enclaves", "AWS"] 11 | rust-version = "1.71" 12 | 13 | [dependencies] 14 | sha2 = "0.9.5" 15 | serde = { version = ">=1.0", features = ["derive"] } 16 | serde_json = "1.0" 17 | num-traits = "0.2" 18 | num-derive = "0.4" 19 | byteorder = "1.3" 20 | clap = "3.2" 21 | hex = "0.4" 22 | crc = "3.0" 23 | aws-nitro-enclaves-cose = { version = "0.5", features = ["key_kms"] } 24 | openssl = "0.10" 25 | serde_cbor = "0.11" 26 | chrono = { version = "0.4", default-features = false, features = ["clock"]} 27 | # These dependencies with versions less than specified are coming from 28 | # `aws-nitro-enclaves-cose` crate. 
29 | aws-sdk-kms = "<=1.20" 30 | aws-config = "<=1.1" 31 | aws-types = "<=1.1" 32 | aws-smithy-runtime = { version = "<=1.2" } 33 | # Needed by `aws-nitro-enclaves-cose` to perform calls to KMS. 34 | tokio = { version = "1.20", features = ["rt-multi-thread"] } 35 | regex = "1.0" 36 | 37 | [dev-dependencies] 38 | tempfile = "3.5" 39 | 40 | [workspace] 41 | members = [ 42 | ".", # aws-nitro-enclaves-image-format 43 | "eif_build", # The eif_build CLI tool 44 | ] 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## aws-nitro-enclaves-image-format 2 | 3 | [![status]][actions] [![version]][crates.io] [![docs]][docs.rs] ![msrv] 4 | 5 | [status]: https://img.shields.io/github/actions/workflow/status/aws/aws-nitro-enclaves-image-format/ci.yml?branch=main 6 | [actions]: https://github.com/aws/aws-nitro-enclaves-image-format/actions?query=branch%3Amain 7 | [version]: https://img.shields.io/crates/v/aws-nitro-enclaves-image-format.svg 8 | [crates.io]: https://crates.io/crates/aws-nitro-enclaves-image-format 9 | [docs]: https://img.shields.io/docsrs/aws-nitro-enclaves-image-format 10 | [docs.rs]: https://docs.rs/aws-nitro-enclaves-image-format 11 | [msrv]: https://img.shields.io/badge/MSRV-1.71.1-blue 12 | 13 | This library provides the definition of the enclave image format (EIF) file. 14 | 15 | ## Security 16 | 17 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 18 | 19 | ## License 20 | 21 | This project is licensed under the Apache-2.0 License. 22 | 23 | ## Enclave Image File (EIF) Specification 24 | 25 | Date: 2024-06-21 26 | 27 | ### Background 28 | 29 | AWS Nitro Enclaves ([Official documentation](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html)) is an Amazon EC2 feature that allows you to create isolated compute environments, called enclaves, from Amazon EC2 instances. 30 | Enclaves are separate, hardened, and highly-constrained virtual machines. They provide only secure local socket connectivity with their parent instance. 31 | They have no persistent storage, interactive access, or external networking. 32 | 33 | To run your application in an enclave, your application needs to be packaged into an Enclave Image File (EIF). 
34 | An EIF is self contained - everything your application needs to run within an enclave is part of the file (e.g. operating system, your application, root file system). 35 | 36 | ### The File Format 37 | 38 | #### High Level Structure 39 | 40 | On a high level an Enclave Image File consists of a general header and multiple data sections, each with their local header: 41 | 42 | ``` 43 | +-------------------------+ 44 | | EifHeader | 45 | +-------------------------+ 46 | | EifSectionHeader 0 | 47 | +-------------------------+ 48 | | Data Section 0 | 49 | +-------------------------+ 50 | | EifSectionHeader 1 | 51 | +-------------------------+ 52 | | Data Section 1 | 53 | +-------------------------+ 54 | > ... < 55 | +-------------------------+ 56 | | EifSectionHeader n | 57 | +-------------------------+ 58 | | Data Section n | 59 | +-------------------------+ 60 | ``` 61 | 62 | The Enclave Image File format supports a variety of data section types. The data section types can be mandatory or optional. 63 | Each section contains a specific type of data needed to run your application within a Nitro Enclave, the specifics of which are specified below in [Data sections](#data-sections). 64 | 65 | #### `EifHeader` 66 | 67 | The `EifHeader` is a general description of an enclave image file and provides metadata on the file as a whole. 68 | It has a fixed size of 548 bytes and the byte-order for all multi-byte fields is big-endian. 
The `EifHeader` is structured as follows: 69 | 70 | ``` 71 | 0x0000 +--------+--------+--------+--------+ 72 | | magic | 73 | 0x0004 +--------+--------+--------+--------+ 74 | | version | flags | 75 | 0x0008 +--------+--------+--------+--------+ 76 | | | 77 | + default_mem + 78 | | | 79 | 0x0010 +--------+--------+--------+--------+ 80 | | | 81 | + default_cpus + 82 | | | 83 | 0x0018 +--------+--------+--------+--------+ 84 | | reserved | num_sections | 85 | 0x001c +--------+--------+--------+--------+ 86 | | | 87 | + section_offset 0 + 88 | | | 89 | +--------+--------+--------+--------+ 90 | > ... < 91 | +--------+--------+--------+--------+ 92 | | | 93 | + section_offset 31 + 94 | | | 95 | 0x011c +--------+--------+--------+--------+ 96 | | | 97 | + section_size 0 + 98 | | | 99 | +--------+--------+--------+--------+ 100 | > ... < 101 | +--------+--------+--------+--------+ 102 | | | 103 | + section_size 31 + 104 | | | 105 | 0x021c +--------+--------+--------+--------+ 106 | + reserved | 107 | 0x0220 +--------+--------+--------+--------+ 108 | | crc_32 | 109 | 0x0224 +--------+--------+--------+--------+ 110 | ``` 111 | 112 | All reserved fields are ignored by the virtualization stack. 113 | 114 | ##### `magic` 115 | 116 | The `magic` field is a constant value chosen to easily identify enclave image files. 117 | The value equates to the ASCII string `.eif` or `[0x2e, 0x65, 0x69, 0x66]` as byte array. 118 | 119 | ##### `version` 120 | 121 | The `version` field encodes the specification version of the enclave image file. 122 | It is necessary to determine the file format version and chose the correct handling according to it. 123 | 124 | The latest version of the EIF format is `4`. 125 | The version gets incremented whenever a backwards incompatible change or addition to the file format is introduced. 
126 | 127 | ###### EIF format version history: 128 | 129 | * Version `0`: internal development version 130 | * Version `1`: internal development version 131 | * Version `2`: initial publicly released version as published in [aws-nitro-enclaves-cli v0.1.0](https://github.com/aws/aws-nitro-enclaves-cli/releases/tag/v0.1.0) (initial public pre-release) on 2020-08-13. 132 | This initial version set the basic file format structure and supported the base section types `EifSectionKernel`, `EifSectionCmdline`, and `EifSectionRamdisk`. 133 | * Version `3`: published in [aws-nitro-enclaves-cli v1.0.10](https://github.com/aws/aws-nitro-enclaves-cli/releases/tag/v1.0.10) (initial public production release) on 2021-04-29. 134 | This version added optional support for image signing through section type `EifSectionSignature`. 135 | * Version `4`: published in [aws-nitro-enclaves-cli v1.2.0](https://github.com/aws/aws-nitro-enclaves-cli/releases/tag/v1.2.0) on 2022-03-08. 136 | This version added a new mandatory section type `EifSectionMetadata` containing metadata about the environment the EIF was built in. 137 | 138 | *Versions `0` and `1` have not been published as part of any tooling release. 139 | They are not supported anymore and will fail at the crc check stage of loading the enclave image file.* 140 | 141 | *Versions `2` and `3` both behave the same way. 142 | As the `EifSectionSignature` section type has not been defined in version `2` and is optional in version `3` they are effectively handled the same.* 143 | 144 | *Version `4` introduced the `EifSectionMetadata` as a mandatory section and checks that it is part of an enclave image file. 145 | Apart from that it is handled the same way as Version `3`.* 146 | 147 | *Versions `>4` are reserved for the future and yield undefined behavior.* 148 | 149 | ##### `flags` 150 | 151 | The `flags` bit-field encodes properties for the file and the environment the file is targeted for. 
152 | The structure of the flags field is as follows: 153 | 154 | ``` 155 | f e d c b a 9 8 7 6 5 4 3 2 1 0 156 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 157 | | |a| 158 | | |r| 159 | | reserved |c| 160 | | |h| 161 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 162 | ``` 163 | * `arch`: determines the CPU architecture of the enclave this image file is for: `0` for `x86_64`, `1` for `aarch64`. 164 | This flag is mandatory and an EIF with an architecture other than the architecture of the enclave will be rejected. 165 | 166 | ##### `default_mem` 167 | 168 | The `default_mem` field describes the default amount of main memory in bytes for the enclave this image is going to run on. 169 | Currently this field is unused by the virtualization stack and [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli). 170 | 171 | ##### `default_cpus` 172 | 173 | The `default_cpus` field describes the default number of vCPUs for the enclave this image is going to run on. 174 | Currently this field is unused by the virtualization stack and [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli). 175 | 176 | ##### `num_sections` 177 | 178 | The `num_sections` field describes how many data sections an enclave image file contains. 179 | The value range for this is between 2 and 32 (`MAX_NUM_SECTIONS`). 180 | 181 | ##### `section_offsets` 182 | 183 | The `section_offsets` field is an array of 32 (`MAX_NUM_SECTIONS`) 8-byte values. 184 | The first `num_sections` entries in this array each describe the position of one data section within the file in bytes from the file start. 185 | All used entries have to be ordered in the same order as the corresponding data sections in the file, 186 | meaning `section_offsets[0]` describes the file offset for the first data section in the file, 187 | `section_offsets[1]` describes the file offset for the second data section in the file and so on. 
188 | 189 | ##### `section_sizes` 190 | 191 | The `section_sizes` field is an array of 32 (`MAX_NUM_SECTIONS`) 8-byte values. 192 | The first `num_sections` entries in this array each describe the size of one data section within the file in bytes. 193 | All used entries have to be ordered in the same order as the corresponding data sections in the file, 194 | meaning `section_sizes[0]` describes the size of the first data section in the file, 195 | `section_sizes[1]` describes the size of the second data section in the file and so on. 196 | The `section_sizes` as set in this array only cover the size of the data in a section and do not include the size of section headers. 197 | 198 | ##### `eif_crc32` 199 | 200 | The `eif_crc32` field contains the crc32 checksum over the whole file except this `eif_crc32` field itself; 201 | this checksum includes `EifHeader` and all sections, including their respective section headers, in the order they appear in the file. 202 | 203 | #### Data sections 204 | 205 | An enclave image file contains multiple data sections of different types, each with a distinct purpose. 206 | The high level format is common between all section types, consisting of an `EifSectionHeader` and binary data. 207 | Sections cannot overlap each other and must not overflow out of 64-bit address space. 208 | 209 | The ordering of the different section types within one enclave image file is mostly unconstrained. 210 | The only constraint on ordering the sections is that all `EifSectionRamdisk` sections must be located after the `EifSectionKernel` section. 
211 | 212 | ##### `EifSectionHeader` 213 | 214 | The section header is a basic description of a section: 215 | 216 | ``` 217 | 0x0000 +--------+--------+--------+--------+ 218 | | section_type | flags | 219 | 0x0004 +--------+--------+--------+--------+ 220 | | | 221 | + section_size + 222 | | | 223 | 0x000c +--------+--------+--------+--------+ 224 | ``` 225 | 226 | ###### `section_type` 227 | 228 | The `section_type` field describes the kind of section. 229 | The following is a list of valid section types and their numeric value. 230 | A detailed description of each section type follows below. 231 | 232 | * `EifSectionInvalid (0x00)` 233 | * `EifSectionKernel (0x01)` 234 | * `EifSectionCmdline (0x02)` 235 | * `EifSectionRamdisk (0x03)` 236 | * `EifSectionSignature (0x04)` (introduced in version 3 of the EIF format) 237 | * `EifSectionMetadata (0x05)` (introduced in version 4 of the EIF format) 238 | 239 | Enclave image files containing an `EifSectionInvalid` section or sections with a type outside of the above range (`>= 6`) will be rejected by the virtualization stack. 240 | 241 | ###### `flags` 242 | 243 | The `flags` bit-field can be used to encode properties of the binary data in a section. 244 | It is currently not used by any section type and is reserved for future use. 245 | 246 | ###### `section_size` 247 | 248 | The `section_size` field describes the size in bytes of the section's data. 249 | It must match the corresponding `section_sizes` entry in the global `EifHeader` structure. 250 | 251 | ##### `EifSectionKernel` 252 | 253 | The `EifSectionKernel` section data contains a Linux kernel image to be run within the enclave. 254 | The file format for that depends on the CPU architecture of your instance and its enclave. 255 | For `x86_64` instances the kernel section data has to be a `bzImage` (Refer to the [`x86_64` boot protocol for details](https://www.kernel.org/doc/Documentation/x86/boot.txt)). 
256 | For `aarch64` instances the kernel section data has to be an uncompressed kernel `Image` file (Refer to the [`arm64` boot protocol for details](https://www.kernel.org/doc/Documentation/arm64/booting.txt)). 257 | 258 | The [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli) provides pre-built kernel images for both architectures: 259 | * `x86_64`: [`blobs/x86_64/bzImage`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/x86_64/bzImage) 260 | * `aarch64`: [`blobs/aarch64/Image`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/aarch64/Image) 261 | 262 | `EifSectionKernel` section is a mandatory section and every enclave image file must contain exactly one `EifSectionKernel` section. 263 | 264 | ##### `EifSectionCmdline` 265 | 266 | The `EifSectionCmdline` section data contains a string with Linux kernel cmdline parameters for the enclave kernel. 267 | The kernel cmdline can be used to configure certain aspects of the kernel at boot time ([Documentation of kernel-parameters](https://www.kernel.org/doc/Documentation/admin-guide/kernel-parameters.txt)). 268 | 269 | The [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli) provides kernel cmdlines for both architectures (compatible with the pre-built kernel images in the same location): 270 | * `x86_64`: [`blobs/x86_64/cmdline`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/x86_64/cmdline) 271 | * `aarch64`: [`blobs/aarch64/cmdline`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/aarch64/cmdline) 272 | 273 | `EifSectionCmdline` section is a mandatory section and every enclave image file must contain exactly one `EifSectionCmdline` section. 274 | 275 | ##### `EifSectionRamdisk` 276 | 277 | The `EifSectionRamdisk` section contains data that is going to be part of the root file system of the enclave in `cpio` or `cpio.gz` (compressed) format. 
278 | All data of `EifSectionRamdisk` sections are concatenated to act together as one `initramfs` (See [Background on ramdisk composition and loading](#background-on-ramdisk-composition-and-loading) below). 279 | 280 | All `EifSectionRamdisk` sections must be positioned after the `EifSectionKernel` section within an enclave image file. 281 | 282 | ###### Example: ramdisks created with [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli) 283 | 284 | When creating an enclave image file through the [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli), two `EifSectionRamdisk` sections are created. 285 | The first ramdisk is the same for all applications and contains two main parts: 286 | 287 | * **An init executable:** The init process is the first user-space process started by the kernel. 288 | The task of the init process is to bring up the systems user-space and start relevant services. 289 | For Nitro Enclaves, the init processes tasks are reduced to the bare minimum of mounting special filesystems and files (i.e. procfs, sysfs, /dev), initializing the console device, loading the driver to interact with the Nitro Security Module, and launching the user application. 290 | The code for the minimal init process can be found in [aws-nitro-enclaves-sdk-bootstrap](https://github.com/aws/aws-nitro-enclaves-sdk-bootstrap/blob/main/init/init.c), 291 | and the [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli) provides pre-compiled executables of it for both architectures: 292 | * `x86_64`: [`blobs/x86_64/init`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/x86_64/init) 293 | * `aarch64`: [`blobs/aarch64/init`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/aarch64/init) 294 | * **The `nsm.ko` driver:** This is a loadable driver module for the Linux kernel which facilitates access to the Nitro Secure Module (NSM). 
295 | The driver exposes a special device in the enclave to communicate with the hypervisor to retrieve an attestation document, which can be used to prove the identity of the enclave. 296 | The source code for the NSM driver can be found in [aws-nitro-enclaves-sdk-bootstrap](https://github.com/aws/aws-nitro-enclaves-sdk-bootstrap/tree/main/nsm-driver), 297 | starting with Linux kernel series v6.8 the driver is part of the upstream Linux kernel. 298 | The [aws-nitro-enclaves-cli](https://github.com/aws/aws-nitro-enclaves-cli) provides pre-compiled versions of this driver for both architectures (compatible with the pre-built kernel images in the same location): 299 | * `x86_64`: [`blobs/x86_64/nsm.ko`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/x86_64/nsm.ko) 300 | * `aarch64`: [`blobs/aarch64/nsm.ko`](https://github.com/aws/aws-nitro-enclaves-cli/blob/main/blobs/aarch64/nsm.ko) 301 | 302 | The second ramdisk contains the application specific data and has three major parts: 303 | 304 | * **The root file system:** This is a filesystem providing all the software and runtime environment needed by the application as shipped in the applications docker image. 305 | * **`cmd` file:** The `cmd` file contains the default entry point of the application as specified in the Dockerfile through `CMD` (or `ENTRYPOINT` if `CMD` is not specified). 306 | * **`env` file:** The `env` file contains the environment variables of the application as specified in the Dockerfile through `ENV`. 307 | 308 | ###### Background on ramdisk composition and loading 309 | 310 | The Linux kernel supports various models of booting a system and bringing up user-space. 311 | One mechanism is through the Linux kernel’s `initramfs` format (See [Linux kernel documentation `driver-api/early-userspace/buffer-format.rst`](https://www.kernel.org/doc/Documentation/driver-api/early-userspace/buffer-format.rst)). 
312 | An `initramfs` consists of a collection of `cpio` files, either uncompressed (`.cpio`) or compressed (`.cpio.gz`). 313 | The Linux kernel contains a minimal interpreter for these files to load `initramfs` and construct the root file system from them. 314 | The `initramfs` usually contains a basic user-space to bootstrap a system and bring up additional devices like hard disks to switch to the final file system from disk. 315 | In the case of Nitro Enclaves, there is no support for persistent storage like hard disks, so the whole system is booted from and contained in the initramfs. 316 | 317 | For Nitro Enclaves, no bootloader is employed to load the kernel and ramdisks into memory. 318 | That part is performed by the hypervisor, which loads the kernel and ramdisk data into the enclaves memory before starting the enclave. 319 | The resulting enclave memory on startup is populated as follows from the EIF (For an example EIF with three ramdisk sections): 320 | 321 | ``` 322 | Enclave Memory Layout 323 | +-------------------------+ +--------------------+ 324 | | EifHeader | | zeroes | 0x0 325 | +-------------------------+ > ... < 326 | | EifSectionHeader 0 | | | 327 | +-------------------------+>--------------------------->+--------------------+ 328 | | Kernel Image | | Kernel Image | 329 | +-------------------------+ +------------------------>+--------------------+ --+ 330 | | EifSectionHeader 1 | | | Ramdisk (init) | | 331 | +-------------------------+ | +-------------------->+--------------------+ | 332 | | Kernel Cmdline | | | | Ramdisk (user0) | > initramfs 333 | +-------------------------+ | | +---------------->+--------------------+ | 334 | | EifSectionHeader 2 | | | | | Ramdisk (user1) | | 335 | +-------------------------+>--+ | | +--------------------+ -- 336 | | Ramdisk (init) | | | | | 337 | +-------------------------+ | | | zeroes | 338 | | EifSectionHeader 3 | | | > ... 
< 339 | +-------------------------+>------+ | | | 0xffffffffffffffff 340 | | Ramdisk (user0) | | +--------------------+ 341 | +-------------------------+ | 342 | | EifSectionHeader 4 | | 343 | +-------------------------+>----------+ 344 | | Ramdisk (user1) | 345 | +-------------------------+ 346 | | EifSectionHeader 5 | 347 | +-------------------------+ 348 | | Signature Data | 349 | +-------------------------+ 350 | | EifSectionHeader 6 | 351 | +-------------------------+ 352 | | Metadata | 353 | +-------------------------+ 354 | ``` 355 | 356 | ##### `EifSectionSignature` 357 | 358 | The `EifSectionSignature` section was introduced as an optional section in file format version 3. 359 | The section data has a maximum size of 32768 bytes (`SIGNATURE_MAX_SIZE`). 360 | 361 | The data format for the `EifSectionSignature` section is Concise Binary Object Representation (CBOR) as introduced in [RFC8949](https://datatracker.ietf.org/doc/html/rfc8949). 362 | The CBOR data contains an array of two-tuples, each containing a serialized signing certificate and a serialized CBOR Object Signing and Encryption (COSE) Sign1 object as described in [RFC8152](https://datatracker.ietf.org/doc/html/rfc8152). 363 | Although the `EifSectionSignature` section allows for multiple such tuples, only the first of these objects is currently verified against PCR0. 364 | This means the only relevant data to sign and add to the `EifSectionSignature` section is the tuple `(0, PCR0)`. 
365 | 366 | The overall structure of the CBOR data in `EifSectionSignature` can be described as follows, where `>>>>` and `<<<<` describe entry and exit boundaries of nested serialized CBOR data: 367 | 368 | ``` 369 | Array(1) { 370 | Map(2) { 371 | [0] { 372 | Text(19) // key = "signing_certificate" 373 | Array(len(cbor_serialize(cert))) // value = CBOR serialized certificate 374 | }, 375 | [1] { 376 | Text(9) // key = "signature" 377 | Array(len(cbor_serialize(cose_sign1))) // value = CBOR serialized COSE_Sign1 object 378 | >>>> 379 | Array(4) { 380 | [0] ByteString(len(cbor_serialize(protected))), // CBOR serialized COSE protected header 381 | >>>> 382 | Map(1) { 383 | unsigned(1) // key = 1 (alg) 384 | negative() // value = Signing Algorithm (-7 for ES256, -35 for ES384, -36 for ES512) 385 | } 386 | <<<< 387 | [1] Map(0), // CBOR serialized COSE unprotected header (empty) 388 | [2] ByteString(len(cbor_serialize(payload))), // CBOR serialized COSE_Sign1 payload 389 | >>>> 390 | Map(2) { 391 | [0] { 392 | Text(14) // key = "register_index" 393 | Unsigned() // value = (Index of which PCR got signed) 394 | }, 395 | [1] { 396 | Text(14) // key = "register_value" 397 | Array(48) // value = PCR value bytes 398 | }, 399 | } 400 | <<<< 401 | [3] ByteString(len()) // Signature bytes 402 | } 403 | <<<< 404 | } 405 | } 406 | } 407 | ``` 408 | 409 | ###### Background on COSE Sign1 and usage in EIF 410 | 411 | COSE Sign1 provides a signature structure to have a message signed by a single signer: 412 | 413 | ``` 414 | +-------------------------+-----------------+-------------------+ 415 | | COSE Headers | Payload | Signature | 416 | +- - - - - - - - - - - - -+- - - - - - - - -+- - - - - - - - - -+ 417 | | protected | unprotected | plaintext | signature bytes | 418 | +-------------------------+-----------------+-------------------+ 419 | ``` 420 | 421 | The COSE Headers are divided into two buckets, the protected bucket contains metadata for the signature layer that is part 
of the data being signed (covered/protected by the signature), 422 | while the unprotected bucket contains metadata that does not contribute towards the signature. 423 | Each bucket is a map of key-value pairs. The payload is the plaintext data that is being signed. 424 | The signature contains the signature bytes. 425 | 426 | For the usage in EIF the data contained in the different parts of the COSE Sign1 object is as follows: 427 | 428 | * The protected bucket of COSE Headers only contains one key-value pair identifying the used signature algorithm. 429 | * The unprotected bucket of COSE Headers is empty. 430 | * The payload contains a tuple describing one platform configuration register (PCR) for the EIF file, specifically a two-tuple containing the PCR's index and its value. 431 | (More details on PCRs can be found below in section [EIF Measurements](#eif-measurements)). 432 | * The Signature contains an Elliptic Curve Digital Signature Algorithm (ECDSA) signature of the protected headers and payload with one of the following ECDSA variants: ES256, ES384, and ES512. 433 | 434 | ##### `EifSectionMetadata` 435 | 436 | The `EifSectionMetadata` section was introduced as a mandatory section in file format version 4. 
437 | The section data contains JSON describing the build environment that produced the enclave image file according to the following JSON schema: 438 | 439 | ``` 440 | { 441 | "$schema": "https://json-schema.org/draft/2020-12/schema", 442 | "$id": "https://github.com/aws/aws-nitro-enclaves-image-format", 443 | "title": "EIF Metadata Content", 444 | "description": "Format Content of EIFSection of type EifSectionMetadata", 445 | "type": "object", 446 | "properties": { 447 | "ImageName": { 448 | "type": "string", 449 | "description": "Name of the EIF image" 450 | }, 451 | "ImageVersion": { 452 | "type": "string", 453 | "description": "EIF version for this image file" 454 | }, 455 | "BuildMetadata": { 456 | "type": "object", 457 | "description": "Metadata on the build environment", 458 | "properties": { 459 | "BuildTime": { 460 | "type": "string", 461 | "description": "Time the image was build at" 462 | }, 463 | "BuildTool": { 464 | "type": "string", 465 | "description": "Name of the tool that produced the image" 466 | }, 467 | "BuildToolVersion": { 468 | "type": "string", 469 | "description": "Version of the tool that produced the image" 470 | }, 471 | "OperatingSystem": { 472 | "type": "string", 473 | "description": "Name of the OS the image was build on" 474 | }, 475 | "KernelVersion": { 476 | "type": "string", 477 | "description": "Kernel version of the build host" 478 | } 479 | }, 480 | "required": [ "BuildTime", "BuildTool", "BuildToolVersion", "OperatingSystem", "KernelVersion" ] 481 | }, 482 | "DockerInfo": { 483 | "type": "object", 484 | "description": "Metadata on the docker image this EIF was based on, as produced by `docker image inspect`" 485 | }, 486 | "CustomMetadata": { 487 | "type": "object", 488 | "description": "Optional custom metadata to annotate the EIF with" 489 | } 490 | }, 491 | "required": [ "ImageName", "ImageVersion", "BuildMetadata", "DockerInfo" ] 492 | } 493 | ``` 494 | 495 | The `EifSectionMetadata` section is not part of any 
measurement for an enclave and does not get validated by the hypervisor, beyond checking for its existence with file format version 4 and above. 496 | 497 | ### EIF Measurements 498 | 499 | Nitro Enclaves includes attestation mechanisms to prove its identity and build trust with external services. 500 | As part of these measurements the enclave exposes a set of platform configuration registers (PCRs), each providing a set of hashes over some identifying data for the enclaves configuration and code. 501 | For the EIF files, there are four PCRs that describe it. They are `PCR0`, `PCR1`, `PCR2`, and `PCR8`: 502 | 503 | ``` 504 | PCR0 PCR1 PCR2 PCR8* 505 | | +-------------------------+ | | | 506 | | | EifHeader | | | | 507 | | +-------------------------+ | | | 508 | | | EifSectionHeader 0 | | | | 509 | | +-------------------------+ | | | 510 | +----<| Kernel Image |>----+ | | 511 | | +-------------------------+ | | | 512 | | | EifSectionHeader 1 | | | | 513 | | +-------------------------+ | | | 514 | +----<| Kernel Cmdline |>----+ | | 515 | | +-------------------------+ | | | 516 | | | EifSectionHeader 2 | | | | 517 | | +-------------------------+ | | | 518 | +----<| Ramdisk (init) |>----+ | | 519 | | +-------------------------+ | | 520 | | | EifSectionHeader 3 | | | 521 | | +-------------------------+ | | 522 | +----<| Ramdisk (user0) |>----------+ | 523 | | +-------------------------+ | | 524 | | | EifSectionHeader 4 | | | 525 | | +-------------------------+ | | 526 | +----<| Ramdisk (user1) |>----------+ | 527 | +-------------------------+ | 528 | | EifSectionHeader 5 | | 529 | +-------------------------+ | 530 | | Signature Data |>----------------+ 531 | +-------------------------+ 532 | | EifSectionHeader 6 | 533 | +-------------------------+ 534 | | Metadata | 535 | +-------------------------+ 536 | ``` 537 | 538 | All EIF specific PCRs are calculated in a multi-level scheme containing a fixed initial state and a digest over specific parts of the EIF. 
539 | They are calculated as the sha384 message digest (described in [RFC6234](https://datatracker.ietf.org/doc/html/rfc6234)) over the concatenation of the `initial_digest` and the `content_digest`: 540 | 541 | ``` 542 | PCRX = sha384sum( initial_digest.content_digest ) 543 | ``` 544 | 545 | The `initial_digest` is the same for all PCRs and consists of 48 zero-bytes. 546 | The `content_digest` contains data on different parts of an EIF depending on the PCR. 547 | 548 | #### `PCR0` 549 | 550 | `PCR0` contains a measurement of all the data influencing the runtime of code in an EIF. 551 | It includes a sha384 message digest over the contiguous data of the `EifSectionKernel`, `EifSectionCmdline`, and all `EifSectionRamdisk` sections in the order they are present in the enclave image file. 552 | For `PCR0` this means `content_digest` is calculated as follows: 553 | 554 | ``` 555 | content_digest[PCR0] = sha384sum( data(EifSectionKernel).data(EifSectionCmdline).data(EifSectionRamdisk[..]) ) 556 | ``` 557 | 558 | *Note: The order of elements in that calculation depends on the order of sections in the EIF. 559 | Only the data for each section is part of the calculation, the headers are excluded.* 560 | 561 | #### `PCR1` 562 | 563 | `PCR1` contains a measurement of all the data influencing the bootstrap and kernel in an EIF. 564 | It includes a sha384 message digest over the contiguous data of the `EifSectionKernel`, `EifSectionCmdline`, and the first `EifSectionRamdisk` section in the order they are present in the enclave image file. 565 | For `PCR1` this means `content_digest` is calculated as follows: 566 | 567 | ``` 568 | content_digest[PCR1] = sha384sum( data(EifSectionKernel).data(EifSectionCmdline).data(EifSectionRamdisk[0]) ) 569 | ``` 570 | 571 | *Note: The order of elements in that calculation depends on the order of sections in the EIF. 
572 | Only the data for each section is part of the calculation, the headers are excluded.* 573 | 574 | #### `PCR2` 575 | 576 | `PCR2` contains a measurement of the user application in an EIF. 577 | It includes a sha384 message digest over the contiguous data of all `EifSectionRamdisk` sections excluding the first `EifSectionRamdisk` section in the order they are present in the enclave image file. 578 | For `PCR2` this means `content_digest` is calculated as follows: 579 | 580 | ``` 581 | content_digest[PCR2] = sha384sum( data(EifSectionRamdisk[1..]) ) 582 | ``` 583 | 584 | *Note: The order of elements in that calculation depends on the order of sections in the EIF. 585 | Only the data for each section is part of the calculation, the headers are excluded.* 586 | 587 | #### `PCR8` 588 | 589 | `PCR8` is populated only if an `EifSectionSignature` section is part of the enclave image file. 590 | In that case `PCR8` contains a measurement over the DER representation of the certificate used to sign `PCR0` and contained in `EifSectionSignature`. 591 | For `PCR8` this means `content_digest` is calculated as follows: 592 | 593 | ``` 594 | content_digest[PCR8] = sha384sum( signing_certificate_in_DER ) 595 | -------------------------------------------------------------------------------- /eif_build/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eif_build" 3 | version = "0.3.1" 4 | authors = ["The AWS Nitro Enclaves Team "] 5 | edition = "2018" 6 | license = "Apache-2.0" 7 | description = "This CLI tool provides a low level path to assemble an enclave image format (EIF) file used in AWS Nitro Enclaves." 
8 | repository = "https://github.com/aws/aws-nitro-enclaves-image-format" 9 | readme = "README.md" 10 | keywords = ["Nitro", "Enclaves", "AWS", "EIF"] 11 | rust-version = "1.71" 12 | 13 | [dependencies] 14 | aws-nitro-enclaves-image-format = "0.5" 15 | sha2 = "0.9.5" 16 | serde = { version = ">=1.0", features = ["derive"] } 17 | serde_json = "1.0" 18 | clap = "3.2" 19 | chrono = { version = "0.4", default-features = false, features = ["clock"]} 20 | 21 | -------------------------------------------------------------------------------- /eif_build/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | -------------------------------------------------------------------------------- /eif_build/README.md: -------------------------------------------------------------------------------- 1 | ## eif\_build 2 | 3 | [![status]][actions] [![version]][crates.io] [![docs]][docs.rs] ![msrv] 4 | 5 | [status]: https://img.shields.io/github/actions/workflow/status/aws/aws-nitro-enclaves-image-format/ci.yml?branch=main 6 | [actions]: https://github.com/aws/aws-nitro-enclaves-image-format/actions?query=branch%3Amain 7 | [version]: https://img.shields.io/crates/v/eif_build.svg 8 | [crates.io]: https://crates.io/crates/eif_build 9 | [docs]: https://img.shields.io/docsrs/eif_build 10 | [docs.rs]: https://docs.rs/eif_build 11 | [msrv]: https://img.shields.io/badge/MSRV-1.71.1-blue 12 | 13 | This CLI tool provides a low level path to assemble an enclave image format (EIF) file used in AWS Nitro Enclaves. 14 | 15 | ## Security 16 | 17 | See [CONTRIBUTING](../CONTRIBUTING.md#security-issue-notifications) for more information. 18 | 19 | ## License 20 | 21 | This project is licensed under the Apache-2.0 License. 22 | 23 | ## Building 24 | 25 | To compile the `eif_build` tool, run 26 | 27 | ```sh 28 | $ cargo build --all --release 29 | ``` 30 | 31 | The resulting binary will be under `./target/release/eif_build`. 32 | 33 | ## Usage 34 | 35 | ```plain 36 | Enclave image format builder 37 | Builds an eif file 38 | 39 | USAGE: 40 | eif_build [OPTIONS] --kernel --cmdline --output --ramdisk 41 | 42 | OPTIONS: 43 | --arch <(x86_64|aarch64)> 44 | Sets image architecture [default: x86_64] 45 | 46 | --build-time 47 | Overrides image build time. [default: 2024-07-09T17:16:38.424202433+00:00] 48 | 49 | --build-tool 50 | Image build tool name. [default: eif_build] 51 | 52 | --build-tool-version 53 | Overrides image build tool version. 
[default: 0.2.0] 54 | 55 | --cmdline 56 | Sets the cmdline 57 | 58 | -h, --help 59 | Print help information 60 | 61 | --img-kernel 62 | Overrides image Operating System kernel version. [default: "Unknown version"] 63 | 64 | --img-os 65 | Overrides image Operating System name. [default: "Generic Linux"] 66 | 67 | --kernel 68 | Sets path to a bzImage/Image file for x86_64/aarch64 architecture 69 | 70 | --kernel_config 71 | Sets path to a bzImage.config/Image.config file for x86_64/aarch64 architecture 72 | 73 | --metadata 74 | Path to JSON containing the custom metadata provided by the user. 75 | 76 | --name 77 | Name for enclave image 78 | 79 | --output 80 | Specify output file path 81 | 82 | --private-key 83 | Specify KMS key ARN, or the path to the local private key file 84 | 85 | --ramdisk 86 | Sets path to a ramdisk file representing a cpio.gz archive 87 | 88 | --signing-certificate 89 | Specify the path to the signing certificate 90 | 91 | --version 92 | Version of the enclave image 93 | 94 | --algo <(sha256|sha384|sha512)> 95 | Sets algorithm to measure the image [default: sha384] 96 | ``` 97 | -------------------------------------------------------------------------------- /eif_build/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | #![deny(warnings)] 5 | /// Simple utility tool for building an Eif file 6 | /// cargo run --example eif_build -- --help should be self explanatory. 
7 | /// Example of usage: 8 | /// cargo run --example eif_build --target-dir=~/vmm-build -- --kernel bzImage \ 9 | /// --cmdline "reboot=k initrd=0x2000000,3228672 root=/dev/ram0 panic=1 pci=off nomodules \ 10 | /// console=ttyS0 i8042.noaux i8042.nomux i8042.nopnp i8042.dumbkbd" 11 | /// --ramdisk initramfs_x86.txt_part1.cpio.gz 12 | /// --ramdisk initramfs_x86.txt_part2.cpio.gz 13 | /// --output eif.bin 14 | /// 15 | use std::path::Path; 16 | 17 | use aws_nitro_enclaves_image_format::defs::{EifBuildInfo, EifIdentityInfo, EIF_HDR_ARCH_ARM64}; 18 | use aws_nitro_enclaves_image_format::utils::identity::parse_custom_metadata; 19 | use aws_nitro_enclaves_image_format::{ 20 | generate_build_info, 21 | utils::{get_pcrs, EifBuilder, SignKeyData}, 22 | }; 23 | use chrono::offset::Utc; 24 | use clap::{App, Arg, ValueSource}; 25 | use serde_json::json; 26 | use sha2::{Digest, Sha256, Sha384, Sha512}; 27 | use std::fmt::Debug; 28 | use std::fs::OpenOptions; 29 | use std::io::Write; 30 | use ValueSource::CommandLine; 31 | 32 | pub struct EifBuildParameters<'a> { 33 | pub kernel_path: &'a str, 34 | pub cmdline: &'a str, 35 | pub ramdisks: Vec<&'a str>, 36 | pub output_path: &'a str, 37 | pub sign_info: Option, 38 | pub eif_info: EifIdentityInfo, 39 | pub arch: &'a str, 40 | } 41 | 42 | fn main() { 43 | let now = Utc::now().to_rfc3339(); 44 | let build_tool = env!("CARGO_PKG_NAME").to_string(); 45 | let build_tool_version = env!("CARGO_PKG_VERSION").to_string(); 46 | let img_os = "OS".to_string(); 47 | let img_kernel = "kernel".to_string(); 48 | let matches = App::new("Enclave image format builder") 49 | .about("Builds an eif file") 50 | .arg( 51 | Arg::with_name("kernel") 52 | .long("kernel") 53 | .value_name("FILE") 54 | .required(true) 55 | .help("Sets path to a bzImage/Image file for x86_64/aarch64 architecture") 56 | .takes_value(true), 57 | ) 58 | .arg( 59 | Arg::with_name("kernel_config") 60 | .long("kernel_config") 61 | .value_name("FILE") 62 | .help("Sets path to a 
bzImage.config/Image.config file for x86_64/aarch64 architecture") 63 | .takes_value(true), 64 | ) 65 | .arg( 66 | Arg::with_name("cmdline") 67 | .long("cmdline") 68 | .help("Sets the cmdline") 69 | .value_name("String") 70 | .required(true) 71 | .takes_value(true), 72 | ) 73 | .arg( 74 | Arg::with_name("output") 75 | .long("output") 76 | .help("Specify output file path") 77 | .value_name("FILE") 78 | .required(true) 79 | .takes_value(true), 80 | ) 81 | .arg( 82 | Arg::with_name("ramdisk") 83 | .long("ramdisk") 84 | .value_name("FILE") 85 | .required(true) 86 | .help("Sets path to a ramdisk file representing a cpio.gz archive") 87 | .takes_value(true) 88 | .multiple(true) 89 | .number_of_values(1), 90 | ) 91 | .arg( 92 | Arg::with_name("signing-certificate") 93 | .long("signing-certificate") 94 | .help("Specify the path to the signing certificate") 95 | .takes_value(true) 96 | .requires("private-key"), 97 | ) 98 | .arg( 99 | Arg::with_name("private-key") 100 | .long("private-key") 101 | .help("Path to a local key or KMS key ARN") 102 | .takes_value(true) 103 | .requires("signing-certificate"), 104 | ) 105 | .arg( 106 | Arg::with_name("image_name") 107 | .long("name") 108 | .help("Name for enclave image") 109 | .takes_value(true), 110 | ) 111 | .arg( 112 | Arg::with_name("image_version") 113 | .long("version") 114 | .help("Version of the enclave image") 115 | .takes_value(true), 116 | ) 117 | .arg( 118 | Arg::with_name("metadata") 119 | .long("metadata") 120 | .help("Path to JSON containing the custom metadata provided by the user.") 121 | .takes_value(true), 122 | ) 123 | .arg( 124 | Arg::with_name("arch") 125 | .long("arch") 126 | .help("Sets image architecture") 127 | .default_value("x86_64") 128 | .value_parser(["x86_64", "aarch64"]) 129 | .takes_value(true), 130 | ) 131 | .arg( 132 | Arg::with_name("build_time") 133 | .long("build-time") 134 | .help("Overrides image build time.") 135 | .default_value(&now) 136 | .takes_value(true), 137 | ) 138 | .arg( 139 | 
Arg::with_name("build_tool") 140 | .long("build-tool") 141 | .help("Image build tool name.") 142 | .default_value(&build_tool) 143 | .takes_value(true), 144 | ) 145 | .arg( 146 | Arg::with_name("build_tool_version") 147 | .long("build-tool-version") 148 | .help("Overrides image build tool version.") 149 | .default_value(&build_tool_version) 150 | .takes_value(true), 151 | ) 152 | .arg( 153 | Arg::with_name("img_os") 154 | .long("img-os") 155 | .help("Overrides image Operating System name.") 156 | .default_value(&img_os) 157 | .takes_value(true), 158 | ) 159 | .arg( 160 | Arg::with_name("img_kernel") 161 | .long("img-kernel") 162 | .help("Overrides image Operating System kernel version.") 163 | .default_value(&img_kernel) 164 | .takes_value(true), 165 | ) 166 | .arg( 167 | Arg::with_name("algo") 168 | .long("algo") 169 | .help("Sets algorithm to be used for measuring the image") 170 | .possible_values(["sha256", "sha384", "sha512"]) 171 | .default_value("sha384") 172 | ) 173 | .get_matches(); 174 | 175 | let arch = matches.value_of("arch").expect("default value"); 176 | 177 | let kernel_path = matches 178 | .value_of("kernel") 179 | .expect("Kernel path is a mandatory option"); 180 | 181 | let cmdline = matches 182 | .value_of("cmdline") 183 | .expect("Cmdline is a mandatory option"); 184 | 185 | let ramdisks: Vec<&str> = matches 186 | .values_of("ramdisk") 187 | .expect("At least one ramdisk should be specified") 188 | .collect(); 189 | 190 | let output_path = matches 191 | .value_of("output") 192 | .expect("Output file should be provided"); 193 | 194 | let signing_certificate = matches.value_of("signing-certificate"); 195 | let private_key = matches.value_of("private-key"); 196 | 197 | let sign_info = match (private_key, signing_certificate) { 198 | (Some(key), Some(cert)) => SignKeyData::new(key, Path::new(&cert)).map_or_else( 199 | |e| { 200 | eprintln!("Could not read signing info: {:?}", e); 201 | None 202 | }, 203 | Some, 204 | ), 205 | _ => None, 206 | }; 
207 | 208 | let img_name = matches.value_of("image_name").map(|val| val.to_string()); 209 | let img_version = matches.value_of("image_name").map(|val| val.to_string()); 210 | let metadata_path = matches.value_of("metadata").map(|val| val.to_string()); 211 | let metadata = match metadata_path { 212 | Some(ref path) => { 213 | parse_custom_metadata(path).expect("Can not parse specified metadata file") 214 | } 215 | None => json!(null), 216 | }; 217 | 218 | let mut build_info = EifBuildInfo { 219 | build_time: matches 220 | .get_one::("build_time") 221 | .expect("default value") 222 | .to_string(), 223 | build_tool: matches 224 | .get_one::("build_tool") 225 | .expect("default value") 226 | .to_string(), 227 | build_tool_version: matches 228 | .get_one::("build_tool_version") 229 | .expect("default value") 230 | .to_string(), 231 | img_os: matches 232 | .get_one::("img_os") 233 | .expect("default value") 234 | .to_string(), 235 | img_kernel: matches 236 | .get_one::("img_kernel") 237 | .expect("default value") 238 | .to_string(), 239 | }; 240 | 241 | if let Some(kernel_config) = matches.get_one::("kernel_config") { 242 | build_info = generate_build_info!(kernel_config).expect("Can not generate build info"); 243 | } 244 | 245 | if matches.value_source("build_time") == Some(CommandLine) { 246 | build_info.build_time = matches 247 | .get_one::("build_time") 248 | .expect("default value") 249 | .to_string(); 250 | } 251 | 252 | if matches.value_source("build_tool") == Some(CommandLine) { 253 | build_info.build_tool = matches 254 | .get_one::("build_tool") 255 | .expect("default_value") 256 | .to_string(); 257 | } 258 | 259 | if matches.value_source("build_tool_version") == Some(CommandLine) { 260 | build_info.build_tool_version = matches 261 | .get_one::("build_tool_version") 262 | .expect("default value") 263 | .to_string(); 264 | } 265 | 266 | if matches.value_source("img_os") == Some(CommandLine) { 267 | build_info.img_os = matches 268 | .get_one::("img_os") 269 | 
.expect("default value") 270 | .to_string(); 271 | } 272 | 273 | if matches.value_source("img_kernel") == Some(CommandLine) { 274 | build_info.img_kernel = matches 275 | .get_one::("img_kernel") 276 | .expect("default value") 277 | .to_string(); 278 | } 279 | 280 | let eif_info = EifIdentityInfo { 281 | img_name: img_name.unwrap_or_else(|| { 282 | // Set default value to kernel file name 283 | Path::new(kernel_path) 284 | .file_name() 285 | .expect("Valid kernel file path should be provided") 286 | .to_str() 287 | .unwrap() 288 | .to_string() 289 | }), 290 | img_version: img_version.unwrap_or_else(|| "1.0".to_string()), 291 | build_info, 292 | docker_info: json!(null), 293 | custom_info: metadata, 294 | }; 295 | 296 | let params = EifBuildParameters { 297 | kernel_path, 298 | cmdline, 299 | ramdisks, 300 | output_path, 301 | sign_info, 302 | eif_info, 303 | arch, 304 | }; 305 | 306 | let algo = matches 307 | .value_of("algo") 308 | .expect("Clap must specify default value"); 309 | match algo { 310 | "sha256" => build_eif(params, Sha256::new()), 311 | "sha512" => build_eif(params, Sha512::new()), 312 | "sha384" => build_eif(params, Sha384::new()), 313 | _ => unreachable!("Clap guarantees that we get only the specified values"), 314 | } 315 | } 316 | 317 | pub fn build_eif(params: EifBuildParameters, hasher: T) { 318 | let mut output_file = OpenOptions::new() 319 | .read(true) 320 | .create(true) 321 | .write(true) 322 | .truncate(true) 323 | .open(params.output_path) 324 | .expect("Could not create output file"); 325 | 326 | let flags = match params.arch { 327 | "aarch64" => EIF_HDR_ARCH_ARM64, 328 | "x86_64" => 0, 329 | _ => panic!("Invalid architecture: {}", params.arch), 330 | }; 331 | 332 | let mut build = EifBuilder::new( 333 | Path::new(params.kernel_path), 334 | params.cmdline.to_string(), 335 | params.sign_info, 336 | hasher.clone(), 337 | flags, // flags 338 | params.eif_info, 339 | ); 340 | for ramdisk in params.ramdisks { 341 | 
build.add_ramdisk(Path::new(ramdisk)); 342 | } 343 | 344 | build.write_to(&mut output_file); 345 | let signed = build.is_signed(); 346 | println!("Output file: {}", params.output_path); 347 | build.measure(); 348 | let measurements = get_pcrs( 349 | &mut build.image_hasher, 350 | &mut build.bootstrap_hasher, 351 | &mut build.customer_app_hasher, 352 | &mut build.certificate_hasher, 353 | hasher.clone(), 354 | signed, 355 | ) 356 | .expect("Failed to get boot measurements."); 357 | 358 | println!("{}", serde_json::to_string_pretty(&measurements).unwrap()); 359 | } 360 | -------------------------------------------------------------------------------- /src/defs/eif_hasher.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | #![deny(warnings)] 5 | 6 | use sha2::Digest; 7 | use std::fmt::Debug; 8 | use std::io::Result as IoResult; 9 | use std::io::Write; 10 | use std::vec::Vec; 11 | 12 | use serde::{Deserialize, Serialize}; 13 | 14 | /// EifHasher class 15 | /// 16 | /// A simple serialization/deserialization friendly Hasher class. 17 | /// The only reason this exists is that we can't serialize a Hasher 18 | /// from sha2 crate, so we are going to use the following algorithm: 19 | /// 20 | /// 1. Initialize digest with 0. 21 | /// 2. Gather block_size bytes in block field. 22 | /// 3. digest = Hash(Concatenate(digest, block)) 23 | /// 4. Goto step 2 24 | #[derive(Debug, Clone, Serialize, Deserialize)] 25 | pub struct EifHasher { 26 | /// The bytes that have not been hashed yet, they get hashed 27 | /// once we gather block_size bytes. 28 | pub block: Vec, 29 | /// Intermediary digest for the blocks processed untill now. 
30 | pub digest: Vec, 31 | /// The number of bytes we need to gather before hashing, it needs to 32 | /// be at least twice of the hasher output, since the hash of each output 33 | /// is fed back into the hasher the size of the block impacts the performance 34 | /// 0 means nothing is cached and all the bytes are feed to the hasher. 35 | pub block_size: usize, 36 | pub output_size: usize, 37 | #[serde(skip)] 38 | /// The hasher to be used, it is always in reset state, so we can skip 39 | /// serialization. 40 | pub hasher: T, 41 | } 42 | 43 | fn initial_digest(len: usize) -> Vec { 44 | vec![0; len] 45 | } 46 | 47 | impl EifHasher { 48 | pub fn new(block_size: usize, mut hasher: T) -> Result { 49 | let output_size = hasher.finalize_reset().len(); 50 | if block_size > 0 && output_size * 2 > block_size { 51 | return Err("Invalid block_size".to_string()); 52 | } 53 | 54 | Ok(EifHasher { 55 | block: Vec::with_capacity(block_size), 56 | digest: initial_digest(output_size), 57 | block_size, 58 | output_size, 59 | hasher, 60 | }) 61 | } 62 | 63 | /// EifHasher constructor with fixed block size. 64 | /// 65 | /// It is needed in order for all clients of this class to use the same 66 | /// block size if we want to get the same results. 67 | pub fn new_with_fixed_block_size(hasher: T) -> Result { 68 | /// This impacts the performance of the hasher, it is a sweet 69 | /// spot where we get decent performance, 200MB/s, and where are not 70 | /// forced to keep a large serialized state, 256 bytes for SHA256. 71 | pub const FIXED_BLOCK_SIZE_HASHER_OUPUT_RATIO: usize = 8; 72 | Self::new( 73 | hasher.clone().finalize_reset().len() * FIXED_BLOCK_SIZE_HASHER_OUPUT_RATIO, 74 | hasher, 75 | ) 76 | } 77 | 78 | /// EifHasher constructor without cache. 79 | /// 80 | /// EIfHasher acts like passthrough passing all the bytes to the actual hasher. 
81 | pub fn new_without_cache(hasher: T) -> Result { 82 | Self::new(0, hasher) 83 | } 84 | 85 | pub fn finalize_reset(&mut self) -> IoResult> { 86 | if self.block_size == 0 { 87 | return Ok(self.hasher.finalize_reset().to_vec()); 88 | } 89 | if !self.block.is_empty() { 90 | self.consume_block()?; 91 | } 92 | let result = self.digest.clone(); 93 | self.digest = initial_digest(self.output_size); 94 | Ok(result) 95 | } 96 | 97 | pub fn tpm_extend_finalize_reset(&mut self) -> IoResult> { 98 | let result = self.finalize_reset()?; 99 | let mut hasher = self.hasher.clone(); 100 | 101 | hasher.write_all(&initial_digest(self.output_size))?; 102 | hasher.write_all(&result[..])?; 103 | Ok(hasher.finalize_reset().to_vec()) 104 | } 105 | 106 | fn consume_block(&mut self) -> IoResult<()> { 107 | self.hasher.write_all(&self.digest[..])?; 108 | self.hasher.write_all(&self.block[..])?; 109 | self.block.clear(); 110 | let result = self.hasher.finalize_reset(); 111 | self.digest.copy_from_slice(&result[..]); 112 | Ok(()) 113 | } 114 | } 115 | 116 | impl Write for EifHasher { 117 | fn write(&mut self, buf: &[u8]) -> IoResult { 118 | if self.block_size == 0 { 119 | self.hasher.write_all(buf)?; 120 | return Ok(buf.len()); 121 | } 122 | let mut remaining = buf; 123 | while self.block.len() + remaining.len() >= self.block_size { 124 | let (for_hasher, for_next_iter) = 125 | remaining.split_at(self.block_size - self.block.len()); 126 | self.block.extend_from_slice(for_hasher); 127 | self.consume_block()?; 128 | remaining = for_next_iter; 129 | } 130 | 131 | self.block.extend_from_slice(remaining); 132 | Ok(buf.len()) 133 | } 134 | 135 | fn flush(&mut self) -> IoResult<()> { 136 | Ok(()) 137 | } 138 | } 139 | 140 | #[cfg(test)] 141 | mod tests { 142 | use super::{initial_digest, EifHasher}; 143 | use sha2::{Digest, Sha256, Sha384, Sha512}; 144 | use std::fmt::Debug; 145 | use std::io::Write; 146 | 147 | const INPUT_BLOCK_SIZE_SHA256: usize = 64; 148 | const INPUT_BLOCK_SIZE_SHA384: usize = 
128; 149 | const INPUT_BLOCK_SIZE_SHA512: usize = 128; 150 | 151 | #[test] 152 | fn invalid_block_size() { 153 | let hasher = EifHasher::new(31, Sha256::new()); 154 | assert!(hasher.is_err()); 155 | 156 | let hasher = EifHasher::new(63, Sha512::new()); 157 | assert!(hasher.is_err()); 158 | 159 | let hasher = EifHasher::new(47, Sha384::new()); 160 | assert!(hasher.is_err()) 161 | } 162 | 163 | #[test] 164 | fn test_hash_less_values_than_block_size() { 165 | hash_less_values_than_block_size(Sha256::new(), INPUT_BLOCK_SIZE_SHA256); 166 | hash_less_values_than_block_size(Sha512::new(), INPUT_BLOCK_SIZE_SHA512); 167 | hash_less_values_than_block_size(Sha384::new(), INPUT_BLOCK_SIZE_SHA384); 168 | } 169 | 170 | fn hash_less_values_than_block_size( 171 | mut hasher_alg: T, 172 | block_size: usize, 173 | ) { 174 | let data = vec![78u8; block_size - 1]; 175 | let output_size = hasher_alg.finalize_reset().len(); 176 | let mut hasher = EifHasher::new(block_size, hasher_alg.clone()).unwrap(); 177 | 178 | hasher.write_all(&data[..]).unwrap(); 179 | hasher_alg 180 | .write_all(&initial_digest(output_size)[..]) 181 | .unwrap(); 182 | hasher_alg.write_all(&data[..]).unwrap(); 183 | 184 | let mut hasher_clone = hasher.clone(); 185 | let mut hasher_alg_clone = hasher_alg.clone(); 186 | assert_eq!( 187 | hasher_alg.finalize_reset().to_vec(), 188 | hasher.finalize_reset().unwrap() 189 | ); 190 | 191 | let result = hasher_alg_clone.finalize_reset(); 192 | hasher_alg_clone 193 | .write_all(&initial_digest(output_size)[..]) 194 | .unwrap(); 195 | hasher_alg_clone.write_all(&result[..]).unwrap(); 196 | 197 | assert_eq!( 198 | hasher_clone.tpm_extend_finalize_reset().unwrap(), 199 | hasher_alg_clone.finalize_reset().to_vec() 200 | ); 201 | } 202 | 203 | #[test] 204 | fn test_hash_exact_block_size_values() { 205 | hash_exact_block_size_values(Sha256::new(), INPUT_BLOCK_SIZE_SHA256); 206 | hash_exact_block_size_values(Sha384::new(), INPUT_BLOCK_SIZE_SHA384); 207 | 
hash_exact_block_size_values(Sha512::new(), INPUT_BLOCK_SIZE_SHA512); 208 | } 209 | 210 | fn hash_exact_block_size_values( 211 | mut hasher_alg: T, 212 | block_size: usize, 213 | ) { 214 | let data = vec![78u8; block_size]; 215 | let output_size = hasher_alg.finalize_reset().len(); 216 | let mut hasher = EifHasher::new(block_size, hasher_alg.clone()).unwrap(); 217 | 218 | hasher.write_all(&data).unwrap(); 219 | hasher_alg 220 | .write_all(&initial_digest(output_size)[..]) 221 | .unwrap(); 222 | hasher_alg.write_all(&data[..block_size]).unwrap(); 223 | 224 | let mut hasher_clone = hasher.clone(); 225 | let mut hasher_alg_clone = hasher_alg.clone(); 226 | 227 | assert_eq!( 228 | hasher_alg.finalize_reset().to_vec(), 229 | hasher.finalize_reset().unwrap() 230 | ); 231 | 232 | let result = hasher_alg_clone.finalize_reset(); 233 | hasher_alg_clone 234 | .write_all(&initial_digest(output_size)[..]) 235 | .unwrap(); 236 | hasher_alg_clone.write_all(&result[..]).unwrap(); 237 | 238 | assert_eq!( 239 | hasher_clone.tpm_extend_finalize_reset().unwrap(), 240 | hasher_alg_clone.finalize_reset().to_vec() 241 | ); 242 | } 243 | 244 | #[test] 245 | fn test_hash_more_values_than_block_size() { 246 | hash_more_values_than_block_size(Sha256::new(), INPUT_BLOCK_SIZE_SHA256); 247 | hash_more_values_than_block_size(Sha384::new(), INPUT_BLOCK_SIZE_SHA384); 248 | hash_more_values_than_block_size(Sha512::new(), INPUT_BLOCK_SIZE_SHA512); 249 | } 250 | 251 | fn hash_more_values_than_block_size( 252 | mut hasher_alg: T, 253 | block_size: usize, 254 | ) { 255 | let data = vec![78u8; block_size + block_size / 2 - 1]; 256 | let output_size = hasher_alg.finalize_reset().len(); 257 | let (data1, data2) = data.split_at(block_size); 258 | let mut hasher = EifHasher::new(block_size, hasher_alg.clone()).unwrap(); 259 | 260 | hasher.write_all(&data).unwrap(); 261 | 262 | hasher_alg.write_all(&initial_digest(output_size)).unwrap(); 263 | hasher_alg.write_all(data1).unwrap(); 264 | let result = 
hasher_alg.finalize_reset(); 265 | hasher_alg.write_all(&result).unwrap(); 266 | hasher_alg.write_all(data2).unwrap(); 267 | 268 | let mut hasher_clone = hasher.clone(); 269 | let mut hasher_alg_clone = hasher_alg.clone(); 270 | 271 | assert_eq!( 272 | hasher_alg.finalize_reset().to_vec(), 273 | hasher.finalize_reset().unwrap() 274 | ); 275 | 276 | let result = hasher_alg_clone.finalize_reset(); 277 | hasher_alg_clone 278 | .write_all(&initial_digest(output_size)[..]) 279 | .unwrap(); 280 | hasher_alg_clone.write_all(&result[..]).unwrap(); 281 | 282 | assert_eq!( 283 | hasher_clone.tpm_extend_finalize_reset().unwrap(), 284 | hasher_alg_clone.finalize_reset().to_vec() 285 | ); 286 | } 287 | 288 | #[test] 289 | fn test_hash_with_writes_of_different_sizes() { 290 | hash_with_writes_of_different_sizes(Sha256::new(), INPUT_BLOCK_SIZE_SHA256); 291 | hash_with_writes_of_different_sizes(Sha384::new(), INPUT_BLOCK_SIZE_SHA512); 292 | hash_with_writes_of_different_sizes(Sha512::new(), INPUT_BLOCK_SIZE_SHA512); 293 | } 294 | 295 | fn hash_with_writes_of_different_sizes( 296 | hasher_alg: T, 297 | block_size: usize, 298 | ) { 299 | let data = vec![78u8; block_size * 256]; 300 | let mut hasher_in_one_go = EifHasher::new(block_size, hasher_alg.clone()).unwrap(); 301 | let mut hasher_in_random_chunks = EifHasher::new(block_size, hasher_alg.clone()).unwrap(); 302 | let mut hasher_one_byte_at_atime = EifHasher::new(block_size, hasher_alg).unwrap(); 303 | 304 | hasher_in_one_go.write_all(&data).unwrap(); 305 | let mut remaining = &data[..]; 306 | let mut iteration = 1; 307 | while !remaining.is_empty() { 308 | let chunk_size = std::cmp::max(1, (iteration % remaining.len()) % block_size); 309 | let (to_be_written, unhandled) = remaining.split_at(chunk_size); 310 | hasher_in_random_chunks.write_all(to_be_written).unwrap(); 311 | 312 | remaining = unhandled; 313 | iteration += 1987; 314 | } 315 | 316 | for x in data { 317 | hasher_one_byte_at_atime.write_all(&[x]).unwrap(); 318 | } 319 
| 320 | let result1 = hasher_in_one_go.finalize_reset().unwrap(); 321 | let result2 = hasher_in_random_chunks.finalize_reset().unwrap(); 322 | let result3 = hasher_one_byte_at_atime.finalize_reset().unwrap(); 323 | assert_eq!(result1, result2); 324 | assert_eq!(result1, result3); 325 | } 326 | 327 | #[test] 328 | fn test_no_cache() { 329 | let data = vec![78u8; 127 * 256]; 330 | let mut eif_hasher = EifHasher::new_without_cache(Sha384::new()).unwrap(); 331 | let mut hasher = Sha384::new(); 332 | 333 | hasher.write_all(&data[..]).unwrap(); 334 | for value in data { 335 | eif_hasher.write(&[value]).unwrap(); 336 | } 337 | 338 | let result1 = eif_hasher.finalize_reset().unwrap(); 339 | let result2 = hasher.finalize_reset(); 340 | assert_eq!(result1, result2.to_vec()); 341 | } 342 | } 343 | -------------------------------------------------------------------------------- /src/defs/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | #![deny(warnings)] 5 | /// Definition Eif (Enclave Image format) 6 | /// 7 | /// This crate is consumed by the following clients: 8 | /// - eif_utils: needed by the eif_builder. 9 | /// - eif_loader: needed to properly send an eif file over vsock. 10 | /// 11 | /// With that in mind please be frugal with the dependencies for this 12 | /// crate. 
13 | use byteorder::{BigEndian, ByteOrder}; 14 | use num_derive::FromPrimitive; 15 | use num_traits::FromPrimitive; 16 | use serde::{Deserialize, Serialize}; 17 | use std::mem::size_of; 18 | 19 | pub const EIF_MAGIC: [u8; 4] = [46, 101, 105, 102]; // .eif in ascii 20 | pub const MAX_NUM_SECTIONS: usize = 32; 21 | /// EIF Header Flags 22 | /// bits 1 : Architecture - toggled for aarch64, cleared for x86_64 23 | /// bits 2-8: Unused 24 | pub const EIF_HDR_ARCH_ARM64: u16 = 0x1; 25 | 26 | /// Current EIF version to be incremented every time we change the format 27 | /// of this structures, we assume changes are backwards compatible. 28 | /// V1 -> V2: Add support to generate and check CRC. 29 | /// V2 -> V3: Add the signature section. 30 | /// V3 -> V4: Add the metadata section. 31 | pub const CURRENT_VERSION: u16 = 4; 32 | 33 | #[derive(Clone, Copy, Debug)] 34 | pub struct EifHeader { 35 | /// Magic number used to identify this file format 36 | pub magic: [u8; 4], 37 | /// EIF version 38 | pub version: u16, 39 | /// EIF header flags 40 | pub flags: u16, 41 | /// Default enclave memory used to boot this eif file 42 | pub default_mem: u64, 43 | /// Default enclave cpus number used to boot this eif file 44 | pub default_cpus: u64, 45 | pub reserved: u16, 46 | pub num_sections: u16, 47 | pub section_offsets: [u64; MAX_NUM_SECTIONS], 48 | /// Sizes for each section, we need this information in the 49 | /// header because the vsock_loader needs to know how large are the 50 | /// ramdisks 51 | pub section_sizes: [u64; MAX_NUM_SECTIONS], 52 | pub unused: u32, 53 | /// crc32 IEEE used for validating the eif file is correct, it contains 54 | /// the crc for everything except the bytes representing this field. 55 | /// Needs to be the last field of the header. 
56 | pub eif_crc32: u32, 57 | } 58 | 59 | impl EifHeader { 60 | pub fn from_be_bytes(bytes: &[u8]) -> Result { 61 | let mut pos = 0; 62 | 63 | let mut magic = [0u8; 4]; 64 | magic.copy_from_slice(&bytes[pos..pos + size_of::<[u8; 4]>()]); 65 | pos += size_of::<[u8; 4]>(); 66 | 67 | let version = BigEndian::read_u16(&bytes[pos..]); 68 | pos += size_of::(); 69 | let flags = BigEndian::read_u16(&bytes[pos..]); 70 | pos += size_of::(); 71 | let default_mem = BigEndian::read_u64(&bytes[pos..]); 72 | pos += size_of::(); 73 | let default_cpus = BigEndian::read_u64(&bytes[pos..]); 74 | pos += size_of::(); 75 | let reserved = BigEndian::read_u16(&bytes[pos..]); 76 | pos += size_of::(); 77 | let num_sections = BigEndian::read_u16(&bytes[pos..]); 78 | pos += size_of::(); 79 | 80 | let mut section_offsets = [0u64; MAX_NUM_SECTIONS]; 81 | for item in section_offsets.iter_mut() { 82 | *item = BigEndian::read_u64(&bytes[pos..]); 83 | pos += size_of::(); 84 | } 85 | 86 | let mut section_sizes = [0u64; MAX_NUM_SECTIONS]; 87 | for item in section_sizes.iter_mut() { 88 | *item = BigEndian::read_u64(&bytes[pos..]); 89 | pos += size_of::(); 90 | } 91 | 92 | let unused = BigEndian::read_u32(&bytes[pos..]); 93 | pos += size_of::(); 94 | let eif_crc32 = BigEndian::read_u32(&bytes[pos..]); 95 | pos += size_of::(); 96 | 97 | if bytes.len() != pos { 98 | return Err("Invalid EifHeader length".to_string()); 99 | } 100 | 101 | Ok(EifHeader { 102 | magic, 103 | version, 104 | flags, 105 | default_mem, 106 | default_cpus, 107 | reserved, 108 | num_sections, 109 | section_offsets, 110 | section_sizes, 111 | unused, 112 | eif_crc32, 113 | }) 114 | } 115 | 116 | pub fn to_be_bytes(&self) -> Vec { 117 | let mut buf = [0u8; Self::size()]; 118 | let mut result = Vec::new(); 119 | let mut pos = 0; 120 | 121 | buf[pos..pos + size_of::<[u8; 4]>()].copy_from_slice(&self.magic); 122 | pos += size_of::<[u8; 4]>(); 123 | 124 | BigEndian::write_u16(&mut buf[pos..], self.version); 125 | pos += size_of::(); 126 | 
BigEndian::write_u16(&mut buf[pos..], self.flags); 127 | pos += size_of::(); 128 | BigEndian::write_u64(&mut buf[pos..], self.default_mem); 129 | pos += size_of::(); 130 | BigEndian::write_u64(&mut buf[pos..], self.default_cpus); 131 | pos += size_of::(); 132 | BigEndian::write_u16(&mut buf[pos..], self.reserved); 133 | pos += size_of::(); 134 | BigEndian::write_u16(&mut buf[pos..], self.num_sections); 135 | pos += size_of::(); 136 | 137 | for elem in self.section_offsets.iter() { 138 | BigEndian::write_u64(&mut buf[pos..], *elem); 139 | pos += size_of::(); 140 | } 141 | 142 | for elem in self.section_sizes.iter() { 143 | BigEndian::write_u64(&mut buf[pos..], *elem); 144 | pos += size_of::(); 145 | } 146 | 147 | BigEndian::write_u32(&mut buf[pos..], self.unused); 148 | pos += size_of::(); 149 | BigEndian::write_u32(&mut buf[pos..], self.eif_crc32); 150 | 151 | result.extend_from_slice(&buf[..]); 152 | result 153 | } 154 | 155 | pub const fn size() -> usize { 156 | 4 * size_of::() 157 | + 2 * size_of::() 158 | + 2 * size_of::() 159 | + size_of::<[u8; 4]>() 160 | + 2 * size_of::<[u64; MAX_NUM_SECTIONS]>() 161 | } 162 | } 163 | 164 | #[derive(Clone, Copy, Debug, FromPrimitive, PartialEq, Eq)] 165 | #[repr(u16)] 166 | pub enum EifSectionType { 167 | EifSectionInvalid, 168 | EifSectionKernel, 169 | EifSectionCmdline, 170 | EifSectionRamdisk, 171 | EifSectionSignature, 172 | EifSectionMetadata, 173 | } 174 | 175 | #[derive(Clone, Copy, Debug)] 176 | pub struct EifSectionHeader { 177 | pub section_type: EifSectionType, 178 | pub flags: u16, 179 | pub section_size: u64, 180 | } 181 | 182 | impl EifSectionHeader { 183 | pub fn from_be_bytes(bytes: &[u8]) -> Result { 184 | let mut pos = 0; 185 | 186 | let section_type = BigEndian::read_u16(&bytes[pos..]); 187 | pos += size_of::(); 188 | let flags = BigEndian::read_u16(&bytes[pos..]); 189 | pos += size_of::(); 190 | let section_size = BigEndian::read_u64(&bytes[pos..]); 191 | pos += size_of::(); 192 | 193 | if bytes.len() != 
pos { 194 | return Err("Invalid EifSectionHeader length".to_string()); 195 | } 196 | 197 | Ok(EifSectionHeader { 198 | section_type: FromPrimitive::from_u16(section_type) 199 | .ok_or_else(|| "Invalid section type".to_string())?, 200 | flags, 201 | section_size, 202 | }) 203 | } 204 | 205 | pub fn to_be_bytes(&self) -> Vec { 206 | let mut result = Vec::new(); 207 | let mut buf = [0u8; Self::size()]; 208 | let mut pos = 0; 209 | 210 | BigEndian::write_u16(&mut buf[pos..], self.section_type as u16); 211 | pos += size_of::(); 212 | BigEndian::write_u16(&mut buf[pos..], self.flags); 213 | pos += size_of::(); 214 | BigEndian::write_u64(&mut buf[pos..], self.section_size); 215 | 216 | result.extend_from_slice(&buf[..]); 217 | result 218 | } 219 | 220 | pub const fn size() -> usize { 221 | size_of::() + size_of::() + size_of::() 222 | } 223 | } 224 | 225 | /// Array containing the signatures of at least one PCR. 226 | /// For now, it only contains the signature of PRC0. 227 | pub type EifSignature = Vec; 228 | 229 | #[derive(Clone, Debug, Serialize, Deserialize)] 230 | pub struct PcrSignature { 231 | /// The PEM-formatted signing certificate 232 | pub signing_certificate: Vec, 233 | /// The serialized COSESign1 object generated using the byte array 234 | /// formed from RegisterIndex and RegisterValue as payload 235 | pub signature: Vec, 236 | } 237 | 238 | #[derive(Clone, Debug, Serialize, Deserialize)] 239 | pub struct PcrInfo { 240 | /// The index of the PCR 241 | pub register_index: i32, 242 | /// The value of the PCR 243 | pub register_value: Vec, 244 | } 245 | 246 | impl PcrInfo { 247 | pub fn new(register_index: i32, register_value: Vec) -> Self { 248 | PcrInfo { 249 | register_index, 250 | register_value, 251 | } 252 | } 253 | } 254 | 255 | #[derive(Debug, Serialize, Deserialize, Clone)] 256 | pub struct EifBuildInfo { 257 | #[serde(rename = "BuildTime")] 258 | pub build_time: String, 259 | #[serde(rename = "BuildTool")] 260 | pub build_tool: String, 261 | 
#[serde(rename = "BuildToolVersion")]
    pub build_tool_version: String,
    #[serde(rename = "OperatingSystem")]
    pub img_os: String,
    #[serde(rename = "KernelVersion")]
    pub img_kernel: String,
}

/// Structure used for (de)serializing metadata when
/// writing or reading the metadata section of the EIF
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EifIdentityInfo {
    /// Enclave image name.
    #[serde(rename = "ImageName")]
    pub img_name: String,
    /// Enclave image version.
    #[serde(rename = "ImageVersion")]
    pub img_version: String,
    /// Build tool information generated at EIF build time.
    #[serde(rename = "BuildMetadata")]
    pub build_info: EifBuildInfo,
    /// Free-form JSON describing the source Docker image.
    #[serde(rename = "DockerInfo")]
    pub docker_info: serde_json::Value,
    /// Arbitrary user-provided JSON metadata.
    #[serde(rename = "CustomMetadata")]
    pub custom_info: serde_json::Value,
}

#[cfg(test)]
mod tests {
    use super::{EifHeader, EifSectionHeader, EifSectionType};
    use super::{EIF_MAGIC, MAX_NUM_SECTIONS};

    #[test]
    fn test_eif_section_type() {
        // The section type is serialized as a 16-bit value on disk.
        assert_eq!(std::mem::size_of::<EifSectionType>(), 2);
    }

    #[test]
    fn test_eif_section_header_to_from_be_bytes() {
        let eif_section_header = EifSectionHeader {
            section_type: EifSectionType::EifSectionSignature,
            flags: 3,
            section_size: 123,
        };

        let bytes = eif_section_header.to_be_bytes();
        assert_eq!(bytes.len(), EifSectionHeader::size());

        // Round-trip: deserializing the serialized header must yield equal fields.
        let new_eif_section_header = EifSectionHeader::from_be_bytes(&bytes).unwrap();
        assert_eq!(
            eif_section_header.section_type,
            new_eif_section_header.section_type
        );
        assert_eq!(eif_section_header.flags, new_eif_section_header.flags);
        assert_eq!(
            eif_section_header.section_size,
            new_eif_section_header.section_size
        );
    }

    #[test]
    fn test_eif_header_to_from_be_bytes() {
        let eif_header = EifHeader {
            magic: EIF_MAGIC,
            version: 3,
            flags: 4,
            default_mem: 5,
            default_cpus: 6,
            reserved: 2,
            num_sections: 5,
            section_offsets: [12u64; MAX_NUM_SECTIONS],
            section_sizes: [13u64; MAX_NUM_SECTIONS],
            unused: 0,
            eif_crc32: 123,
        };

        let bytes = eif_header.to_be_bytes();
        assert_eq!(bytes.len(), EifHeader::size());

        // Round-trip: every field must survive serialization and deserialization.
        let new_eif_header = EifHeader::from_be_bytes(&bytes).unwrap();
        assert_eq!(eif_header.magic, new_eif_header.magic);
        assert_eq!(eif_header.version, new_eif_header.version);
        assert_eq!(eif_header.flags, new_eif_header.flags);
        assert_eq!(eif_header.default_mem, new_eif_header.default_mem);
        assert_eq!(eif_header.default_cpus, new_eif_header.default_cpus);
        assert_eq!(eif_header.reserved, new_eif_header.reserved);
        assert_eq!(eif_header.num_sections, new_eif_header.num_sections);
        assert_eq!(eif_header.section_offsets, new_eif_header.section_offsets);
        assert_eq!(eif_header.section_sizes, new_eif_header.section_sizes);
        assert_eq!(eif_header.unused, new_eif_header.unused);
        assert_eq!(eif_header.eif_crc32, new_eif_header.eif_crc32);
    }

    #[test]
    fn test_eif_header_size() {
        // The on-disk header size is fixed; changing it breaks compatibility.
        assert_eq!(EifHeader::size(), 548);
    }

    #[test]
    fn test_eif_section_header_size() {
        assert_eq!(EifSectionHeader::size(), 12);
    }

    #[test]
    fn test_eif_header_from_be_bytes_invalid_length() {
        // 550 bytes differs from the fixed 548-byte header, so parsing must fail.
        let bytes = [0u8; 550];
        assert!(EifHeader::from_be_bytes(&bytes).is_err());
    }

    #[test]
    fn test_eif_section_header_from_be_bytes_invalid_length() {
        let bytes = [0u8; 16];
        assert!(EifSectionHeader::from_be_bytes(&bytes).is_err());
    }

    #[test]
    fn test_eif_section_header_from_be_bytes_invalid_section_type() {
        let mut bytes = [0u8; 12];
        // As there are 6 EIF section types, set the enum index to 6
        // so we get an invalid section to cause the error.
        bytes[1] = 6;

        assert!(EifSectionHeader::from_be_bytes(&bytes).is_err());
    }
}

pub mod eif_hasher;
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//! AWS Nitro Enclaves Image Format API
//!
//! This is the library that provides the API for interacting with Enclave Image Format (EIF) files
//! used in AWS Nitro Enclaves.
//!
//! defs provides the APIs that define an EIF and related objects.
//! utils provides the APIs to consume and interact with an EIF and related objects.

pub mod defs;
pub mod utils;
--------------------------------------------------------------------------------
/src/utils/eif_reader.rs:
--------------------------------------------------------------------------------
// Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use crate::defs::eif_hasher::EifHasher;
use crate::defs::{
    EifHeader, EifIdentityInfo, EifSectionHeader, EifSectionType, PcrInfo, PcrSignature,
};
use crate::utils::get_pcrs;
use aws_nitro_enclaves_cose::{crypto::Openssl, CoseSign1};
use crc::{Crc, CRC_32_ISO_HDLC};
use openssl::pkey::PKey;
use serde::{Deserialize, Serialize};
use serde_cbor::{from_slice, to_vec};
use sha2::{Digest, Sha384};

use std::collections::BTreeMap;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::mem::size_of;

/// The information about the signing certificate to be provided for a `describe-eif` request.
22 | #[derive(Clone, Serialize, Deserialize)] 23 | pub struct SignCertificateInfo { 24 | #[serde(rename = "IssuerName")] 25 | /// Certificate's subject name. 26 | pub issuer_name: BTreeMap, 27 | #[serde(rename = "Algorithm")] 28 | /// Certificate's signature algorithm 29 | pub algorithm: String, 30 | #[serde(rename = "NotBefore")] 31 | /// Not before validity period 32 | pub not_before: String, 33 | #[serde(rename = "NotAfter")] 34 | /// Not after validity period 35 | pub not_after: String, 36 | #[serde(rename = "Signature")] 37 | /// Certificate's signature in hex format: 'XX:XX:XX..' 38 | pub signature: String, 39 | } 40 | 41 | impl SignCertificateInfo { 42 | /// Create new signing certificate information structure 43 | pub fn new( 44 | issuer_name: BTreeMap, 45 | algorithm: String, 46 | not_before: String, 47 | not_after: String, 48 | signature: String, 49 | ) -> Self { 50 | SignCertificateInfo { 51 | issuer_name, 52 | algorithm, 53 | not_before, 54 | not_after, 55 | signature, 56 | } 57 | } 58 | } 59 | 60 | /// Used for providing EIF info when requested by 61 | /// 'describe-eif' or 'describe-enclaves' commands 62 | pub struct EifReader { 63 | /// Deserialized EIF header 64 | pub header: EifHeader, 65 | /// Serialized signature section 66 | pub signature_section: Option>, 67 | /// Hash of the whole EifImage. 68 | pub image_hasher: EifHasher, 69 | /// Hash of the EifSections provided by Amazon 70 | /// Kernel + cmdline + First Ramdisk 71 | pub bootstrap_hasher: EifHasher, 72 | /// Hash of the remaining ramdisks. 73 | pub app_hasher: EifHasher, 74 | /// Hash the signing certificate 75 | pub cert_hasher: EifHasher, 76 | pub eif_crc: u32, 77 | pub sign_check: Option, 78 | /// Generated and custom EIF metadata 79 | pub metadata: Option, 80 | } 81 | 82 | impl EifReader { 83 | /// Reads EIF and extracts sections to be written in the hashers based 84 | /// on section type. 
Also writes sections in the eif_crc, excluding the 85 | /// CRC from the header 86 | pub fn from_eif(eif_path: String) -> Result { 87 | let crc_gen = Crc::::new(&CRC_32_ISO_HDLC); 88 | let mut eif_crc = crc_gen.digest(); 89 | let mut curr_seek = 0; 90 | let mut eif_file = 91 | File::open(eif_path).map_err(|e| format!("Failed to open the EIF file: {:?}", e))?; 92 | 93 | // Extract EIF header 94 | let mut header_buf = vec![0u8; EifHeader::size()]; 95 | eif_file 96 | .read_exact(&mut header_buf) 97 | .map_err(|e| format!("Error while reading EIF header: {:?}", e))?; 98 | 99 | // Exclude last field of header which is CRC 100 | let len_without_crc = header_buf.len() - size_of::(); 101 | eif_crc.update(&header_buf[..len_without_crc]); 102 | 103 | let header = EifHeader::from_be_bytes(&header_buf) 104 | .map_err(|e| format!("Error while parsing EIF header: {:?}", e))?; 105 | curr_seek += EifHeader::size(); 106 | eif_file 107 | .seek(SeekFrom::Start(curr_seek as u64)) 108 | .map_err(|e| format!("Failed to seek file from start: {:?}", e))?; 109 | 110 | let mut section_buf = vec![0u8; EifSectionHeader::size()]; 111 | let mut image_hasher = EifHasher::new_without_cache(Sha384::new()) 112 | .map_err(|e| format!("Could not create image_hasher: {:?}", e))?; 113 | let mut bootstrap_hasher = EifHasher::new_without_cache(Sha384::new()) 114 | .map_err(|e| format!("Could not create bootstrap_hasher: {:?}", e))?; 115 | let mut app_hasher = EifHasher::new_without_cache(Sha384::new()) 116 | .map_err(|e| format!("Could not create app_hasher: {:?}", e))?; 117 | let mut cert_hasher = EifHasher::new_without_cache(Sha384::new()) 118 | .map_err(|e| format!("Could not create cert_hasher: {:?}", e))?; 119 | let mut ramdisk_idx = 0; 120 | let mut signature_section = None; 121 | let mut metadata = None; 122 | 123 | // Read all section headers and treat by type 124 | while eif_file 125 | .read_exact(&mut section_buf) 126 | .map_err(|e| format!("Error while reading EIF header: {:?}", e)) 127 | 
.is_ok() 128 | { 129 | let section = EifSectionHeader::from_be_bytes(§ion_buf) 130 | .map_err(|e| format!("Error extracting EIF section header: {:?}", e))?; 131 | eif_crc.update(§ion_buf); 132 | 133 | let mut buf = vec![0u8; section.section_size as usize]; 134 | curr_seek += EifSectionHeader::size(); 135 | eif_file 136 | .seek(SeekFrom::Start(curr_seek as u64)) 137 | .map_err(|e| format!("Failed to seek after EIF header: {:?}", e))?; 138 | eif_file 139 | .read_exact(&mut buf) 140 | .map_err(|e| format!("Error while reading kernel from EIF: {:?}", e))?; 141 | eif_crc.update(&buf); 142 | 143 | curr_seek += section.section_size as usize; 144 | eif_file 145 | .seek(SeekFrom::Start(curr_seek as u64)) 146 | .map_err(|e| format!("Failed to seek after EIF section: {:?}", e))?; 147 | 148 | match section.section_type { 149 | EifSectionType::EifSectionKernel | EifSectionType::EifSectionCmdline => { 150 | image_hasher.write_all(&buf).map_err(|e| { 151 | format!("Failed to write EIF section to image_hasher: {:?}", e) 152 | })?; 153 | bootstrap_hasher.write_all(&buf).map_err(|e| { 154 | format!("Failed to write EIF section to bootstrap_hasher: {:?}", e) 155 | })?; 156 | } 157 | EifSectionType::EifSectionRamdisk => { 158 | image_hasher.write_all(&buf).map_err(|e| { 159 | format!("Failed to write ramdisk section to image_hasher: {:?}", e) 160 | })?; 161 | if ramdisk_idx == 0 { 162 | bootstrap_hasher.write_all(&buf).map_err(|e| { 163 | format!( 164 | "Failed to write ramdisk section to bootstrap_hasher: {:?}", 165 | e 166 | ) 167 | })?; 168 | } else { 169 | app_hasher.write_all(&buf).map_err(|e| { 170 | format!("Failed to write ramdisk section to app_hasher: {:?}", e) 171 | })?; 172 | } 173 | ramdisk_idx += 1; 174 | } 175 | EifSectionType::EifSectionSignature => { 176 | signature_section = Some(buf.clone()); 177 | // Deserialize PCR0 signature structure and write it to the hasher 178 | let des_sign: Vec = from_slice(&buf[..]) 179 | .map_err(|e| format!("Error deserializing 
certificate: {:?}", e))?; 180 | 181 | let cert = openssl::x509::X509::from_pem(&des_sign[0].signing_certificate) 182 | .map_err(|e| format!("Error while digesting certificate: {:?}", e))?; 183 | let cert_der = cert.to_der().map_err(|e| { 184 | format!("Failed to deserialize signing certificate: {:?}", e) 185 | })?; 186 | cert_hasher.write_all(&cert_der).map_err(|e| { 187 | format!("Failed to write signature section to cert_hasher: {:?}", e) 188 | })?; 189 | } 190 | EifSectionType::EifSectionMetadata => { 191 | metadata = serde_json::from_slice(&buf[..]) 192 | .map_err(|e| format!("Error deserializing metadata: {:?}", e))?; 193 | } 194 | EifSectionType::EifSectionInvalid => { 195 | return Err("Found invalid EIF section".to_string()); 196 | } 197 | } 198 | } 199 | 200 | Ok(EifReader { 201 | header, 202 | signature_section, 203 | image_hasher, 204 | bootstrap_hasher, 205 | app_hasher, 206 | cert_hasher, 207 | eif_crc: eif_crc.finalize(), 208 | sign_check: None, 209 | metadata, 210 | }) 211 | } 212 | 213 | pub fn get_metadata(&self) -> Option { 214 | self.metadata.clone() 215 | } 216 | 217 | pub fn get_measurements(&mut self) -> Result, String> { 218 | get_pcrs( 219 | &mut self.image_hasher, 220 | &mut self.bootstrap_hasher, 221 | &mut self.app_hasher, 222 | &mut self.cert_hasher, 223 | Sha384::new(), 224 | self.signature_section.is_some(), 225 | ) 226 | } 227 | 228 | /// Returns deserialized header section 229 | pub fn get_header(&self) -> EifHeader { 230 | self.header 231 | } 232 | 233 | /// Compare header CRC to the one we computed 234 | pub fn check_crc(&self) -> bool { 235 | self.header.eif_crc32 == self.eif_crc 236 | } 237 | 238 | /// Extract signature section from EIF and parse the signing certificate 239 | pub fn get_certificate_info( 240 | &mut self, 241 | measurements: BTreeMap, 242 | ) -> Result { 243 | let signature_buf = match &self.signature_section { 244 | Some(section) => section, 245 | None => { 246 | return Err("Signature section missing from 
EIF.".to_string()); 247 | } 248 | }; 249 | // Deserialize PCR0 signature structure and write it to the hasher 250 | let des_sign: Vec = from_slice(&signature_buf[..]) 251 | .map_err(|e| format!("Error deserializing certificate: {:?}", e))?; 252 | 253 | let cert = openssl::x509::X509::from_pem(&des_sign[0].signing_certificate) 254 | .map_err(|e| format!("Error while digesting certificate: {:?}", e))?; 255 | 256 | // Parse issuer into a BTreeMap 257 | let mut issuer_name = BTreeMap::new(); 258 | for e in cert.issuer_name().entries() { 259 | issuer_name.insert( 260 | e.object().to_string(), 261 | format!("{:?}", e.data()).replace(&['\"'][..], ""), 262 | ); 263 | } 264 | 265 | let algorithm = format!("{:#?}", cert.signature_algorithm().object()); 266 | 267 | // Get measured PCR0 signature payload 268 | let pcr0 = match measurements.get("PCR0") { 269 | Some(pcr) => pcr, 270 | None => { 271 | return Err("Failed to get PCR0.".to_string()); 272 | } 273 | }; 274 | let pcr_info = PcrInfo::new( 275 | 0, 276 | hex::decode(pcr0).map_err(|e| format!("Error while decoding PCR0: {:?}", e))?, 277 | ); 278 | 279 | let measured_payload = 280 | to_vec(&pcr_info).map_err(|e| format!("Could not serialize PCR info: {:?}", e))?; 281 | 282 | // Extract public key from certificate and convert to PKey 283 | let public_key = &cert 284 | .public_key() 285 | .map_err(|e| format!("Failed to get public key: {:?}", e))?; 286 | let coses_key = PKey::public_key_from_pem( 287 | &public_key 288 | .public_key_to_pem() 289 | .map_err(|e| format!("Failed to serialize public key: {:?}", e))?[..], 290 | ) 291 | .map_err(|e| format!("Failed to decode key nit elliptic key structure: {:?}", e))?; 292 | 293 | // Deserialize COSE signature and extract the payload using the public key 294 | let pcr_sign = CoseSign1::from_bytes(&des_sign[0].signature[..]) 295 | .map_err(|e| format!("Failed to deserialize signature: {:?}", e))?; 296 | let coses_payload = pcr_sign 297 | .get_payload::(Some(coses_key.as_ref())) 298 
| .map_err(|e| format!("Failed to get signature payload: {:?}", e))?; 299 | 300 | self.sign_check = Some(measured_payload == coses_payload); 301 | 302 | Ok(SignCertificateInfo { 303 | issuer_name, 304 | algorithm, 305 | not_before: format!("{:#?}", cert.not_before()), 306 | not_after: format!("{:#?}", cert.not_after()), 307 | // Change format from [\ XX, \ X, ..] to XX:XX:XX... 308 | signature: format!("{:02X?}", cert.signature().as_slice().to_vec()) 309 | .replace(&vec!['[', ']'][..], "") 310 | .replace(", ", ":"), 311 | }) 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /src/utils/eif_signer.rs: -------------------------------------------------------------------------------- 1 | use crate::defs::{EifHeader, EifSectionHeader, EifSectionType, PcrInfo, PcrSignature}; 2 | use crate::utils::eif_reader::EifReader; 3 | use crate::utils::get_pcrs; 4 | use aws_config::BehaviorVersion; 5 | use aws_nitro_enclaves_cose::{ 6 | crypto::kms::KmsKey, crypto::Openssl, header_map::HeaderMap, CoseSign1, 7 | }; 8 | use aws_sdk_kms::client::Client; 9 | use aws_types::region::Region; 10 | use openssl::pkey::PKey; 11 | use regex::Regex; 12 | use serde_cbor::to_vec; 13 | use sha2::{Digest, Sha384}; 14 | use std::collections::BTreeMap; 15 | use std::fs::{File, OpenOptions}; 16 | use std::io::{Read, Seek, SeekFrom, Write}; 17 | use std::mem::size_of; 18 | use std::sync::Arc; 19 | use tokio::runtime::Runtime; 20 | 21 | // Signing key for eif images 22 | pub enum SignKey { 23 | // Local private key 24 | LocalPrivateKey(Vec), 25 | 26 | // KMS signer implementation from Cose library 27 | KmsKey(Arc), 28 | } 29 | 30 | // Signing key details 31 | #[derive(Clone, Debug)] 32 | enum SignKeyInfo { 33 | // Local private key file path 34 | LocalPrivateKeyInfo { path: std::path::PathBuf }, 35 | 36 | // KMS key details 37 | KmsKeyInfo { id: String, region: Option }, 38 | } 39 | 40 | impl SignKeyInfo { 41 | pub fn new(key_location: &str) -> Result { 
42 | match parse_kms_arn(key_location) { 43 | Some((region, key_id)) => Ok(SignKeyInfo::KmsKeyInfo { 44 | id: key_id, 45 | region: Some(region), 46 | }), 47 | None => Ok(SignKeyInfo::LocalPrivateKeyInfo { 48 | path: key_location.into(), 49 | }), 50 | } 51 | } 52 | } 53 | 54 | fn parse_kms_arn(s: &str) -> Option<(String, String)> { 55 | // Matches KMS key ARNs in the format: 56 | // arn:partition:kms:region:account-id:key[/|:]key-id where: 57 | // - partition is: aws, aws-cn, or aws-us-gov 58 | // - region is captured: letters, numbers, hyphens 59 | // - account-id: exactly 12 digits 60 | // - key-id is captured: letters, numbers, hyphens 61 | let re = Regex::new( 62 | r"^arn:(?:aws|aws-cn|aws-us-gov):kms:([a-z0-9-]+):\d{12}:key[:/]([a-zA-Z0-9-]+)$", 63 | ) 64 | .expect("Regular expression for parsing ARNs must be valid"); 65 | 66 | re.captures(s).map(|caps| { 67 | // Safe to use index access since we know the pattern has exactly 2 capture groups 68 | (caps[1].to_string(), caps[2].to_string()) 69 | }) 70 | } 71 | 72 | // Full signining key data 73 | pub struct SignKeyData { 74 | // x509 certificate 75 | pub cert: Vec, 76 | 77 | // Signing key itself 78 | pub key: SignKey, 79 | } 80 | 81 | impl SignKeyData { 82 | pub fn new(key_location: &str, certificate: &std::path::Path) -> Result { 83 | let key_info = SignKeyInfo::new(key_location)?; 84 | 85 | let mut cert_file = File::open(certificate) 86 | .map_err(|err| format!("Could not open the certificate file: {:?}", err))?; 87 | let mut cert = Vec::new(); 88 | cert_file 89 | .read_to_end(&mut cert) 90 | .map_err(|err| format!("Could not read the certificate file: {:?}", err))?; 91 | 92 | let key = match &key_info { 93 | SignKeyInfo::LocalPrivateKeyInfo { path } => { 94 | let mut key_file = File::open(path) 95 | .map_err(|err| format!("Could not open the key file: {:?}", err))?; 96 | let mut key_data = Vec::new(); 97 | key_file 98 | .read_to_end(&mut key_data) 99 | .map_err(|err| format!("Could not read the key file: 
{:?}", err))?; 100 | 101 | SignKey::LocalPrivateKey(key_data) 102 | } 103 | SignKeyInfo::KmsKeyInfo { id, region } => { 104 | // Method `KmsKey::new_with_public_key` must be called from a thread being run 105 | // by Tokio runtime, or from a thread with an active `EnterGuard`. 106 | let act = async { 107 | let mut config_loader = aws_config::defaults(BehaviorVersion::latest()); 108 | if let Some(region_id) = region { 109 | config_loader = config_loader.region(Region::new(region_id.clone())); 110 | } 111 | 112 | let sdk_config = config_loader.load().await; 113 | if sdk_config.region().is_none() { 114 | return Err("AWS region for KMS is not specified".to_string()); 115 | } 116 | 117 | let id_copy = id.clone(); 118 | tokio::task::spawn_blocking(move || { 119 | let client = Client::new(&sdk_config); 120 | KmsKey::new_with_public_key(client, id_copy, None) 121 | .map_err(|e| e.to_string()) 122 | }) 123 | .await 124 | .unwrap() 125 | }; 126 | let runtime = Runtime::new().map_err(|e| e.to_string())?; 127 | let key = runtime.block_on(act)?; 128 | SignKey::KmsKey(Arc::new(key)) 129 | } 130 | }; 131 | 132 | Ok(SignKeyData { cert, key }) 133 | } 134 | } 135 | 136 | pub struct EifSigner { 137 | key_data: SignKeyData, 138 | } 139 | 140 | impl EifSigner { 141 | pub fn new(sign_key: Option) -> Option { 142 | sign_key.map(|key| EifSigner { key_data: key }) 143 | } 144 | 145 | pub fn sign(&self, payload: &[u8]) -> Result { 146 | let cose_sign = match &self.key_data.key { 147 | SignKey::LocalPrivateKey(key) => { 148 | let pkey = PKey::private_key_from_pem(key).map_err(|e| { 149 | format!("Failed to deserialize PEM-formatted private key: {}", e) 150 | })?; 151 | 152 | CoseSign1::new::(payload, &HeaderMap::new(), &pkey) 153 | .map_err(|e| format!("Failed to create CoseSign1 with local key: {}", e))? 
154 | } 155 | SignKey::KmsKey(key) => { 156 | let arc_key = key.clone(); 157 | let runtime = 158 | Runtime::new().map_err(|e| format!("Failed to create Tokio runtime: {}", e))?; 159 | 160 | runtime.block_on(async move { 161 | let payload_copy = Vec::from(payload); 162 | tokio::task::spawn_blocking(move || { 163 | CoseSign1::new::(&payload_copy, &HeaderMap::new(), &*arc_key) 164 | .map_err(|e| format!("Failed to create CoseSign1 with KMS key: {}", e)) 165 | }) 166 | .await 167 | .map_err(|e| format!("Task join error: {}", e))? 168 | })? 169 | } 170 | }; 171 | 172 | let signature = cose_sign 173 | .as_bytes(false) 174 | .map_err(|e| format!("Failed to get signature bytes: {}", e))?; 175 | 176 | Ok(PcrSignature { 177 | signing_certificate: self.key_data.cert.clone(), 178 | signature, 179 | }) 180 | } 181 | 182 | /// Generate the signature of a certain PCR. 183 | fn generate_pcr_signature( 184 | &self, 185 | register_index: i32, 186 | register_value: Vec, 187 | ) -> Result { 188 | let pcr_info = PcrInfo::new(register_index, register_value); 189 | let payload = to_vec(&pcr_info).expect("Could not serialize PCR info"); 190 | 191 | self.sign(payload.as_slice()) 192 | } 193 | 194 | /// Generate the signature of the EIF. 
195 | /// eif_signature = [pcr0_signature] 196 | pub fn generate_eif_signature( 197 | &self, 198 | measurements: &BTreeMap, 199 | ) -> Result, String> { 200 | let pcr0_index = 0; 201 | let pcr0_value = hex::decode( 202 | measurements 203 | .get("PCR0") 204 | .ok_or_else(|| "PCR0 measurement not found".to_string())?, 205 | ) 206 | .map_err(|e| format!("Failed to decode PCR0 hex value: {}", e))?; 207 | 208 | let pcr0_signature = self.generate_pcr_signature(pcr0_index, pcr0_value)?; 209 | 210 | let eif_signature = vec![pcr0_signature]; 211 | to_vec(&eif_signature).map_err(|e| format!("Failed to serialize signature: {}", e)) 212 | } 213 | 214 | pub fn get_cert_der(&self) -> Result, String> { 215 | let cert = openssl::x509::X509::from_pem(&self.key_data.cert) 216 | .map_err(|e| format!("Failed to parse PEM certificate: {}", e))?; 217 | 218 | cert.to_der() 219 | .map_err(|e| format!("Failed to convert certificate to DER format: {}", e)) 220 | } 221 | 222 | /// Writes the provided pcr signature to an existing EIF 223 | pub fn write_signature( 224 | &self, 225 | eif_path: &str, 226 | serialized_signature: Vec, 227 | is_signed: bool, 228 | ) -> Result<(), String> { 229 | let mut eif_file = OpenOptions::new() 230 | .read(true) 231 | .write(true) 232 | .open(eif_path) 233 | .map_err(|e| format!("Failed to open file: {:?}", e))?; 234 | let new_signature_size = serialized_signature.len() as u64; 235 | let mut header = self.read_and_parse_header(&mut eif_file)?; 236 | 237 | let signature_section = EifSectionHeader { 238 | section_type: EifSectionType::EifSectionSignature, 239 | flags: 0, 240 | section_size: new_signature_size, 241 | }; 242 | 243 | // Determine where to write the signature 244 | let (signature_offset, section_id, old_signature_size) = if is_signed { 245 | self.find_existing_signature(&mut eif_file, &header)? 246 | } else { 247 | let file_len = eif_file 248 | .metadata() 249 | .map_err(|e| format!("Failed to get file metadata: {:?}", e))? 
250 | .len(); 251 | (file_len, header.num_sections as usize, 0) 252 | }; 253 | 254 | let section_header_size = EifSectionHeader::size() as u64; 255 | let old_section_end = signature_offset + section_header_size + old_signature_size; 256 | let new_section_end = signature_offset + section_header_size + new_signature_size; 257 | let mut remaining_data = Vec::new(); 258 | 259 | if is_signed { 260 | // Read all data after the old signature section 261 | eif_file 262 | .seek(SeekFrom::Start(old_section_end)) 263 | .and_then(|_| eif_file.read_to_end(&mut remaining_data)) 264 | .map_err(|e| format!("Failed to read remaining data: {:?}", e))?; 265 | 266 | // Calculate the shift amount (positive if expanding, negative if shrinking) 267 | let shift_amount = (new_section_end as i64) - (old_section_end as i64); 268 | 269 | // Update offsets in the header for all sections after the signature 270 | for i in (section_id + 1)..header.num_sections as usize { 271 | header.section_offsets[i] = 272 | (header.section_offsets[i] as i64 + shift_amount) as u64; 273 | } 274 | } else { 275 | // For new signatures, just append to the end 276 | header.section_offsets[section_id] = signature_offset; 277 | header.section_sizes[section_id] = new_signature_size; 278 | header.num_sections += 1; 279 | } 280 | 281 | // Write updated header 282 | eif_file 283 | .seek(SeekFrom::Start(0)) 284 | .and_then(|_| eif_file.write_all(&header.to_be_bytes())) 285 | .map_err(|e| format!("Failed to write header: {:?}", e))?; 286 | 287 | // Write signature section 288 | eif_file 289 | .seek(SeekFrom::Start(signature_offset)) 290 | .and_then(|_| eif_file.write_all(&signature_section.to_be_bytes())) 291 | .and_then(|_| eif_file.write_all(&serialized_signature)) 292 | .map_err(|e| format!("Failed to write signature: {:?}", e))?; 293 | 294 | // Write the remaining data at the new position 295 | eif_file 296 | .write_all(&remaining_data) 297 | .map_err(|e| format!("Failed to write remaining data: {:?}", e))?; 298 | 299 
| // Set the new file length 300 | let new_file_size = new_section_end + remaining_data.len() as u64; 301 | eif_file.set_len(new_file_size).map_err(|e| { 302 | format!( 303 | "Failed to set new file length after writing signature: {:?}", 304 | e 305 | ) 306 | }) 307 | } 308 | 309 | fn read_and_parse_header(&self, file: &mut File) -> Result { 310 | let mut header_buf = vec![0u8; EifHeader::size()]; 311 | file.read_exact(&mut header_buf) 312 | .map_err(|e| format!("Error while reading EIF header: {:?}", e))?; 313 | 314 | EifHeader::from_be_bytes(&header_buf).map_err(|e| format!("Error parsing header: {:?}", e)) 315 | } 316 | 317 | fn find_existing_signature( 318 | &self, 319 | eif_file: &mut File, 320 | header: &EifHeader, 321 | ) -> Result<(u64, usize, u64), String> { 322 | for i in 0..header.num_sections as usize { 323 | let offset = header.section_offsets[i]; 324 | let mut section_header_buf = vec![0u8; EifSectionHeader::size()]; 325 | 326 | eif_file 327 | .seek(SeekFrom::Start(offset)) 328 | .map_err(|e| format!("Failed to seek: {:?}", e))?; 329 | 330 | eif_file 331 | .read_exact(&mut section_header_buf) 332 | .map_err(|e| format!("Failed to read section header: {:?}", e))?; 333 | 334 | let section_header = EifSectionHeader::from_be_bytes(§ion_header_buf) 335 | .map_err(|e| format!("Failed to parse section header: {:?}", e))?; 336 | 337 | if section_header.section_type == EifSectionType::EifSectionSignature { 338 | return Ok((offset, i, section_header.section_size)); 339 | } 340 | } 341 | Err("Signature section not found".to_string()) 342 | } 343 | 344 | /// Generates the signature based on the selected method and writes it to the EIF 345 | pub fn sign_image(&self, eif_path: &str) -> Result<(), String> { 346 | // Read PCRs and check if EIF already has a signature 347 | let mut eif_reader = EifReader::from_eif(eif_path.into())?; 348 | let has_signature = eif_reader.signature_section.is_some(); 349 | let measurements = get_pcrs( 350 | &mut eif_reader.image_hasher, 
351 | &mut eif_reader.bootstrap_hasher, 352 | &mut eif_reader.app_hasher, 353 | &mut eif_reader.cert_hasher, 354 | Sha384::new(), 355 | has_signature, 356 | )?; 357 | 358 | let signature = self.generate_eif_signature(&measurements)?; 359 | self.write_signature(eif_path, signature, has_signature) 360 | .map_err(|e| format!("Failed to write signature to EIF: {}", e))?; 361 | 362 | // Update CRC of the EIF 363 | self.update_crc(eif_path) 364 | } 365 | 366 | pub fn update_crc(&self, eif_path: &str) -> Result<(), String> { 367 | // Create a new instance of Reader to calculate the actual CRC 368 | let eif_reader = EifReader::from_eif(eif_path.into())?; 369 | 370 | let mut eif_file = OpenOptions::new() 371 | .read(true) 372 | .write(true) 373 | .open(eif_path) 374 | .map_err(|err| format!("Could not open the EIF: {:?}", err))?; 375 | 376 | let len_without_crc = EifHeader::size() - size_of::(); 377 | eif_file 378 | .seek(SeekFrom::Start(len_without_crc as u64)) 379 | .map_err(|err| format!("Could not seek in the EIF: {:?}", err))?; 380 | 381 | eif_file 382 | .write_all(&eif_reader.eif_crc.to_be_bytes()) 383 | .map_err(|err| format!("Failed to write checksum: {:?}", err)) 384 | } 385 | } 386 | 387 | #[cfg(test)] 388 | mod arn_tests { 389 | use super::parse_kms_arn; 390 | 391 | #[test] 392 | fn test_valid_kms_arns() { 393 | // Test cases with expected captures: (arn, region, key_id) 394 | let test_cases = vec![ 395 | ( 396 | "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab", 397 | "us-east-1", 398 | "1234abcd-12ab-34cd-56ef-1234567890ab", 399 | ), 400 | ( 401 | "arn:aws:kms:us-east-1:123456789012:key:1234abcd-12ab-34cd-56ef-1234567890ab", 402 | "us-east-1", 403 | "1234abcd-12ab-34cd-56ef-1234567890ab", 404 | ), 405 | ( 406 | "arn:aws-cn:kms:cn-north-1:123456789012:key/abcd1234", 407 | "cn-north-1", 408 | "abcd1234", 409 | ), 410 | ( 411 | "arn:aws-us-gov:kms:us-gov-west-1:123456789012:key:5678efgh", 412 | "us-gov-west-1", 413 | "5678efgh", 414 | 
), 415 | ]; 416 | 417 | for (arn, expected_region, expected_key_id) in test_cases { 418 | let (captured_region, captured_key_id) = 419 | parse_kms_arn(&arn).expect("Should match valid ARN"); 420 | assert_eq!(captured_region, expected_region); 421 | assert_eq!(captured_key_id, expected_key_id); 422 | } 423 | } 424 | 425 | #[test] 426 | fn test_invalid_kms_arns() { 427 | let invalid_arns = vec![ 428 | // Invalid partition 429 | "arn:invalid:kms:us-east-1:123456789012:key/abcd1234", 430 | // Missing region 431 | "arn:aws:kms::123456789012:key/abcd1234", 432 | // Invalid account ID (too short) 433 | "arn:aws:kms:us-east-1:12345678901:key/abcd1234", 434 | // Invalid account ID (too long) 435 | "arn:aws:kms:us-east-1:1234567890123:key/abcd1234", 436 | // Invalid account ID (non-numeric) 437 | "arn:aws:kms:us-east-1:12345678901a:key/abcd1234", 438 | // Wrong service 439 | "arn:aws:s3:us-east-1:123456789012:key/abcd1234", 440 | // Invalid resource type 441 | "arn:aws:kms:us-east-1:123456789012:alias/abcd1234", 442 | // Invalid key ID format 443 | "arn:aws:kms:us-east-1:123456789012:key/abc@1234", 444 | // Missing key ID 445 | "arn:aws:kms:us-east-1:123456789012:key/", 446 | "arn:aws:kms:us-east-1:123456789012:key:", 447 | // Invalid separator 448 | "arn:aws:kms:us-east-1:123456789012:key-abcd1234", 449 | // Empty string 450 | "", 451 | ]; 452 | 453 | for arn in invalid_arns { 454 | assert!( 455 | parse_kms_arn(arn).is_none(), 456 | "ARN should not match: {}", 457 | arn 458 | ); 459 | } 460 | } 461 | 462 | #[test] 463 | fn test_region_formats() { 464 | let valid_regions = vec![ 465 | "us-east-1", 466 | "us-west-2", 467 | "eu-central-1", 468 | "ap-southeast-2", 469 | "cn-north-1", 470 | "us-gov-west-1", 471 | ]; 472 | 473 | for region in valid_regions { 474 | let arn = format!("arn:aws:kms:{}:123456789012:key/abcd1234", region); 475 | let (captured_region, _) = parse_kms_arn(&arn).expect("Should match valid region"); 476 | assert_eq!(captured_region, region); 477 | } 478 | } 
479 | 480 | #[test] 481 | fn test_key_id_formats() { 482 | let valid_key_ids = vec![ 483 | "1234abcd-12ab-34cd-56ef-1234567890ab", // UUID format 484 | "abcd1234", // Short format 485 | "12345678-1234-1234-1234-123456789012", // Another UUID format 486 | "a1b2c3d4-e5f6", // Partial UUID format 487 | ]; 488 | 489 | for key_id in valid_key_ids { 490 | let arn = format!("arn:aws:kms:us-east-1:123456789012:key/{}", key_id); 491 | let (_, captured_id) = parse_kms_arn(&arn).expect("Should match valid key ID"); 492 | assert_eq!(captured_id, key_id); 493 | } 494 | } 495 | } 496 | -------------------------------------------------------------------------------- /src/utils/identity.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2019-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | use crate::defs::EifBuildInfo; 5 | use chrono::offset::Utc; 6 | use chrono::DateTime; 7 | use std::fs::File; 8 | use std::io::{BufRead, BufReader}; 9 | use std::path::Path; 10 | 11 | /// Utilities and helpers to fill EIF identity information 12 | const MAX_META_FILE_SIZE: u64 = 4096; 13 | const UNKNOWN_IMG_STR: &str = "Unknown"; 14 | 15 | /// Generate basic build info (build tool, version, time, image kernel info) 16 | pub fn generate_build_info( 17 | build_tool: &str, 18 | build_tool_version: &str, 19 | img_config_path: &str, 20 | build_time_override: Option>, 21 | ) -> Result { 22 | let now = match build_time_override { 23 | Some(val) => val, 24 | None => Utc::now(), 25 | }; 26 | 27 | let config_file = File::open(img_config_path) 28 | .map_err(|e| format!("Failed to open kernel image config file: {}", e))?; 29 | let os_string = BufReader::new(config_file) 30 | .lines() 31 | .nth(2) 32 | .unwrap() 33 | .map_err(|e| format!("Failed to read kernel config file: {}", e))?; 34 | 35 | // Extract OS and version from line format: 36 | // ' # Linux/x86_64 4.14.177-104.253.amzn2.x86_64 
Kernel Configuration ' 37 | let sep: Vec = vec![' ', '/', '-']; 38 | let os_words: Vec<&str> = os_string.split(&sep[..]).collect(); 39 | 40 | Ok(EifBuildInfo { 41 | build_time: now.to_rfc3339(), 42 | build_tool: build_tool.to_string(), 43 | build_tool_version: build_tool_version.to_string(), 44 | img_os: os_words.get(1).unwrap_or(&UNKNOWN_IMG_STR).to_string(), 45 | img_kernel: os_words.get(3).unwrap_or(&UNKNOWN_IMG_STR).to_string(), 46 | }) 47 | } 48 | 49 | /// Macro helper for generate_buid_info function to automatically pick up cargo info for build tool fields 50 | #[macro_export] 51 | macro_rules! generate_build_info { 52 | ($kernel_config_path:expr) => { 53 | $crate::utils::identity::generate_build_info( 54 | env!("CARGO_PKG_NAME"), 55 | env!("CARGO_PKG_VERSION"), 56 | $kernel_config_path, 57 | None, 58 | ) 59 | }; 60 | ($kernel_config_path:expr, $time_override:expr) => { 61 | $crate::utils::identity::generate_build_info( 62 | env!("CARGO_PKG_NAME"), 63 | env!("CARGO_PKG_VERSION"), 64 | $kernel_config_path, 65 | $time_override, 66 | ) 67 | }; 68 | } 69 | 70 | /// Read user-provided metadata from a file in a JSON format 71 | pub fn parse_custom_metadata(path: &str) -> Result { 72 | if !Path::new(&path).is_file() { 73 | return Err("Specified path is not a file".to_string()); 74 | } 75 | 76 | // Check file size 77 | let file_meta = 78 | std::fs::metadata(path).map_err(|e| format!("Failed to get file metadata: {}", e))?; 79 | if file_meta.len() > MAX_META_FILE_SIZE { 80 | return Err(format!( 81 | "Metadata file size exceeded limit of {}B", 82 | MAX_META_FILE_SIZE 83 | )); 84 | } 85 | 86 | // Get json Value 87 | let custom_file = 88 | File::open(path).map_err(|e| format!("Failed to open custom metadata file: {}", e))?; 89 | let json_value: serde_json::Value = serde_json::from_reader(custom_file) 90 | .map_err(|e| format!("Failed to deserialize json: {}", e))?; 91 | 92 | Ok(json_value) 93 | } 94 | 
// ----------------------------------------------------------------------------
// src/utils/mod.rs
// ----------------------------------------------------------------------------
// Copyright 2019-2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
#![deny(warnings)]
pub mod eif_reader;
pub mod eif_signer;
pub mod identity;

use crate::defs::eif_hasher::EifHasher;
use crate::defs::{
    EifHeader, EifIdentityInfo, EifSectionHeader, EifSectionType, PcrSignature, EIF_MAGIC,
    MAX_NUM_SECTIONS,
};
use aws_nitro_enclaves_cose::{crypto::Openssl, CoseSign1};
use crc::{Crc, CRC_32_ISO_HDLC};
use openssl::asn1::Asn1Time;
use serde::{Deserialize, Serialize};
use serde_cbor::from_slice;
use sha2::Digest;
use std::cmp::Ordering;
use std::collections::BTreeMap;

pub use eif_signer::{EifSigner, SignKeyData};

/// Contains code for EifBuilder a simple library used for building an EifFile
/// from a:
/// - kernel_file
/// - cmdline string
/// - ramdisks files.
///
/// TODO:
/// - Unittests.
/// - Add support to write default_mem & default_cpus, flags.
/// - Various validity checks: E.g: kernel is a bzImage.
use std::ffi::CString;
use std::fmt::Debug;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::mem::size_of;
use std::path::Path;

/// Sections every EIF has: kernel, cmdline and metadata.
const DEFAULT_SECTIONS_COUNT: u16 = 3;

/// Utility function to calculate PCRs, used at build and describe.
///
/// Finalizes and resets each hasher and returns a map from measurement name
/// ("HashAlgorithm", "PCR0", "PCR1", "PCR2" and optionally "PCR8") to its hex
/// digest. PCR8 (the signing-certificate measurement) is only present when
/// `is_signed` is true.
pub fn get_pcrs<T: Digest + Debug + Write + Clone>(
    image_hasher: &mut EifHasher<T>,
    bootstrap_hasher: &mut EifHasher<T>,
    app_hasher: &mut EifHasher<T>,
    cert_hasher: &mut EifHasher<T>,
    hasher: T,
    is_signed: bool,
) -> Result<BTreeMap<String, String>, String> {
    let image_hash = hex::encode(
        image_hasher
            .tpm_extend_finalize_reset()
            .map_err(|e| format!("Could not get result for image_hasher: {:?}", e))?,
    );
    let bootstrap_hash = hex::encode(
        bootstrap_hasher
            .tpm_extend_finalize_reset()
            .map_err(|e| format!("Could not get result for bootstrap_hasher: {:?}", e))?,
    );
    let app_hash = hex::encode(
        app_hasher
            .tpm_extend_finalize_reset()
            .map_err(|e| format!("Could not get result for app_hasher: {:?}", e))?,
    );

    // Hash certificate only if signing key is set, otherwise related PCR will be zero
    let cert_hash = if is_signed {
        Some(hex::encode(
            cert_hasher
                .tpm_extend_finalize_reset()
                .map_err(|e| format!("Could not get result for cert_hash: {:?}", e))?,
        ))
    } else {
        None
    };

    let mut measurements = BTreeMap::new();
    measurements.insert("HashAlgorithm".to_string(), format!("{:?}", hasher));
    measurements.insert("PCR0".to_string(), image_hash);
    measurements.insert("PCR1".to_string(), bootstrap_hash);
    measurements.insert("PCR2".to_string(), app_hash);
    if let Some(cert_hash) = cert_hash {
        measurements.insert("PCR8".to_string(), cert_hash);
    }

    Ok(measurements)
}

/// Builder that assembles an EIF file from a kernel, a cmdline, ramdisks and
/// an optional signature, measuring PCR values along the way.
pub struct EifBuilder<T: Digest + Debug + Write + Clone> {
    kernel: File,
    cmdline: Vec<u8>,
    ramdisks: Vec<File>,
    // Present iff a signing key was supplied.
    signer: Option<EifSigner>,
    // Serialized signature section, generated during `write_to`.
    signature: Option<Vec<u8>>,
    signature_size: u64,
    // Serialized EifIdentityInfo JSON.
    metadata: Vec<u8>,
    eif_hdr_flags: u16,
    default_mem: u64,
    default_cpus: u64,
    /// Hash of the whole EifImage.
    pub image_hasher: EifHasher<T>,
    /// Hash of the EifSections provided by Amazon
    /// Kernel + cmdline + First Ramdisk
    pub bootstrap_hasher: EifHasher<T>,
    /// Hash of the remaining ramdisks.
    pub customer_app_hasher: EifHasher<T>,
    /// Hash the signing certificate
    pub certificate_hasher: EifHasher<T>,
    hasher_template: T,
    eif_crc: u32,
}

impl<T: Digest + Debug + Write + Clone> EifBuilder<T> {
    /// Create a builder from a kernel path, cmdline string, optional signing
    /// key and identity info.
    ///
    /// # Panics
    /// Panics if the kernel file cannot be opened, the cmdline contains an
    /// interior NUL byte, or the identity info cannot be serialized to JSON.
    pub fn new(
        kernel_path: &Path,
        cmdline: String,
        sign_info: Option<SignKeyData>,
        hasher: T,
        flags: u16,
        eif_info: EifIdentityInfo,
    ) -> Self {
        let kernel_file = File::open(kernel_path).expect("Invalid kernel path");
        let cmdline = CString::new(cmdline).expect("Invalid cmdline");
        // Fixed: the original message ended in a stray "{}" placeholder that
        // `expect` never formats.
        let metadata = serde_json::to_vec(&eif_info).expect("Could not serialize metadata");
        let signer = EifSigner::new(sign_info);
        EifBuilder {
            kernel: kernel_file,
            cmdline: cmdline.into_bytes(),
            ramdisks: Vec::new(),
            signer,
            signature: None,
            signature_size: 0,
            metadata,
            eif_hdr_flags: flags,
            default_mem: 1024 * 1024 * 1024,
            default_cpus: 2,
            image_hasher: EifHasher::new_without_cache(hasher.clone())
                .expect("Could not create image_hasher"),
            bootstrap_hasher: EifHasher::new_without_cache(hasher.clone())
                .expect("Could not create bootstrap_hasher"),
            customer_app_hasher: EifHasher::new_without_cache(hasher.clone())
                .expect("Could not create customer app hasher"),
            certificate_hasher: EifHasher::new_without_cache(hasher.clone())
                .expect("Could not create certificate hasher"),
            hasher_template: hasher,
            eif_crc: 0,
        }
    }

    /// Whether a signing key was supplied (a signature section will be written).
    pub fn is_signed(&mut self) -> bool {
        self.signer.is_some()
    }

    /// Queue a ramdisk file to be appended to the image.
    ///
    /// # Panics
    /// Panics if the ramdisk file cannot be opened.
    pub fn add_ramdisk(&mut self, ramdisk_path: &Path) {
        let ramdisk_file = File::open(ramdisk_path).expect("Invalid ramdisk path");
        self.ramdisks.push(ramdisk_file);
    }
| 161 | /// The first two sections are the kernel and the cmdline and the last is metadata. 162 | fn num_sections(&self) -> u16 { 163 | DEFAULT_SECTIONS_COUNT + self.ramdisks.len() as u16 + self.signer.iter().count() as u16 164 | } 165 | 166 | fn sections_offsets(&self) -> [u64; MAX_NUM_SECTIONS] { 167 | let mut result = [0; MAX_NUM_SECTIONS]; 168 | result[0] = self.kernel_offset(); 169 | result[1] = self.cmdline_offset(); 170 | result[2] = self.metadata_offset(); 171 | 172 | for i in 0..self.ramdisks.len() { 173 | result[i + DEFAULT_SECTIONS_COUNT as usize] = self.ramdisk_offset(i); 174 | } 175 | 176 | if self.signer.is_some() { 177 | result[DEFAULT_SECTIONS_COUNT as usize + self.ramdisks.len()] = self.signature_offset(); 178 | } 179 | 180 | result 181 | } 182 | 183 | fn sections_sizes(&self) -> [u64; MAX_NUM_SECTIONS] { 184 | let mut result = [0; MAX_NUM_SECTIONS]; 185 | 186 | result[0] = self.kernel_size(); 187 | result[1] = self.cmdline_size(); 188 | result[2] = self.metadata_size(); 189 | 190 | for i in 0..self.ramdisks.len() { 191 | result[i + DEFAULT_SECTIONS_COUNT as usize] = self.ramdisk_size(&self.ramdisks[i]); 192 | } 193 | 194 | if self.signer.is_some() { 195 | result[DEFAULT_SECTIONS_COUNT as usize + self.ramdisks.len()] = self.signature_size(); 196 | } 197 | 198 | result 199 | } 200 | 201 | fn eif_header_offset(&self) -> u64 { 202 | 0 203 | } 204 | 205 | fn kernel_offset(&self) -> u64 { 206 | self.eif_header_offset() + EifHeader::size() as u64 207 | } 208 | 209 | fn kernel_size(&self) -> u64 { 210 | self.kernel.metadata().unwrap().len() 211 | } 212 | 213 | fn cmdline_offset(&self) -> u64 { 214 | self.kernel_offset() + EifSectionHeader::size() as u64 + self.kernel_size() 215 | } 216 | 217 | fn cmdline_size(&self) -> u64 { 218 | self.cmdline.len() as u64 219 | } 220 | 221 | fn ramdisk_offset(&self, index: usize) -> u64 { 222 | self.metadata_offset() 223 | + self.metadata_size() 224 | + EifSectionHeader::size() as u64 225 | + self.ramdisks[0..index] 226 
| .iter() 227 | .fold(0, |mut total_len, file| { 228 | total_len += file.metadata().expect("Invalid ramdisk metadata").len() 229 | + EifSectionHeader::size() as u64; 230 | total_len 231 | }) 232 | } 233 | 234 | fn ramdisk_size(&self, ramdisk: &File) -> u64 { 235 | ramdisk.metadata().unwrap().len() 236 | } 237 | 238 | fn signature_offset(&self) -> u64 { 239 | let index = self.ramdisks.len() - 1; 240 | self.ramdisk_offset(index) 241 | + EifSectionHeader::size() as u64 242 | + self.ramdisk_size(&self.ramdisks[index]) 243 | } 244 | 245 | fn signature_size(&self) -> u64 { 246 | self.signature_size 247 | } 248 | 249 | fn metadata_offset(&self) -> u64 { 250 | self.cmdline_offset() + EifSectionHeader::size() as u64 + self.cmdline_size() 251 | } 252 | 253 | fn metadata_size(&self) -> u64 { 254 | self.metadata.len() as u64 255 | } 256 | 257 | pub fn header(&mut self) -> EifHeader { 258 | EifHeader { 259 | magic: EIF_MAGIC, 260 | version: crate::defs::CURRENT_VERSION, 261 | flags: self.eif_hdr_flags, 262 | default_mem: self.default_mem, 263 | default_cpus: self.default_cpus, 264 | reserved: 0, 265 | num_sections: self.num_sections(), 266 | section_offsets: self.sections_offsets(), 267 | section_sizes: self.sections_sizes(), 268 | unused: 0, 269 | eif_crc32: self.eif_crc, 270 | } 271 | } 272 | 273 | /// Compute the crc for the whole enclave image, excluding the 274 | /// eif_crc32 field from the EIF header. 275 | pub fn compute_crc(&mut self) { 276 | let crc_gen = Crc::::new(&CRC_32_ISO_HDLC); 277 | let mut crc = crc_gen.digest(); 278 | let eif_header = self.header(); 279 | let eif_buffer = eif_header.to_be_bytes(); 280 | // The last field of the EifHeader is the CRC itself, so we need 281 | // to exclude it from contributing to the CRC. 
282 | let len_without_crc = eif_buffer.len() - size_of::(); 283 | crc.update(&eif_buffer[..len_without_crc]); 284 | 285 | let eif_section = EifSectionHeader { 286 | section_type: EifSectionType::EifSectionKernel, 287 | flags: 0, 288 | section_size: self.kernel_size(), 289 | }; 290 | 291 | let eif_buffer = eif_section.to_be_bytes(); 292 | crc.update(&eif_buffer[..]); 293 | let mut kernel_file = &self.kernel; 294 | 295 | kernel_file 296 | .seek(SeekFrom::Start(0)) 297 | .expect("Could not seek kernel to beginning"); 298 | let mut buffer = Vec::new(); 299 | kernel_file 300 | .read_to_end(&mut buffer) 301 | .expect("Failed to read kernel content"); 302 | 303 | crc.update(&buffer[..]); 304 | 305 | let eif_section = EifSectionHeader { 306 | section_type: EifSectionType::EifSectionCmdline, 307 | flags: 0, 308 | section_size: self.cmdline_size(), 309 | }; 310 | 311 | let eif_buffer = eif_section.to_be_bytes(); 312 | crc.update(&eif_buffer[..]); 313 | crc.update(&self.cmdline[..]); 314 | 315 | let eif_section = EifSectionHeader { 316 | section_type: EifSectionType::EifSectionMetadata, 317 | flags: 0, 318 | section_size: self.metadata_size(), 319 | }; 320 | 321 | let eif_buffer = eif_section.to_be_bytes(); 322 | crc.update(&eif_buffer[..]); 323 | crc.update(&self.metadata[..]); 324 | 325 | for mut ramdisk in &self.ramdisks { 326 | let eif_section = EifSectionHeader { 327 | section_type: EifSectionType::EifSectionRamdisk, 328 | flags: 0, 329 | section_size: self.ramdisk_size(ramdisk), 330 | }; 331 | 332 | let eif_buffer = eif_section.to_be_bytes(); 333 | crc.update(&eif_buffer[..]); 334 | 335 | ramdisk 336 | .seek(SeekFrom::Start(0)) 337 | .expect("Could not seek kernel to begining"); 338 | let mut buffer = Vec::new(); 339 | ramdisk 340 | .read_to_end(&mut buffer) 341 | .expect("Failed to read kernel content"); 342 | crc.update(&buffer[..]); 343 | } 344 | 345 | if let Some(signature) = &self.signature { 346 | let eif_section = EifSectionHeader { 347 | section_type: 
EifSectionType::EifSectionSignature,
                flags: 0,
                section_size: self.signature_size(),
            };

            let eif_buffer = eif_section.to_be_bytes();
            crc.update(&eif_buffer[..]);
            crc.update(&signature[..]);
        }

        self.eif_crc = crc.finalize();
    }

    /// Serialize the EIF header at the start of `file`.
    pub fn write_header(&mut self, file: &mut File) {
        let eif_header = self.header();
        file.seek(SeekFrom::Start(self.eif_header_offset()))
            .expect("Could not seek while writing eif header");
        let eif_buffer = eif_header.to_be_bytes();
        file.write_all(&eif_buffer[..])
            .expect("Failed to write eif header");
    }

    /// Write the kernel section header followed by the kernel image.
    pub fn write_kernel(&mut self, eif_file: &mut File) {
        let eif_section = EifSectionHeader {
            section_type: EifSectionType::EifSectionKernel,
            flags: 0,
            section_size: self.kernel_size(),
        };

        eif_file
            .seek(SeekFrom::Start(self.kernel_offset()))
            .expect("Could not seek while writing kernel section");
        let eif_buffer = eif_section.to_be_bytes();
        eif_file
            .write_all(&eif_buffer[..])
            .expect("Failed to write kernel header");
        let mut kernel_file = &self.kernel;

        kernel_file
            .seek(SeekFrom::Start(0))
            .expect("Could not seek kernel to beginning");
        let mut buffer = Vec::new();
        kernel_file
            .read_to_end(&mut buffer)
            .expect("Failed to read kernel content");

        eif_file
            .write_all(&buffer[..])
            .expect("Failed to write kernel data");
    }

    /// Write the cmdline section header followed by the cmdline bytes.
    pub fn write_cmdline(&mut self, eif_file: &mut File) {
        let eif_section = EifSectionHeader {
            section_type: EifSectionType::EifSectionCmdline,
            flags: 0,
            section_size: self.cmdline_size(),
        };

        eif_file
            .seek(SeekFrom::Start(self.cmdline_offset()))
            .expect("Could not seek while writing cmdline section");
        let eif_buffer = eif_section.to_be_bytes();
        eif_file
            .write_all(&eif_buffer[..])
            .expect("Failed to write cmdline header");

        // Fixed expect message: this writes the cmdline data, not the header.
        eif_file
            .write_all(&self.cmdline[..])
            .expect("Failed to write cmdline data");
    }

    /// Write the metadata section header followed by the serialized metadata.
    pub fn write_metadata(&mut self, eif_file: &mut File) {
        let eif_section = EifSectionHeader {
            section_type: EifSectionType::EifSectionMetadata,
            flags: 0,
            section_size: self.metadata_size(),
        };

        eif_file
            .seek(SeekFrom::Start(self.metadata_offset()))
            .expect("Could not seek while writing metadata section");

        let eif_buffer = eif_section.to_be_bytes();
        eif_file
            .write_all(&eif_buffer[..])
            .expect("Failed to write metadata header");

        eif_file
            .write_all(&self.metadata)
            .expect("Failed to write metadata content");
    }

    /// Write one section header plus content for every queued ramdisk.
    pub fn write_ramdisks(&mut self, eif_file: &mut File) {
        for (index, mut ramdisk) in self.ramdisks.iter().enumerate() {
            let eif_section = EifSectionHeader {
                section_type: EifSectionType::EifSectionRamdisk,
                flags: 0,
                section_size: self.ramdisk_size(ramdisk),
            };

            // Fixed copy-paste expect message: this is the ramdisk section.
            eif_file
                .seek(SeekFrom::Start(self.ramdisk_offset(index)))
                .expect("Could not seek while writing ramdisk section");
            let eif_buffer = eif_section.to_be_bytes();
            eif_file
                .write_all(&eif_buffer[..])
                .expect("Failed to write section header");

            ramdisk
                .seek(SeekFrom::Start(0))
                .expect("Could not seek ramdisk to beginning");
            let mut buffer = Vec::new();
            ramdisk
                .read_to_end(&mut buffer)
                .expect("Failed to read ramdisk content");
            eif_file
                .write_all(&buffer[..])
                .expect("Failed to write ramdisk data");
        }
    }

    /// Write the signature section, if a signature was generated.
    pub fn write_signature(&mut self, eif_file: &mut File) {
        if let Some(signature) = &self.signature {
            let eif_section = EifSectionHeader {
                section_type: EifSectionType::EifSectionSignature,
                flags: 0,
                section_size: self.signature_size(),
            };

            eif_file
                .seek(SeekFrom::Start(self.signature_offset()))
                .expect("Could not seek while writing signature section");
            let eif_buffer = eif_section.to_be_bytes();
            eif_file
                .write_all(&eif_buffer[..])
                .expect("Failed to write signature header");

            // Fixed expect message: this writes the signature data.
            eif_file
                .write_all(&signature[..])
                .expect("Failed to write signature data");
        }
    }

    /// Measure all sections, generate the signature (when signing), compute
    /// the CRC and write the complete EIF to `output_file`.
    ///
    /// Returns the PCR measurements produced by `get_pcrs`.
    pub fn write_to(&mut self, output_file: &mut File) -> BTreeMap<String, String> {
        self.measure();
        let measurements = get_pcrs(
            &mut self.image_hasher,
            &mut self.bootstrap_hasher,
            &mut self.customer_app_hasher,
            &mut self.certificate_hasher,
            self.hasher_template.clone(),
            self.signer.is_some(),
        )
        .expect("Failed to get measurements");
        if let Some(signer) = self.signer.as_ref() {
            let signature = signer
                .generate_eif_signature(&measurements)
                .expect("Failed to generate signature");
            self.signature_size = signature.len() as u64;
            self.signature = Some(signature);
        }
        self.compute_crc();
        self.write_header(output_file);
        self.write_kernel(output_file);
        self.write_cmdline(output_file);
        self.write_metadata(output_file);
        self.write_ramdisks(output_file);
        self.write_signature(output_file);
        measurements
    }

    /// Feed every section into the PCR hashers (image, bootstrap, customer
    /// app and, when signing, the certificate hasher).
    pub fn measure(&mut self) {
        let mut kernel_file = &self.kernel;
        kernel_file
            .seek(SeekFrom::Start(0))
            .expect("Could not seek kernel to beginning");
        let mut buffer = Vec::new();
        kernel_file
            .read_to_end(&mut buffer)
            .expect("Failed to read kernel content");
        self.image_hasher.write_all(&buffer[..]).unwrap();
        self.bootstrap_hasher.write_all(&buffer[..]).unwrap();

        self.image_hasher.write_all(&self.cmdline[..]).unwrap();
self.bootstrap_hasher.write_all(&self.cmdline[..]).unwrap(); 540 | 541 | for (index, mut ramdisk) in self.ramdisks.iter().enumerate() { 542 | ramdisk 543 | .seek(SeekFrom::Start(0)) 544 | .expect("Could not seek kernel to beginning"); 545 | let mut buffer = Vec::new(); 546 | ramdisk 547 | .read_to_end(&mut buffer) 548 | .expect("Failed to read kernel content"); 549 | self.image_hasher.write_all(&buffer[..]).unwrap(); 550 | // The first ramdisk is provided by amazon and it contains the 551 | // code to bootstrap the docker container. 552 | if index == 0 { 553 | self.bootstrap_hasher.write_all(&buffer[..]).unwrap(); 554 | } else { 555 | self.customer_app_hasher.write_all(&buffer[..]).unwrap(); 556 | } 557 | } 558 | 559 | if let Some(signer) = self.signer.as_ref() { 560 | let cert_der = signer 561 | .get_cert_der() 562 | .expect("Certificate must be available and convertible to DER"); 563 | // This is equivalent to extend(cert.digest(sha384)), since hasher is going to 564 | // hash the DER certificate (cert.digest()) and then tpm_extend_finalize_reset 565 | // will do the extend. 566 | self.certificate_hasher.write_all(&cert_der).unwrap(); 567 | } 568 | } 569 | } 570 | 571 | /// PCR Signature verifier that checks the validity of 572 | /// the certificate used to sign the enclave 573 | #[derive(Clone, Debug, Serialize, Deserialize)] 574 | pub struct PcrSignatureChecker { 575 | signing_certificate: Vec, 576 | signature: Vec, 577 | } 578 | 579 | impl PcrSignatureChecker { 580 | pub fn new(pcr_signature: &PcrSignature) -> Self { 581 | PcrSignatureChecker { 582 | signing_certificate: pcr_signature.signing_certificate.clone(), 583 | signature: pcr_signature.signature.clone(), 584 | } 585 | } 586 | 587 | /// Reads EIF section headers and looks for a signature. 
588 | /// Seek to the signature section, if present, and save the certificate and signature 589 | pub fn from_eif(eif_path: &str) -> Result { 590 | let mut signing_certificate = Vec::new(); 591 | let mut signature = Vec::new(); 592 | 593 | let mut curr_seek = 0; 594 | let mut eif_file = 595 | File::open(eif_path).map_err(|e| format!("Failed to open the EIF file: {:?}", e))?; 596 | 597 | // Skip header 598 | let mut header_buf = vec![0u8; EifHeader::size()]; 599 | eif_file 600 | .read_exact(&mut header_buf) 601 | .map_err(|e| format!("Error while reading EIF header: {:?}", e))?; 602 | 603 | curr_seek += EifHeader::size(); 604 | eif_file 605 | .seek(SeekFrom::Start(curr_seek as u64)) 606 | .map_err(|e| format!("Failed to seek file from start: {:?}", e))?; 607 | 608 | let mut section_buf = vec![0u8; EifSectionHeader::size()]; 609 | 610 | // Read all section headers and skip if different from signature section 611 | while eif_file.read_exact(&mut section_buf).is_ok() { 612 | let section = EifSectionHeader::from_be_bytes(§ion_buf) 613 | .map_err(|e| format!("Error extracting EIF section header: {:?}", e))?; 614 | curr_seek += EifSectionHeader::size(); 615 | 616 | if section.section_type == EifSectionType::EifSectionSignature { 617 | let mut buf = vec![0u8; section.section_size as usize]; 618 | eif_file 619 | .seek(SeekFrom::Start(curr_seek as u64)) 620 | .map_err(|e| format!("Failed to seek after EIF section header: {:?}", e))?; 621 | eif_file.read_exact(&mut buf).map_err(|e| { 622 | format!("Error while reading signature section from EIF: {:?}", e) 623 | })?; 624 | 625 | // Deserialize PCR signature structure and save certificate and signature 626 | let des_sign: Vec = from_slice(&buf[..]) 627 | .map_err(|e| format!("Error deserializing certificate: {:?}", e))?; 628 | 629 | signing_certificate.clone_from(&des_sign[0].signing_certificate); 630 | signature.clone_from(&des_sign[0].signature); 631 | } 632 | 633 | curr_seek += section.section_size as usize; 634 | eif_file 
635 | .seek(SeekFrom::Start(curr_seek as u64)) 636 | .map_err(|e| format!("Failed to seek after EIF section: {:?}", e))?; 637 | } 638 | 639 | Ok(Self { 640 | signing_certificate, 641 | signature, 642 | }) 643 | } 644 | 645 | pub fn is_empty(&self) -> bool { 646 | self.signing_certificate.len() == 0 && self.signature.len() == 0 647 | } 648 | 649 | /// Verifies the validity of the signing certificate 650 | pub fn verify(&mut self) -> Result<(), String> { 651 | let signature = CoseSign1::from_bytes(&self.signature[..]) 652 | .map_err(|err| format!("Could not deserialize the signature: {:?}", err))?; 653 | let cert = openssl::x509::X509::from_pem(&self.signing_certificate[..]) 654 | .map_err(|_| "Could not deserialize the signing certificate".to_string())?; 655 | let public_key = cert 656 | .public_key() 657 | .map_err(|_| "Could not get the public key from the signing certificate".to_string())?; 658 | 659 | // Verify the signature 660 | let result = signature 661 | .verify_signature::(public_key.as_ref()) 662 | .map_err(|err| format!("Could not verify EIF signature: {:?}", err))?; 663 | if !result { 664 | return Err("The EIF signature is not valid".to_string()); 665 | } 666 | 667 | // Verify that the signing certificate is not expired 668 | let current_time = Asn1Time::days_from_now(0).map_err(|err| err.to_string())?; 669 | if current_time 670 | .compare(cert.not_after()) 671 | .map_err(|err| err.to_string())? 672 | == Ordering::Greater 673 | || current_time 674 | .compare(cert.not_before()) 675 | .map_err(|err| err.to_string())? 
676 | == Ordering::Less 677 | { 678 | return Err("The signing certificate is expired".to_string()); 679 | } 680 | 681 | Ok(()) 682 | } 683 | } 684 | 685 | #[cfg(test)] 686 | mod tests { 687 | use crate::utils::eif_signer::{SignKey, SignKeyData}; 688 | use std::{env, io::Write, path::Path}; 689 | use tempfile::{NamedTempFile, TempPath}; 690 | 691 | const TEST_CERT_CONTENT: &[u8] = "test cert content".as_bytes(); 692 | const TEST_PKEY_CONTENT: &[u8] = "test key content".as_bytes(); 693 | 694 | fn generate_certificate_file() -> Result { 695 | let cert_file = NamedTempFile::new()?; 696 | cert_file.as_file().write(TEST_CERT_CONTENT)?; 697 | Ok(cert_file.into_temp_path()) 698 | } 699 | 700 | fn generate_pkey_file() -> Result { 701 | let key_file = NamedTempFile::new()?; 702 | key_file.as_file().write(TEST_PKEY_CONTENT)?; 703 | Ok(key_file.into_temp_path()) 704 | } 705 | 706 | #[test] 707 | fn test_local_sign_key_data_from_invalid_local_key_info() -> Result<(), std::io::Error> { 708 | let cert_file_path = generate_certificate_file()?; 709 | 710 | let key_data = SignKeyData::new("/incorrect/pk/path".into(), &cert_file_path); 711 | 712 | assert!(key_data.is_err()); 713 | Ok(()) 714 | } 715 | 716 | #[test] 717 | fn test_local_sign_key_data_from_invalid_cert_key_info() -> Result<(), std::io::Error> { 718 | let key_file_path = generate_pkey_file()?; 719 | let key_path_str = >::as_ref(&key_file_path) 720 | .to_str() 721 | .expect("Key file must be correct"); 722 | 723 | let key_data = SignKeyData::new(key_path_str, Path::new("/incorrect/cert/path")); 724 | 725 | assert!(key_data.is_err()); 726 | Ok(()) 727 | } 728 | 729 | #[test] 730 | fn test_local_sign_key_data_from_valid_key_info() -> Result<(), std::io::Error> { 731 | let key_file_path = generate_pkey_file()?; 732 | let key_path_str = >::as_ref(&key_file_path) 733 | .to_str() 734 | .expect("Key file must be correct"); 735 | let cert_file_path = generate_certificate_file()?; 736 | 737 | let key_data = 
SignKeyData::new(key_path_str, &cert_file_path).unwrap(); 738 | 739 | assert_eq!(key_data.cert, TEST_CERT_CONTENT); 740 | assert!(matches!(key_data.key, SignKey::LocalPrivateKey(key) if key == TEST_PKEY_CONTENT)); 741 | 742 | Ok(()) 743 | } 744 | 745 | mod kms { 746 | use super::*; 747 | 748 | #[test] 749 | fn test_kms_sign_key_data_from_invalid_cert_key_info() -> Result<(), std::io::Error> { 750 | let key_arn = 751 | env::var("AWS_KMS_TEST_KEY_ARN").expect("Please set AWS_KMS_TEST_KEY_ARN"); 752 | 753 | let key_data = SignKeyData::new(&key_arn, Path::new("/incorrect/cert/path")); 754 | 755 | assert!(key_data.is_err()); 756 | Ok(()) 757 | } 758 | 759 | #[test] 760 | fn test_kms_sign_key_data_from_valid_key_arn() -> Result<(), std::io::Error> { 761 | let cert_file_path = generate_certificate_file()?; 762 | let key_arn = 763 | env::var("AWS_KMS_TEST_KEY_ARN").expect("Please set AWS_KMS_TEST_KEY_ARN"); 764 | 765 | let key_data = SignKeyData::new(&key_arn, &cert_file_path).unwrap(); 766 | 767 | assert_eq!(key_data.cert, TEST_CERT_CONTENT); 768 | assert!(matches!(key_data.key, SignKey::KmsKey(_))); 769 | 770 | Ok(()) 771 | } 772 | } 773 | } 774 | --------------------------------------------------------------------------------