├── tests
│   ├── fixtures
│   │   ├── blob.tar.gz
│   │   ├── config.json
│   │   └── manifest.json
│   └── digest_validation.rs
├── CODEOWNERS
├── justfile
├── .gitignore
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── release.yml
│       ├── daily_security.yml
│       └── build.yml
├── justfile-windows
├── README.md
├── src
│   ├── lib.rs
│   ├── secrets.rs
│   ├── annotations.rs
│   ├── token_cache.rs
│   ├── digest.rs
│   ├── blob.rs
│   ├── errors.rs
│   ├── config.rs
│   └── manifest.rs
├── examples
│   ├── wasm
│   │   ├── pull.rs
│   │   ├── push.rs
│   │   ├── cli.rs
│   │   └── main.rs
│   └── get-manifest
│       └── main.rs
├── deny.toml
├── CONTRIBUTING.md
├── Cargo.toml
└── LICENSE

/tests/fixtures/blob.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oras-project/rust-oci-client/HEAD/tests/fixtures/blob.tar.gz
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This file is described here: https://help.github.com/en/articles/about-code-owners
2 | 
3 | # Global Owners:
4 | * @thomastaylor312 @bacongobbler @flavio
5 | 
--------------------------------------------------------------------------------
/justfile:
--------------------------------------------------------------------------------
1 | build +FLAGS='':
2 |     cargo build {{FLAGS}}
3 | 
4 | test:
5 |     cargo fmt --all -- --check
6 |     cargo clippy --workspace
7 |     cargo test --workspace --lib --tests
8 |     cargo test --doc --all
9 | 
10 | check-deny:
11 |     cargo deny --all-features check bans licenses sources
12 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # see rust-lang/cargo#315
2 | Cargo.lock
3 | 
4 | target/
5 | **/*.rs.bk
6 | /config
7 | _dist/
8 | node_modules/
9 | .DS_Store
10 | .vscode/
11 | 
12 | # oneclick trace files
13 | krustlet-wasi-e2e.stdout.txt
14 | krustlet-wasi-e2e.stderr.txt
15 | oneclick-logs
16 | csi-test-binaries
17 | 
18 | .idea/
19 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: cargo
4 |     directory: "/"
5 |     schedule:
6 |       interval: weekly
7 |     open-pull-requests-limit: 10
8 |   - package-ecosystem: "github-actions"
9 |     directory: "/"
10 |     schedule:
11 |       interval: "weekly"
12 |     open-pull-requests-limit: 10
13 | 
--------------------------------------------------------------------------------
/justfile-windows:
--------------------------------------------------------------------------------
1 | set shell := ["powershell.exe", "-c"]
2 | 
3 | build +FLAGS='--no-default-features --features rustls-tls':
4 |     cargo build {{FLAGS}}
5 | 
6 | test:
7 |     cargo fmt --all -- --check
8 |     cargo clippy --no-default-features --features rustls-tls
9 |     cargo test --no-default-features --features rustls-tls
10 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 | on:
3 |   push:
4 |     tags:
5 |       - "v*"
6 | jobs:
7 |   publish:
8 |     name: publish to crates.io
9 |     runs-on: ubuntu-latest
10 |     steps:
11 |       - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
12 |       - name: publish oci-distribution to crates.io
13 |         run: cargo publish --token ${{
secrets.CargoToken }} 14 | -------------------------------------------------------------------------------- /.github/workflows/daily_security.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | schedule: 4 | - cron: "0 0 * * *" 5 | workflow_dispatch: 6 | 7 | jobs: 8 | audit: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 12 | - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 #v2.0.0 13 | with: 14 | token: ${{ secrets.GITHUB_TOKEN }} 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rust OCI Client 2 | 3 | Formerly known as `oci-distribution` 4 | 5 | [![oci-client documentation](https://docs.rs/oci-client/badge.svg)](https://docs.rs/oci-client) 6 | 7 | This Rust library implements the 8 | [OCI Distribution specification](https://github.com/opencontainers/distribution-spec/blob/master/spec.md), 9 | which is the protocol that Docker Hub and other container registries use. 10 | 11 | ## Code of Conduct 12 | 13 | This project has adopted the [CNCF Code of 14 | Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 15 | -------------------------------------------------------------------------------- /tests/fixtures/config.json: -------------------------------------------------------------------------------- 1 | {"architecture":"amd64","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["sh"],"WorkingDir":"/","ArgsEscaped":true,"OnBuild":null},"created":"2024-01-17T21:49:12Z","history":[{"created":"2024-01-17T21:49:12Z","created_by":"ADD busybox.tar.xz / # buildkit","comment":"buildkit.dockerfile.v0"},{"created":"2024-01-17T21:49:12Z","created_by":"CMD [\"sh\"]","comment":"buildkit.dockerfile.v0","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:2e112031b4b923a873c8b3d685d48037e4d5ccd967b658743d93a6e56c3064b9"]}} -------------------------------------------------------------------------------- /tests/fixtures/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": 2, 3 | "mediaType": "application/vnd.docker.distribution.manifest.v2+json", 4 | "config": { 5 | "mediaType": "application/vnd.docker.container.image.v1+json", 6 | "size": 581, 7 | "digest": "sha256:3f57d9401f8d42f986df300f0c69192fc41da28ccc8d797829467780db3dd741" 8 | }, 9 | "layers": [ 10 | { 11 | "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", 12 | "size": 2220094, 13 | "digest": "sha256:9ad63333ebc97e32b987ae66aa3cff81300e4c2e6d2f2395cef8a3ae18b249fe" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 
An OCI Distribution client for fetching oci images from an OCI compliant remote store 2 | #![deny(missing_docs)] 3 | 4 | use sha2::Digest; 5 | 6 | pub mod annotations; 7 | mod blob; 8 | pub mod client; 9 | pub mod config; 10 | pub(crate) mod digest; 11 | pub mod errors; 12 | pub mod manifest; 13 | pub mod secrets; 14 | mod token_cache; 15 | 16 | #[doc(inline)] 17 | pub use client::Client; 18 | #[doc(inline)] 19 | pub use oci_spec::distribution::{ParseError, Reference}; 20 | #[doc(inline)] 21 | pub use token_cache::RegistryOperation; 22 | 23 | /// Computes the SHA256 digest of a byte vector 24 | pub(crate) fn sha256_digest(bytes: &[u8]) -> String { 25 | format!("sha256:{:x}", sha2::Sha256::digest(bytes)) 26 | } 27 | -------------------------------------------------------------------------------- /examples/wasm/pull.rs: -------------------------------------------------------------------------------- 1 | use oci_client::{manifest, secrets::RegistryAuth, Client, Reference}; 2 | use tracing::info; 3 | 4 | pub(crate) async fn pull_wasm( 5 | client: &mut Client, 6 | auth: &RegistryAuth, 7 | reference: &Reference, 8 | output: &str, 9 | ) { 10 | info!(?reference, ?output, "pulling wasm module"); 11 | 12 | let image_content = client 13 | .pull(reference, auth, vec![manifest::WASM_LAYER_MEDIA_TYPE]) 14 | .await 15 | .expect("Cannot pull Wasm module") 16 | .layers 17 | .into_iter() 18 | .next() 19 | .map(|layer| layer.data) 20 | .expect("No data found"); 21 | 22 | tokio::fs::write(output, image_content) 23 | .await 24 | .expect("Cannot write to file"); 25 | println!("Wasm module successfully written to {output}"); 26 | } 27 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | [advisories] 2 | 3 | [licenses] 4 | version = 2 5 | confidence-threshold = 1.0 6 | 7 | # List of explictly allowed licenses 8 | # See https://spdx.org/licenses/ for list of possible licenses 9 | # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 10 | allow = [ 11 | "Apache-2.0", 12 | "BSD-3-Clause", 13 | "CDLA-Permissive-2.0", 14 | "ISC", 15 | "MIT", 16 | "Unicode-3.0", 17 | "Zlib", 18 | ] 19 | 20 | [[licenses.clarify]] 21 | name = "encoding_rs" 22 | version = "*" 23 | expression = "(Apache-2.0 OR MIT) AND BSD-3-Clause" 24 | license-files = [{ path = "COPYRIGHT", hash = 0x39f8ad31 }] 25 | 26 | [[licenses.clarify]] 27 | name = "ring" 28 | expression = "MIT AND ISC AND OpenSSL" 29 | license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] 30 | 31 | [bans] 32 | multiple-versions = "allow" 33 | -------------------------------------------------------------------------------- /src/secrets.rs: -------------------------------------------------------------------------------- 1 | //! 
Types for working with registry access secrets 2 | 3 | /// A method for authenticating to a registry 4 | #[derive(Eq, PartialEq, Debug, Clone)] 5 | pub enum RegistryAuth { 6 | /// Access the registry anonymously 7 | Anonymous, 8 | /// Access the registry using HTTP Basic authentication 9 | Basic(String, String), 10 | /// Access the registry using Bearer token authentication 11 | Bearer(String), 12 | } 13 | 14 | pub(crate) trait Authenticable { 15 | fn apply_authentication(self, auth: &RegistryAuth) -> Self; 16 | } 17 | 18 | impl Authenticable for reqwest::RequestBuilder { 19 | fn apply_authentication(self, auth: &RegistryAuth) -> Self { 20 | match auth { 21 | RegistryAuth::Anonymous => self, 22 | RegistryAuth::Basic(username, password) => self.basic_auth(username, Some(password)), 23 | RegistryAuth::Bearer(token) => self.bearer_auth(token), 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guide 2 | 3 | This document describes the requirements for committing to this repository. 4 | 5 | ## Developer Certificate of Origin (DCO) 6 | 7 | In order to contribute to this project, you must sign each of your commits to 8 | attest that you have the right to contribute that code. This is done with the 9 | `-s`/`--signoff` flag on `git commit`. More information about DCO can be found 10 | [here](https://developercertificate.org/) 11 | 12 | ## Pull Request Management 13 | 14 | All code that is contributed to oci-distribution must go through the Pull 15 | Request (PR) process. To contribute a PR, fork this project, create a new 16 | branch, make changes on that branch, and then use GitHub to open a pull request 17 | with your changes. 18 | 19 | Every PR must be reviewed by at least one Core Maintainer of the project. Once 20 | a PR has been marked "Approved" by a Core Maintainer (and no other core 21 | maintainer has an open "Rejected" vote), the PR may be merged. While it is fine 22 | for non-maintainers to contribute their own code reviews, those reviews do not 23 | satisfy the above requirement. 24 | 25 | ## Code of Conduct 26 | 27 | This project has adopted the [CNCF Code of 28 | Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
29 | -------------------------------------------------------------------------------- /examples/wasm/push.rs: -------------------------------------------------------------------------------- 1 | use oci_client::{ 2 | client::{Config, ImageLayer}, 3 | manifest, 4 | secrets::RegistryAuth, 5 | Client, Reference, 6 | }; 7 | use std::collections::BTreeMap; 8 | use tracing::info; 9 | 10 | pub(crate) async fn push_wasm( 11 | client: &mut Client, 12 | auth: &RegistryAuth, 13 | reference: &Reference, 14 | module: &str, 15 | annotations: Option>, 16 | ) { 17 | info!(?reference, ?module, "pushing wasm module"); 18 | 19 | let data = tokio::fs::read(module) 20 | .await 21 | .expect("Cannot read Wasm module from disk"); 22 | 23 | let layers = vec![ImageLayer::new( 24 | data, 25 | manifest::WASM_LAYER_MEDIA_TYPE.to_string(), 26 | None, 27 | )]; 28 | 29 | let config = Config { 30 | data: bytes::Bytes::from_static(b"{}"), 31 | media_type: manifest::WASM_CONFIG_MEDIA_TYPE.to_string(), 32 | annotations: None, 33 | }; 34 | 35 | let image_manifest = manifest::OciImageManifest::build(&layers, &config, annotations); 36 | 37 | let response = client 38 | .push(reference, &layers, config, auth, Some(image_manifest)) 39 | .await 40 | .map(|push_response| push_response.manifest_url) 41 | .expect("Cannot push Wasm module"); 42 | 43 | println!("Wasm module successfully pushed {response:?}"); 44 | } 45 | -------------------------------------------------------------------------------- /examples/wasm/cli.rs: -------------------------------------------------------------------------------- 1 | use clap::{Parser, Subcommand}; 2 | 3 | /// Pull a WebAssembly module from a OCI container registry 4 | #[derive(Parser, Debug)] 5 | #[clap(author, version, about, long_about = None)] 6 | pub(crate) struct Cli { 7 | /// Enable verbose mode 8 | #[clap(short, long)] 9 | pub verbose: bool, 10 | 11 | /// Perform anonymous operation, by default the tool tries to reuse the docker credentials read 12 | /// from the default docker file 13 | #[clap(short, long)] 14 | pub anonymous: bool, 15 | 16 | /// Pull image from registry using HTTP instead of HTTPS 17 | #[clap(short, long)] 18 | pub insecure: bool, 19 | 20 | #[clap(subcommand)] 21 | pub command: Commands, 22 | } 23 | 24 | #[derive(Debug, Subcommand)] 25 | pub(crate) enum Commands { 26 | #[clap(arg_required_else_help = true)] 27 | Pull { 28 | /// Write contents to file 29 | #[clap(short, long)] 30 | output: String, 31 | 32 | /// Name of the image to pull 33 | image: String, 34 | }, 35 | #[clap(arg_required_else_help = true)] 36 | Push { 37 | /// OCI Annotations to be added to the manifest 38 | #[clap(short, long, required(false))] 39 | annotations: Vec, 40 | 41 | /// Wasm file to push 42 | module: String, 43 | 44 | /// Name of the image to pull 45 | image: String, 46 | }, 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: {} 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | strategy: 12 | fail-fast: false 13 | steps: 14 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 15 | - uses: engineerd/configurator@dc6b312d89ab097f73f3ebbf507a69dd7399c5d0 #v0.0.10 16 | with: 17 | name: just 18 | url: https://github.com/casey/just/releases/download/0.10.2/just-0.10.2-x86_64-unknown-linux-musl.tar.gz 19 | pathInArchive: just 20 | - 
name: Build 21 | run: | 22 | just build 23 | just test 24 | 25 | windows-build: 26 | runs-on: windows-latest 27 | defaults: 28 | run: 29 | # For some reason, running with the default powershell doesn't work with the `Build` step, 30 | # but bash does! 31 | shell: bash 32 | steps: 33 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 34 | - uses: engineerd/configurator@dc6b312d89ab097f73f3ebbf507a69dd7399c5d0 #v0.0.10 35 | with: 36 | name: just 37 | url: "https://github.com/casey/just/releases/download/0.10.2/just-0.10.2-x86_64-pc-windows-msvc.zip" 38 | pathInArchive: just.exe 39 | - name: Build 40 | run: | 41 | just --justfile justfile-windows build 42 | just --justfile justfile-windows test 43 | 44 | cargo-deny: 45 | name: Run cargo deny 46 | runs-on: ubuntu-latest 47 | strategy: 48 | matrix: 49 | checks: 50 | - advisories 51 | - bans licenses sources 52 | steps: 53 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 54 | - uses: EmbarkStudios/cargo-deny-action@76cd80eb775d7bbbd2d80292136d74d39e1b4918 # v2.0.3 55 | with: 56 | command: check ${{ matrix.checks }} 57 | -------------------------------------------------------------------------------- /src/annotations.rs: -------------------------------------------------------------------------------- 1 | //! OCI Annotation key constants, taken from: 2 | //! 3 | 4 | /// Date and time on which the image was built (string, date-time as defined by RFC 3339) 5 | pub const ORG_OPENCONTAINERS_IMAGE_CREATED: &str = "org.opencontainers.image.created"; 6 | /// Contact details of the people or organization responsible for the image (freeform string) 7 | pub const ORG_OPENCONTAINERS_IMAGE_AUTHORS: &str = "org.opencontainers.image.authors"; 8 | /// URL to find more information on the image (string) 9 | pub const ORG_OPENCONTAINERS_IMAGE_URL: &str = "org.opencontainers.image.url"; 10 | /// URL to get documentation on the image (string) 11 | pub const ORG_OPENCONTAINERS_IMAGE_DOCUMENTATION: &str = "org.opencontainers.image.documentation"; 12 | /// URL to get source code for building the image (string) 13 | pub const ORG_OPENCONTAINERS_IMAGE_SOURCE: &str = "org.opencontainers.image.source"; 14 | /// Version of the packaged software 15 | pub const ORG_OPENCONTAINERS_IMAGE_VERSION: &str = "org.opencontainers.image.version"; 16 | /// Source control revision identifier for the packaged software 17 | pub const ORG_OPENCONTAINERS_IMAGE_REVISION: &str = "org.opencontainers.image.revision"; 18 | /// Name of the distributing entity, organization or individual 19 | pub const ORG_OPENCONTAINERS_IMAGE_VENDOR: &str = "org.opencontainers.image.vendor"; 20 | /// License(s) under which contained software is distributed as an SPDX License Expression 21 | pub const ORG_OPENCONTAINERS_IMAGE_LICENSES: &str = "org.opencontainers.image.licenses"; 22 | /// Name of the reference for a target (string) 23 | pub const ORG_OPENCONTAINERS_IMAGE_REF_NAME: &str = "org.opencontainers.image.ref.name"; 24 | /// Human-readable title of the image (string) 25 | pub const ORG_OPENCONTAINERS_IMAGE_TITLE: &str = "org.opencontainers.image.title"; 26 | /// Human-readable description of the software packaged in the image (string) 27 | pub const ORG_OPENCONTAINERS_IMAGE_DESCRIPTION: &str = "org.opencontainers.image.description"; 28 | /// Digest of the image this image is based on (string) 29 | pub const ORG_OPENCONTAINERS_IMAGE_BASE_DIGEST: &str = "org.opencontainers.image.base.digest"; 30 | /// If the `image.base.name` annotation is specified, the 
`image.base.digest` 31 | /// annotation SHOULD be the digest of the manifest referenced by 32 | /// the `image.ref.name` annotation. 33 | pub const ORG_OPENCONTAINERS_IMAGE_BASE_NAME: &str = "org.opencontainers.image.base.name"; 34 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = [ 3 | "Matt Butcher ", 4 | "Matthew Fisher ", 5 | "Radu Matei ", 6 | "Taylor Thomas ", 7 | "Brian Ketelsen ", 8 | "Brian Hardock ", 9 | "Ryan Levick ", 10 | "Kevin Flansburg ", 11 | "Flavio Castelli ", 12 | ] 13 | description = "An OCI implementation in Rust" 14 | documentation = "https://docs.rs/oci-client" 15 | edition = "2021" 16 | homepage = "https://github.com/oras-project/rust-oci-client" 17 | keywords = ["oci", "containers"] 18 | license = "Apache-2.0" 19 | name = "oci-client" 20 | readme = "README.md" 21 | repository = "https://github.com/oras-project/rust-oci-client" 22 | version = "0.15.0" 23 | 24 | [badges] 25 | maintenance = { status = "actively-developed" } 26 | 27 | [features] 28 | default = ["native-tls", "test-registry"] 29 | native-tls = ["reqwest/native-tls"] 30 | rustls-tls = ["reqwest/rustls-tls"] 31 | rustls-tls-native-roots = ["reqwest/rustls-tls-native-roots"] 32 | trust-dns = ["reqwest/trust-dns"] 33 | # This features is used by tests that use docker to create a registry 34 | test-registry = [] 35 | 36 | [dependencies] 37 | bytes = "1" 38 | chrono = { version = "0.4", features = ["serde"] } 39 | futures-util = "0.3" 40 | http = "1.3" 41 | http-auth = { version = "0.1", default-features = false } 42 | jsonwebtoken = "9.3" 43 | lazy_static = "1.4" 44 | oci-spec = "0.8" 45 | olpc-cjson = "0.1" 46 | regex = "1.11" 47 | reqwest = { version = "0.12", default-features = false, features = [ 48 | "json", 49 | "stream", 50 | ] } 51 | serde_json = "1.0" 52 | serde = { version = "1.0", features = ["derive"] } 53 | sha2 = "0.10" 54 | thiserror = "2" 55 | tokio = { version = "1", features = ["macros", "io-util"] } 56 | tracing = { version = "0.1", features = ['log'] } 57 | unicase = "2.8" 58 | 59 | [dev-dependencies] 60 | assert-json-diff = "2.0" 61 | anyhow = "1" 62 | axum = "0.8" 63 | clap = { version = "4.5", features = ["derive"] } 64 | rstest = "0.26" 65 | docker_credential = "1.3" 66 | hmac = "0.12" 67 | itertools = "0.14" 68 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 69 | tempfile = "3.21" 70 | # This should stay pinned here until testcontainers makes sure all of its deps using rustls are 71 | # using the ring feature. 
Otherwise this fails to compile on Windows 72 | testcontainers = "0.26" 73 | tokio = { version = "1", features = ["macros", "fs", "rt-multi-thread"] } 74 | tokio-util = { version = "0.7", features = ["compat"] } 75 | -------------------------------------------------------------------------------- /examples/get-manifest/main.rs: -------------------------------------------------------------------------------- 1 | use oci_client::{secrets::RegistryAuth, Client, Reference}; 2 | 3 | use clap::Parser; 4 | use docker_credential::{CredentialRetrievalError, DockerCredential}; 5 | use tracing::{debug, warn}; 6 | use tracing_subscriber::prelude::*; 7 | use tracing_subscriber::{fmt, EnvFilter}; 8 | 9 | /// Pull a WebAssembly module from a OCI container registry 10 | #[derive(Parser, Debug)] 11 | #[clap(author, version, about, long_about = None)] 12 | pub(crate) struct Cli { 13 | /// Enable verbose mode 14 | #[clap(short, long)] 15 | pub verbose: bool, 16 | 17 | /// Perform anonymous operation, by default the tool tries to reuse the docker credentials read 18 | /// from the default docker file 19 | #[clap(short, long)] 20 | pub anonymous: bool, 21 | 22 | /// Pull image from registry using HTTP instead of HTTPS 23 | #[clap(short, long)] 24 | pub insecure: bool, 25 | 26 | /// Enable json output 27 | #[clap(long)] 28 | pub json: bool, 29 | 30 | /// Name of the image to pull 31 | image: String, 32 | } 33 | 34 | fn build_auth(reference: &Reference, cli: &Cli) -> RegistryAuth { 35 | let server = reference 36 | .resolve_registry() 37 | .strip_suffix('/') 38 | .unwrap_or_else(|| reference.resolve_registry()); 39 | 40 | if cli.anonymous { 41 | return RegistryAuth::Anonymous; 42 | } 43 | 44 | match docker_credential::get_credential(server) { 45 | Err(CredentialRetrievalError::ConfigNotFound) => RegistryAuth::Anonymous, 46 | Err(CredentialRetrievalError::NoCredentialConfigured) => RegistryAuth::Anonymous, 47 | Err(e) => panic!("Error handling docker configuration file: {e}"), 48 | Ok(DockerCredential::UsernamePassword(username, password)) => { 49 | debug!(username, "Found docker credentials"); 50 | RegistryAuth::Basic(username, password) 51 | } 52 | Ok(DockerCredential::IdentityToken(_)) => { 53 | warn!("Cannot use contents of docker config, identity token not supported. 
Using anonymous auth"); 54 | RegistryAuth::Anonymous 55 | } 56 | } 57 | } 58 | 59 | fn build_client_config(cli: &Cli) -> oci_client::client::ClientConfig { 60 | let protocol = if cli.insecure { 61 | oci_client::client::ClientProtocol::Http 62 | } else { 63 | oci_client::client::ClientProtocol::Https 64 | }; 65 | 66 | oci_client::client::ClientConfig { 67 | protocol, 68 | ..Default::default() 69 | } 70 | } 71 | 72 | #[tokio::main] 73 | pub async fn main() { 74 | let cli = Cli::parse(); 75 | 76 | // setup logging 77 | let level_filter = if cli.verbose { "debug" } else { "info" }; 78 | let filter_layer = EnvFilter::new(level_filter); 79 | tracing_subscriber::registry() 80 | .with(filter_layer) 81 | .with(fmt::layer().with_writer(std::io::stderr)) 82 | .init(); 83 | 84 | let reference: Reference = cli.image.parse().expect("Not a valid image reference"); 85 | let auth = build_auth(&reference, &cli); 86 | 87 | let client_config = build_client_config(&cli); 88 | let client = Client::new(client_config); 89 | 90 | let (manifest, _) = client 91 | .pull_manifest(&reference, &auth) 92 | .await 93 | .expect("Cannot pull manifest"); 94 | 95 | if cli.json { 96 | serde_json::to_writer_pretty(std::io::stdout(), &manifest) 97 | .expect("Cannot serialize manifest to JSON"); 98 | println!(); 99 | } else { 100 | println!("{manifest}"); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /examples/wasm/main.rs: -------------------------------------------------------------------------------- 1 | use oci_client::{annotations, secrets::RegistryAuth, Client, Reference}; 2 | 3 | use docker_credential::{CredentialRetrievalError, DockerCredential}; 4 | use std::collections::BTreeMap; 5 | use tracing::{debug, warn}; 6 | use tracing_subscriber::prelude::*; 7 | use tracing_subscriber::{fmt, EnvFilter}; 8 | 9 | mod cli; 10 | use clap::Parser; 11 | use cli::Cli; 12 | 13 | mod pull; 14 | use pull::pull_wasm; 15 | 16 | mod push; 17 | use push::push_wasm; 18 | 19 | fn build_auth(reference: &Reference, cli: &Cli) -> RegistryAuth { 20 | let server = reference 21 | .resolve_registry() 22 | .strip_suffix('/') 23 | .unwrap_or_else(|| reference.resolve_registry()); 24 | 25 | if cli.anonymous { 26 | return RegistryAuth::Anonymous; 27 | } 28 | 29 | match docker_credential::get_credential(server) { 30 | Err(CredentialRetrievalError::ConfigNotFound) => RegistryAuth::Anonymous, 31 | Err(CredentialRetrievalError::NoCredentialConfigured) => RegistryAuth::Anonymous, 32 | Err(e) => panic!("Error handling docker configuration file: {e}"), 33 | Ok(DockerCredential::UsernamePassword(username, password)) => { 34 | debug!("Found docker credentials"); 35 | RegistryAuth::Basic(username, password) 36 | } 37 | Ok(DockerCredential::IdentityToken(_)) => { 38 | warn!("Cannot use contents of docker config, identity token not supported. 
Using anonymous auth"); 39 | RegistryAuth::Anonymous 40 | } 41 | } 42 | } 43 | 44 | fn build_client_config(cli: &Cli) -> oci_client::client::ClientConfig { 45 | let protocol = if cli.insecure { 46 | oci_client::client::ClientProtocol::Http 47 | } else { 48 | oci_client::client::ClientProtocol::Https 49 | }; 50 | 51 | oci_client::client::ClientConfig { 52 | protocol, 53 | ..Default::default() 54 | } 55 | } 56 | 57 | #[tokio::main] 58 | pub async fn main() { 59 | let cli = Cli::parse(); 60 | 61 | // setup logging 62 | let level_filter = if cli.verbose { "debug" } else { "info" }; 63 | let filter_layer = EnvFilter::new(level_filter); 64 | tracing_subscriber::registry() 65 | .with(filter_layer) 66 | .with(fmt::layer().with_writer(std::io::stderr)) 67 | .init(); 68 | 69 | let client_config = build_client_config(&cli); 70 | let mut client = Client::new(client_config); 71 | 72 | match &cli.command { 73 | crate::cli::Commands::Pull { output, image } => { 74 | let reference: Reference = image.parse().expect("Not a valid image reference"); 75 | let auth = build_auth(&reference, &cli); 76 | pull_wasm(&mut client, &auth, &reference, output).await; 77 | } 78 | crate::cli::Commands::Push { 79 | module, 80 | image, 81 | annotations, 82 | } => { 83 | let reference: Reference = image.parse().expect("Not a valid image reference"); 84 | let auth = build_auth(&reference, &cli); 85 | 86 | let annotations = if annotations.is_empty() { 87 | let mut values: BTreeMap = BTreeMap::new(); 88 | values.insert( 89 | annotations::ORG_OPENCONTAINERS_IMAGE_TITLE.to_string(), 90 | module.clone(), 91 | ); 92 | Some(values) 93 | } else { 94 | let mut values: BTreeMap = BTreeMap::new(); 95 | for annotation in annotations { 96 | let tmp: Vec<_> = annotation.splitn(2, '=').collect(); 97 | if tmp.len() == 2 { 98 | values.insert(String::from(tmp[0]), String::from(tmp[1])); 99 | } 100 | } 101 | values 102 | .entry(annotations::ORG_OPENCONTAINERS_IMAGE_TITLE.to_string()) 103 | .or_insert_with(|| module.clone()); 104 | 105 | Some(values) 106 | }; 107 | 108 | push_wasm(&mut client, &auth, &reference, module, annotations).await; 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/token_cache.rs: -------------------------------------------------------------------------------- 1 | use oci_spec::distribution::Reference; 2 | use serde::Deserialize; 3 | use std::collections::BTreeMap; 4 | use std::fmt; 5 | use std::sync::Arc; 6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | use tokio::sync::RwLock; 8 | use tracing::{debug, warn}; 9 | 10 | /// A token granted during the OAuth2-like workflow for OCI registries. 11 | #[derive(Deserialize, Clone)] 12 | #[serde(untagged)] 13 | #[serde(rename_all = "snake_case")] 14 | pub(crate) enum RegistryToken { 15 | Token { token: String }, 16 | AccessToken { access_token: String }, 17 | } 18 | 19 | impl fmt::Debug for RegistryToken { 20 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 21 | let redacted = String::from(""); 22 | match self { 23 | RegistryToken::Token { .. } => { 24 | f.debug_struct("Token").field("token", &redacted).finish() 25 | } 26 | RegistryToken::AccessToken { .. 
} => f 27 | .debug_struct("AccessToken") 28 | .field("access_token", &redacted) 29 | .finish(), 30 | } 31 | } 32 | } 33 | 34 | #[derive(Debug, Clone)] 35 | pub(crate) enum RegistryTokenType { 36 | Bearer(RegistryToken), 37 | Basic(String, String), 38 | } 39 | 40 | impl RegistryToken { 41 | pub fn bearer_token(&self) -> String { 42 | format!("Bearer {}", self.token()) 43 | } 44 | 45 | pub fn token(&self) -> &str { 46 | match self { 47 | RegistryToken::Token { token } => token, 48 | RegistryToken::AccessToken { access_token } => access_token, 49 | } 50 | } 51 | } 52 | 53 | /// Desired operation for registry authentication 54 | #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] 55 | pub enum RegistryOperation { 56 | /// Authenticate for push operations 57 | Push, 58 | /// Authenticate for pull operations 59 | Pull, 60 | } 61 | 62 | #[derive(Debug, Deserialize)] 63 | struct BearerTokenClaims { 64 | exp: Option, 65 | } 66 | 67 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] 68 | struct TokenCacheKey { 69 | registry: String, 70 | repository: String, 71 | operation: RegistryOperation, 72 | } 73 | 74 | struct TokenCacheValue { 75 | token: RegistryTokenType, 76 | expiration: u64, 77 | } 78 | 79 | #[derive(Clone)] 80 | pub(crate) struct TokenCache { 81 | // (registry, repository, scope) -> (token, expiration) 82 | tokens: Arc>>, 83 | /// Default token expiration in seconds, to use when claim doesn't specify a value 84 | pub default_expiration_secs: usize, 85 | } 86 | 87 | impl TokenCache { 88 | pub(crate) fn new(default_expiration_secs: usize) -> Self { 89 | TokenCache { 90 | tokens: Arc::new(RwLock::new(BTreeMap::new())), 91 | default_expiration_secs, 92 | } 93 | } 94 | 95 | pub(crate) async fn insert( 96 | &self, 97 | reference: &Reference, 98 | op: RegistryOperation, 99 | token: RegistryTokenType, 100 | ) { 101 | let expiration = match token { 102 | RegistryTokenType::Basic(_, _) => u64::MAX, 103 | RegistryTokenType::Bearer(ref t) => { 104 | match parse_expiration_from_jwt(t.token(), self.default_expiration_secs) { 105 | Some(value) => value, 106 | None => return, 107 | } 108 | } 109 | }; 110 | let registry = reference.resolve_registry().to_string(); 111 | let repository = reference.repository().to_string(); 112 | debug!(%registry, %repository, ?op, %expiration, "Inserting token"); 113 | self.tokens.write().await.insert( 114 | TokenCacheKey { 115 | registry, 116 | repository, 117 | operation: op, 118 | }, 119 | TokenCacheValue { token, expiration }, 120 | ); 121 | } 122 | 123 | pub(crate) async fn get( 124 | &self, 125 | reference: &Reference, 126 | op: RegistryOperation, 127 | ) -> Option { 128 | let registry = reference.resolve_registry().to_string(); 129 | let repository = reference.repository().to_string(); 130 | let key = TokenCacheKey { 131 | registry, 132 | repository, 133 | operation: op, 134 | }; 135 | match self.tokens.read().await.get(&key) { 136 | Some(TokenCacheValue { 137 | ref token, 138 | expiration, 139 | }) => { 140 | let now = SystemTime::now(); 141 | let epoch = now 142 | .duration_since(UNIX_EPOCH) 143 | .expect("Time went backwards") 144 | .as_secs(); 145 | if epoch > *expiration { 146 | debug!(%key.registry, %key.repository, ?key.operation, %expiration, miss=false, expired=true, "Fetching token"); 147 | None 148 | } else { 149 | debug!(%key.registry, %key.repository, ?key.operation, %expiration, miss=false, expired=false, "Fetching token"); 150 | Some(token.clone()) 151 | } 152 | } 153 | None => { 154 | debug!(%key.registry, %key.repository, 
?key.operation, miss = true, "Fetching token"); 155 | None 156 | } 157 | } 158 | } 159 | } 160 | 161 | fn parse_expiration_from_jwt(token_str: &str, default_expiration_secs: usize) -> Option { 162 | // This might be able to change if/when jsonwebtoken provides a simpler API for 163 | // looking through jwt claims without validating the token. 164 | // See the following GitHub issue for more details: 165 | // https://github.com/Keats/jsonwebtoken/issues/401 166 | let mut validation = jsonwebtoken::Validation::default(); 167 | validation.insecure_disable_signature_validation(); 168 | validation.required_spec_claims.clear(); 169 | validation.validate_aud = false; 170 | validation.validate_exp = false; 171 | validation.validate_nbf = false; 172 | 173 | match jsonwebtoken::decode::( 174 | token_str, 175 | &jsonwebtoken::DecodingKey::from_secret(&[]), 176 | &validation, 177 | ) { 178 | Ok(token) => { 179 | let token_exp = match token.claims.exp { 180 | Some(exp) => exp, 181 | None => { 182 | // the token doesn't have a claim that states a 183 | // value for the expiration. We assume it has a 60 184 | // seconds validity as indicated here: 185 | // https://docs.docker.com/reference/api/registry/auth/#token-response-fields 186 | // > (Optional) The duration in seconds since the token was issued 187 | // > that it will remain valid. When omitted, this defaults to 60 seconds. 188 | // > For compatibility with older clients, a token should never be returned 189 | // > with less than 60 seconds to live. 190 | let now = SystemTime::now(); 191 | let epoch = now 192 | .duration_since(UNIX_EPOCH) 193 | .expect("Time went backwards") 194 | .as_secs(); 195 | let expiration = epoch + default_expiration_secs as u64; 196 | debug!(?token, "Cannot extract expiration from token's claims, assuming a {} seconds validity", default_expiration_secs); 197 | expiration 198 | } 199 | }; 200 | 201 | Some(token_exp) 202 | } 203 | Err(error) => { 204 | warn!(?error, "Invalid bearer token"); 205 | None 206 | } 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/digest.rs: -------------------------------------------------------------------------------- 1 | //! Errors and functions for validating digests 2 | 3 | use http::HeaderMap; 4 | use sha2::Digest as _; 5 | 6 | use crate::sha256_digest; 7 | 8 | pub const DOCKER_DIGEST_HEADER: &str = "Docker-Content-Digest"; 9 | 10 | pub type Result = std::result::Result; 11 | 12 | /// Errors that can occur when validating digests 13 | #[derive(Debug, thiserror::Error)] 14 | pub enum DigestError { 15 | /// Invalid digest header 16 | #[error("Invalid digest header: {0}")] 17 | InvalidHeader(#[from] http::header::ToStrError), 18 | /// Invalid digest algorithm found 19 | #[error("Unsupported digest algorithm: {0}")] 20 | UnsupportedAlgorithm(String), 21 | /// Missing digest algorithm 22 | #[error("Missing digest algorithm")] 23 | MissingAlgorithm, 24 | /// Digest verification failed 25 | #[error("Invalid digest. Expected {expected}, got {actual}")] 26 | VerificationError { 27 | /// Expected digest 28 | expected: String, 29 | /// Actual digest 30 | actual: String, 31 | }, 32 | } 33 | 34 | /// A convenience struct for parsing a digest value with an algorithm 35 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] 36 | pub struct Digest<'a> { 37 | pub algorithm: &'a str, 38 | pub digest: &'a str, 39 | } 40 | 41 | impl<'a> Digest<'a> { 42 | /// Create a new digest from a str. 
This isn't using `FromStr` because we can't use lifetimes 43 | /// properly when implementing the trait 44 | pub fn new(digest: &'a str) -> Result { 45 | let (algorithm, digest) = digest 46 | .split_once(':') 47 | .ok_or(DigestError::MissingAlgorithm)?; 48 | Ok(Self { algorithm, digest }) 49 | } 50 | } 51 | 52 | /// Helper wrapper around various digest algorithms to make it easier to use them with our blob 53 | /// utils. This has to be an enum because the digest algorithms aren't object safe so we can't box 54 | /// dynner them 55 | pub(crate) enum Digester { 56 | Sha256(sha2::Sha256), 57 | Sha384(sha2::Sha384), 58 | Sha512(sha2::Sha512), 59 | } 60 | 61 | impl Digester { 62 | pub fn new(digest: &str) -> Result { 63 | let parsed_digest = Digest::new(digest)?; 64 | 65 | match parsed_digest.algorithm { 66 | "sha256" => Ok(Digester::Sha256(sha2::Sha256::new())), 67 | "sha384" => Ok(Digester::Sha384(sha2::Sha384::new())), 68 | "sha512" => Ok(Digester::Sha512(sha2::Sha512::new())), 69 | // We already check this above when parsing, but just in case, we return the error as 70 | // well here 71 | _ => Err(DigestError::UnsupportedAlgorithm( 72 | parsed_digest.algorithm.to_string(), 73 | )), 74 | } 75 | } 76 | 77 | pub fn update(&mut self, data: impl AsRef<[u8]>) { 78 | match self { 79 | Self::Sha256(d) => d.update(data), 80 | Self::Sha384(d) => d.update(data), 81 | Self::Sha512(d) => d.update(data), 82 | } 83 | } 84 | 85 | pub fn finalize(&mut self) -> String { 86 | match self { 87 | Self::Sha256(d) => format!("sha256:{:x}", d.finalize_reset()), 88 | Self::Sha384(d) => format!("sha384:{:x}", d.finalize_reset()), 89 | Self::Sha512(d) => format!("sha512:{:x}", d.finalize_reset()), 90 | } 91 | } 92 | } 93 | 94 | /// Helper for extracting `Docker-Content-Digest` header from manifest GET or HEAD request. 95 | pub fn digest_header_value(headers: HeaderMap) -> Result> { 96 | headers 97 | .get(DOCKER_DIGEST_HEADER) 98 | .map(|hv| hv.to_str().map(|s| s.to_string())) 99 | .transpose() 100 | .map_err(DigestError::from) 101 | } 102 | 103 | /// Given the optional digest header value and digest of the reference, returns the digest of the 104 | /// content, validating that the digest of the content matches the proper digest. If neither a 105 | /// header digest or a reference digest is provided, then the body is digested and returned as the 106 | /// digest. If both digests are provided, but they use different algorithms, then the header digest 107 | /// is returned after validation as according to the spec it is the "canonical" digest for the given 108 | /// content. 
109 | pub fn validate_digest( 110 | body: &[u8], 111 | digest_header: Option, 112 | reference_digest: Option<&str>, 113 | ) -> Result { 114 | match (digest_header, reference_digest) { 115 | // If both digests are equal, then just calculate once 116 | (Some(digest), Some(reference)) if digest == reference => { 117 | calculate_and_validate(body, &digest) 118 | } 119 | (Some(digest), Some(reference)) => { 120 | calculate_and_validate(body, reference)?; 121 | calculate_and_validate(body, &digest) 122 | } 123 | (Some(digest), None) => calculate_and_validate(body, &digest), 124 | (None, Some(reference)) => calculate_and_validate(body, reference), 125 | // If we have neither, just digest the body 126 | (None, None) => Ok(sha256_digest(body)), 127 | } 128 | } 129 | 130 | /// Helper for calculating and validating the digest of the given content 131 | fn calculate_and_validate(content: &[u8], digest: &str) -> Result { 132 | let parsed_digest = Digest::new(digest)?; 133 | let digest_calculated = match parsed_digest.algorithm { 134 | "sha256" => format!("{:x}", sha2::Sha256::digest(content)), 135 | "sha384" => format!("{:x}", sha2::Sha384::digest(content)), 136 | "sha512" => format!("{:x}", sha2::Sha512::digest(content)), 137 | other => return Err(DigestError::UnsupportedAlgorithm(other.to_string())), 138 | }; 139 | let hex = format!("{}:{digest_calculated}", parsed_digest.algorithm); 140 | tracing::debug!(%hex, "Computed digest of payload"); 141 | if hex != digest { 142 | return Err(DigestError::VerificationError { 143 | expected: digest.to_owned(), 144 | actual: hex, 145 | }); 146 | } 147 | Ok(hex) 148 | } 149 | 150 | #[cfg(test)] 151 | mod tests { 152 | use super::*; 153 | 154 | #[test] 155 | fn test_validate_digest() { 156 | let body = b"hello world"; 157 | let digest_sha256 = format!("sha256:{:x}", sha2::Sha256::digest(body)); 158 | let digest_sha384 = format!("sha384:{:x}", sha2::Sha384::digest(body)); 159 | 160 | // Test case 1: Both digests are equal 161 | assert_eq!( 162 | validate_digest(body, Some(digest_sha256.clone()), Some(&digest_sha256)) 163 | .expect("Failed to validate digest with matching header and reference"), 164 | digest_sha256 165 | ); 166 | 167 | // Test case 2: Different digests 168 | assert_eq!( 169 | validate_digest(body, Some(digest_sha256.clone()), Some(&digest_sha384)) 170 | .expect("Failed to validate digest with different header and reference"), 171 | digest_sha256 172 | ); 173 | 174 | // Test case 3: Only digest_header 175 | assert_eq!( 176 | validate_digest(body, Some(digest_sha256.clone()), None) 177 | .expect("Failed to validate digest with only header"), 178 | digest_sha256 179 | ); 180 | 181 | // Test case 4: Only reference_digest 182 | assert_eq!( 183 | validate_digest(body, None, Some(&digest_sha384)) 184 | .expect("Failed to validate digest with only reference"), 185 | digest_sha384 186 | ); 187 | 188 | // Test case 5: No digests provided 189 | assert_eq!( 190 | validate_digest(body, None, None) 191 | .expect("Failed to validate digest with no digests provided"), 192 | digest_sha256 193 | ); 194 | 195 | // Test case 6: Invalid digest 196 | let invalid_digest = "sha256:invalid"; 197 | validate_digest(body, Some(invalid_digest.to_string()), None) 198 | .expect_err("Expected error for invalid digest"); 199 | 200 | // Test case 7: Valid header digest and invalid layer digest 201 | let invalid_layer_digest = "sha512:invalid"; 202 | validate_digest( 203 | body, 204 | Some(digest_sha256.clone()), 205 | Some(invalid_layer_digest), 206 | ) 207 | .expect_err("Expected 
error for invalid layer digest"); 208 | 209 | // Test case 8: Unsupported algorithm 210 | let unsupported_digest = "md5:d41d8cd98f00b204e9800998ecf8427e"; 211 | validate_digest(body, Some(unsupported_digest.to_string()), None) 212 | .expect_err("Expected error for unsupported algorithm"); 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /src/blob.rs: -------------------------------------------------------------------------------- 1 | //! Helpers for interacting with blobs and their verification 2 | use std::task::Poll; 3 | 4 | use futures_util::stream::{BoxStream, Stream}; 5 | use futures_util::TryStreamExt; 6 | 7 | use crate::digest::Digester; 8 | use crate::errors::DigestError; 9 | 10 | /// Stream response of a blob with optional content length if available 11 | pub struct SizedStream { 12 | /// The length of the stream if the upstream registry sent a `Content-Length` header 13 | pub content_length: Option, 14 | /// The digest header value if the upstream registry sent a `Digest` header. This should be used 15 | /// (in addition to the layer digest) for validation when using partial requests as the library 16 | /// can't validate against the full response. 17 | pub digest_header_value: Option, 18 | /// The stream of bytes 19 | pub stream: BoxStream<'static, Result>, 20 | } 21 | 22 | impl Stream for SizedStream { 23 | type Item = Result; 24 | 25 | fn poll_next( 26 | mut self: std::pin::Pin<&mut Self>, 27 | cx: &mut std::task::Context<'_>, 28 | ) -> Poll> { 29 | self.stream.try_poll_next_unpin(cx) 30 | } 31 | } 32 | 33 | /// The response of a partial blob request 34 | pub enum BlobResponse { 35 | /// The response is a full blob (for example when partial requests aren't supported) 36 | Full(SizedStream), 37 | /// The response is a partial blob as requested 38 | Partial(SizedStream), 39 | } 40 | 41 | pub(crate) struct VerifyingStream { 42 | stream: BoxStream<'static, Result>, 43 | layer_digester: Digester, 44 | expected_layer_digest: String, 45 | header_digester: Option<(Digester, String)>, 46 | } 47 | 48 | impl VerifyingStream { 49 | pub fn new( 50 | stream: BoxStream<'static, Result>, 51 | layer_digester: Digester, 52 | expected_layer_digest: String, 53 | header_digester_and_digest: Option<(Digester, String)>, 54 | ) -> Self { 55 | Self { 56 | stream, 57 | layer_digester, 58 | expected_layer_digest, 59 | header_digester: header_digester_and_digest, 60 | } 61 | } 62 | } 63 | 64 | impl Stream for VerifyingStream { 65 | type Item = Result; 66 | 67 | fn poll_next( 68 | self: std::pin::Pin<&mut Self>, 69 | cx: &mut std::task::Context<'_>, 70 | ) -> Poll> { 71 | let this = self.get_mut(); 72 | match futures_util::ready!(this.stream.as_mut().poll_next(cx)) { 73 | Some(Ok(bytes)) => { 74 | this.layer_digester.update(&bytes); 75 | if let Some((digester, _)) = this.header_digester.as_mut() { 76 | digester.update(&bytes); 77 | } 78 | Poll::Ready(Some(Ok(bytes))) 79 | } 80 | Some(Err(e)) => Poll::Ready(Some(Err(e))), 81 | None => { 82 | // Now that we've reached the end of the stream, verify the digest(s) 83 | match this.header_digester.as_mut() { 84 | Some((digester, expected)) => { 85 | // Check the header digester and then the layer digester before returning 86 | let digest = digester.finalize(); 87 | if digest != *expected { 88 | return Poll::Ready(Some(Err(std::io::Error::other( 89 | DigestError::VerificationError { 90 | expected: expected.clone(), 91 | actual: digest, 92 | }, 93 | )))); 94 | } 95 | let digest = this.layer_digester.finalize(); 
96 | if digest == this.expected_layer_digest { 97 | Poll::Ready(None) 98 | } else { 99 | Poll::Ready(Some(Err(std::io::Error::other( 100 | DigestError::VerificationError { 101 | expected: expected.clone(), 102 | actual: digest, 103 | }, 104 | )))) 105 | } 106 | } 107 | None => { 108 | let digest = this.layer_digester.finalize(); 109 | if digest == this.expected_layer_digest { 110 | Poll::Ready(None) 111 | } else { 112 | Poll::Ready(Some(Err(std::io::Error::other( 113 | DigestError::VerificationError { 114 | expected: this.expected_layer_digest.clone(), 115 | actual: digest, 116 | }, 117 | )))) 118 | } 119 | } 120 | } 121 | } 122 | } 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use super::*; 129 | 130 | use bytes::Bytes; 131 | use futures_util::TryStreamExt; 132 | use sha2::Digest as _; 133 | 134 | #[tokio::test] 135 | async fn test_verifying_stream() { 136 | // Test with correct SHA 137 | let data = b"Hello, world!"; 138 | let correct_sha = format!("sha256:{:x}", sha2::Sha256::digest(data)); 139 | let stream = VerifyingStream::new( 140 | Box::pin(futures_util::stream::iter(vec![Ok(Bytes::from_static( 141 | data, 142 | ))])), 143 | Digester::new(&correct_sha).unwrap(), 144 | correct_sha.clone(), 145 | None, 146 | ); 147 | stream 148 | .try_collect::>() 149 | .await 150 | .expect("Should not error with valid data"); 151 | 152 | // Test with incorrect SHA 153 | let incorrect_sha = "sha256:incorrect_hash"; 154 | let stream = VerifyingStream::new( 155 | Box::pin(futures_util::stream::iter(vec![Ok(Bytes::from_static( 156 | data, 157 | ))])), 158 | Digester::new(incorrect_sha).unwrap(), 159 | incorrect_sha.to_string(), 160 | None, 161 | ); 162 | 163 | let err = stream 164 | .try_collect::>() 165 | .await 166 | .expect_err("Should error with invalid sha"); 167 | 168 | let err = err 169 | .into_inner() 170 | .expect("Should have inner error") 171 | .downcast::() 172 | .expect("Should be a DigestError"); 173 | assert!( 174 | matches!(*err, DigestError::VerificationError { .. }), 175 | "Error should be a verification error" 176 | ); 177 | 178 | // Test with correct SHA and header 179 | let correct_header_sha = format!("sha512:{:x}", sha2::Sha512::digest(data)); 180 | let stream = VerifyingStream::new( 181 | Box::pin(futures_util::stream::iter(vec![Ok(Bytes::from_static( 182 | data, 183 | ))])), 184 | Digester::new(&correct_sha).unwrap(), 185 | correct_sha.clone(), 186 | Some(( 187 | Digester::new(&correct_header_sha).unwrap(), 188 | correct_header_sha.clone(), 189 | )), 190 | ); 191 | stream 192 | .try_collect::>() 193 | .await 194 | .expect("Should not error with valid data"); 195 | 196 | // Test with correct layer sha and wrong header sha 197 | let incorrect_header_sha = "sha512:incorrect_hash"; 198 | let stream = VerifyingStream::new( 199 | Box::pin(futures_util::stream::iter(vec![Ok(Bytes::from_static( 200 | data, 201 | ))])), 202 | Digester::new(&correct_sha).unwrap(), 203 | correct_sha.clone(), 204 | Some(( 205 | Digester::new(incorrect_header_sha).unwrap(), 206 | incorrect_header_sha.to_string(), 207 | )), 208 | ); 209 | 210 | let err = stream 211 | .try_collect::>() 212 | .await 213 | .expect_err("Should error with invalid sha"); 214 | 215 | let err = err 216 | .into_inner() 217 | .expect("Should have inner error") 218 | .downcast::() 219 | .expect("Should be a DigestError"); 220 | assert!( 221 | matches!(*err, DigestError::VerificationError { .. 
}), 222 | "Error should be a verification error" 223 | ); 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Errors related to interacting with an OCI compliant remote store 2 | 3 | use thiserror::Error; 4 | 5 | pub use crate::digest::DigestError; 6 | 7 | /// Errors that can be raised while interacting with an OCI registry 8 | #[derive(Error, Debug)] 9 | pub enum OciDistributionError { 10 | /// Authentication error 11 | #[error("Authentication failure: {0}")] 12 | AuthenticationFailure(String), 13 | #[error("Failed to convert Config into ConfigFile: {0}")] 14 | /// Transparent wrapper around `std::string::FromUtf8Error` 15 | ConfigConversionError(String), 16 | /// An error occurred with a digest operation 17 | #[error("Digest error: {0}")] 18 | DigestError(#[from] DigestError), 19 | /// Generic error, might provide an explanation message 20 | #[error("Generic error: {0:?}")] 21 | GenericError(Option), 22 | /// Transparent wrapper around `reqwest::header::ToStrError` 23 | #[error(transparent)] 24 | HeaderValueError(#[from] reqwest::header::ToStrError), 25 | /// Image manifest not found 26 | #[error("Image manifest not found: {0}")] 27 | ImageManifestNotFoundError(String), 28 | /// Platform resolver not specified 29 | #[error("Received Image Index/Manifest List, but platform_resolver was not defined on the client config. Consider setting platform_resolver")] 30 | ImageIndexParsingNoPlatformResolverError, 31 | /// Registry returned a layer with an incompatible type 32 | #[error("Incompatible layer media type: {0}")] 33 | IncompatibleLayerMediaTypeError(String), 34 | /// IO Error 35 | #[error(transparent)] 36 | IoError(#[from] std::io::Error), 37 | #[error(transparent)] 38 | /// Transparent wrapper around `serde_json::error::Error` 39 | JsonError(#[from] serde_json::error::Error), 40 | /// Manifest is not valid UTF-8 41 | #[error("Manifest is not valid UTF-8")] 42 | ManifestEncodingError(#[from] std::str::Utf8Error), 43 | /// Manifest: JSON unmarshalling error 44 | #[error("Failed to parse manifest as Versioned object: {0}")] 45 | ManifestParsingError(String), 46 | /// Cannot push a blob without data 47 | #[error("cannot push a blob without data")] 48 | PushNoDataError, 49 | /// Cannot push layer object without data 50 | #[error("cannot push a layer without data")] 51 | PushLayerNoDataError, 52 | /// No layers available to be pulled 53 | #[error("No layers to pull")] 54 | PullNoLayersError, 55 | /// OCI registry error 56 | #[error("Registry error: url {url}, envelope: {envelope}")] 57 | RegistryError { 58 | /// List of errors returned the by the OCI registry 59 | envelope: OciEnvelope, 60 | /// Request URL 61 | url: String, 62 | }, 63 | /// Registry didn't return a Digest object 64 | #[error("Registry did not return a digest header")] 65 | RegistryNoDigestError, 66 | /// Registry didn't return a Location header 67 | #[error("Registry did not return a location header")] 68 | RegistryNoLocationError, 69 | /// Registry token: JSON deserialization error 70 | #[error("Failed to decode registry token: {0}")] 71 | RegistryTokenDecodeError(String), 72 | /// Transparent wrapper around `reqwest::Error` 73 | #[error(transparent)] 74 | RequestError(#[from] reqwest::Error), 75 | /// HTTP Server error 76 | #[error("Server error: url {url}, code: {code}, message: {message}")] 77 | ServerError { 78 | /// HTTP status code 79 | code: u16, 80 | /// Request 
URL 81 | url: String, 82 | /// Error message returned by the remote server 83 | message: String, 84 | }, 85 | /// The [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/main/spec.md) 86 | /// is not respected by the remote registry 87 | #[error("OCI distribution spec violation: {0}")] 88 | SpecViolationError(String), 89 | /// HTTP auth failed - user not authorized 90 | #[error("Not authorized: url {url}")] 91 | UnauthorizedError { 92 | /// request URL 93 | url: String, 94 | }, 95 | /// Cannot parse URL 96 | #[error("Error parsing Url {0}")] 97 | UrlParseError(String), 98 | /// Media type not supported 99 | #[error("Unsupported media type: {0}")] 100 | UnsupportedMediaTypeError(String), 101 | /// Schema version not supported 102 | #[error("Unsupported schema version: {0}")] 103 | UnsupportedSchemaVersionError(i32), 104 | /// Versioned object: JSON deserialization error 105 | #[error("Failed to parse manifest: {0}")] 106 | VersionedParsingError(String), 107 | } 108 | 109 | /// Helper type to declare `Result` objects that might return a `OciDistributionError` 110 | pub type Result = std::result::Result; 111 | 112 | /// The OCI specification defines a specific error format. 113 | /// 114 | /// This struct represents that error format, which is formally described here: 115 | /// 116 | #[derive(serde::Deserialize, serde::Serialize, Debug)] 117 | pub struct OciError { 118 | /// The error code 119 | pub code: OciErrorCode, 120 | /// An optional message associated with the error 121 | #[serde(default)] 122 | pub message: String, 123 | /// Unstructured optional data associated with the error 124 | #[serde(default)] 125 | pub detail: serde_json::Value, 126 | } 127 | 128 | impl std::error::Error for OciError { 129 | fn description(&self) -> &str { 130 | self.message.as_str() 131 | } 132 | } 133 | impl std::fmt::Display for OciError { 134 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 135 | write!(f, "OCI API error: {}", self.message.as_str()) 136 | } 137 | } 138 | 139 | /// A struct that holds a series of OCI errors 140 | #[derive(serde::Deserialize, serde::Serialize, Debug)] 141 | pub struct OciEnvelope { 142 | /// List of OCI registry errors 143 | pub errors: Vec, 144 | } 145 | 146 | impl std::fmt::Display for OciEnvelope { 147 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 148 | let errors: Vec = self.errors.iter().map(|e| e.to_string()).collect(); 149 | write!(f, "OCI API errors: [{}]", errors.join("\n")) 150 | } 151 | } 152 | 153 | /// OCI error codes 154 | /// 155 | /// Outlined [here](https://github.com/opencontainers/distribution-spec/blob/master/spec.md#errors-2) 156 | #[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Eq)] 157 | #[serde(rename_all = "SCREAMING_SNAKE_CASE")] 158 | pub enum OciErrorCode { 159 | /// Blob unknown to registry 160 | /// 161 | /// This error MAY be returned when a blob is unknown to the registry in a specified 162 | /// repository. This can be returned with a standard get or if a manifest 163 | /// references an unknown layer during upload. 164 | BlobUnknown, 165 | /// Blob upload is invalid 166 | /// 167 | /// The blob upload encountered an error and can no longer proceed. 168 | BlobUploadInvalid, 169 | /// Blob upload is unknown to registry 170 | BlobUploadUnknown, 171 | /// Provided digest did not match uploaded content. 
172 | DigestInvalid, 173 | /// Blob is unknown to registry 174 | ManifestBlobUnknown, 175 | /// Manifest is invalid 176 | /// 177 | /// During upload, manifests undergo several checks ensuring validity. If 178 | /// those checks fail, this error MAY be returned, unless a more specific 179 | /// error is included. The detail will contain information the failed 180 | /// validation. 181 | ManifestInvalid, 182 | /// Manifest unknown 183 | /// 184 | /// This error is returned when the manifest, identified by name and tag is unknown to the repository. 185 | ManifestUnknown, 186 | /// Manifest failed signature validation 187 | /// 188 | /// DEPRECATED: This error code has been removed from the OCI spec. 189 | ManifestUnverified, 190 | /// Invalid repository name 191 | NameInvalid, 192 | /// Repository name is not known 193 | NameUnknown, 194 | /// Manifest is not found 195 | NotFound, 196 | /// Provided length did not match content length 197 | SizeInvalid, 198 | /// Manifest tag did not match URI 199 | /// 200 | /// DEPRECATED: This error code has been removed from the OCI spec. 201 | TagInvalid, 202 | /// Authentication required. 203 | Unauthorized, 204 | /// Requested access to the resource is denied 205 | Denied, 206 | /// This operation is unsupported 207 | Unsupported, 208 | /// Too many requests from client 209 | Toomanyrequests, 210 | } 211 | 212 | #[cfg(test)] 213 | mod test { 214 | use super::*; 215 | 216 | const EXAMPLE_ERROR: &str = r#" 217 | {"errors":[{"code":"UNAUTHORIZED","message":"authentication required","detail":[{"Type":"repository","Name":"hello-wasm","Action":"pull"}]}]} 218 | "#; 219 | #[test] 220 | fn test_deserialize() { 221 | let envelope: OciEnvelope = 222 | serde_json::from_str(EXAMPLE_ERROR).expect("parse example error"); 223 | let e = &envelope.errors[0]; 224 | assert_eq!(OciErrorCode::Unauthorized, e.code); 225 | assert_eq!("authentication required", e.message); 226 | assert_ne!(serde_json::value::Value::Null, e.detail); 227 | } 228 | 229 | const EXAMPLE_ERROR_TOOMANYREQUESTS: &str = r#" 230 | {"errors":[{"code":"TOOMANYREQUESTS","message":"pull request limit exceeded","detail":"You have reached your pull rate limit."}]} 231 | "#; 232 | #[test] 233 | fn test_deserialize_toomanyrequests() { 234 | let envelope: OciEnvelope = 235 | serde_json::from_str(EXAMPLE_ERROR_TOOMANYREQUESTS).expect("parse example error"); 236 | let e = &envelope.errors[0]; 237 | assert_eq!(OciErrorCode::Toomanyrequests, e.code); 238 | assert_eq!("pull request limit exceeded", e.message); 239 | assert_ne!(serde_json::value::Value::Null, e.detail); 240 | } 241 | 242 | const EXAMPLE_ERROR_MISSING_MESSAGE: &str = r#" 243 | {"errors":[{"code":"UNAUTHORIZED","detail":[{"Type":"repository","Name":"hello-wasm","Action":"pull"}]}]} 244 | "#; 245 | #[test] 246 | fn test_deserialize_without_message_field() { 247 | let envelope: OciEnvelope = 248 | serde_json::from_str(EXAMPLE_ERROR_MISSING_MESSAGE).expect("parse example error"); 249 | let e = &envelope.errors[0]; 250 | assert_eq!(OciErrorCode::Unauthorized, e.code); 251 | assert_eq!(String::default(), e.message); 252 | assert_ne!(serde_json::value::Value::Null, e.detail); 253 | } 254 | 255 | const EXAMPLE_ERROR_MISSING_DETAIL: &str = r#" 256 | {"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]} 257 | "#; 258 | #[test] 259 | fn test_deserialize_without_detail_field() { 260 | let envelope: OciEnvelope = 261 | serde_json::from_str(EXAMPLE_ERROR_MISSING_DETAIL).expect("parse example error"); 262 | let e = &envelope.errors[0]; 263 | 
assert_eq!(OciErrorCode::Unauthorized, e.code); 264 | assert_eq!("authentication required", e.message); 265 | assert_eq!(serde_json::value::Value::Null, e.detail); 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright (c) The ORAS Authors 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /tests/digest_validation.rs: -------------------------------------------------------------------------------- 1 | // Tests for validating digests of different types and for malicious servers 2 | use std::net::SocketAddr; 3 | 4 | use axum::{ 5 | extract::{Path, State}, 6 | http::{HeaderMap, StatusCode}, 7 | routing::get, 8 | Router, 9 | }; 10 | use oci_client::{ 11 | client::{linux_amd64_resolver, ClientConfig, ClientProtocol}, 12 | Client, Reference, 13 | }; 14 | use sha2::{Digest, Sha256, Sha512}; 15 | use tokio::{net::TcpListener, task::JoinHandle}; 16 | 17 | const DIGEST_HEADER: &str = "Docker-Content-Digest"; 18 | 19 | static MANIFEST: &[u8] = include_bytes!("./fixtures/manifest.json"); 20 | static BLOB: &[u8] = include_bytes!("./fixtures/blob.tar.gz"); 21 | static CONFIG: &[u8] = include_bytes!("./fixtures/config.json"); 22 | 23 | lazy_static::lazy_static! { 24 | static ref MANIFEST_DIGEST: String = digest(MANIFEST); 25 | static ref MANIFEST_DIGEST_SHA512: String = digest_sha512(MANIFEST); 26 | static ref BLOB_DIGEST: String = digest(BLOB); 27 | static ref BLOB_DIGEST_SHA512: String = digest_sha512(BLOB); 28 | static ref CONFIG_DIGEST: String = digest(CONFIG); 29 | static ref CONFIG_DIGEST_SHA512: String = digest_sha512(CONFIG); 30 | } 31 | 32 | fn digest(data: &[u8]) -> String { 33 | format!("sha256:{:x}", Sha256::digest(data)) 34 | } 35 | 36 | fn digest_sha512(data: &[u8]) -> String { 37 | format!("sha512:{:x}", Sha512::digest(data)) 38 | } 39 | 40 | async fn manifest_handler( 41 | State(state): State, 42 | Path(digest): Path, 43 | ) -> (HeaderMap, &'static [u8]) { 44 | let resp_digest = if digest.starts_with("sha256:") && state.bad_manifest { 45 | digest 46 | } else { 47 | MANIFEST_DIGEST.clone() 48 | }; 49 | 50 | let mut headers = HeaderMap::new(); 51 | headers.insert(DIGEST_HEADER, resp_digest.parse().unwrap()); 52 | headers.insert( 53 | "Content-Type", 54 | "application/vnd.docker.distribution.manifest.v2+json" 55 | .parse() 56 | .unwrap(), 57 | ); 58 | 59 | (headers, MANIFEST) 60 | } 61 | 62 | async fn blob_handler( 63 | State(state): State, 64 | Path(digest): Path, 65 | ) -> Result<(HeaderMap, &'static [u8]), StatusCode> { 66 | let (content, resp_digest) = match digest.as_str() { 67 | d if d == CONFIG_DIGEST.as_str() => ( 68 | CONFIG, 69 | if state.bad_config { 70 | "sha256:deadbeef" 71 | } else { 72 | CONFIG_DIGEST.as_str() 73 | }, 74 | ), 75 | d if state.blob_sha512 && d == BLOB_DIGEST.as_str() => ( 76 | BLOB, 77 | if state.bad_blob { 78 | "sha256:deadbeef" 79 | } else { 80 | BLOB_DIGEST_SHA512.as_str() 81 | }, 82 | ), 83 | d if d == BLOB_DIGEST.as_str() => ( 84 | BLOB, 85 | if state.bad_blob { 86 | "sha256:deadbeef" 87 | } else { 88 | BLOB_DIGEST.as_str() 89 | }, 90 | ), 91 | _ => return Err(StatusCode::NOT_FOUND), 92 | }; 93 | 94 | let mut headers = HeaderMap::new(); 95 | headers.insert(DIGEST_HEADER, resp_digest.parse().unwrap()); 96 | 97 | Ok((headers, content)) 98 | } 99 | 100 | #[derive(Clone, Copy)] 101 | struct ServerConfig { 102 | bad_manifest: bool, 103 | bad_config: bool, 104 | bad_blob: bool, 105 | blob_sha512: bool, 106 | } 107 | 108 | struct BadServer { 109 | handle: JoinHandle<()>, 110 | pub server: String, 111 | } 112 | 113 | impl Drop for BadServer { 114 | fn drop(&mut self) { 115 | self.handle.abort() 116 | } 117 | } 118 | 119 | impl BadServer { 120 | pub async fn new(config: ServerConfig) -> Self { 121 | let app = Router::new() 122 | 
.route("/v2/busybox/manifests/{digest}", get(manifest_handler)) 123 | .route("/v2/busybox/blobs/{digest}", get(blob_handler)) 124 | .with_state(config); 125 | 126 | let addr = SocketAddr::from(([127, 0, 0, 1], 0)); 127 | let listener = TcpListener::bind(addr).await.unwrap(); 128 | let server_addr = listener.local_addr().unwrap(); 129 | let port = server_addr.port(); 130 | let server = format!("127.0.0.1:{port}"); 131 | let handle = tokio::spawn(async move { 132 | axum::serve(listener, app).await.unwrap(); 133 | }); 134 | Self { handle, server } 135 | } 136 | } 137 | #[tokio::test] 138 | async fn test_bad_manifest() { 139 | let server = BadServer::new(ServerConfig { 140 | bad_manifest: true, 141 | bad_config: false, 142 | bad_blob: false, 143 | blob_sha512: false, 144 | }) 145 | .await; 146 | 147 | let client = Client::new(ClientConfig { 148 | protocol: ClientProtocol::Http, 149 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 150 | ..Default::default() 151 | }); 152 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 153 | 154 | let reference = Reference::try_from(format!( 155 | "{}/busybox@sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 156 | server.server 157 | )) 158 | .expect("failed to parse reference"); 159 | 160 | client 161 | .pull_manifest(&reference, auth) 162 | .await 163 | .expect_err("Expected an error with a mismatched sha"); 164 | } 165 | 166 | #[tokio::test] 167 | async fn test_bad_config() { 168 | let server = BadServer::new(ServerConfig { 169 | bad_manifest: false, 170 | bad_config: true, 171 | bad_blob: false, 172 | blob_sha512: false, 173 | }) 174 | .await; 175 | 176 | let client = Client::new(ClientConfig { 177 | protocol: ClientProtocol::Http, 178 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 179 | ..Default::default() 180 | }); 181 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 182 | 183 | let reference = Reference::try_from(format!( 184 | "{}/busybox@{}", 185 | server.server, 186 | MANIFEST_DIGEST.as_str() 187 | )) 188 | .expect("failed to parse reference"); 189 | 190 | assert!( 191 | client 192 | .pull( 193 | &reference, 194 | auth, 195 | vec!["application/vnd.docker.image.rootfs.diff.tar.gzip"], 196 | ) 197 | .await 198 | .is_err(), 199 | "Expected an error with a bad config" 200 | ); 201 | } 202 | 203 | #[tokio::test] 204 | async fn test_bad_blob() { 205 | let server = BadServer::new(ServerConfig { 206 | bad_manifest: false, 207 | bad_config: false, 208 | bad_blob: true, 209 | blob_sha512: false, 210 | }) 211 | .await; 212 | let client = Client::new(ClientConfig { 213 | protocol: ClientProtocol::Http, 214 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 215 | ..Default::default() 216 | }); 217 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 218 | 219 | let reference = Reference::try_from(format!( 220 | "{}/busybox@{}", 221 | server.server, 222 | MANIFEST_DIGEST.as_str() 223 | )) 224 | .expect("failed to parse reference"); 225 | 226 | assert!( 227 | client 228 | .pull( 229 | &reference, 230 | auth, 231 | vec!["application/vnd.docker.image.rootfs.diff.tar.gzip"], 232 | ) 233 | .await 234 | .is_err(), 235 | "Expected an error with a bad blob" 236 | ); 237 | } 238 | 239 | #[tokio::test] 240 | async fn test_good_pull() { 241 | let server = BadServer::new(ServerConfig { 242 | bad_manifest: false, 243 | bad_config: false, 244 | bad_blob: false, 245 | blob_sha512: false, 246 | }) 247 | .await; 248 | 249 | let client = Client::new(ClientConfig { 250 | protocol: 
ClientProtocol::Http, 251 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 252 | ..Default::default() 253 | }); 254 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 255 | 256 | let reference = Reference::try_from(format!( 257 | "{}/busybox@{}", 258 | server.server, 259 | MANIFEST_DIGEST.as_str() 260 | )) 261 | .expect("failed to parse reference"); 262 | 263 | client 264 | .pull( 265 | &reference, 266 | auth, 267 | vec!["application/vnd.docker.image.rootfs.diff.tar.gzip"], 268 | ) 269 | .await 270 | .expect("Expected a good pull"); 271 | } 272 | 273 | #[tokio::test] 274 | async fn test_different_reference_sha() { 275 | let server = BadServer::new(ServerConfig { 276 | bad_manifest: false, 277 | bad_config: false, 278 | bad_blob: false, 279 | blob_sha512: false, 280 | }) 281 | .await; 282 | 283 | let client = Client::new(ClientConfig { 284 | protocol: ClientProtocol::Http, 285 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 286 | ..Default::default() 287 | }); 288 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 289 | 290 | let reference = Reference::try_from(format!( 291 | "{}/busybox@sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 292 | server.server 293 | )) 294 | .expect("failed to parse reference"); 295 | 296 | client 297 | .pull_manifest(&reference, auth) 298 | .await 299 | .expect_err("Expected an error with a mismatched reference sha"); 300 | 301 | // Also try using a sha512 digest 302 | let reference = Reference::try_from(format!( 303 | "{}/busybox@sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef", 304 | server.server 305 | )) 306 | .expect("failed to parse reference"); 307 | 308 | client 309 | .pull_manifest(&reference, auth) 310 | .await 311 | .expect_err("Expected an error with a mismatched reference sha"); 312 | } 313 | 314 | #[tokio::test] 315 | async fn test_different_manifest_algos() { 316 | let server = BadServer::new(ServerConfig { 317 | bad_manifest: false, 318 | bad_config: false, 319 | bad_blob: false, 320 | blob_sha512: false, 321 | }) 322 | .await; 323 | let client = Client::new(ClientConfig { 324 | protocol: ClientProtocol::Http, 325 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 326 | ..Default::default() 327 | }); 328 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 329 | let reference = Reference::try_from(format!( 330 | "{}/busybox@{}", 331 | server.server, 332 | MANIFEST_DIGEST_SHA512.as_str() 333 | )) 334 | .expect("failed to parse reference"); 335 | 336 | client 337 | .pull_manifest(&reference, auth) 338 | .await 339 | .expect("Expected a good pull with two different algorithms"); 340 | } 341 | 342 | #[tokio::test] 343 | async fn test_different_blob_algos() { 344 | let server = BadServer::new(ServerConfig { 345 | bad_manifest: false, 346 | bad_config: false, 347 | bad_blob: false, 348 | blob_sha512: true, 349 | }) 350 | .await; 351 | 352 | let client = Client::new(ClientConfig { 353 | protocol: ClientProtocol::Http, 354 | platform_resolver: Some(Box::new(linux_amd64_resolver)), 355 | ..Default::default() 356 | }); 357 | let auth = &oci_client::secrets::RegistryAuth::Anonymous; 358 | 359 | let reference = Reference::try_from(format!( 360 | "{}/busybox@{}", 361 | server.server, 362 | MANIFEST_DIGEST.as_str() 363 | )) 364 | .expect("failed to parse reference"); 365 | 366 | client 367 | .pull( 368 | &reference, 369 | auth, 370 | vec!["application/vnd.docker.image.rootfs.diff.tar.gzip"], 371 | ) 372 | .await 373 | .expect("Expected a good pull"); 374 | } 
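// A minimal standalone sketch of the check these tests exercise: recompute the
// content digest locally and compare it with the digest the registry claims
// (for example via the `Docker-Content-Digest` header or the pinned reference).
// `verify_content_digest` and the test below are hypothetical, not part of this
// test suite; they only assume the `sha2` crate already imported above.
fn verify_content_digest(expected: &str, data: &[u8]) -> Result<(), String> {
    // Pick the hashing algorithm from the digest prefix, mirroring the
    // `digest`/`digest_sha512` helpers at the top of this file.
    let actual = match expected.split_once(':') {
        Some(("sha256", _)) => format!("sha256:{:x}", Sha256::digest(data)),
        Some(("sha512", _)) => format!("sha512:{:x}", Sha512::digest(data)),
        _ => return Err(format!("unsupported digest algorithm in '{expected}'")),
    };
    if actual == expected {
        Ok(())
    } else {
        Err(format!("digest mismatch: expected {expected}, got {actual}"))
    }
}

#[test]
fn verify_content_digest_sketch() {
    // The recomputed digest of the fixture blob matches itself...
    let good = digest(BLOB);
    assert!(verify_content_digest(&good, BLOB).is_ok());
    // ...while a bogus digest is rejected, which is what the bad-server tests rely on.
    assert!(verify_content_digest("sha256:deadbeef", BLOB).is_err());
}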
375 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | //! OCI Image Configuration 2 | //! 3 | //! Definition following 4 | 5 | use std::collections::{HashMap, HashSet}; 6 | 7 | use chrono::{DateTime, Utc}; 8 | use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; 9 | 10 | /// The CPU architecture which the binaries in this image are 11 | /// built to run on. 12 | /// Validated values are listed in [Go Language document for GOARCH](https://golang.org/doc/install/source#environment) 13 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)] 14 | #[serde(rename_all = "lowercase")] 15 | pub enum Architecture { 16 | /// Arm 17 | Arm, 18 | /// Arm 64bit 19 | Arm64, 20 | /// Amd64/x86-64 21 | #[default] 22 | Amd64, 23 | /// Intel i386 24 | #[serde(rename = "386")] 25 | I386, 26 | /// Wasm 27 | Wasm, 28 | /// Loong64 29 | Loong64, 30 | /// MIPS 31 | Mips, 32 | /// MIPSle 33 | Mipsle, 34 | /// MIPS64 35 | Mips64, 36 | /// MIPS64le 37 | Mips64le, 38 | /// Power PC64 39 | PPC64, 40 | /// Power PC64le 41 | PPC64le, 42 | /// RiscV 64 43 | Riscv64, 44 | /// IBM s390x 45 | S390x, 46 | /// With this field empty 47 | #[serde(rename = "")] 48 | None, 49 | } 50 | 51 | /// The name of the operating system which the image is 52 | /// built to run on. 53 | /// Validated values are listed in [Go Language document for GOARCH](https://golang.org/doc/install/source#environment) 54 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)] 55 | #[serde(rename_all = "lowercase")] 56 | pub enum Os { 57 | /// IBM AIX 58 | Aix, 59 | /// Android 60 | Android, 61 | /// Apple Darwin 62 | Darwin, 63 | /// FreeBSD Dragonfly 64 | Dragonfly, 65 | /// FreeBSD 66 | Freebsd, 67 | /// Illumos 68 | Illumos, 69 | /// iOS 70 | Ios, 71 | /// Js 72 | Js, 73 | /// Linux 74 | #[default] 75 | Linux, 76 | /// NetBSD 77 | Netbsd, 78 | /// OpenBSD 79 | Openbsd, 80 | /// Plan9 from Bell Labs 81 | Plan9, 82 | /// Solaris 83 | Solaris, 84 | /// WASI Preview 1 85 | Wasip1, 86 | /// Microsoft Windows 87 | Windows, 88 | /// With this field empty 89 | #[serde(rename = "")] 90 | None, 91 | } 92 | 93 | /// An OCI Image is an ordered collection of root filesystem changes 94 | /// and the corresponding execution parameters for use within a 95 | /// container runtime. 96 | /// 97 | /// Format defined [here](https://github.com/opencontainers/image-spec/blob/v1.0/config.md) 98 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)] 99 | pub struct ConfigFile { 100 | /// An combined date and time at which the image was created, 101 | /// formatted as defined by 102 | /// [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6) 103 | #[serde(skip_serializing_if = "Option::is_none")] 104 | pub created: Option>, 105 | 106 | /// Gives the name and/or email address of the person or entity 107 | /// which created and is responsible for maintaining the image. 108 | #[serde(skip_serializing_if = "Option::is_none")] 109 | pub author: Option, 110 | 111 | /// The CPU architecture which the binaries in this image are 112 | /// built to run on. 113 | pub architecture: Architecture, 114 | 115 | /// The name of the operating system which the image is built to run on. 
116 | /// Validated values are listed in [Go Language document for GOOS](https://golang.org/doc/install/source#environment) 117 | pub os: Os, 118 | 119 | /// The execution parameters which SHOULD be used as a base when running a container using the image. 120 | #[serde(skip_serializing_if = "Option::is_none")] 121 | pub config: Option<Config>, 122 | 123 | /// The rootfs key references the layer content addresses used by the image. 124 | pub rootfs: Rootfs, 125 | 126 | /// Describes the history of each layer. 127 | #[serde(skip_serializing_if = "is_option_vec_empty")] 128 | pub history: Option<Vec<History>>, 129 | } 130 | 131 | fn is_option_vec_empty<T>(opt_vec: &Option<Vec<T>>) -> bool { 132 | if let Some(vec) = opt_vec { 133 | vec.is_empty() 134 | } else { 135 | true 136 | } 137 | } 138 | 139 | /// Helper struct to be serialized into and deserialized from `{}` 140 | #[derive(Deserialize, Serialize)] 141 | struct Empty {} 142 | 143 | /// Helper to deserialize a `map[string]struct{}` of golang 144 | fn optional_hashset_from_str<'de, D: Deserializer<'de>>( 145 | d: D, 146 | ) -> Result<Option<HashSet<String>>, D::Error> { 147 | let res = <Option<HashMap<String, Empty>>>::deserialize(d)?.map(|h| h.into_keys().collect()); 148 | Ok(res) 149 | } 150 | 151 | /// Helper to serialize an optional hashset 152 | fn serialize_optional_hashset<T, S>( 153 | value: &Option<HashSet<T>>, 154 | serializer: S, 155 | ) -> Result<S::Ok, S::Error> 156 | where 157 | T: Serialize, 158 | S: Serializer, 159 | { 160 | match value { 161 | Some(set) => { 162 | let empty = Empty {}; 163 | let mut map = serializer.serialize_map(Some(set.len()))?; 164 | for k in set { 165 | map.serialize_entry(k, &empty)?; 166 | } 167 | 168 | map.end() 169 | } 170 | None => serializer.serialize_none(), 171 | } 172 | } 173 |
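// A minimal round-trip sketch of the Go-style `map[string]struct{}` encoding the
// helpers above handle. The `PortsOnly` struct and the test name are hypothetical
// and assume this snippet sits in this module next to `optional_hashset_from_str`
// and `serialize_optional_hashset`, with `serde_json` available for tests.
#[cfg(test)]
#[derive(Serialize, Deserialize)]
struct PortsOnly {
    #[serde(
        deserialize_with = "optional_hashset_from_str",
        serialize_with = "serialize_optional_hashset",
        default
    )]
    exposed_ports: Option<HashSet<String>>,
}

#[test]
fn exposed_ports_round_trip_sketch() {
    // `{"8080/tcp": {}}` deserializes into a plain set of port strings...
    let parsed: PortsOnly =
        serde_json::from_str(r#"{"exposed_ports":{"8080/tcp":{}}}"#).expect("deserialize sketch");
    assert!(parsed.exposed_ports.as_ref().unwrap().contains("8080/tcp"));
    // ...and serializes back to the map-with-empty-objects shape, not a JSON array.
    let json = serde_json::to_string(&parsed).expect("serialize sketch");
    assert!(json.contains(r#""8080/tcp":{}"#));
}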
174 | /// The execution parameters which SHOULD be used as a base when running a container using the image. 175 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)] 176 | #[serde(rename_all = "PascalCase")] 177 | pub struct Config { 178 | /// The username or UID which is a platform-specific structure 179 | /// that allows specific control over which user the process runs as. This acts as a default value to use when the value is 180 | /// not specified when creating a container. For Linux based 181 | /// systems, all of the following are valid: `user`, `uid`, 182 | /// `user:group`, `uid:gid`, `uid:group`, `user:gid`. If `group`/`gid` is 183 | /// not specified, the default group and supplementary groups 184 | /// of the given `user`/`uid` in `/etc/passwd` from the container are 185 | /// applied. 186 | #[serde(skip_serializing_if = "Option::is_none")] 187 | pub user: Option<String>, 188 | 189 | /// A set of ports to expose from a container running this 190 | /// image. Its keys can be in the format of: `port/tcp`, `port/udp`, 191 | /// `port` with the default protocol being `tcp` if not specified. 192 | /// These values act as defaults and are merged with any 193 | /// specified when creating a container. 194 | #[serde( 195 | skip_serializing_if = "is_option_hashset_empty", 196 | deserialize_with = "optional_hashset_from_str", 197 | serialize_with = "serialize_optional_hashset", 198 | default 199 | )] 200 | pub exposed_ports: Option<HashSet<String>>, 201 | 202 | /// Entries are in the format of `VARNAME=VARVALUE`. 203 | #[serde(skip_serializing_if = "is_option_vec_empty")] 204 | pub env: Option<Vec<String>>, 205 | 206 | /// Default arguments to the entrypoint of the container. 207 | #[serde(skip_serializing_if = "is_option_vec_empty")] 208 | pub cmd: Option<Vec<String>>, 209 | 210 | /// A list of arguments to use as the command to execute when 211 | /// the container starts. 212 | #[serde(skip_serializing_if = "is_option_vec_empty")] 213 | pub entrypoint: Option<Vec<String>>, 214 | 215 | /// A set of directories describing where the process is likely to write data specific to a container instance. 216 | #[serde( 217 | skip_serializing_if = "is_option_hashset_empty", 218 | deserialize_with = "optional_hashset_from_str", 219 | serialize_with = "serialize_optional_hashset", 220 | default 221 | )] 222 | pub volumes: Option<HashSet<String>>, 223 | 224 | /// Sets the current working directory of the entrypoint 225 | /// process in the container. 226 | #[serde(skip_serializing_if = "Option::is_none")] 227 | pub working_dir: Option<String>, 228 | 229 | /// The field contains arbitrary metadata for the container. 230 | /// This property MUST use the [annotation rules](https://github.com/opencontainers/image-spec/blob/v1.0/annotations.md#rules). 231 | #[serde(skip_serializing_if = "is_option_hashmap_empty")] 232 | pub labels: Option<HashMap<String, String>>, 233 | 234 | /// The field contains the system call signal that will be sent 235 | /// to the container to exit. The signal can be a signal name 236 | /// in the format `SIGNAME`, for instance `SIGKILL` or `SIGRTMIN+3`. 237 | #[serde(skip_serializing_if = "Option::is_none")] 238 | pub stop_signal: Option<String>, 239 | } 240 | 241 | fn is_option_hashset_empty<T>(opt_hash: &Option<HashSet<T>>) -> bool { 242 | if let Some(hash) = opt_hash { 243 | hash.is_empty() 244 | } else { 245 | true 246 | } 247 | } 248 | 249 | fn is_option_hashmap_empty<K, V>(opt_hash: &Option<HashMap<K, V>>) -> bool { 250 | if let Some(hash) = opt_hash { 251 | hash.is_empty() 252 | } else { 253 | true 254 | } 255 | } 256 | 257 | /// Default value of the type of a [`Rootfs`] 258 | pub const ROOTFS_TYPE: &str = "layers"; 259 | 260 | /// The rootfs key references the layer content addresses used by the image. 261 | /// This makes the image config hash depend on the filesystem hash. 262 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 263 | pub struct Rootfs { 264 | /// MUST be set to `layers`. 265 | pub r#type: String, 266 | 267 | /// An array of layer content hashes (`DiffIDs`), in order from first to last. 268 | pub diff_ids: Vec<String>, 269 | } 270 | 271 | impl Default for Rootfs { 272 | fn default() -> Self { 273 | Self { 274 | r#type: String::from(ROOTFS_TYPE), 275 | diff_ids: Default::default(), 276 | } 277 | } 278 | } 279 | 280 | /// Describes the history of each layer. The array is ordered from first to last. 281 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Default)] 282 | pub struct History { 283 | /// A combined date and time at which the layer was created, 284 | /// formatted as defined by [RFC 3339, section 5.6](https://tools.ietf.org/html/rfc3339#section-5.6). 285 | #[serde(skip_serializing_if = "Option::is_none")] 286 | pub created: Option<DateTime<Utc>>, 287 | 288 | /// The author of the build point. 289 | #[serde(skip_serializing_if = "Option::is_none")] 290 | pub author: Option<String>, 291 | 292 | /// The command which created the layer. 293 | #[serde(skip_serializing_if = "Option::is_none")] 294 | pub created_by: Option<String>, 295 | 296 | /// A custom message set when creating the layer. 297 | #[serde(skip_serializing_if = "Option::is_none")] 298 | pub comment: Option<String>, 299 | 300 | /// This field is used to mark if the history item created a 301 | /// filesystem diff.
It is set to true if this history item 302 | /// doesn't correspond to an actual layer in the rootfs section 303 | /// (for example, Dockerfile's `ENV` command results in no 304 | /// change to the filesystem). 305 | #[serde(skip_serializing_if = "Option::is_none")] 306 | pub empty_layer: Option, 307 | } 308 | 309 | #[cfg(test)] 310 | mod tests { 311 | use assert_json_diff::assert_json_eq; 312 | use chrono::DateTime; 313 | use rstest::*; 314 | use serde_json::Value; 315 | use std::collections::{HashMap, HashSet}; 316 | 317 | use super::{Architecture, Config, ConfigFile, History, Os, Rootfs}; 318 | 319 | const EXAMPLE_CONFIG: &str = r#" 320 | { 321 | "created": "2015-10-31T22:22:56.015925234Z", 322 | "author": "Alyssa P. Hacker ", 323 | "architecture": "amd64", 324 | "os": "linux", 325 | "config": { 326 | "User": "alice", 327 | "ExposedPorts": { 328 | "8080/tcp": {} 329 | }, 330 | "Env": [ 331 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 332 | "FOO=oci_is_a", 333 | "BAR=well_written_spec" 334 | ], 335 | "Entrypoint": [ 336 | "/bin/my-app-binary" 337 | ], 338 | "Cmd": [ 339 | "--foreground", 340 | "--config", 341 | "/etc/my-app.d/default.cfg" 342 | ], 343 | "Volumes": { 344 | "/var/job-result-data": {}, 345 | "/var/log/my-app-logs": {} 346 | }, 347 | "WorkingDir": "/home/alice", 348 | "Labels": { 349 | "com.example.project.git.url": "https://example.com/project.git", 350 | "com.example.project.git.commit": "45a939b2999782a3f005621a8d0f29aa387e1d6b" 351 | } 352 | }, 353 | "rootfs": { 354 | "diff_ids": [ 355 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", 356 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 357 | ], 358 | "type": "layers" 359 | }, 360 | "history": [ 361 | { 362 | "created": "2015-10-31T22:22:54.690851953Z", 363 | "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" 364 | }, 365 | { 366 | "created": "2015-10-31T22:22:55.613815829Z", 367 | "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", 368 | "empty_layer": true 369 | } 370 | ] 371 | }"#; 372 | 373 | fn example_config() -> ConfigFile { 374 | let config = Config { 375 | user: Some("alice".into()), 376 | exposed_ports: Some(HashSet::from_iter(vec!["8080/tcp".into()])), 377 | env: Some(vec![ 378 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin".into(), 379 | "FOO=oci_is_a".into(), 380 | "BAR=well_written_spec".into(), 381 | ]), 382 | cmd: Some(vec![ 383 | "--foreground".into(), 384 | "--config".into(), 385 | "/etc/my-app.d/default.cfg".into(), 386 | ]), 387 | entrypoint: Some(vec!["/bin/my-app-binary".into()]), 388 | volumes: Some(HashSet::from_iter(vec![ 389 | "/var/job-result-data".into(), 390 | "/var/log/my-app-logs".into(), 391 | ])), 392 | working_dir: Some("/home/alice".into()), 393 | labels: Some(HashMap::from_iter(vec![ 394 | ( 395 | "com.example.project.git.url".into(), 396 | "https://example.com/project.git".into(), 397 | ), 398 | ( 399 | "com.example.project.git.commit".into(), 400 | "45a939b2999782a3f005621a8d0f29aa387e1d6b".into(), 401 | ), 402 | ])), 403 | stop_signal: None, 404 | }; 405 | let rootfs = Rootfs { 406 | r#type: "layers".into(), 407 | diff_ids: vec![ 408 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1".into(), 409 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef".into(), 410 | ], 411 | }; 412 | 413 | let history = Some(vec![History { 414 | created: 
Some(DateTime::parse_from_rfc3339("2015-10-31T22:22:54.690851953Z").expect("parse time failed").into()), 415 | author: None, 416 | created_by: Some("/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /".into()), 417 | comment: None, 418 | empty_layer: None, 419 | }, 420 | History { 421 | created: Some(DateTime::parse_from_rfc3339("2015-10-31T22:22:55.613815829Z").expect("parse time failed").into()), 422 | author: None, 423 | created_by: Some("/bin/sh -c #(nop) CMD [\"sh\"]".into()), 424 | comment: None, 425 | empty_layer: Some(true), 426 | }]); 427 | ConfigFile { 428 | created: Some( 429 | DateTime::parse_from_rfc3339("2015-10-31T22:22:56.015925234Z") 430 | .expect("parse time failed") 431 | .into(), 432 | ), 433 | author: Some("Alyssa P. Hacker ".into()), 434 | architecture: Architecture::Amd64, 435 | os: Os::Linux, 436 | config: Some(config), 437 | rootfs, 438 | history, 439 | } 440 | } 441 | 442 | const MINIMAL_CONFIG: &str = r#" 443 | { 444 | "architecture": "amd64", 445 | "os": "linux", 446 | "rootfs": { 447 | "diff_ids": [ 448 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", 449 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 450 | ], 451 | "type": "layers" 452 | } 453 | }"#; 454 | 455 | fn minimal_config() -> ConfigFile { 456 | let rootfs = Rootfs { 457 | r#type: "layers".into(), 458 | diff_ids: vec![ 459 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1".into(), 460 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef".into(), 461 | ], 462 | }; 463 | 464 | ConfigFile { 465 | architecture: Architecture::Amd64, 466 | os: Os::Linux, 467 | config: None, 468 | rootfs, 469 | history: None, 470 | created: None, 471 | author: None, 472 | } 473 | } 474 | 475 | const MINIMAL_CONFIG2: &str = r#" 476 | { 477 | "architecture":"arm64", 478 | "config":{ 479 | "Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"], 480 | "WorkingDir":"/" 481 | }, 482 | "created":"2023-04-21T11:53:28.176613804Z", 483 | "history":[{ 484 | "created":"2023-04-21T11:53:28.176613804Z", 485 | "created_by":"COPY ./src/main.rs / # buildkit", 486 | "comment":"buildkit.dockerfile.v0" 487 | }], 488 | "os":"linux", 489 | "rootfs":{ 490 | "type":"layers", 491 | "diff_ids":[ 492 | "sha256:267fbf1f5a9377e40a2dc65b355000111e000a35ac77f7b19a59f587d4dd778e" 493 | ] 494 | } 495 | }"#; 496 | 497 | fn minimal_config2() -> ConfigFile { 498 | let config = Some(Config { 499 | env: Some(vec![ 500 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin".into(), 501 | ]), 502 | working_dir: Some("/".into()), 503 | ..Config::default() 504 | }); 505 | let history = Some(vec![History { 506 | created: Some( 507 | DateTime::parse_from_rfc3339("2023-04-21T11:53:28.176613804Z") 508 | .expect("parse time failed") 509 | .into(), 510 | ), 511 | author: None, 512 | created_by: Some("COPY ./src/main.rs / # buildkit".into()), 513 | comment: Some("buildkit.dockerfile.v0".into()), 514 | empty_layer: None, 515 | }]); 516 | 517 | let rootfs = Rootfs { 518 | r#type: "layers".into(), 519 | diff_ids: vec![ 520 | "sha256:267fbf1f5a9377e40a2dc65b355000111e000a35ac77f7b19a59f587d4dd778e".into(), 521 | ], 522 | }; 523 | 524 | ConfigFile { 525 | architecture: Architecture::Arm64, 526 | os: Os::Linux, 527 | config, 528 | rootfs, 529 | history, 530 | created: Some( 531 | DateTime::parse_from_rfc3339("2023-04-21T11:53:28.176613804Z") 532 | .expect("parse time failed") 533 | .into(), 534 | ), 
535 | author: None, 536 | } 537 | } 538 | 539 | #[rstest] 540 | #[case(example_config(), EXAMPLE_CONFIG)] 541 | #[case(minimal_config(), MINIMAL_CONFIG)] 542 | #[case(minimal_config2(), MINIMAL_CONFIG2)] 543 | fn deserialize_test(#[case] config: ConfigFile, #[case] expected: &str) { 544 | let parsed: ConfigFile = serde_json::from_str(expected).expect("parsed failed"); 545 | assert_eq!(config, parsed); 546 | } 547 | 548 | #[rstest] 549 | #[case(example_config(), EXAMPLE_CONFIG)] 550 | #[case(minimal_config(), MINIMAL_CONFIG)] 551 | #[case(minimal_config2(), MINIMAL_CONFIG2)] 552 | fn serialize_test(#[case] config: ConfigFile, #[case] expected: &str) { 553 | let serialized = serde_json::to_value(&config).expect("serialize failed"); 554 | let parsed: Value = serde_json::from_str(expected).expect("parsed failed"); 555 | assert_json_eq!(serialized, parsed); 556 | } 557 | } 558 | -------------------------------------------------------------------------------- /src/manifest.rs: -------------------------------------------------------------------------------- 1 | //! OCI Manifest 2 | use std::collections::BTreeMap; 3 | 4 | use crate::{ 5 | client::{Config, ImageLayer}, 6 | sha256_digest, 7 | }; 8 | 9 | /// The mediatype for WASM layers. 10 | pub const WASM_LAYER_MEDIA_TYPE: &str = "application/vnd.wasm.content.layer.v1+wasm"; 11 | /// The mediatype for a WASM image config. 12 | pub const WASM_CONFIG_MEDIA_TYPE: &str = "application/vnd.wasm.config.v1+json"; 13 | /// The mediatype for an docker v2 schema 2 manifest. 14 | pub const IMAGE_MANIFEST_MEDIA_TYPE: &str = "application/vnd.docker.distribution.manifest.v2+json"; 15 | /// The mediatype for an docker v2 shema 2 manifest list. 16 | pub const IMAGE_MANIFEST_LIST_MEDIA_TYPE: &str = 17 | "application/vnd.docker.distribution.manifest.list.v2+json"; 18 | /// The mediatype for an OCI image index manifest. 19 | pub const OCI_IMAGE_INDEX_MEDIA_TYPE: &str = "application/vnd.oci.image.index.v1+json"; 20 | /// The mediatype for an OCI image manifest. 21 | pub const OCI_IMAGE_MEDIA_TYPE: &str = "application/vnd.oci.image.manifest.v1+json"; 22 | /// The mediatype for an image config (manifest). 23 | pub const IMAGE_CONFIG_MEDIA_TYPE: &str = "application/vnd.oci.image.config.v1+json"; 24 | /// The mediatype that Docker uses for image configs. 25 | pub const IMAGE_DOCKER_CONFIG_MEDIA_TYPE: &str = "application/vnd.docker.container.image.v1+json"; 26 | /// The mediatype for a layer. 27 | pub const IMAGE_LAYER_MEDIA_TYPE: &str = "application/vnd.oci.image.layer.v1.tar"; 28 | /// The mediatype for a layer that is gzipped. 29 | pub const IMAGE_LAYER_GZIP_MEDIA_TYPE: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; 30 | /// The mediatype that Docker uses for a layer that is tarred. 31 | pub const IMAGE_DOCKER_LAYER_TAR_MEDIA_TYPE: &str = "application/vnd.docker.image.rootfs.diff.tar"; 32 | /// The mediatype that Docker uses for a layer that is gzipped. 33 | pub const IMAGE_DOCKER_LAYER_GZIP_MEDIA_TYPE: &str = 34 | "application/vnd.docker.image.rootfs.diff.tar.gzip"; 35 | /// The mediatype for a layer that is nondistributable. 36 | pub const IMAGE_LAYER_NONDISTRIBUTABLE_MEDIA_TYPE: &str = 37 | "application/vnd.oci.image.layer.nondistributable.v1.tar"; 38 | /// The mediatype for a layer that is nondistributable and gzipped. 
39 | pub const IMAGE_LAYER_NONDISTRIBUTABLE_GZIP_MEDIA_TYPE: &str = 40 | "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"; 41 | 42 | /// An image, or image index, OCI manifest 43 | #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] 44 | #[serde(untagged)] 45 | #[allow(clippy::large_enum_variant)] 46 | pub enum OciManifest { 47 | /// An OCI image manifest 48 | Image(OciImageManifest), 49 | /// An OCI image index manifest 50 | ImageIndex(OciImageIndex), 51 | } 52 | 53 | impl OciManifest { 54 | /// Returns the appropriate content-type for each variant. 55 | pub fn content_type(&self) -> &str { 56 | match self { 57 | OciManifest::Image(image) => { 58 | image.media_type.as_deref().unwrap_or(OCI_IMAGE_MEDIA_TYPE) 59 | } 60 | OciManifest::ImageIndex(image) => image 61 | .media_type 62 | .as_deref() 63 | .unwrap_or(IMAGE_MANIFEST_LIST_MEDIA_TYPE), 64 | } 65 | } 66 | } 67 | 68 | /// The OCI image manifest describes an OCI image. 69 | /// 70 | /// It is part of the OCI specification, and is defined [here](https://github.com/opencontainers/image-spec/blob/main/manifest.md) 71 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] 72 | #[serde(rename_all = "camelCase")] 73 | pub struct OciImageManifest { 74 | /// This is a schema version. 75 | /// 76 | /// The specification does not specify the width of this integer. 77 | /// However, the only version allowed by the specification is `2`. 78 | /// So we have made this a u8. 79 | pub schema_version: u8, 80 | 81 | /// This is an optional media type describing this manifest. 82 | /// 83 | /// This property SHOULD be used and [remain compatible](https://github.com/opencontainers/image-spec/blob/main/media-types.md#compatibility-matrix) 84 | /// with earlier versions of this specification and with other similar external formats. 85 | #[serde(skip_serializing_if = "Option::is_none")] 86 | pub media_type: Option, 87 | 88 | /// The image configuration. 89 | /// 90 | /// This object is required. 91 | pub config: OciDescriptor, 92 | 93 | /// The OCI image layers 94 | /// 95 | /// The specification is unclear whether this is required. We have left it 96 | /// required, assuming an empty vector can be used if necessary. 97 | pub layers: Vec, 98 | 99 | /// This is an optional subject linking this manifest to another manifest 100 | /// forming an association between the image manifest and the other manifest. 101 | /// 102 | /// NOTE: The responsibility of implementing the fall back mechanism when encountering 103 | /// a registry with an [unavailable referrers API](https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema) 104 | /// falls on the consumer of the client. 105 | #[serde(skip_serializing_if = "Option::is_none")] 106 | pub subject: Option, 107 | 108 | /// The OCI artifact type 109 | /// 110 | /// This OPTIONAL property contains the type of an artifact when the manifest is used for an 111 | /// artifact. This MUST be set when config.mediaType is set to the empty value. If defined, 112 | /// the value MUST comply with RFC 6838, including the naming requirements in its section 4.2, 113 | /// and MAY be registered with IANA. Implementations storing or copying image manifests 114 | /// MUST NOT error on encountering an artifactType that is unknown to the implementation. 
115 | /// 116 | /// Introduced in OCI Image Format spec v1.1 117 | #[serde(skip_serializing_if = "Option::is_none")] 118 | pub artifact_type: Option<String>, 119 | 120 | /// The annotations for this manifest 121 | /// 122 | /// The specification says "If there are no annotations then this property 123 | /// MUST either be absent or be an empty map." 124 | /// To accommodate either, this is optional. 125 | #[serde(skip_serializing_if = "Option::is_none")] 126 | pub annotations: Option<BTreeMap<String, String>>, 127 | } 128 | 129 | impl Default for OciImageManifest { 130 | fn default() -> Self { 131 | OciImageManifest { 132 | schema_version: 2, 133 | media_type: None, 134 | config: OciDescriptor::default(), 135 | layers: vec![], 136 | subject: None, 137 | artifact_type: None, 138 | annotations: None, 139 | } 140 | } 141 | } 142 | 143 | impl OciImageManifest { 144 | /// Create a new OciImageManifest using the given parameters 145 | /// 146 | /// This can be useful to create an OCI Image Manifest with 147 | /// custom annotations. 148 | pub fn build( 149 | layers: &[ImageLayer], 150 | config: &Config, 151 | annotations: Option<BTreeMap<String, String>>, 152 | ) -> Self { 153 | let mut manifest = OciImageManifest::default(); 154 | 155 | manifest.config.media_type = config.media_type.to_string(); 156 | manifest.config.size = config.data.len() as i64; 157 | manifest.config.digest = sha256_digest(&config.data); 158 | manifest.annotations = annotations; 159 | 160 | for layer in layers { 161 | let digest = sha256_digest(&layer.data); 162 | 163 | let descriptor = OciDescriptor { 164 | size: layer.data.len() as i64, 165 | digest, 166 | media_type: layer.media_type.to_string(), 167 | annotations: layer.annotations.clone(), 168 | ..Default::default() 169 | }; 170 | 171 | manifest.layers.push(descriptor); 172 | } 173 | 174 | manifest 175 | } 176 | } 177 | 178 | impl From<OciImageIndex> for OciManifest { 179 | fn from(m: OciImageIndex) -> Self { 180 | Self::ImageIndex(m) 181 | } 182 | } 183 | impl From<OciImageManifest> for OciManifest { 184 | fn from(m: OciImageManifest) -> Self { 185 | Self::Image(m) 186 | } 187 | } 188 | 189 | impl std::fmt::Display for OciManifest { 190 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 191 | match self { 192 | OciManifest::Image(oci_image_manifest) => write!(f, "{oci_image_manifest}"), 193 | OciManifest::ImageIndex(oci_image_index) => write!(f, "{oci_image_index}"), 194 | } 195 | } 196 | } 197 | 198 | impl std::fmt::Display for OciImageIndex { 199 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 200 | let media_type = self 201 | .media_type 202 | .clone() 203 | .unwrap_or_else(|| String::from("N/A")); 204 | let manifests: Vec<String> = self.manifests.iter().map(|m| m.to_string()).collect(); 205 | write!( 206 | f, 207 | "OCI Image Index( schema-version: '{}', media-type: '{}', manifests: '{}' )", 208 | self.schema_version, 209 | media_type, 210 | manifests.join(","), 211 | ) 212 | } 213 | } 214 | 215 | impl std::fmt::Display for OciImageManifest { 216 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 217 | let media_type = self 218 | .media_type 219 | .clone() 220 | .unwrap_or_else(|| String::from("N/A")); 221 | let annotations = self.annotations.clone().unwrap_or_default(); 222 | let layers: Vec<String> = self.layers.iter().map(|l| l.to_string()).collect(); 223 | 224 | write!( 225 | f, 226 | "OCI Image Manifest( schema-version: '{}', media-type: '{}', config: '{}', artifact-type: '{:?}', layers: '{:?}', annotations: '{:?}' )", 227 | self.schema_version, 228 | media_type, 229 | self.config, 230 |
self.artifact_type, 231 | layers, 232 | annotations, 233 | ) 234 | } 235 | } 236 | 237 | /// Versioned provides a struct with the manifest's schemaVersion and mediaType. 238 | /// Incoming content with unknown schema versions can be decoded against this 239 | /// struct to check the version. 240 | #[derive(Clone, Debug, serde::Deserialize)] 241 | #[serde(rename_all = "camelCase")] 242 | pub struct Versioned { 243 | /// schema_version is the image manifest schema that this image follows 244 | pub schema_version: i32, 245 | 246 | /// media_type is the media type of this schema. 247 | #[serde(skip_serializing_if = "Option::is_none")] 248 | pub media_type: Option, 249 | } 250 | 251 | /// The OCI descriptor is a generic object used to describe other objects. 252 | /// 253 | /// It is defined in the [OCI Image Specification](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#properties): 254 | #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] 255 | #[serde(rename_all = "camelCase")] 256 | pub struct OciDescriptor { 257 | /// The media type of this descriptor. 258 | /// 259 | /// Layers, config, and manifests may all have descriptors. Each 260 | /// is differentiated by its mediaType. 261 | /// 262 | /// This REQUIRED property contains the media type of the referenced 263 | /// content. Values MUST comply with RFC 6838, including the naming 264 | /// requirements in its section 4.2. 265 | pub media_type: String, 266 | /// The SHA 256 or 512 digest of the object this describes. 267 | /// 268 | /// This REQUIRED property is the digest of the targeted content, conforming 269 | /// to the requirements outlined in Digests. Retrieved content SHOULD be 270 | /// verified against this digest when consumed via untrusted sources. 271 | pub digest: String, 272 | /// The size, in bytes, of the object this describes. 273 | /// 274 | /// This REQUIRED property specifies the size, in bytes, of the raw 275 | /// content. This property exists so that a client will have an expected 276 | /// size for the content before processing. If the length of the retrieved 277 | /// content does not match the specified length, the content SHOULD NOT be 278 | /// trusted. 279 | pub size: i64, 280 | /// This OPTIONAL property specifies a list of URIs from which this 281 | /// object MAY be downloaded. Each entry MUST conform to RFC 3986. 282 | /// Entries SHOULD use the http and https schemes, as defined in RFC 7230. 283 | #[serde(skip_serializing_if = "Option::is_none")] 284 | pub urls: Option>, 285 | 286 | /// This OPTIONAL property contains arbitrary metadata for this descriptor. 287 | /// This OPTIONAL property MUST use the annotation rules. 
288 | /// 289 | #[serde(skip_serializing_if = "Option::is_none")] 290 | pub annotations: Option<BTreeMap<String, String>>, 291 | } 292 | 293 | impl std::fmt::Display for OciDescriptor { 294 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 295 | let urls = self.urls.clone().unwrap_or_default(); 296 | let annotations = self.annotations.clone().unwrap_or_default(); 297 | 298 | write!( 299 | f, 300 | "( media-type: '{}', digest: '{}', size: '{}', urls: '{:?}', annotations: '{:?}' )", 301 | self.media_type, self.digest, self.size, urls, annotations, 302 | ) 303 | } 304 | } 305 | 306 | impl Default for OciDescriptor { 307 | fn default() -> Self { 308 | OciDescriptor { 309 | media_type: IMAGE_CONFIG_MEDIA_TYPE.to_owned(), 310 | digest: "".to_owned(), 311 | size: 0, 312 | urls: None, 313 | annotations: None, 314 | } 315 | } 316 | } 317 | 318 | /// The image index is a higher-level manifest which points to specific image manifests. 319 | /// 320 | /// It is part of the OCI specification, and is defined [here](https://github.com/opencontainers/image-spec/blob/main/image-index.md): 321 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] 322 | #[serde(rename_all = "camelCase")] 323 | pub struct OciImageIndex { 324 | /// This is a schema version. 325 | /// 326 | /// The specification does not specify the width of this integer. 327 | /// However, the only version allowed by the specification is `2`. 328 | /// So we have made this a u8. 329 | pub schema_version: u8, 330 | 331 | /// This is an optional media type describing this manifest. 332 | /// 333 | /// It is reserved for compatibility, but the specification does not seem 334 | /// to recommend setting it. 335 | #[serde(skip_serializing_if = "Option::is_none")] 336 | pub media_type: Option<String>, 337 | 338 | /// This property contains a list of manifests for specific platforms. 339 | /// The spec says this field must be present but the value may be an empty array. 340 | pub manifests: Vec<ImageIndexEntry>, 341 | 342 | /// This property contains the type of an artifact when the manifest is used for an artifact. 343 | #[serde(skip_serializing_if = "Option::is_none")] 344 | pub artifact_type: Option<String>, 345 | 346 | /// The annotations for this manifest 347 | /// 348 | /// The specification says "If there are no annotations then this property 349 | /// MUST either be absent or be an empty map." 350 | /// To accommodate either, this is optional. 351 | #[serde(skip_serializing_if = "Option::is_none")] 352 | pub annotations: Option<BTreeMap<String, String>>, 353 | } 354 | 355 | /// The manifest entry of an `ImageIndex`. 356 | /// 357 | /// It is part of the OCI specification, and is defined in the `manifests` 358 | /// section [here](https://github.com/opencontainers/image-spec/blob/main/image-index.md#image-index-property-descriptions): 359 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] 360 | #[serde(rename_all = "camelCase")] 361 | pub struct ImageIndexEntry { 362 | /// The media type of this descriptor. 363 | /// 364 | /// Layers, config, and manifests may all have descriptors. Each 365 | /// is differentiated by its mediaType. 366 | /// 367 | /// This REQUIRED property contains the media type of the referenced 368 | /// content. Values MUST comply with RFC 6838, including the naming 369 | /// requirements in its section 4.2. 370 | pub media_type: String, 371 | /// The SHA 256 or 512 digest of the object this describes. 372 | /// 373 | /// This REQUIRED property is the digest of the targeted content, conforming 374 | /// to the requirements outlined in Digests.
318 | /// The image index is a higher-level manifest which points to specific image manifests.
319 | ///
320 | /// It is part of the OCI specification, and is defined [here](https://github.com/opencontainers/image-spec/blob/main/image-index.md):
321 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
322 | #[serde(rename_all = "camelCase")]
323 | pub struct OciImageIndex {
324 |     /// This is a schema version.
325 |     ///
326 |     /// The specification does not specify the width of this integer.
327 |     /// However, the only version allowed by the specification is `2`.
328 |     /// So we have made this a u8.
329 |     pub schema_version: u8,
330 | 
331 |     /// This is an optional media type describing this manifest.
332 |     ///
333 |     /// It is reserved for compatibility, but the specification does not seem
334 |     /// to recommend setting it.
335 |     #[serde(skip_serializing_if = "Option::is_none")]
336 |     pub media_type: Option<String>,
337 | 
338 |     /// This property contains a list of manifests for specific platforms.
339 |     /// The spec says this field must be present but the value may be an empty array.
340 |     pub manifests: Vec<ImageIndexEntry>,
341 | 
342 |     /// This property contains the type of an artifact when the manifest is used for an artifact.
343 |     #[serde(skip_serializing_if = "Option::is_none")]
344 |     pub artifact_type: Option<String>,
345 | 
346 |     /// The annotations for this manifest
347 |     ///
348 |     /// The specification says "If there are no annotations then this property
349 |     /// MUST either be absent or be an empty map."
350 |     /// To accommodate either, this is optional.
351 |     #[serde(skip_serializing_if = "Option::is_none")]
352 |     pub annotations: Option<BTreeMap<String, String>>,
353 | }
354 | 
355 | /// The manifest entry of an `ImageIndex`.
356 | ///
357 | /// It is part of the OCI specification, and is defined in the `manifests`
358 | /// section [here](https://github.com/opencontainers/image-spec/blob/main/image-index.md#image-index-property-descriptions):
359 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
360 | #[serde(rename_all = "camelCase")]
361 | pub struct ImageIndexEntry {
362 |     /// The media type of this descriptor.
363 |     ///
364 |     /// Layers, config, and manifests may all have descriptors. Each
365 |     /// is differentiated by its mediaType.
366 |     ///
367 |     /// This REQUIRED property contains the media type of the referenced
368 |     /// content. Values MUST comply with RFC 6838, including the naming
369 |     /// requirements in its section 4.2.
370 |     pub media_type: String,
371 |     /// The SHA 256 or 512 digest of the object this describes.
372 |     ///
373 |     /// This REQUIRED property is the digest of the targeted content, conforming
374 |     /// to the requirements outlined in Digests. Retrieved content SHOULD be
375 |     /// verified against this digest when consumed via untrusted sources.
376 |     pub digest: String,
377 |     /// The size, in bytes, of the object this describes.
378 |     ///
379 |     /// This REQUIRED property specifies the size, in bytes, of the raw
380 |     /// content. This property exists so that a client will have an expected
381 |     /// size for the content before processing. If the length of the retrieved
382 |     /// content does not match the specified length, the content SHOULD NOT be
383 |     /// trusted.
384 |     pub size: i64,
385 |     /// This OPTIONAL property describes the minimum runtime requirements of the image.
386 |     /// This property SHOULD be present if its target is platform-specific.
387 |     #[serde(skip_serializing_if = "Option::is_none")]
388 |     pub platform: Option<Platform>,
389 | 
390 |     /// This OPTIONAL property contains arbitrary metadata for this image index entry.
391 |     /// This OPTIONAL property MUST use the [annotation rules](https://github.com/opencontainers/image-spec/blob/main/annotations.md#rules).
392 |     #[serde(skip_serializing_if = "Option::is_none")]
393 |     pub annotations: Option<BTreeMap<String, String>>,
394 | }
395 | 
396 | impl std::fmt::Display for ImageIndexEntry {
397 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
398 |         let platform = self
399 |             .platform
400 |             .clone()
401 |             .map(|p| p.to_string())
402 |             .unwrap_or_else(|| String::from("N/A"));
403 |         let annotations = self.annotations.clone().unwrap_or_default();
404 | 
405 |         write!(
406 |             f,
407 |             "(media-type: '{}', digest: '{}', size: '{}', platform: '{}', annotations: {:?})",
408 |             self.media_type, self.digest, self.size, platform, annotations,
409 |         )
410 |     }
411 | }
412 | 
413 | /// Platform specific fields of an Image Index manifest entry.
414 | ///
415 | /// It is part of the OCI specification, and is in the `platform`
416 | /// section [here](https://github.com/opencontainers/image-spec/blob/main/image-index.md#image-index-property-descriptions):
417 | #[derive(Debug, Clone, serde::Deserialize, serde::Serialize, PartialEq, Eq)]
418 | #[serde(rename_all = "camelCase")]
419 | pub struct Platform {
420 |     /// This REQUIRED property specifies the CPU architecture.
421 |     /// Image indexes SHOULD use, and implementations SHOULD understand, values
422 |     /// listed in the Go Language document for [`GOARCH`](https://golang.org/doc/install/source#environment).
423 |     pub architecture: String,
424 |     /// This REQUIRED property specifies the operating system.
425 |     /// Image indexes SHOULD use, and implementations SHOULD understand, values
426 |     /// listed in the Go Language document for [`GOOS`](https://golang.org/doc/install/source#environment).
427 |     pub os: String,
428 |     /// This OPTIONAL property specifies the version of the operating system
429 |     /// targeted by the referenced blob.
430 |     /// Implementations MAY refuse to use manifests where `os.version` is not known
431 |     /// to work with the host OS version.
432 |     /// Valid values are implementation-defined. e.g. `10.0.14393.1066` on `windows`.
433 |     #[serde(rename = "os.version")]
434 |     #[serde(skip_serializing_if = "Option::is_none")]
435 |     pub os_version: Option<String>,
436 |     /// This OPTIONAL property specifies an array of strings, each specifying a mandatory OS feature.
437 |     /// When `os` is `windows`, image indexes SHOULD use, and implementations SHOULD understand the following values:
438 |     /// - `win32k`: image requires `win32k.sys` on the host (Note: `win32k.sys` is missing on Nano Server)
439 |     ///
440 |     /// When `os` is not `windows`, values are implementation-defined and SHOULD be submitted to this specification for standardization.
441 |     #[serde(rename = "os.features")]
442 |     #[serde(skip_serializing_if = "Option::is_none")]
443 |     pub os_features: Option<Vec<String>>,
444 |     /// This OPTIONAL property specifies the variant of the CPU.
445 |     /// Image indexes SHOULD use, and implementations SHOULD understand, `variant` values listed in the [Platform Variants](#platform-variants) table.
446 |     #[serde(skip_serializing_if = "Option::is_none")]
447 |     pub variant: Option<String>,
448 |     /// This property is RESERVED for future versions of the specification.
449 |     #[serde(skip_serializing_if = "Option::is_none")]
450 |     pub features: Option<Vec<String>>,
451 | }
452 | 
453 | impl std::fmt::Display for Platform {
454 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
455 |         let os_version = self
456 |             .os_version
457 |             .clone()
458 |             .unwrap_or_else(|| String::from("N/A"));
459 |         let os_features = self.os_features.clone().unwrap_or_default();
460 |         let variant = self.variant.clone().unwrap_or_else(|| String::from("N/A"));
461 |         let features = self.features.clone().unwrap_or_default();
462 |         write!(f, "( architecture: '{}', os: '{}', os-version: '{}', os-features: '{:?}', variant: '{}', features: '{:?}' )",
463 |             self.architecture,
464 |             self.os,
465 |             os_version,
466 |             os_features,
467 |             variant,
468 |             features,
469 |         )
470 |     }
471 | }
472 | 
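// Editorial sketch, not part of the original source file: an image index
// carries one `ImageIndexEntry` per platform, so resolving an image for a
// given host means picking the entry whose `platform` matches the desired
// architecture and OS, then fetching the manifest behind that entry's digest.
// A minimal illustration; the function name is hypothetical.
#[allow(dead_code)]
fn manifest_digest_for<'a>(
    index: &'a OciImageIndex,
    architecture: &str,
    os: &str,
) -> Option<&'a str> {
    index
        .manifests
        .iter()
        .find(|entry| {
            entry
                .platform
                .as_ref()
                .map(|platform| platform.architecture == architecture && platform.os == os)
                .unwrap_or(false)
        })
        .map(|entry| entry.digest.as_str())
}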
473 | #[cfg(test)]
474 | mod test {
475 |     use super::*;
476 | 
477 |     const TEST_MANIFEST: &str = r#"{
478 |         "schemaVersion": 2,
479 |         "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
480 |         "config": {
481 |             "mediaType": "application/vnd.docker.container.image.v1+json",
482 |             "size": 2,
483 |             "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a"
484 |         },
485 |         "artifactType": "application/vnd.wasm.component.v1+wasm",
486 |         "layers": [
487 |             {
488 |                 "mediaType": "application/vnd.wasm.content.layer.v1+wasm",
489 |                 "size": 1615998,
490 |                 "digest": "sha256:f9c91f4c280ab92aff9eb03b279c4774a80b84428741ab20855d32004b2b983f",
491 |                 "annotations": {
492 |                     "org.opencontainers.image.title": "module.wasm"
493 |                 }
494 |             }
495 |         ]
496 |     }
497 |     "#;
498 | 
499 |     #[test]
500 |     fn test_manifest() {
501 |         let manifest: OciImageManifest =
502 |             serde_json::from_str(TEST_MANIFEST).expect("parsed manifest");
503 |         assert_eq!(2, manifest.schema_version);
504 |         assert_eq!(
505 |             Some(IMAGE_MANIFEST_MEDIA_TYPE.to_owned()),
506 |             manifest.media_type
507 |         );
508 |         let config = manifest.config;
509 |         // Note that this is the Docker config media type, not the OCI one.
510 |         assert_eq!(IMAGE_DOCKER_CONFIG_MEDIA_TYPE.to_owned(), config.media_type);
511 |         assert_eq!(2, config.size);
512 |         assert_eq!(
513 |             "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a".to_owned(),
514 |             config.digest
515 |         );
516 |         assert_eq!(
517 |             "application/vnd.wasm.component.v1+wasm".to_owned(),
518 |             manifest.artifact_type.unwrap()
519 |         );
520 | 
521 |         assert_eq!(1, manifest.layers.len());
522 |         let wasm_layer = &manifest.layers[0];
523 |         assert_eq!(1_615_998, wasm_layer.size);
524 |         assert_eq!(WASM_LAYER_MEDIA_TYPE.to_owned(), wasm_layer.media_type);
525 |         assert_eq!(
526 |             1,
527 |             wasm_layer
528 |                 .annotations
529 |                 .as_ref()
530 |                 .expect("annotations map")
531 |                 .len()
532 |         );
533 |     }
534 | }
535 | 
--------------------------------------------------------------------------------