├── .claude └── CLAUDE.md ├── .gitignore ├── .bootc-dev-infra-commit.txt ├── renovate.json ├── ci └── test-ostree-rs-ext.sh ├── .github ├── workflows │ ├── cratesio-release.yml │ ├── rebase.yml │ ├── ci.yaml │ └── openssf-scorecard.yml └── actions │ ├── setup-rust │ └── action.yml │ └── bootc-ubuntu-setup │ └── action.yml ├── .gemini └── config.yaml ├── .devcontainer └── devcontainer.json ├── AGENTS.md ├── Cargo.toml ├── README.md ├── examples └── client.rs ├── LICENSE └── src └── imageproxy.rs /.claude/CLAUDE.md: -------------------------------------------------------------------------------- 1 | ../AGENTS.md -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target/ 3 | -------------------------------------------------------------------------------- /.bootc-dev-infra-commit.txt: -------------------------------------------------------------------------------- 1 | 2dd498656b9653c321e5d9a8600e6b506714acb3 2 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json" 3 | } 4 | -------------------------------------------------------------------------------- /ci/test-ostree-rs-ext.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xeuo pipefail 3 | cd ostree-rs-ext 4 | ./ci/installdeps.sh 5 | #cat >> Cargo.toml <<'EOF' 6 | #[patch.crates-io] 7 | #containers-image-proxy = { path = ".." 
} 8 | #EOF 9 | cargo test 10 | -------------------------------------------------------------------------------- /.github/workflows/cratesio-release.yml: -------------------------------------------------------------------------------- 1 | # See https://crates.io/docs/trusted-publishing 2 | name: Publish to crates.io 3 | on: 4 | push: 5 | tags: ['v*'] # Triggers when pushing tags starting with 'v' 6 | jobs: 7 | publish: 8 | runs-on: ubuntu-24.04 9 | permissions: 10 | id-token: write # Required for OIDC token exchange 11 | steps: 12 | - uses: actions/checkout@v6 13 | - uses: rust-lang/crates-io-auth-action@v1 14 | id: auth 15 | - run: cargo publish 16 | env: 17 | CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }} 18 | 19 | -------------------------------------------------------------------------------- /.gemini/config.yaml: -------------------------------------------------------------------------------- 1 | # NOTE: This file is canonically maintained in 2 | # 3 | # DO NOT EDIT 4 | # 5 | # This config mainly overrides `summary: false` by default 6 | # as it's really noisy. 7 | have_fun: true 8 | code_review: 9 | disable: false 10 | # Even medium level can be quite noisy, I don't think 11 | # we need LOW. Anyone who wants that type of stuff should 12 | # be able to get it locally or before review. 
13 | comment_severity_threshold: MEDIUM 14 | max_review_comments: -1 15 | pull_request_opened: 16 | help: false 17 | summary: false # turned off by default 18 | code_review: true 19 | ignore_patterns: [] 20 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bootc-devenv-debian", 3 | // TODO override this back to prod image 4 | "image": "ghcr.io/bootc-dev/devenv-debian", 5 | "customizations": { 6 | "vscode": { 7 | // Abitrary, but most of our code is in one of these two 8 | "extensions": [ 9 | "rust-lang.rust-analyzer", 10 | "golang.Go" 11 | ] 12 | } 13 | }, 14 | "features": {}, 15 | "runArgs": [ 16 | // Because we want to be able to run podman and also use e.g. /dev/kvm 17 | // among other things 18 | "--privileged" 19 | ], 20 | "postCreateCommand": { 21 | // Our init script 22 | "devenv-init": "sudo /usr/local/bin/devenv-init.sh" 23 | }, 24 | "remoteEnv": { 25 | "PATH": "${containerEnv:PATH}:/usr/local/cargo/bin" 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /.github/actions/setup-rust/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Setup Rust' 2 | description: 'Install Rust toolchain with caching and nextest' 3 | runs: 4 | using: 'composite' 5 | steps: 6 | - name: Install Rust toolchain 7 | uses: dtolnay/rust-toolchain@stable 8 | - name: Install nextest 9 | uses: taiki-e/install-action@v2 10 | with: 11 | tool: nextest 12 | - name: Setup Rust cache 13 | uses: Swatinem/rust-cache@v2 14 | with: 15 | cache-all-crates: true 16 | # Only generate caches on push to git main 17 | save-if: ${{ github.ref == 'refs/heads/main' }} 18 | # Suppress actually using the cache for builds running from 19 | # git main so that we avoid incremental compilation bugs 20 | lookup-only: ${{ github.ref == 
'refs/heads/main' }} 21 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Instructions for AI agents 4 | 5 | ## CRITICAL instructions for generating commits 6 | 7 | ### Signed-off-by 8 | 9 | Human review is required for all code that is generated 10 | or assisted by a large language model. If you 11 | are a LLM, you MUST NOT include a `Signed-off-by` 12 | on any automatically generated git commits. Only explicit 13 | human action or request should include a Signed-off-by. 14 | If for example you automatically create a pull request 15 | and the DCO check fails, tell the human to review 16 | the code and give them instructions on how to add 17 | a signoff. 18 | 19 | ### Attribution 20 | 21 | When generating substantial amounts of code, you SHOULD 22 | include an `Assisted-by: TOOLNAME (MODELNAME)`. For example, 23 | `Assisted-by: Goose (Sonnet 4.5)`. 24 | 25 | ## Follow other guidelines 26 | 27 | Look at the project README.md and look for guidelines 28 | related to contribution, such as a CONTRIBUTING.md 29 | and follow those. 30 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | description = "Interact with the github.com/containers/image library via skopeo" 3 | edition = "2021" 4 | license = "MIT OR Apache-2.0" 5 | name = "containers-image-proxy" 6 | readme = "README.md" 7 | repository = "https://github.com/containers/containers-image-proxy-rs" 8 | version = "0.9.0" 9 | rust-version = "1.70.0" 10 | 11 | [dependencies] 12 | futures-util = "0.3.13" 13 | # NOTE when bumping this in a semver-incompatible way, because we re-export it you 14 | # must also bump the semver of this project. 
15 | # See also https://github.com/youki-dev/oci-spec-rs/pull/288 16 | oci-spec = "0.8.2" 17 | rustix = { version = "1.0", features = ["process", "fs", "net"] } 18 | serde = { features = ["derive"], version = "1.0.125" } 19 | serde_json = "1.0.64" 20 | semver = "1.0.4" 21 | thiserror = "2" 22 | tokio = { features = ["fs", "io-util", "macros", "process", "rt", "sync"], version = "1" } 23 | tracing = "0.1" 24 | # We support versions 2, 3 and 4 25 | cap-std-ext = ">= 2.0, <= 4.0" 26 | itertools = "0.14.0" 27 | 28 | [dev-dependencies] 29 | anyhow = "1.0" 30 | bytes = "1.5" 31 | clap = { version = "4.4", features = ["derive"] } 32 | tempfile = "3.20.0" 33 | 34 | [lib] 35 | path = "src/imageproxy.rs" 36 | -------------------------------------------------------------------------------- /.github/workflows/rebase.yml: -------------------------------------------------------------------------------- 1 | name: Automatic Rebase 2 | on: 3 | pull_request: 4 | types: [labeled] 5 | 6 | permissions: 7 | contents: read 8 | 9 | jobs: 10 | rebase: 11 | name: Rebase 12 | if: github.event.label.name == 'needs-rebase' 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Generate Actions Token 16 | id: token 17 | uses: actions/create-github-app-token@v2 18 | with: 19 | app-id: ${{ secrets.APP_ID }} 20 | private-key: ${{ secrets.APP_PRIVATE_KEY }} 21 | owner: ${{ github.repository_owner }} 22 | 23 | - name: Checkout 24 | uses: actions/checkout@v6 25 | with: 26 | token: ${{ steps.token.outputs.token }} 27 | fetch-depth: 0 28 | 29 | - name: Automatic Rebase 30 | uses: peter-evans/rebase@v4 31 | with: 32 | token: ${{ steps.token.outputs.token }} 33 | 34 | - name: Remove needs-rebase label 35 | if: always() 36 | uses: actions/github-script@v8 37 | with: 38 | github-token: ${{ steps.token.outputs.token }} 39 | script: | 40 | await github.rest.issues.removeLabel({ 41 | owner: context.repo.owner, 42 | repo: context.repo.repo, 43 | issue_number: context.issue.number, 44 | name: 'needs-rebase' 45 | 
}); 46 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | permissions: 4 | actions: read 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | env: 13 | CARGO_NET_RETRY: 10 14 | CARGO_TERM_COLOR: always 15 | CI: 1 16 | RUSTUP_MAX_RETRIES: 10 17 | RUST_BACKTRACE: short 18 | 19 | jobs: 20 | build-test: 21 | name: Build+Test 22 | runs-on: ubuntu-latest 23 | container: quay.io/coreos-assembler/fcos-buildroot:testing-devel 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | os: [ubuntu-latest] 28 | steps: 29 | - name: Checkout repository 30 | uses: actions/checkout@v6 31 | with: 32 | ref: ${{ github.event.pull_request.head.sha }} 33 | fetch-depth: 20 34 | - name: Cache Dependencies 35 | uses: Swatinem/rust-cache@v2 36 | - name: cargo fmt (check) 37 | run: cargo fmt -- --check -l 38 | - name: Compile (no features) 39 | run: cargo test --no-run 40 | - name: Compile (all features) 41 | run: cargo test --no-run --all-features 42 | - name: Test 43 | run: cargo test --all-features -- --nocapture --quiet 44 | - name: cargo clippy (non-gating) 45 | run: cargo clippy 46 | - name: Checkout ostree-rs-ext 47 | uses: actions/checkout@v6 48 | with: 49 | repository: ostreedev/ostree-rs-ext 50 | path: ostree-rs-ext 51 | fetch-depth: 20 52 | - name: Test ostree-rs-ext 53 | run: ./ci/test-ostree-rs-ext.sh 54 | -------------------------------------------------------------------------------- /.github/workflows/openssf-scorecard.yml: -------------------------------------------------------------------------------- 1 | # Upstream https://github.com/ossf/scorecard/blob/main/.github/workflows/scorecard-analysis.yml 2 | # Tweaked to not pin actions by SHA digest as I think that's overkill noisy security theater. 
3 | name: OpenSSF Scorecard analysis 4 | on: 5 | push: 6 | branches: 7 | - main 8 | 9 | permissions: read-all 10 | 11 | jobs: 12 | analysis: 13 | name: Scorecard analysis 14 | runs-on: ubuntu-24.04 15 | permissions: 16 | # Needed for Code scanning upload 17 | security-events: write 18 | # Needed for GitHub OIDC token if publish_results is true 19 | id-token: write 20 | 21 | steps: 22 | - name: "Checkout code" 23 | uses: actions/checkout@v6 24 | with: 25 | persist-credentials: false 26 | 27 | - name: "Run analysis" 28 | uses: ossf/scorecard-action@v2.4.3 29 | with: 30 | results_file: results.sarif 31 | results_format: sarif 32 | # Scorecard team runs a weekly scan of public GitHub repos, 33 | # see https://github.com/ossf/scorecard#public-data. 34 | # Setting `publish_results: true` helps us scale by leveraging your workflow to 35 | # extract the results instead of relying on our own infrastructure to run scans. 36 | # And it's free for you! 37 | publish_results: true 38 | 39 | - name: "Upload artifact" 40 | uses: actions/upload-artifact@v6 41 | with: 42 | name: SARIF file 43 | path: results.sarif 44 | retention-days: 5 45 | 46 | - name: "Upload to code-scanning" 47 | uses: github/codeql-action/upload-sarif@v4 48 | with: 49 | sarif_file: results.sarif 50 | 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rust bindings for accessing the Go containers/image stack 2 | 3 | This crate contains a Rust API that forks `/usr/bin/skopeo` and 4 | talks to it via a custom API. You can use it to fetch container 5 | images in a streaming fashion. 6 | 7 | At the time of this writing, you will need skopeo 1.6.0 or later. 8 | 9 | # Why? 10 | 11 | First, assume one is operating on a codebase that isn't Go, but wants 12 | to interact with container images - we can't just include the Go containers/image 13 | library. 
14 | 15 | The primary intended use case of this is for things like 16 | [ostree-containers](https://github.com/ostreedev/ostree-rs-ext/issues/18) 17 | where we're using container images to encapsulate host operating system 18 | updates, but we don't want to involve the [containers/image](https://github.com/containers/image/) 19 | storage layer. 20 | 21 | What we *do* want from the containers/image library is support for things like 22 | signatures and offline mirroring. More on this below. 23 | 24 | Forgetting things like ostree exist for a second - imagine that you wanted to 25 | encapsulate a set of Debian/RPM/etc packages inside 26 | a container image to ship for package-based operating systems. You could use this to stream 27 | out the layer containing those packages and extract them directly, rather than serializing 28 | everything to disk in the containers/storage disk location, only to copy it out again and delete the first. 29 | 30 | Another theoretical use case could be something like [krustlet](https://github.com/deislabs/krustlet), 31 | which fetches WebAssembly blobs inside containers. Here again, we don't want to involve 32 | containers/storage. 33 | 34 | # Desired containers/image features 35 | 36 | There are e.g. Rust libraries like [dkregistry-rs](https://github.com/camallo/dkregistry-rs) and 37 | [oci-distribution](https://crates.io/crates/oci-distribution) and similar for other languages. 38 | 39 | However, the containers/image Go library has a lot of additional infrastructure 40 | that will impose a maintenance burden to replicate: 41 | 42 | - Signatures (`man containers-auth.json`) 43 | - Mirroring/renaming (`man containers-registries.conf`) 44 | - Support for `~/.docker/config.json` for authentication as well as `/run` 45 | 46 | # Status 47 | 48 | API is subject to change. 
49 | -------------------------------------------------------------------------------- /.github/actions/bootc-ubuntu-setup/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Bootc Ubuntu Setup' 2 | description: 'Default host setup' 3 | inputs: 4 | libvirt: 5 | description: 'Install libvirt and virtualization stack' 6 | required: false 7 | default: 'false' 8 | runs: 9 | using: 'composite' 10 | steps: 11 | # The default runners have TONS of crud on them... 12 | - name: Free up disk space on runner 13 | shell: bash 14 | run: | 15 | set -xeuo pipefail 16 | sudo df -h 17 | unwanted_pkgs=('^aspnetcore-.*' '^dotnet-.*' '^llvm-.*' 'php.*' '^mongodb-.*' '^mysql-.*' 18 | azure-cli google-chrome-stable firefox mono-devel) 19 | unwanted_dirs=(/usr/share/dotnet /opt/ghc /usr/local/lib/android /opt/hostedtoolcache/CodeQL) 20 | # Start background removal operations as systemd units; if this causes 21 | # races in the future around disk space we can look at waiting for cleanup 22 | # before starting further jobs, but right now we spent a lot of time waiting 23 | # on the network and scripts and such below, giving these plenty of time to run. 24 | n=0 25 | runcleanup() { 26 | sudo systemd-run -r -u action-cleanup-${n} -- "$@" 27 | n=$(($n + 1)) 28 | } 29 | runcleanup docker image prune --all --force 30 | for x in ${unwanted_dirs[@]}; do 31 | runcleanup rm -rf "$x" 32 | done 33 | # Apt removals in foreground, as we can't parallelize these 34 | for x in ${unwanted_pkgs[@]}; do 35 | /bin/time -f '%E %C' sudo apt-get remove -y $x 36 | done 37 | # We really want support for heredocs 38 | - name: Update podman and install just 39 | shell: bash 40 | run: | 41 | set -eux 42 | # Require the runner is ubuntu-24.04 43 | IDV=$(. 
/usr/lib/os-release && echo ${ID}-${VERSION_ID}) 44 | test "${IDV}" = "ubuntu-24.04" 45 | # plucky is the next release 46 | echo 'deb http://azure.archive.ubuntu.com/ubuntu plucky universe main' | sudo tee /etc/apt/sources.list.d/plucky.list 47 | /bin/time -f '%E %C' sudo apt update 48 | # skopeo is currently older in plucky for some reason hence --allow-downgrades 49 | /bin/time -f '%E %C' sudo apt install -y --allow-downgrades crun/plucky podman/plucky skopeo/plucky just 50 | # This is the default on e.g. Fedora derivatives, but not Debian 51 | - name: Enable unprivileged /dev/kvm access 52 | shell: bash 53 | run: | 54 | set -xeuo pipefail 55 | echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules 56 | sudo udevadm control --reload-rules 57 | sudo udevadm trigger --name-match=kvm 58 | ls -l /dev/kvm 59 | # Used by a few workflows, but generally useful 60 | - name: Set architecture variable 61 | id: set_arch 62 | shell: bash 63 | run: echo "ARCH=$(arch)" >> $GITHUB_ENV 64 | # Install libvirt stack if requested 65 | - name: Install libvirt and virtualization stack 66 | if: ${{ inputs.libvirt == 'true' }} 67 | shell: bash 68 | run: | 69 | set -xeuo pipefail 70 | export BCVK_VERSION=0.9.0 71 | # see https://github.com/bootc-dev/bcvk/issues/176 72 | /bin/time -f '%E %C' sudo apt install -y libkrb5-dev pkg-config libvirt-dev genisoimage qemu-utils qemu-kvm virtiofsd libvirt-daemon-system python3-virt-firmware 73 | # Something in the stack is overriding this, but we want session right now for bcvk 74 | echo LIBVIRT_DEFAULT_URI=qemu:///session >> $GITHUB_ENV 75 | td=$(mktemp -d) 76 | cd $td 77 | # Install bcvk 78 | target=bcvk-$(arch)-unknown-linux-gnu 79 | /bin/time -f '%E %C' curl -LO https://github.com/bootc-dev/bcvk/releases/download/v${BCVK_VERSION}/${target}.tar.gz 80 | tar xzf ${target}.tar.gz 81 | sudo install -T ${target} /usr/bin/bcvk 82 | cd - 83 | rm -rf "$td" 84 | 85 | # Also bump the default 
fd limit as a workaround for https://github.com/bootc-dev/bcvk/issues/65 86 | sudo sed -i -e 's,^\* hard nofile 65536,* hard nofile 524288,' /etc/security/limits.conf 87 | - name: Cleanup status 88 | shell: bash 89 | run: | 90 | set -xeuo pipefail 91 | systemctl list-units 'action-cleanup*' 92 | df -h 93 | -------------------------------------------------------------------------------- /examples/client.rs: -------------------------------------------------------------------------------- 1 | use std::io::Write; 2 | 3 | use anyhow::Result; 4 | use clap::Parser; 5 | use containers_image_proxy::ImageProxyConfig; 6 | use oci_spec::image::{Digest, ImageManifest}; 7 | use tokio::io::AsyncReadExt; 8 | 9 | #[derive(clap::Parser, Debug)] 10 | struct CommonOpts { 11 | /// Emit debugging to stderr 12 | #[clap(long)] 13 | debug: bool, 14 | 15 | /// Disable TLS verification 16 | #[clap(long)] 17 | insecure: bool, 18 | } 19 | 20 | #[derive(clap::Parser, Debug)] 21 | struct GetMetadataOpts { 22 | #[clap(flatten)] 23 | common: CommonOpts, 24 | 25 | /// The skopeo-style transport:image reference 26 | reference: String, 27 | } 28 | 29 | #[derive(clap::Parser, Debug)] 30 | struct GetBlobOpts { 31 | #[clap(flatten)] 32 | common: CommonOpts, 33 | 34 | /// The skopeo-style transport:image reference 35 | reference: String, 36 | 37 | /// The digest of the target blob to fetch 38 | digest: Digest, 39 | 40 | /// The size of the blob to fetch 41 | size: u64, 42 | } 43 | 44 | #[derive(clap::Parser, Debug)] 45 | struct FetchContainerToDevNullOpts { 46 | #[clap(flatten)] 47 | metaopts: GetMetadataOpts, 48 | 49 | /// Use the "raw" path for fetching blobs 50 | #[clap(long)] 51 | raw_blobs: bool, 52 | } 53 | 54 | /// Simple program to greet a person 55 | #[derive(clap::Parser, Debug)] 56 | #[command(version, about, long_about = None)] 57 | enum Opt { 58 | GetMetadata(GetMetadataOpts), 59 | GetBlob(GetBlobOpts), 60 | GetBlobRaw(GetBlobOpts), 61 | FetchContainerToDevNull(FetchContainerToDevNullOpts), 
62 | } 63 | 64 | #[derive(serde::Serialize, Debug)] 65 | struct Metadata { 66 | digest: String, 67 | manifest: ImageManifest, 68 | } 69 | 70 | impl CommonOpts { 71 | fn to_config(self) -> ImageProxyConfig { 72 | let mut r = ImageProxyConfig::default(); 73 | if self.debug { 74 | r.debug = true; 75 | } 76 | if self.insecure { 77 | r.insecure_skip_tls_verification = Some(true) 78 | } 79 | r 80 | } 81 | } 82 | 83 | async fn get_metadata(o: GetMetadataOpts) -> Result<()> { 84 | let config = o.common.to_config(); 85 | let proxy = containers_image_proxy::ImageProxy::new_with_config(config).await?; 86 | let img = proxy.open_image(&o.reference).await?; 87 | let (digest, manifest) = proxy.fetch_manifest(&img).await?; 88 | let metadata = Metadata { digest, manifest }; 89 | serde_json::to_writer_pretty(&mut std::io::stdout().lock(), &metadata)?; 90 | Ok(()) 91 | } 92 | 93 | async fn get_blob(o: GetBlobOpts) -> Result<()> { 94 | let config = o.common.to_config(); 95 | let proxy = containers_image_proxy::ImageProxy::new_with_config(config).await?; 96 | let img = proxy.open_image(&o.reference).await?; 97 | let (mut blob, driver) = proxy.get_blob(&img, &o.digest, o.size).await?; 98 | 99 | let mut stdout = std::io::stdout().lock(); 100 | let reader = async move { 101 | let mut buffer = [0u8; 8192]; 102 | loop { 103 | let n = blob.read(&mut buffer).await?; 104 | if n == 0 { 105 | return anyhow::Ok(()); 106 | } 107 | stdout.write_all(&buffer[..n])?; 108 | } 109 | }; 110 | 111 | let (a, b) = tokio::join!(reader, driver); 112 | a?; 113 | b?; 114 | Ok(()) 115 | } 116 | 117 | async fn get_blob_raw(o: GetBlobOpts) -> Result<()> { 118 | let config = o.common.to_config(); 119 | let proxy = containers_image_proxy::ImageProxy::new_with_config(config).await?; 120 | let img = proxy.open_image(&o.reference).await?; 121 | let (_, mut datafd, err) = proxy.get_raw_blob(&img, &o.digest).await?; 122 | 123 | let mut stdout = std::io::stdout().lock(); 124 | let reader = async move { 125 | let mut 
buffer = [0u8; 8192]; 126 | loop { 127 | let n = datafd.read(&mut buffer).await?; 128 | if n == 0 { 129 | return anyhow::Ok(()); 130 | } 131 | stdout.write_all(&buffer[..n])?; 132 | } 133 | }; 134 | 135 | let (a, b) = tokio::join!(reader, err); 136 | a?; 137 | b?; 138 | Ok(()) 139 | } 140 | 141 | async fn fetch_container_to_devnull(o: FetchContainerToDevNullOpts) -> Result<()> { 142 | let config = o.metaopts.common.to_config(); 143 | let proxy = containers_image_proxy::ImageProxy::new_with_config(config).await?; 144 | let img = &proxy.open_image(&o.metaopts.reference).await?; 145 | let manifest = proxy.fetch_manifest(img).await?.1; 146 | for layer in manifest.layers() { 147 | let mut devnull = tokio::io::sink(); 148 | if o.raw_blobs { 149 | let (_, mut blob, err) = proxy.get_raw_blob(img, layer.digest()).await?; 150 | let copier = tokio::io::copy(&mut blob, &mut devnull); 151 | let (copier, err) = tokio::join!(copier, err); 152 | copier?; 153 | err?; 154 | } else { 155 | let (mut blob, driver) = proxy.get_descriptor(img, layer).await?; 156 | let copier = tokio::io::copy(&mut blob, &mut devnull); 157 | let (copier, driver) = tokio::join!(copier, driver); 158 | copier?; 159 | driver?; 160 | } 161 | } 162 | Ok(()) 163 | } 164 | 165 | async fn run() -> Result<()> { 166 | match Opt::parse() { 167 | Opt::GetMetadata(o) => get_metadata(o).await, 168 | Opt::GetBlob(o) => get_blob(o).await, 169 | Opt::GetBlobRaw(o) => get_blob_raw(o).await, 170 | Opt::FetchContainerToDevNull(o) => fetch_container_to_devnull(o).await, 171 | } 172 | } 173 | 174 | #[tokio::main(flavor = "current_thread")] 175 | async fn main() { 176 | if let Err(e) = run().await { 177 | eprintln!("{:#}", e); 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | https://www.apache.org/licenses/ 5 | 6 | TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Licensed under the Apache License, Version 2.0 (the "License"); 180 | you may not use this file except in compliance with the License. 181 | You may obtain a copy of the License at 182 | 183 | https://www.apache.org/licenses/LICENSE-2.0 184 | 185 | Unless required by applicable law or agreed to in writing, software 186 | distributed under the License is distributed on an "AS IS" BASIS, 187 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
188 | See the License for the specific language governing permissions and 189 | limitations under the License. 190 | -------------------------------------------------------------------------------- /src/imageproxy.rs: -------------------------------------------------------------------------------- 1 | //! Run skopeo as a subprocess to fetch container images. 2 | //! 3 | //! This allows fetching a container image manifest and layers in a streaming fashion. 4 | //! 5 | //! More information: 6 | 7 | use cap_std_ext::prelude::CapStdExtCommandExt; 8 | use cap_std_ext::{cap_std, cap_tempfile}; 9 | use futures_util::{Future, FutureExt}; 10 | use itertools::Itertools; 11 | use oci_spec::image::{Descriptor, Digest}; 12 | use serde::{Deserialize, Serialize}; 13 | use std::fs::File; 14 | use std::iter::FusedIterator; 15 | use std::num::NonZeroU32; 16 | use std::ops::Range; 17 | use std::os::fd::OwnedFd; 18 | use std::os::unix::prelude::CommandExt; 19 | use std::path::PathBuf; 20 | use std::pin::Pin; 21 | use std::process::{Command, Stdio}; 22 | use std::sync::{Arc, Mutex, OnceLock}; 23 | use thiserror::Error; 24 | use tokio::io::{AsyncBufRead, AsyncReadExt}; 25 | use tokio::sync::Mutex as AsyncMutex; 26 | use tokio::task::JoinError; 27 | use tracing::instrument; 28 | 29 | /// Errors returned by this crate. 
30 | #[derive(Error, Debug)] 31 | #[non_exhaustive] 32 | pub enum Error { 33 | #[error("i/o error: {0}")] 34 | /// An input/output error 35 | Io(#[from] std::io::Error), 36 | #[error("skopeo spawn error: {0}")] 37 | /// An error spawning skopeo 38 | SkopeoSpawnError(#[source] std::io::Error), 39 | #[error("serialization error: {0}")] 40 | /// Returned when serialization or deserialization fails 41 | SerDe(#[from] serde_json::Error), 42 | /// The proxy failed to initiate a request 43 | #[error("failed to invoke method {method}: {error}")] 44 | RequestInitiationFailure { method: Box, error: Box }, 45 | /// An error returned from the remote proxy 46 | #[error("proxy request returned error: {0}")] 47 | RequestReturned(Box), 48 | #[error("semantic version error: {0}")] 49 | SemanticVersion(#[from] semver::Error), 50 | #[error("proxy too old (requested={requested_version} found={found_version}) error")] 51 | /// The proxy doesn't support the requested semantic version 52 | ProxyTooOld { 53 | requested_version: Box, 54 | found_version: Box, 55 | }, 56 | #[error("configuration error: {0}")] 57 | /// Conflicting or missing configuration 58 | Configuration(Box), 59 | #[error("other error: {0}")] 60 | /// An unknown other error 61 | Other(Box), 62 | } 63 | 64 | impl Error { 65 | pub(crate) fn new_other(e: impl Into>) -> Self { 66 | Self::Other(e.into()) 67 | } 68 | } 69 | 70 | /// Errors returned by get_raw_blob 71 | #[derive(Error, Debug)] 72 | #[non_exhaustive] 73 | pub enum GetBlobError { 74 | /// A client may reasonably retry on this type of error. 75 | #[error("retryable error: {0}")] 76 | Retryable(Box), 77 | #[error("other error: {0}")] 78 | /// An unknown other error 79 | Other(Box), 80 | } 81 | 82 | impl From for Error { 83 | fn from(value: rustix::io::Errno) -> Self { 84 | Self::Io(value.into()) 85 | } 86 | } 87 | 88 | /// The error type returned from this crate. 
89 | pub type Result = std::result::Result; 90 | 91 | /// Re-export because we use this in our public APIs 92 | pub use oci_spec; 93 | 94 | /// File descriptor range which is reserved for passing data down into the proxy; 95 | /// avoid configuring the command to use files in this range. (Also, stdin is 96 | /// reserved) 97 | pub const RESERVED_FD_RANGE: Range = 100..200; 98 | 99 | // This is defined in skopeo; maximum size of JSON we will read/write. 100 | // Note that payload data (non-metadata) should go over a pipe file descriptor. 101 | const MAX_MSG_SIZE: usize = 32 * 1024; 102 | 103 | fn base_proto_version() -> &'static semver::VersionReq { 104 | // Introduced in https://github.com/containers/skopeo/pull/1523 105 | static BASE_PROTO_VERSION: OnceLock = OnceLock::new(); 106 | BASE_PROTO_VERSION.get_or_init(|| semver::VersionReq::parse("0.2.3").unwrap()) 107 | } 108 | 109 | fn layer_info_proto_version() -> &'static semver::VersionReq { 110 | static LAYER_INFO_PROTO_VERSION: OnceLock = OnceLock::new(); 111 | LAYER_INFO_PROTO_VERSION.get_or_init(|| semver::VersionReq::parse("0.2.5").unwrap()) 112 | } 113 | 114 | fn layer_info_piped_proto_version() -> &'static semver::VersionReq { 115 | static LAYER_INFO_PROTO_VERSION: OnceLock = OnceLock::new(); 116 | LAYER_INFO_PROTO_VERSION.get_or_init(|| semver::VersionReq::parse("0.2.7").unwrap()) 117 | } 118 | 119 | #[derive(Serialize)] 120 | struct Request { 121 | method: String, 122 | args: Vec, 123 | } 124 | 125 | impl Request { 126 | fn new(method: &str, args: T) -> Self 127 | where 128 | T: IntoIterator, 129 | I: Into, 130 | { 131 | let args: Vec<_> = args.into_iter().map(|v| v.into()).collect(); 132 | Self { 133 | method: method.to_string(), 134 | args, 135 | } 136 | } 137 | 138 | fn new_bare(method: &str) -> Self { 139 | Self { 140 | method: method.to_string(), 141 | args: vec![], 142 | } 143 | } 144 | } 145 | 146 | #[derive(Deserialize)] 147 | struct Reply { 148 | success: bool, 149 | error: String, 150 | pipeid: 
u32, 151 | value: serde_json::Value, 152 | } 153 | 154 | type ChildFuture = Pin< 155 | Box< 156 | dyn Future, JoinError>> 157 | + Send, 158 | >, 159 | >; 160 | 161 | /// Manage a child process proxy to fetch container images. 162 | pub struct ImageProxy { 163 | sockfd: Arc>, 164 | childwait: Arc>, 165 | protover: semver::Version, 166 | } 167 | 168 | impl std::fmt::Debug for ImageProxy { 169 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 170 | f.debug_struct("ImageProxy").finish() 171 | } 172 | } 173 | 174 | /// Opaque identifier for an image 175 | #[derive(Debug, PartialEq, Eq)] 176 | pub struct OpenedImage(u32); 177 | 178 | #[derive(Debug, PartialEq, Eq)] 179 | struct PipeId(NonZeroU32); 180 | 181 | impl PipeId { 182 | fn try_new(pipeid: u32) -> Option { 183 | Some(Self(NonZeroU32::new(pipeid)?)) 184 | } 185 | } 186 | 187 | /// Configuration for the proxy. 188 | #[derive(Debug, Default)] 189 | pub struct ImageProxyConfig { 190 | /// Path to container auth file; equivalent to `skopeo --authfile`. 191 | /// This conflicts with [`auth_data`]. 192 | pub authfile: Option, 193 | 194 | /// Data stream for container auth. This conflicts with [`authfile`]. 195 | pub auth_data: Option, 196 | 197 | /// Do not use default container authentication paths; equivalent to `skopeo --no-creds`. 198 | /// 199 | /// Defaults to `false`; in other words, use the default file paths from `man containers-auth.json`. 200 | pub auth_anonymous: bool, 201 | 202 | // Directory with certificates (*.crt, *.cert, *.key) used to connect to registry 203 | // Equivalent to `skopeo --cert-dir` 204 | pub certificate_directory: Option, 205 | 206 | /// Decryption keys to decrypt an encrypted container image. 207 | /// equivalent to `skopeo copy --decryption-key ` 208 | pub decryption_keys: Option>, 209 | 210 | /// If set, disable TLS verification. Equivalent to `skopeo --tls-verify=false`. 
211 | pub insecure_skip_tls_verification: Option, 212 | 213 | /// Prefix to add to the user agent string. Equivalent to `skopeo --user-agent-prefix`. 214 | /// The resulting user agent will be in the format "prefix skopeo/version". 215 | /// This option is only used if the installed skopeo version supports it. 216 | pub user_agent_prefix: Option, 217 | 218 | /// If enabled, propagate debug-logging level from the proxy via stderr to the 219 | /// current process' stderr. Note than when enabled, this also means that standard 220 | /// error will no longer be captured. 221 | pub debug: bool, 222 | 223 | /// Provide a configured [`std::process::Command`] instance. 224 | /// 225 | /// This allows configuring aspects of the resulting child `skopeo` process. 226 | /// The intention of this hook is to allow the caller to use e.g. 227 | /// `systemd-run` or equivalent containerization tools. For example you 228 | /// can set up a command whose arguments are `systemd-run -Pq -p DynamicUser=yes -- skopeo`. 229 | /// You can also set up arbitrary aspects of the child via e.g. 230 | /// [`current_dir`] [`pre_exec`]. 231 | /// 232 | /// [`current_dir`]: https://doc.rust-lang.org/std/process/struct.Command.html#method.current_dir 233 | /// [`pre_exec`]: https://doc.rust-lang.org/std/os/unix/process/trait.CommandExt.html#tymethod.pre_exec 234 | /// 235 | /// The default is to wrap via util-linux `setpriv --pdeathsig SIGTERM -- skopeo`, 236 | /// which on Linux binds the lifecycle of the child process to the parent. 237 | /// 238 | /// Note that you *must* add `skopeo` as the primary argument or 239 | /// indirectly. However, all other command line options including 240 | /// `experimental-image-proxy` will be injected by this library. 241 | /// You may use a different command name from `skopeo` if your 242 | /// application has set up a compatible copy, e.g. 
`/usr/lib/myapp/my-private-skopeo`/ 243 | pub skopeo_cmd: Option, 244 | } 245 | 246 | /// Check if skopeo supports --user-agent-prefix by probing --help output 247 | fn supports_user_agent_prefix() -> bool { 248 | static SUPPORTS_USER_AGENT: OnceLock = OnceLock::new(); 249 | *SUPPORTS_USER_AGENT.get_or_init(|| { 250 | Command::new("skopeo") 251 | .arg("--help") 252 | .stdout(Stdio::piped()) 253 | .stderr(Stdio::null()) 254 | .output() 255 | .ok() 256 | .and_then(|output| { 257 | String::from_utf8(output.stdout) 258 | .ok() 259 | .map(|help| help.contains("--user-agent-prefix")) 260 | }) 261 | .unwrap_or(false) 262 | }) 263 | } 264 | 265 | impl TryFrom for Command { 266 | type Error = Error; 267 | 268 | fn try_from(config: ImageProxyConfig) -> Result { 269 | let debug = config.debug || std::env::var_os("CONTAINERS_IMAGE_PROXY_DEBUG").is_some(); 270 | let mut allocated_fds = RESERVED_FD_RANGE.clone(); 271 | let mut alloc_fd = || { 272 | allocated_fds.next().ok_or_else(|| { 273 | Error::Other("Ran out of reserved file descriptors for child".into()) 274 | }) 275 | }; 276 | 277 | // By default, we set up pdeathsig to "lifecycle bind" the child process to us. 278 | let mut c = config.skopeo_cmd.unwrap_or_else(|| { 279 | let mut c = std::process::Command::new("skopeo"); 280 | unsafe { 281 | c.pre_exec(|| { 282 | Ok(rustix::process::set_parent_process_death_signal(Some( 283 | rustix::process::Signal::TERM, 284 | ))?) 
285 | }); 286 | } 287 | c 288 | }); 289 | c.arg("experimental-image-proxy"); 290 | if debug { 291 | c.arg("--debug"); 292 | } 293 | let auth_option_count = [ 294 | config.authfile.is_some(), 295 | config.auth_data.is_some(), 296 | config.auth_anonymous, 297 | ] 298 | .into_iter() 299 | .filter(|&x| x) 300 | .count(); 301 | if auth_option_count > 1 { 302 | // This is a programmer error really 303 | return Err(Error::Configuration( 304 | "Conflicting authentication options".into(), 305 | )); 306 | } 307 | if let Some(authfile) = config.authfile { 308 | c.arg("--authfile"); 309 | c.arg(authfile); 310 | } else if let Some(mut auth_data) = config.auth_data.map(std::io::BufReader::new) { 311 | // If we get the authentication data as a file, we always copy it to a new temporary file under 312 | // the assumption that the caller provided it this way to aid in privilege separation where 313 | // the file is only readable to privileged code. 314 | let target_fd = alloc_fd()?; 315 | let tmpd = &cap_std::fs::Dir::open_ambient_dir("/tmp", cap_std::ambient_authority())?; 316 | let mut tempfile = 317 | cap_tempfile::TempFile::new_anonymous(tmpd).map(std::io::BufWriter::new)?; 318 | std::io::copy(&mut auth_data, &mut tempfile)?; 319 | let tempfile = tempfile 320 | .into_inner() 321 | .map_err(|e| e.into_error())? 
322 | .into_std(); 323 | let fd = std::sync::Arc::new(tempfile.into()); 324 | c.take_fd_n(fd, target_fd); 325 | c.arg("--authfile"); 326 | c.arg(format!("/proc/self/fd/{target_fd}")); 327 | } else if config.auth_anonymous { 328 | c.arg("--no-creds"); 329 | } 330 | 331 | if let Some(certificate_directory) = config.certificate_directory { 332 | c.arg("--cert-dir"); 333 | c.arg(certificate_directory); 334 | } 335 | 336 | if let Some(decryption_keys) = config.decryption_keys { 337 | for decryption_key in &decryption_keys { 338 | c.arg("--decryption-key"); 339 | c.arg(decryption_key); 340 | } 341 | } 342 | 343 | if config.insecure_skip_tls_verification.unwrap_or_default() { 344 | c.arg("--tls-verify=false"); 345 | } 346 | 347 | // Add user agent prefix if provided and supported by skopeo 348 | if let Some(user_agent_prefix) = config.user_agent_prefix { 349 | if supports_user_agent_prefix() { 350 | c.arg("--user-agent-prefix"); 351 | c.arg(user_agent_prefix); 352 | } 353 | } 354 | 355 | c.stdout(Stdio::null()); 356 | if !debug { 357 | c.stderr(Stdio::piped()); 358 | } 359 | Ok(c) 360 | } 361 | } 362 | 363 | /// BlobInfo collects known information about a blob 364 | #[derive(Debug, serde::Deserialize)] 365 | pub struct ConvertedLayerInfo { 366 | /// Uncompressed digest of a layer; for more information, see 367 | /// https://github.com/opencontainers/image-spec/blob/main/config.md#layer-diffid 368 | pub digest: Digest, 369 | 370 | /// Size of blob 371 | pub size: u64, 372 | 373 | /// Mediatype of blob 374 | pub media_type: oci_spec::image::MediaType, 375 | } 376 | 377 | /// A single fd; requires invoking FinishPipe 378 | #[derive(Debug)] 379 | struct FinishPipe { 380 | pipeid: PipeId, 381 | datafd: OwnedFd, 382 | } 383 | 384 | /// There is a data FD and an error FD. The error FD will be JSON. 
385 | #[derive(Debug)] 386 | struct DualFds { 387 | datafd: OwnedFd, 388 | errfd: OwnedFd, 389 | } 390 | 391 | /// Helper trait for parsing the pipeid and/or file descriptors of a reply 392 | trait FromReplyFds: Send + 'static 393 | where 394 | Self: Sized, 395 | { 396 | fn from_reply( 397 | iterable: impl IntoIterator, 398 | pipeid: u32, 399 | ) -> Result; 400 | } 401 | 402 | /// No file descriptors or pipeid expected 403 | impl FromReplyFds for () { 404 | fn from_reply(fds: impl IntoIterator, pipeid: u32) -> Result { 405 | if fds.into_iter().next().is_some() { 406 | return Err(Error::Other("expected no fds".into())); 407 | } 408 | if pipeid != 0 { 409 | return Err(Error::Other("unexpected pipeid".into())); 410 | } 411 | Ok(()) 412 | } 413 | } 414 | 415 | /// A FinishPipe instance 416 | impl FromReplyFds for FinishPipe { 417 | fn from_reply(fds: impl IntoIterator, pipeid: u32) -> Result { 418 | let Some(pipeid) = PipeId::try_new(pipeid) else { 419 | return Err(Error::Other("Expected pipeid for FinishPipe".into())); 420 | }; 421 | let datafd = fds 422 | .into_iter() 423 | .exactly_one() 424 | .map_err(|_| Error::Other("Expected exactly one fd for FinishPipe".into()))?; 425 | Ok(Self { pipeid, datafd }) 426 | } 427 | } 428 | 429 | /// A DualFds instance 430 | impl FromReplyFds for DualFds { 431 | fn from_reply(fds: impl IntoIterator, pipeid: u32) -> Result { 432 | if pipeid != 0 { 433 | return Err(Error::Other("Unexpected pipeid with DualFds".into())); 434 | } 435 | let [datafd, errfd] = fds 436 | .into_iter() 437 | .collect_array() 438 | .ok_or_else(|| Error::Other("Expected two fds for DualFds".into()))?; 439 | Ok(Self { datafd, errfd }) 440 | } 441 | } 442 | 443 | impl ImageProxy { 444 | /// Create an image proxy that fetches the target image, using default configuration. 
445 | pub async fn new() -> Result { 446 | Self::new_with_config(Default::default()).await 447 | } 448 | 449 | /// Create an image proxy that fetches the target image 450 | #[instrument] 451 | pub async fn new_with_config(config: ImageProxyConfig) -> Result { 452 | let mut c = Command::try_from(config)?; 453 | let (mysock, theirsock) = rustix::net::socketpair( 454 | rustix::net::AddressFamily::UNIX, 455 | rustix::net::SocketType::SEQPACKET, 456 | rustix::net::SocketFlags::CLOEXEC, 457 | None, 458 | )?; 459 | c.stdin(Stdio::from(theirsock)); 460 | let child = match c.spawn() { 461 | Ok(c) => c, 462 | Err(error) => return Err(Error::SkopeoSpawnError(error)), 463 | }; 464 | tracing::debug!("Spawned skopeo pid={:?}", child.id()); 465 | // Here we use std sync API via thread because tokio installs 466 | // a SIGCHLD handler which can conflict with e.g. the glib one 467 | // which may also be in process. 468 | // xref https://github.com/tokio-rs/tokio/issues/3520#issuecomment-968985861 469 | let childwait = tokio::task::spawn_blocking(move || child.wait_with_output()); 470 | let sockfd = Arc::new(Mutex::new(mysock)); 471 | 472 | let mut r = Self { 473 | sockfd, 474 | childwait: Arc::new(AsyncMutex::new(Box::pin(childwait))), 475 | protover: semver::Version::new(0, 0, 0), 476 | }; 477 | 478 | // Verify semantic version 479 | let protover: String = r.impl_request("Initialize", [(); 0]).await?; 480 | tracing::debug!("Remote protocol version: {protover}"); 481 | let protover = semver::Version::parse(protover.as_str())?; 482 | // Previously we had a feature to opt-in to requiring newer versions using `if cfg!()`. 483 | let supported = base_proto_version(); 484 | if !supported.matches(&protover) { 485 | return Err(Error::ProxyTooOld { 486 | requested_version: protover.to_string().into(), 487 | found_version: supported.to_string().into(), 488 | }); 489 | } 490 | r.protover = protover; 491 | 492 | Ok(r) 493 | } 494 | 495 | /// Create and send a request. 
Should only be used by impl_request. 496 | async fn impl_request_raw( 497 | sockfd: Arc>, 498 | req: Request, 499 | ) -> Result<(T, F)> { 500 | tracing::trace!("sending request {}", req.method.as_str()); 501 | // TODO: Investigate https://crates.io/crates/uds for SOCK_SEQPACKET tokio 502 | let r = tokio::task::spawn_blocking(move || { 503 | let sockfd = sockfd.lock().unwrap(); 504 | let sendbuf = serde_json::to_vec(&req)?; 505 | let sockfd = &*sockfd; 506 | rustix::net::send(sockfd, &sendbuf, rustix::net::SendFlags::empty())?; 507 | drop(sendbuf); 508 | let mut buf = [0u8; MAX_MSG_SIZE]; 509 | let mut cmsg_space: Vec> = 510 | vec![std::mem::MaybeUninit::uninit(); rustix::cmsg_space!(ScmRights(1))]; 511 | let mut cmsg_buffer = rustix::net::RecvAncillaryBuffer::new(cmsg_space.as_mut_slice()); 512 | let iov = std::io::IoSliceMut::new(buf.as_mut()); 513 | let mut iov = [iov]; 514 | let nread = rustix::net::recvmsg( 515 | sockfd, 516 | &mut iov, 517 | &mut cmsg_buffer, 518 | rustix::net::RecvFlags::CMSG_CLOEXEC, 519 | )? 520 | .bytes; 521 | let fdret = cmsg_buffer 522 | .drain() 523 | .filter_map(|m| match m { 524 | rustix::net::RecvAncillaryMessage::ScmRights(f) => Some(f), 525 | _ => None, 526 | }) 527 | .flatten(); 528 | let buf = &buf[..nread]; 529 | let reply: Reply = serde_json::from_slice(buf)?; 530 | if !reply.success { 531 | return Err(Error::RequestInitiationFailure { 532 | method: req.method.clone().into(), 533 | error: reply.error.into(), 534 | }); 535 | } 536 | let fds = FromReplyFds::from_reply(fdret, reply.pipeid)?; 537 | Ok((serde_json::from_value(reply.value)?, fds)) 538 | }) 539 | .await 540 | .map_err(|e| Error::Other(e.to_string().into()))??; 541 | tracing::trace!("completed request"); 542 | Ok(r) 543 | } 544 | 545 | /// Create a request that may return file descriptors, and also check for an unexpected 546 | /// termination of the child process. 
547 | #[instrument(skip(args))] 548 | async fn impl_request_with_fds< 549 | T: serde::de::DeserializeOwned + Send + 'static, 550 | F: FromReplyFds, 551 | >( 552 | &self, 553 | method: &str, 554 | args: impl IntoIterator>, 555 | ) -> Result<(T, F)> { 556 | let req = Self::impl_request_raw(Arc::clone(&self.sockfd), Request::new(method, args)); 557 | let mut childwait = self.childwait.lock().await; 558 | tokio::select! { 559 | r = req => { r } 560 | r = childwait.as_mut() => { 561 | let r = r.map_err(|e| Error::Other(e.to_string().into()))??; 562 | let stderr = String::from_utf8_lossy(&r.stderr); 563 | Err(Error::Other(format!("skopeo proxy unexpectedly exited during request method {}: {}\n{}", method, r.status, stderr).into())) 564 | } 565 | } 566 | } 567 | 568 | /// A synchronous invocation which does not return any file descriptors. 569 | async fn impl_request( 570 | &self, 571 | method: &str, 572 | args: impl IntoIterator>, 573 | ) -> Result { 574 | let (r, ()) = self.impl_request_with_fds(method, args).await?; 575 | Ok(r) 576 | } 577 | 578 | #[instrument] 579 | async fn finish_pipe(&self, pipeid: PipeId) -> Result<()> { 580 | tracing::debug!("closing pipe"); 581 | let (r, ()) = self 582 | .impl_request_with_fds("FinishPipe", [pipeid.0.get()]) 583 | .await?; 584 | Ok(r) 585 | } 586 | 587 | #[instrument] 588 | pub async fn open_image(&self, imgref: &str) -> Result { 589 | tracing::debug!("opening image"); 590 | let imgid = self.impl_request("OpenImage", [imgref]).await?; 591 | Ok(OpenedImage(imgid)) 592 | } 593 | 594 | #[instrument] 595 | pub async fn open_image_optional(&self, imgref: &str) -> Result> { 596 | tracing::debug!("opening image"); 597 | let imgid = self.impl_request("OpenImageOptional", [imgref]).await?; 598 | if imgid == 0 { 599 | Ok(None) 600 | } else { 601 | Ok(Some(OpenedImage(imgid))) 602 | } 603 | } 604 | 605 | #[instrument] 606 | pub async fn close_image(&self, img: &OpenedImage) -> Result<()> { 607 | self.impl_request("CloseImage", 
[img.0]).await 608 | } 609 | 610 | async fn read_finish_pipe(&self, pipe: FinishPipe) -> Result> { 611 | let fd = tokio::fs::File::from_std(std::fs::File::from(pipe.datafd)); 612 | let mut fd = tokio::io::BufReader::new(fd); 613 | let mut r = Vec::new(); 614 | let reader = fd.read_to_end(&mut r); 615 | let (nbytes, finish) = tokio::join!(reader, self.finish_pipe(pipe.pipeid)); 616 | finish?; 617 | assert_eq!(nbytes?, r.len()); 618 | Ok(r) 619 | } 620 | 621 | /// Fetch the manifest as raw bytes, converted to OCI if necessary. 622 | /// The original digest of the unconverted manifest is also returned. 623 | /// For more information on OCI manifests, see 624 | pub async fn fetch_manifest_raw_oci(&self, img: &OpenedImage) -> Result<(String, Vec)> { 625 | let (digest, pipefd) = self.impl_request_with_fds("GetManifest", [img.0]).await?; 626 | Ok((digest, self.read_finish_pipe(pipefd).await?)) 627 | } 628 | 629 | /// Fetch the manifest. 630 | /// For more information on OCI manifests, see 631 | pub async fn fetch_manifest( 632 | &self, 633 | img: &OpenedImage, 634 | ) -> Result<(String, oci_spec::image::ImageManifest)> { 635 | let (digest, raw) = self.fetch_manifest_raw_oci(img).await?; 636 | let manifest = serde_json::from_slice(&raw)?; 637 | Ok((digest, manifest)) 638 | } 639 | 640 | /// Fetch the config. 641 | /// For more information on OCI config, see 642 | pub async fn fetch_config_raw(&self, img: &OpenedImage) -> Result> { 643 | let ((), pipe) = self.impl_request_with_fds("GetFullConfig", [img.0]).await?; 644 | self.read_finish_pipe(pipe).await 645 | } 646 | 647 | /// Fetch the config. 648 | /// For more information on OCI config, see 649 | pub async fn fetch_config( 650 | &self, 651 | img: &OpenedImage, 652 | ) -> Result { 653 | let raw = self.fetch_config_raw(img).await?; 654 | serde_json::from_slice(&raw).map_err(Into::into) 655 | } 656 | 657 | /// Fetch a blob identified by e.g. `sha256:`. 
658 | /// 659 | /// 660 | /// The requested size and digest are verified (by the proxy process). 661 | /// 662 | /// Note that because of the implementation details of this function, you should 663 | /// [`futures::join!`] the returned futures instead of polling one after the other. The 664 | /// secondary "driver" future will only return once everything has been read from 665 | /// the reader future. 666 | #[instrument] 667 | pub async fn get_blob( 668 | &self, 669 | img: &OpenedImage, 670 | digest: &Digest, 671 | size: u64, 672 | ) -> Result<( 673 | impl AsyncBufRead + Send + Unpin, 674 | impl Future> + Unpin + '_, 675 | )> { 676 | // For previous discussion on digest/size verification, see 677 | // https://github.com/cgwalters/container-image-proxy/issues/1#issuecomment-926712009 678 | tracing::debug!("fetching blob"); 679 | let args: Vec = 680 | vec![img.0.into(), digest.to_string().into(), size.into()]; 681 | // Note that size may be -1 here if e.g. the remote registry doesn't give a Content-Length 682 | // for example. 683 | // We have always validated the size later (in FinishPipe) so out of conservatism we 684 | // just ignore the size here. 
685 | let (_bloblen, pipe): (serde_json::Number, FinishPipe) = 686 | self.impl_request_with_fds("GetBlob", args).await?; 687 | let fd = tokio::fs::File::from_std(std::fs::File::from(pipe.datafd)); 688 | let fd = tokio::io::BufReader::new(fd); 689 | let finish = Box::pin(self.finish_pipe(pipe.pipeid)); 690 | Ok((fd, finish)) 691 | } 692 | 693 | async fn read_blob_error(fd: OwnedFd) -> std::result::Result<(), GetBlobError> { 694 | let fd = tokio::fs::File::from_std(std::fs::File::from(fd)); 695 | let mut errfd = tokio::io::BufReader::new(fd); 696 | let mut buf = Vec::new(); 697 | errfd 698 | .read_to_end(&mut buf) 699 | .await 700 | .map_err(|e| GetBlobError::Other(e.to_string().into_boxed_str()))?; 701 | if buf.is_empty() { 702 | return Ok(()); 703 | } 704 | #[derive(Deserialize)] 705 | struct RemoteError { 706 | code: String, 707 | message: String, 708 | } 709 | let e: RemoteError = serde_json::from_slice(&buf) 710 | .map_err(|e| GetBlobError::Other(e.to_string().into_boxed_str()))?; 711 | match e.code.as_str() { 712 | // Actually this is OK 713 | "EPIPE" => Ok(()), 714 | "retryable" => Err(GetBlobError::Retryable(e.message.into_boxed_str())), 715 | _ => Err(GetBlobError::Other(e.message.into_boxed_str())), 716 | } 717 | } 718 | 719 | /// Fetch a blob identified by e.g. `sha256:`; does not perform 720 | /// any verification that the blob matches the digest. The size of the 721 | /// blob (if available) and a pipe file descriptor are returned. 
722 | #[instrument] 723 | pub async fn get_raw_blob( 724 | &self, 725 | img: &OpenedImage, 726 | digest: &Digest, 727 | ) -> Result<( 728 | Option, 729 | tokio::fs::File, 730 | impl Future> + Unpin + '_, 731 | )> { 732 | tracing::debug!("fetching blob"); 733 | let args: Vec = vec![img.0.into(), digest.to_string().into()]; 734 | let (bloblen, fds): (i64, DualFds) = self.impl_request_with_fds("GetRawBlob", args).await?; 735 | // See the GetBlob case, we have a best-effort attempt to return the size, but it might not be known 736 | let bloblen = u64::try_from(bloblen).ok(); 737 | let fd = tokio::fs::File::from_std(std::fs::File::from(fds.datafd)); 738 | let err = Self::read_blob_error(fds.errfd).boxed(); 739 | Ok((bloblen, fd, err)) 740 | } 741 | 742 | /// Fetch a descriptor. The requested size and digest are verified (by the proxy process). 743 | #[instrument] 744 | pub async fn get_descriptor( 745 | &self, 746 | img: &OpenedImage, 747 | descriptor: &Descriptor, 748 | ) -> Result<( 749 | impl AsyncBufRead + Send + Unpin, 750 | impl Future> + Unpin + '_, 751 | )> { 752 | self.get_blob(img, descriptor.digest(), descriptor.size()) 753 | .await 754 | } 755 | 756 | ///Returns data that can be used to find the "diffid" corresponding to a particular layer. 757 | #[instrument] 758 | pub async fn get_layer_info( 759 | &self, 760 | img: &OpenedImage, 761 | ) -> Result>> { 762 | tracing::debug!("Getting layer info"); 763 | if layer_info_piped_proto_version().matches(&self.protover) { 764 | let ((), pipe) = self 765 | .impl_request_with_fds("GetLayerInfoPiped", [img.0]) 766 | .await?; 767 | let buf = self.read_finish_pipe(pipe).await?; 768 | return Ok(Some(serde_json::from_slice(&buf)?)); 769 | } 770 | if !layer_info_proto_version().matches(&self.protover) { 771 | return Ok(None); 772 | } 773 | let layers = self.impl_request("GetLayerInfo", [img.0]).await?; 774 | Ok(Some(layers)) 775 | } 776 | 777 | /// Close the connection and wait for the child process to exit successfully. 
778 | #[instrument] 779 | pub async fn finalize(self) -> Result<()> { 780 | let _ = &self; 781 | let req = Request::new_bare("Shutdown"); 782 | let sendbuf = serde_json::to_vec(&req)?; 783 | // SAFETY: Only panics if a worker thread already panic'd 784 | let sockfd = Arc::try_unwrap(self.sockfd).unwrap().into_inner().unwrap(); 785 | rustix::net::send(sockfd, &sendbuf, rustix::net::SendFlags::empty())?; 786 | drop(sendbuf); 787 | tracing::debug!("sent shutdown request"); 788 | let mut childwait = self.childwait.lock().await; 789 | let output = childwait 790 | .as_mut() 791 | .await 792 | .map_err(|e| Error::new_other(e.to_string()))??; 793 | if !output.status.success() { 794 | let stderr = String::from_utf8_lossy(&output.stderr); 795 | return Err(Error::RequestReturned( 796 | format!("proxy failed: {}\n{}", output.status, stderr).into(), 797 | )); 798 | } 799 | tracing::debug!("proxy exited successfully"); 800 | Ok(()) 801 | } 802 | } 803 | 804 | #[cfg(test)] 805 | mod tests { 806 | use std::io::{BufWriter, Seek, Write}; 807 | use std::os::fd::{AsRawFd, OwnedFd}; 808 | 809 | use super::*; 810 | use cap_std_ext::cap_std::fs::Dir; 811 | use rustix::fs::{memfd_create, MemfdFlags}; 812 | 813 | /// Check if we have skopeo 814 | fn check_skopeo() -> bool { 815 | static HAVE_SKOPEO: OnceLock = OnceLock::new(); 816 | *HAVE_SKOPEO.get_or_init(|| { 817 | Command::new("skopeo") 818 | .arg("--help") 819 | .stdout(Stdio::null()) 820 | .stderr(Stdio::null()) 821 | .status() 822 | .is_ok() 823 | }) 824 | } 825 | 826 | fn validate(c: Command, contains: &[&str], not_contains: &[&str]) { 827 | // Format via debug, because 828 | // https://doc.rust-lang.org/std/process/struct.Command.html#method.get_args 829 | // is experimental 830 | let d = format!("{:?}", c); 831 | for c in contains { 832 | assert!(d.contains(c), "{} missing {}", d, c); 833 | } 834 | for c in not_contains { 835 | assert!(!d.contains(c), "{} should not contain {}", d, c); 836 | } 837 | } 838 | 839 | #[test] 840 | fn 
proxy_configs() { 841 | let tmpd = &cap_tempfile::tempdir(cap_std::ambient_authority()).unwrap(); 842 | 843 | let c = Command::try_from(ImageProxyConfig::default()).unwrap(); 844 | validate( 845 | c, 846 | &["experimental-image-proxy"], 847 | &["--no-creds", "--tls-verify", "--authfile"], 848 | ); 849 | 850 | let c = Command::try_from(ImageProxyConfig { 851 | authfile: Some(PathBuf::from("/path/to/authfile")), 852 | ..Default::default() 853 | }) 854 | .unwrap(); 855 | validate(c, &[r"--authfile", "/path/to/authfile"], &[]); 856 | 857 | let decryption_key_path = "/path/to/decryption_key"; 858 | let c = Command::try_from(ImageProxyConfig { 859 | decryption_keys: Some(vec![decryption_key_path.to_string()]), 860 | ..Default::default() 861 | }) 862 | .unwrap(); 863 | validate(c, &[r"--decryption-key", "/path/to/decryption_key"], &[]); 864 | 865 | let c = Command::try_from(ImageProxyConfig { 866 | certificate_directory: Some(PathBuf::from("/path/to/certs")), 867 | ..Default::default() 868 | }) 869 | .unwrap(); 870 | validate(c, &[r"--cert-dir", "/path/to/certs"], &[]); 871 | 872 | let c = Command::try_from(ImageProxyConfig { 873 | insecure_skip_tls_verification: Some(true), 874 | ..Default::default() 875 | }) 876 | .unwrap(); 877 | validate(c, &[r"--tls-verify=false"], &[]); 878 | 879 | let mut tmpf = cap_tempfile::TempFile::new_anonymous(tmpd).unwrap(); 880 | tmpf.write_all(r#"{ "auths": {} "#.as_bytes()).unwrap(); 881 | tmpf.seek(std::io::SeekFrom::Start(0)).unwrap(); 882 | let c = Command::try_from(ImageProxyConfig { 883 | auth_data: Some(tmpf.into_std()), 884 | ..Default::default() 885 | }) 886 | .unwrap(); 887 | validate(c, &["--authfile", "/proc/self/fd/100"], &[]); 888 | 889 | // Test user-agent-prefix - only validate if supported 890 | let c = Command::try_from(ImageProxyConfig { 891 | user_agent_prefix: Some("bootc/1.0".to_string()), 892 | ..Default::default() 893 | }) 894 | .unwrap(); 895 | if supports_user_agent_prefix() { 896 | validate(c, 
&["--user-agent-prefix", "bootc/1.0"], &[]); 897 | } else { 898 | validate(c, &[], &["--user-agent-prefix"]); 899 | } 900 | } 901 | 902 | #[tokio::test] 903 | async fn skopeo_not_found() { 904 | let mut config = ImageProxyConfig { 905 | ..ImageProxyConfig::default() 906 | }; 907 | config.skopeo_cmd = Some(Command::new("no-skopeo")); 908 | 909 | match ImageProxy::new_with_config(config).await { 910 | Ok(_) => panic!("Expected an error"), 911 | Err(ref e @ Error::SkopeoSpawnError(ref inner)) => { 912 | assert_eq!(inner.kind(), std::io::ErrorKind::NotFound); 913 | // Just to double check 914 | assert!(e 915 | .to_string() 916 | .contains("skopeo spawn error: No such file or directory")); 917 | } 918 | Err(e) => panic!("Unexpected error {e}"), 919 | } 920 | } 921 | 922 | #[tokio::test] 923 | async fn test_proxy_send_sync() { 924 | fn assert_send_sync(_x: impl Send + Sync) {} 925 | 926 | let Ok(proxy) = ImageProxy::new().await else { 927 | // doesn't matter: we only actually care to test if this compiles 928 | return; 929 | }; 930 | assert_send_sync(&proxy); 931 | assert_send_sync(proxy); 932 | 933 | let opened = OpenedImage(0); 934 | assert_send_sync(&opened); 935 | assert_send_sync(opened); 936 | } 937 | 938 | fn generate_err_fd(v: serde_json::Value) -> Result { 939 | let tmp = Dir::open_ambient_dir("/tmp", cap_std::ambient_authority())?; 940 | let mut tf = cap_tempfile::TempFile::new_anonymous(&tmp).map(BufWriter::new)?; 941 | serde_json::to_writer(&mut tf, &v)?; 942 | let mut tf = tf.into_inner().map_err(|e| e.into_error())?; 943 | tf.seek(std::io::SeekFrom::Start(0))?; 944 | let r = tf.into_std().into(); 945 | Ok(r) 946 | } 947 | 948 | #[tokio::test] 949 | async fn test_read_blob_error_retryable() -> Result<()> { 950 | let retryable = serde_json::json!({ 951 | "code": "retryable", 952 | "message": "foo", 953 | }); 954 | let retryable = generate_err_fd(retryable)?; 955 | let err = ImageProxy::read_blob_error(retryable).boxed(); 956 | let e = err.await.unwrap_err(); 
957 | match e { 958 | GetBlobError::Retryable(s) => assert_eq!(s.as_ref(), "foo"), 959 | _ => panic!("Unexpected error {e:?}"), 960 | } 961 | Ok(()) 962 | } 963 | 964 | #[tokio::test] 965 | async fn test_read_blob_error_none() -> Result<()> { 966 | let tmp = Dir::open_ambient_dir("/tmp", cap_std::ambient_authority())?; 967 | let tf = cap_tempfile::TempFile::new_anonymous(&tmp)?.into_std(); 968 | let err = ImageProxy::read_blob_error(tf.into()).boxed(); 969 | err.await.unwrap(); 970 | Ok(()) 971 | } 972 | 973 | #[tokio::test] 974 | async fn test_read_blob_error_other() -> Result<()> { 975 | let other = serde_json::json!({ 976 | "code": "other", 977 | "message": "bar", 978 | }); 979 | let other = generate_err_fd(other)?; 980 | let err = ImageProxy::read_blob_error(other).boxed(); 981 | let e = err.await.unwrap_err(); 982 | match e { 983 | GetBlobError::Other(s) => assert_eq!(s.as_ref(), "bar"), 984 | _ => panic!("Unexpected error {e:?}"), 985 | } 986 | Ok(()) 987 | } 988 | 989 | #[tokio::test] 990 | async fn test_read_blob_error_epipe() -> Result<()> { 991 | let epipe = serde_json::json!({ 992 | "code": "EPIPE", 993 | "message": "baz", 994 | }); 995 | let epipe = generate_err_fd(epipe)?; 996 | let err = ImageProxy::read_blob_error(epipe).boxed(); 997 | err.await.unwrap(); 998 | Ok(()) 999 | } 1000 | 1001 | // Helper to create a dummy OwnedFd using memfd_create for testing. 
1002 | fn create_dummy_fd() -> OwnedFd { 1003 | memfd_create(c"test-fd", MemfdFlags::CLOEXEC).unwrap() 1004 | } 1005 | 1006 | #[test] 1007 | fn test_new_from_raw_values_finish_pipe() { 1008 | let datafd = create_dummy_fd(); 1009 | // Keep a raw fd to compare later, as fds_and_pipeid consumes datafd 1010 | let raw_datafd_val = datafd.as_raw_fd(); 1011 | let fds = vec![datafd]; 1012 | let v = FinishPipe::from_reply(fds, 1).unwrap(); 1013 | assert_eq!(v.pipeid.0.get(), 1); 1014 | assert_eq!(v.datafd.as_raw_fd(), raw_datafd_val); 1015 | } 1016 | 1017 | #[test] 1018 | fn test_new_from_raw_values_dual_fds() { 1019 | let datafd = create_dummy_fd(); 1020 | let errfd = create_dummy_fd(); 1021 | let raw_datafd_val = datafd.as_raw_fd(); 1022 | let raw_errfd_val = errfd.as_raw_fd(); 1023 | let fds = vec![datafd, errfd]; 1024 | let v = DualFds::from_reply(fds, 0).unwrap(); 1025 | assert_eq!(v.datafd.as_raw_fd(), raw_datafd_val); 1026 | assert_eq!(v.errfd.as_raw_fd(), raw_errfd_val); 1027 | } 1028 | 1029 | #[test] 1030 | fn test_new_from_raw_values_error_too_many_fds() { 1031 | let fds = vec![create_dummy_fd(), create_dummy_fd(), create_dummy_fd()]; 1032 | match DualFds::from_reply(fds, 0) { 1033 | Ok(v) => unreachable!("{v:?}"), 1034 | Err(Error::Other(msg)) => { 1035 | assert_eq!(msg.as_ref(), "Expected two fds for DualFds") 1036 | } 1037 | Err(other) => unreachable!("{other}"), 1038 | } 1039 | } 1040 | 1041 | #[test] 1042 | fn test_new_from_raw_values_error_fd_with_zero_pipeid() { 1043 | let fds = vec![create_dummy_fd()]; 1044 | match FinishPipe::from_reply(fds, 0) { 1045 | Ok(v) => unreachable!("{v:?}"), 1046 | Err(Error::Other(msg)) => { 1047 | assert_eq!(msg.as_ref(), "Expected pipeid for FinishPipe") 1048 | } 1049 | Err(other) => unreachable!("{other}"), 1050 | } 1051 | } 1052 | 1053 | #[test] 1054 | fn test_new_from_raw_values_error_pipeid_with_both_fds() { 1055 | let fds = vec![create_dummy_fd(), create_dummy_fd()]; 1056 | match DualFds::from_reply(fds, 1) { 1057 | 
Ok(v) => unreachable!("{v:?}"), 1058 | Err(Error::Other(msg)) => { 1059 | assert_eq!(msg.as_ref(), "Unexpected pipeid with DualFds") 1060 | } 1061 | Err(other) => unreachable!("{other}"), 1062 | } 1063 | } 1064 | 1065 | #[test] 1066 | fn test_new_from_raw_values_error_no_fd_with_pipeid() { 1067 | let fds: Vec = vec![]; 1068 | match FinishPipe::from_reply(fds, 1) { 1069 | Ok(v) => unreachable!("{v:?}"), 1070 | Err(Error::Other(msg)) => { 1071 | assert_eq!(msg.as_ref(), "Expected exactly one fd for FinishPipe") 1072 | } 1073 | Err(other) => unreachable!("{other}"), 1074 | } 1075 | } 1076 | 1077 | #[tokio::test] 1078 | #[ignore = "https://github.com/coreos/rpm-ostree/issues/5442"] 1079 | async fn test_open_optional() -> Result<()> { 1080 | if !check_skopeo() { 1081 | return Ok(()); 1082 | } 1083 | 1084 | let td = tempfile::tempdir()?; 1085 | let td = td.path().to_str().unwrap(); 1086 | let proxy = ImageProxy::new().await?; 1087 | let imgpath = format!("oci-archive:{td}/some-nonexistent-image.ociarchive"); 1088 | let img = proxy.open_image_optional(&imgpath).await.unwrap(); 1089 | assert!(img.is_none()); 1090 | 1091 | Ok(()) 1092 | } 1093 | } 1094 | --------------------------------------------------------------------------------