├── .any-go-nt.sh ├── .gitattributes ├── .github ├── CODEOWNERS └── workflows │ └── ci.yml ├── .go-env.sh ├── .test ├── .coverage │ ├── .gitignore │ └── builds.sh ├── .external-pins │ ├── file.sh │ ├── library │ │ ├── alpine___3.16 │ │ ├── alpine___3.18 │ │ ├── debian___bookworm-slim │ │ └── debian___unstable-slim │ ├── list.sh │ └── mcr.microsoft.com │ │ └── windows │ │ ├── servercore___1809 │ │ └── servercore___ltsc2022 ├── .gitignore ├── all-tags.json ├── builds.json ├── cache-builds.json ├── deploy-all │ ├── in.json │ ├── out.json │ └── test.jq ├── deploy-amd64 │ ├── in.json │ ├── out.json │ └── test.jq ├── deploy-dry-run-test.json ├── jq.sh ├── library │ ├── busybox │ ├── docker │ ├── infosiftr-moby │ └── notary ├── lookup-test.json ├── meta-commands │ ├── in.json │ ├── out.sh │ └── test.jq ├── meta-queue │ ├── in.jq │ ├── in.json │ ├── out.json │ └── test.jq ├── oci-import │ ├── in.json │ ├── out.sh │ ├── temp │ │ ├── blobs │ │ │ └── sha256 │ │ │ │ ├── 166d2948c01a6ec70e44b073b0a4c56a3d7c4a4b8fd390d9ebfcb16a3ecf658e │ │ │ │ ├── 4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0 │ │ │ │ ├── 7b2699543f22d5b8dc8d66a5873eb246767bca37232dee1e7a3b8c9956bceb0c │ │ │ │ └── ba5dc23f65d4cc4a4535bce55cf9e63b068eb02946e3422d3587e8ce803b6aab │ │ ├── image-config.json │ │ ├── image-manifest.json │ │ ├── index.json │ │ └── oci-layout │ ├── test.jq │ └── test.sh ├── oci-sort-manifests │ ├── in.json │ ├── out.json │ └── test.jq ├── oci-sort-platforms │ ├── out.json │ └── test.jq ├── provenance │ ├── in.json │ ├── out.json │ └── test.jq ├── sources.json └── test.sh ├── Jenkinsfile.build ├── Jenkinsfile.deploy ├── Jenkinsfile.meta ├── Jenkinsfile.trigger ├── LICENSE ├── bin └── .gitignore ├── builds.sh ├── cmd ├── builds │ └── main.go ├── deploy │ ├── input.go │ ├── input_test.go │ └── main.go └── lookup │ └── main.go ├── deploy.jq ├── doi.jq ├── go.mod ├── go.sum ├── helpers ├── oci-import.sh ├── oci-sbom.sh └── oci-validate.sh ├── jenkins.jq ├── meta.jq ├── oci.jq ├── om ├── om.go └── om_test.go ├── provenance.jq ├── registry ├── annotations.go ├── cache.go ├── client.go ├── docker-hub.go ├── lookup.go ├── manifest-children.go ├── push.go ├── rate-limits.go ├── read-helpers.go ├── ref.go ├── ref_test.go ├── synthesize-index.go └── user-agent.go ├── sort.jq ├── sources.sh └── validate.jq /.any-go-nt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # usage: if ./.any-go-nt.sh builds; then expensive-docker-run-command ... 
go build -o builds ...; fi 5 | 6 | shopt -s globstar 7 | 8 | for go in **/**.go go.mod go.sum; do 9 | for f; do 10 | if [ "$go" -nt "$f" ]; then 11 | exit 0 12 | fi 13 | done 14 | done 15 | 16 | exit 1 17 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | Jenkinsfile* linguist-language=groovy 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @docker-library/maintainers 2 | 3 | /doi.jq @LaurentGoderre @docker-library/maintainers 4 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | schedule: 7 | - cron: 0 0 * * 0 8 | workflow_dispatch: 9 | 10 | permissions: 11 | contents: read 12 | 13 | defaults: 14 | run: 15 | shell: 'bash -Eeuo pipefail -x {0}' 16 | 17 | jobs: 18 | test: 19 | name: Smoke Test 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | - name: Install Bashbrew 24 | run: | 25 | # not doing "uses: docker-library/bashbrew@xxx" because it'll build from source, which is slow, and we don't need more than just bashbrew here 26 | mkdir .bin 27 | wget --timeout=5 -O .bin/bashbrew 'https://github.com/docker-library/bashbrew/releases/download/v0.1.13/bashbrew-amd64' 28 | echo 'a13dca73181bc68dc9fb695ca1b4003a12077551ccc02eb0c232a0313e88d7c1 *.bin/bashbrew' | sha256sum --strict --check - 29 | chmod +x .bin/bashbrew 30 | .bin/bashbrew --version 31 | echo "$PWD/.bin" >> "$GITHUB_PATH" 32 | - run: .test/test.sh --deploy 33 | - uses: actions/upload-artifact@v4 34 | with: 35 | name: coverage 36 | path: | 37 | .test/.coverage/coverage.* 38 | .test/.coverage/GOCOVERDIR/ 39 | include-hidden-files: true 40 | if-no-files-found: error 41 | - name: gofmt 42 | run: find -name '*.go' -type f -exec ./.go-env.sh gofmt -l -s -w '{}' + 43 | - run: git diff --exit-code 44 | # TODO download latest coverage artifacts from HEAD / PR target to emulate Codecov but without another flaky third-party service that's begging for write-access to all our repositories via a GitHub App? 
👀 45 | -------------------------------------------------------------------------------- /.go-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | dir="$(dirname "$BASH_SOURCE")" 5 | dir="$(readlink -ve "$dir")" 6 | 7 | src="$dir" 8 | dst="$dir" 9 | tmp='/tmp' 10 | goDir='/go' 11 | 12 | msys= 13 | cygwin= 14 | case "$(uname -o)" in 15 | Msys) 16 | msys=1 17 | ;; 18 | Cygwin) 19 | cygwin=1 20 | ;; 21 | esac 22 | if [ -n "${msys:-$cygwin}" ] && command -v cygpath > /dev/null; then 23 | src="$(cygpath --windows "$dst")" 24 | fi 25 | windowsContainers= 26 | serverOs="$(docker version --format '{{ .Server.Os }}')" 27 | if [ "$serverOs" = 'windows' ]; then 28 | windowsContainers=1 29 | # normally we'd want this to match $src so error messages, traces, etc are easier to follow, but $src might be on a non-C: drive letter and not be usable in the container as-is 😭 30 | dst='C:\app' 31 | tmp='C:\Temp' 32 | goDir='C:\go' 33 | fi 34 | 35 | args=( 36 | --interactive --rm --init 37 | --mount "type=bind,src=$src,dst=$dst" 38 | --workdir "$dst" 39 | --tmpfs "$tmp",exec 40 | --env HOME="$tmp" 41 | 42 | # "go mod" cache is stored in /go/pkg/mod/cache 43 | --env GOPATH="$goDir" 44 | --mount type=volume,src=doi-meta-gopath,dst="$goDir" 45 | --env GOCACHE="$goDir/.cache" 46 | 47 | --env "CGO_ENABLED=${CGO_ENABLED-0}" 48 | --env "GOTOOLCHAIN=${GOTOOLCHAIN-local}" 49 | --env GOCOVERDIR # https://go.dev/doc/build-cover 50 | --env GODEBUG 51 | --env GOFLAGS 52 | --env GOOS --env GOARCH 53 | --env GO386 54 | --env GOAMD64 55 | --env GOARM 56 | 57 | # hack hack hack (useful for "go run" during dev/test) 58 | --env DOCKERHUB_PUBLIC_PROXY 59 | --env DOCKERHUB_PUBLIC_PROXY_HOST 60 | ) 61 | 62 | if [ -z "$windowsContainers" ]; then 63 | user="$(id -u)" 64 | user+=":$(id -g)" 65 | args+=( --user "$user" ) 66 | fi 67 | 68 | winpty=() 69 | if [ -t 0 ] && [ -t 1 ]; then 70 | args+=( --tty ) 71 | if [ -n "$msys" ] && command -v winpty > /dev/null; then 72 | winpty=( winpty ) 73 | fi 74 | fi 75 | 76 | if [ -z "${GOLANG_IMAGE:-}" ]; then 77 | go="$(awk '$1 == "go" { print $2; exit }' "$dir/go.mod")" 78 | if [[ "$go" == *.*.* ]]; then 79 | go="${go%.*}" # strip to just X.Y 80 | fi 81 | GOLANG_IMAGE="golang:$go" 82 | 83 | # handle riscv64 "gracefully" (no golang image yet because no stable distro releases yet) 84 | { 85 | if ! docker image inspect --format '.' "$GOLANG_IMAGE" &> /dev/null && ! 
docker pull "$GOLANG_IMAGE"; then 86 | if [ -n "${BASHBREW_ARCH:-}" ] && docker buildx inspect "bashbrew-$BASHBREW_ARCH" &> /dev/null; then 87 | # a very rough hack to avoid: 88 | # ERROR: failed to solve: failed to solve with frontend dockerfile.v0: failed to read dockerfile: failed to load cache key: subdir not supported yet 89 | # (we need buildkit/buildx for --build-context, and build-from-git-with-subdir needs a newer buildkit than our dockerd might have) 90 | export BUILDX_BUILDER="bashbrew-$BASHBREW_ARCH" 91 | fi 92 | ( 93 | set -x 94 | # TODO make this more dynamic, less hard-coded 🙈 95 | # https://github.com/docker-library/golang/blob/ea6bbce8c9b13acefed0f5507336be01f0918f97/1.21/bookworm/Dockerfile 96 | GOLANG_IMAGE='golang:1.21' # to be explicit 97 | docker buildx build --load --tag "$GOLANG_IMAGE" --build-context 'buildpack-deps:bookworm-scm=docker-image://buildpack-deps:unstable-scm' 'https://github.com/docker-library/golang.git#ea6bbce8c9b13acefed0f5507336be01f0918f97:1.21/bookworm' 98 | ) 99 | fi 100 | } >&2 101 | fi 102 | 103 | args+=( 104 | "$GOLANG_IMAGE" 105 | "$@" 106 | ) 107 | 108 | set -x 109 | exec "${winpty[@]}" docker run "${args[@]}" 110 | -------------------------------------------------------------------------------- /.test/.coverage/.gitignore: -------------------------------------------------------------------------------- 1 | ** 2 | !.gitignore 3 | !builds.sh 4 | -------------------------------------------------------------------------------- /.test/.coverage/builds.sh: -------------------------------------------------------------------------------- 1 | ../../builds.sh -------------------------------------------------------------------------------- /.test/.external-pins/file.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # given an image (name:tag), return the appropriate filename 5 | 6 | dir="$(dirname "$BASH_SOURCE")" 7 | 8 | for img; do 9 | if [[ "$img" != *:* ]]; then 10 | echo >&2 "error: '$img' does not contain ':' -- this violates our assumptions! 
(did you mean '$img:latest' ?)" 11 | exit 1 12 | fi 13 | 14 | imgFile="$dir/${img/:/___}" # see ".external-pins/list.sh" 15 | echo "$imgFile" 16 | done 17 | -------------------------------------------------------------------------------- /.test/.external-pins/library/alpine___3.16: -------------------------------------------------------------------------------- 1 | sha256:e4cdb7d47b06ba0a062ad2a97a7d154967c8f83934594d9f2bd3efa89292996b 2 | -------------------------------------------------------------------------------- /.test/.external-pins/library/alpine___3.18: -------------------------------------------------------------------------------- 1 | sha256:34871e7290500828b39e22294660bee86d966bc0017544e848dd9a255cdf59e0 2 | -------------------------------------------------------------------------------- /.test/.external-pins/library/debian___bookworm-slim: -------------------------------------------------------------------------------- 1 | sha256:155280b00ee0133250f7159b567a07d7cd03b1645714c3a7458b2287b0ca83cb 2 | -------------------------------------------------------------------------------- /.test/.external-pins/library/debian___unstable-slim: -------------------------------------------------------------------------------- 1 | sha256:8ab93b5dec6c19b4a45fbfdebc55c5d08ce7d39a90dc66cc273440ba3a1a5af0 2 | -------------------------------------------------------------------------------- /.test/.external-pins/list.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | dir="$(dirname "$BASH_SOURCE")" 5 | 6 | find "$dir" -mindepth 2 -type f -printf '%P\n' | sed -e 's/___/:/' | sort 7 | 8 | # assumptions which make the "___" -> ":" conversion ~safe (examples referencing "example.com/foo/bar:baz"): 9 | # 10 | # 1. we *always* specify a tag ("baz") 11 | # 2. the domain ("example.com") cannot contain underscores 12 | # 3. we do not pin to any registry with a non-443 port ("example.com:8443") 13 | # 4. the repository ("foo/bar") can only contain singular or double underscores (never triple underscore), and only between alphanumerics (thus never right up next to ":") 14 | # 5. 
we do *not* use the "g" regex modifier in our sed, which means only the first instance of triple underscore is replaced (in pure Bash, that's "${img/:/___}" or "${img/___/:}" depending on the conversion direction) 15 | # 16 | # see https://github.com/distribution/distribution/blob/411d6bcfd2580d7ebe6e346359fa16aceec109d5/reference/regexp.go 17 | # (see also https://github.com/docker-library/perl-bashbrew/blob/6685582f7889ef4806f0544b93f10640c7608b1a/lib/Bashbrew/RemoteImageRef.pm#L9-L26 for a condensed version) 18 | # 19 | # see https://github.com/docker-library/official-images/issues/13608 for why we can't just use ":" as-is (even though Linux, macOS, and even Windows via MSYS / WSL2 don't have any issues with it) 20 | -------------------------------------------------------------------------------- /.test/.external-pins/mcr.microsoft.com/windows/servercore___1809: -------------------------------------------------------------------------------- 1 | sha256:4fe58f25a157ea749c7b770acebfdbd70c3cb2088c446943e90fe89ea059558b 2 | -------------------------------------------------------------------------------- /.test/.external-pins/mcr.microsoft.com/windows/servercore___ltsc2022: -------------------------------------------------------------------------------- 1 | sha256:308ef3f8ee3e9c9a1bdec460009c1e6394b329db13eb3149461f8841be5b538a 2 | -------------------------------------------------------------------------------- /.test/.gitignore: -------------------------------------------------------------------------------- 1 | coverage** 2 | -------------------------------------------------------------------------------- /.test/deploy-all/in.json: -------------------------------------------------------------------------------- 1 | ../builds.json -------------------------------------------------------------------------------- /.test/deploy-all/test.jq: -------------------------------------------------------------------------------- 1 | include "deploy"; 2 | 3 | # every single ref (both "library/" and arch-specific) that we should push to 4 | tagged_manifests(true; .source.arches[.build.arch].tags, .source.arches[.build.arch].archTags) 5 | # ... converted into a list of canonical inputs for "cmd/deploy" 6 | | deploy_objects 7 | -------------------------------------------------------------------------------- /.test/deploy-amd64/in.json: -------------------------------------------------------------------------------- 1 | ../builds.json -------------------------------------------------------------------------------- /.test/deploy-amd64/test.jq: -------------------------------------------------------------------------------- 1 | include "deploy"; 2 | 3 | # just amd64 arch-specific manifests 4 | arch_tagged_manifests("amd64") 5 | # ... converted into a list of canonical inputs for "cmd/deploy" 6 | | deploy_objects 7 | -------------------------------------------------------------------------------- /.test/jq.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | shopt -s nullglob # if * matches nothing, return nothing 5 | 6 | dir="$(dirname "$BASH_SOURCE")" 7 | dir="$(readlink -ve "$dir")" 8 | 9 | export SOURCE_DATE_EPOCH=0 # TODO come up with a better way for a test to specify it needs things like this (maybe a file that gets sourced/read for options/setup type things? could also provide args/swap 'out' like our "-r" hack below) 10 | 11 | # TODO arguments for choosing a test? directory? name? 
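# convention sketch (inferred from the loop below; not documented elsewhere): each "<name>/" test
# directory holds a "test.jq", an optional "in.jq" (rendered to "in.json" first), an optional
# "in.json" (passed to jq as the input file; otherwise jq runs with -n), and a single "out.*" file
# that this harness (re)generates -- "-r" is added automatically when that file isn't "out.json"
# (e.g. "meta-commands/out.sh"), and CI's "git diff --exit-code" catches any unexpected changes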
12 | for t in "$dir/"*"/test.jq"; do 13 | td="$(dirname "$t")" 14 | echo -n 'test: ' 15 | basename "$td" 16 | args=( --tab -L "$dir/.." ) 17 | if [ -s "$td/in.jq" ]; then 18 | jq "${args[@]}" -n -f "$td/in.jq" > "$td/in.json" 19 | fi 20 | args+=( -f "$t" ) 21 | if [ -s "$td/in.json" ]; then 22 | args+=( "$td/in.json" ) 23 | else 24 | args+=( -n ) 25 | fi 26 | out="$td/out.json" 27 | outs=( "$td/out."* ) 28 | if [ "${#outs[@]}" -eq 1 ]; then 29 | out="${outs[0]}" 30 | if [[ "$out" != *.json ]]; then 31 | args+=( -r ) 32 | fi 33 | fi 34 | jq "${args[@]}" > "$out" 35 | done 36 | -------------------------------------------------------------------------------- /.test/library/busybox: -------------------------------------------------------------------------------- 1 | # this file is generated via https://github.com/docker-library/busybox/blob/a20bcbde28bc3ccb18d189b808d373d27caccbe4/generate-stackbrew-library.sh 2 | 3 | Maintainers: Tianon Gravi (@tianon), 4 | Joseph Ferguson (@yosifkit) 5 | GitRepo: https://github.com/docker-library/busybox.git 6 | GitCommit: a20bcbde28bc3ccb18d189b808d373d27caccbe4 7 | Builder: oci-import 8 | File: index.json 9 | # https://github.com/docker-library/busybox/tree/dist-amd64 10 | amd64-GitFetch: refs/heads/dist-amd64 11 | amd64-GitCommit: d0b7d566eb4f1fa9933984e6fc04ab11f08f4592 12 | # https://github.com/docker-library/busybox/tree/dist-arm32v5 13 | arm32v5-GitFetch: refs/heads/dist-arm32v5 14 | arm32v5-GitCommit: 7044abc7ee26712d998311b402b975124786e0cf 15 | # https://github.com/docker-library/busybox/tree/dist-arm32v6 16 | arm32v6-GitFetch: refs/heads/dist-arm32v6 17 | arm32v6-GitCommit: c8b6d08f1f78467e7dd1ae3d5e4ec3563877e9a5 18 | # https://github.com/docker-library/busybox/tree/dist-arm32v7 19 | arm32v7-GitFetch: refs/heads/dist-arm32v7 20 | arm32v7-GitCommit: 185a3f7f21c307b15ef99b7088b228f004ff5f11 21 | # https://github.com/docker-library/busybox/tree/dist-arm64v8 22 | arm64v8-GitFetch: refs/heads/dist-arm64v8 23 | arm64v8-GitCommit: a8344687869ba9f95e140a62a915a30822ff2147 24 | # https://github.com/docker-library/busybox/tree/dist-i386 25 | i386-GitFetch: refs/heads/dist-i386 26 | i386-GitCommit: 64e761e756e3281bc9a49235ee200dfc1f5a525e 27 | # https://github.com/docker-library/busybox/tree/dist-mips64le 28 | mips64le-GitFetch: refs/heads/dist-mips64le 29 | mips64le-GitCommit: ea5639e7af6b21b81230ccaba4c05ccb2d80b9e3 30 | # https://github.com/docker-library/busybox/tree/dist-ppc64le 31 | ppc64le-GitFetch: refs/heads/dist-ppc64le 32 | ppc64le-GitCommit: 97dad737e59de0698f74b81a7dac4ce4d834e36c 33 | # https://github.com/docker-library/busybox/tree/dist-riscv64 34 | riscv64-GitFetch: refs/heads/dist-riscv64 35 | riscv64-GitCommit: 10a1d6f931c0fd84f31e5b3e464fed9773a9fdaa 36 | # https://github.com/docker-library/busybox/tree/dist-s390x 37 | s390x-GitFetch: refs/heads/dist-s390x 38 | s390x-GitCommit: ecf31f814875084a2bc85a162b78f512ea2df0c9 39 | 40 | Tags: 1.36.1-glibc, 1.36-glibc, 1-glibc, stable-glibc, glibc 41 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, riscv64, s390x 42 | amd64-Directory: latest/glibc/amd64 43 | arm32v5-Directory: latest/glibc/arm32v5 44 | arm32v7-Directory: latest/glibc/arm32v7 45 | arm64v8-Directory: latest/glibc/arm64v8 46 | i386-Directory: latest/glibc/i386 47 | mips64le-Directory: latest/glibc/mips64le 48 | ppc64le-Directory: latest/glibc/ppc64le 49 | riscv64-Directory: latest/glibc/riscv64 50 | s390x-Directory: latest/glibc/s390x 51 | 52 | Tags: 1.36.1-uclibc, 1.36-uclibc, 1-uclibc, stable-uclibc, 
uclibc 53 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, riscv64 54 | amd64-Directory: latest/uclibc/amd64 55 | arm32v5-Directory: latest/uclibc/arm32v5 56 | arm32v7-Directory: latest/uclibc/arm32v7 57 | arm64v8-Directory: latest/uclibc/arm64v8 58 | i386-Directory: latest/uclibc/i386 59 | mips64le-Directory: latest/uclibc/mips64le 60 | riscv64-Directory: latest/uclibc/riscv64 61 | 62 | Tags: 1.36.1-musl, 1.36-musl, 1-musl, stable-musl, musl 63 | Architectures: amd64, arm32v6, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x 64 | amd64-Directory: latest/musl/amd64 65 | arm32v6-Directory: latest/musl/arm32v6 66 | arm32v7-Directory: latest/musl/arm32v7 67 | arm64v8-Directory: latest/musl/arm64v8 68 | i386-Directory: latest/musl/i386 69 | ppc64le-Directory: latest/musl/ppc64le 70 | riscv64-Directory: latest/musl/riscv64 71 | s390x-Directory: latest/musl/s390x 72 | 73 | Tags: 1.36.1, 1.36, 1, stable, latest 74 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x, riscv64, arm32v6 75 | amd64-Directory: latest/glibc/amd64 76 | arm32v5-Directory: latest/glibc/arm32v5 77 | arm32v7-Directory: latest/glibc/arm32v7 78 | arm64v8-Directory: latest/glibc/arm64v8 79 | i386-Directory: latest/glibc/i386 80 | mips64le-Directory: latest/glibc/mips64le 81 | ppc64le-Directory: latest/glibc/ppc64le 82 | s390x-Directory: latest/glibc/s390x 83 | riscv64-Directory: latest/uclibc/riscv64 84 | arm32v6-Directory: latest/musl/arm32v6 85 | 86 | Tags: 1.35.0-glibc, 1.35-glibc 87 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, riscv64, s390x 88 | amd64-Directory: latest-1/glibc/amd64 89 | arm32v5-Directory: latest-1/glibc/arm32v5 90 | arm32v7-Directory: latest-1/glibc/arm32v7 91 | arm64v8-Directory: latest-1/glibc/arm64v8 92 | i386-Directory: latest-1/glibc/i386 93 | mips64le-Directory: latest-1/glibc/mips64le 94 | ppc64le-Directory: latest-1/glibc/ppc64le 95 | riscv64-Directory: latest-1/glibc/riscv64 96 | s390x-Directory: latest-1/glibc/s390x 97 | 98 | Tags: 1.35.0-uclibc, 1.35-uclibc 99 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, riscv64 100 | amd64-Directory: latest-1/uclibc/amd64 101 | arm32v5-Directory: latest-1/uclibc/arm32v5 102 | arm32v7-Directory: latest-1/uclibc/arm32v7 103 | arm64v8-Directory: latest-1/uclibc/arm64v8 104 | i386-Directory: latest-1/uclibc/i386 105 | mips64le-Directory: latest-1/uclibc/mips64le 106 | riscv64-Directory: latest-1/uclibc/riscv64 107 | 108 | Tags: 1.35.0-musl, 1.35-musl 109 | Architectures: amd64, arm32v6, arm32v7, arm64v8, i386, ppc64le, riscv64, s390x 110 | amd64-Directory: latest-1/musl/amd64 111 | arm32v6-Directory: latest-1/musl/arm32v6 112 | arm32v7-Directory: latest-1/musl/arm32v7 113 | arm64v8-Directory: latest-1/musl/arm64v8 114 | i386-Directory: latest-1/musl/i386 115 | ppc64le-Directory: latest-1/musl/ppc64le 116 | riscv64-Directory: latest-1/musl/riscv64 117 | s390x-Directory: latest-1/musl/s390x 118 | 119 | Tags: 1.35.0, 1.35 120 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x, riscv64, arm32v6 121 | amd64-Directory: latest-1/glibc/amd64 122 | arm32v5-Directory: latest-1/glibc/arm32v5 123 | arm32v7-Directory: latest-1/glibc/arm32v7 124 | arm64v8-Directory: latest-1/glibc/arm64v8 125 | i386-Directory: latest-1/glibc/i386 126 | mips64le-Directory: latest-1/glibc/mips64le 127 | ppc64le-Directory: latest-1/glibc/ppc64le 128 | s390x-Directory: latest-1/glibc/s390x 129 | riscv64-Directory: latest-1/uclibc/riscv64 130 | arm32v6-Directory: 
latest-1/musl/arm32v6 131 | -------------------------------------------------------------------------------- /.test/library/docker: -------------------------------------------------------------------------------- 1 | # this file is generated via https://github.com/docker-library/docker/blob/b0550bbda87ae407b6fcf5b039afed7b8c256251/generate-stackbrew-library.sh 2 | 3 | Maintainers: Tianon Gravi (@tianon), 4 | Joseph Ferguson (@yosifkit) 5 | GitRepo: https://github.com/docker-library/docker.git 6 | Builder: buildkit 7 | 8 | Tags: 25.0.0-beta.1-cli, 25-rc-cli, rc-cli, 25.0.0-beta.1-cli-alpine3.18 9 | Architectures: amd64, arm32v6, arm32v7, arm64v8 10 | GitCommit: 01cebba606d33d2eeb9e3dcf52e4bf218913e211 11 | Directory: 25-rc/cli 12 | 13 | Tags: 25.0.0-beta.1-dind, 25-rc-dind, rc-dind, 25.0.0-beta.1-dind-alpine3.18, 25.0.0-beta.1, 25-rc, rc, 25.0.0-beta.1-alpine3.18 14 | Architectures: amd64, arm32v6, arm32v7, arm64v8 15 | GitCommit: 5c2833e7ce9e5af0921154416d59ee13c6185cbf 16 | Directory: 25-rc/dind 17 | 18 | Tags: 25.0.0-beta.1-dind-rootless, 25-rc-dind-rootless, rc-dind-rootless 19 | Architectures: amd64, arm64v8 20 | GitCommit: 2e213030c57a2134a77bf17b0710dff1a184a7c1 21 | Directory: 25-rc/dind-rootless 22 | 23 | Tags: 25.0.0-beta.1-git, 25-rc-git, rc-git 24 | Architectures: amd64, arm32v6, arm32v7, arm64v8 25 | GitCommit: 2e213030c57a2134a77bf17b0710dff1a184a7c1 26 | Directory: 25-rc/git 27 | 28 | Tags: 25.0.0-beta.1-windowsservercore-ltsc2022, 25-rc-windowsservercore-ltsc2022, rc-windowsservercore-ltsc2022 29 | SharedTags: 25.0.0-beta.1-windowsservercore, 25-rc-windowsservercore, rc-windowsservercore 30 | Architectures: windows-amd64 31 | GitCommit: 01cebba606d33d2eeb9e3dcf52e4bf218913e211 32 | Directory: 25-rc/windows/windowsservercore-ltsc2022 33 | Constraints: windowsservercore-ltsc2022 34 | Builder: classic 35 | 36 | Tags: 25.0.0-beta.1-windowsservercore-1809, 25-rc-windowsservercore-1809, rc-windowsservercore-1809 37 | SharedTags: 25.0.0-beta.1-windowsservercore, 25-rc-windowsservercore, rc-windowsservercore 38 | Architectures: windows-amd64 39 | GitCommit: 01cebba606d33d2eeb9e3dcf52e4bf218913e211 40 | Directory: 25-rc/windows/windowsservercore-1809 41 | Constraints: windowsservercore-1809 42 | Builder: classic 43 | 44 | Tags: 24.0.7-cli, 24.0-cli, 24-cli, cli, 24.0.7-cli-alpine3.18 45 | Architectures: amd64, arm32v6, arm32v7, arm64v8 46 | GitCommit: 6d541d27b5dd12639e5a33a675ebca04d3837d74 47 | Directory: 24/cli 48 | 49 | Tags: 24.0.7-dind, 24.0-dind, 24-dind, dind, 24.0.7-dind-alpine3.18, 24.0.7, 24.0, 24, latest, 24.0.7-alpine3.18 50 | Architectures: amd64, arm32v6, arm32v7, arm64v8 51 | GitCommit: 99073a3b6be3aa7e6b5af1e69509e8c532254500 52 | Directory: 24/dind 53 | 54 | Tags: 24.0.7-dind-rootless, 24.0-dind-rootless, 24-dind-rootless, dind-rootless 55 | Architectures: amd64, arm64v8 56 | GitCommit: 62c197fe7632d3bce351d4c337a5a129ebd8f771 57 | Directory: 24/dind-rootless 58 | 59 | Tags: 24.0.7-git, 24.0-git, 24-git, git 60 | Architectures: amd64, arm32v6, arm32v7, arm64v8 61 | GitCommit: 6964fd52030c2e6e9e0943eaac07d78c9841fbb3 62 | Directory: 24/git 63 | 64 | Tags: 24.0.7-windowsservercore-ltsc2022, 24.0-windowsservercore-ltsc2022, 24-windowsservercore-ltsc2022, windowsservercore-ltsc2022 65 | SharedTags: 24.0.7-windowsservercore, 24.0-windowsservercore, 24-windowsservercore, windowsservercore 66 | Architectures: windows-amd64 67 | GitCommit: 6d541d27b5dd12639e5a33a675ebca04d3837d74 68 | Directory: 24/windows/windowsservercore-ltsc2022 69 | Constraints: 
windowsservercore-ltsc2022 70 | Builder: classic 71 | 72 | Tags: 24.0.7-windowsservercore-1809, 24.0-windowsservercore-1809, 24-windowsservercore-1809, windowsservercore-1809 73 | SharedTags: 24.0.7-windowsservercore, 24.0-windowsservercore, 24-windowsservercore, windowsservercore 74 | Architectures: windows-amd64 75 | GitCommit: 6d541d27b5dd12639e5a33a675ebca04d3837d74 76 | Directory: 24/windows/windowsservercore-1809 77 | Constraints: windowsservercore-1809 78 | Builder: classic 79 | -------------------------------------------------------------------------------- /.test/library/infosiftr-moby: -------------------------------------------------------------------------------- 1 | Maintainers: Tianon Gravi (@tianon) 2 | GitRepo: https://github.com/tianon/dockerfiles.git 3 | Architectures: amd64, arm32v5, arm32v7, arm64v8, i386, mips64le, ppc64le, s390x, riscv64 4 | 5 | Tags: 20.10.27, 20.10, 20, latest 6 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 7 | Directory: infosiftr-moby 8 | riscv64-File: Dockerfile.unstable 9 | 10 | Tags: amd64 11 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 12 | Directory: infosiftr-moby 13 | Architectures: amd64 14 | 15 | Tags: arm32v5 16 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 17 | Directory: infosiftr-moby 18 | Architectures: arm32v5 19 | 20 | Tags: arm32v7 21 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 22 | Directory: infosiftr-moby 23 | Architectures: arm32v7 24 | 25 | Tags: arm64v8 26 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 27 | Directory: infosiftr-moby 28 | Architectures: arm64v8 29 | 30 | Tags: i386 31 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 32 | Directory: infosiftr-moby 33 | Architectures: i386 34 | 35 | Tags: mips64le 36 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 37 | Directory: infosiftr-moby 38 | Architectures: mips64le 39 | 40 | Tags: ppc64le 41 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 42 | Directory: infosiftr-moby 43 | Architectures: ppc64le 44 | 45 | Tags: s390x 46 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 47 | Directory: infosiftr-moby 48 | Architectures: s390x 49 | 50 | Tags: riscv64 51 | GitCommit: 37f1b58c0dc59e6589a0f55849f73d6ef0988cd8 52 | Directory: infosiftr-moby 53 | Architectures: riscv64 54 | riscv64-File: Dockerfile.unstable 55 | -------------------------------------------------------------------------------- /.test/library/notary: -------------------------------------------------------------------------------- 1 | Maintainers: Justin Cormack (@justincormack) 2 | GitRepo: https://github.com/docker/notary-official-images.git 3 | GitCommit: 77b9b7833f8dd6be07104b214193788795a320ff 4 | Architectures: amd64, arm32v6, arm64v8, i386, ppc64le, s390x 5 | Builder: buildkit 6 | 7 | Tags: server-0.7.0, server 8 | Directory: notary-server 9 | 10 | Tags: signer-0.7.0, signer 11 | Directory: notary-signer 12 | -------------------------------------------------------------------------------- /.test/meta-commands/in.json: -------------------------------------------------------------------------------- 1 | ../builds.json -------------------------------------------------------------------------------- /.test/meta-commands/out.sh: -------------------------------------------------------------------------------- 1 | # docker:24.0.7-cli [amd64] 2 | # <pull> 3 | 4 | # </pull> 5 | # <build> 6 | SOURCE_DATE_EPOCH=1700741054 \ 7 | docker buildx build --progress=plain \ 8 | --provenance=mode=max,builder-id='https://github.com/docker-library' \ 9 | --output 
'"type=oci","dest=temp.tar"' \ 10 | --annotation 'org.opencontainers.image.source=https://github.com/docker-library/docker.git#6d541d27b5dd12639e5a33a675ebca04d3837d74:24/cli' \ 11 | --annotation 'org.opencontainers.image.revision=6d541d27b5dd12639e5a33a675ebca04d3837d74' \ 12 | --annotation 'org.opencontainers.image.created=2023-11-23T12:04:14Z' \ 13 | --annotation 'org.opencontainers.image.version=24.0.7-cli' \ 14 | --annotation 'org.opencontainers.image.url=https://hub.docker.com/_/docker' \ 15 | --annotation 'com.docker.official-images.bashbrew.arch=amd64' \ 16 | --annotation 'org.opencontainers.image.base.name=alpine:3.18' \ 17 | --annotation 'org.opencontainers.image.base.digest=sha256:d695c3de6fcd8cfe3a6222b0358425d40adfd129a8a47c3416faff1a8aece389' \ 18 | --annotation 'manifest-descriptor:org.opencontainers.image.source=https://github.com/docker-library/docker.git#6d541d27b5dd12639e5a33a675ebca04d3837d74:24/cli' \ 19 | --annotation 'manifest-descriptor:org.opencontainers.image.revision=6d541d27b5dd12639e5a33a675ebca04d3837d74' \ 20 | --annotation 'manifest-descriptor:org.opencontainers.image.created=1970-01-01T00:00:00Z' \ 21 | --annotation 'manifest-descriptor:org.opencontainers.image.version=24.0.7-cli' \ 22 | --annotation 'manifest-descriptor:org.opencontainers.image.url=https://hub.docker.com/_/docker' \ 23 | --annotation 'manifest-descriptor:com.docker.official-images.bashbrew.arch=amd64' \ 24 | --annotation 'manifest-descriptor:org.opencontainers.image.base.name=alpine:3.18' \ 25 | --annotation 'manifest-descriptor:org.opencontainers.image.base.digest=sha256:d695c3de6fcd8cfe3a6222b0358425d40adfd129a8a47c3416faff1a8aece389' \ 26 | --tag 'docker:24.0.7-cli' \ 27 | --tag 'docker:24.0-cli' \ 28 | --tag 'docker:24-cli' \ 29 | --tag 'docker:cli' \ 30 | --tag 'docker:24.0.7-cli-alpine3.18' \ 31 | --tag 'amd64/docker:24.0.7-cli' \ 32 | --tag 'amd64/docker:24.0-cli' \ 33 | --tag 'amd64/docker:24-cli' \ 34 | --tag 'amd64/docker:cli' \ 35 | --tag 'amd64/docker:24.0.7-cli-alpine3.18' \ 36 | --tag 'oisupport/staging-amd64:4b199ac326c74b3058a147e14f553af9e8e1659abc29bd3e82c9c9807b66ee43' \ 37 | --platform 'linux/amd64' \ 38 | --build-context 'alpine:3.18=docker-image://alpine@sha256:d695c3de6fcd8cfe3a6222b0358425d40adfd129a8a47c3416faff1a8aece389' \ 39 | --build-arg BUILDKIT_SYNTAX="$BASHBREW_BUILDKIT_SYNTAX" \ 40 | --build-arg BUILDKIT_DOCKERFILE_CHECK=skip=all \ 41 | --file 'Dockerfile' \ 42 | 'https://github.com/docker-library/docker.git#6d541d27b5dd12639e5a33a675ebca04d3837d74:24/cli' 43 | mkdir temp 44 | tar -xvf temp.tar -C temp 45 | rm temp.tar 46 | jq ' 47 | .manifests |= ( 48 | unique_by([ .digest, .size, .mediaType ]) 49 | | if length != 1 then 50 | error("unexpected number of manifests: \(length)") 51 | else . 
end 52 | ) 53 | ' temp/index.json > temp/index.json.new 54 | mv temp/index.json.new temp/index.json 55 | # </build> 56 | # <push> 57 | crane push temp 'oisupport/staging-amd64:4b199ac326c74b3058a147e14f553af9e8e1659abc29bd3e82c9c9807b66ee43' 58 | rm -rf temp 59 | # </push> 60 | 61 | # docker:24.0.7-windowsservercore-ltsc2022 [windows-amd64] 62 | # <pull> 63 | docker pull 'mcr.microsoft.com/windows/servercore@sha256:d4ab2dd7d3d0fce6edc5df459565a4c96bbb1d0148065b215ab5ddcab1e42eb4' 64 | docker tag 'mcr.microsoft.com/windows/servercore@sha256:d4ab2dd7d3d0fce6edc5df459565a4c96bbb1d0148065b215ab5ddcab1e42eb4' 'mcr.microsoft.com/windows/servercore:ltsc2022' 65 | # </pull> 66 | # <build> 67 | SOURCE_DATE_EPOCH=1700741054 \ 68 | DOCKER_BUILDKIT=0 \ 69 | docker build \ 70 | --tag 'docker:24.0.7-windowsservercore-ltsc2022' \ 71 | --tag 'docker:24.0-windowsservercore-ltsc2022' \ 72 | --tag 'docker:24-windowsservercore-ltsc2022' \ 73 | --tag 'docker:windowsservercore-ltsc2022' \ 74 | --tag 'docker:24.0.7-windowsservercore' \ 75 | --tag 'docker:24.0-windowsservercore' \ 76 | --tag 'docker:24-windowsservercore' \ 77 | --tag 'docker:windowsservercore' \ 78 | --tag 'winamd64/docker:24.0.7-windowsservercore-ltsc2022' \ 79 | --tag 'winamd64/docker:24.0-windowsservercore-ltsc2022' \ 80 | --tag 'winamd64/docker:24-windowsservercore-ltsc2022' \ 81 | --tag 'winamd64/docker:windowsservercore-ltsc2022' \ 82 | --tag 'winamd64/docker:24.0.7-windowsservercore' \ 83 | --tag 'winamd64/docker:24.0-windowsservercore' \ 84 | --tag 'winamd64/docker:24-windowsservercore' \ 85 | --tag 'winamd64/docker:windowsservercore' \ 86 | --tag 'oisupport/staging-windows-amd64:9b405cfa5b88ba65121aabdb95ae90fd2e1fee7582174de82ae861613ae3072e' \ 87 | --platform 'windows/amd64' \ 88 | --file 'Dockerfile' \ 89 | 'https://github.com/docker-library/docker.git#6d541d27b5dd12639e5a33a675ebca04d3837d74:24/windows/windowsservercore-ltsc2022' 90 | # </build> 91 | # <push> 92 | docker push 'oisupport/staging-windows-amd64:9b405cfa5b88ba65121aabdb95ae90fd2e1fee7582174de82ae861613ae3072e' 93 | # </push> 94 | 95 | # busybox:1.36.1 [amd64] 96 | # <pull> 97 | 98 | # </pull> 99 | # <build> 100 | 
build='{"buildId":"191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f","build":{"img":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f","resolved":{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0","size":610,"annotations":{"com.docker.official-images.bashbrew.arch":"amd64","org.opencontainers.image.base.name":"scratch","org.opencontainers.image.created":"2024-02-28T00:44:18Z","org.opencontainers.image.ref.name":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f@sha256:4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0","org.opencontainers.image.revision":"d0b7d566eb4f1fa9933984e6fc04ab11f08f4592","org.opencontainers.image.source":"https://github.com/docker-library/busybox.git","org.opencontainers.image.url":"https://hub.docker.com/_/busybox","org.opencontainers.image.version":"1.36.1-glibc"},"platform":{"architecture":"amd64","os":"linux"}}],"annotations":{"org.opencontainers.image.ref.name":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f@sha256:70a227928672dffb7d24880bad1a705b527fab650f7503c191e48a209c4a0d10"}},"sourceId":"df39fa95e66c7e19e56af0f9dfb8b79b15a0422a9b44eb0f16274d3f1f8939a2","arch":"amd64","parents":{},"resolvedParents":{}},"source":{"sourceId":"df39fa95e66c7e19e56af0f9dfb8b79b15a0422a9b44eb0f16274d3f1f8939a2","reproducibleGitChecksum":"17e76ce3a5b47357c5724738db231ed2477c94d43df69ce34ae0871c99f7de78","entries":[{"GitRepo":"https://github.com/docker-library/busybox.git","GitFetch":"refs/heads/dist-amd64","GitCommit":"d0b7d566eb4f1fa9933984e6fc04ab11f08f4592","Directory":"latest/glibc/amd64","File":"index.json","Builder":"oci-import","SOURCE_DATE_EPOCH":1709081058}],"arches":{"amd64":{"tags":["busybox:1.36.1","busybox:1.36","busybox:1","busybox:stable","busybox:latest","busybox:1.36.1-glibc","busybox:1.36-glibc","busybox:1-glibc","busybox:stable-glibc","busybox:glibc"],"archTags":["amd64/busybox:1.36.1","amd64/busybox:1.36","amd64/busybox:1","amd64/busybox:stable","amd64/busybox:latest","amd64/busybox:1.36.1-glibc","amd64/busybox:1.36-glibc","amd64/busybox:1-glibc","amd64/busybox:stable-glibc","amd64/busybox:glibc"],"froms":["scratch"],"lastStageFrom":"scratch","platformString":"linux/amd64","platform":{"architecture":"amd64","os":"linux"},"parents":{"scratch":{"sourceId":null,"pin":null}}}}}}' 101 | "$BASHBREW_META_SCRIPTS/helpers/oci-import.sh" <<<"$build" temp 102 | # SBOM 103 | mv temp temp.orig 104 | "$BASHBREW_META_SCRIPTS/helpers/oci-sbom.sh" <<<"$build" temp.orig temp 105 | rm -rf temp.orig 106 | # 107 | # 108 | crane push temp 'oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f' 109 | rm -rf temp 110 | # 111 | -------------------------------------------------------------------------------- /.test/meta-commands/test.jq: -------------------------------------------------------------------------------- 1 | include "meta"; 2 | [ 3 | first(.[] | select(normalized_builder == "buildkit")), 4 | first(.[] | select(normalized_builder == "classic")), 5 | first(.[] | select(normalized_builder == "oci-import")), 6 | empty 7 | ] 8 | | map( 9 | . 
as $b 10 | | commands 11 | | to_entries 12 | | map("# <\(.key)>\n\(.value)\n# </\(.key)>") 13 | | "# \($b.source.arches[$b.build.arch].tags[0]) [\($b.build.arch)]\n" + join("\n") 14 | ) 15 | | join("\n\n") 16 | -------------------------------------------------------------------------------- /.test/meta-queue/in.jq: -------------------------------------------------------------------------------- 1 | [ 2 | # add new test cases here 3 | # each item will be used for each architecture generated 4 | # [ ".build.resolved", "count", "skips" ] 5 | [ null, 1, 0 ], # buildable, tried once 6 | [ null, 23, 0 ], # buildable, tried many but less than skip threshold 7 | [ null, 24, 0 ], # buildable, tried many, just on skip threshold 8 | [ null, 25, 23 ], # buildable, final skip 9 | [ null, 25, 24 ], # buildable, no longer skipped 10 | [ {}, 3, 0 ], # build "complete" (not queued or skipped) 11 | empty # trailing comma 12 | ] 13 | | map( 14 | ("amd64", "arm32v7") as $arch 15 | | ([ $arch, .[] | tostring ] | join("-")) as $buildId 16 | | { 17 | # give our inputs cuter names 18 | resolved: .[0], 19 | count: .[1], 20 | skips: .[2], 21 | } 22 | | [ 23 | { 24 | count, 25 | skips, 26 | }, 27 | { 28 | $buildId, 29 | build: { 30 | $arch, 31 | resolved, 32 | }, 33 | "source": { 34 | "arches": { 35 | ($arch): { 36 | "tags": ["fake:\($buildId)"] 37 | }, 38 | }, 39 | }, 40 | }, 41 | empty # trailing comma 42 | ] 43 | | map({ ($buildId): . }) 44 | ) 45 | | transpose 46 | | map(add) 47 | | { pastJobs: .[0], builds: .[1] } 48 | -------------------------------------------------------------------------------- /.test/meta-queue/in.json: -------------------------------------------------------------------------------- 1 | { 2 | "pastJobs": { 3 | "amd64-null-1-0": { 4 | "count": 1, 5 | "skips": 0 6 | }, 7 | "arm32v7-null-1-0": { 8 | "count": 1, 9 | "skips": 0 10 | }, 11 | "amd64-null-23-0": { 12 | "count": 23, 13 | "skips": 0 14 | }, 15 | "arm32v7-null-23-0": { 16 | "count": 23, 17 | "skips": 0 18 | }, 19 | "amd64-null-24-0": { 20 | "count": 24, 21 | "skips": 0 22 | }, 23 | "arm32v7-null-24-0": { 24 | "count": 24, 25 | "skips": 0 26 | }, 27 | "amd64-null-25-23": { 28 | "count": 25, 29 | "skips": 23 30 | }, 31 | "arm32v7-null-25-23": { 32 | "count": 25, 33 | "skips": 23 34 | }, 35 | "amd64-null-25-24": { 36 | "count": 25, 37 | "skips": 24 38 | }, 39 | "arm32v7-null-25-24": { 40 | "count": 25, 41 | "skips": 24 42 | }, 43 | "amd64-{}-3-0": { 44 | "count": 3, 45 | "skips": 0 46 | }, 47 | "arm32v7-{}-3-0": { 48 | "count": 3, 49 | "skips": 0 50 | } 51 | }, 52 | "builds": { 53 | "amd64-null-1-0": { 54 | "buildId": "amd64-null-1-0", 55 | "build": { 56 | "arch": "amd64", 57 | "resolved": null 58 | }, 59 | "source": { 60 | "arches": { 61 | "amd64": { 62 | "tags": [ 63 | "fake:amd64-null-1-0" 64 | ] 65 | } 66 | } 67 | } 68 | }, 69 | "arm32v7-null-1-0": { 70 | "buildId": "arm32v7-null-1-0", 71 | "build": { 72 | "arch": "arm32v7", 73 | "resolved": null 74 | }, 75 | "source": { 76 | "arches": { 77 | "arm32v7": { 78 | "tags": [ 79 | "fake:arm32v7-null-1-0" 80 | ] 81 | } 82 | } 83 | } 84 | }, 85 | "amd64-null-23-0": { 86 | "buildId": "amd64-null-23-0", 87 | "build": { 88 | "arch": "amd64", 89 | "resolved": null 90 | }, 91 | "source": { 92 | "arches": { 93 | "amd64": { 94 | "tags": [ 95 | "fake:amd64-null-23-0" 96 | ] 97 | } 98 | } 99 | } 100 | }, 101 | "arm32v7-null-23-0": { 102 | "buildId": "arm32v7-null-23-0", 103 | "build": { 104 | "arch": "arm32v7", 105 | "resolved": null 106 | }, 107 | "source": { 108 | "arches": { 109 | "arm32v7": { 110 | 
"tags": [ 111 | "fake:arm32v7-null-23-0" 112 | ] 113 | } 114 | } 115 | } 116 | }, 117 | "amd64-null-24-0": { 118 | "buildId": "amd64-null-24-0", 119 | "build": { 120 | "arch": "amd64", 121 | "resolved": null 122 | }, 123 | "source": { 124 | "arches": { 125 | "amd64": { 126 | "tags": [ 127 | "fake:amd64-null-24-0" 128 | ] 129 | } 130 | } 131 | } 132 | }, 133 | "arm32v7-null-24-0": { 134 | "buildId": "arm32v7-null-24-0", 135 | "build": { 136 | "arch": "arm32v7", 137 | "resolved": null 138 | }, 139 | "source": { 140 | "arches": { 141 | "arm32v7": { 142 | "tags": [ 143 | "fake:arm32v7-null-24-0" 144 | ] 145 | } 146 | } 147 | } 148 | }, 149 | "amd64-null-25-23": { 150 | "buildId": "amd64-null-25-23", 151 | "build": { 152 | "arch": "amd64", 153 | "resolved": null 154 | }, 155 | "source": { 156 | "arches": { 157 | "amd64": { 158 | "tags": [ 159 | "fake:amd64-null-25-23" 160 | ] 161 | } 162 | } 163 | } 164 | }, 165 | "arm32v7-null-25-23": { 166 | "buildId": "arm32v7-null-25-23", 167 | "build": { 168 | "arch": "arm32v7", 169 | "resolved": null 170 | }, 171 | "source": { 172 | "arches": { 173 | "arm32v7": { 174 | "tags": [ 175 | "fake:arm32v7-null-25-23" 176 | ] 177 | } 178 | } 179 | } 180 | }, 181 | "amd64-null-25-24": { 182 | "buildId": "amd64-null-25-24", 183 | "build": { 184 | "arch": "amd64", 185 | "resolved": null 186 | }, 187 | "source": { 188 | "arches": { 189 | "amd64": { 190 | "tags": [ 191 | "fake:amd64-null-25-24" 192 | ] 193 | } 194 | } 195 | } 196 | }, 197 | "arm32v7-null-25-24": { 198 | "buildId": "arm32v7-null-25-24", 199 | "build": { 200 | "arch": "arm32v7", 201 | "resolved": null 202 | }, 203 | "source": { 204 | "arches": { 205 | "arm32v7": { 206 | "tags": [ 207 | "fake:arm32v7-null-25-24" 208 | ] 209 | } 210 | } 211 | } 212 | }, 213 | "amd64-{}-3-0": { 214 | "buildId": "amd64-{}-3-0", 215 | "build": { 216 | "arch": "amd64", 217 | "resolved": {} 218 | }, 219 | "source": { 220 | "arches": { 221 | "amd64": { 222 | "tags": [ 223 | "fake:amd64-{}-3-0" 224 | ] 225 | } 226 | } 227 | } 228 | }, 229 | "arm32v7-{}-3-0": { 230 | "buildId": "arm32v7-{}-3-0", 231 | "build": { 232 | "arch": "arm32v7", 233 | "resolved": {} 234 | }, 235 | "source": { 236 | "arches": { 237 | "arm32v7": { 238 | "tags": [ 239 | "fake:arm32v7-{}-3-0" 240 | ] 241 | } 242 | } 243 | } 244 | } 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /.test/meta-queue/out.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "buildId": "arm32v7-null-1-0", 4 | "build": { 5 | "arch": "arm32v7", 6 | "resolved": null 7 | }, 8 | "source": { 9 | "arches": { 10 | "arm32v7": { 11 | "tags": [ 12 | "fake:arm32v7-null-1-0" 13 | ] 14 | } 15 | } 16 | }, 17 | "identifier": "fake:arm32v7-null-1-0" 18 | }, 19 | { 20 | "buildId": "arm32v7-null-23-0", 21 | "build": { 22 | "arch": "arm32v7", 23 | "resolved": null 24 | }, 25 | "source": { 26 | "arches": { 27 | "arm32v7": { 28 | "tags": [ 29 | "fake:arm32v7-null-23-0" 30 | ] 31 | } 32 | } 33 | }, 34 | "identifier": "fake:arm32v7-null-23-0" 35 | }, 36 | { 37 | "buildId": "arm32v7-null-25-24", 38 | "build": { 39 | "arch": "arm32v7", 40 | "resolved": null 41 | }, 42 | "source": { 43 | "arches": { 44 | "arm32v7": { 45 | "tags": [ 46 | "fake:arm32v7-null-25-24" 47 | ] 48 | } 49 | } 50 | }, 51 | "identifier": "fake:arm32v7-null-25-24" 52 | } 53 | ] 54 | { 55 | "arm32v7-null-1-0": { 56 | "count": 2, 57 | "skips": 0, 58 | "identifier": "fake:arm32v7-null-1-0" 59 | }, 60 | "arm32v7-null-23-0": { 61 | 
"count": 24, 62 | "skips": 0, 63 | "identifier": "fake:arm32v7-null-23-0" 64 | }, 65 | "arm32v7-null-24-0": { 66 | "count": 24, 67 | "skips": 1, 68 | "identifier": "fake:arm32v7-null-24-0" 69 | }, 70 | "arm32v7-null-25-23": { 71 | "count": 25, 72 | "skips": 24, 73 | "identifier": "fake:arm32v7-null-25-23" 74 | }, 75 | "arm32v7-null-25-24": { 76 | "count": 26, 77 | "skips": 0, 78 | "identifier": "fake:arm32v7-null-25-24" 79 | } 80 | } 81 | 2 82 | -------------------------------------------------------------------------------- /.test/meta-queue/test.jq: -------------------------------------------------------------------------------- 1 | include "jenkins"; 2 | .pastJobs as $pastJobs 3 | | .builds 4 | | get_arch_queue("arm32v7") as $rawQueue 5 | | $rawQueue | jobs_record($pastJobs) as $newJobs 6 | | $rawQueue | filter_skips_queue($newJobs) as $filteredQueue 7 | | ( 8 | ($rawQueue | length) - ($filteredQueue | length) 9 | ) as $skippedCount 10 | # queue, skips/builds record, number of skipped items 11 | | $filteredQueue, $newJobs, $skippedCount 12 | -------------------------------------------------------------------------------- /.test/oci-import/in.json: -------------------------------------------------------------------------------- 1 | ../builds.json -------------------------------------------------------------------------------- /.test/oci-import/out.sh: -------------------------------------------------------------------------------- 1 | build='{"buildId":"191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f","build":{"img":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f","resolved":{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0","size":610,"annotations":{"com.docker.official-images.bashbrew.arch":"amd64","org.opencontainers.image.base.name":"scratch","org.opencontainers.image.created":"2024-02-28T00:44:18Z","org.opencontainers.image.ref.name":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f@sha256:4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0","org.opencontainers.image.revision":"d0b7d566eb4f1fa9933984e6fc04ab11f08f4592","org.opencontainers.image.source":"https://github.com/docker-library/busybox.git","org.opencontainers.image.url":"https://hub.docker.com/_/busybox","org.opencontainers.image.version":"1.36.1-glibc"},"platform":{"architecture":"amd64","os":"linux"}}],"annotations":{"org.opencontainers.image.ref.name":"oisupport/staging-amd64:191402ad0feacf03daf9d52a492207e73ef08b0bd17265043aea13aa27e2bb3f@sha256:70a227928672dffb7d24880bad1a705b527fab650f7503c191e48a209c4a0d10"}},"sourceId":"df39fa95e66c7e19e56af0f9dfb8b79b15a0422a9b44eb0f16274d3f1f8939a2","arch":"amd64","parents":{},"resolvedParents":{}},"source":{"sourceId":"df39fa95e66c7e19e56af0f9dfb8b79b15a0422a9b44eb0f16274d3f1f8939a2","reproducibleGitChecksum":"17e76ce3a5b47357c5724738db231ed2477c94d43df69ce34ae0871c99f7de78","entries":[{"GitRepo":"https://github.com/docker-library/busybox.git","GitFetch":"refs/heads/dist-amd64","GitCommit":"d0b7d566eb4f1fa9933984e6fc04ab11f08f4592","Directory":"latest/glibc/amd64","File":"index.json","Builder":"oci-import","SOURCE_DATE_EPOCH":1709081058}],"arches":{"amd64":{"tags":["busybox:1.36.1","busybox:1.36","busybox:1","busybox:stable","busybox:latest","busybox:1.36.1-glibc","busybox:1.36-glibc","busybox:1-g
libc","busybox:stable-glibc","busybox:glibc"],"archTags":["amd64/busybox:1.36.1","amd64/busybox:1.36","amd64/busybox:1","amd64/busybox:stable","amd64/busybox:latest","amd64/busybox:1.36.1-glibc","amd64/busybox:1.36-glibc","amd64/busybox:1-glibc","amd64/busybox:stable-glibc","amd64/busybox:glibc"],"froms":["scratch"],"lastStageFrom":"scratch","platformString":"linux/amd64","platform":{"architecture":"amd64","os":"linux"},"parents":{"scratch":{"sourceId":null,"pin":null}}}}}}' 2 | "$BASHBREW_META_SCRIPTS/helpers/oci-import.sh" <<<"$build" temp 3 | -------------------------------------------------------------------------------- /.test/oci-import/temp/blobs/sha256/166d2948c01a6ec70e44b073b0a4c56a3d7c4a4b8fd390d9ebfcb16a3ecf658e: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": 2, 3 | "mediaType": "application/vnd.oci.image.index.v1+json", 4 | "manifests": [ 5 | { 6 | "mediaType": "application/vnd.oci.image.manifest.v1+json", 7 | "digest": "sha256:4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0", 8 | "size": 610, 9 | "platform": { 10 | "os": "linux", 11 | "architecture": "amd64" 12 | }, 13 | "annotations": { 14 | "com.docker.official-images.bashbrew.arch": "amd64", 15 | "org.opencontainers.image.base.name": "scratch", 16 | "org.opencontainers.image.created": "2024-02-28T00:44:18Z", 17 | "org.opencontainers.image.revision": "d0b7d566eb4f1fa9933984e6fc04ab11f08f4592", 18 | "org.opencontainers.image.source": "https://github.com/docker-library/busybox.git", 19 | "org.opencontainers.image.url": "https://hub.docker.com/_/busybox", 20 | "org.opencontainers.image.version": "1.36.1" 21 | } 22 | } 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /.test/oci-import/temp/blobs/sha256/4be429a5fbb2e71ae7958bfa558bc637cf3a61baf40a708cb8fff532b39e52d0: -------------------------------------------------------------------------------- 1 | ../../image-manifest.json -------------------------------------------------------------------------------- /.test/oci-import/temp/blobs/sha256/7b2699543f22d5b8dc8d66a5873eb246767bca37232dee1e7a3b8c9956bceb0c: -------------------------------------------------------------------------------- 1 | ../../rootfs.tar.gz -------------------------------------------------------------------------------- /.test/oci-import/temp/blobs/sha256/ba5dc23f65d4cc4a4535bce55cf9e63b068eb02946e3422d3587e8ce803b6aab: -------------------------------------------------------------------------------- 1 | ../../image-config.json -------------------------------------------------------------------------------- /.test/oci-import/temp/image-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "config": { 3 | "Cmd": [ 4 | "sh" 5 | ] 6 | }, 7 | "created": "2023-05-18T22:34:17Z", 8 | "history": [ 9 | { 10 | "created": "2023-05-18T22:34:17Z", 11 | "created_by": "BusyBox 1.36.1 (glibc), Debian 12" 12 | } 13 | ], 14 | "rootfs": { 15 | "type": "layers", 16 | "diff_ids": [ 17 | "sha256:95c4a60383f7b6eb6f7b8e153a07cd6e896de0476763bef39d0f6cf3400624bd" 18 | ] 19 | }, 20 | "architecture": "amd64", 21 | "os": "linux" 22 | } 23 | -------------------------------------------------------------------------------- /.test/oci-import/temp/image-manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": 2, 3 | "mediaType": "application/vnd.oci.image.manifest.v1+json", 4 | "config": { 5 | 
"mediaType": "application/vnd.oci.image.config.v1+json", 6 | "digest": "sha256:ba5dc23f65d4cc4a4535bce55cf9e63b068eb02946e3422d3587e8ce803b6aab", 7 | "size": 372 8 | }, 9 | "layers": [ 10 | { 11 | "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", 12 | "digest": "sha256:7b2699543f22d5b8dc8d66a5873eb246767bca37232dee1e7a3b8c9956bceb0c", 13 | "size": 2152262 14 | } 15 | ], 16 | "annotations": { 17 | "org.opencontainers.image.url": "https://github.com/docker-library/busybox", 18 | "org.opencontainers.image.version": "1.36.1-glibc" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.test/oci-import/temp/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "schemaVersion": 2, 3 | "mediaType": "application/vnd.oci.image.index.v1+json", 4 | "manifests": [ 5 | { 6 | "mediaType": "application/vnd.oci.image.index.v1+json", 7 | "digest": "sha256:166d2948c01a6ec70e44b073b0a4c56a3d7c4a4b8fd390d9ebfcb16a3ecf658e", 8 | "size": 838 9 | } 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /.test/oci-import/temp/oci-layout: -------------------------------------------------------------------------------- 1 | {"imageLayoutVersion":"1.0.0"} 2 | -------------------------------------------------------------------------------- /.test/oci-import/test.jq: -------------------------------------------------------------------------------- 1 | include "meta"; 2 | 3 | first(.[] | select(normalized_builder == "oci-import")) 4 | 5 | | build_command 6 | 7 | # TODO find a better way to stop the SBOM bits from being included here 8 | | sub("(?s)\n+# SBOM.*"; "") 9 | -------------------------------------------------------------------------------- /.test/oci-import/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | dir="$(dirname "$BASH_SOURCE")" 5 | 6 | set -x 7 | 8 | cd "$dir" 9 | 10 | export BASHBREW_META_SCRIPTS=../.. 11 | 12 | rm -rf temp 13 | source out.sh 14 | 15 | # TODO this should be part of "oci-import.sh" 16 | "$BASHBREW_META_SCRIPTS/helpers/oci-validate.sh" temp 17 | 18 | # make sure we don't commit the rootfs tarballs 19 | find temp -type f -size '+1k' -print -delete 20 | # TODO rely on .gitignore instead so that when the test finishes, we have a valid + complete OCI layout locally (that we can test push code against, for example)? 
21 | -------------------------------------------------------------------------------- /.test/oci-sort-manifests/test.jq: -------------------------------------------------------------------------------- 1 | include "oci"; 2 | 3 | map(normalize_descriptor) 4 | | sort_manifests 5 | -------------------------------------------------------------------------------- /.test/oci-sort-platforms/test.jq: -------------------------------------------------------------------------------- 1 | include "oci"; 2 | 3 | [ 4 | { 5 | os: "linux", 6 | architecture: ( 7 | "386", 8 | "amd64", 9 | "arm", 10 | "arm64", 11 | "mips64le", 12 | "ppc64le", 13 | "riscv64", 14 | "s390x", 15 | empty 16 | ), 17 | }, 18 | 19 | { 20 | os: "windows", 21 | architecture: ( "amd64", "arm64" ), 22 | "os.version": ( 23 | # https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/base-image-lifecycle 24 | # https://oci.dag.dev/?repo=mcr.microsoft.com/windows/servercore 25 | # https://oci.dag.dev/?image=hell/win:core 26 | "10.0.14393.6796", 27 | "10.0.16299.1087", 28 | "10.0.17134.1305", 29 | "10.0.17763.5576", 30 | "10.0.18362.1256", 31 | "10.0.18363.1556", 32 | "10.0.19041.1415", 33 | "10.0.19042.1889", 34 | "10.0.20348.2340", 35 | empty 36 | ) 37 | }, 38 | 39 | { 40 | os: "freebsd", 41 | architecture: ( "amd64", "arm64" ), 42 | "os.version": ( "12.1", "13.1" ), 43 | }, 44 | 45 | # buildkit attestations 46 | # https://github.com/moby/buildkit/blob/c6145c2423de48f891862ac02f9b2653864d3c9e/docs/attestations/attestation-storage.md#attestation-manifest-descriptor 47 | { 48 | architecture: "unknown", 49 | os: "unknown", 50 | }, 51 | 52 | empty 53 | ] 54 | 55 | # explode out variant matrices 56 | | map( 57 | { 58 | # https://github.com/opencontainers/image-spec/pull/1172 59 | amd64: [ "v1", "v2", "v3", "v4" ], 60 | arm64: [ "v8", "v9", "v8.0", "v9.0", "v8.1", "v9.5" ], 61 | arm: [ "v5", "v6", "v7", "v8" ], 62 | riscv64: [ "rva20u64", "rva22u64" ], 63 | ppc64le: [ "power8", "power9", "power10" ], 64 | }[.architecture] as $variants 65 | | ., if $variants then 66 | . 
+ { variant: $variants[] } 67 | else empty end 68 | ) 69 | 70 | | map(normalize_platform) 71 | | unique 72 | | sort_by(sort_split_platform) 73 | -------------------------------------------------------------------------------- /.test/provenance/in.json: -------------------------------------------------------------------------------- 1 | ../builds.json -------------------------------------------------------------------------------- /.test/provenance/out.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "_type": "https://in-toto.io/Statement/v1", 4 | "subject": [ 5 | { 6 | "name": "pkg:docker/docker:24.0.7-cli?platform=linux%2Famd64", 7 | "digest": { 8 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 9 | } 10 | }, 11 | { 12 | "name": "pkg:docker/docker:24.0-cli?platform=linux%2Famd64", 13 | "digest": { 14 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 15 | } 16 | }, 17 | { 18 | "name": "pkg:docker/docker:24-cli?platform=linux%2Famd64", 19 | "digest": { 20 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 21 | } 22 | }, 23 | { 24 | "name": "pkg:docker/docker:cli?platform=linux%2Famd64", 25 | "digest": { 26 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 27 | } 28 | }, 29 | { 30 | "name": "pkg:docker/docker:24.0.7-cli-alpine3.18?platform=linux%2Famd64", 31 | "digest": { 32 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 33 | } 34 | }, 35 | { 36 | "name": "pkg:docker/amd64/docker:24.0.7-cli?platform=linux%2Famd64", 37 | "digest": { 38 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 39 | } 40 | }, 41 | { 42 | "name": "pkg:docker/amd64/docker:24.0-cli?platform=linux%2Famd64", 43 | "digest": { 44 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 45 | } 46 | }, 47 | { 48 | "name": "pkg:docker/amd64/docker:24-cli?platform=linux%2Famd64", 49 | "digest": { 50 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 51 | } 52 | }, 53 | { 54 | "name": "pkg:docker/amd64/docker:cli?platform=linux%2Famd64", 55 | "digest": { 56 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 57 | } 58 | }, 59 | { 60 | "name": "pkg:docker/amd64/docker:24.0.7-cli-alpine3.18?platform=linux%2Famd64", 61 | "digest": { 62 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 63 | } 64 | }, 65 | { 66 | "name": "pkg:docker/oisupport/staging-amd64:4b199ac326c74b3058a147e14f553af9e8e1659abc29bd3e82c9c9807b66ee43?platform=linux%2Famd64", 67 | "digest": { 68 | "sha256": "153793dfbac130679ad1eebd9e88b3772c47d3903a3f299c49d5c3f23a6e35d2" 69 | } 70 | } 71 | ], 72 | "predicateType": "https://slsa.dev/provenance/v1", 73 | "predicate": { 74 | "buildDefinition": { 75 | "buildType": "https://actions.github.io/buildtypes/workflow/v1", 76 | "externalParameters": { 77 | "workflow": { 78 | "ref": "refs/heads/main", 79 | "repository": "https://github.com/docker-library/meta", 80 | "path": ".github/workflows/build.yml", 81 | "digest": { 82 | "gitCommit": "0123456789abcdef0123456789abcdef01234567" 83 | } 84 | }, 85 | "inputs": { 86 | "buildId": "4b199ac326c74b3058a147e14f553af9e8e1659abc29bd3e82c9c9807b66ee43", 87 | "bashbrewArch": "amd64", 88 | "firstTag": "docker:24.0.7-cli" 89 | } 90 | }, 91 | "internalParameters": { 92 | "github": { 93 | "event_name": "workflow_dispatch", 94 | "repository_id": "1234", 95 | 
"repository_owner_id": "5678", 96 | "runner_environment": "github-hosted" 97 | } 98 | }, 99 | "resolvedDependencies": [ 100 | { 101 | "uri": "git+https://github.com/docker-library/meta@refs/heads/main", 102 | "digest": { 103 | "gitCommit": "0123456789abcdef0123456789abcdef01234567" 104 | } 105 | } 106 | ] 107 | }, 108 | "runDetails": { 109 | "builder": { 110 | "id": "https://github.com/docker-library/meta/.github/workflows/build.yml@refs/heads/main" 111 | }, 112 | "metadata": { 113 | "invocationId": "https://github.com/docker-library/meta/actions/runs/9001/attempts/2" 114 | } 115 | } 116 | } 117 | }, 118 | { 119 | "_type": "https://in-toto.io/Statement/v1", 120 | "subject": [ 121 | { 122 | "name": "pkg:docker/docker:24.0.7-windowsservercore-ltsc2022?platform=windows%2Famd64", 123 | "digest": { 124 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 125 | } 126 | }, 127 | { 128 | "name": "pkg:docker/docker:24.0-windowsservercore-ltsc2022?platform=windows%2Famd64", 129 | "digest": { 130 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 131 | } 132 | }, 133 | { 134 | "name": "pkg:docker/docker:24-windowsservercore-ltsc2022?platform=windows%2Famd64", 135 | "digest": { 136 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 137 | } 138 | }, 139 | { 140 | "name": "pkg:docker/docker:windowsservercore-ltsc2022?platform=windows%2Famd64", 141 | "digest": { 142 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 143 | } 144 | }, 145 | { 146 | "name": "pkg:docker/docker:24.0.7-windowsservercore?platform=windows%2Famd64", 147 | "digest": { 148 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 149 | } 150 | }, 151 | { 152 | "name": "pkg:docker/docker:24.0-windowsservercore?platform=windows%2Famd64", 153 | "digest": { 154 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 155 | } 156 | }, 157 | { 158 | "name": "pkg:docker/docker:24-windowsservercore?platform=windows%2Famd64", 159 | "digest": { 160 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 161 | } 162 | }, 163 | { 164 | "name": "pkg:docker/docker:windowsservercore?platform=windows%2Famd64", 165 | "digest": { 166 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 167 | } 168 | }, 169 | { 170 | "name": "pkg:docker/winamd64/docker:24.0.7-windowsservercore-ltsc2022?platform=windows%2Famd64", 171 | "digest": { 172 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 173 | } 174 | }, 175 | { 176 | "name": "pkg:docker/winamd64/docker:24.0-windowsservercore-ltsc2022?platform=windows%2Famd64", 177 | "digest": { 178 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 179 | } 180 | }, 181 | { 182 | "name": "pkg:docker/winamd64/docker:24-windowsservercore-ltsc2022?platform=windows%2Famd64", 183 | "digest": { 184 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 185 | } 186 | }, 187 | { 188 | "name": "pkg:docker/winamd64/docker:windowsservercore-ltsc2022?platform=windows%2Famd64", 189 | "digest": { 190 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 191 | } 192 | }, 193 | { 194 | "name": "pkg:docker/winamd64/docker:24.0.7-windowsservercore?platform=windows%2Famd64", 195 | "digest": { 196 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 197 | } 198 | }, 199 | { 200 | "name": 
"pkg:docker/winamd64/docker:24.0-windowsservercore?platform=windows%2Famd64", 201 | "digest": { 202 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 203 | } 204 | }, 205 | { 206 | "name": "pkg:docker/winamd64/docker:24-windowsservercore?platform=windows%2Famd64", 207 | "digest": { 208 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 209 | } 210 | }, 211 | { 212 | "name": "pkg:docker/winamd64/docker:windowsservercore?platform=windows%2Famd64", 213 | "digest": { 214 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 215 | } 216 | }, 217 | { 218 | "name": "pkg:docker/oisupport/staging-windows-amd64:9b405cfa5b88ba65121aabdb95ae90fd2e1fee7582174de82ae861613ae3072e?platform=windows%2Famd64", 219 | "digest": { 220 | "sha256": "69aba7120e3f4014bfa80f4eae2cfc9698dcb6b8a5d64daf06de4039a19846ce" 221 | } 222 | } 223 | ], 224 | "predicateType": "https://slsa.dev/provenance/v1", 225 | "predicate": { 226 | "buildDefinition": { 227 | "buildType": "https://actions.github.io/buildtypes/workflow/v1", 228 | "externalParameters": { 229 | "workflow": { 230 | "ref": "refs/heads/main", 231 | "repository": "https://github.com/docker-library/meta", 232 | "path": ".github/workflows/build.yml", 233 | "digest": { 234 | "gitCommit": "0123456789abcdef0123456789abcdef01234567" 235 | } 236 | }, 237 | "inputs": { 238 | "buildId": "9b405cfa5b88ba65121aabdb95ae90fd2e1fee7582174de82ae861613ae3072e", 239 | "bashbrewArch": "windows-amd64", 240 | "firstTag": "docker:24.0.7-windowsservercore-ltsc2022", 241 | "windowsVersion": "2022" 242 | } 243 | }, 244 | "internalParameters": { 245 | "github": { 246 | "event_name": "workflow_dispatch", 247 | "repository_id": "1234", 248 | "repository_owner_id": "5678", 249 | "runner_environment": "github-hosted" 250 | } 251 | }, 252 | "resolvedDependencies": [ 253 | { 254 | "uri": "git+https://github.com/docker-library/meta@refs/heads/main", 255 | "digest": { 256 | "gitCommit": "0123456789abcdef0123456789abcdef01234567" 257 | } 258 | } 259 | ] 260 | }, 261 | "runDetails": { 262 | "builder": { 263 | "id": "https://github.com/docker-library/meta/.github/workflows/build.yml@refs/heads/main" 264 | }, 265 | "metadata": { 266 | "invocationId": "https://github.com/docker-library/meta/actions/runs/9001/attempts/2" 267 | } 268 | } 269 | } 270 | } 271 | ] 272 | -------------------------------------------------------------------------------- /.test/provenance/test.jq: -------------------------------------------------------------------------------- 1 | include "provenance"; 2 | include "jenkins"; 3 | 4 | [ 5 | first(.[] | select(.build.arch == "amd64" and .build.resolved)), 6 | first(.[] | select(.build.arch == "windows-amd64" and .build.resolved)), 7 | empty # trailing comma 8 | 9 | | (.build.resolved.annotations["org.opencontainers.image.ref.name"] | split("@")[1]) as $digest 10 | 11 | # some faked GitHub event data so we can ~test the provenance generation 12 | | gha_payload as $payload 13 | | { 14 | event: $payload, 15 | event_name: "workflow_dispatch", 16 | ref: "refs/heads/\($payload.ref)", 17 | repository: "docker-library/meta", 18 | repository_id: "1234", 19 | repository_owner_id: "5678", 20 | run_attempt: "2", 21 | run_id: "9001", 22 | server_url: "https://github.com", 23 | sha: "0123456789abcdef0123456789abcdef01234567", 24 | workflow_ref: "docker-library/meta/.github/workflows/build.yml@refs/heads/\($payload.ref)", 25 | workflow_sha: "0123456789abcdef0123456789abcdef01234567", 26 | } as $github 27 | | { 28 | 
environment: "github-hosted", 29 | } as $runner 30 | 31 | | github_actions_provenance($github; $runner; $digest) 32 | ] 33 | -------------------------------------------------------------------------------- /Jenkinsfile.build: -------------------------------------------------------------------------------- 1 | // any number of jobs per arch that build the specified buildId (triggered by the respective trigger job) 2 | properties([ 3 | // limited by one job per buildId so that the same build cannot run concurrently 4 | throttleJobProperty( 5 | limitOneJobWithMatchingParams: true, 6 | paramsToUseForLimit: 'buildId', 7 | throttleEnabled: true, 8 | throttleOption: 'project', 9 | ), 10 | disableResume(), 11 | durabilityHint('PERFORMANCE_OPTIMIZED'), 12 | parameters([ 13 | string(name: 'buildId', trim: true), 14 | string(name: 'identifier', trim: true, description: '(optional) used to set currentBuild.displayName to a meaningful value earlier'), 15 | ]), 16 | ]) 17 | 18 | env.BASHBREW_ARCH = env.JOB_NAME.minus('/build').split('/')[-1] // "windows-amd64", "arm64v8", etc 19 | env.BUILD_ID = params.buildId 20 | if (params.identifier) { 21 | currentBuild.displayName = params.identifier + ' (#' + currentBuild.number + ')' 22 | } 23 | 24 | node('multiarch-' + env.BASHBREW_ARCH) { ansiColor('xterm') { 25 | stage('Checkout') { 26 | checkout(scmGit( 27 | userRemoteConfigs: [[ 28 | url: 'https://github.com/docker-library/meta.git', 29 | name: 'origin', 30 | ]], 31 | branches: [[name: '*/main']], 32 | extensions: [ 33 | cloneOption( 34 | noTags: true, 35 | shallow: true, 36 | depth: 1, 37 | ), 38 | submodule( 39 | parentCredentials: true, 40 | recursiveSubmodules: true, 41 | trackingSubmodules: true, 42 | ), 43 | cleanBeforeCheckout(), 44 | cleanAfterCheckout(), 45 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta'], 46 | ], 47 | )) 48 | } 49 | 50 | env.BASHBREW_META_SCRIPTS = env.WORKSPACE + '/meta/.scripts' 51 | 52 | dir('.bin') { 53 | deleteDir() 54 | 55 | stage('Crane') { 56 | sh '''#!/usr/bin/env bash 57 | set -Eeuo pipefail -x 58 | 59 | ext='' 60 | if [ "$BASHBREW_ARCH" = 'windows-amd64' ]; then 61 | ext='.exe' 62 | fi 63 | 64 | # https://doi-janky.infosiftr.net/job/wip/job/crane 65 | # ipv6 can be extremely slow on s390x so set a timeout and have wget try the other DNS addresses instead 66 | wget --timeout=5 -O "crane$ext" "https://doi-janky.infosiftr.net/job/wip/job/crane/lastSuccessfulBuild/artifact/crane-$BASHBREW_ARCH$ext" --progress=dot:giga 67 | # TODO checksum verification ("checksums.txt") 68 | chmod +x "crane$ext" 69 | "./crane$ext" version 70 | ''' 71 | if (env.BASHBREW_ARCH == 'windows-amd64') { 72 | env.PATH = "${workspace}/.bin;${env.PATH}" 73 | } else { 74 | env.PATH = "${workspace}/.bin:${env.PATH}" 75 | } 76 | } 77 | } 78 | 79 | dir('meta') { 80 | def obj = '' 81 | stage('JSON') { 82 | obj = sh(returnStdout: true, script: ''' 83 | [ -n "$BUILD_ID" ] 84 | shell="$( 85 | jq -L"$BASHBREW_META_SCRIPTS" -r ' 86 | include "meta"; 87 | .[env.BUILD_ID] 88 | | select(needs_build and .build.arch == env.BASHBREW_ARCH) # sanity check 89 | | .commands = commands 90 | | @sh "if ! 
crane digest \\(.build.img) >&2; then printf %s \\(tojson); exit 0; fi" 91 | ' builds.json 92 | )" 93 | eval "$shell" 94 | ''').trim() 95 | } 96 | if (obj) { 97 | obj = readJSON(text: obj) 98 | currentBuild.displayName = obj.source.arches[obj.build.arch].tags[0] + ' (#' + currentBuild.number + ')' 99 | currentBuild.description = '<code>' + obj.build.img + '</code>' 100 | } else { 101 | currentBuild.displayName = 'nothing to do (#' + currentBuild.number + ')' 102 | return 103 | } 104 | 105 | timeout(time: 3, unit: 'HOURS') { 106 | /* 107 | // TODO this is currently already done on the worker machines themselves, which is a tradeoff 108 | // make sure "docker login" is localized to this workspace 109 | env.DOCKER_CONFIG = workspace + '/.docker' 110 | dir(env.DOCKER_CONFIG) { deleteDir() } 111 | 112 | withCredentials([usernamePassword( 113 | credentialsId: 'docker-hub-' + env.BASHBREW_ARCH, // TODO windows? 114 | usernameVariable: 'DOCKER_USERNAME', 115 | passwordVariable: 'DOCKER_PASSWORD', 116 | )]) { 117 | sh '''#!/usr/bin/env bash 118 | set -Eeuo pipefail 119 | docker login --username "$DOCKER_USERNAME" --password-stdin <<<"$DOCKER_PASSWORD" 120 | ''' 121 | } 122 | */ 123 | 124 | def buildEnvs = [] 125 | stage('Prep') { 126 | def json = sh(returnStdout: true, script: '''#!/usr/bin/env bash 127 | set -Eeuo pipefail -x 128 | 129 | .doi/.bin/bashbrew-buildkit-env-setup.sh \\ 130 | | jq 'to_entries | map(.key + "=" + .value)' 131 | ''').trim() 132 | if (json) { 133 | buildEnvs += readJSON(text: json) 134 | } 135 | } 136 | 137 | withEnv(buildEnvs) { 138 | dir('build') { 139 | deleteDir() 140 | 141 | stage('Pull') { 142 | sh """#!/usr/bin/env bash 143 | set -Eeuo pipefail -x 144 | 145 | ${ obj.commands.pull } 146 | """ 147 | } 148 | 149 | stage('Build') { 150 | sh """#!/usr/bin/env bash 151 | set -Eeuo pipefail -x 152 | 153 | ${ obj.commands.build } 154 | """ 155 | } 156 | 157 | stage('Push') { 158 | sh """#!/usr/bin/env bash 159 | set -Eeuo pipefail -x 160 | 161 | ${ obj.commands.push } 162 | """ 163 | } 164 | } 165 | } 166 | } 167 | } 168 | } } 169 | -------------------------------------------------------------------------------- /Jenkinsfile.deploy: -------------------------------------------------------------------------------- 1 | // one job per arch (for now) that copies built images to the arch-specific namespaces 2 | properties([ 3 | disableConcurrentBuilds(), 4 | disableResume(), 5 | durabilityHint('PERFORMANCE_OPTIMIZED'), 6 | pipelineTriggers([ 7 | githubPush(), 8 | cron('@daily'), // check periodically, just in case 9 | ]), 10 | ]) 11 | 12 | env.BASHBREW_ARCH = env.JOB_NAME.minus('/deploy').split('/')[-1] // "windows-amd64", "arm64v8", etc 13 | 14 | node('put-shared') { ansiColor('xterm') { 15 | stage('Checkout') { 16 | checkout(scmGit( 17 | userRemoteConfigs: [[ 18 | url: 'https://github.com/docker-library/meta.git', 19 | name: 'origin', 20 | ]], 21 | branches: [[name: '*/main']], 22 | extensions: [ 23 | cloneOption( 24 | noTags: true, 25 | shallow: true, 26 | depth: 1, 27 | ), 28 | submodule( 29 | parentCredentials: true, 30 | recursiveSubmodules: true, 31 | trackingSubmodules: true, 32 | ), 33 | cleanBeforeCheckout(), 34 | cleanAfterCheckout(), 35 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta'], 36 | ], 37 | )) 38 | } 39 | 40 | // make sure "docker login" is localized to this workspace 41 | env.DOCKER_CONFIG = workspace + '/.docker' 42 | dir(env.DOCKER_CONFIG) { deleteDir() } 43 | 44 | stage('Login') { 45 | withCredentials([ 46 | usernamePassword( 47 | credentialsId:
'docker-hub-' + env.BASHBREW_ARCH, 48 | usernameVariable: 'DOCKER_USERNAME', 49 | passwordVariable: 'DOCKER_PASSWORD', 50 | ), 51 | ]) { 52 | sh '''#!/usr/bin/env bash 53 | set -Eeuo pipefail # no -x 54 | docker login --username "$DOCKER_USERNAME" --password-stdin <<<"$DOCKER_PASSWORD" 55 | ''' 56 | } 57 | } 58 | 59 | dir('meta') { 60 | stage('Generate') { 61 | sh '''#!/usr/bin/env bash 62 | set -Eeuo pipefail -x 63 | 64 | jq -L.scripts ' 65 | include "deploy"; 66 | arch_tagged_manifests(env.BASHBREW_ARCH) 67 | | deploy_objects[] 68 | ' builds.json > deploy.json 69 | ''' 70 | } 71 | stage('Filter') { 72 | // using the previous successful deploy.json, filter out of the current deploy.json any items that were already pushed last time 73 | sh ''' 74 | wget --timeout=5 -qO past-deploy.json "$JOB_URL/lastSuccessfulBuild/artifact/deploy.json" 75 | # swap to this touch instead of the wget above to (re)bootstrap 76 | #touch past-deploy.json 77 | jq --slurpfile past ./past-deploy.json 'select( IN($past[]) | not )' ./deploy.json > filtered-deploy.json 78 | ''' 79 | } 80 | stage('Archive') { 81 | archiveArtifacts( 82 | artifacts: [ 83 | 'deploy.json', 84 | 'filtered-deploy.json', 85 | ].join(','), 86 | fingerprint: true, 87 | ) 88 | } 89 | 90 | withCredentials([ 91 | string(credentialsId: 'dockerhub-public-proxy', variable: 'DOCKERHUB_PUBLIC_PROXY'), 92 | ]) { 93 | stage('Deploy') { 94 | sh '''#!/usr/bin/env bash 95 | set -Eeuo pipefail -x 96 | 97 | ( 98 | cd .scripts 99 | # TODO make a helper to build binaries correctly/consistently 🙃 100 | if ./.any-go-nt.sh bin/deploy; then 101 | ./.go-env.sh go build -trimpath -o bin/deploy ./cmd/deploy 102 | fi 103 | ) 104 | .scripts/bin/deploy --parallel < filtered-deploy.json 105 | ''' 106 | } 107 | } 108 | } 109 | 110 | // "docker logout" 111 | dir(env.DOCKER_CONFIG) { deleteDir() } 112 | } } 113 | -------------------------------------------------------------------------------- /Jenkinsfile.meta: -------------------------------------------------------------------------------- 1 | properties([ 2 | disableConcurrentBuilds(), 3 | disableResume(), 4 | durabilityHint('PERFORMANCE_OPTIMIZED'), 5 | pipelineTriggers([ 6 | githubPush(), 7 | cron('@hourly'), // check periodically to bring in new image builds 8 | ]), 9 | ]) 10 | 11 | node { 12 | stage('Checkout') { 13 | // prevent meta from triggering itself 14 | // If 'Include in polling' is enabled or 'Include in changelog' is enabled, then when polling occurs, the job will be started if changes are detected from this SCM source.
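// (in other words: the "Commit" and "Push" stages at the end of this job push the regenerated data straight back to meta.git, so a checkout
// that still participated in changelog/polling would see the job's own push as a new change and re-trigger it forever;
// the "changelog: false, poll: false" arguments on the checkout below are what break that cycle)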
15 | checkout(changelog: false, poll: false, scm: scmGit( 16 | userRemoteConfigs: [[ 17 | url: 'git@github.com:docker-library/meta.git', 18 | credentialsId: 'docker-library-bot', 19 | name: 'origin', 20 | ]], 21 | branches: [[name: '*/main']], 22 | extensions: [ 23 | cloneOption( 24 | noTags: true, 25 | shallow: true, 26 | depth: 1, 27 | ), 28 | submodule( 29 | parentCredentials: true, 30 | recursiveSubmodules: true, 31 | trackingSubmodules: true, 32 | ), 33 | cleanBeforeCheckout(), 34 | cleanAfterCheckout(), 35 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta'], 36 | ], 37 | )) 38 | checkout(scmGit( 39 | userRemoteConfigs: [[ 40 | url: 'https://github.com/docker-library/official-images.git', 41 | name: 'origin', 42 | ]], 43 | branches: [[name: '*/master']], 44 | extensions: [ 45 | cleanBeforeCheckout(), 46 | cleanAfterCheckout(), 47 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta/.doi'], 48 | ], 49 | )) 50 | checkout(scmGit( 51 | userRemoteConfigs: [[ 52 | url: 'https://github.com/docker-library/meta-scripts.git', 53 | name: 'origin', 54 | ]], 55 | branches: [[name: '*/main']], 56 | extensions: [ 57 | cleanBeforeCheckout(), 58 | cleanAfterCheckout(), 59 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta/.scripts'], 60 | ], 61 | )) 62 | sh ''' 63 | git -C meta config user.name 'Docker Library Bot' 64 | git -C meta config user.email 'doi+docker-library-bot@docker.com' 65 | ''' 66 | } 67 | 68 | env.BASHBREW_LIBRARY = workspace + '/meta/.doi/library' 69 | 70 | dir('meta') { 71 | withCredentials([ 72 | // thanks to rate limits, we either have to "docker login" or look things up via our proxy 73 | string(credentialsId: 'dockerhub-public-proxy', variable: 'DOCKERHUB_PUBLIC_PROXY'), 74 | ]) { 75 | stage('Fetch') { 76 | sh 'bashbrew --library .doi/library fetch --all' 77 | } 78 | stage('Sources') { 79 | sh ''' 80 | # we only need to regenerate "sources.json" if ".doi" or ".scripts" have changed since we last generated it 81 | 82 | needsBuild= 83 | if [ ! -s commits.json ] || [ ! -s sources.json ]; then 84 | needsBuild=1 85 | fi 86 | 87 | doi="$(git -C .doi log -1 --format='format:%H')" 88 | scripts="$(git -C .scripts log -1 --format='format:%H')" 89 | export doi scripts 90 | jq -n '{ doi: env.doi, scripts: env.scripts }' | tee commits.json 91 | if [ -z "$needsBuild" ] && ! 
git diff --exit-code commits.json; then 92 | needsBuild=1 93 | fi 94 | 95 | if [ -n "$needsBuild" ]; then 96 | # use previous run as cache 97 | [ -s sources.json ] && cp sources.json sources-copy.json 98 | 99 | .scripts/sources.sh --cache-file sources-copy.json --all > sources.json 100 | 101 | # clean up temporary cache 102 | rm -f sources-copy.json 103 | fi 104 | ''' 105 | } 106 | stage('Builds') { 107 | sh '.scripts/builds.sh --cache cache-builds.json sources.json > builds.json' 108 | } 109 | } 110 | stage('Janky') { 111 | // ideally, the other jobs that act on the data generated by this one would directly reference each Jenkinsfile.* from *within* the ".scripts" submodule that this job has updated (so that we only run updated scripts with updated data, in the case of something major changing for example) 112 | // Jenkins *does* technically support this, but it requires disabling "lightweight checkout", and doing the full checkout of "meta" (even just making sure it's up-to-date) *just* to grab a single Jenkinsfile from the .scripts submodule is really heavy and kicks over our Jenkins server 113 | // to mitigate this, we "copy up" the Jenkinsfiles directly into "meta" so that we can go back to a "lightweight" checkout 114 | sh ''' 115 | rm -rf .jenkins 116 | mkdir .jenkins 117 | echo 'Jenkinsfile* linguist-language=groovy' > .jenkins/.gitattributes 118 | cp -av .scripts/Jenkinsfile* .jenkins/ 119 | ''' 120 | } 121 | stage('Commit') { 122 | sh ''' 123 | git add -A . 124 | if ! git diff --staged --exit-code; then # commit fails if there's nothing to commit 125 | git commit -m 'Update and regenerate' 126 | fi 127 | ''' 128 | } 129 | sshagent(['docker-library-bot']) { 130 | stage('Push') { 131 | sh 'git push origin HEAD:main' 132 | } 133 | } 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /Jenkinsfile.trigger: -------------------------------------------------------------------------------- 1 | // one job per arch (for now) that triggers builds for all unbuilt images 2 | properties([ 3 | disableConcurrentBuilds(), 4 | disableResume(), 5 | durabilityHint('PERFORMANCE_OPTIMIZED'), 6 | pipelineTriggers([ 7 | githubPush(), 8 | cron('@hourly'), // run hourly whether we "need" it or not 9 | ]), 10 | ]) 11 | 12 | env.BASHBREW_ARCH = env.JOB_NAME.minus('/trigger').split('/')[-1] // "windows-amd64", "arm64v8", etc 13 | 14 | def queue = [] 15 | def breakEarly = false // thanks Jenkins... 16 | 17 | // string filled with all images needing build and whether they were skipped this time for recording after queue completion 18 | // { buildId: { "count": 1, skip: 0, ... }, ... 
} 19 | def currentJobsJson = '' 20 | 21 | node { 22 | stage('Checkout') { 23 | checkout(scmGit( 24 | userRemoteConfigs: [[ 25 | url: 'https://github.com/docker-library/meta.git', 26 | name: 'origin', 27 | ]], 28 | branches: [[name: '*/main']], 29 | extensions: [ 30 | cloneOption( 31 | noTags: true, 32 | shallow: true, 33 | depth: 1, 34 | ), 35 | submodule( 36 | parentCredentials: true, 37 | recursiveSubmodules: true, 38 | trackingSubmodules: true, 39 | ), 40 | cleanBeforeCheckout(), 41 | cleanAfterCheckout(), 42 | [$class: 'RelativeTargetDirectory', relativeTargetDir: 'meta'], 43 | ], 44 | )) 45 | } 46 | 47 | dir('meta') { 48 | stage('Queue') { 49 | // using pastJobsJson, sort the needs_build queue so that previously attempted builds always live at the bottom of the queue 50 | // list of builds that have been failing and will be skipped this trigger 51 | def queueAndFailsJson = sh(returnStdout: true, script: ''' 52 | if \\ 53 | ! wget --timeout=5 -qO past-jobs.json "$JOB_URL/lastSuccessfulBuild/artifact/past-jobs.json" \\ 54 | || ! jq 'empty' past-jobs.json \\ 55 | ; then 56 | echo '{}' > past-jobs.json 57 | fi 58 | jq -c -L.scripts --slurpfile pastJobs past-jobs.json ' 59 | include "jenkins"; 60 | get_arch_queue as $rawQueue 61 | | $rawQueue | jobs_record($pastJobs[0]) as $newJobs 62 | | $rawQueue | filter_skips_queue($newJobs) as $filteredQueue 63 | | ( 64 | ($rawQueue | length) - ($filteredQueue | length) 65 | ) as $skippedCount 66 | # queue, skips/builds record, number of skipped items 67 | | $filteredQueue, $newJobs, $skippedCount 68 | ' builds.json 69 | ''').tokenize('\r\n') 70 | 71 | def queueJson = queueAndFailsJson[0] 72 | currentJobsJson = queueAndFailsJson[1] 73 | def skips = queueAndFailsJson[2].toInteger() 74 | //echo(queueJson) 75 | 76 | def jobName = '' 77 | if (queueJson && queueJson != '[]') { 78 | queue = readJSON(text: queueJson) 79 | jobName += 'queue: ' + queue.size() 80 | } else { 81 | jobName += 'queue: 0' 82 | breakEarly = true 83 | } 84 | if (skips > 0) { 85 | jobName += ' skip: ' + skips 86 | if (breakEarly) { 87 | // if we're skipping some builds but the effective queue is empty, we want to set the job as "unstable" instead of successful (so we know there's still *something* that needs to build but it isn't being built right now) 88 | currentBuild.result = 'UNSTABLE' 89 | } 90 | // queue to build might be empty, but we still need to record these skipped builds 91 | breakEarly = false 92 | } 93 | currentBuild.displayName = jobName + ' (#' + currentBuild.number + ')' 94 | } 95 | } 96 | } 97 | 98 | // with an empty queue and nothing to skip we can end early 99 | if (breakEarly) { return } // thanks Jenkins... 100 | 101 | // new data to be added to the past-jobs.json 102 | // { lastTime: unixTimestamp, url: "" } 103 | buildCompletionData = [:] 104 | 105 | // list of closures that we can use to wait for the jobs on.
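// rough sketch of the shape this map ends up taking once the trigger loop below has run (the identifier and run id here are
// hypothetical, purely illustrative):
//   waitQueue['amd64/docker:24.0.7-cli'] = { stage('amd64/docker:24.0.7-cli') { waitForBuild(runId: 'build#42', propagateAbort: true) } }
// i.e. one no-argument closure per triggered build, keyed by identifier, ready to hand to "parallel" after everything is queued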
106 | def waitQueue = [:] 107 | def waitQueueClosure(identifier, buildId, externalizableId) { 108 | return { 109 | stage(identifier) { 110 | // "catchError" to set "stageResult" :( 111 | catchError(message: 'Build of "' + identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') { 112 | def res = waitForBuild( 113 | runId: externalizableId, 114 | propagateAbort: true, // allow cancelling this job to cancel all the triggered jobs 115 | ) 116 | buildCompletionData[buildId] = [ 117 | lastTime: (res.startTimeInMillis + res.duration) / 1000, // convert to seconds 118 | url: res.absoluteUrl, 119 | ] 120 | if (res.result != 'SUCCESS') { 121 | // set stage result via catchError 122 | error(res.result) 123 | } 124 | } 125 | } 126 | } 127 | } 128 | 129 | // stage to wrap up all the build job triggers that get waited on later 130 | stage('trigger') { 131 | for (buildObj in queue) { 132 | if (buildObj.gha_payload) { 133 | stage(buildObj.identifier) { 134 | // "catchError" to set "stageResult" :( 135 | catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') { 136 | node { 137 | withEnv([ 138 | 'payload=' + buildObj.gha_payload, 139 | ]) { 140 | withCredentials([ 141 | string( 142 | variable: 'GH_TOKEN', 143 | credentialsId: 'github-access-token-docker-library-bot-meta', 144 | ), 145 | ]) { 146 | sh ''' 147 | set -u +x 148 | 149 | # https://docs.github.com/en/free-pro-team@latest/rest/actions/workflows?apiVersion=2022-11-28#create-a-workflow-dispatch-event 150 | curl -fL \ 151 | -X POST \ 152 | -H 'Accept: application/vnd.github+json' \ 153 | -H "Authorization: Bearer $GH_TOKEN" \ 154 | -H 'X-GitHub-Api-Version: 2022-11-28' \ 155 | https://api.github.com/repos/docker-library/meta/actions/workflows/build.yml/dispatches \ 156 | -d "$payload" 157 | ''' 158 | } 159 | } 160 | // record that GHA was triggered (for tracking continued triggers that fail to push an image) 161 | buildCompletionData[buildObj.buildId] = [ 162 | lastTime: System.currentTimeMillis() / 1000, // convert to seconds 163 | url: currentBuild.absoluteUrl, 164 | ] 165 | } 166 | } 167 | } 168 | } else { 169 | // "catchError" to set "stageResult" :( 170 | catchError(message: 'Build of "' + buildObj.identifier + '" failed', buildResult: 'UNSTABLE', stageResult: 'FAILURE') { 171 | 172 | // why not parallel these build() invocations? 
173 | // jenkins parallel closures get started in a randomish order, ruining our sorted queue 174 | def res = build( 175 | job: 'build', 176 | parameters: [ 177 | string(name: 'buildId', value: buildObj.buildId), 178 | string(name: 'identifier', value: buildObj.identifier), 179 | ], 180 | propagate: false, 181 | // trigger these quickly so they all get added to Jenkins queue in "queue" order (also using "waitForStart" means we have to wait for the entire "quietPeriod" before we get to move on and schedule more) 182 | quietPeriod: 1, // seconds 183 | // we'll wait on the builds in parallel after they are all queued (so our sorted order is the queue order) 184 | waitForStart: true, 185 | ) 186 | waitQueue[buildObj.identifier] = waitQueueClosure(buildObj.identifier, buildObj.buildId, res.externalizableId) 187 | } 188 | } 189 | } 190 | } 191 | 192 | // wait on all the 'build' jobs that were queued 193 | if (waitQueue.size() > 0) { 194 | parallel waitQueue 195 | } 196 | 197 | // save currentJobs so we can use it next run as pastJobs 198 | node { 199 | def buildCompletionDataJson = writeJSON(json: buildCompletionData, returnText: true) 200 | withEnv([ 201 | 'buildCompletionDataJson=' + buildCompletionDataJson, 202 | 'currentJobsJson=' + currentJobsJson, 203 | ]) { 204 | stage('Archive') { 205 | dir('builds') { 206 | deleteDir() 207 | sh '''#!/usr/bin/env bash 208 | set -Eeuo pipefail -x 209 | 210 | jq <<<"$currentJobsJson" ' 211 | # save firstTime if it is not set yet 212 | map_values(.firstTime //= .lastTime) 213 | # merge the two objects recursively, preferring data from "buildCompletionDataJson" 214 | | . * ( env.buildCompletionDataJson | fromjson ) 215 | ' | tee past-jobs.json 216 | ''' 217 | archiveArtifacts( 218 | artifacts: '*.json', 219 | fingerprint: true, 220 | onlyIfSuccessful: true, 221 | ) 222 | } 223 | } 224 | } 225 | } 226 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2023 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 
192 | -------------------------------------------------------------------------------- /bin/.gitignore: -------------------------------------------------------------------------------- 1 | ** 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /builds.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # TODO drop this from the defaults and set it explicitly in DOI instead (to prevent accidents) 5 | : "${BASHBREW_STAGING_TEMPLATE:=oisupport/staging-ARCH:BUILD}" 6 | export BASHBREW_STAGING_TEMPLATE 7 | 8 | # put the binary in the directory of a symlink of "builds.sh" (used for testing coverage; see GOCOVERDIR below) 9 | dir="$(dirname "$BASH_SOURCE")" 10 | dir="$(readlink -ve "$dir")" 11 | bin="$dir/bin/builds" 12 | 13 | # but run the script/build from the directory of the *actual* "builds.sh" script 14 | dir="$(readlink -ve "$BASH_SOURCE")" 15 | dir="$(dirname "$dir")" 16 | 17 | if ( cd "$dir" && ./.any-go-nt.sh "$bin" ); then 18 | { 19 | echo "building '$bin'" 20 | "$dir/.go-env.sh" go build ${GOCOVERDIR:+-coverpkg=./...} -v -trimpath -o "$bin" ./cmd/builds 21 | ls -l "$bin" 22 | } >&2 23 | fi 24 | [ -x "$bin" ] 25 | 26 | "$bin" "$@" | jq . 27 | -------------------------------------------------------------------------------- /cmd/deploy/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "os" 8 | "os/exec" 9 | "os/signal" 10 | "sync" 11 | 12 | "github.com/docker-library/meta-scripts/registry" 13 | 14 | ocispec "github.com/opencontainers/image-spec/specs-go/v1" 15 | ) 16 | 17 | func main() { 18 | ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) 19 | defer stop() 20 | 21 | var ( 22 | args = os.Args[1:] 23 | 24 | // --dry-run 25 | dryRun bool 26 | 27 | // --parallel 28 | parallel bool 29 | ) 30 | for len(args) > 0 { 31 | arg := args[0] 32 | args = args[1:] 33 | 34 | switch arg { 35 | case "--dry-run": 36 | dryRun = true 37 | 38 | case "--parallel": 39 | parallel = true 40 | 41 | default: 42 | panic("unknown argument: " + arg) 43 | } 44 | } 45 | 46 | // TODO the best we can do on whether or not this actually updated tags is "yes, definitely (we had to copy some children)" and "maybe (we didn't have to copy any children)", but we should maybe still output those so we can trigger put-shared based on them (~immediately on "definitely" and with some medium delay on "maybe") 47 | 48 | // see "input.go" and "inputRaw" for details on the expected JSON input format 49 | 50 | // we pass through "jq" to pretty-print any JSON-form data fields with sane whitespace 51 | jq := exec.Command("jq", "del(.data), .data") 52 | jq.Stdin = os.Stdin 53 | jq.Stderr = os.Stderr 54 | 55 | stdout, err := jq.StdoutPipe() 56 | if err != nil { 57 | panic(err) 58 | } 59 | if err := jq.Start(); err != nil { 60 | panic(err) 61 | } 62 | 63 | // a set of RWMutex objects for synchronizing the pushing of "child" objects before their parents later in the list of documents 64 | // for every RWMutex, it will be *write*-locked during push, and *read*-locked during reading (which means we won't limit the parallelization of multiple parents after a given child is pushed, but we will stop parents from being pushed before their children) 65 | childMutexes := sync.Map{} 66 | wg := sync.WaitGroup{} 67 | 68 | var dryRunOuts chan chan []byte 69 | if dryRun { 70 | // 
we want to allow parallel, but want the output to be in-order so we resynchronize output with a channel of channels (technically this also limits parallelization, but hopefully this limit is generous enough that it doesn't matter) 71 | dryRunOuts = make(chan chan []byte, 100000) 72 | 73 | // we also have to start consuming that channel immediately, just in case we *do* hit that parallelization limit 🙈 74 | wg.Add(1) 75 | go func() { 76 | defer wg.Done() 77 | 78 | for dryRunOut := range dryRunOuts { 79 | j, ok := <-dryRunOut 80 | if !ok { 81 | // (I think) this means we didn't output anything, so this should be all our "skips" 82 | continue 83 | } 84 | fmt.Printf("%s\n", j) 85 | } 86 | }() 87 | } 88 | 89 | dec := json.NewDecoder(stdout) 90 | for dec.More() { 91 | var raw inputRaw 92 | if err := dec.Decode(&raw); err != nil { 93 | panic(err) 94 | } 95 | if err := dec.Decode(&raw.Data); err != nil { 96 | panic(err) 97 | } 98 | 99 | normal, err := NormalizeInput(raw) 100 | if err != nil { 101 | panic(err) 102 | } 103 | refsDigest := normal.Refs[0].Digest 104 | 105 | var logSuffix string = " (" + string(raw.Type) + ") " 106 | if normal.CopyFrom != nil { 107 | // normal copy (one repo/registry to another) 108 | logSuffix = " 🤝" + logSuffix + normal.CopyFrom.String() 109 | // "localhost:32774/test 🤝 (manifest) tianon/test@sha256:4077658bc7e39f02f81d1682fe49f66b3db2c420813e43f5db0c53046167c12f" 110 | } else { 111 | // push (raw/embedded blob or manifest data) 112 | logSuffix = " 🦾" + logSuffix + string(refsDigest) 113 | // "localhost:32774/test 🦾 (blob) sha256:1a51828d59323e0e02522c45652b6a7a44a032b464b06d574f067d2358b0e9f1" 114 | } 115 | startedPrefix := "❔ " 116 | successPrefix := "✅ " 117 | failurePrefix := "❌ " 118 | 119 | // locks are per-digest, but refs might be 20 tags on the same digest, so we need to get one write lock per repo@digest and release it when the first tag completes, and every other tag needs a read lock 120 | seenRefs := map[string]bool{} 121 | 122 | for _, ref := range normal.Refs { 123 | ref := ref // https://github.com/golang/go/issues/60078 124 | 125 | necessaryReadLockRefs := []registry.Reference{} 126 | 127 | // before parallelization, collect the pushing "child" mutex we need to lock for writing right away (but only for the first entry) 128 | var mutex *sync.RWMutex 129 | if ref.Digest != "" { 130 | lockRef := ref 131 | lockRef.Tag = "" 132 | lockRefStr := lockRef.String() 133 | if seenRefs[lockRefStr] { 134 | // if we've already seen this specific ref for this input, we need a read lock, not a write lock (since they're per-repo@digest) 135 | necessaryReadLockRefs = append(necessaryReadLockRefs, lockRef) 136 | } else { 137 | seenRefs[lockRefStr] = true 138 | lock, _ := childMutexes.LoadOrStore(lockRefStr, &sync.RWMutex{}) 139 | mutex = lock.(*sync.RWMutex) 140 | // if we have a "child" mutex, lock it immediately so we don't create a race between inputs 141 | mutex.Lock() // (this gets unlocked in the goroutine below) 142 | // this is sane to lock here because interdependent inputs are required to be in-order (children first), so if this hangs it's 100% a bug in the input order 143 | } 144 | } 145 | 146 | // make a (deep) copy of "normal" so that we can use it in a goroutine ("normal.do" is not safe for concurrent invocation) 147 | normal := normal.clone() 148 | 149 | var dryRunOut chan []byte 150 | if dryRun { 151 | dryRunOut = make(chan []byte, 1) 152 | dryRunOuts <- dryRunOut 153 | } 154 | 155 | wg.Add(1) 156 | // (making a function instead of direct "go func() ..." 
so we can support the --parallel toggle) 157 | f := func() { 158 | defer wg.Done() 159 | 160 | if mutex != nil { 161 | defer mutex.Unlock() 162 | } 163 | 164 | if dryRun { 165 | defer close(dryRunOut) 166 | } 167 | 168 | // before we start this job (parallelized), if it's a raw data job we need to parse the raw data and see if any of the "children" are objects we're still in the process of pushing (from a previously parallel job) 169 | if len(normal.Data) > 2 { // needs to at least be bigger than "{}" for us to care (anything else either doesn't have data or can't have children) 170 | // explicitly ignoring errors because this might not actually be JSON (or even a manifest at all!); this is best-effort 171 | // TODO optimize this by checking whether normal.Data matches "^\s*{.+}\s*$" first so we have some assurance it might work before we go further? 172 | manifestChildren, _ := registry.ParseManifestChildren(normal.Data) 173 | childDescs := []ocispec.Descriptor{} 174 | childDescs = append(childDescs, manifestChildren.Manifests...) 175 | if manifestChildren.Config != nil { 176 | childDescs = append(childDescs, *manifestChildren.Config) 177 | } 178 | childDescs = append(childDescs, manifestChildren.Layers...) 179 | for _, childDesc := range childDescs { 180 | childRef := ref 181 | childRef.Digest = childDesc.Digest 182 | necessaryReadLockRefs = append(necessaryReadLockRefs, childRef) 183 | 184 | // these read locks are cheap, so let's be aggressive with our "lookup" refs too 185 | if lookupRef, ok := normal.Lookup[childDesc.Digest]; ok { 186 | lookupRef.Digest = childDesc.Digest 187 | necessaryReadLockRefs = append(necessaryReadLockRefs, lookupRef) 188 | } 189 | if fallbackRef, ok := normal.Lookup[""]; ok { 190 | fallbackRef.Digest = childDesc.Digest 191 | necessaryReadLockRefs = append(necessaryReadLockRefs, fallbackRef) 192 | } 193 | } 194 | } 195 | // we don't *know* that all the lookup references are children, but if any of them have an explicit digest, let's treat them as potential children too (which is fair, because they *are* explicit potential references that it's sane to make sure exist) 196 | for digest, lookupRef := range normal.Lookup { 197 | necessaryReadLockRefs = append(necessaryReadLockRefs, lookupRef) 198 | if digest != lookupRef.Digest { 199 | lookupRef.Digest = digest 200 | necessaryReadLockRefs = append(necessaryReadLockRefs, lookupRef) 201 | } 202 | } 203 | // if we're going to do a copy, we need to *also* include the artifact we're copying in our list 204 | if normal.CopyFrom != nil { 205 | necessaryReadLockRefs = append(necessaryReadLockRefs, *normal.CopyFrom) 206 | } 207 | // ok, we've built up a list, let's start grabbing (ro) mutexes 208 | seenChildren := map[string]bool{} 209 | for _, lockRef := range necessaryReadLockRefs { 210 | lockRef.Tag = "" 211 | if lockRef.Digest == "" { 212 | continue 213 | } 214 | lockRefStr := lockRef.String() 215 | if seenChildren[lockRefStr] { 216 | continue 217 | } 218 | seenChildren[lockRefStr] = true 219 | lock, _ := childMutexes.LoadOrStore(lockRefStr, &sync.RWMutex{}) 220 | lock.(*sync.RWMutex).RLock() 221 | defer lock.(*sync.RWMutex).RUnlock() 222 | } 223 | 224 | logText := ref.StringWithKnownDigest(refsDigest) + logSuffix 225 | fmt.Fprintln(os.Stderr, startedPrefix+logText) 226 | 227 | if dryRun { 228 | needsDeploy, err := normal.dryRun(ctx, ref) 229 | if err != nil { 230 | fmt.Fprintf(os.Stderr, "%s -- ERROR: %v\n", failurePrefix+ref.String()+logSuffix, err) 231 | panic(err) // TODO exit in a more clean way (we can't use 
"os.Exit" because that causes *more* errors 😭) 232 | } 233 | if needsDeploy { 234 | normal.Refs = []registry.Reference{ref} 235 | j, err := json.MarshalIndent(normal, "", "\t") 236 | if err != nil { 237 | fmt.Fprintf(os.Stderr, "%s -- JSON ERROR: %v\n", failurePrefix+ref.String()+logSuffix, err) 238 | panic(err) // TODO exit in a more clean way (we can't use "os.Exit" because that causes *more* errors 😭) 239 | } 240 | dryRunOut <- j 241 | 242 | // https://github.com/docker-library/meta-scripts/pull/119#discussion_r1978375608 -- "failure" here because we would've pushed, but the configuration (--dry-run) blocks us from doing so 243 | fmt.Fprintln(os.Stderr, failurePrefix+logText) 244 | } else { 245 | fmt.Fprintln(os.Stderr, successPrefix+logText) 246 | } 247 | } else { 248 | desc, err := normal.do(ctx, ref) 249 | if err != nil { 250 | fmt.Fprintf(os.Stderr, "%s%s -- ERROR: %v\n", failurePrefix, logText, err) 251 | panic(err) // TODO exit in a more clean way (we can't use "os.Exit" because that causes *more* errors 😭) 252 | } 253 | if ref.Digest == "" && refsDigest == "" { 254 | logText += "@" + string(desc.Digest) 255 | } 256 | 257 | fmt.Fprintln(os.Stderr, successPrefix+logText) 258 | } 259 | } 260 | if parallel { 261 | go f() 262 | } else { 263 | f() 264 | } 265 | } 266 | } 267 | 268 | if dryRun { 269 | close(dryRunOuts) 270 | } 271 | 272 | wg.Wait() 273 | } 274 | -------------------------------------------------------------------------------- /cmd/lookup/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // a simple utility for debugging "registry.SynthesizeIndex" (similar to / the next evolution of "bashbrew remote arches --json") 4 | 5 | import ( 6 | "context" 7 | "encoding/json" 8 | "io" 9 | "os" 10 | "os/signal" 11 | "sync" 12 | 13 | "github.com/docker-library/meta-scripts/registry" 14 | ) 15 | 16 | func main() { 17 | ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) 18 | defer stop() 19 | 20 | var ( 21 | zeroOpts registry.LookupOptions 22 | opts = zeroOpts 23 | ) 24 | 25 | args := os.Args[1:] 26 | 27 | var ( 28 | parallel = false 29 | wg sync.WaitGroup 30 | ) 31 | if len(args) > 0 && args[0] == "--parallel" { 32 | args = args[1:] 33 | parallel = true 34 | } 35 | 36 | for len(args) > 0 { 37 | img := args[0] 38 | args = args[1:] 39 | switch img { 40 | case "--type": 41 | opts.Type = registry.LookupType(args[0]) 42 | args = args[1:] 43 | continue 44 | case "--head": 45 | opts.Head = true 46 | continue 47 | } 48 | 49 | do := func(opts registry.LookupOptions) { 50 | ref, err := registry.ParseRef(img) 51 | if err != nil { 52 | panic(err) 53 | } 54 | 55 | var obj any 56 | if opts == zeroOpts { 57 | // if we have no explicit type and didn't request a HEAD, invoke SynthesizeIndex instead of Lookup 58 | obj, err = registry.SynthesizeIndex(ctx, ref) 59 | if err != nil { 60 | panic(err) 61 | } 62 | } else { 63 | r, err := registry.Lookup(ctx, ref, &opts) 64 | if err != nil { 65 | panic(err) 66 | } 67 | if r != nil { 68 | desc := r.Descriptor() 69 | if opts.Head { 70 | obj = desc 71 | } else { 72 | b, err := io.ReadAll(r) 73 | if err != nil { 74 | r.Close() 75 | panic(err) 76 | } 77 | if opts.Type == registry.LookupTypeManifest { 78 | // if it was a manifest lookup, cast the byte slice to json.RawMessage so we get the actual JSON (not base64) 79 | obj = json.RawMessage(b) 80 | } else { 81 | obj = b 82 | } 83 | } 84 | err = r.Close() 85 | if err != nil { 86 | panic(err) 87 | } 88 | } else { 89 | obj = nil 90 | } 
91 | } 92 | 93 | e := json.NewEncoder(os.Stdout) 94 | e.SetIndent("", "\t") 95 | if err := e.Encode(obj); err != nil { 96 | panic(err) 97 | } 98 | } 99 | 100 | if parallel { 101 | wg.Add(1) 102 | go func(opts registry.LookupOptions) { 103 | defer wg.Done() 104 | // TODO synchronize output so that it still arrives in-order? maybe the randomness is part of the charm? 105 | do(opts) 106 | }(opts) 107 | } else { 108 | do(opts) 109 | } 110 | 111 | // reset state 112 | opts = zeroOpts 113 | } 114 | 115 | if opts != zeroOpts { 116 | panic("dangling --type, --head, etc (without a following reference for it to apply to)") 117 | } 118 | 119 | if parallel { 120 | wg.Wait() 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /deploy.jq: -------------------------------------------------------------------------------- 1 | include "oci"; 2 | 3 | # input: array of "build" objects (with "buildId" top level keys) 4 | # output: map of { "tag": [ list of OCI descriptors ], ... } 5 | def tagged_manifests(builds_selector; tags_extractor): 6 | reduce (.[] | select(.build.resolved and builds_selector)) as $i ({}; 7 | .[ 8 | $i 9 | | tags_extractor 10 | | ..|strings # no matter what "tags_extractor" gives us, this will flatten us to a stream of strings 11 | ] += [ 12 | # as an extra protection against cross-architecture "bleeding" ("riscv64" infra pushing "amd64" images, for example), filter the list of manifests to those whose architecture matches the architecture it is supposed to be for 13 | # to be explicitly clear, this filtering is *also* done as part of our "builds.json" generation, so this is an added layer of best-effort protection that will be especially important to preserve and/or replicate if/when we solve the "not built yet so include the previous contents of the tag" portion of the problem at this layer instead of in the currently-separate put-shared process 14 | $i.build.resolved.manifests[] 15 | | select(.annotations["com.docker.official-images.bashbrew.arch"] // "" == $i.build.arch) # this assumes "registry.SynthesizeIndex" created this list of manifests (because it sets this annotation), but it would be reasonable for us to reimplement that conversion of "OCI platform object" to "bashbrew architecture" in pure jq if it was prudent or necessary to do so 16 | ] 17 | ) 18 | ; 19 | def arch_tagged_manifests($arch): 20 | tagged_manifests(.build.arch == $arch; .source.arches[.build.arch].archTags) 21 | ; 22 | 23 | # input: output of tagged_manifests (map of tag -> list of OCI descriptors) 24 | # output: array of input objects for "cmd/deploy" ({ "type": "manifest", "refs": [ ... ], "data": { ... } }) 25 | def deploy_objects: 26 | reduce to_entries[] as $in ({}; 27 | $in.key as $ref 28 | | ( 29 | $in.value 30 | | map(normalize_descriptor) # normalized platforms *and* normalized field ordering 31 | | sort_manifests 32 | ) as $manifests 33 | | ([ $manifests[].digest ] | join("\n")) as $key 34 | | .[$key] |= ( 35 | if . then 36 | .refs += [ $ref ] 37 | else 38 | { 39 | type: "manifest", 40 | refs: [ $ref ], 41 | 42 | # add appropriate "lookup" values for copying child objects properly 43 | lookup: ( 44 | $manifests 45 | | map({ 46 | key: .digest, 47 | value: ( 48 | .digest as $dig 49 | | .annotations["org.opencontainers.image.ref.name"] 50 | | rtrimstr("@" + $dig) 51 | ), 52 | }) 53 | | from_entries 54 | ), 55 | 56 | # convert the list of "manifests" into a full (canonical!) 
index/manifest list for deploying 57 | data: { 58 | schemaVersion: 2, 59 | mediaType: ( 60 | if $manifests[0].mediaType == media_type_dockerv2_image then 61 | media_type_dockerv2_list 62 | else 63 | media_type_oci_index 64 | end 65 | ), 66 | manifests: ( 67 | $manifests 68 | | del(.[].annotations["org.opencontainers.image.ref.name"]) 69 | ), 70 | }, 71 | } 72 | end 73 | ) 74 | ) 75 | | [ .[] ] # strip off our synthetic map keys to avoid leaking our implementation detail 76 | ; 77 | -------------------------------------------------------------------------------- /doi.jq: -------------------------------------------------------------------------------- 1 | # a helper for "build_should_sbom" 2 | def _sbom_subset: 3 | [ 4 | # only repositories we have explicitly verified 5 | "aerospike", 6 | "almalinux", 7 | "alpine", 8 | "alt", 9 | "amazoncorretto", 10 | "amazonlinux", 11 | "api-firewall", 12 | "arangodb", 13 | "archlinux", 14 | "backdrop", 15 | "bash", 16 | "bonita", 17 | "buildpack-deps", 18 | "busybox", 19 | "caddy", 20 | "cassandra", 21 | "chronograf", 22 | "cirros", 23 | "clojure", 24 | "composer", 25 | "convertigo", 26 | "couchdb", 27 | "crate", 28 | "debian", 29 | "drupal", 30 | "eclipse-mosquitto", 31 | "eclipse-temurin", 32 | "eggdrop", 33 | "elasticsearch", 34 | "elixir", 35 | "emqx", 36 | "erlang", 37 | "fedora", 38 | "flink", 39 | "fluentd", 40 | "gazebo", 41 | "gcc", 42 | "geonetwork", 43 | "ghost", 44 | "golang", 45 | "gradle", 46 | "groovy", 47 | "haproxy", 48 | "haskell", 49 | "hitch", 50 | "httpd", 51 | "hylang", 52 | "ibm-semeru-runtimes", 53 | "ibmjava", 54 | "influxdb", 55 | "irssi", 56 | "jetty", 57 | "jruby", 58 | "julia", 59 | "kapacitor", 60 | "kibana", 61 | "kong", 62 | "liquibase", 63 | "logstash", 64 | "mageia", 65 | "mariadb", 66 | "maven", 67 | "memcached", 68 | "mongo", 69 | "mongo-express", 70 | "mono", 71 | "mysql", 72 | "neo4j", 73 | "neurodebian", 74 | "nginx", 75 | "node", 76 | "odoo", 77 | "openjdk", 78 | "open-liberty", 79 | "oraclelinux", 80 | "orientdb", 81 | "perl", 82 | "photon", 83 | "php", 84 | "plone", 85 | "postgres", 86 | "pypy", 87 | "python", 88 | "r-base", 89 | "rabbitmq", 90 | "rakudo-star", 91 | "redis", 92 | "registry", 93 | "rethinkdb", 94 | "rockylinux", 95 | "ros", 96 | "ruby", 97 | "rust", 98 | "sapmachine", 99 | "satosa", 100 | "silverpeas", 101 | "solr", 102 | "sonarqube", 103 | "spark", 104 | "spiped", 105 | "storm", 106 | "swift", 107 | "swipl", 108 | "telegraf", 109 | "tomcat", 110 | "tomee", 111 | "traefik", 112 | "ubuntu", 113 | "websphere-liberty", 114 | "wordpress", 115 | "xwiki", 116 | "znc", 117 | "zookeeper", 118 | 119 | # TODO: add these when PHP extensions and PECL packages are supported in Syft 120 | # "friendica", 121 | # "joomla", 122 | # "matomo", 123 | # "mediawiki", 124 | # "monica", 125 | # "nextcloud", 126 | # "phpmyadmin", 127 | # "postfixadmin", 128 | # "yourls", 129 | 130 | # TODO: add these when the golang dependencies are fixed 131 | # "nats", 132 | # "couchbase", 133 | 134 | # TODO: add these when sbom scanning issues fixed 135 | # "dart", 136 | # "clearlinux", 137 | # "rocket.chat", 138 | # "teamspeak", 139 | # "varnish", 140 | 141 | empty 142 | ] 143 | ; 144 | 145 | # https://github.com/docker-library/meta-scripts/pull/61 (for lack of better documentation for setting this in buildkit) 146 | # https://slsa.dev/provenance/v0.2#builder.id 147 | def buildkit_provenance_builder_id: 148 | "https://github.com/docker-library" 149 | ; 150 | 151 | # input: "build" object (with "buildId" top level key) 152 | # output: 
boolean 153 | def build_should_sbom: 154 | # see "bashbrew remote arches docker/scout-sbom-indexer:1" (we need the SBOM scanner to be runnable on the host architecture) 155 | # bashbrew remote arches --json docker/scout-sbom-indexer:1 | jq '.arches | keys_unsorted' -c 156 | ( 157 | .build.arch as $arch | ["amd64","arm32v5","arm32v7","arm64v8","i386","ppc64le","riscv64","s390x"] | index($arch) 158 | ) and ( 159 | .source.arches[.build.arch].tags 160 | | map(split(":")[0]) 161 | | unique 162 | | _sbom_subset as $subset 163 | | any(.[]; 164 | . as $i 165 | | $subset 166 | | index($i) 167 | ) 168 | ) 169 | ; 170 | 171 | # input: "build" object (with "buildId" top level key) 172 | # output: boolean 173 | def build_should_sign: 174 | .build.arch == "amd64" and ( 175 | .source.arches[.build.arch].tags 176 | | map(split(":")[0]) 177 | | unique 178 | | index("notary") 179 | ) 180 | ; 181 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/docker-library/meta-scripts 2 | 3 | // ideally this would be the single source of truth for this entire repository, but riscv64 means this bleeds into .go-env.sh too -- if changing this, see that file too 4 | go 1.21 5 | 6 | require ( 7 | cuelabs.dev/go/oci/ociregistry v0.0.0-20240214163758-5ebe80b0a9a6 8 | github.com/docker-library/bashbrew v0.1.11 9 | github.com/opencontainers/go-digest v1.0.0 10 | github.com/opencontainers/image-spec v1.1.0 11 | golang.org/x/time v0.5.0 12 | ) 13 | 14 | require ( 15 | github.com/containerd/containerd v1.6.19 // indirect 16 | github.com/golang/protobuf v1.5.2 // indirect 17 | github.com/sirupsen/logrus v1.9.0 // indirect 18 | golang.org/x/sys v0.13.0 // indirect 19 | google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect 20 | google.golang.org/grpc v1.51.0 // indirect 21 | google.golang.org/protobuf v1.28.1 // indirect 22 | ) 23 | 24 | // https://github.com/cue-labs/oci/pull/29 25 | replace cuelabs.dev/go/oci/ociregistry => github.com/tianon/cuelabs-oci/ociregistry v0.0.0-20240329232705-b652d611e4b3 26 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU= 2 | github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY= 3 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 5 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/docker-library/bashbrew v0.1.11 h1:9S6jYFu0+RaqEAfvS2lh7jcaDkcvFi2maB2aU3yb0TM= 7 | github.com/docker-library/bashbrew v0.1.11/go.mod h1:6fyRRSm4vgBAgTw87EsfOT7wXKsc4JA9I5cdQJmwOm8= 8 | github.com/go-quicktest/qt v1.100.0 h1:I7iSLgIwNp0E0UnSvKJzs7ig0jg/Iq83zsZjtQNW7jY= 9 | github.com/go-quicktest/qt v1.100.0/go.mod h1:leyLsQ4jksGmF1KaQEyabnqGIiJTbOU5S46QegToEj4= 10 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 11 | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 12 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 13 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 14 | 
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 15 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 16 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 17 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 18 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 19 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 20 | github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= 21 | github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 22 | github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= 23 | github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= 24 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 25 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 26 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 27 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 28 | github.com/rogpeppe/go-internal v1.10.1-0.20230524175051-ec119421bb97 h1:3RPlVWzZ/PDqmVuf/FKHARG5EMid/tl7cv54Sw/QRVY= 29 | github.com/rogpeppe/go-internal v1.10.1-0.20230524175051-ec119421bb97/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= 30 | github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 31 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 32 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 33 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 34 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 35 | github.com/tianon/cuelabs-oci/ociregistry v0.0.0-20240329232705-b652d611e4b3 h1:kfAfFbiZ+2ZErqgqKtaMge1qeeE/0rnxuTl21G7fSwk= 36 | github.com/tianon/cuelabs-oci/ociregistry v0.0.0-20240329232705-b652d611e4b3/go.mod h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo= 37 | golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= 38 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 39 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 40 | golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= 41 | golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 42 | golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= 43 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 44 | golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= 45 | golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 46 | golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= 47 | golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= 48 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 49 | google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= 50 | google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= 51 | google.golang.org/grpc 
v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= 52 | google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= 53 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 54 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 55 | google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= 56 | google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 57 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 58 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 59 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 60 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 61 | gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= 62 | gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 63 | -------------------------------------------------------------------------------- /helpers/oci-import.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # this is "docker build" but for "Builder: oci-import" 5 | # https://github.com/docker-library/bashbrew/blob/4e0ea8d8aba49d54daf22bd8415fabba65dc83ee/cmd/bashbrew/oci-builder.go#L90-L91 6 | 7 | # usage: 8 | # .../oci-import.sh temp <<<'{"buildId":"...","build":{...},"source":{"entries":[{"Builder":"oci-import","GitCommit":...},...],...}}' 9 | 10 | target="$1"; shift # target directory to put OCI layout into (must not exist!) 11 | # stdin: JSON of the full "builds.json" object 12 | 13 | [ ! -e "$target" ] 14 | [ -d "$BASHBREW_META_SCRIPTS" ] 15 | [ -s "$BASHBREW_META_SCRIPTS/oci.jq" ] 16 | BASHBREW_META_SCRIPTS="$(cd "$BASHBREW_META_SCRIPTS" && pwd -P)" 17 | 18 | # TODO come up with clean ways to harden this against path traversal attacks 🤔 (bad symlinks, "File:" values, etc) 19 | # - perhaps we run the script in a container? (so the impact of attacks declines to essentially zero) 20 | 21 | shell="$(jq -L"$BASHBREW_META_SCRIPTS" --slurp --raw-output ' 22 | include "validate"; 23 | validate_one 24 | | @sh "buildObj=\(tojson)", 25 | ( 26 | .source.entries[0] | 27 | @sh "gitRepo=\(.GitRepo)", 28 | @sh "gitFetch=\(.GitFetch)", 29 | @sh "gitCommit=\(.GitCommit)", 30 | @sh "gitArchive=\(.GitCommit + ":" + (.Directory | if . == "." then "" else . 
+ "/" end))", 31 | @sh "file=\(.File)", 32 | empty # trailing comma 33 | ) 34 | ')" 35 | eval "$shell" 36 | [ -n "$buildObj" ] 37 | [ -n "$gitRepo" ] 38 | [ -n "$gitFetch" ] 39 | [ -n "$gitCommit" ] 40 | [ -n "$gitArchive" ] 41 | [ -n "$file" ] 42 | export buildObj 43 | 44 | # "bashbrew fetch" but in Bash (because we have bashbrew, but not the library file -- we could synthesize a library file instead, but six of one half a dozen of another and this avoids the explicit hard bashbrew dependency) 45 | 46 | # initialize "~/.cache/bashbrew/git" 47 | #"gitCache=\"$(bashbrew cat --format '{{ gitCache }}' <(echo 'Maintainers: empty hack (@example)'))\"", 48 | # https://github.com/docker-library/bashbrew/blob/5152c0df682515cbe7ac62b68bcea4278856429f/cmd/bashbrew/git.go#L52-L80 49 | export BASHBREW_CACHE="${BASHBREW_CACHE:-${XDG_CACHE_HOME:-$HOME/.cache}/bashbrew}" 50 | gitCache="$BASHBREW_CACHE/git" 51 | git init --quiet --bare "$gitCache" 52 | _git() { git -C "$gitCache" "$@"; } 53 | _git config gc.auto 0 54 | 55 | _commit() { _git rev-parse "$gitCommit^{commit}"; } 56 | if ! _commit &> /dev/null; then 57 | _git fetch --quiet "$gitRepo" "$gitCommit:" \ 58 | || _git fetch --quiet "$gitRepo" "$gitFetch:" 59 | fi 60 | _commit > /dev/null 61 | 62 | mkdir "$target" 63 | 64 | # https://github.com/docker-library/bashbrew/blob/5152c0df682515cbe7ac62b68bcea4278856429f/cmd/bashbrew/git.go#L140-L147 (TODO "bashbrew context" ?) 65 | _git archive --format=tar "$gitArchive" > "$target/oci.tar" 66 | tar --extract --file "$target/oci.tar" --directory "$target" 67 | rm -f "$target/oci.tar" 68 | 69 | cd "$target" 70 | 71 | # TODO if we normalize everything to an OCI layout, we could have a "standard" script that validates *all* our outputs and not need quite so much here 🤔 (it would even be reasonable to let publishers provide a provenance attestation object like buildkit does, if they so desire, and then we validate that it's roughly something acceptable to us) 72 | 73 | # validate oci-layout 74 | jq -L"$BASHBREW_META_SCRIPTS" --slurp ' 75 | include "oci"; 76 | include "validate"; 77 | 78 | validate_one 79 | | validate_oci_layout_file 80 | | empty 81 | ' oci-layout 82 | 83 | # validate "File:" (upgrading it to an index if it's not "index.json"), creating a new canonical "index.json" in the process 84 | jq -L"$BASHBREW_META_SCRIPTS" --slurp --tab ' 85 | include "oci"; 86 | include "validate"; 87 | include "meta"; 88 | 89 | validate_one 90 | 91 | # https://github.com/docker-library/bashbrew/blob/4e0ea8d8aba49d54daf22bd8415fabba65dc83ee/cmd/bashbrew/oci-builder.go#L116 92 | | if input_filename != "index.json" then 93 | { 94 | schemaVersion: 2, 95 | mediaType: media_type_oci_index, 96 | manifests: [ . ], 97 | } 98 | else . end 99 | 100 | | .mediaType //= media_type_oci_index # TODO index normalize function? just force this to be set/valid instead? 101 | | validate_oci_index({ indexPlatformsOptional: true }) 102 | | validate_length(.manifests; 1) # TODO allow upstream attestation in the future? 
103 | 104 | # purge maintainer-provided URLs / annotations (https://github.com/docker-library/bashbrew/blob/4e0ea8d8aba49d54daf22bd8415fabba65dc83ee/cmd/bashbrew/oci-builder.go#L146-L147) 105 | # (also purge maintainer-provided "data" fields here, since including that in the index is a bigger conversation/decision) 106 | | del(.manifests[].urls, .manifests[].data) 107 | | del(.manifests[0].annotations) 108 | | if .manifests[1].annotations then # TODO have this mean something 😂 (see TODOs above about attestations) 109 | # filter .manifest[1].annotations to *just* the attestation-related annotations 110 | .manifests[1].annotations |= with_entries( 111 | select(.key | IN( 112 | "vnd.docker.reference.type", 113 | "vnd.docker.reference.digest", 114 | empty # trailing comma 115 | )) 116 | ) 117 | else . end 118 | 119 | | (env.buildObj | fromjson) as $build 120 | 121 | # make sure "platform" is correct 122 | | .manifests[0].platform = ( 123 | $build 124 | | .source.arches[.build.arch].platform 125 | ) 126 | 127 | # inject our build annotations 128 | | .manifests[0].annotations += ( 129 | $build 130 | | build_annotations(.source.entries[0].GitRepo) 131 | ) 132 | # TODO perhaps, instead, we stop injecting the index annotations via buildkit/buildx and we normalize these two in a separate "inject index annotations" step/script? 🤔 133 | 134 | | normalize_manifest 135 | ' "$file" | tee index.json.new 136 | mv -f index.json.new index.json 137 | 138 | # now that "index.json" represents the exact index we want to push, let's push it down into a blob and make a new appropriate "index.json" for "crane push" 139 | # TODO we probably want/need some "traverse/manipulate an OCI layout" helpers 😭 140 | mediaType="$(jq --raw-output '.mediaType' index.json)" 141 | digest="$(sha256sum index.json | cut -d' ' -f1)" 142 | digest="sha256:$digest" 143 | size="$(stat --dereference --format '%s' index.json)" 144 | mv -f index.json "blobs/${digest//://}" 145 | export mediaType digest size 146 | jq -L"$BASHBREW_META_SCRIPTS" --null-input --tab ' 147 | include "oci"; 148 | { 149 | schemaVersion: 2, 150 | mediaType: media_type_oci_index, 151 | manifests: [ { 152 | mediaType: env.mediaType, 153 | digest: env.digest, 154 | size: (env.size | tonumber), 155 | } ], 156 | } 157 | | normalize_manifest 158 | ' > index.json 159 | 160 | # TODO move this further out 161 | "$BASHBREW_META_SCRIPTS/helpers/oci-validate.sh" . 162 | -------------------------------------------------------------------------------- /helpers/oci-sbom.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # this will trick BuildKit into generating an SBOM for us, then inject it into our OCI layout 5 | 6 | # usage: 7 | # .../oci-sbom.sh input-oci output-oci 8 | 9 | input="$1"; shift # input OCI layout (single image) 10 | output="$1"; shift # output OCI layout 11 | # stdin: JSON of the full "builds.json" object 12 | 13 | [ -n "$BASHBREW_BUILDKIT_SBOM_GENERATOR" ] 14 | [ -d "$input" ] 15 | [ ! 
-e "$output" ] 16 | [ -d "$BASHBREW_META_SCRIPTS" ] 17 | [ -s "$BASHBREW_META_SCRIPTS/oci.jq" ] 18 | input="$(cd "$input" && pwd -P)" 19 | BASHBREW_META_SCRIPTS="$(cd "$BASHBREW_META_SCRIPTS" && pwd -P)" 20 | 21 | shell="$(jq -L"$BASHBREW_META_SCRIPTS" --slurp --raw-output ' 22 | include "validate"; 23 | validate_one 24 | | @sh "buildObj=\(tojson)", 25 | @sh "SOURCE_DATE_EPOCH=\(.source.entries[0].SOURCE_DATE_EPOCH)", 26 | @sh "platform=\(.source.arches[.build.arch].platformString)", 27 | empty # trailing comma 28 | ')" 29 | eval "$shell" 30 | [ -n "$buildObj" ] 31 | [ -n "$SOURCE_DATE_EPOCH" ] 32 | [ -n "$platform" ] 33 | export buildObj 34 | 35 | mkdir "$output" 36 | cd "$output" 37 | 38 | imageIndex="$(jq -L"$BASHBREW_META_SCRIPTS" --raw-output ' 39 | include "oci"; 40 | include "validate"; 41 | validate_oci_index({ indexPlatformsOptional: true }) 42 | | validate_length(.manifests; 1) 43 | | validate_IN(.manifests[0].mediaType; media_types_index) 44 | | .manifests[0].digest 45 | ' "$input/index.json")" 46 | 47 | shell="$(jq -L"$BASHBREW_META_SCRIPTS" --raw-output ' 48 | include "oci"; 49 | include "validate"; 50 | validate_oci_index 51 | | validate_length(.manifests; 1) # TODO technically it would be OK if we had provenance here 🤔 (it just is harder to "merge" 2x provenance than to append 1x) 52 | | validate_IN(.manifests[0].mediaType; media_types_image) 53 | # TODO should we pull "$platform" from .manifests[0].platform instead of the build object above? (making the build object input optional would make this script easier to test by hand; so maybe just if we did not get it from build?) 54 | | @sh "export imageManifest=\(.manifests[0].digest)", 55 | empty # trailing comma 56 | ' "$input/blobs/${imageIndex/://}")" 57 | eval "$shell" 58 | 59 | copyBlobs=( "$imageManifest" ) 60 | shell="$(jq -L"$BASHBREW_META_SCRIPTS" --raw-output ' 61 | include "oci"; 62 | validate_oci_image 63 | | "copyBlobs+=( \( 64 | [ 65 | .config.digest, 66 | .layers[].digest 67 | | @sh 68 | ] 69 | | join(" ") 70 | ) )" 71 | ' "$input/blobs/${imageManifest/://}")" 72 | eval "$shell" 73 | 74 | args=( 75 | --progress=plain 76 | --load=false --provenance=false # explicitly disable a few features we want to avoid 77 | --build-arg BUILDKIT_DOCKERFILE_CHECK=skip=all # disable linting (https://github.com/moby/buildkit/pull/4962) 78 | --sbom=generator="$BASHBREW_BUILDKIT_SBOM_GENERATOR" 79 | --output "type=oci,tar=false,dest=." 
80 | # TODO also add appropriate "--tag" lines (which would give us a mostly correct "subject" block in the generated SBOM, but we'd then need to replace instances of $sbomImageManifest with $imageManifest for their values to be correct) 81 | --platform "$platform" 82 | --build-context "fake=oci-layout://$input@$imageManifest" 83 | '-' 84 | ) 85 | docker buildx build "${args[@]}" <<<'FROM fake' 86 | 87 | for blob in "${copyBlobs[@]}"; do 88 | cp --force --dereference --link "$input/blobs/${blob/://}" "blobs/${blob/://}" 89 | done 90 | 91 | sbomIndex="$(jq -L"$BASHBREW_META_SCRIPTS" --raw-output ' 92 | include "oci"; 93 | include "validate"; 94 | validate_oci_index({ indexPlatformsOptional: true }) 95 | | validate_length(.manifests; 1) 96 | | validate_IN(.manifests[0].mediaType; media_types_index) 97 | | .manifests[0].digest 98 | ' index.json)" 99 | 100 | shell="$(jq -L"$BASHBREW_META_SCRIPTS" --raw-output ' 101 | include "oci"; 102 | include "validate"; 103 | validate_oci_index 104 | | validate_length(.manifests; 2) 105 | | validate_IN(.manifests[].mediaType; media_types_image) 106 | | validate_IN(.manifests[1].annotations["vnd.docker.reference.type"]; "attestation-manifest") 107 | | .manifests[0].digest as $fakeImageDigest 108 | | validate_IN(.manifests[1].annotations["vnd.docker.reference.digest"]; $fakeImageDigest) 109 | | @sh "sbomManifest=\(.manifests[1].digest)", 110 | # TODO (see "--tag" TODO above) @sh "sbomImageManifest=\(.manifests[0].digest)", 111 | @sh "export sbomManifestDesc=\( 112 | .manifests[1] 113 | | .annotations["vnd.docker.reference.digest"] = env.imageManifest 114 | | tojson 115 | )", 116 | empty # trailing comma 117 | ' "blobs/${sbomIndex/://}")" 118 | eval "$shell" 119 | 120 | jq -L"$BASHBREW_META_SCRIPTS" --tab ' 121 | include "oci"; 122 | # we already validate this exact object above, so we do not need to revalidate here 123 | .manifests[1] = (env.sbomManifestDesc | fromjson) # TODO merge provenance, if applicable (see TODOs above) 124 | | normalize_manifest 125 | ' "$input/blobs/${imageIndex/://}" | tee index.json 126 | 127 | # (this is an exact copy of the end of "oci-import.sh" 😭) 128 | # now that "index.json" represents the exact index we want to push, let's push it down into a blob and make a new appropriate "index.json" for "crane push" 129 | # TODO we probably want/need some "traverse/manipulate an OCI layout" helpers 😭 130 | mediaType="$(jq --raw-output '.mediaType' index.json)" 131 | digest="$(sha256sum index.json | cut -d' ' -f1)" 132 | digest="sha256:$digest" 133 | size="$(stat --dereference --format '%s' index.json)" 134 | mv -f index.json "blobs/${digest//://}" 135 | export mediaType digest size 136 | jq -L"$BASHBREW_META_SCRIPTS" --null-input --tab ' 137 | include "oci"; 138 | { 139 | schemaVersion: 2, 140 | mediaType: media_type_oci_index, 141 | manifests: [ { 142 | mediaType: env.mediaType, 143 | digest: env.digest, 144 | size: (env.size | tonumber), 145 | } ], 146 | } 147 | | normalize_manifest 148 | ' > index.json 149 | 150 | # TODO move this further out 151 | "$BASHBREW_META_SCRIPTS/helpers/oci-validate.sh" . 
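# for reference, "meta.jq" invokes this helper for "oci-import" builds roughly like so (illustrative; "$build" is a single entry of builds.json):
#   mv temp temp.orig
#   "$BASHBREW_META_SCRIPTS/helpers/oci-sbom.sh" <<<"$build" temp.orig temp
#   rm -rf temp.orig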
152 | -------------------------------------------------------------------------------- /helpers/oci-validate.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -Eeuo pipefail 3 | 4 | # given an OCI image layout (https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md), verifies all descriptors as much as possible (digest matches content, size, media types, layer diff_ids, etc) 5 | 6 | layout="$1"; shift 7 | 8 | [ -d "$layout" ] 9 | [ -d "$BASHBREW_META_SCRIPTS" ] 10 | [ -s "$BASHBREW_META_SCRIPTS/oci.jq" ] 11 | BASHBREW_META_SCRIPTS="$(cd "$BASHBREW_META_SCRIPTS" && pwd -P)" 12 | 13 | cd "$layout" 14 | 15 | # validate oci-layout 16 | echo 'oci-layout' 17 | jq -L"$BASHBREW_META_SCRIPTS" --slurp ' 18 | include "oci"; 19 | include "validate"; 20 | 21 | validate_one 22 | | validate_oci_layout_file 23 | | empty 24 | ' oci-layout 25 | 26 | # TODO (recursively?) validate subject descriptors in here somewhere 🤔 27 | 28 | # TODO handle objects that *only* exist in the "data" field too 🤔 https://github.com/docker-library/meta-scripts/pull/125#discussion_r2070633122 29 | # maybe descriptor takes a "--data" flag that then returns the input descriptor, but enhanced with a "data" field so the other functions can use that to extract the data instead of relying on files? 30 | 31 | descriptor() { 32 | local file="$1"; shift # "blobs/sha256/xxx" 33 | local desc; desc="$(cat)" 34 | local shell 35 | shell="$(jq <<<"$desc" -L"$BASHBREW_META_SCRIPTS" --slurp --raw-output ' 36 | include "validate"; 37 | include "oci"; 38 | validate_one 39 | | validate_oci_descriptor 40 | | ( 41 | @sh "local algo=\( 42 | .digest 43 | | split(":")[0] 44 | | validate_IN(.; "sha256", "sha512") # TODO more algorithms? need more tools on the host 45 | )", 46 | 47 | @sh "local data=\( 48 | if has("data") then 49 | .data 50 | else " " end # empty string is valid base64 (which we should validate), but spaces are not, so we can use a single space to detect "data not set" 51 | )", 52 | 53 | empty 54 | ) 55 | ')" 56 | eval "$shell" 57 | local digest size dataDigest= dataSize= 58 | digest="$("${algo}sum" "$file" | cut -d' ' -f1)" 59 | digest="$algo:$digest" 60 | size="$(stat --dereference --format '%s' "$file")" 61 | if [ "$data" != ' ' ]; then 62 | dataDigest="$(base64 <<<"$data" -d | "${algo}sum" | cut -d' ' -f1)" 63 | dataDigest="$algo:$dataDigest" 64 | dataSize="$(base64 <<<"$data" -d | wc --bytes)" 65 | # TODO *technically* we could get clever here and pass `base64 -d` to something like `tee >(wc --bytes) >(dig="$(sha256sum | cut -d' ' -f1)" && echo "sha256:$dig" && false) > /dev/null` to avoid parsing the base64 twice, but then failure cases are less likely to be caught, so it's safer to simply redecode (and we can't decode into a variable because this might be binary data *and* bash will do newline munging in both directions) 66 | fi 67 | jq <<<"$desc" -L"$BASHBREW_META_SCRIPTS" --slurp --arg digest "$digest" --arg size "$size" --arg dataDigest "$dataDigest" --arg dataSize "$dataSize" ' 68 | include "validate"; 69 | validate_one 70 | | validate_IN(.digest; $digest) 71 | | validate_IN(.size; $size | tonumber) 72 | | if has("data") then 73 | validate(.data; 74 | $digest == $dataDigest 75 | and $size == $dataSize 76 | ; "(decoded) data has size \($dataSize) and digest \($dataDigest) (expected \($size) and \($digest))") 77 | else . 
end 78 | | empty 79 | ' 80 | } 81 | 82 | # TODO validate config (diff_ids, history, platform - gotta carry *two* levels of descriptors for that, and decompress all the layers 🙊) 83 | # TODO validate provenance/SBOM layer contents? 84 | 85 | image() { 86 | local file="$1"; shift 87 | echo "image: $file" 88 | local desc; desc="$(cat)" 89 | descriptor <<<"$desc" "$file" 90 | local shell 91 | shell="$( 92 | jq <<<"$desc" -L"$BASHBREW_META_SCRIPTS" --slurp --raw-output ' 93 | include "validate"; 94 | include "oci"; 95 | validate_length(.; 2) 96 | | .[0] as $desc 97 | | .[1] 98 | | validate_oci_image({ 99 | imageAttestation: IN($desc.annotations["vnd.docker.reference.type"]; "attestation-manifest"), 100 | }) 101 | | if $desc then 102 | validate_IN(.mediaType; $desc.mediaType) 103 | | validate_IN(.artifactType; $desc.artifactType) 104 | else . end 105 | | ( 106 | ( 107 | .config, .layers[] 108 | | @sh "descriptor <<<\(tojson) \(.digest | "blobs/\(sub(":"; "/"))")" 109 | ), 110 | 111 | empty # trailing comma 112 | ) 113 | ' /dev/stdin "$file" 114 | )" 115 | eval "$shell" 116 | } 117 | 118 | index() { 119 | local file="$1"; shift 120 | echo "index: $file" 121 | local desc; desc="$(cat)" 122 | if [ "$desc" != 'null' ]; then 123 | descriptor <<<"$desc" "$file" 124 | fi 125 | local shell 126 | shell="$( 127 | jq <<<"$desc" -L"$BASHBREW_META_SCRIPTS" --slurp --raw-output ' 128 | include "validate"; 129 | include "oci"; 130 | validate_length(.; 2) 131 | | .[0] as $desc 132 | | .[1] 133 | | validate_oci_index({ 134 | indexPlatformsOptional: (input_filename == "index.json"), 135 | }) 136 | | if $desc then 137 | validate_IN(.mediaType; $desc.mediaType) 138 | | validate_IN(.artifactType; $desc.artifactType) 139 | else . end 140 | | .manifests[] 141 | | ( 142 | .mediaType 143 | | if IN(media_types_index) then 144 | "index" 145 | elif IN(media_types_image) then 146 | "image" 147 | else 148 | error("UNSUPPORTED MEDIA TYPE: \(.)") 149 | end 150 | ) + @sh " <<<\(tojson) \(.digest | "blobs/\(sub(":"; "/"))")" 151 | ' /dev/stdin "$file" 152 | )" 153 | eval "$shell" 154 | } 155 | 156 | index <<<'null' index.json 157 | -------------------------------------------------------------------------------- /jenkins.jq: -------------------------------------------------------------------------------- 1 | include "meta"; 2 | 3 | # input: list of build objects i.e., builds.json 4 | # output: stream of crane copy command strings 5 | def crane_deploy_commands: 6 | reduce (.[] | select(.build.resolved and .build.arch == env.BASHBREW_ARCH)) as $i ({}; 7 | .[ $i.source.arches[$i.build.arch].archTags[] ] += [ 8 | $i.build.resolved.annotations["org.opencontainers.image.ref.name"] // error("\($i.build.img) missing a resolved ref") 9 | # TODO ideally we'd use .manifests[] here to take advantage of the filtering we've done at previous steps, but then we lose in-index annotations because `crane index append` can't really do that 10 | ] 11 | ) 12 | | to_entries[] 13 | | .key as $target 14 | | .value 15 | | if length == 1 then 16 | @sh "crane copy \(.) 
\($target)" 17 | else 18 | @sh "crane index append --tag \($target) " + (map("--manifest " + @sh) | join(" ")) + " --flatten" 19 | end 20 | ; 21 | 22 | # input: "build" object (with "buildId" top level key) 23 | # output: json object (to trigger the build on GitHub Actions) 24 | def gha_payload: 25 | { 26 | ref: "main", 27 | inputs: ( 28 | { 29 | buildId: .buildId, 30 | bashbrewArch: .build.arch, 31 | firstTag: .source.arches[.build.arch].tags[0], 32 | } + ( 33 | [ .build.resolvedParents[].manifests[].platform? | select(has("os.version")) | ."os.version" ][0] // "" 34 | | if . != "" then 35 | { windowsVersion: ( 36 | # https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/base-image-lifecycle 37 | # https://github.com/microsoft/hcsshim/blob/d9a4231b9d7a03dffdabb6019318fc43eb6ba996/osversion/windowsbuilds.go 38 | capture("^10[.]0[.](?[0-9]+)([.]|$)") 39 | | { 40 | # since this is specifically for GitHub Actions support, this is limited to the underlying versions they actually support 41 | # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources 42 | "26100": "2025", # https://oci.dag.dev/?image=mcr.microsoft.com/windows/servercore:ltsc2025 43 | "20348": "2022", # https://oci.dag.dev/?image=mcr.microsoft.com/windows/servercore:ltsc2022 44 | "17763": "2019", # https://oci.dag.dev/?image=mcr.microsoft.com/windows/servercore:ltsc2019 45 | "": "", 46 | }[.build] // "unknown" 47 | ) } 48 | else {} end 49 | ) 50 | ) 51 | } 52 | ; 53 | 54 | # input: full "build" object list (with "buildId" top level key) 55 | # output: filtered build list { "buildId value": { build object } } 56 | def get_arch_queue($arch): 57 | map_values( 58 | select( 59 | needs_build 60 | and .build.arch == $arch 61 | ) 62 | | if IN(.build.arch; "amd64", "i386", "windows-amd64") then 63 | # "GHA" architectures (anything we add a "gha_payload" to will be run on GHA in the queue) 64 | .gha_payload = (gha_payload | @json) 65 | else . 
end
66 | .identifier = .source.arches[.build.arch].tags[0]
67 | )
68 | ;
69 | def get_arch_queue:
70 | get_arch_queue(env.BASHBREW_ARCH)
71 | ;
72 | 
73 | # input: filtered "needs_build" build object list, like from get_arch_queue
74 | # output: simplified list of builds with a record of (build/trigger) count and number of current skips
75 | def jobs_record($pastJobs):
76 | map_values(
77 | .identifier as $identifier
78 | | $pastJobs[.buildId] // { count: 0, skips: 0 }
79 | | .identifier = $identifier
80 | # start skipping after 24 attempts, try once every 24 skips
81 | | if .count >= 24 and .skips < 24 then
82 | .skips += 1
83 | else
84 | # these ones should be built
85 | .skips = 0
86 | | .count += 1
87 | end
88 | )
89 | ;
90 | 
91 | # input: filtered "needs_build" build object list, like from get_arch_queue
92 | # $newJobs: output of jobs_record, used for filtering and sorting the queue
93 | # output: sorted build queue with skipped items removed
94 | def filter_skips_queue($newJobs):
95 | map(
96 | select(
97 | $newJobs[.buildId].skips == 0
98 | )
99 | )
100 | # the Jenkins job exports a JSON file that records the number of attempts so far per failing buildId, which lets this sort by attempt count; failing builds therefore always live at the bottom of the queue (ordered by how many times they have failed, with the most-failing last)
101 | | sort_by($newJobs[.buildId].count)
102 | ;
103 | -------------------------------------------------------------------------------- /meta.jq: --------------------------------------------------------------------------------
1 | # "build_should_sbom", etc.
2 | include "doi";
3 | 
4 | # input: "build" object (with "buildId" top level key)
5 | # output: boolean
6 | def needs_build:
7 | .build.resolved == null
8 | ;
9 | # input: "build" object (with "buildId" top level key)
10 | # output: string ("Builder", but normalized)
11 | def normalized_builder:
12 | .build.arch as $arch
13 | | .source.entries[0].Builder
14 | | if . == "" then
15 | if $arch | startswith("windows-") then
16 | # https://github.com/microsoft/Windows-Containers/issues/34
17 | "classic"
18 | else
19 | "buildkit"
20 | end
21 | else .
end 22 | ; 23 | # input: "docker.io/library/foo:bar" 24 | # output: "foo:bar" 25 | def normalize_ref_to_docker: 26 | ltrimstr("docker.io/") 27 | | ltrimstr("library/") 28 | ; 29 | # input: "build" object (with "buildId" top level key) 30 | # output: string "pull command" ("docker pull ..."), may be multiple lines, expects to run in Bash with "set -Eeuo pipefail", might be empty 31 | def pull_command: 32 | normalized_builder as $builder 33 | | if $builder == "classic" then 34 | [ 35 | ( 36 | .build.resolvedParents 37 | | to_entries[] 38 | | ( 39 | .value.manifests[0].annotations["org.opencontainers.image.ref.name"] 40 | // .value.annotations["org.opencontainers.image.ref.name"] 41 | // error("parent \(.key) missing ref") 42 | | normalize_ref_to_docker 43 | ) as $ref 44 | | @sh "docker pull \($ref)", 45 | @sh "docker tag \($ref) \(.key)" 46 | ), 47 | empty 48 | ] | join("\n") 49 | elif $builder == "buildkit" then 50 | "" # buildkit has to pull during build 🙈 51 | elif $builder == "oci-import" then 52 | "" # "oci-import" is essentially "FROM scratch" 53 | else 54 | error("unknown/unimplemented Builder: \($builder)") 55 | end 56 | ; 57 | # input: "build" object (with "buildId" top level key) 58 | # output: string "giturl" ("https://github.com/docker-library/golang.git#commit:directory), used for "docker buildx build giturl" 59 | def git_build_url: 60 | .source.entries[0] 61 | | ( 62 | .GitRepo 63 | | if (endswith(".git") | not) then 64 | if test("^https?://github.com/") then 65 | # without ".git" in the url "docker buildx build url" fails and tries to build the html repo page as a Dockerfile 66 | # https://github.com/moby/buildkit/blob/0e1e36ba9eb8142968b2c5cfa2f12549bf9246d9/util/gitutil/git_ref.go#L81-L87 67 | # https://github.com/docker/cli/issues/1738 68 | . + ".git" 69 | else 70 | error("\(.) does not end in '.git' so build will fail to recognize it as a Git URL") 71 | end 72 | else . end 73 | ) + "#" + .GitCommit + ":" + .Directory 74 | ; 75 | # input: "build" object (with "buildId" top level key) 76 | # output: map of annotations to set 77 | def build_annotations($buildUrl): 78 | { 79 | # https://github.com/opencontainers/image-spec/blob/v1.1.0/annotations.md#pre-defined-annotation-keys 80 | "org.opencontainers.image.source": $buildUrl, 81 | "org.opencontainers.image.revision": .source.entries[0].GitCommit, 82 | "org.opencontainers.image.created": (.source.entries[0].SOURCE_DATE_EPOCH | strftime("%FT%TZ")), # see notes below about image index vs image manifest 83 | 84 | # TODO come up with less assuming values here? (Docker Hub assumption, tag ordering assumption) 85 | "org.opencontainers.image.version": ( # value of the first image tag 86 | first(.source.arches[.build.arch].tags[] | select(contains(":"))) 87 | | sub("^.*:"; "") 88 | # TODO maybe we should do the first, longest, non-latest tag instead of just the first tag? 89 | ), 90 | "org.opencontainers.image.url": ( # URL to Docker Hub 91 | first(.source.arches[.build.arch].tags[] | select(contains(":"))) 92 | | sub(":.*$"; "") 93 | | if contains("/") then 94 | "r/" + . 95 | else 96 | "_/" + . 97 | end 98 | | "https://hub.docker.com/" + . 99 | ), 100 | 101 | # TODO org.opencontainers.image.vendor ? 
(feels leaky to put "Docker Official Images" here when this is all otherwise mostly generic)
102 | 
103 | "com.docker.official-images.bashbrew.arch": .build.arch,
104 | }
105 | + (
106 | .source.arches[.build.arch].lastStageFrom as $lastStageFrom
107 | | if $lastStageFrom then
108 | .build.parents[$lastStageFrom] as $lastStageDigest
109 | | {
110 | "org.opencontainers.image.base.name": $lastStageFrom,
111 | }
112 | + if $lastStageDigest then
113 | {
114 | "org.opencontainers.image.base.digest": .build.parents[$lastStageFrom],
115 | }
116 | else {} end
117 | else {} end
118 | )
119 | | with_entries(select(.value)) # strip off anything missing a value (possibly "source", "url", "version", "base.digest", etc)
120 | ;
121 | def build_annotations:
122 | build_annotations(git_build_url)
123 | ;
124 | # input: multi-line string with indentation and comments
125 | # output: multi-line string with less indentation and no comments
126 | def unindent_and_decomment_jq($indents):
127 | # trim out comment lines and unnecessary indentation
128 | gsub("(?m)^(\t+#[^\n]*\n?|\t{\($indents)}(?<extra>.*)$)"; "\(.extra // "")")
129 | # trim out empty lines
130 | | gsub("\n\n+"; "\n")
131 | ;
132 | # input: "build" object (with "buildId" top level key)
133 | # output: string "build command" ("docker buildx build ..."), may be multiple lines, expects to run in Bash with "set -Eeuo pipefail"
134 | def build_command:
135 | normalized_builder as $builder
136 | | if $builder == "buildkit" then
137 | git_build_url as $buildUrl
138 | | [
139 | (
140 | [
141 | @sh "SOURCE_DATE_EPOCH=\(.source.entries[0].SOURCE_DATE_EPOCH)",
142 | # TODO EXPERIMENTAL_BUILDKIT_SOURCE_POLICY=<(jq ...)
143 | "docker buildx build --progress=plain",
144 | @sh "--provenance=mode=max,builder-id=\(buildkit_provenance_builder_id)",
145 | if build_should_sbom then
146 | "--sbom=generator=\"$BASHBREW_BUILDKIT_SBOM_GENERATOR\""
147 | else empty end,
148 | "--output " + (
149 | [
150 | "type=oci",
151 | "dest=temp.tar",
152 | empty
153 | ]
154 | | @csv
155 | | @sh
156 | ),
157 | (
158 | build_annotations($buildUrl)
159 | | to_entries
160 | # separate loops so that "image manifest" annotations are grouped separately from the index/descriptor annotations (easier to read)
161 | | (
162 | .[]
163 | | @sh "--annotation \(.key + "=" + .value)"
164 | ),
165 | (
166 | .[]
167 | | @sh "--annotation \(
168 | "manifest-descriptor:" + .key + "="
169 | + if .key == "org.opencontainers.image.created" then
170 | # the "current" time breaks reproducibility (for the purposes of build verification), so we put "now" in the image index but "SOURCE_DATE_EPOCH" in the image manifest (which is the thing we'd ideally like to have reproducible, eventually)
171 | (env.SOURCE_DATE_EPOCH // now) | tonumber | strftime("%FT%TZ")
172 | # (this assumes the actual build is going to happen shortly after generating the command)
173 | else .value end
174 | )",
175 | empty
176 | )
177 | ),
178 | (
179 | (
180 | .source.arches[.build.arch]
181 | | .tags[], .archTags[]
182 | ),
183 | .build.img
184 | | "--tag " + @sh
185 | ),
186 | @sh "--platform \(.source.arches[.build.arch].platformString)",
187 | (
188 | .build.resolvedParents
189 | | to_entries[]
190 | | .key + "=docker-image://" + (
191 | .value.manifests[0].annotations["org.opencontainers.image.ref.name"]
192 | // .value.annotations["org.opencontainers.image.ref.name"]
193 | // error("parent \(.key) missing ref")
194 | | normalize_ref_to_docker
195 | )
196 | | "--build-context " + @sh
197 | ),
198 | "--build-arg
BUILDKIT_SYNTAX=\"$BASHBREW_BUILDKIT_SYNTAX\"", # TODO .doi/.bin/bashbrew-buildkit-env-setup.sh 199 | "--build-arg BUILDKIT_DOCKERFILE_CHECK=skip=all", # disable linting (https://github.com/moby/buildkit/pull/4962) 200 | @sh "--file \(.source.entries[0].File)", 201 | ($buildUrl | @sh), 202 | empty 203 | ] | join(" \\\n\t") 204 | ), 205 | # munge the tarball into a suitable "oci layout" directory (ready for "crane push") 206 | "mkdir temp", 207 | "tar -xvf temp.tar -C temp", 208 | "rm temp.tar", 209 | # TODO munge the image config here to remove any label that doesn't have a "." in the name (https://github.com/docker-library/official-images/pull/18692#issuecomment-2797149554; "thanks UBI/OpenShift/RedHat!") 210 | # munge the index to what crane wants ("Error: layout contains 5 entries, consider --index") 211 | @sh "jq \(" 212 | .manifests |= ( 213 | unique_by([ .digest, .size, .mediaType ]) 214 | | if length != 1 then 215 | error(\"unexpected number of manifests: \\(length)\") 216 | else . end 217 | ) 218 | " | unindent_and_decomment_jq(3)) temp/index.json > temp/index.json.new", 219 | "mv temp/index.json.new temp/index.json", 220 | # possible improvements in buildkit/buildx that could help us: 221 | # - allowing OCI output directly to a directory instead of a tar (thus getting symmetry with the oci-layout:// inputs it can take) 222 | # - allowing tag as one thing and push as something else, potentially mutually exclusive 223 | # - allowing annotations that are set for both "manifest" and "manifest-descriptor" simultaneously 224 | # - direct-to-containerd image storage 225 | empty 226 | ] | join("\n") 227 | elif $builder == "classic" then 228 | git_build_url as $buildUrl 229 | | [ 230 | ( 231 | [ 232 | @sh "SOURCE_DATE_EPOCH=\(.source.entries[0].SOURCE_DATE_EPOCH)", 233 | "DOCKER_BUILDKIT=0", 234 | "docker build", 235 | ( 236 | ( 237 | .source.arches[.build.arch] 238 | | .tags[], .archTags[] 239 | ), 240 | .build.img 241 | | "--tag " + @sh 242 | ), 243 | @sh "--platform \(.source.arches[.build.arch].platformString)", 244 | @sh "--file \(.source.entries[0].File)", 245 | ($buildUrl | @sh), 246 | empty 247 | ] 248 | | join(" \\\n\t") 249 | ), 250 | empty 251 | ] | join("\n") 252 | elif $builder == "oci-import" then 253 | [ 254 | @sh "build=\(tojson)", 255 | "\"$BASHBREW_META_SCRIPTS/helpers/oci-import.sh\" <<<\"$build\" temp", 256 | 257 | if build_should_sbom then 258 | "# SBOM", 259 | "mv temp temp.orig", 260 | "\"$BASHBREW_META_SCRIPTS/helpers/oci-sbom.sh\" <<<\"$build\" temp.orig temp", 261 | "rm -rf temp.orig", 262 | empty 263 | else empty end 264 | ] | join("\n") 265 | else 266 | error("unknown/unimplemented Builder: \($builder)") 267 | end 268 | ; 269 | # input: "build" object (with "buildId" top level key) 270 | # output: string "push command" ("docker push ..."), may be multiple lines, expects to run in Bash with "set -Eeuo pipefail" 271 | def push_command: 272 | normalized_builder as $builder 273 | | if $builder == "classic" then 274 | @sh "docker push \(.build.img)" 275 | elif IN($builder; "buildkit", "oci-import") then 276 | [ 277 | @sh "crane push temp \(.build.img)", 278 | "rm -rf temp", 279 | empty 280 | ] | join("\n") 281 | else 282 | error("unknown/unimplemented Builder: \($builder)") 283 | end 284 | ; 285 | # input: "build" object (with "buildId" top level key) 286 | # output: "commands" object with keys "pull", "build", "push" 287 | def commands: 288 | { 289 | pull: pull_command, 290 | build: build_command, 291 | push: push_command, 292 | } 293 | ; 294 | 
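# a minimal usage sketch (illustrative; assumes a builds.json in this repository's format, one "build" object per buildId):
#   jq -L"$BASHBREW_META_SCRIPTS" -r 'include "meta"; .[] | select(needs_build) | commands | .pull, .build, .push' builds.json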
-------------------------------------------------------------------------------- /om/om.go: -------------------------------------------------------------------------------- 1 | package om 2 | 3 | // https://github.com/golang/go/issues/27179 4 | 5 | import ( 6 | "bytes" 7 | "encoding/json" 8 | "fmt" 9 | ) 10 | 11 | // only supports string keys because JSON is the intended use case (and the JSON spec says only string keys are allowed) 12 | type OrderedMap[T any] struct { 13 | m map[string]T 14 | keys []string 15 | } 16 | 17 | func (m OrderedMap[T]) Keys() []string { 18 | return append([]string{}, m.keys...) 19 | } 20 | 21 | func (m OrderedMap[T]) Get(key string) T { 22 | return m.m[key] 23 | } 24 | 25 | // TODO Has()? two-return form of Get? (we don't need either right now) 26 | 27 | func (m *OrderedMap[T]) Set(key string, val T) { // TODO make this variadic so it can take an arbitrary number of pairs? (would be useful for tests, but we don't need something like that right now) 28 | if m.m == nil || m.keys == nil { 29 | m.m = map[string]T{} 30 | m.keys = []string{} 31 | } 32 | if _, ok := m.m[key]; !ok { 33 | m.keys = append(m.keys, key) 34 | } 35 | m.m[key] = val 36 | } 37 | 38 | func (m *OrderedMap[T]) UnmarshalJSON(b []byte) error { 39 | dec := json.NewDecoder(bytes.NewReader(b)) 40 | 41 | // read opening { 42 | if tok, err := dec.Token(); err != nil { 43 | return err 44 | } else if tok != json.Delim('{') { 45 | return fmt.Errorf("expected '{', got %T: %#v", tok, tok) 46 | } 47 | 48 | for { 49 | tok, err := dec.Token() 50 | if err != nil { 51 | return err 52 | } 53 | if tok == json.Delim('}') { 54 | break 55 | } 56 | key, ok := tok.(string) 57 | if !ok { 58 | return fmt.Errorf("expected string key, got %T: %#v", tok, tok) 59 | } 60 | var val T 61 | err = dec.Decode(&val) 62 | if err != nil { 63 | return err 64 | } 65 | m.Set(key, val) 66 | } 67 | 68 | if dec.More() { 69 | return fmt.Errorf("unexpected extra content after closing '}'") 70 | } 71 | 72 | return nil 73 | } 74 | 75 | func (m OrderedMap[T]) MarshalJSON() ([]byte, error) { 76 | var buf bytes.Buffer 77 | enc := json.NewEncoder(&buf) 78 | if err := buf.WriteByte('{'); err != nil { 79 | return nil, err 80 | } 81 | for i, key := range m.keys { 82 | if i > 0 { 83 | buf.WriteByte(',') 84 | } 85 | if err := enc.Encode(key); err != nil { 86 | return nil, err 87 | } 88 | buf.WriteByte(':') 89 | if err := enc.Encode(m.m[key]); err != nil { 90 | return nil, err 91 | } 92 | } 93 | if err := buf.WriteByte('}'); err != nil { 94 | return nil, err 95 | } 96 | return buf.Bytes(), nil 97 | } 98 | -------------------------------------------------------------------------------- /om/om_test.go: -------------------------------------------------------------------------------- 1 | package om_test 2 | 3 | import ( 4 | "encoding/json" 5 | "strconv" 6 | "testing" 7 | 8 | "github.com/docker-library/meta-scripts/om" 9 | ) 10 | 11 | func BenchmarkSet(b *testing.B) { 12 | var m om.OrderedMap[int] 13 | for i := 0; i < b.N; i++ { 14 | m.Set(strconv.Itoa(i), i) 15 | } 16 | } 17 | 18 | func assert[V comparable](t *testing.T, v V, expected V) { 19 | t.Helper() 20 | if v != expected { 21 | t.Fatalf("expected %v, got %v", expected, v) 22 | } 23 | } 24 | 25 | func assertJSON[V any](t *testing.T, v V, expected string) { 26 | t.Helper() 27 | b, err := json.Marshal(v) 28 | assert(t, err, nil) 29 | assert(t, string(b), expected) 30 | } 31 | 32 | func TestOrderedMapSet(t *testing.T) { 33 | var m om.OrderedMap[string] 34 | assertJSON(t, m, `{}`) 35 | m.Set("c", "a") 36 | 
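// (insertion order is what MarshalJSON preserves: "c" was set first, so it stays first in every encoding below, even after its value is overwritten)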
assert(t, m.Get("c"), "a") 37 | assert(t, m.Get("b"), "") 38 | assertJSON(t, m, `{"c":"a"}`) 39 | m.Set("b", "b") 40 | assertJSON(t, m, `{"c":"a","b":"b"}`) 41 | m.Set("a", "c") 42 | assertJSON(t, m, `{"c":"a","b":"b","a":"c"}`) 43 | m.Set("c", "d") 44 | assert(t, m.Get("c"), "d") 45 | assertJSON(t, m, `{"c":"d","b":"b","a":"c"}`) 46 | keys := m.Keys() 47 | assert(t, len(keys), 3) 48 | assert(t, keys[0], "c") 49 | assert(t, keys[1], "b") 50 | assert(t, keys[2], "a") 51 | keys[0] = "d" // make sure the result of .Keys cannot modify the original 52 | keys = m.Keys() 53 | assert(t, keys[0], "c") 54 | } 55 | 56 | func TestOrderedMapUnmarshal(t *testing.T) { 57 | var m om.OrderedMap[string] 58 | assert(t, json.Unmarshal([]byte(`{}`), &m), nil) 59 | assertJSON(t, m, `{}`) 60 | assert(t, json.Unmarshal([]byte(`{ "foo" : "bar" }`), &m), nil) 61 | assertJSON(t, m, `{"foo":"bar"}`) 62 | assert(t, json.Unmarshal([]byte(`{ "baz" : "buzz" }`), &m), nil) 63 | assertJSON(t, m, `{"foo":"bar","baz":"buzz"}`) 64 | assert(t, json.Unmarshal([]byte(`{ "foo" : "foo" }`), &m), nil) 65 | assertJSON(t, m, `{"foo":"foo","baz":"buzz"}`) 66 | } 67 | 68 | func TestOrderedMapUnmarshalDupes(t *testing.T) { 69 | var m om.OrderedMap[string] 70 | assert(t, json.Unmarshal([]byte(`{ "foo":"foo", "bar":"bar", "foo":"baz" }`), &m), nil) 71 | assertJSON(t, m, `{"foo":"baz","bar":"bar"}`) 72 | } 73 | -------------------------------------------------------------------------------- /provenance.jq: -------------------------------------------------------------------------------- 1 | # input: "build" object with platform and image digest 2 | # $github: "github" context; CONTAINS SENSITIVE INFORMATION (https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#github-context) 3 | # $runner: "runner" context; https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/accessing-contextual-information-about-workflow-runs#runner-context 4 | # $digest: the OCI image digest for the just-built image (normally in .build.resolved.annotations["org.opencontainers.image.ref.name"] but only post-push/regeneration and we haven't pushed yet) 5 | # 6 | # output: in-toto provenance statement (https://slsa.dev/spec/v1.0/provenance) 7 | # see also: https://github.com/actions/buildtypes/tree/main/workflow/v1 8 | def github_actions_provenance($github; $runner; $digest): 9 | if $github.event_name != "workflow_dispatch" then error("error: '\($github.event_name)' is not a supported event type for provenance generation") else 10 | { 11 | _type: "https://in-toto.io/Statement/v1", 12 | subject: [ 13 | ($digest | split(":")) as $splitDigest 14 | | (.source.arches[.build.arch].platformString) as $platform 15 | | ( 16 | .source.arches[.build.arch].tags[], 17 | .source.arches[.build.arch].archTags[], 18 | .build.img, 19 | empty # trailing comma 20 | ) 21 | | { 22 | # https://github.com/package-url/purl-spec/blob/b33dda1cf4515efa8eabbbe8e9b140950805f845/PURL-TYPES.rst#docker (this matches what BuildKit generates as of 2024-09-18; "oci" would also be a reasonable choice, but would require signer and policy changes to support, and be more complex to generate accurately) 23 | name: "pkg:docker/\(.)?platform=\($platform | @uri)", 24 | digest: { ($splitDigest[0]): $splitDigest[1] }, 25 | } 26 | ], 27 | predicateType: "https://slsa.dev/provenance/v1", 28 | predicate: { 29 | buildDefinition: { 30 | buildType: "https://actions.github.io/buildtypes/workflow/v1", 31 | 
externalParameters: {
32 | workflow: {
33 | # TODO this matches how this is documented/suggested in GitHub's buildType documentation, but does not account for the workflow file being in a separate repository at a separate ref from the "source" (which the "workflow_ref" field *does* account for), so that would/will change how we need to calculate these values if we ever do that (something like "^(?<repo>[^/]+/[^/]+)/(?<path>.*)@(?<ref>refs/.*)$" on $github.workflow_ref ?)
34 | ref: $github.ref,
35 | repository: ($github.server_url + "/" + $github.repository),
36 | path: (
37 | $github.workflow_ref
38 | | ltrimstr($github.repository + "/")
39 | | rtrimstr("@" + $github.ref)
40 | | if contains("@") then error("parsing 'workflow_ref' failed: '\(.)'") else . end
41 | ),
42 | # not required, but useful/important (and potentially, though unlikely, different from $github.sha used in resolvedDependencies below):
43 | digest: { gitCommit: $github.workflow_sha },
44 | },
45 | inputs: $github.event.inputs, # https://docs.github.com/en/webhooks/webhook-events-and-payloads#workflow_dispatch
46 | },
47 | internalParameters: {
48 | github: {
49 | event_name: $github.event_name,
50 | repository_id: $github.repository_id,
51 | repository_owner_id: $github.repository_owner_id,
52 | runner_environment: $runner.environment,
53 | },
54 | },
55 | resolvedDependencies: [
56 | {
57 | uri: "git+\($github.server_url)/\($github.repository)@\($github.ref)",
58 | digest: { "gitCommit": $github.sha },
59 | },
60 | # TODO figure out a way to include resolved action SHAs from "uses:" expressions
61 | # TODO include more resolved dependencies
62 | empty # trailing comma
63 | ],
64 | },
65 | runDetails: {
66 | # builder.id identifies the transitive closure of the trusted build platform evaluation.
67 | # any changes that alter security properties or build level must update this ID and rotate the signing key.
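# (illustrative example of the resulting value, using a hypothetical workflow ref: "https://github.com/docker-library/meta/.github/workflows/build.yml@refs/heads/main")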
68 | # https://slsa.dev/spec/v1.0/provenance#builder 69 | builder: { 70 | id: ($github.server_url + "/" + $github.workflow_ref), 71 | }, 72 | metadata: { 73 | invocationId: ($github.server_url + "/" + $github.repository + "/actions/runs/" + $github.run_id + "/attempts/" + $github.run_attempt) 74 | }, 75 | }, 76 | }, 77 | } 78 | end 79 | ; 80 | -------------------------------------------------------------------------------- /registry/annotations.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | const ( 4 | AnnotationBashbrewArch = "com.docker.official-images.bashbrew.arch" 5 | 6 | // https://github.com/moby/buildkit/blob/c6145c2423de48f891862ac02f9b2653864d3c9e/docs/attestations/attestation-storage.md 7 | annotationBuildkitReferenceType = "vnd.docker.reference.type" 8 | annotationBuildkitReferenceTypeAttestation = "attestation-manifest" 9 | annotationBuildkitReferenceDigest = "vnd.docker.reference.digest" 10 | 11 | // https://github.com/distribution/distribution/blob/v3.0.0/docs/content/spec/manifest-v2-2.md 12 | mediaTypeDockerManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" 13 | mediaTypeDockerImageManifest = "application/vnd.docker.distribution.manifest.v2+json" 14 | mediaTypeDockerImageConfig = "application/vnd.docker.container.image.v1+json" 15 | ) 16 | -------------------------------------------------------------------------------- /registry/client.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | "os" 8 | "slices" 9 | "strings" 10 | "sync" 11 | 12 | "cuelabs.dev/go/oci/ociregistry" 13 | "cuelabs.dev/go/oci/ociregistry/ociauth" 14 | "cuelabs.dev/go/oci/ociregistry/ociclient" 15 | ) 16 | 17 | // returns an [ociregistry.Interface] that automatically implements an in-memory cache (see [RegistryCache]) *and* transparent rate limiting + retry (see [registryRateLimiters]/[rateLimitedRetryingDoer]) / `DOCKERHUB_PUBLIC_PROXY` support for Docker Hub (cached such that multiple calls for the same registry transparently return the same client object / in-memory registry cache) 18 | func Client(host string, opts *ociclient.Options) (ociregistry.Interface, error) { 19 | f, _ := clientCache.LoadOrStore(host, sync.OnceValues(func() (ociregistry.Interface, error) { 20 | authConfig, err := authConfigFunc() 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | var clientOptions ociclient.Options 26 | if opts != nil { 27 | clientOptions = *opts 28 | } 29 | if clientOptions.Transport == nil { 30 | clientOptions.Transport = http.DefaultTransport 31 | } 32 | 33 | // make sure we set User-Agent explicitly; this is first so that everything else has an explicit layer at the bottom setting User-Agent so we don't miss any requests 34 | // IMPORTANT: this wrapper stays first! 
(https://github.com/cue-labs/oci/issues/37#issuecomment-2628321222) 35 | clientOptions.Transport = &userAgentRoundTripper{ 36 | roundTripper: clientOptions.Transport, 37 | userAgent: "https://github.com/docker-library/meta-scripts", // TODO allow this to be modified via environment variable 38 | } 39 | 40 | // if we have a rate limiter configured for this registry, shim it in 41 | if limiter, ok := registryRateLimiters[host]; ok { 42 | clientOptions.Transport = &rateLimitedRetryingRoundTripper{ 43 | roundTripper: clientOptions.Transport, 44 | limiter: limiter, 45 | } 46 | } 47 | 48 | // install the "authorization" wrapper/shim 49 | clientOptions.Transport = ociauth.NewStdTransport(ociauth.StdTransportParams{ 50 | Config: authConfig, 51 | Transport: clientOptions.Transport, 52 | }) 53 | 54 | connectHost := host 55 | if host == dockerHubCanonical { 56 | connectHost = dockerHubConnect 57 | } else if host == "localhost" || strings.HasPrefix(host, "localhost:") { 58 | // assume localhost means HTTP 59 | clientOptions.Insecure = true 60 | // TODO some way for callers to specify that their "localhost" *does* require TLS (maybe only do this if `opts == nil`, but then users cannot supply *any* options and still get help setting Insecure for localhost 🤔 -- at least this is a more narrow use case than the opposite of not having a way to have non-localhost insecure registries) 61 | } 62 | 63 | hostOptions := clientOptions // make a copy, since "ociclient.New" mutates it (such that sharing the object afterwards probably isn't the best idea -- they'll have the same DebugID if so, which isn't ideal) 64 | client, err := ociclient.New(connectHost, &hostOptions) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | if host == dockerHubCanonical { 70 | var proxyHost string 71 | proxyOptions := clientOptions 72 | if proxy := os.Getenv("DOCKERHUB_PUBLIC_PROXY"); proxy != "" { 73 | proxyUrl, err := url.Parse(proxy) 74 | if err != nil { 75 | return nil, fmt.Errorf("error parsing DOCKERHUB_PUBLIC_PROXY: %w", err) 76 | } 77 | if proxyUrl.Host == "" { 78 | return nil, fmt.Errorf("DOCKERHUB_PUBLIC_PROXY was set, but has no host") 79 | } 80 | proxyHost = proxyUrl.Host 81 | switch proxyUrl.Scheme { 82 | case "", "https": 83 | proxyOptions.Insecure = false 84 | case "http": 85 | proxyOptions.Insecure = true 86 | default: 87 | return nil, fmt.Errorf("unknown DOCKERHUB_PUBLIC_PROXY scheme: %q", proxyUrl.Scheme) 88 | } 89 | switch proxyUrl.Path { 90 | case "", "/": 91 | // do nothing, this is fine 92 | default: 93 | return nil, fmt.Errorf("unsupported DOCKERHUB_PUBLIC_PROXY (with path)") 94 | } 95 | // TODO complain about other URL bits (unsupported by "ociclient" except via custom "RoundTripper") 96 | } else if proxy := os.Getenv("DOCKERHUB_PUBLIC_PROXY_HOST"); proxy != "" { 97 | proxyHost = proxy 98 | } 99 | if proxyHost != "" { 100 | proxyClient, err := ociclient.New(proxyHost, &proxyOptions) 101 | if err != nil { 102 | return nil, err 103 | } 104 | 105 | // see https://github.com/cue-labs/oci/blob/8cd71b4d542c55ae2ab515d4a0408ffafe41b549/ociregistry/ocifilter/readonly.go#L22 for the inspiration of this amazing hack (DOCKERHUB_PUBLIC_PROXY is designed for *only* reading content, but is a "pure" mirror in that even a 404 from the proxy is considered authoritative / accurate) 106 | // TODO *technically*, a non-404/429 error from the proxy should probably result in a fallback to Docker Hub, but this should be fine for now 107 | type deeper struct { 108 | // "If you're writing your own implementation of Funcs, you'll 
need to embed a *Funcs value to get an implementation of the private method. This means that it will be possible to add members to Interface in the future without breaking compatibility." 109 | *ociregistry.Funcs 110 | } 111 | client = struct { 112 | // see also https://pkg.go.dev/cuelabs.dev/go/oci/ociregistry#Interface 113 | ociregistry.Reader 114 | ociregistry.Lister 115 | ociregistry.Writer 116 | ociregistry.Deleter 117 | deeper // "One level deeper so the Reader and Lister values take precedence, following Go's shallower-method-wins rules." 118 | }{ 119 | Reader: proxyClient, 120 | Lister: proxyClient, 121 | Writer: client, 122 | Deleter: client, 123 | } 124 | } 125 | } 126 | 127 | // make sure this registry gets a dedicated in-memory cache (so we never look up the same repo@digest or repo:tag twice for the lifetime of our program) 128 | client = RegistryCache(client) 129 | // TODO some way for callers of this to *not* get a RegistryCache instance? (or to provide options to the one we create -- see TODO on RegistryCache constructor function) 130 | 131 | return client, nil 132 | })) 133 | return f.(func() (ociregistry.Interface, error))() 134 | } 135 | 136 | type dockerAuthConfigWrapper struct { 137 | ociauth.Config 138 | } 139 | 140 | // for Docker Hub, display should be docker.io, auth should be index.docker.io, and requests should be registry-1.docker.io (thanks to a lot of mostly uninteresting historical facts), so this hacks up ociauth to handle that case more cleanly by wrapping the actual auth config (see "dockerHubHosts" map) 141 | func (c dockerAuthConfigWrapper) EntryForRegistry(host string) (ociauth.ConfigEntry, error) { 142 | var zero ociauth.ConfigEntry // "EntryForRegistry" doesn't return an error on a miss - it just returns an empty object (so we create this to have something to trivially compare against for our fallback) 143 | if entry, err := c.Config.EntryForRegistry(host); err == nil && entry != zero { 144 | return entry, err 145 | } else if dockerHubHosts[host] { 146 | // https://github.com/docker-library/meta-scripts/pull/32#issuecomment-2018950756 (TODO hopefully someday we can replace this with something like `iter.Sorted(maps.Keys(dockerHubHosts))`; https://github.com/golang/go/issues/61900) 147 | keys := make([]string, 0, len(dockerHubHosts)) 148 | for k := range dockerHubHosts { 149 | keys = append(keys, k) 150 | } 151 | slices.Sort(keys) 152 | for _, dockerHubHost := range keys { 153 | if dockerHubHost == "" { 154 | continue 155 | } 156 | if entry, err := c.Config.EntryForRegistry(dockerHubHost); err == nil && entry != zero { 157 | return entry, err 158 | } 159 | } 160 | return entry, err 161 | } else { 162 | return entry, err 163 | } 164 | } 165 | 166 | var ( 167 | authConfigFunc = sync.OnceValues(func() (ociauth.Config, error) { 168 | config, err := ociauth.Load(nil) 169 | if err != nil { 170 | return nil, fmt.Errorf("cannot load auth configuration: %w", err) 171 | } 172 | return dockerAuthConfigWrapper{config}, nil 173 | }) 174 | clientCache = sync.Map{} // "(normalized) host" => OnceValues() => ociregistry.Interface, error 175 | ) 176 | -------------------------------------------------------------------------------- /registry/docker-hub.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | const ( 4 | dockerHubCanonical = "docker.io" 5 | dockerHubConnect = "registry-1.docker.io" // https://github.com/moby/moby/blob/bf053be997f87af233919a76e6ecbd7d17390e62/registry/config.go#L42 6 | ) 7 | 8 | var 
( 9 | dockerHubHosts = map[string]bool{ 10 | // both dockerHub values above should be listed here (not using variables so this list stays prettier and so we don't miss any if dockerHubConnect changes in the future) 11 | "": true, 12 | "docker.io": true, 13 | "index.docker.io": true, 14 | "registry-1.docker.io": true, 15 | "registry.hub.docker.com": true, 16 | } 17 | ) 18 | 19 | // see also "rate-limits.go" for per-registry rate limits (of which Docker Hub is the primary use case) 20 | -------------------------------------------------------------------------------- /registry/lookup.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | 8 | "cuelabs.dev/go/oci/ociregistry" 9 | "cuelabs.dev/go/oci/ociregistry/ocimem" 10 | ) 11 | 12 | // see `LookupType*` consts for possible values for this type 13 | type LookupType string 14 | 15 | const ( 16 | LookupTypeManifest LookupType = "manifest" 17 | LookupTypeBlob LookupType = "blob" 18 | ) 19 | 20 | type LookupOptions struct { 21 | // unspecified implies [LookupTypeManifest] 22 | Type LookupType 23 | 24 | // whether or not to do a HEAD instead of a GET (will still return an [ociregistry.BlobReader], but with an empty body / zero bytes) 25 | Head bool 26 | 27 | // TODO allow providing a Descriptor here for more validation and/or for automatic usage of any usable/valid Data field? 28 | // TODO (also, if the provided Reference includes a Digest, we should probably validate it? are there cases where we don't want to / shouldn't?) 29 | } 30 | 31 | // a wrapper around [ociregistry.Interface.GetManifest] (and `GetTag`, `GetBlob`, and the `Resolve*` versions of the above) that accepts a [Reference] and always returns a [ociregistry.BlobReader] (in the case of a HEAD request, it will be a zero-length reader with just a valid descriptor) 32 | func Lookup(ctx context.Context, ref Reference, opts *LookupOptions) (ociregistry.BlobReader, error) { 33 | client, err := Client(ref.Host, nil) 34 | if err != nil { 35 | return nil, fmt.Errorf("%s: failed getting client: %w", ref, err) 36 | } 37 | 38 | var o LookupOptions 39 | if opts != nil { 40 | o = *opts 41 | } 42 | 43 | var ( 44 | r ociregistry.BlobReader 45 | desc ociregistry.Descriptor 46 | ) 47 | switch o.Type { 48 | case LookupTypeManifest, "": 49 | if ref.Digest != "" { 50 | if o.Head { 51 | desc, err = client.ResolveManifest(ctx, ref.Repository, ref.Digest) 52 | } else { 53 | r, err = client.GetManifest(ctx, ref.Repository, ref.Digest) 54 | } 55 | } else { 56 | tag := ref.Tag 57 | if tag == "" { 58 | tag = "latest" 59 | } 60 | if o.Head { 61 | desc, err = client.ResolveTag(ctx, ref.Repository, tag) 62 | } else { 63 | r, err = client.GetTag(ctx, ref.Repository, tag) 64 | } 65 | } 66 | 67 | case LookupTypeBlob: 68 | // TODO error if Digest == "" ? 
(ociclient already does for us, so we can probably just pass it through here without much worry) 69 | if o.Head { 70 | desc, err = client.ResolveBlob(ctx, ref.Repository, ref.Digest) 71 | } else { 72 | r, err = client.GetBlob(ctx, ref.Repository, ref.Digest) 73 | } 74 | 75 | default: 76 | return nil, fmt.Errorf("unknown LookupType: %q", o.Type) 77 | } 78 | 79 | // normalize 404 and 404-like to nil return (so it's easier to detect) 80 | if err != nil { 81 | if errors.Is(err, ociregistry.ErrBlobUnknown) || 82 | errors.Is(err, ociregistry.ErrManifestUnknown) || 83 | errors.Is(err, ociregistry.ErrNameUnknown) { 84 | // obvious 404 cases 85 | return nil, nil 86 | } 87 | var httpErr ociregistry.HTTPError 88 | if errors.As(err, &httpErr) && (httpErr.StatusCode() == 404 || 89 | // 401 often means "repository not found" (due to the nature of public/private mixing on Hub and the fact that ociauth definitely handled any possible authentication for us, so if we're still getting 401 it's unavoidable and might as well be 404, and 403 because getting 401 is actually a server bug that ociclient/ociauth works around for us in https://github.com/cue-labs/oci/commit/7eb5fc60a0e025038cd64d7f5df0a461136d5e9b) 90 | httpErr.StatusCode() == 401 || httpErr.StatusCode() == 403) { 91 | return nil, nil 92 | } 93 | return r, err 94 | } 95 | 96 | if o.Head { 97 | r = ocimem.NewBytesReader(nil, desc) 98 | } 99 | 100 | return r, err 101 | } 102 | -------------------------------------------------------------------------------- /registry/manifest-children.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | ocispec "github.com/opencontainers/image-spec/specs-go/v1" 7 | ) 8 | 9 | type ManifestChildren struct { 10 | // *technically* this should be two separate structs chosen based on mediaType (https://github.com/opencontainers/distribution-spec/security/advisories/GHSA-mc8v-mgrf-8f4m), but that makes the code a lot more annoying when we're just collecting a list of potential children we need to copy over for the parent object to push successfully 11 | 12 | // intentional subset of https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/index.go#L21 to minimize parsing 13 | Manifests []ocispec.Descriptor `json:"manifests"` 14 | 15 | // intentional subset of https://github.com/opencontainers/image-spec/blob/v1.1.0/specs-go/v1/manifest.go#L20 to minimize parsing 16 | Config *ocispec.Descriptor `json:"config"` // have to turn this into a pointer so we can recognize when it's not set easier / more correctly 17 | Layers []ocispec.Descriptor `json:"layers"` 18 | } 19 | 20 | // opportunistically parse a given manifest for any *potential* child objects; will return JSON parsing errors for non-JSON 21 | func ParseManifestChildren(manifest []byte) (ManifestChildren, error) { 22 | var manifestChildren ManifestChildren 23 | err := json.Unmarshal(manifest, &manifestChildren) 24 | return manifestChildren, err 25 | } 26 | -------------------------------------------------------------------------------- /registry/push.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "maps" 10 | 11 | "cuelabs.dev/go/oci/ociregistry" 12 | godigest "github.com/opencontainers/go-digest" 13 | ocispec "github.com/opencontainers/image-spec/specs-go/v1" 14 | ) 15 | 16 | var ( 17 | // if a blob is more than this many 
bytes, we'll do a pre-flight HEAD request to verify whether we need to even bother pushing it before we do so (65535 is the theoretical maximum size of a single TCP packet, although MTU means it's usually closer to 1448 bytes, but this seemed like a sane place to draw a line to where a second request that might fail is worth our time) 18 | BlobSizeWorthHEAD = int64(65535) 19 | ) 20 | 21 | // this makes sure the given manifest (index or image) is available at the provided name (tag or digest), including copying any children (manifests or config+layers) if necessary and able (via the provided child lookup map) 22 | func EnsureManifest(ctx context.Context, ref Reference, manifest json.RawMessage, mediaType string, childRefs map[ociregistry.Digest]Reference) (ociregistry.Descriptor, error) { 23 | desc := ociregistry.Descriptor{ 24 | MediaType: mediaType, 25 | Digest: godigest.FromBytes(manifest), 26 | Size: int64(len(manifest)), 27 | } 28 | if ref.Digest != "" { 29 | if ref.Digest != desc.Digest { 30 | return desc, fmt.Errorf("%s: digest mismatch: %s", ref, desc.Digest) 31 | } 32 | } else if ref.Tag == "" { 33 | ref.Digest = desc.Digest 34 | } 35 | 36 | if _, ok := childRefs[""]; !ok { 37 | // empty digest is a "fallback" ref for where missing children might be found (if we don't have one, inject one) 38 | childRefs[""] = ref 39 | } 40 | 41 | client, err := Client(ref.Host, nil) 42 | if err != nil { 43 | return desc, fmt.Errorf("%s: failed getting client: %w", ref, err) 44 | } 45 | 46 | // try HEAD request before pushing 47 | // if it matches, then we can assume child objects exist as well 48 | headRef := ref 49 | if headRef.Tag != "" { 50 | // if this function is called with *both* tag *and* digest, the code below works correctly and pushes by tag and then validates by digest, but this lookup specifically will prefer the digest instead and skip when it shouldn't 51 | headRef.Digest = "" 52 | } 53 | r, err := Lookup(ctx, headRef, &LookupOptions{Head: true}) 54 | if err != nil { 55 | return desc, fmt.Errorf("%s: failed HEAD: %w", ref, err) 56 | } 57 | // TODO if we had some kind of progress interface, this would be a great place for some kind of debug log of head's contents 58 | if r != nil { 59 | head := r.Descriptor() 60 | r.Close() 61 | if head.Digest == desc.Digest && head.Size == desc.Size { 62 | return head, nil 63 | } 64 | } 65 | 66 | // since we need to potentially retry this call after copying/mounting children, let's wrap it up for ease of use 67 | pushManifest := func() (ociregistry.Descriptor, error) { 68 | return client.PushManifest(ctx, ref.Repository, ref.Tag, manifest, mediaType) 69 | } 70 | rDesc, err := pushManifest() 71 | if err != nil { 72 | var httpErr ociregistry.HTTPError 73 | if errors.Is(err, ociregistry.ErrManifestBlobUnknown) || 74 | errors.Is(err, ociregistry.ErrBlobUnknown) || 75 | (errors.As(err, &httpErr) && httpErr.StatusCode() >= 400 && httpErr.StatusCode() <= 499) { 76 | // this probably means we need to push some child manifests and/or mount missing blobs (and then retry the manifest push) 77 | manifestChildren, err := ParseManifestChildren(manifest) 78 | if err != nil { 79 | return desc, fmt.Errorf("%s: failed parsing manifest JSON: %w", ref, err) 80 | } 81 | 82 | childToRefs := func(child ocispec.Descriptor) (Reference, Reference) { 83 | childTargetRef := Reference{ 84 | Host: ref.Host, 85 | Repository: ref.Repository, 86 | Digest: child.Digest, 87 | } 88 | childRef, ok := childRefs[child.Digest] 89 | if !ok { 90 | childRef = childRefs[""] 91 | } 92 | 
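// whichever ref was chosen (an explicit entry from childRefs or the "" fallback), clear any tag and pin it to this child's digest so the lookups below fetch exactly the object the parent manifest references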
childRef.Tag = "" 93 | childRef.Digest = child.Digest 94 | return childRef, childTargetRef 95 | } 96 | 97 | for _, child := range manifestChildren.Manifests { 98 | childRef, childTargetRef := childToRefs(child) 99 | r, err := Lookup(ctx, childRef, nil) 100 | if err != nil { 101 | return desc, fmt.Errorf("%s: manifest lookup failed: %w", childRef, err) 102 | } 103 | if r == nil { 104 | return desc, fmt.Errorf("%s: manifest not found", childRef) 105 | } 106 | //defer r.Close() 107 | // TODO validate r.Descriptor ? 108 | // TODO use readHelperRaw here (maybe a new "readHelperAll" wrapper too?) 109 | b, err := io.ReadAll(r) 110 | if err != nil { 111 | r.Close() 112 | return desc, fmt.Errorf("%s: ReadAll of GetManifest failed: %w", childRef, err) 113 | } 114 | if err := r.Close(); err != nil { 115 | return desc, fmt.Errorf("%s: Close of GetManifest failed: %w", childRef, err) 116 | } 117 | grandchildRefs := maps.Clone(childRefs) 118 | grandchildRefs[""] = childRef // make the child's ref explicitly the "fallback" ref for any of its children 119 | if _, err := EnsureManifest(ctx, childTargetRef, b, child.MediaType, grandchildRefs); err != nil { 120 | return desc, fmt.Errorf("%s: EnsureManifest failed: %w", ref, err) 121 | } 122 | // TODO validate descriptor from EnsureManifest? (at the very least, Digest and Size) 123 | } 124 | 125 | var childBlobs []ocispec.Descriptor 126 | if manifestChildren.Config != nil { 127 | childBlobs = append(childBlobs, *manifestChildren.Config) 128 | } 129 | childBlobs = append(childBlobs, manifestChildren.Layers...) 130 | for _, child := range childBlobs { 131 | childRef, childTargetRef := childToRefs(child) 132 | // TODO if blob sets URLs, don't bother (foreign layer) -- maybe check for those MediaTypes explicitly? (not a high priority as they're no longer used and officially discouraged/deprecated; would only matter if Tianon wants to use this for "hell/win" too 👀) 133 | if _, err := CopyBlob(ctx, childRef, childTargetRef); err != nil { 134 | return desc, fmt.Errorf("%s: CopyBlob(%s) failed: %w", childTargetRef, childRef, err) 135 | } 136 | // TODO validate CopyBlob returned descriptor? (at the very least, Digest and Size) 137 | } 138 | 139 | rDesc, err = pushManifest() 140 | if err != nil { 141 | return desc, fmt.Errorf("%s: PushManifest failed: %w", ref, err) 142 | } 143 | } else { 144 | return desc, fmt.Errorf("%s: error pushing (does not appear to be missing manifest/blob related): %w", ref, err) 145 | } 146 | } 147 | // TODO validate MediaType and Size too? 🤷 148 | if rDesc.Digest != desc.Digest { 149 | return desc, fmt.Errorf("%s: pushed digest from registry (%s) does not match expected digest (%s)", ref, rDesc.Digest, desc.Digest) 150 | } 151 | return desc, nil 152 | } 153 | 154 | // this copies a manifest (index or image) and all child objects (manifests or config+layers) from one name to another 155 | func CopyManifest(ctx context.Context, srcRef, dstRef Reference, childRefs map[ociregistry.Digest]Reference) (ociregistry.Descriptor, error) { 156 | var desc ociregistry.Descriptor 157 | 158 | // wouldn't it be nice if MountBlob for manifests was a thing? 
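// (since there isn't, we fall back to reading the full manifest from srcRef and re-pushing it at dstRef via EnsureManifest below, which also recursively ensures any children)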
🥺 159 | r, err := Lookup(ctx, srcRef, nil) 160 | if err != nil { 161 | return desc, fmt.Errorf("%s: lookup failed: %w", srcRef, err) 162 | } 163 | if r == nil { 164 | return desc, fmt.Errorf("%s: manifest not found", srcRef) 165 | } 166 | defer r.Close() 167 | desc = r.Descriptor() 168 | 169 | manifest, err := io.ReadAll(r) 170 | if err != nil { 171 | return desc, fmt.Errorf("%s: reading manifest failed: %w", srcRef, err) 172 | } 173 | 174 | if _, ok := childRefs[""]; !ok { 175 | // if we don't have a fallback, set it to src 176 | childRefs[""] = srcRef 177 | } 178 | 179 | return EnsureManifest(ctx, dstRef, manifest, desc.MediaType, childRefs) 180 | } 181 | 182 | // this takes an [io.Reader] of content and makes sure it is available as a blob in the given repository+digest (if larger than [BlobSizeWorthHEAD], this might return without consuming any of the provided [io.Reader]) 183 | func EnsureBlob(ctx context.Context, ref Reference, size int64, content io.Reader) (ociregistry.Descriptor, error) { 184 | desc := ociregistry.Descriptor{ 185 | Digest: ref.Digest, 186 | Size: size, 187 | } 188 | 189 | if ref.Digest == "" { 190 | return desc, fmt.Errorf("%s: blobs must be pushed by digest", ref) 191 | } 192 | if ref.Tag != "" { 193 | return desc, fmt.Errorf("%s: blobs cannot have tags", ref) 194 | } 195 | 196 | if desc.Size > BlobSizeWorthHEAD { 197 | r, err := Lookup(ctx, ref, &LookupOptions{Type: LookupTypeBlob, Head: true}) 198 | if err != nil { 199 | return desc, fmt.Errorf("%s: failed HEAD: %w", ref, err) 200 | } 201 | // TODO if we had some kind of progress interface, this would be a great place for some kind of debug log of head's contents 202 | if r != nil { 203 | head := r.Descriptor() 204 | r.Close() 205 | if head.Digest == desc.Digest && head.Size == desc.Size { 206 | return head, nil 207 | } 208 | } 209 | } 210 | 211 | client, err := Client(ref.Host, nil) 212 | if err != nil { 213 | return desc, fmt.Errorf("%s: error getting Client: %w", ref, err) 214 | } 215 | 216 | return client.PushBlob(ctx, ref.Repository, desc, content) 217 | } 218 | 219 | // this copies a blob from one repository to another 220 | func CopyBlob(ctx context.Context, srcRef, dstRef Reference) (ociregistry.Descriptor, error) { 221 | var desc ociregistry.Descriptor 222 | 223 | if srcRef.Digest == "" { 224 | return desc, fmt.Errorf("%s: missing digest (cannot copy blob without digest)", srcRef) 225 | } else if !(dstRef.Digest == "" || dstRef.Digest == srcRef.Digest) { 226 | return desc, fmt.Errorf("%s: digest mismatch in copy: %s", dstRef, srcRef) 227 | } else { 228 | dstRef.Digest = srcRef.Digest 229 | } 230 | if srcRef.Tag != "" { 231 | return desc, fmt.Errorf("%s: blobs cannot have tags", srcRef) 232 | } else if dstRef.Tag != "" { 233 | return desc, fmt.Errorf("%s: blobs cannot have tags", dstRef) 234 | } 235 | 236 | if srcRef.Host == dstRef.Host { 237 | client, err := Client(srcRef.Host, nil) 238 | if err != nil { 239 | return desc, fmt.Errorf("%s: error getting Client: %w", srcRef, err) 240 | } 241 | return client.MountBlob(ctx, srcRef.Repository, dstRef.Repository, srcRef.Digest) 242 | } 243 | 244 | // TODO Push/Reader progress / progresswriter concerns again 😭 245 | 246 | r, err := Lookup(ctx, srcRef, &LookupOptions{Type: LookupTypeBlob}) 247 | if err != nil { 248 | return desc, fmt.Errorf("%s: blob lookup failed: %w", srcRef, err) 249 | } 250 | if r == nil { 251 | return desc, fmt.Errorf("%s: blob not found", srcRef) 252 | } 253 | defer r.Close() 254 | desc = r.Descriptor() 255 | 256 | if dstRef.Digest != 
desc.Digest { 257 | return desc, fmt.Errorf("%s: registry digest mismatch: %s (%s)", dstRef, desc.Digest, srcRef) 258 | } 259 | 260 | if _, err := EnsureBlob(ctx, dstRef, desc.Size, r); err != nil { 261 | return desc, fmt.Errorf("%s: EnsureBlob(%s) failed: %w", dstRef, srcRef, err) 262 | } 263 | // TODO validate returned descriptor? (at least digest/size) 264 | 265 | if err := r.Close(); err != nil { 266 | return desc, fmt.Errorf("%s: Close of GetBlob(%s) failed: %w", dstRef, srcRef, err) 267 | } 268 | 269 | return desc, nil 270 | } 271 | -------------------------------------------------------------------------------- /registry/rate-limits.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "net/http" 5 | "slices" 6 | "time" 7 | 8 | "golang.org/x/time/rate" 9 | ) 10 | 11 | var ( 12 | registryRateLimiters = map[string]*rate.Limiter{ 13 | dockerHubCanonical: rate.NewLimiter(300/rate.Limit((1*time.Minute).Seconds()), 300), // stick to at most 300/min in registry/Hub requests (and allow an immediate burst of 300) 14 | } 15 | ) 16 | 17 | // an implementation of [net/http.RoundTripper] that transparently adds a total requests rate limit and 429-retrying behavior 18 | type rateLimitedRetryingRoundTripper struct { 19 | roundTripper http.RoundTripper 20 | limiter *rate.Limiter 21 | } 22 | 23 | func (d *rateLimitedRetryingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 24 | var ( 25 | // cap request retries at once per second 26 | requestRetryLimiter = rate.NewLimiter(rate.Every(time.Second), 1) 27 | 28 | // if we see 50x three times during retry, we should bail 29 | maxTry50X = 3 30 | 31 | ctx = req.Context() 32 | ) 33 | for { 34 | if err := requestRetryLimiter.Wait(ctx); err != nil { 35 | return nil, err 36 | } 37 | if err := d.limiter.Wait(ctx); err != nil { 38 | return nil, err 39 | } 40 | 41 | // in theory, this RoundTripper we're invoking should close req.Body (per the RoundTripper contract), so we shouldn't have to 🤞 42 | res, err := d.roundTripper.RoundTrip(req) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | doRetry := false 48 | 49 | if res.StatusCode == 429 { 50 | // just eat all available tokens and starve out the rate limiter (any 429 means we need to slow down, so our whole "bucket" is shot) 51 | for i := d.limiter.Tokens(); i > 0; i-- { 52 | _ = d.limiter.Allow() 53 | } 54 | doRetry = true // TODO maximum number of retries? (perhaps a deadline instead? req.WithContext to inject a deadline? 👀) 55 | } 56 | 57 | // certain status codes should result in a few auto-retries (especially with the automatic retry delay this injects), but up to a limit so we don't contribute to the "thundering herd" too much in a serious outage 58 | if maxTry50X > 1 && slices.Contains([]int{500, 502, 503, 504}, res.StatusCode) { 59 | maxTry50X-- 60 | doRetry = true 61 | // no need to eat up the rate limiter tokens as we do for 429 because this is not a rate limiting error (and we have the "requestRetryLimiter" that separately limits our retries of *this* request) 62 | } 63 | 64 | if doRetry { 65 | // satisfy the big scary warnings on https://pkg.go.dev/net/http#RoundTripper and https://pkg.go.dev/net/http#Client.Do about the downsides of failing to Close the response body 66 | if err := res.Body.Close(); err != nil { 67 | return nil, err 68 | } 69 | 70 | // https://pkg.go.dev/net/http#RoundTripper 71 | // "RoundTrip should not modify the request, except for consuming and closing the Request's Body." 
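// ...so rather than reusing req as-is, we close any partially-consumed Body, clone the request, and re-materialize a fresh Body from GetBody (which net/http populates automatically for common body types like *bytes.Reader and *strings.Reader) before retrying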
72 | if req.Body != nil { 73 | req.Body.Close() 74 | } 75 | req = req.Clone(ctx) 76 | if req.GetBody != nil { 77 | var err error 78 | req.Body, err = req.GetBody() 79 | if err != nil { 80 | return nil, err 81 | } 82 | } 83 | 84 | // TODO some way to notify upwards that we retried? 85 | // TODO implement more backoff logic than just one retry per second + docker hub rate limit (+ limited 50X retry)? 86 | continue 87 | } 88 | 89 | return res, nil 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /registry/read-helpers.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "unicode" 8 | 9 | "cuelabs.dev/go/oci/ociregistry" 10 | ) 11 | 12 | // reads a JSON object from the given [ociregistry.BlobReader], but also validating the [ociregistry.Descriptor.Digest] and [ociregistry.Descriptor.Size] from [ociregistry.BlobReader.Descriptor] (and returning appropriate errors) 13 | // 14 | // TODO split this up for reading raw objects ~safely too? (https://github.com/docker-library/bashbrew/commit/0f3f0042d0da95affb75e250a77100b4ae58832f) -- maybe even a separate `io.Reader`+`Descriptor` interface that doesn't require a BlobReader specifically? 15 | func readJSONHelper(r ociregistry.BlobReader, v interface{}) error { 16 | desc := r.Descriptor() 17 | 18 | // prevent go-digest panics later 19 | if err := desc.Digest.Validate(); err != nil { 20 | return err 21 | } 22 | 23 | // TODO if desc.Data != nil and len() == desc.Size, we should probably check/use that? 👀 24 | 25 | // make sure we can't possibly read (much) more than we're supposed to 26 | limited := &io.LimitedReader{ 27 | R: r, 28 | N: desc.Size + 1, // +1 to allow us to detect if we read too much (see verification below) 29 | } 30 | 31 | // copy all read data into the digest verifier so we can validate afterwards 32 | verifier := desc.Digest.Verifier() 33 | tee := io.TeeReader(limited, verifier) 34 | 35 | // decode directly! (mostly avoids double memory hit for big objects) 36 | // (TODO protect against malicious objects somehow?) 37 | if err := json.NewDecoder(tee).Decode(v); err != nil { 38 | return err 39 | } 40 | 41 | // read anything leftover ... 42 | bs, err := io.ReadAll(tee) 43 | if err != nil { 44 | return err 45 | } 46 | // ... 
and make sure it was just whitespace, if anything 47 | for _, b := range bs { 48 | if !unicode.IsSpace(rune(b)) { 49 | return fmt.Errorf("unexpected non-whitespace at the end of %q: %+v\n", string(desc.Digest), rune(b)) 50 | } 51 | } 52 | 53 | // now that we know we've read everything, we're safe to close the original reader 54 | if err := r.Close(); err != nil { 55 | return err 56 | } 57 | 58 | // after reading *everything*, we should have exactly one byte left in our LimitedReader (anything else is an error) 59 | if limited.N < 1 { 60 | return fmt.Errorf("size of %q is bigger than it should be (%d)", string(desc.Digest), desc.Size) 61 | } else if limited.N > 1 { 62 | return fmt.Errorf("size of %q is %d bytes smaller than it should be (%d)", string(desc.Digest), limited.N-1, desc.Size) 63 | } 64 | 65 | // and finally, let's verify our checksum 66 | if !verifier.Verified() { 67 | return fmt.Errorf("digest of %q not correct", string(desc.Digest)) 68 | } 69 | 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /registry/ref.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "strings" 5 | 6 | // thanks, go-digest... 7 | _ "crypto/sha256" 8 | _ "crypto/sha512" 9 | 10 | "cuelabs.dev/go/oci/ociregistry" 11 | "cuelabs.dev/go/oci/ociregistry/ociref" 12 | ) 13 | 14 | // parse a string ref like `hello-world:latest` directly into a [Reference] object, with Docker Hub canonicalization applied: `docker.io/library/hello-world:latest` 15 | // 16 | // See also [Reference.Normalize] and [ociref.ParseRelative] (which are the underlying implementation details of this method). 17 | func ParseRef(img string) (Reference, error) { 18 | r, err := ociref.ParseRelative(img) 19 | if err != nil { 20 | return Reference{}, err 21 | } 22 | ref := Reference(r) 23 | ref.Normalize() 24 | return ref, nil 25 | } 26 | 27 | // copy ociref.Reference so we can add methods (especially for JSON round-trip, but also Docker-isms like the implied default [Reference.Host] and `library/` prefix for DOI) 28 | type Reference ociref.Reference 29 | 30 | // normalize Docker Hub refs like `hello-world:latest`: `docker.io/library/hello-world:latest` 31 | // 32 | // NOTE: this explicitly does *not* normalize Tag to `:latest` because it's useful to be able to parse a reference and know it did not specify either tag or digest (and `if ref.Tag == "" { ref.Tag = "latest" }` is really trivial code outside this for that case) 33 | func (ref *Reference) Normalize() { 34 | if dockerHubHosts[ref.Host] { 35 | // normalize Docker Hub host value 36 | ref.Host = dockerHubCanonical 37 | // normalize Docker Official Images to library/ prefix 38 | if !strings.Contains(ref.Repository, "/") { 39 | ref.Repository = "library/" + ref.Repository 40 | } 41 | // add an error return and return an error if we have more than one "/" in Repository? 
probably not worth embedding that many "Hub" implementation details this low (since it'll error appropriately on use of such invalid references anyhow) 42 | } 43 | } 44 | 45 | // like [ociref.Reference.String], but with Docker Hub "denormalization" applied (no explicit `docker.io` host, no `library/` prefix for DOI) 46 | func (ref Reference) String() string { 47 | if ref.Host == dockerHubCanonical { 48 | ref.Host = "" 49 | ref.Repository = strings.TrimPrefix(ref.Repository, "library/") 50 | } 51 | return ociref.Reference(ref).String() 52 | } 53 | 54 | // like [Reference.String], but also stripping a known digest if this object's value matches 55 | func (ref Reference) StringWithKnownDigest(commonDigest ociregistry.Digest) string { 56 | if ref.Digest == commonDigest { 57 | ref.Digest = "" 58 | } 59 | return ref.String() 60 | } 61 | 62 | // implements [encoding.TextMarshaler] (especially for [Reference]-in-JSON) 63 | func (ref Reference) MarshalText() ([]byte, error) { 64 | return []byte(ref.String()), nil 65 | } 66 | 67 | // implements [encoding.TextUnmarshaler] (especially for [Reference]-from-JSON) 68 | func (ref *Reference) UnmarshalText(text []byte) error { 69 | r, err := ParseRef(string(text)) 70 | if err == nil { 71 | *ref = r 72 | } 73 | return err 74 | } 75 | -------------------------------------------------------------------------------- /registry/ref_test.go: -------------------------------------------------------------------------------- 1 | package registry_test 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/docker-library/meta-scripts/registry" 9 | 10 | "cuelabs.dev/go/oci/ociregistry/ociref" 11 | ) 12 | 13 | func toJson(t *testing.T, v any) string { 14 | t.Helper() 15 | b, err := json.Marshal(v) 16 | if err != nil { 17 | t.Fatal("unexpected JSON error", err) 18 | } 19 | return string(b) 20 | } 21 | 22 | func fromJson(t *testing.T, j string, v any) { 23 | t.Helper() 24 | err := json.Unmarshal([]byte(j), v) 25 | if err != nil { 26 | t.Fatal("unexpected JSON error", err) 27 | } 28 | } 29 | 30 | func TestParseRef(t *testing.T) { 31 | t.Parallel() 32 | 33 | for _, o := range []struct { 34 | in string 35 | out string 36 | }{ 37 | {"hello-world:latest", "docker.io/library/hello-world:latest"}, 38 | {"tianon/true:oci", "docker.io/tianon/true:oci"}, 39 | {"docker.io/tianon/true:oci", "docker.io/tianon/true:oci"}, 40 | {"localhost:5000/foo", "localhost:5000/foo"}, 41 | 42 | // Docker Hub edge cases 43 | {"hello-world", "docker.io/library/hello-world"}, 44 | {"library/hello-world", "docker.io/library/hello-world"}, 45 | {"docker.io/hello-world", "docker.io/library/hello-world"}, 46 | {"docker.io/library/hello-world", "docker.io/library/hello-world"}, 47 | {"index.docker.io/library/hello-world", "docker.io/library/hello-world"}, 48 | {"registry-1.docker.io/library/hello-world", "docker.io/library/hello-world"}, 49 | {"registry.hub.docker.com/library/hello-world", "docker.io/library/hello-world"}, 50 | } { 51 | o := o // https://github.com/golang/go/issues/60078 52 | dockerOut := strings.TrimPrefix(strings.TrimPrefix(o.out, "docker.io/library/"), "docker.io/") 53 | 54 | t.Run(o.in, func(t *testing.T) { 55 | ref, err := registry.ParseRef(o.in) 56 | if err != nil { 57 | t.Fatal("unexpected error", err) 58 | } 59 | 60 | out := ociref.Reference(ref).String() 61 | if out != o.out { 62 | t.Fatalf("expected %q, got %q", o.out, out) 63 | } 64 | 65 | out = ref.String() 66 | if out != dockerOut { 67 | t.Fatalf("expected %q, got %q", dockerOut, out) 68 | } 69 | 
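// (the second assertion covers the denormalized Docker-style rendering, which is why dockerOut above strips the "docker.io/library/" and "docker.io/" prefixes from o.out)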
}) 70 | 71 | t.Run(o.in+" JSON", func(t *testing.T) { 72 | json := toJson(t, o.in) // "hello-world:latest" (string straight to JSON so we can unmarshal it as a Reference) 73 | var ref registry.Reference 74 | fromJson(t, json, &ref) 75 | out := ociref.Reference(ref).String() 76 | if out != o.out { 77 | t.Fatalf("expected %q, got %q", o.out, out) 78 | } 79 | 80 | json = toJson(t, ref) // "hello-world:latest" (take our reference and convert it to JSON so we can verify it goes out correctly) 81 | fromJson(t, json, &out) // back to a string 82 | if out != dockerOut { 83 | t.Fatalf("expected %q, got %q", dockerOut, out) 84 | } 85 | }) 86 | } 87 | } 88 | 89 | func TestRefStringWithKnownDigest(t *testing.T) { 90 | ref, err := registry.ParseRef("hello-world:latest@sha256:53641cd209a4fecfc68e21a99871ce8c6920b2e7502df0a20671c6fccc73a7c6") 91 | if err != nil { 92 | t.Fatal("unexpected error", err) 93 | } 94 | str := ref.String() 95 | 96 | if got := ref.StringWithKnownDigest("sha256:0000000000000000000000000000000000000000000000000000000000000000"); got != str { 97 | t.Fatalf("expected %q, got %q", str, got) 98 | } 99 | 100 | if got := ref.StringWithKnownDigest("sha256:53641cd209a4fecfc68e21a99871ce8c6920b2e7502df0a20671c6fccc73a7c6"); got != "hello-world:latest" { 101 | t.Fatalf("expected %q, got %q", "hello-world:latest", got) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /registry/synthesize-index.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/docker-library/bashbrew/architecture" 8 | 9 | "cuelabs.dev/go/oci/ociregistry" 10 | "cuelabs.dev/go/oci/ociregistry/ocimem" 11 | ocispec "github.com/opencontainers/image-spec/specs-go/v1" 12 | ) 13 | 14 | // returns a synthesized [ocispec.Index] object for the given reference that includes automatically pulling up [ocispec.Platform] objects for entries missing them plus annotations for bashbrew architecture ([AnnotationBashbrewArch]) and where to find the "upstream" object if it needs to be copied/pulled ([ocispec.AnnotationRefName]) 15 | func SynthesizeIndex(ctx context.Context, ref Reference) (*ocispec.Index, error) { 16 | client, err := Client(ref.Host, nil) 17 | if err != nil { 18 | return nil, fmt.Errorf("%s: failed getting client: %w", ref, err) 19 | } 20 | 21 | r, err := Lookup(ctx, ref, nil) 22 | if err != nil { 23 | return nil, fmt.Errorf("%s: failed GET: %w", ref, err) 24 | } 25 | if r == nil { 26 | return nil, nil 27 | } 28 | defer r.Close() 29 | 30 | desc := r.Descriptor() 31 | 32 | var index ocispec.Index 33 | 34 | switch desc.MediaType { 35 | case ocispec.MediaTypeImageManifest, mediaTypeDockerImageManifest: 36 | if err := normalizeManifestPlatform(ctx, &desc, r, client, ref); err != nil { 37 | return nil, fmt.Errorf("%s: failed normalizing manifest platform: %w", ref, err) 38 | } 39 | 40 | index.Manifests = append(index.Manifests, desc) 41 | 42 | case ocispec.MediaTypeImageIndex, mediaTypeDockerManifestList: 43 | if err := readJSONHelper(r, &index); err != nil { 44 | return nil, fmt.Errorf("%s: failed reading index: %w", ref, err) 45 | } 46 | 47 | default: 48 | return nil, fmt.Errorf("unsupported mediaType: %q", desc.MediaType) 49 | } 50 | 51 | switch index.SchemaVersion { 52 | case 0: 53 | index.SchemaVersion = 2 54 | case 2: 55 | // all good, do nothing! 
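// (a schemaVersion of 0 means the field was absent from the JSON entirely, which is also the case for the single-manifest index we synthesized ourselves above, so we backfill it to 2)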
56 | default: 57 | return nil, fmt.Errorf("unsupported index schemaVersion: %q", index.SchemaVersion) 58 | } 59 | 60 | switch index.MediaType { 61 | case "": 62 | index.MediaType = ocispec.MediaTypeImageIndex 63 | if len(index.Manifests) >= 1 { 64 | // if the first item in our list is a Docker media type, our list should probably be too 65 | if index.Manifests[0].MediaType == mediaTypeDockerImageManifest { 66 | index.MediaType = mediaTypeDockerManifestList 67 | } 68 | } 69 | case ocispec.MediaTypeImageIndex, mediaTypeDockerManifestList: 70 | // all good, do nothing! 71 | default: 72 | return nil, fmt.Errorf("unsupported index mediaType: %q", index.MediaType) 73 | } 74 | 75 | setRefAnnotation(&index.Annotations, ref, desc.Digest) 76 | 77 | seen := map[string]*ociregistry.Descriptor{} 78 | i := 0 // https://go.dev/wiki/SliceTricks#filter-in-place (used to delete references we don't have the subject of) 79 | for _, m := range index.Manifests { 80 | if seen[string(m.Digest)] != nil { 81 | // skip digests we've already seen (de-dupe), since we have a map already for dropping dangling attestations 82 | continue 83 | // if there was unique data on this lower entry (different annotations, etc), perhaps we should merge/overwrite? OCI spec technically says "first match SHOULD win", so this is probably fine/sane 84 | // https://github.com/opencontainers/image-spec/blob/v1.1.0/image-index.md#:~:text=If%20multiple%20manifests%20match%20a%20client%20or%20runtime%27s%20requirements%2C%20the%20first%20matching%20entry%20SHOULD%20be%20used. 85 | } 86 | 87 | setRefAnnotation(&m.Annotations, ref, m.Digest) 88 | 89 | if err := normalizeManifestPlatform(ctx, &m, nil, client, ref); err != nil { 90 | return nil, fmt.Errorf("%s: failed normalizing manifest platform: %w", m.Annotations[ocispec.AnnotationRefName], err) 91 | } 92 | 93 | delete(m.Annotations, AnnotationBashbrewArch) // don't trust any remote-provided value for bashbrew arch (since it's really inexpensive for us to calculate fresh and it's only a hint anyhow) 94 | if m.Annotations[annotationBuildkitReferenceType] == annotationBuildkitReferenceTypeAttestation { 95 | if subject := seen[m.Annotations[annotationBuildkitReferenceDigest]]; subject != nil && subject.Annotations[AnnotationBashbrewArch] != "" { 96 | m.Annotations[AnnotationBashbrewArch] = subject.Annotations[AnnotationBashbrewArch] 97 | } else { 98 | // if our subject is missing, delete this entry from the index (see "i") 99 | continue 100 | } 101 | } else if m.Platform != nil { 102 | imagePlatform := architecture.OCIPlatform(*m.Platform) 103 | // match "platform" to bashbrew arch and set an appropriate annotation 104 | for bashbrewArch, supportedPlatform := range architecture.SupportedArches { 105 | if imagePlatform.Is(supportedPlatform) { 106 | m.Annotations[AnnotationBashbrewArch] = bashbrewArch 107 | break 108 | } 109 | } 110 | } 111 | 112 | // TODO if m.Size > 2048 { 113 | // make sure we don't return any (big) data fields, now that we know we don't need them for sure (they might exist in the index we queried, but they're also used as an implementation detail in our registry cache code to store the original upstream data) 114 | m.Data = nil 115 | // } 116 | 117 | index.Manifests[i] = m 118 | seen[string(m.Digest)] = &index.Manifests[i] 119 | i++ 120 | } 121 | index.Manifests = index.Manifests[:i] // https://go.dev/wiki/SliceTricks#filter-in-place 122 | 123 | // TODO set an annotation on the index to specify whether or not we actually filtered anything (or whether it's safe to copy the 
original index as-is during arch-specific deploy instead of reconstructing it from all the parts); maybe a list of digests that were skipped/excluded? 124 | 125 | return &index, nil 126 | } 127 | 128 | // given a (potentially `nil`) map of annotations, add [ocispec.AnnotationRefName] including the supplied [Reference] (but with [Reference.Digest] set to a new value) 129 | func setRefAnnotation(annotations *map[string]string, ref Reference, digest ociregistry.Digest) { 130 | if *annotations == nil { 131 | // "assignment to nil map" 🙃 132 | *annotations = map[string]string{} 133 | } 134 | ref.Digest = digest // since ref is already copied by value, we're safe to modify it to inject the new digest 135 | (*annotations)[ocispec.AnnotationRefName] = ref.String() 136 | } 137 | 138 | // given a manifest descriptor (and optionally an existing [ociregistry.BlobReader] on the manifest object itself), make sure it has a valid [ocispec.Platform] object if possible, querying down into the [ocispec.Image] ("config" blob) if necessary 139 | func normalizeManifestPlatform(ctx context.Context, m *ocispec.Descriptor, r ociregistry.BlobReader, client ociregistry.Interface, ref Reference) error { 140 | if m.Platform == nil || m.Platform.OS == "" || m.Platform.Architecture == "" { 141 | // if missing (or obviously invalid) "platform", we need to (maybe) reach downwards and synthesize 142 | m.Platform = nil 143 | 144 | switch m.MediaType { 145 | case ocispec.MediaTypeImageManifest, mediaTypeDockerImageManifest: 146 | var err error 147 | if r == nil { 148 | if m.Data != nil && int64(len(m.Data)) == m.Size { 149 | r = ocimem.NewBytesReader(m.Data, *m) 150 | } else { 151 | r, err = client.GetManifest(ctx, ref.Repository, m.Digest) 152 | if err != nil { 153 | return err 154 | } 155 | } 156 | defer r.Close() 157 | } 158 | 159 | var manifest ocispec.Manifest 160 | if err := readJSONHelper(r, &manifest); err != nil { 161 | return err 162 | } 163 | 164 | switch manifest.Config.MediaType { 165 | case ocispec.MediaTypeImageConfig, mediaTypeDockerImageConfig: 166 | var r ociregistry.BlobReader 167 | if manifest.Config.Data != nil && int64(len(manifest.Config.Data)) == manifest.Config.Size { 168 | r = ocimem.NewBytesReader(manifest.Config.Data, manifest.Config) 169 | } else { 170 | r, err = client.GetBlob(ctx, ref.Repository, manifest.Config.Digest) 171 | if err != nil { 172 | return err 173 | } 174 | } 175 | defer r.Close() 176 | 177 | var config ocispec.Image 178 | if err := readJSONHelper(r, &config); err != nil { 179 | return err 180 | } 181 | 182 | if config.Platform.OS != "" && config.Platform.Architecture != "" { 183 | m.Platform = &config.Platform 184 | } 185 | } 186 | } 187 | } 188 | 189 | if m.Platform != nil { 190 | // if we have a platform object now, let's normalize it 191 | normal := architecture.Normalize(*m.Platform) 192 | m.Platform = &normal 193 | } 194 | 195 | return nil 196 | } 197 | -------------------------------------------------------------------------------- /registry/user-agent.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | // https://github.com/docker-library/meta-scripts/issues/111 4 | // https://github.com/cue-labs/oci/issues/37 5 | 6 | import ( 7 | "fmt" 8 | "maps" 9 | "net/http" 10 | ) 11 | 12 | // an implementation of [net/http.RoundTripper] that transparently injects User-Agent (as a wrapper around another [net/http.RoundTripper]) 13 | type userAgentRoundTripper struct { 14 | roundTripper http.RoundTripper 15 | userAgent 
string 16 | } 17 | 18 | func (d *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 19 | // if d is nil or if d.roundTripper is nil, we'll just let the runtime panic because those are both 100% coding errors in the consuming code 20 | 21 | if d.userAgent == "" { 22 | // arguably we could `panic` here too since this is *also* a coding error, but it'd be pretty reasonable to source this from an environment variable so `panic` is perhaps a bit user-hostile 23 | return nil, fmt.Errorf("missing userAgent in userAgentRoundTripper! (request %s)", req.URL) 24 | } 25 | 26 | // https://github.com/cue-lang/cue/blob/0a43336cccf3b6fc632e976912d74fb2c9670557/internal/cueversion/transport.go#L27-L34 27 | reqClone := *req 28 | reqClone.Header = maps.Clone(reqClone.Header) 29 | reqClone.Header.Set("User-Agent", d.userAgent) 30 | return d.roundTripper.RoundTrip(&reqClone) 31 | } 32 | -------------------------------------------------------------------------------- /sort.jq: -------------------------------------------------------------------------------- 1 | # input: string 2 | # output: something suitable for use in "sort_by" for sorting in "natural sort" order 3 | def sort_split_natural: 4 | # https://en.wikipedia.org/wiki/Natural_sort_order 5 | # similar to https://github.com/tianon/debian-bin/blob/448b5784ac63e6341d5e5762004e3d9e64331cf2/jq/dpkg-version.jq#L3 but a much smaller/simpler problem set (numbers vs non-numbers) 6 | [ 7 | scan("[0-9]+|[^0-9]+|^$") 8 | | tonumber? // . 9 | ] 10 | ; 11 | 12 | # input: ~anything 13 | # output: something suitable for use in "sort_by" for sorting in descending order (for numbers, they become negative, etc) 14 | def sort_split_desc: 15 | walk( 16 | if type == "number" then 17 | -. 18 | elif type == "string" then 19 | # https://stackoverflow.com/a/74058663/433558 20 | [ -explode[], 0 ] # the "0" here helps us with the empty string case; [ "a", "b", "c", "" ] 21 | elif type == "array" then 22 | . # TODO sorting an array of arrays where one is empty goes wonky here (for similar reasons to the empty string sorting); [ [1],[2],[3],[0],[] ] 23 | else 24 | error("cannot reverse sort type '\(type)': \(.)") 25 | end 26 | ) 27 | ; 28 | 29 | # input: key to sort 30 | # output: something suitable for use in "sort_by" for sorting things based on explicit preferences 31 | # top: ordered list of sort preference 32 | # bottom: ordered list of *end* sort preference (ie, what to put at the end, in order) 33 | # [ 1, 2, 3, 4, 5 ] | sort_by(sort_split_pref([ 6, 5, 3 ]; [ 4, 2 ])) => [ 5, 3, 1, 4, 2 ] 34 | def sort_split_pref($top; $bottom): 35 | . as $o 36 | | [ 37 | ( 38 | $top 39 | | index($o) # items in $top get just their index in $top 40 | // ( 41 | length 42 | + ( 43 | $bottom 44 | | index($o) # items in $bottom get ($top | length) + 1 + index in $bottom 45 | // -1 # items in neither get ($top | length) 46 | | . + 1 47 | ) 48 | ) 49 | ), 50 | $o 51 | ] 52 | ; 53 | # a one-argument version of sort_split_pref for the more common usage 54 | def sort_split_pref(top): 55 | sort_split_pref(top; []) 56 | ; 57 | -------------------------------------------------------------------------------- /validate.jq: -------------------------------------------------------------------------------- 1 | # a set of ~generic validation helpers 2 | 3 | # usage: validate(.some.value; . >= 123; "must be 123 or bigger") 4 | # will also "nest" sanely: validate(.some; validate(.value; . 
>= 123; "123+")) 5 | def validate(selector; condition; err): 6 | # if "selector" contains something like "$foo", "path($foo)" will break, but emit the first few things (so "path(.foo, $foo, .bar)" will emit ["foo"] before the exception is caught on the second round) 7 | [ try path(selector) catch "BORKBORKBORK" ] as $paths 8 | | IN($paths[]; "BORKBORKBORK") as $bork 9 | | (if $bork then [ selector ] else $paths end) as $data 10 | | reduce $data[] as $maybepath (.; 11 | (if $bork then $maybepath else getpath($maybepath) end) as $val 12 | | try ( 13 | if $val | condition then . else 14 | error("") 15 | end 16 | ) catch ( 17 | # invalid .["foo"]["bar"]: ERROR MESSAGE HERE 18 | # value: {"baz":"buzz"} 19 | error( 20 | "\ninvalid " 21 | + if $bork then 22 | "value" 23 | else 24 | ".\($maybepath | map("[\(tojson)]") | add // "")" 25 | end 26 | + ":\n\t\($val | tojson)" 27 | + ( 28 | $val 29 | | err 30 | | if . and length > 0 then 31 | "\n\(.)" 32 | else "" end 33 | ) 34 | + ( 35 | ltrimstr("\n") 36 | | if . and length > 0 then "\n\(.)" else "" end 37 | ) 38 | ) 39 | ) 40 | ) 41 | ; 42 | def validate(selector; condition): 43 | validate(selector; condition; null) 44 | ; 45 | def validate(condition): 46 | validate(.; condition) 47 | ; 48 | 49 | # usage: validate_IN(.some[].mediaType; "foo/bar", "baz/buzz") 50 | def validate_IN(selector; options): 51 | validate(selector; IN(options); "valid:\n\t\([ options | tojson ] | join("\n\t"))") 52 | ; 53 | 54 | # usage: validate_length(.manifests; 1, 2) 55 | def validate_length(selector; lengths): 56 | validate(selector; IN(length; lengths); "length (\(length)) must be: \([ lengths | tojson ] | join(", "))") 57 | ; 58 | 59 | # usage: (jq --slurp) validate_one | .some.thing 60 | def validate_one: 61 | validate_length(.; 1) 62 | | .[0] 63 | ; 64 | --------------------------------------------------------------------------------