├── .chainguard
│   └── source.yaml
├── .github
│   ├── chainguard
│   │   └── scorecard.sts.yaml
│   ├── dependabot.yml
│   └── workflows
│       ├── boilerplate.yaml
│       ├── build.yaml
│       ├── golangci-lint.yaml
│       ├── release.yaml
│       └── scorecard.yml
├── .gitignore
├── .golangci.yml
├── .goreleaser.yaml
├── Dockerfile
├── Dockerfile.chainguard
├── LICENSE
├── README.md
├── RELEASE.md
├── SECURITY.md
├── builtin-mappings.yaml
├── dfc.png
├── go.mod
├── go.sum
├── hack
│   └── boilerplate
│       ├── boilerplate.go.txt
│       ├── boilerplate.sh.txt
│       ├── boilerplate.yaml.txt
│       └── boilerplate.yml.txt
├── integration
│   ├── README.md
│   └── docker_build_integration_test.go
├── main.go
├── mcp-server
│   ├── Dockerfile
│   ├── README.md
│   ├── demo-cursor.png
│   ├── go.mod
│   ├── go.sum
│   └── main.go
├── pkg
│   └── dfc
│       ├── adduser.go
│       ├── adduser_test.go
│       ├── builtin-mappings.yaml
│       ├── dfc.go
│       ├── dfc_test.go
│       ├── mappings.go
│       ├── shell.go
│       ├── shell_test.go
│       ├── tar.go
│       ├── tar_test.go
│       ├── update.go
│       └── update_test.go
└── testdata
    ├── README.md
    ├── apt-add-repo.after.Dockerfile
    ├── apt-add-repo.before.Dockerfile
    ├── canada-ca-tracker.after.Dockerfile
    ├── canada-ca-tracker.before.Dockerfile
    ├── digest.after.Dockerfile
    ├── digest.before.Dockerfile
    ├── distroless-go-integration
    │   ├── go.mod
    │   └── main.go
    ├── distroless-go.after.Dockerfile
    ├── distroless-go.before.Dockerfile
    ├── django-integration
    │   └── requirements
    │       ├── common.txt
    │       └── prod.txt
    ├── django.after.Dockerfile
    ├── django.before.Dockerfile
    ├── from-with-platform.after.Dockerfile
    ├── from-with-platform.before.Dockerfile
    ├── gcds-hugo.after.Dockerfile
    ├── gcds-hugo.before.Dockerfile
    ├── golang-multi-stage.after.Dockerfile
    ├── golang-multi-stage.before.Dockerfile
    ├── kind.after.Dockerfile
    ├── kind.before.Dockerfile
    ├── no-root.after.Dockerfile
    ├── no-root.before.Dockerfile
    ├── no-tag.after.Dockerfile
    ├── no-tag.before.Dockerfile
    ├── nodejs-ubuntu.after.Dockerfile
    ├── nodejs-ubuntu.before.Dockerfile
    ├── pipes.after.Dockerfile
    ├── pipes.before.Dockerfile
    ├── python-multi-stage.after.Dockerfile
    ├── python-multi-stage.before.Dockerfile
    ├── python-nodejs.after.Dockerfile
    ├── python-nodejs.before.Dockerfile
    ├── ruby-rails.after.Dockerfile
    ├── ruby-rails.before.Dockerfile
    ├── yum-dnf-flags.after.Dockerfile
    └── yum-dnf-flags.before.Dockerfile
/.chainguard/source.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | spec:
5 | authorities:
6 | - keyless:
7 | identities:
8 | - issuer: https://accounts.google.com
9 | - issuer: https://github.com/login/oauth
10 | - key:
11 | # allow commits signed by GitHub, e.g. the UI
12 | kms: https://github.com/web-flow.gpg
13 |
--------------------------------------------------------------------------------
/.github/chainguard/scorecard.sts.yaml:
--------------------------------------------------------------------------------
1 | issuer: https://token.actions.githubusercontent.com
2 | subject: repo:chainguard-dev/dfc:ref:refs/heads/main
3 | claim_pattern:
4 | job_workflow_ref: chainguard-dev/dfc/.github/workflows/scorecard.yml@.*
5 |
6 | permissions:
7 | actions: read
8 | checks: read
9 | contents: read
10 | issues: read
11 | pull_requests: read
12 | security_events: write
13 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | version: 2
6 | updates:
7 | - package-ecosystem: gomod
8 | directories:
9 | - "/*"
10 | - "/*/*"
11 | schedule:
12 | interval: daily
13 | open-pull-requests-limit: 5
14 | groups:
15 | all:
16 | update-types:
17 | - "minor"
18 | - "patch"
19 | - package-ecosystem: "github-actions"
20 | directory: "/*"
21 | schedule:
22 | interval: daily
23 | open-pull-requests-limit: 5
24 | groups:
25 | all:
26 | update-types:
27 | - "minor"
28 | - "patch"
29 |
--------------------------------------------------------------------------------
/.github/workflows/boilerplate.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | name: Boilerplate
5 |
6 | on:
7 | pull_request:
8 | branches:
9 | - "main"
10 |
11 | permissions: {}
12 |
13 | jobs:
14 | check:
15 | permissions:
16 | contents: read
17 |
18 | name: Boilerplate Check
19 | runs-on: ubuntu-latest
20 | strategy:
21 | fail-fast: false # Keep running if one leg fails.
22 | matrix:
23 | extension:
24 | - go
25 | - sh
26 | - yaml
27 | - yml
28 |
29 | # Map between extension and human-readable name.
30 | include:
31 | - extension: go
32 | language: Go
33 | - extension: sh
34 | language: Bash
35 | - extension: yaml
36 | language: YAML
37 | - extension: yml
38 | language: YAML
39 |
40 | steps:
41 | - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
42 | with:
43 | egress-policy: audit
44 |
45 | - name: Check out code
46 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
47 |
48 | - uses: chainguard-dev/actions/boilerplate@76af37936141ab4766b6578400f136c2621614fd # main
49 | with:
50 | extension: ${{ matrix.extension }}
51 | language: ${{ matrix.language }}
52 | exclude: "((vendor|third_party|testdata|static)/|.pb(.gw)?.go|.generated.go)"
53 |
54 | boilerplate-check:
55 | permissions:
56 | actions: read
57 |
58 | needs:
59 | - check
60 | runs-on: ubuntu-latest
61 | if: always() # always run as required and if any dependent job is skipped, this is skipped if not always
62 | steps:
63 | - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
64 | with:
65 | egress-policy: audit
66 |
67 | - uses: step-security/workflow-conclusion-action@3854991aedf48f496a83c417ee559f5e28ec57ad # v3.0.7
68 |
69 | - if: ${{ env.WORKFLOW_CONCLUSION == 'success' }}
70 | working-directory: /tmp
71 | run: echo ${{ env.WORKFLOW_CONCLUSION }} && exit 0
72 |
73 | - if: ${{ env.WORKFLOW_CONCLUSION == 'failure' }}
74 | working-directory: /tmp
75 | run: echo ${{ env.WORKFLOW_CONCLUSION }} && exit 1
76 |
--------------------------------------------------------------------------------
/.github/workflows/build.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | name: ci
5 |
6 | on:
7 | push:
8 | branches: [ "main" ]
9 | pull_request:
10 | branches: [ "main" ]
11 |
12 | permissions: {}
13 |
14 | jobs:
15 | build:
16 | name: build
17 | runs-on: ubuntu-latest
18 |
19 | permissions:
20 | contents: read
21 |
22 | steps:
23 | - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
24 | with:
25 | egress-policy: audit
26 |
27 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
28 |
29 | - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
30 | with:
31 | go-version-file: './go.mod'
32 | check-latest: true
33 |
34 | - name: build
35 | run: |
36 | set -x
37 | go build -o dfc .
38 | ./dfc --help
39 |
40 | - name: test
41 | run: |
42 | set -x
43 | go test -v -cover ./...
44 |
45 | - name: build mcp-server
46 | run: |
47 | set -x
48 | cd mcp-server/
49 | go build -o mcp-server .
50 |
--------------------------------------------------------------------------------
/.github/workflows/golangci-lint.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | name: golangci-lint
5 |
6 | on:
7 | push:
8 | branches:
9 | - 'main'
10 | pull_request:
11 |
12 | permissions: {}
13 |
14 | jobs:
15 | golangci:
16 | name: lint
17 | runs-on: ubuntu-latest
18 |
19 | permissions:
20 | contents: read
21 |
22 | steps:
23 | - name: Harden the runner (Audit all outbound calls)
24 | uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
25 | with:
26 | egress-policy: audit
27 |
28 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
29 | - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0
30 | with:
31 | go-version-file: './go.mod'
32 | check-latest: true
33 |
34 | - name: golangci-lint
35 | uses: golangci/golangci-lint-action@55c2c1448f86e01eaae002a5a3a9624417608d84 # v6.5.2
36 | with:
37 | version: v1.64
38 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | name: Create Release
5 |
6 | on:
7 | push:
8 | tags:
9 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
10 |
11 | permissions: {}
12 |
13 | jobs:
14 | cli:
15 | # Only release CLI for tagged releases
16 | if: startsWith(github.event.ref, 'refs/tags/v')
17 |
18 | name: Release the CLI
19 | runs-on: ubuntu-latest
20 |
21 | # https://docs.github.com/en/actions/reference/authentication-in-a-workflow
22 | permissions:
23 | id-token: write # federate OIDC to use in octo-sts
24 | contents: write # need to push artifacts to the release
25 |
26 | steps:
27 | - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0
28 | with:
29 | egress-policy: audit
30 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
31 | - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v2.2.0
32 | with:
33 | go-version-file: 'go.mod'
34 | check-latest: true
35 |
36 | - uses: sigstore/cosign-installer@d7d6bc7722e3daa8354c50bcb52f4837da5e9b6a # v3.8.1
37 |
38 | - uses: goreleaser/goreleaser-action@90a3faa9d0182683851fbfa97ca1a2cb983bfca3 # v2.8.1
39 | with:
40 | install-only: true
41 |
42 | # Federate to create a token to authenticate with the homebrew-tap repository.
43 | - uses: octo-sts/action@6177b4481c00308b3839969c3eca88c96a91775f # v1.0.0
44 | id: octo-sts
45 | with:
46 | scope: chainguard-dev/homebrew-tap
47 | identity: dfc
48 |
49 | - name: Release
50 | env:
51 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
52 | HOMEBREW_TAP_GITHUB_TOKEN: ${{ steps.octo-sts.outputs.token }}
53 | run: |
54 | set -x
55 | VERSION="${GITHUB_REF#refs/tags/}"
56 | REVISION="$(git rev-parse HEAD)"
57 | export LDFLAGS="-X main.Version=$VERSION -X main.Revision=$REVISION"
58 | goreleaser release --clean
59 |
--------------------------------------------------------------------------------
/.github/workflows/scorecard.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | name: Scorecard supply-chain security
5 | on:
6 | branch_protection_rule:
7 | schedule:
8 | - cron: "25 4 * * 2"
9 | push:
10 | branches: ["main"]
11 |
12 | permissions: {}
13 |
14 | jobs:
15 | analysis:
16 | name: Scorecard analysis
17 | runs-on: ubuntu-latest
18 | permissions:
19 | # Needed to upload the results to code-scanning dashboard.
20 | security-events: write
21 | # Needed to publish results and get a badge (see publish_results below).
22 | id-token: write
23 | # Uncomment the permissions below if installing in a private repository.
24 | contents: read
25 | actions: read
26 | issues: read
27 | pull-requests: read
28 | checks: read
29 |
30 | steps:
31 | - name: Harden the runner (Audit all outbound calls)
32 | uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0
33 | with:
34 | egress-policy: audit
35 |
36 | - name: "Checkout code"
37 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
38 | with:
39 | persist-credentials: false
40 |
41 | - name: "Run analysis"
42 | uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
43 | with:
44 | results_file: results.sarif
45 | results_format: sarif
46 | publish_results: true
47 |
48 | - name: "Upload artifact"
49 | uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
50 | with:
51 | name: SARIF file
52 | path: results.sarif
53 | retention-days: 5
54 |
55 | - name: "Upload to code-scanning"
56 | uses: github/codeql-action/upload-sarif@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11
57 | with:
58 | sarif_file: results.sarif
59 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.bak
2 | /dfc
3 | coverage.*
4 | *.yaml
5 | .idea
6 | .DS_Store
7 | mcp-server/mcp-server
8 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | ---
5 | linters:
6 | enable:
7 | - asciicheck
8 | - unused
9 | - errcheck
10 | - errorlint
11 | - gofmt
12 | - goimports
13 | - gosec
14 | - gocritic
15 | - importas
16 | - prealloc
17 | - revive
18 | - misspell
19 | - stylecheck
20 | - tparallel
21 | - unconvert
22 | - unparam
23 | - whitespace
24 | issues:
25 | uniq-by-line: false
26 | exclude-rules:
27 | - path: _test\.go
28 | linters:
29 | - errcheck
30 | - gosec
31 | max-issues-per-linter: 0
32 | max-same-issues: 0
33 | run:
34 | issues-exit-code: 1
35 | timeout: 10m
--------------------------------------------------------------------------------
/.goreleaser.yaml:
--------------------------------------------------------------------------------
1 | project_name: dfc
2 | version: 2
3 |
4 | before:
5 | hooks:
6 | - go mod download
7 |
8 | env:
9 | - COSIGN_YES=true
10 |
11 | builds:
12 | - id: dfc-build
13 | binary: dfc
14 | main: ./
15 | env:
16 | - CGO_ENABLED=0
17 | goos:
18 | - darwin
19 | - linux
20 | goarch:
21 | - "386"
22 | - amd64
23 | - arm64
24 | ignore:
25 | - goos: darwin
26 | goarch: "386"
27 | flags:
28 | - -trimpath
29 | mod_timestamp: '{{ .CommitTimestamp }}'
30 | ldflags:
31 | - -extldflags "-static"
32 | - "{{ .Env.LDFLAGS }}"
33 |
34 | signs:
35 | - id: dfc-cosign
36 | cmd: cosign
37 | certificate: "${artifact}.crt"
38 | args: ["sign-blob", "--output-signature", "${signature}", "--output-certificate", "${certificate}", "${artifact}", "--yes"]
39 | artifacts: all
40 |
41 | archives:
42 | - files:
43 | - LICENSE
44 | wrap_in_directory: true
45 |
46 | brews:
47 | - name: dfc
48 | repository:
49 | owner: chainguard-dev
50 | name: homebrew-tap
51 | branch: main
52 | token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
53 | url_template: "https://github.com/chainguard-dev/dfc/releases/download/v{{ .Version }}/{{ .ArtifactName }}"
54 | directory: Formula
55 | commit_author:
56 | name: guardian
57 | email: guardian@chainguard.dev
58 | homepage: "https://github.com/chainguard-dev/dfc"
59 | description: "Convert Dockerfiles to use Chainguard"
60 | install: |
61 | bin.install "{{ .Binary }}" => "{{ .ProjectName }}"
62 | test: |
63 | system "#{bin}/{{ .ProjectName }}", "--version"
64 |
65 | checksum:
66 | name_template: 'checksums.txt'
67 |
68 | snapshot:
69 | version_template: "{{ .Tag }}-next"
70 |
71 | changelog:
72 | sort: asc
73 | filters:
74 | exclude:
75 | - '^docs:'
76 | - '^test:'
77 |
78 | release:
79 | draft: false
80 | prerelease: false
81 | name_template: "Release {{ .Tag }}"
82 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | testdata/django.before.Dockerfile
--------------------------------------------------------------------------------
/Dockerfile.chainguard:
--------------------------------------------------------------------------------
1 | testdata/django.after.Dockerfile
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dfc
2 |
3 |
4 |
5 |
6 |
7 |
8 | dockerfile converter
9 |
10 | CLI to convert Dockerfiles to use Chainguard Images and APKs in FROM and RUN lines etc.
11 |
12 |
13 |
14 | ---
15 | ## About
16 |
17 | The `dfc` tool helps you migrate your Dockerfiles to use Chainguard Images. For comprehensive guides on migration, check out:
18 | - [Dockerfile Conversion Guide](https://edu.chainguard.dev/chainguard/migration/dockerfile-conversion/)
19 | - [Chainguard Migration Overview](https://edu.chainguard.dev/chainguard/migration/migrations-overview/)
20 |
21 | ## Installation
22 |
23 | You can install `dfc` from Homebrew:
24 |
25 | ```sh
26 | brew install chainguard-dev/tap/dfc
27 | ```
28 |
29 | You can also install `dfc` from source:
30 |
31 | ```sh
32 | go install github.com/chainguard-dev/dfc@latest
33 | ```
34 |
35 | You can also use the `dfc` container image (from Docker Hub or `cgr.dev`):
36 |
37 | ```sh
38 | docker run --rm -v "$PWD":/work chainguard/dfc
39 | docker run --rm -v "$PWD":/work cgr.dev/chainguard/dfc
40 | ```
41 |
42 | ## Usage
43 |
44 | Convert Dockerfile and print converted contents to terminal:
45 |
46 | ```sh
47 | dfc ./Dockerfile
48 | ```
49 |
50 | Save the output to new Dockerfile called `Dockerfile.chainguard`:
51 |
52 | ```sh
53 | dfc ./Dockerfile > ./Dockerfile.chainguard
54 | ```
55 |
56 | You can also pipe from stdin:
57 |
58 | ```sh
59 | cat ./Dockerfile | dfc -
60 | ```
61 |
62 | Convert the file in-place using `--in-place` / `-i` (saves backup in `.bak` file):
63 |
64 | ```sh
65 | dfc --in-place ./Dockerfile
66 | mv ./Dockerfile.bak ./Dockerfile # revert
67 | ```
68 |
69 | Note: the `Dockerfile` and `Dockerfile.chainguard` in the root of this repo are not actually for building `dfc`; they
70 | are symlinks to files in the [`testdata/`](./testdata/) folder so users can run the commands in this README.
71 |
72 | ## Examples
73 |
74 | For complete before and after examples, see the [`testdata/`](./testdata/) folder.
75 |
76 | ### Convert a single `FROM` line
77 |
78 | ```sh
79 | echo "FROM node" | dfc -
80 | ```
81 |
82 | Result:
83 |
84 | ```Dockerfile
85 | FROM cgr.dev/ORG/node:latest-dev
86 | ```
87 |
88 | ### Convert a single `RUN` line
89 |
90 | ```sh
91 | echo "RUN apt-get update && apt-get install -y nano" | dfc -
92 | ```
93 |
94 | Result:
95 |
96 | ```Dockerfile
97 | RUN apk add --no-cache nano
98 | ```
99 |
100 | ### Convert a whole Dockerfile
101 |
102 | ```sh
102 | cat <<EOF | dfc -
FROM node
RUN apt-get update && apt-get install -y nano
EOF
138 | ```
139 |
140 | To configure your `cgr.dev` namespace, use the `--org` flag:
141 |
142 | ```sh
143 | dfc --org="example.com" ./Dockerfile
144 | ```
145 |
146 | Resulting in:
147 |
148 | ```Dockerfile
149 | FROM cgr.dev/example.com/
150 | ```
151 |
152 | If you mistakenly ran `dfc` with no configuration options and just want to replace the ORG
153 | in the converted file, you can run something like this:
154 |
155 | ```sh
156 | sed "s|/ORG/|/example.com/|" ./Dockerfile > dfc.tmp && mv dfc.tmp ./Dockerfile
157 | ```
158 |
159 | ### Alternate registry
160 |
161 | To use an alternative registry domain and root namespace, use the `--registry` flag:
162 |
164 | ```sh
164 | dfc --registry="r.example.com/cgr-mirror" ./Dockerfile
165 | ```
166 |
167 | Resulting in:
168 |
169 | ```Dockerfile
170 | FROM r.example.com/cgr-mirror/
171 | ```
172 |
173 | Note: the `--registry` flag takes precedence over the `--org` flag.
174 |
175 | ### Custom mappings file
176 |
177 | If you need to supply extra image or package mappings, use the `--mappings` flag:
178 |
179 | ```sh
180 | dfc --mappings="./custom-mappings.yaml" ./Dockerfile
181 | ```
182 |
183 | By default, custom mappings specified with `--mappings` will overlay the built-in mappings found in [`pkg/dfc/builtin-mappings.yaml`](./pkg/dfc/builtin-mappings.yaml). If you wish to bypass the built-in mappings entirely and only use your custom mappings, use the `--no-builtin` flag:
184 |
185 | ```sh
186 | dfc --mappings="./custom-mappings.yaml" --no-builtin ./Dockerfile
187 | ```
188 |
189 | ### Updating Built-in Mappings
190 |
191 | The `--update` flag is used to update the built-in mappings in a local cache from the latest version available in the repository:
192 |
193 | ```sh
194 | dfc --update
195 | ```
196 |
197 | You can use this flag as a standalone command to update mappings without performing any conversion, or combine it with a conversion command to ensure you're using the latest mappings:
198 |
199 | ```sh
200 | dfc --update ./Dockerfile
201 | ```
202 |
203 | When combined with a conversion command, the update check is performed prior to running the conversion, ensuring your conversions use the most up-to-date mappings available.
204 |
205 | ### Submitting New Built-in Mappings
206 |
207 | If you'd like to request new mappings to be added to the built-in mappings file, please [open a GitHub issue](https://github.com/chainguard-dev/dfc/issues/new?template=BLANK_ISSUE).
208 |
209 | Note that the `builtin-mappings.yaml` file is generated via internal automation and cannot be edited directly. Your issue will be reviewed by the maintainers, and if approved, the mappings will be added to the internal automation that generates the built-in mappings.
210 |
211 | ### Configuration Files and Cache
212 |
213 | `dfc` follows the XDG specification for configuration and cache directories:
214 |
215 | - **XDG_CONFIG_HOME**: Stores configuration files, including a symlink to `builtin-mappings.yaml`. By default, this is `~/.config/dev.chainguard.dfc/` (on macOS: `~/Library/Application\ Support/dev.chainguard.dfc/`).
216 | - **XDG_CACHE_HOME**: Stores cached data following the [OCI layout specification](https://github.com/opencontainers/image-spec/blob/main/image-layout.md). By default, this is `~/.cache/dev.chainguard.dfc/` (on macOS: `~/Library/Caches/dev.chainguard.dfc/`).
217 |
218 | Note: `dfc` does not make any network requests unless the `--update` flag is provided. However, `dfc` will perform a syscall to check for the existence of the `builtin-mappings.yaml` file (symlink) in the XDG_CONFIG directory.
219 |
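As a sketch of where these files land, the following uses the same [`github.com/adrg/xdg`](https://github.com/adrg/xdg) library that appears in `dfc`'s `go.mod`. The `dev.chainguard.dfc` subdirectory names come from the defaults above; treat the exact file layout as an assumption:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/adrg/xdg" // listed in dfc's go.mod
)

func main() {
	// xdg.ConfigHome and xdg.CacheHome resolve XDG_CONFIG_HOME / XDG_CACHE_HOME,
	// falling back to the platform defaults described above.
	mappings := filepath.Join(xdg.ConfigHome, "dev.chainguard.dfc", "builtin-mappings.yaml")
	cache := filepath.Join(xdg.CacheHome, "dev.chainguard.dfc")

	fmt.Println("mappings symlink (assumed location):", mappings)
	fmt.Println("cache dir (assumed location):       ", cache)
}
```
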
220 | ## How it works
221 |
222 | ### `FROM` line modifications
223 |
224 | For each `FROM` line in the Dockerfile, `dfc` attempts to replace the base image with an equivalent Chainguard Image.
225 |
226 | ### `RUN` line modifications
227 |
228 | For each `RUN` line in the Dockerfile, `dfc` attempts to detect the use of a known package manager (e.g. `apt-get` / `yum` / `apk`), extract the names of any packages being installed, map them via the package mappings in [`mappings.yaml`](./mappings.yaml), and replace the old install command with an equivalent `apk add --no-cache` command.
229 |
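Conceptually the rewrite looks like the sketch below (illustrative only; the real logic lives in [`pkg/dfc`](./pkg/dfc/), and `packageMap` is a hypothetical stand-in for the YAML package mappings):

```go
package main

import (
	"fmt"
	"strings"
)

// rewriteRun is an illustrative sketch of the rewrite described above,
// not dfc's actual implementation.
func rewriteRun(packages []string, packageMap map[string]string) string {
	out := make([]string, 0, len(packages))
	for _, pkg := range packages {
		if mapped, ok := packageMap[pkg]; ok {
			pkg = mapped // e.g. a Debian package name mapped to its apk equivalent
		}
		out = append(out, pkg)
	}
	return "RUN apk add --no-cache " + strings.Join(out, " ")
}

func main() {
	mappings := map[string]string{"build-essential": "build-base"} // hypothetical entry
	fmt.Println(rewriteRun([]string{"build-essential", "nano"}, mappings))
	// Output: RUN apk add --no-cache build-base nano
}
```
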
230 | ### `USER` line modifications
231 |
232 | If `dfc` has detected the use of a package manager and ended up converting a RUN line,
233 | then `USER root` will be appended under the last `FROM` line.
234 |
235 | In the future we plan to handle this more elegantly, but this is the current state.
236 |
237 | ### `ARG` line modifications
238 |
239 | For each `ARG` line in the Dockerfile, `dfc` checks if the ARG is used as a base image in a subsequent `FROM` line. If it is, and the ARG has a default value that appears to be a base image, then `dfc` will modify the default value to use a Chainguard Image instead.
240 |
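For a concrete illustration using the library API described below in [Using from Go](#using-from-go), the following program routes an `ARG` default into a `FROM` line. The expected-output comment follows the tag mapping rules below and is indicative rather than guaranteed:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chainguard-dev/dfc/pkg/dfc"
)

func main() {
	ctx := context.Background()

	// The ARG default is used as the base image in the FROM line below.
	raw := []byte("ARG BASE_IMAGE=node:20\n" +
		"FROM ${BASE_IMAGE}\n" +
		"RUN apt-get update && apt-get install -y nano\n")

	dockerfile, err := dfc.ParseDockerfile(ctx, raw)
	if err != nil {
		log.Fatalf("ParseDockerfile(): %v", err)
	}

	converted, err := dockerfile.Convert(ctx, dfc.Options{Organization: "example.com"})
	if err != nil {
		log.Fatalf("dockerfile.Convert(): %v", err)
	}

	// The ARG default should now reference a Chainguard Image; per the tag
	// rules below, something like cgr.dev/example.com/node:20-dev.
	fmt.Println(converted)
}
```
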
241 | ## Special considerations
242 |
243 | ### Busybox command syntax
244 |
245 | #### useradd/groupadd vs. adduser/addgroup
246 |
247 | Since adding users and groups in Chainguard Images in Dockerfiles requires
248 | `adduser` / `addgroup` (via busybox), when we detect the use of
249 | `useradd` or `groupadd` commands in `RUN` lines, we will automatically try to
250 | convert them to the equivalent `adduser` / `addgroup` commands.
251 |
252 | If we see that you have installed the `shadow` package
253 | (which actually provides `useradd` and `groupadd`), then we do not modify
254 | these commands and leave them as is.
255 |
256 | #### tar command
257 |
258 | The syntax for the `tar` command is slightly different in busybox than it is
259 | in the GNU version which is present by default on various distros.
260 |
261 | For that reason, we will attempt to convert `tar` commands in `RUN` lines
262 | using the GNU syntax to use the busybox syntax instead.
263 |
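To see both rewrites on a concrete input, you can run `RUN` lines through the converter with the library API described below in [Using from Go](#using-from-go). The exact busybox flags produced are determined by `pkg/dfc`, so this sketch just prints whatever the current rules emit:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/chainguard-dev/dfc/pkg/dfc"
)

func main() {
	ctx := context.Background()

	// One RUN line with useradd/groupadd, one with GNU-style tar flags.
	raw := []byte("FROM debian\n" +
		"RUN groupadd -r app && useradd -r -g app app\n" +
		"RUN tar --extract --file=app.tar.gz --directory=/app\n")

	dockerfile, err := dfc.ParseDockerfile(ctx, raw)
	if err != nil {
		log.Fatalf("ParseDockerfile(): %v", err)
	}

	converted, err := dockerfile.Convert(ctx, dfc.Options{Organization: "example.com"})
	if err != nil {
		log.Fatalf("dockerfile.Convert(): %v", err)
	}

	// Expect adduser/addgroup and busybox-style tar flags in the output.
	fmt.Println(converted)
}
```
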
264 | ## Base image and tag mapping
265 |
266 | When converting Dockerfiles, `dfc` applies the following logic to determine which Chainguard Image and tag to use:
267 |
268 | ### Base Image Mapping
269 | - Image mappings are defined in the `mappings.yaml` file under the `images` section
270 | - Each mapping defines a source image name (e.g., `ubuntu`, `nodejs`) and its Chainguard equivalent
271 | - Glob matching is supported using the asterisk (*) wildcard (e.g., `nodejs*` matches both `nodejs` and `nodejs20-debian12`)
272 | - If a mapping includes a tag (e.g., `chainguard-base:latest`), that tag is always used
273 | - If no tag is specified in the mapping (e.g., `node`), tag selection follows the standard tag mapping rules
274 | - If no mapping is found for a base image, the original name is preserved and tag mapping rules apply
275 | - Docker Hub images with full domain references (e.g., `docker.io/library/node`, `index.docker.io/library/node`) are normalized before mapping by removing the domain and `library/` prefix, which allows them to match against the simple image name entries in mappings.yaml
276 |
277 | ### Tag Mapping
278 | The tag conversion follows these rules (a small Go restatement appears after the examples below):
279 |
280 | 1. **For chainguard-base**:
281 | - Always uses `latest` tag, regardless of the original tag or presence of RUN commands
282 |
283 | 2. **For tags containing ARG variables** (like `${NODE_VERSION}`):
284 | - Preserves the original variable reference
285 | - Adds `-dev` suffix only if the stage contains RUN commands
286 | - Example: `FROM node:${NODE_VERSION}` → `FROM cgr.dev/ORG/node:${NODE_VERSION}-dev` (if stage has RUN commands)
287 |
288 | 3. **For other images**:
289 | - If no tag is specified in the original Dockerfile:
290 | - Uses `latest-dev` if the stage contains RUN commands
291 | - Uses `latest` if the stage has no RUN commands
292 | - If a tag is specified:
293 | - If it's a semantic version (e.g., `1.2.3` or `v1.2.3`):
294 | - Truncates to major.minor only (e.g., `1.2`)
295 | - Adds `-dev` suffix only if the stage contains RUN commands
296 | - If the tag starts with `v` followed by numbers, the `v` is removed
297 | - For non-semver tags (e.g., `alpine`, `slim`):
298 | - Uses `latest-dev` if the stage has RUN commands
299 | - Uses `latest` if the stage has no RUN commands
300 |
301 | This approach ensures that:
302 | - Development variants (`-dev`) with shell access are only used when needed
303 | - Semantic version tags are simplified to major.minor for better compatibility
304 | - The final stage in multi-stage builds uses minimal images without dev tools when possible
305 | - Build arg variables in tags are preserved with proper `-dev` suffix handling
306 |
307 | ### Examples
308 | - `FROM node:14` → `FROM cgr.dev/ORG/node:14-dev` (if stage has RUN commands)
309 | - `FROM node:14.17.3` → `FROM cgr.dev/ORG/node:14.17-dev` (if stage has RUN commands)
310 | - `FROM debian:bullseye` → `FROM cgr.dev/ORG/chainguard-base:latest` (always)
311 | - `FROM golang:1.19-alpine` → `FROM cgr.dev/ORG/go:1.19` (if stage has no RUN commands)
312 | - `FROM node:${VERSION}` → `FROM cgr.dev/ORG/node:${VERSION}-dev` (if stage has RUN commands)
313 |
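The rules above can be restated in a few lines of Go. This is an illustrative summary, not the code `dfc` actually uses, and it does not reproduce the special handling that turns `golang:1.19-alpine` into `go:1.19` in the examples just above:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// versionRe matches bare version tags like "14", "1.2.3", or "v1.2.3".
var versionRe = regexp.MustCompile(`^v?\d+(?:\.\d+)*$`)

// mapTag restates the documented tag rules; pkg/dfc is the source of truth.
func mapTag(image, tag string, stageHasRun bool) string {
	dev := ""
	if stageHasRun {
		dev = "-dev"
	}
	switch {
	case image == "chainguard-base":
		return "latest" // rule 1: always latest
	case strings.Contains(tag, "${"):
		return tag + dev // rule 2: preserve ARG variable references
	case tag == "":
		return "latest" + dev // rule 3: no tag specified
	case versionRe.MatchString(tag):
		parts := strings.SplitN(strings.TrimPrefix(tag, "v"), ".", 3)
		if len(parts) > 2 {
			parts = parts[:2] // truncate to major.minor
		}
		return strings.Join(parts, ".") + dev
	default:
		return "latest" + dev // non-semver tags like "alpine" or "slim"
	}
}

func main() {
	fmt.Println(mapTag("node", "14.17.3", true)) // 14.17-dev
	fmt.Println(mapTag("node", "", false))       // latest
}
```
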
314 | ## JSON mode
315 |
316 | Get converted Dockerfile as JSON using `--json` / `-j`:
317 |
318 | ```sh
319 | dfc --json ./Dockerfile
320 | ```
321 |
322 | Pipe it to `jq`:
323 |
324 | ```sh
325 | dfc -j ./Dockerfile | jq
326 | ```
327 |
328 | ### Useful jq formulas
329 |
330 | Reconstruct the Dockerfile pre-conversion:
331 |
332 | ```sh
333 | dfc -j ./Dockerfile | jq -r '.lines[]|(.extra + .raw)'
334 | ```
335 |
336 | Reconstruct the Dockerfile post-conversion:
337 |
338 | ```sh
339 | dfc -j ./Dockerfile | jq -r '.lines[]|(.extra + (if .converted then .converted else .raw end))'
340 | ```
341 |
342 | Convert and strip comments:
343 |
344 | ```sh
345 | dfc -j ./Dockerfile | jq -r '.lines[]|(if .converted then .converted else .raw end)'
346 | ```
347 |
348 | Get list of all distros detected from RUN lines:
349 |
350 | ```sh
351 | dfc -j ./Dockerfile | jq -r '.lines[].run.distro' | grep -v null | sort -u
352 | ```
353 |
354 | Get list of package managers detected from RUN lines:
355 |
356 | ```sh
357 | dfc -j ./Dockerfile | jq -r '.lines[].run.manager' | grep -v null | sort -u
358 | ```
359 |
360 | Get all the packages initially detected during parsing:
361 |
362 | ```sh
363 | dfc -j ./Dockerfile | jq -r '.lines[].run.packages' | grep '"' | cut -d'"' -f 2 | sort -u | xargs
364 | ```
365 |
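If you prefer consuming the JSON from Go rather than jq, the fields exercised by the formulas above suggest a partial schema like the one below. Treat it as an assumption: only the fields used above are modeled, and the full output may contain more:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// Output is a partial, assumed schema covering only the fields
// used by the jq formulas above.
type Output struct {
	Lines []struct {
		Extra     string `json:"extra"`
		Raw       string `json:"raw"`
		Converted string `json:"converted"`
		Run       *struct {
			Distro   string   `json:"distro"`
			Manager  string   `json:"manager"`
			Packages []string `json:"packages"`
		} `json:"run"`
	} `json:"lines"`
}

func main() {
	// e.g. dfc -j ./Dockerfile > out.json
	data, err := os.ReadFile("out.json")
	if err != nil {
		log.Fatal(err)
	}
	var out Output
	if err := json.Unmarshal(data, &out); err != nil {
		log.Fatal(err)
	}
	for _, l := range out.Lines {
		if l.Run != nil {
			fmt.Println(l.Run.Manager, l.Run.Packages)
		}
	}
}
```
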
366 | ## Using from Go
367 |
368 | The package `github.com/chainguard-dev/dfc/pkg/dfc` can be imported in Go and you can
369 | parse and convert Dockerfiles on your own without the `dfc` CLI:
370 |
371 | ```go
372 | package main
373 |
374 | import (
375 | "context"
376 | "fmt"
377 | "log"
379 | "strings"
380 |
381 | "github.com/chainguard-dev/dfc/pkg/dfc"
382 | )
383 |
384 | var (
385 | raw = []byte(strings.TrimSpace(`
386 | FROM node
387 | RUN apt-get update && apt-get install -y nano
388 | `))
389 |
390 | org = "example.com"
391 | )
392 |
393 | func main() {
394 | ctx := context.Background()
395 |
396 | // Parse the Dockerfile bytes
397 | dockerfile, err := dfc.ParseDockerfile(ctx, raw)
398 | if err != nil {
399 | log.Fatalf("ParseDockerfile(): %v", err)
400 | }
401 |
402 | // Convert
403 | converted, err := dockerfile.Convert(ctx, dfc.Options{
404 | Organization: org,
405 | // Registry: "r.example.com/cgr-mirror", // Optional: registry override
406 | // Update: true, // Optional: update mappings before conversion
407 | // ExtraMappings: myCustomMappings, // Optional: overlay mappings on top of builtin
408 | // NoBuiltIn: true, // Optional: skip built-in mappings
409 | })
410 | if err != nil {
411 | log.Fatalf("dockerfile.Convert(): %v", err)
412 | }
413 |
414 | // Print converted Dockerfile content
415 | fmt.Println(converted)
416 | }
417 | ```
418 |
419 | ### Custom Base Image Conversion
420 |
421 | You can customize how base images are converted by providing a `FromLineConverter` function. This example shows how to handle internal repository images differently while using the default Chainguard conversion for other images:
422 |
423 | ```go
424 | package main
425 |
426 | import (
427 | "context"
428 | "fmt"
429 | "log"
430 | "path/filepath"
431 | "strings"
432 |
433 | "github.com/chainguard-dev/dfc/pkg/dfc"
434 | )
435 |
436 | func main() {
437 | ctx := context.Background()
438 |
439 | // Sample Dockerfile with multiple FROM lines
440 | raw := []byte(strings.TrimSpace(`
441 | FROM node:14
442 | RUN npm install
443 |
444 | FROM internal-repo.example.com/python:3.9
445 | COPY --from=0 /app/node_modules /app/node_modules
446 | RUN pip install -r requirements.txt
447 | `))
448 |
449 | // Parse the Dockerfile
450 | dockerfile, err := dfc.ParseDockerfile(ctx, raw)
451 | if err != nil {
452 | log.Fatalf("ParseDockerfile(): %v", err)
453 | }
454 |
455 | // Define a custom converter that:
456 | // 1. For internal repository images, adds basename as a suffix to the tag
457 | // 2. For all other images, uses the default Chainguard conversion
458 | // 3. Appends "-dev" suffix to tags when the stage contains RUN commands
459 | customConverter := func(from *dfc.FromDetails, converted string, stageHasRun bool) (string, error) {
460 | // Check if this is an internal repository image
461 | if strings.Contains(from.Orig, "internal-repo") {
462 | // Extract the image basename
463 | basename := filepath.Base(from.Base)
464 |
465 | // Extract tag part if present
466 | tagPart := ""
467 | if from.Tag != "" {
468 | tagPart = from.Tag
469 | } else {
470 | tagPart = "latest"
471 | }
472 |
473 | // Add -dev suffix if the stage has a RUN command and the tag doesn't already have it
474 | if stageHasRun && !strings.HasSuffix(tagPart, "-dev") {
475 | tagPart += "-dev"
476 | }
477 |
478 | // For internal images, we maintain the internal repo but add our org
479 | // and image basename as a suffix to the tag
480 | return fmt.Sprintf("internal-repo.example.com/%s:%s-my-org-%s",
481 | from.Base, tagPart, basename), nil
482 | }
483 |
484 | // For all other images, use the default Chainguard conversion
485 | return converted, nil
486 | }
487 |
488 | // Convert with custom image conversion
489 | converted, err := dockerfile.Convert(ctx, dfc.Options{
490 | Organization: "my-org",
491 | FromLineConverter: customConverter,
492 | })
493 | if err != nil {
494 | log.Fatalf("dockerfile.Convert(): %v", err)
495 | }
496 |
497 | // Print the results
498 | fmt.Println("Original Dockerfile:")
499 | fmt.Println(string(raw))
500 | fmt.Println("\nConverted Dockerfile:")
501 | fmt.Println(converted)
502 | }
503 | ```
504 |
505 | Example output:
506 | ```
507 | Original Dockerfile:
508 | FROM node:14
509 | RUN npm install
510 |
511 | FROM internal-repo.example.com/python:3.9
512 | COPY --from=0 /app/node_modules /app/node_modules
513 | RUN pip install -r requirements.txt
514 |
515 | Converted Dockerfile:
516 | FROM cgr.dev/my-org/node:14-dev
517 | USER root
518 | RUN apk add --no-cache npm
519 | RUN npm install
520 |
521 | FROM internal-repo.example.com/python:3.9-my-org-python
522 | USER root
523 | COPY --from=0 /app/node_modules /app/node_modules
524 | RUN apk add --no-cache pip
525 | RUN pip install -r requirements.txt
526 | ```
527 |
528 | This approach gives you full control over image reference conversion while preserving DFC's package manager and command conversion capabilities.
529 |
530 | ## Usage via AI Agent (MCP Server)
531 |
532 | While `dfc` operates completely offline and does not in itself use AI to
533 | perform conversion of Dockerfiles, it can be leveraged as an [MCP Server](https://modelcontextprotocol.io/) to integrate with an AI-based prompt engineering workflow.
534 |
535 | For example, after configuring the `dfc` MCP server, you could ask an AI coding agent "Please convert this Dockerfile to use Chainguard Images: ...", and the MCP server will perform the same conversion as the `dfc` CLI (by using it as a Go library as described above).
536 |
537 | This may provide powerful functionality for converting Dockerfiles that are highly specific to your organization's use case, for example using `dfc` for 90% of the conversion effort, and tailored AI prompts for the other 10%.
538 |
539 | That said, this work is **highly experimental** and is purposely not packaged as part of the `dfc` release (for now). Please use at your own risk.
540 |
541 | For more information on using `dfc` as an MCP server, please see the README in the [mcp-server/](./mcp-server/) directory at the root of this repo.
542 |
543 | ## Limitations
544 |
545 | - **Incomplete Conversion**: The tool makes a best effort to convert Dockerfiles but does not guarantee that the converted Dockerfiles will be buildable by Docker.
546 | - **Comment and Spacing Preservation**: While the tool attempts to preserve comments and spacing, there may be cases where formatting is altered during conversion.
547 | - **Dynamic Variables**: The tool may not handle dynamic variables in Dockerfiles correctly, especially if they are used in complex expressions.
548 | - **Unsupported Directives**: Some Dockerfile directives may not be fully supported or converted, leading to potential build issues.
549 | - **Package Manager Commands**: The tool focuses on converting package manager commands but may not cover all possible variations or custom commands.
550 | - **Multi-stage Builds**: While the tool supports multi-stage builds, it may not handle all edge cases, particularly with complex stage dependencies.
551 | - **Platform-Specific Features**: The tool may not account for platform-specific features or optimizations in Dockerfiles.
552 | - **Security Considerations**: The tool does not perform security checks on the converted Dockerfiles, and users should review the output for potential vulnerabilities.
553 |
554 | ## Contact Us
555 |
556 | For issues related strictly to `dfc` as an open source tool,
557 | please [open a GitHub issue](https://github.com/chainguard-dev/dfc/issues/new?template=BLANK_ISSUE).
558 |
559 | Chainguard customers: please share issues or feature requests with
560 | your support contact so we can prioritize and escalate internally
561 | (with or without a GitHub issue/PR).
562 |
563 | Interested in Chainguard Images and want to get in touch with sales? Use [this form](https://www.chainguard.dev/contact).
564 |
--------------------------------------------------------------------------------
/RELEASE.md:
--------------------------------------------------------------------------------
1 | # dfc Release Process
2 |
3 | ## Patch releases
4 |
5 | The most common type of release of dfc is a patch release. Generally we should aim to do these as often as necessary to release _backward compatible_ changes, especially to release updated dependencies to fix vulnerabilities.
6 |
7 | To cut a release:
8 | - go to https://github.com/chainguard-dev/dfc/releases/new
9 | - click "Choose a tag" then "Find or create a new tag"
10 | - type a new patch version tag for the latest minor version
11 | - for example, if the latest version is `v0.11.5`, create a patch release `v0.11.6`
12 | - click "Create new tag: v0.X.Y on publish"
13 | - you can leave the release title empty
14 | - click "Generate release notes"
15 | - make any editorial changes to the release notes you think are relevant
16 | - make sure "Set as the latest release" is checked
17 | - click **"Publish release"**
18 |
19 | ### Monitor the release automation
20 |
21 | Once the tag is pushed, the [`Create Release` action](https://github.com/chainguard-dev/dfc/actions/workflows/release.yaml)
22 | will attach the appropriate release artifacts and update release notes.
23 |
24 | At the time of this writing, the release job takes 20 to 30 minutes to execute.
25 |
26 | Make any editorial changes to the release notes you think are necessary.
27 | You may want to highlight certain changes or remove items that aren't interesting.
28 |
29 | Once the `Release` action has been completed successfully, find your release on
30 | the [releases page](https://github.com/chainguard-dev/dfc/releases)
31 |
32 | ## Minor releases
33 |
34 | Occasionally there are large or breaking changes to dfc that we want to highlight with a new minor release.
35 | A minor release should be cut shortly after a breaking change is made, so that regular patch releases don't release breaking changes.
36 |
37 | The process for cutting a release is exactly the same as above, except that you should pick a new minor version.
38 |
39 | For example, if the latest version is `v0.11.5`, create a minor release `v0.12.0`.
40 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | To report a security issue, please email
6 | [security@chainguard.dev](mailto:security@chainguard.dev)
7 | with a description of the issue, the steps you took to create the issue,
8 | affected versions, and, if known, mitigations for the issue.
9 |
10 | All support will be made on a best-effort basis, so please indicate the "urgency level" of the vulnerability as Critical, High, Medium, or Low.
11 |
--------------------------------------------------------------------------------
/builtin-mappings.yaml:
--------------------------------------------------------------------------------
1 | pkg/dfc/builtin-mappings.yaml
--------------------------------------------------------------------------------
/dfc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chainguard-dev/dfc/eac44bf0011544e2f88a0079b18277e55bae509a/dfc.png
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/chainguard-dev/dfc
2 |
3 | go 1.24
4 |
5 | require (
6 | github.com/adrg/xdg v0.5.3
7 | github.com/chainguard-dev/clog v1.7.0
8 | github.com/google/go-cmp v0.7.0
9 | github.com/spf13/cobra v1.9.1
10 | gopkg.in/yaml.v3 v3.0.1
11 | )
12 |
13 | require (
14 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
15 | github.com/spf13/pflag v1.0.6 // indirect
16 | golang.org/x/sys v0.26.0 // indirect
17 | )
18 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
2 | github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
3 | github.com/chainguard-dev/clog v1.7.0 h1:guPznsK8vLHvzz1QJe2yU6MFeYaiSOFOQBYw4OXu+g8=
4 | github.com/chainguard-dev/clog v1.7.0/go.mod h1:4+WFhRMsGH79etYXY3plYdp+tCz/KCkU8fAr0HoaPvs=
5 | github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
8 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
9 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
10 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
11 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
12 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
13 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
14 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
15 | github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
16 | github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
17 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
18 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
19 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
20 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
21 | golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
22 | golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
23 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
24 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
25 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
26 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
27 |
--------------------------------------------------------------------------------
/hack/boilerplate/boilerplate.go.txt:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
--------------------------------------------------------------------------------
/hack/boilerplate/boilerplate.sh.txt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2025 Chainguard, Inc.
4 | # SPDX-License-Identifier: Apache-2.0
5 |
--------------------------------------------------------------------------------
/hack/boilerplate/boilerplate.yaml.txt:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
--------------------------------------------------------------------------------
/hack/boilerplate/boilerplate.yml.txt:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
--------------------------------------------------------------------------------
/integration/README.md:
--------------------------------------------------------------------------------
1 | ## Integration testing
2 |
3 | These tests verify that converted Dockerfiles actually build.
4 |
5 | In [`../testdata/`](../testdata/), if we see a folder called `<test-name>-integration`, the test is automatically opted into
6 | integration testing.
7 |
8 | To run:
9 |
10 | ```sh
11 | go test -v ./integration/ -tags=integration
12 | ```
13 |
14 | TODO: hook these tests up to CI, requires a valid Chainguard org
15 | with access to all necessary base images.
16 |
--------------------------------------------------------------------------------
/integration/docker_build_integration_test.go:
--------------------------------------------------------------------------------
1 | //go:build integration
2 | // +build integration
3 |
4 | /*
5 | Copyright 2025 Chainguard, Inc.
6 | SPDX-License-Identifier: Apache-2.0
7 | */
8 |
9 | package integration
10 |
11 | import (
12 | "bufio"
13 | "context"
14 | "fmt"
15 | "io"
16 | "os"
17 | "os/exec"
18 | "path/filepath"
19 | "strings"
20 | "testing"
21 | "time"
22 | )
23 |
24 | // TestIntegrationBuild tests building the Dockerfiles from the after files
25 | // This test is only run when the -tags=integration flag is passed to go test
26 | func TestIntegrationBuild(t *testing.T) {
27 | fmt.Println("Running TestIntegrationBuild")
28 |
29 | // Find all .after.Dockerfile files in the testdata directory
30 | files, err := filepath.Glob("../testdata/*.after.Dockerfile")
31 | if err != nil {
32 | t.Fatalf("Failed to find test files: %v", err)
33 | }
34 |
35 | t.Logf("Found %d .after.Dockerfile files, will only test those with corresponding directories", len(files))
36 | availableTests := 0
37 |
38 | for _, file := range files {
39 | // Extract the test name from the file path
40 | // e.g., "../testdata/django.after.Dockerfile" -> "django"
41 | baseName := filepath.Base(file)
42 | testName := strings.TrimSuffix(baseName, ".after.Dockerfile")
43 |
44 | // Check if a directory with the same name exists
45 | dirPath := filepath.Join("../testdata", fmt.Sprintf("%s-integration", testName))
46 | if _, err := os.Stat(dirPath); os.IsNotExist(err) {
47 | // Skip tests without a corresponding directory
48 | t.Logf("Skipping %s: no directory at %s", testName, dirPath)
49 | continue
50 | }
51 |
52 | availableTests++
53 | t.Logf("Will run test for %s using context directory %s", testName, dirPath)
54 |
55 | // Run the test for this file
56 | t.Run(testName, func(t *testing.T) {
57 | // Get the after file path
58 | afterFile := filepath.Join("../testdata", testName+".after.Dockerfile")
59 |
60 | // Check if the after file exists
61 | if _, err := os.Stat(afterFile); os.IsNotExist(err) {
62 | t.Fatalf("After file %s does not exist", afterFile)
63 | }
64 |
65 | tagName := fmt.Sprintf("dfc-%s-after:test", testName)
66 | // Register cleanup before the build so the image is removed even when
67 | // the build fails (removing a missing image only logs a warning)
68 | defer cleanupImage(t, tagName)
69 | // Build the Docker image from the after Dockerfile
70 | buildImage(t, afterFile, tagName, dirPath)
71 | })
72 | }
73 |
74 | if availableTests == 0 {
75 | t.Log("No tests were run because no directories matching test names were found")
76 | } else {
77 | t.Logf("Successfully ran %d integration tests", availableTests)
78 | }
79 | }
80 |
81 | // buildImage builds a Docker image from the given Dockerfile
82 | func buildImage(t *testing.T, dockerfilePath, tagName, contextDir string) {
83 | t.Logf("Building Docker image %s using %s with context %s", tagName, dockerfilePath, contextDir)
84 |
85 | // Create a context with timeout
86 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
87 | defer cancel()
88 |
89 | // Prepare the Docker build command
90 | cmd := exec.CommandContext(ctx, "docker", "build", "--progress=plain", "-t", tagName, "-f", dockerfilePath, contextDir)
91 |
92 | // Create pipes for stdout and stderr
93 | stdoutPipe, err := cmd.StdoutPipe()
94 | if err != nil {
95 | t.Fatalf("Failed to create stdout pipe: %v", err)
96 | }
97 |
98 | stderrPipe, err := cmd.StderrPipe()
99 | if err != nil {
100 | t.Fatalf("Failed to create stderr pipe: %v", err)
101 | }
102 |
103 | // Start the command
104 | if err := cmd.Start(); err != nil {
105 | t.Fatalf("Failed to start Docker build: %v", err)
106 | }
107 |
108 | // Stream stdout in real-time
109 | go streamOutput(t, stdoutPipe, "stdout")
110 |
111 | // Stream stderr in real-time
112 | go streamOutput(t, stderrPipe, "stderr")
113 |
114 | // Wait for the command to complete
115 | if err := cmd.Wait(); err != nil {
116 | if ctx.Err() == context.DeadlineExceeded {
117 | t.Fatalf("Docker build timed out after 5 minutes")
118 | }
119 | t.Fatalf("Docker build failed: %v", err)
120 | }
121 |
122 | t.Logf("Successfully built Docker image %s", tagName)
123 | }
124 |
125 | // cleanupImage removes a Docker image
126 | func cleanupImage(t *testing.T, tagName string) {
127 | t.Logf("Cleaning up Docker image %s", tagName)
128 |
129 | // Create a context with timeout
130 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
131 | defer cancel()
132 |
133 | // Prepare the Docker rmi command
134 | cmd := exec.CommandContext(ctx, "docker", "rmi", tagName)
135 |
136 | // Run the command
137 | output, err := cmd.CombinedOutput()
138 | if err != nil {
139 | t.Logf("Failed to remove Docker image %s: %v\nOutput: %s", tagName, err, output)
140 | return
141 | }
142 |
143 | t.Logf("Successfully removed Docker image %s", tagName)
144 | }
145 |
146 | // streamOutput reads from a pipe and logs each line to the test logger
147 | func streamOutput(t *testing.T, pipe io.ReadCloser, name string) {
148 | scanner := bufio.NewScanner(pipe)
149 | for scanner.Scan() {
150 | t.Logf("[%s] %s", name, scanner.Text())
151 | }
152 |
153 | if err := scanner.Err(); err != nil {
154 | t.Errorf("Error reading from %s: %v", name, err)
155 | }
156 | }
157 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package main
7 |
8 | import (
9 | "bytes"
10 | "context"
11 | "encoding/json"
12 | "fmt"
13 | "log/slog"
14 | "os"
15 | "os/signal"
16 | "path/filepath"
17 | "syscall"
18 |
19 | "github.com/chainguard-dev/clog"
20 | "github.com/chainguard-dev/clog/slag"
21 | "github.com/spf13/cobra"
22 | "gopkg.in/yaml.v3"
23 |
24 | "github.com/chainguard-dev/dfc/pkg/dfc"
25 | )
26 |
27 | var (
28 | // Version is the semantic version (added at compile time via -X main.Version=$VERSION)
29 | Version string
30 |
31 | // Revision is the git commit id (added at compile time via -X main.Revision=$REVISION)
32 | Revision string
33 | )
34 |
35 | func main() {
36 | ctx := context.Background()
37 | if err := mainE(ctx); err != nil {
38 | clog.FromContext(ctx).Fatal(err.Error())
39 | }
40 | }
41 |
42 | func mainE(ctx context.Context) error {
43 | ctx, done := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
44 | defer done()
45 | return cli().ExecuteContext(ctx)
46 | }
47 |
48 | func cli() *cobra.Command {
49 | var j bool
50 | var inPlace bool
51 | var org string
52 | var registry string
53 | var mappingsFile string
54 | var updateFlag bool
55 | var noBuiltInFlag bool
56 | var strictFlag bool
57 |
58 | // Default log level is info
59 | var level = slag.Level(slog.LevelInfo)
60 |
61 | v := "dev"
62 | if Version != "" {
63 | v = Version
64 | if Revision != "" {
65 | v += fmt.Sprintf(" (%s)", Revision)
66 | }
67 | }
68 |
69 | cmd := &cobra.Command{
70 | Use: "dfc",
71 | Example: "dfc <path_to_dockerfile>",
72 | Args: cobra.MaximumNArgs(1),
73 | Version: v,
74 | RunE: func(cmd *cobra.Command, args []string) error {
75 | // Setup logging
76 | slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: &level})))
77 | log := clog.New(slog.Default().Handler())
78 | ctx := clog.WithLogger(cmd.Context(), log)
79 |
80 | // If update flag is set but no args, just update and exit
81 | if updateFlag && len(args) == 0 {
82 | // Set up update options
83 | updateOpts := dfc.UpdateOptions{}
84 |
85 | // Set UserAgent if version info is available
86 | if Version != "" {
87 | updateOpts.UserAgent = "dfc/" + Version
88 | }
89 |
90 | if err := dfc.Update(ctx, updateOpts); err != nil {
91 | return fmt.Errorf("failed to update: %w", err)
92 | }
93 | return nil
94 | }
95 |
96 | // If no args and no update flag, require an argument
97 | if len(args) == 0 {
98 | return fmt.Errorf("requires at least 1 arg(s), only received 0")
99 | }
100 |
101 | // Allow for piping into the CLI if first arg is "-"
102 | input := cmd.InOrStdin()
103 | isFile := args[0] != "-"
104 | var path string
105 | if isFile {
106 | path = args[0]
107 | file, err := os.Open(filepath.Clean(path))
108 | if err != nil {
109 | return fmt.Errorf("failed open file: %s: %w", path, err)
110 | }
111 | defer file.Close()
112 | input = file
113 | }
114 | buf := new(bytes.Buffer)
115 | if _, err := buf.ReadFrom(input); err != nil {
116 | return fmt.Errorf("failed to read input: %w", err)
117 | }
118 | raw := buf.Bytes()
119 |
120 | // Use the dfc package to parse the Dockerfile
121 | dockerfile, err := dfc.ParseDockerfile(ctx, raw)
122 | if err != nil {
123 | return fmt.Errorf("unable to parse dockerfile: %w", err)
124 | }
125 |
126 | // Setup conversion options
127 | opts := dfc.Options{
128 | Organization: org,
129 | Registry: registry,
130 | Update: updateFlag,
131 | NoBuiltIn: noBuiltInFlag,
132 | Strict: strictFlag,
133 | }
134 |
135 | // If custom mappings file is provided, load it as ExtraMappings
136 | if mappingsFile != "" {
137 | log.Info("Loading custom mappings file", "file", mappingsFile)
138 | mappingsBytes, err := os.ReadFile(mappingsFile)
139 | if err != nil {
140 | return fmt.Errorf("reading mappings file %s: %w", mappingsFile, err)
141 | }
142 |
143 | var extraMappings dfc.MappingsConfig
144 | if err := yaml.Unmarshal(mappingsBytes, &extraMappings); err != nil {
145 | return fmt.Errorf("unmarshalling package mappings: %w", err)
146 | }
147 |
148 | opts.ExtraMappings = extraMappings
149 | }
150 |
151 | // If --no-builtin flag is used without --mappings, warn the user
152 | if noBuiltInFlag && mappingsFile == "" {
153 | log.Warn("Using --no-builtin without --mappings will use default conversion logic without any package/image mappings")
154 | }
155 |
156 | // Convert the Dockerfile
157 | convertedDockerfile, err := dockerfile.Convert(ctx, opts)
158 | if err != nil {
159 | return fmt.Errorf("converting dockerfile: %w", err)
160 | }
161 |
162 | // Output the Dockerfile as JSON
163 | if j {
164 | if inPlace {
165 | return fmt.Errorf("unable to use --in-place and --json flag at same time")
166 | }
167 |
168 | // Output the Dockerfile as JSON
169 | b, err := json.Marshal(convertedDockerfile)
170 | if err != nil {
171 | return fmt.Errorf("marshalling dockerfile to json: %w", err)
172 | }
173 | fmt.Println(string(b))
174 | return nil
175 | }
176 |
177 | // Get the string representation
178 | result := convertedDockerfile.String()
179 |
180 | // modify file in place
181 | if inPlace {
182 | if !isFile {
183 | return fmt.Errorf("unable to use --in-place flag when processing stdin")
184 | }
185 |
186 | // Get original file info to preserve permissions
187 | fileInfo, err := os.Stat(path)
188 | if err != nil {
189 | return fmt.Errorf("getting file info for %s: %w", path, err)
190 | }
191 | originalMode := fileInfo.Mode().Perm()
192 |
193 | backupPath := path + ".bak"
194 | log.Info("Saving dockerfile backup", "path", backupPath)
195 | if err := os.WriteFile(backupPath, raw, originalMode); err != nil {
196 | return fmt.Errorf("saving dockerfile backup to %s: %w", backupPath, err)
197 | }
198 | log.Info("Overwriting dockerfile", "path", path)
199 | if err := os.WriteFile(path, []byte(result), originalMode); err != nil {
200 | return fmt.Errorf("overwriting %s: %w", path, err)
201 | }
202 | return nil
203 | }
204 |
205 | // Print to stdout
206 | fmt.Print(result)
207 |
208 | return nil
209 | },
210 | }
211 |
212 | cmd.Flags().StringVar(&org, "org", dfc.DefaultOrg, "the organization for cgr.dev// (defaults to ORG)")
213 | cmd.Flags().StringVar(®istry, "registry", "", "an alternate registry and root namepace (e.g. r.example.com/cg-mirror)")
214 | cmd.Flags().BoolVarP(&inPlace, "in-place", "i", false, "modified the Dockerfile in place (vs. stdout), saving original in a .bak file")
215 | cmd.Flags().BoolVarP(&j, "json", "j", false, "print dockerfile as json (before conversion)")
216 | cmd.Flags().StringVarP(&mappingsFile, "mappings", "m", "", "path to a custom package mappings YAML file (instead of the default)")
217 | cmd.Flags().BoolVar(&updateFlag, "update", false, "check for and apply available updates")
218 | cmd.Flags().BoolVar(&noBuiltInFlag, "no-builtin", false, "skip built-in package/image mappings, still apply default conversion logic")
219 | cmd.Flags().Var(&level, "log-level", "log level (e.g. debug, info, warn, error)")
220 | cmd.Flags().BoolVar(&strictFlag, "strict", false, "when true, fail if any package is unknown")
221 | // nolint:errcheck
222 | cmd.Flags().MarkHidden("strict")
223 |
224 | return cmd
225 | }
226 |
--------------------------------------------------------------------------------
/mcp-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM cgr.dev/chainguard/go:latest AS builder
2 |
3 | WORKDIR /app
4 |
5 | COPY go.mod ./
6 | COPY main.go ./
7 |
8 | RUN go mod download
9 | RUN CGO_ENABLED=0 GOOS=linux go build -o mcp-server
10 |
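# Runtime stage: copy only the statically-linked binary into the minimal static base image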
11 | FROM cgr.dev/chainguard/static:latest
12 |
13 | WORKDIR /app
14 | COPY --from=builder /app/mcp-server /app/mcp-server
15 |
16 | ENTRYPOINT ["/app/mcp-server"]
--------------------------------------------------------------------------------
/mcp-server/README.md:
--------------------------------------------------------------------------------
1 | # dfc MCP Server
2 |
3 | This is a Go implementation of an MCP (Model Context Protocol) server for `dfc`. It provides a standardized interface that allows AI assistants and other clients to convert Dockerfiles to use Chainguard Images and APKs through the stdio protocol.
4 |
5 | ![dfc MCP server demo in Cursor](demo-cursor.png)
6 |
7 | ## Features
8 |
9 | - Full MCP protocol implementation
10 | - Converts Dockerfiles to use Chainguard Images
11 | - Analyzes Dockerfile structures
12 | - Healthcheck tool for diagnostics
13 | - Optimizes FROM and RUN lines
14 | - Configurable organization and registry
15 |
16 | ## Tools
17 |
18 | This MCP server provides the following tools:
19 |
20 | 1. `convert_dockerfile` - Converts a Dockerfile to use Chainguard Images and APKs
21 | 2. `analyze_dockerfile` - Analyzes a Dockerfile and provides information about its structure
22 | 3. `healthcheck` - Checks if the server is running correctly
23 |
24 | ## Directory Structure
25 |
26 | ```
27 | ├── main.go # Main MCP server implementation
28 | ├── go.mod/go.sum # Go module dependencies
29 | ├── Dockerfile # Container definition
30 | ├── README.md # Documentation
31 | └── mcp-server        # Built binary
32 | ```
33 |
34 | ## Prerequisites
35 |
36 | - Go 1.24 or higher (per go.mod)
37 |
38 | ## Installation
39 |
40 | Clone the repository:
41 |
42 | ```bash
43 | git clone https://github.com/chainguard-dev/dfc.git
44 | cd dfc/mcp-server
45 | ```
46 |
47 | Build the server:
48 |
49 | ```bash
50 | go build -o mcp-server .
51 | ```
52 |
53 | Run the server:
54 |
55 | ```bash
56 | ./mcp-server
57 | ```
58 |
59 | ## Docker
60 |
61 | You can also run the server in a Docker container (note the `-i` flag: the server speaks MCP over stdio, so stdin must stay open):
62 |
63 | ```bash
64 | docker build -t dfc-mcp-server .
65 | docker run -i dfc-mcp-server
66 | ```
67 |
68 | ## Configuring with AI Assistants
69 |
70 | ### Configuring in Claude Code
71 |
72 | To use this server with Claude Code, run the following:
73 |
74 | ```
75 | claude mcp add dfc -- /path/to/dfc/mcp-server/mcp-server
76 | ```
77 |
78 | Then you can invoke the server by asking to convert a Dockerfile:
79 |
80 | ```
81 | Can you convert the following Dockerfile to use Chainguard Images? https://raw.githubusercontent.com/django/djangoproject.com/refs/heads/main/Dockerfile
82 | ```
83 |
84 | ### Configuring in Cursor
85 |
86 | To configure this MCP server in Cursor, add the following configuration to your Cursor settings:
87 |
88 | ```json
89 | {
90 | "mcp.servers": [
91 | {
92 | "name": "Dockerfile Converter",
93 | "command": "path/to/dfc/mcp-server/mcp-server",
94 | "transport": "stdio"
95 | }
96 | ]
97 | }
98 | ```
99 |
100 | You can then invoke the Dockerfile converter tool from Cursor with commands like:
101 |
102 | ```
103 | @dfc convert my Dockerfile to use Chainguard Images
104 | ```
105 |
106 | ### Configuring in Claude Desktop
107 |
108 | To use this server with Claude Desktop, add the following to your `claude_desktop_config.json` file (typically found in your home directory):
109 |
110 | ```json
111 | {
112 | "mcpServers": {
113 | "dfc": {
114 | "command": "/path/to/dfc/mcp-server/mcp-server",
115 | "transport": "stdio"
116 | }
117 | }
118 | }
119 | ```
120 |
121 | Then you can invoke the server in Claude Desktop using:
122 |
123 | ```
124 | @dfc analyze this Dockerfile
125 | ```
126 |
127 | ### Configuring in Windsurf
128 |
129 | To add this MCP server to Windsurf, follow these steps:
130 |
131 | 1. Open Windsurf and navigate to Settings
132 | 2. Find the "MCP Servers" section
133 | 3. Click "Add New Server"
134 | 4. Fill in the following details:
135 | - Name: `Dockerfile Converter`
136 | - Command: `/path/to/dfc/mcp-server/mcp-server`
137 | - Transport Type: `stdio`
138 | 5. Click "Save"
139 |
140 | You can then invoke the tool in Windsurf using:
141 |
142 | ```
143 | @dfc convert this Dockerfile
144 | ```
145 |
146 | ### Configuring with General MCP Clients
147 |
148 | For other MCP clients or custom implementations, you'll need:
149 |
150 | 1. The path to the built `mcp-server` executable
151 | 2. Configuration for stdio transport
152 | 3. Tool names to invoke:
153 | - `convert_dockerfile`
154 | - `analyze_dockerfile`
155 | - `healthcheck`
156 |
157 | General configuration format for most MCP clients:
158 |
159 | ```json
160 | {
161 | "servers": {
162 | "dfc": {
163 | "command": "/path/to/dfc/mcp-server/mcp-server",
164 | "transport": "stdio"
165 | }
166 | }
167 | }
168 | ```
169 |
170 | ## API Usage
171 |
172 | ### Convert a Dockerfile
173 |
174 | To convert a Dockerfile, provide the following parameters:
175 |
176 | - `dockerfile_content` (required) - The content of the Dockerfile to convert
177 | - `organization` (optional) - The Chainguard organization to use (defaults to 'ORG')
178 | - `registry` (optional) - Alternative registry to use instead of cgr.dev
179 |
180 | Example request:
181 |
182 | ```json
183 | {
184 | "name": "convert_dockerfile",
185 | "arguments": {
186 | "dockerfile_content": "FROM alpine\nRUN apk add --no-cache curl",
187 | "organization": "mycorp",
188 | "registry": "registry.mycorp.com"
189 | }
190 | }
191 | ```
192 |
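The tool returns the converted Dockerfile as plain text. Going by the built-in
mappings (where `alpine` maps to `chainguard-base:latest`) and the registry
override above, the `FROM` line of the result should resemble:

```
FROM registry.mycorp.com/chainguard-base:latest
```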
193 | ### Analyze a Dockerfile
194 |
195 | To analyze a Dockerfile, provide the following parameter:
196 |
197 | - `dockerfile_content` (required) - The content of the Dockerfile to analyze
198 |
199 | Example request:
200 |
201 | ```json
202 | {
203 | "name": "analyze_dockerfile",
204 | "arguments": {
205 | "dockerfile_content": "FROM alpine\nRUN apk add --no-cache curl"
206 | }
207 | }
208 | ```
209 |
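Based on how the analysis text is assembled in `main.go`, the response for the
request above should look roughly like this (the package manager line depends
on detection; see the TODO in `main.go`):

```
Dockerfile Analysis:

- Total stages: 1
- Base images: alpine
- Package managers: apk
```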
210 | ## Development
211 |
212 | When making changes, ensure the server follows the MCP protocol specification correctly. The server uses stdio for communication with clients.
213 |
--------------------------------------------------------------------------------
/mcp-server/demo-cursor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chainguard-dev/dfc/eac44bf0011544e2f88a0079b18277e55bae509a/mcp-server/demo-cursor.png
--------------------------------------------------------------------------------
/mcp-server/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/chainguard-dev/dfc/mcp-server
2 |
3 | go 1.24
4 |
5 | toolchain go1.24.2
6 |
7 | require (
8 | github.com/chainguard-dev/dfc v0.0.0-20250101000000-000000000000
9 | github.com/mark3labs/mcp-go v0.25.0
10 | )
11 |
12 | require (
13 | github.com/adrg/xdg v0.5.3 // indirect
14 | github.com/chainguard-dev/clog v1.7.0 // indirect
15 | github.com/google/uuid v1.6.0 // indirect
16 | github.com/spf13/cast v1.7.1 // indirect
17 | github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
18 | golang.org/x/sys v0.26.0 // indirect
19 | gopkg.in/yaml.v3 v3.0.1 // indirect
20 | )
21 |
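// use the local dfc sources from the parent directory instead of a published release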
22 | replace github.com/chainguard-dev/dfc => ../
23 |
--------------------------------------------------------------------------------
/mcp-server/go.sum:
--------------------------------------------------------------------------------
1 | github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
2 | github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
3 | github.com/chainguard-dev/clog v1.7.0 h1:guPznsK8vLHvzz1QJe2yU6MFeYaiSOFOQBYw4OXu+g8=
4 | github.com/chainguard-dev/clog v1.7.0/go.mod h1:4+WFhRMsGH79etYXY3plYdp+tCz/KCkU8fAr0HoaPvs=
5 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
6 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
7 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
8 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
9 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
10 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
11 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
12 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
13 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
14 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
15 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
16 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
17 | github.com/mark3labs/mcp-go v0.25.0 h1:UUpcMT3L5hIhuDy7aifj4Bphw4Pfx1Rf8mzMXDe8RQw=
18 | github.com/mark3labs/mcp-go v0.25.0/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4=
19 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
20 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
21 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
22 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
23 | github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
24 | github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
25 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
26 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
27 | github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
28 | github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
29 | golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
30 | golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
31 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
32 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
33 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
34 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
35 |
--------------------------------------------------------------------------------
/mcp-server/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package main
7 |
8 | import (
9 | "context"
10 | "encoding/json"
11 | "fmt"
12 | "log"
13 | "os"
14 | "os/signal"
15 | "strings"
16 | "syscall"
17 |
18 | "github.com/chainguard-dev/dfc/pkg/dfc"
19 | "github.com/mark3labs/mcp-go/mcp"
20 | "github.com/mark3labs/mcp-go/server"
21 | )
22 |
23 | // Version information
24 | const (
25 | Version = "dev"
26 | )
27 |
28 | func main() {
29 | // Set up logging to stderr for diagnostics
30 | logger := log.New(os.Stderr, "[dfc-mcp] ", log.LstdFlags)
31 | logger.Printf("Starting dfc MCP Server v%s", Version)
32 |
33 | // Create a context that listens for termination signals
34 | _, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
35 | defer stop()
36 |
37 | // Create an MCP server instance
38 | s := server.NewMCPServer(
39 | "dfc - Dockerfile Converter",
40 | Version,
41 | server.WithLogging(),
42 | server.WithRecovery(),
43 | server.WithToolCapabilities(true),
44 | )
45 |
46 | // Define the Dockerfile converter tool
47 | dockerfileConverterTool := mcp.NewTool("convert_dockerfile",
48 | mcp.WithDescription("Convert a Dockerfile to use Chainguard Images and APKs in FROM and RUN lines"),
49 | mcp.WithString("dockerfile_content",
50 | mcp.Required(),
51 | mcp.Description("The content of the Dockerfile to convert"),
52 | ),
53 | mcp.WithString("organization",
54 | mcp.Description("The Chainguard organization to use (defaults to 'ORG')"),
55 | ),
56 | mcp.WithString("registry",
57 | mcp.Description("Alternative registry to use instead of cgr.dev"),
58 | ),
59 | )
60 |
61 | // Add a healthcheck tool for diagnostics
62 | healthcheckTool := mcp.NewTool("healthcheck",
63 | mcp.WithDescription("Check if the dfc MCP server is running correctly"),
64 | )
65 |
66 | // Add the handler for the Dockerfile converter tool
67 | s.AddTool(dockerfileConverterTool, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
68 | logger.Printf("Received convert_dockerfile request")
69 |
70 | // Extract parameters
71 | dockerfileContent, ok := request.Params.Arguments["dockerfile_content"].(string)
72 | if !ok || dockerfileContent == "" {
73 | logger.Printf("Error: Empty dockerfile content in request")
74 | return mcp.NewToolResultError("Dockerfile content cannot be empty"), nil
75 | }
76 |
77 | // Log a sample of the Dockerfile content (first 50 chars)
78 | contentPreview := dockerfileContent
79 | if len(contentPreview) > 50 {
80 | contentPreview = contentPreview[:50] + "..."
81 | }
82 | logger.Printf("Processing Dockerfile (preview): %s", contentPreview)
83 |
84 | // Extract optional parameters with defaults
85 | organization := "ORG"
86 | if org, ok := request.Params.Arguments["organization"].(string); ok && org != "" {
87 | organization = org
88 | logger.Printf("Using custom organization: %s", organization)
89 | }
90 |
91 | var registry string
92 | if reg, ok := request.Params.Arguments["registry"].(string); ok && reg != "" {
93 | registry = reg
94 | logger.Printf("Using custom registry: %s", registry)
95 | }
96 |
97 | // Convert the Dockerfile
98 | convertedDockerfile, err := convertDockerfile(ctx, dockerfileContent, organization, registry)
99 | if err != nil {
100 | logger.Printf("Error converting Dockerfile: %v", err)
101 | return mcp.NewToolResultError(fmt.Sprintf("Error converting Dockerfile: %v", err)), nil
102 | }
103 |
104 | // Log success
105 | logger.Printf("Successfully converted Dockerfile (length: %d bytes)", len(convertedDockerfile))
106 |
107 | // Return the result
108 | return mcp.NewToolResultText(convertedDockerfile), nil
109 | })
110 |
111 | // Add the healthcheck handler
112 | s.AddTool(healthcheckTool, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
113 | logger.Printf("Received healthcheck request")
114 |
115 | // Create test Dockerfile content
116 | testDockerfile := "FROM alpine\nRUN apk add --no-cache curl"
117 |
118 | // Try a test conversion to ensure dfc package is working
119 | _, err := convertDockerfile(ctx, testDockerfile, "ORG", "")
120 | if err != nil {
121 | logger.Printf("Healthcheck failed: %v", err)
122 | return mcp.NewToolResultError(fmt.Sprintf("Healthcheck failed: %v", err)), nil
123 | }
124 |
125 | // If we get here, all systems are operational
126 | statusInfo := map[string]interface{}{
127 | "status": "ok",
128 | "version": Version,
129 | "dfc_package": "operational",
130 | }
131 |
132 | statusJSON, _ := json.Marshal(statusInfo)
133 | return mcp.NewToolResultText(fmt.Sprintf("Healthcheck passed: %s", string(statusJSON))), nil
134 | })
135 |
136 | // Add a tool that analyzes a Dockerfile
137 | analyzeDockerfileTool := mcp.NewTool("analyze_dockerfile",
138 | mcp.WithDescription("Analyze a Dockerfile and provide information about its structure"),
139 | mcp.WithString("dockerfile_content",
140 | mcp.Required(),
141 | mcp.Description("The content of the Dockerfile to analyze"),
142 | ),
143 | )
144 |
145 | // Add the analyzer handler
146 | s.AddTool(analyzeDockerfileTool, func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
147 | logger.Printf("Received analyze_dockerfile request")
148 |
149 | // Extract parameters
150 | dockerfileContent, ok := request.Params.Arguments["dockerfile_content"].(string)
151 | if !ok || dockerfileContent == "" {
152 | logger.Printf("Error: Empty dockerfile content in analyze request")
153 | return mcp.NewToolResultError("Dockerfile content cannot be empty"), nil
154 | }
155 |
156 | // Parse the Dockerfile
157 | dockerfile, err := dfc.ParseDockerfile(ctx, []byte(dockerfileContent))
158 | if err != nil {
159 | logger.Printf("Error parsing Dockerfile for analysis: %v", err)
160 | return mcp.NewToolResultError(fmt.Sprintf("Failed to parse Dockerfile: %v", err)), nil
161 | }
162 |
163 | // Analyze the Dockerfile
164 | stageCount := 0
165 | baseImages := []string{}
166 | packageManagers := map[string]bool{}
167 |
168 | for _, line := range dockerfile.Lines {
169 | if line.From != nil {
170 | stageCount++
171 | if line.From.Orig != "" {
172 | baseImages = append(baseImages, line.From.Orig)
173 | } else {
174 | baseImg := line.From.Base
175 | if line.From.Tag != "" {
176 | baseImg += ":" + line.From.Tag
177 | }
178 | baseImages = append(baseImages, baseImg)
179 | }
180 | }
181 | if line.Run != nil && line.Run.Manager != "" {
182 | packageManagers[string(line.Run.Manager)] = true
183 | }
184 | }
185 |
186 | // Build package manager list
187 | // TODO: something seems to be off here, returning "No package managers detected"
188 | packageManagerList := []string{}
189 | for pm := range packageManagers {
190 | packageManagerList = append(packageManagerList, pm)
191 | }
192 |
193 | // Build analysis text
194 | analysis := "Dockerfile Analysis:\n\n"
195 | analysis += fmt.Sprintf("- Total stages: %d\n", stageCount)
196 | analysis += fmt.Sprintf("- Base images: %s\n", strings.Join(baseImages, ", "))
197 | if len(packageManagerList) > 0 {
198 | analysis += fmt.Sprintf("- Package managers: %s\n", strings.Join(packageManagerList, ", "))
199 | } else {
200 | analysis += "- No package managers detected\n"
201 | }
202 |
203 | logger.Printf("Successfully analyzed Dockerfile: %d stages, %d base images",
204 | stageCount, len(baseImages))
205 |
206 | // Return the result
207 | return mcp.NewToolResultText(analysis), nil
208 | })
209 |
210 | // Announce that we're ready to serve
211 | logger.Printf("MCP server initialization complete, ready to handle requests")
212 |
213 | // Start the server
214 | if err := server.ServeStdio(s); err != nil {
215 | logger.Printf("Server error: %v", err)
216 | os.Exit(1)
217 | }
218 | }
219 |
220 | // convertDockerfile converts a Dockerfile to use Chainguard Images and APKs
221 | func convertDockerfile(ctx context.Context, dockerfileContent, organization, registry string) (string, error) {
222 | // Parse the Dockerfile
223 | dockerfile, err := dfc.ParseDockerfile(ctx, []byte(dockerfileContent))
224 | if err != nil {
225 | return "", fmt.Errorf("failed to parse Dockerfile: %w", err)
226 | }
227 |
228 | // Create options for conversion
229 | opts := dfc.Options{
230 | Organization: organization,
231 | }
232 |
233 | // If registry is provided, set it in options
234 | if registry != "" {
235 | opts.Registry = registry
236 | }
237 |
238 | // Convert the Dockerfile
239 | converted, err := dockerfile.Convert(ctx, opts)
240 | if err != nil {
241 | return "", fmt.Errorf("failed to convert Dockerfile: %w", err)
242 | }
243 |
244 | // Return the converted Dockerfile as a string
245 | return converted.String(), nil
246 | }
247 |
--------------------------------------------------------------------------------
/pkg/dfc/adduser.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "strings"
10 | )
11 |
12 | // ConvertUserAddToAddUser converts a useradd command to the equivalent adduser command
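// For example, "useradd -m -s /bin/bash -u 1001 -g mygroup myuser" becomes
// "adduser --shell /bin/bash --uid 1001 --ingroup mygroup myuser".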
13 | func ConvertUserAddToAddUser(part *ShellPart) *ShellPart {
14 | if part.Command != CommandUserAdd {
15 | return part
16 | }
17 |
18 | // Create a new shell part with the same extra content and delimiter
19 | result := &ShellPart{
20 | ExtraPre: part.ExtraPre,
21 | Command: CommandAddUser,
22 | Delimiter: part.Delimiter,
23 | }
24 |
25 | // Process arguments
26 | var resultArgs []string
27 | var username string
28 | var hasUsername bool
29 | i := 0
30 |
31 | // Process arguments
32 | for i < len(part.Args) {
33 | arg := part.Args[i]
34 |
35 | // Look for the username (first non-option argument)
36 | if !strings.HasPrefix(arg, "-") && !hasUsername {
37 | username = arg
38 | hasUsername = true
39 | i++
40 | continue
41 | }
42 |
43 | // Process options
44 | switch arg {
45 | // Options that are simply removed (create home is default in adduser)
46 | case "-m", "--create-home":
47 | i++
48 | continue
49 |
50 | // Options that are renamed
51 | case "-r", "--system":
52 | resultArgs = append(resultArgs, "--system")
53 | i++
54 |
55 | case "-M", "--no-create-home":
56 | resultArgs = append(resultArgs, "--no-create-home")
57 | i++
58 |
59 | // Options that need arguments and are renamed
60 | case "-s", "--shell":
61 | if i+1 < len(part.Args) {
62 | resultArgs = append(resultArgs, "--shell", part.Args[i+1])
63 | i += 2
64 | } else {
65 | i++
66 | }
67 |
68 | case "-d", "--home-dir":
69 | if i+1 < len(part.Args) {
70 | resultArgs = append(resultArgs, "--home", part.Args[i+1])
71 | i += 2
72 | } else {
73 | i++
74 | }
75 |
76 | case "-c", "--comment":
77 | if i+1 < len(part.Args) {
78 | resultArgs = append(resultArgs, "--gecos", part.Args[i+1])
79 | i += 2
80 | } else {
81 | i++
82 | }
83 |
84 | case "-g", "--gid":
85 | if i+1 < len(part.Args) {
86 | resultArgs = append(resultArgs, "--ingroup", part.Args[i+1])
87 | i += 2
88 | } else {
89 | i++
90 | }
91 |
92 | case "-u", "--uid":
93 | if i+1 < len(part.Args) {
94 | resultArgs = append(resultArgs, "--uid", part.Args[i+1])
95 | i += 2
96 | } else {
97 | i++
98 | }
99 |
100 | // Password options converted to --disabled-password
101 | case "-p", "--password":
102 | resultArgs = append(resultArgs, "--disabled-password")
103 | if i+1 < len(part.Args) && !strings.HasPrefix(part.Args[i+1], "-") {
104 | i += 2
105 | } else {
106 | i++
107 | }
108 |
109 | // Options that we skip along with their arguments
110 | case "-k", "--skel", "-N", "--no-user-group":
111 | if i+1 < len(part.Args) && !strings.HasPrefix(part.Args[i+1], "-") {
112 | i += 2
113 | } else {
114 | i++
115 | }
116 |
117 | // Include other parts that haven't been processed
118 | default:
119 | resultArgs = append(resultArgs, arg)
120 | i++
121 | }
122 | }
123 |
124 | // Add username at the end
125 | if hasUsername {
126 | resultArgs = append(resultArgs, username)
127 | }
128 |
129 | result.Args = resultArgs
130 | return result
131 | }
132 |
133 | // ConvertGroupAddToAddGroup converts a groupadd command to the equivalent addgroup command
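// For example, "groupadd -r -g 1001 mygroup" becomes "addgroup --system --gid 1001 mygroup".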
134 | func ConvertGroupAddToAddGroup(part *ShellPart) *ShellPart {
135 | if part.Command != CommandGroupAdd {
136 | return part
137 | }
138 |
139 | // Create a new shell part with the same extra content and delimiter
140 | result := &ShellPart{
141 | ExtraPre: part.ExtraPre,
142 | Command: CommandAddGroup,
143 | Delimiter: part.Delimiter,
144 | }
145 |
146 | // Process arguments
147 | var resultArgs []string
148 | var groupname string
149 | var hasGroupname bool
150 | i := 0
151 |
152 | // Process arguments
153 | for i < len(part.Args) {
154 | arg := part.Args[i]
155 |
156 | // Look for the groupname (first non-option argument)
157 | if !strings.HasPrefix(arg, "-") && !hasGroupname {
158 | groupname = arg
159 | hasGroupname = true
160 | i++
161 | continue
162 | }
163 |
164 | // Process options
165 | switch arg {
166 | // Options that are renamed
167 | case "-r", "--system":
168 | resultArgs = append(resultArgs, "--system")
169 | i++
170 |
171 | // Options that need arguments and are renamed
172 | case "-g", "--gid":
173 | if i+1 < len(part.Args) {
174 | resultArgs = append(resultArgs, "--gid", part.Args[i+1])
175 | i += 2
176 | } else {
177 | i++
178 | }
179 |
180 | // Options that we skip (not supported in addgroup)
181 | case "-f", "--force", "-o", "--non-unique":
182 | i++
183 | continue
184 |
185 | // Options that we skip along with their arguments
186 | case "-K", "--key", "-p", "--password":
187 | if i+1 < len(part.Args) && !strings.HasPrefix(part.Args[i+1], "-") {
188 | i += 2
189 | } else {
190 | i++
191 | }
192 |
193 | // Include other parts that haven't been processed
194 | default:
195 | resultArgs = append(resultArgs, arg)
196 | i++
197 | }
198 | }
199 |
200 | // Add groupname at the end
201 | if hasGroupname {
202 | resultArgs = append(resultArgs, groupname)
203 | }
204 |
205 | result.Args = resultArgs
206 | return result
207 | }
208 |
--------------------------------------------------------------------------------
/pkg/dfc/adduser_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "testing"
10 | )
11 |
12 | func TestConvertUserAddToAddUser(t *testing.T) {
13 | testCases := []struct {
14 | name string
15 | input *ShellPart
16 | expected *ShellPart
17 | }{
18 | {
19 | name: "basic useradd",
20 | input: &ShellPart{
21 | Command: CommandUserAdd,
22 | Args: []string{"myuser"},
23 | },
24 | expected: &ShellPart{
25 | Command: CommandAddUser,
26 | Args: []string{"myuser"},
27 | },
28 | },
29 | {
30 | name: "create home directory",
31 | input: &ShellPart{
32 | Command: CommandUserAdd,
33 | Args: []string{"-m", "myuser"},
34 | },
35 | expected: &ShellPart{
36 | Command: CommandAddUser,
37 | Args: []string{"myuser"},
38 | },
39 | },
40 | {
41 | name: "create home directory long option",
42 | input: &ShellPart{
43 | Command: CommandUserAdd,
44 | Args: []string{"--create-home", "myuser"},
45 | },
46 | expected: &ShellPart{
47 | Command: CommandAddUser,
48 | Args: []string{"myuser"},
49 | },
50 | },
51 | {
52 | name: "system user",
53 | input: &ShellPart{
54 | Command: CommandUserAdd,
55 | Args: []string{"-r", "myuser"},
56 | },
57 | expected: &ShellPart{
58 | Command: CommandAddUser,
59 | Args: []string{"--system", "myuser"},
60 | },
61 | },
62 | {
63 | name: "system user long option",
64 | input: &ShellPart{
65 | Command: CommandUserAdd,
66 | Args: []string{"--system", "myuser"},
67 | },
68 | expected: &ShellPart{
69 | Command: CommandAddUser,
70 | Args: []string{"--system", "myuser"},
71 | },
72 | },
73 | {
74 | name: "custom shell",
75 | input: &ShellPart{
76 | Command: CommandUserAdd,
77 | Args: []string{"-s", "/bin/bash", "myuser"},
78 | },
79 | expected: &ShellPart{
80 | Command: CommandAddUser,
81 | Args: []string{"--shell", "/bin/bash", "myuser"},
82 | },
83 | },
84 | {
85 | name: "custom shell long option",
86 | input: &ShellPart{
87 | Command: CommandUserAdd,
88 | Args: []string{"--shell", "/bin/bash", "myuser"},
89 | },
90 | expected: &ShellPart{
91 | Command: CommandAddUser,
92 | Args: []string{"--shell", "/bin/bash", "myuser"},
93 | },
94 | },
95 | {
96 | name: "custom home directory",
97 | input: &ShellPart{
98 | Command: CommandUserAdd,
99 | Args: []string{"-d", "/custom/home", "myuser"},
100 | },
101 | expected: &ShellPart{
102 | Command: CommandAddUser,
103 | Args: []string{"--home", "/custom/home", "myuser"},
104 | },
105 | },
106 | {
107 | name: "custom home directory long option",
108 | input: &ShellPart{
109 | Command: CommandUserAdd,
110 | Args: []string{"--home-dir", "/custom/home", "myuser"},
111 | },
112 | expected: &ShellPart{
113 | Command: CommandAddUser,
114 | Args: []string{"--home", "/custom/home", "myuser"},
115 | },
116 | },
117 | {
118 | name: "with comment",
119 | input: &ShellPart{
120 | Command: CommandUserAdd,
121 | Args: []string{"-c", "Test User", "myuser"},
122 | },
123 | expected: &ShellPart{
124 | Command: CommandAddUser,
125 | Args: []string{"--gecos", "Test User", "myuser"},
126 | },
127 | },
128 | {
129 | name: "with comment long option",
130 | input: &ShellPart{
131 | Command: CommandUserAdd,
132 | Args: []string{"--comment", "Test User", "myuser"},
133 | },
134 | expected: &ShellPart{
135 | Command: CommandAddUser,
136 | Args: []string{"--gecos", "Test User", "myuser"},
137 | },
138 | },
139 | {
140 | name: "with password",
141 | input: &ShellPart{
142 | Command: CommandUserAdd,
143 | Args: []string{"-p", "password123", "myuser"},
144 | },
145 | expected: &ShellPart{
146 | Command: CommandAddUser,
147 | Args: []string{"--disabled-password", "myuser"},
148 | },
149 | },
150 | {
151 | name: "with password long option",
152 | input: &ShellPart{
153 | Command: CommandUserAdd,
154 | Args: []string{"--password", "password123", "myuser"},
155 | },
156 | expected: &ShellPart{
157 | Command: CommandAddUser,
158 | Args: []string{"--disabled-password", "myuser"},
159 | },
160 | },
161 | {
162 | name: "with primary group",
163 | input: &ShellPart{
164 | Command: CommandUserAdd,
165 | Args: []string{"-g", "mygroup", "myuser"},
166 | },
167 | expected: &ShellPart{
168 | Command: CommandAddUser,
169 | Args: []string{"--ingroup", "mygroup", "myuser"},
170 | },
171 | },
172 | {
173 | name: "with primary group long option",
174 | input: &ShellPart{
175 | Command: CommandUserAdd,
176 | Args: []string{"--gid", "mygroup", "myuser"},
177 | },
178 | expected: &ShellPart{
179 | Command: CommandAddUser,
180 | Args: []string{"--ingroup", "mygroup", "myuser"},
181 | },
182 | },
183 | {
184 | name: "with user ID",
185 | input: &ShellPart{
186 | Command: CommandUserAdd,
187 | Args: []string{"-u", "1001", "myuser"},
188 | },
189 | expected: &ShellPart{
190 | Command: CommandAddUser,
191 | Args: []string{"--uid", "1001", "myuser"},
192 | },
193 | },
194 | {
195 | name: "with user ID long option",
196 | input: &ShellPart{
197 | Command: CommandUserAdd,
198 | Args: []string{"--uid", "1001", "myuser"},
199 | },
200 | expected: &ShellPart{
201 | Command: CommandAddUser,
202 | Args: []string{"--uid", "1001", "myuser"},
203 | },
204 | },
205 | {
206 | name: "no home directory",
207 | input: &ShellPart{
208 | Command: CommandUserAdd,
209 | Args: []string{"-M", "myuser"},
210 | },
211 | expected: &ShellPart{
212 | Command: CommandAddUser,
213 | Args: []string{"--no-create-home", "myuser"},
214 | },
215 | },
216 | {
217 | name: "no home directory long option",
218 | input: &ShellPart{
219 | Command: CommandUserAdd,
220 | Args: []string{"--no-create-home", "myuser"},
221 | },
222 | expected: &ShellPart{
223 | Command: CommandAddUser,
224 | Args: []string{"--no-create-home", "myuser"},
225 | },
226 | },
227 | {
228 | name: "multiple options",
229 | input: &ShellPart{
230 | Command: CommandUserAdd,
231 | Args: []string{"-m", "-s", "/bin/bash", "-u", "1001", "-g", "mygroup", "myuser"},
232 | },
233 | expected: &ShellPart{
234 | Command: CommandAddUser,
235 | Args: []string{"--shell", "/bin/bash", "--uid", "1001", "--ingroup", "mygroup", "myuser"},
236 | },
237 | },
238 | {
239 | name: "preserves extra parts",
240 | input: &ShellPart{
241 | ExtraPre: "# This is a comment",
242 | Command: CommandUserAdd,
243 | Args: []string{"myuser"},
244 | Delimiter: "&&",
245 | },
246 | expected: &ShellPart{
247 | ExtraPre: "# This is a comment",
248 | Command: CommandAddUser,
249 | Args: []string{"myuser"},
250 | Delimiter: "&&",
251 | },
252 | },
253 | }
254 |
255 | for _, tc := range testCases {
256 | t.Run(tc.name, func(t *testing.T) {
257 | result := ConvertUserAddToAddUser(tc.input)
258 |
259 | // Compare command
260 | if result.Command != tc.expected.Command {
261 | t.Errorf("Command: expected %q, got %q", tc.expected.Command, result.Command)
262 | }
263 |
264 | // Compare args
265 | if len(result.Args) != len(tc.expected.Args) {
266 | t.Errorf("Args length: expected %d, got %d", len(tc.expected.Args), len(result.Args))
267 | } else {
268 | for i, arg := range tc.expected.Args {
269 | if result.Args[i] != arg {
270 | t.Errorf("Arg[%d]: expected %q, got %q", i, arg, result.Args[i])
271 | }
272 | }
273 | }
274 |
275 | // Compare ExtraPre and Delimiter
276 | if result.ExtraPre != tc.expected.ExtraPre {
277 | t.Errorf("ExtraPre: expected %q, got %q", tc.expected.ExtraPre, result.ExtraPre)
278 | }
279 | if result.Delimiter != tc.expected.Delimiter {
280 | t.Errorf("Delimiter: expected %q, got %q", tc.expected.Delimiter, result.Delimiter)
281 | }
282 | })
283 | }
284 | }
285 |
286 | func TestConvertGroupAddToAddGroup(t *testing.T) {
287 | testCases := []struct {
288 | name string
289 | input *ShellPart
290 | expected *ShellPart
291 | }{
292 | {
293 | name: "basic groupadd",
294 | input: &ShellPart{
295 | Command: CommandGroupAdd,
296 | Args: []string{"mygroup"},
297 | },
298 | expected: &ShellPart{
299 | Command: CommandAddGroup,
300 | Args: []string{"mygroup"},
301 | },
302 | },
303 | {
304 | name: "system group",
305 | input: &ShellPart{
306 | Command: CommandGroupAdd,
307 | Args: []string{"-r", "mygroup"},
308 | },
309 | expected: &ShellPart{
310 | Command: CommandAddGroup,
311 | Args: []string{"--system", "mygroup"},
312 | },
313 | },
314 | {
315 | name: "system group long option",
316 | input: &ShellPart{
317 | Command: CommandGroupAdd,
318 | Args: []string{"--system", "mygroup"},
319 | },
320 | expected: &ShellPart{
321 | Command: CommandAddGroup,
322 | Args: []string{"--system", "mygroup"},
323 | },
324 | },
325 | {
326 | name: "custom GID",
327 | input: &ShellPart{
328 | Command: CommandGroupAdd,
329 | Args: []string{"-g", "1001", "mygroup"},
330 | },
331 | expected: &ShellPart{
332 | Command: CommandAddGroup,
333 | Args: []string{"--gid", "1001", "mygroup"},
334 | },
335 | },
336 | {
337 | name: "custom GID long option",
338 | input: &ShellPart{
339 | Command: CommandGroupAdd,
340 | Args: []string{"--gid", "1001", "mygroup"},
341 | },
342 | expected: &ShellPart{
343 | Command: CommandAddGroup,
344 | Args: []string{"--gid", "1001", "mygroup"},
345 | },
346 | },
347 | {
348 | name: "with force option",
349 | input: &ShellPart{
350 | Command: CommandGroupAdd,
351 | Args: []string{"-f", "mygroup"},
352 | },
353 | expected: &ShellPart{
354 | Command: CommandAddGroup,
355 | Args: []string{"mygroup"},
356 | },
357 | },
358 | {
359 | name: "with force option long",
360 | input: &ShellPart{
361 | Command: CommandGroupAdd,
362 | Args: []string{"--force", "mygroup"},
363 | },
364 | expected: &ShellPart{
365 | Command: CommandAddGroup,
366 | Args: []string{"mygroup"},
367 | },
368 | },
369 | {
370 | name: "with non-unique option",
371 | input: &ShellPart{
372 | Command: CommandGroupAdd,
373 | Args: []string{"-o", "mygroup"},
374 | },
375 | expected: &ShellPart{
376 | Command: CommandAddGroup,
377 | Args: []string{"mygroup"},
378 | },
379 | },
380 | {
381 | name: "with non-unique option long",
382 | input: &ShellPart{
383 | Command: CommandGroupAdd,
384 | Args: []string{"--non-unique", "mygroup"},
385 | },
386 | expected: &ShellPart{
387 | Command: CommandAddGroup,
388 | Args: []string{"mygroup"},
389 | },
390 | },
391 | {
392 | name: "with password option",
393 | input: &ShellPart{
394 | Command: CommandGroupAdd,
395 | Args: []string{"-p", "password123", "mygroup"},
396 | },
397 | expected: &ShellPart{
398 | Command: CommandAddGroup,
399 | Args: []string{"mygroup"},
400 | },
401 | },
402 | {
403 | name: "with password option long",
404 | input: &ShellPart{
405 | Command: CommandGroupAdd,
406 | Args: []string{"--password", "password123", "mygroup"},
407 | },
408 | expected: &ShellPart{
409 | Command: CommandAddGroup,
410 | Args: []string{"mygroup"},
411 | },
412 | },
413 | {
414 | name: "multiple options",
415 | input: &ShellPart{
416 | Command: CommandGroupAdd,
417 | Args: []string{"-r", "-g", "1001", "mygroup"},
418 | },
419 | expected: &ShellPart{
420 | Command: CommandAddGroup,
421 | Args: []string{"--system", "--gid", "1001", "mygroup"},
422 | },
423 | },
424 | {
425 | name: "preserves extra parts",
426 | input: &ShellPart{
427 | ExtraPre: "# This is a comment",
428 | Command: CommandGroupAdd,
429 | Args: []string{"mygroup"},
430 | Delimiter: "&&",
431 | },
432 | expected: &ShellPart{
433 | ExtraPre: "# This is a comment",
434 | Command: CommandAddGroup,
435 | Args: []string{"mygroup"},
436 | Delimiter: "&&",
437 | },
438 | },
439 | }
440 |
441 | for _, tc := range testCases {
442 | t.Run(tc.name, func(t *testing.T) {
443 | result := ConvertGroupAddToAddGroup(tc.input)
444 |
445 | // Compare command
446 | if result.Command != tc.expected.Command {
447 | t.Errorf("Command: expected %q, got %q", tc.expected.Command, result.Command)
448 | }
449 |
450 | // Compare args
451 | if len(result.Args) != len(tc.expected.Args) {
452 | t.Errorf("Args length: expected %d, got %d", len(tc.expected.Args), len(result.Args))
453 | } else {
454 | for i, arg := range tc.expected.Args {
455 | if result.Args[i] != arg {
456 | t.Errorf("Arg[%d]: expected %q, got %q", i, arg, result.Args[i])
457 | }
458 | }
459 | }
460 |
461 | // Compare ExtraPre and Delimiter
462 | if result.ExtraPre != tc.expected.ExtraPre {
463 | t.Errorf("ExtraPre: expected %q, got %q", tc.expected.ExtraPre, result.ExtraPre)
464 | }
465 | if result.Delimiter != tc.expected.Delimiter {
466 | t.Errorf("Delimiter: expected %q, got %q", tc.expected.Delimiter, result.Delimiter)
467 | }
468 | })
469 | }
470 | }
471 |
--------------------------------------------------------------------------------
/pkg/dfc/builtin-mappings.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2025 Chainguard, Inc.
2 | # SPDX-License-Identifier: Apache-2.0
3 |
4 | # NOTE: this file is managed by automation and should not be edited directly
5 |
6 | images:
7 | alpine: chainguard-base:latest
8 | amazon/cloudwatch-agent: amazon-cloudwatch-agent-operator
9 | apache/airflow: airflow-core
10 | apache/beam_python3.7_sdk: apache-beam-python-sdk
11 | apache/nifi: apache-nifi
12 | apache/tika: apache-tika
13 | apache/yunikorn: yunikorn-scheduler
14 | argoproj/argo-rollouts: kubectl-argo-rollouts
15 | argoproj/argocd: argocd-repo-server
16 | atmoz/sftp: atmoz-sftp
17 | banzaicloud/logging-operator: kube-logging-operator
18 | calico/node: calico-typha
19 | camunda/zeebe: camunda-zeebe
20 | cfssl/cfssl: cfssl-self-sign
21 | chartmuseum/chartmuseum: helm-chartmuseum
22 | cilium/cilium: cilium-operator-aws
23 | clickhouse/clickhouse-server: clickhouse
24 | confluentinc/cp-kafka: confluent-kafka
25 | cr.l5d.io/linkerd/extension-init: linkerd-extension-init
26 | crossplane/provider-aws: crossplane-aws-dynamodb
27 | crossplane/provider-azure: crossplane-azure-storage
28 | crossplane/provider-sql: crossplane-sql
29 | cybertecpostgresql/pg_timetable: pg-timetable
30 | cypress/base: cypress-base
31 | dart: dart-runtime
32 | daskgateway/dask-gateway: dask-gateway-server
33 | datadog/agent: datadog-agent
34 | debezium/connect: debezium-connect
35 | debian: chainguard-base:latest
36 | dependencytrack/bundled: dependency-track
37 | dopplerhq/kubernetes-operator: doppler-kubernetes-operator
38 | dragonflyoss/dfdaemon: dragonfly
39 | eclipse-temurin: jdk
40 | envoyproxy/gateway: envoy-gateway
41 | envoyproxy/ratelimit: envoy-ratelimit
42 | fedora: chainguard-base:latest
43 | fluxcd/flux: flux-image-automation-controller
44 | gcc: gcc-glibc
45 | gcr.io/kaniko-project/executor: kaniko
46 | gcr.io/kaniko-project/warmer: kaniko-warmer
47 | gcr.io/knative-releases/knative.dev/operator/cmd/operator: knative-operator-webhook
48 | gcr.io/knative-releases/knative.dev/serving/cmd/queue: knative-serving-queue
49 | ghcr.io/kyverno/kyverno: kyvernopre
50 | ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator: opentelemetry-operator-target-allocator
51 | ghcr.io/opencost/opencost: opencost-ui
52 | ghcr.io/opencost/opencost-ui: opencost
53 | goharbor/harbor-core: harbor-jobservice
54 | golang*: go
55 | google/cloud-sdk: google-cloud-sdk
56 | grafana/agent-operator: grafana-agent-operator
57 | grafana/alloy: grafana-alloy
58 | grafana/mimir: grafana-mimir
59 | grafana/oncall: grafana-oncall
60 | grafana/rollout-operator: grafana-rollout-operator
61 | guacamole/guacamole: guacamole-server
62 | hashicorp/vault: vault-k8s
63 | istio/install-cni: istio-pilot
64 | istio/operator: istio-pilot
65 | istio/pilot: istio-pilot
66 | istio/proxyv2: istio-pilot
67 | jaegertracing/all-in-one: jaeger-query
68 | jitsucom/bulker: jitsucom-syncctl
69 | jitsucom/jitsu: jitsucom-console
70 | jupyterhub/k8s-hub: jupyterhub-k8s-hub
71 | jupyterhub/k8s-network-tools: jupyterhub-k8s-network-tools
72 | justwatch/elasticsearch_exporter: prometheus-elasticsearch-exporter
73 | kedacore/keda: keda-admission-webhooks
74 | kubernetesui/dashboard: kubernetes-dashboard
75 | kubernetesui/dashboard-api: kubernetes-dashboard-api
76 | kubernetesui/dashboard-auth: kubernetes-dashboard-auth
77 | kubernetesui/dashboard-metrics-scraper: kubernetes-dashboard-metrics-scraper
78 | kubernetesui/dashboard-web: kubernetes-dashboard-web
79 | library/docker: docker-dind
80 | library/tomcat: tomcat-jdk8
81 | mailcow/unbound: unbound-mailcow
82 | mattermost/mattermost-team-edition: mattermost
83 | mcr.microsoft.com/dotnet/aspnet: aspnet-runtime
84 | mcr.microsoft.com/dotnet/runtime: dotnet-runtime
85 | mcr.microsoft.com/dotnet/sdk: dotnet-runtime
86 | minio/minio: minio-client
87 | minio/operator: minio-operator
88 | mongo: mongodb
89 | neuvector/controller: neuvector-manager
90 | newrelic/infrastructure-bundle: newrelic-infrastructure-bundle
91 | newrelic/infrastructure-k8s: newrelic-infrastructure-k8s
92 | newrelic/k8s-events-forwarder: newrelic-k8s-events-forwarder
93 | newrelic/nri-kube-events: newrelic-kube-events
94 | newrelic/nri-kubernetes: newrelic-kubernetes
95 | newrelic/nri-prometheus: newrelic-prometheus
96 | newrelic/nri-statsd: newrelic-nri-statsd
97 | nodejs*: node
98 | nvidia/container-toolkit: nvidia-container-toolkit
99 | nvidia/k8s-device-plugin: nvidia-device-plugin
100 | oliver006/redis_exporter: prometheus-redis-exporter
101 | openbao/openbao: openbao-k8s
102 | openebs/provisioner-localpv: dynamic-localpv-provisioner
103 | openjdk: jdk
104 | prom/alertmanager: prometheus-alertmanager
105 | prom/blackbox-exporter: prometheus-blackbox-exporter
106 | prom/cloudwatch-exporter: prometheus-cloudwatch-exporter
107 | prom/mysqld-exporter: prometheus-mysqld-exporter
108 | prom/node-exporter: prometheus-node-exporter
109 | prom/pushgateway: prometheus-pushgateway
110 | prom/statsd-exporter: prometheus-statsd-exporter
111 | public.ecr.aws/karpenter/controller: karpenter
112 | public.ecr.aws/mountpoint-s3-csi-driver/aws-mountpoint-s3-csi-driver: mountpoint-s3-csi-driver
113 | quay.io/debezium/connect: debezium-connect
114 | quay.io/jetstack/cert-manager-controller: cert-manager-webhook
115 | quay.io/jupyter/base-notebook: jupyterhub-base-notebook
116 | quay.io/prometheus/cloudwatch-exporter: prometheus-cloudwatch-exporter
117 | quay.io/prometheuscommunity/yet-another-cloudwatch-exporter: yace
118 | rancher/agent: rancher-agent
119 | rancher/fleet: rancher-fleet-agent
120 | rancher/k3s: k3s-static
121 | redis: redis-sentinel
122 | redpandadata/console: redpanda-data-console
123 | registry.k8s.io/provider-aws/cloud-controller-manager: cloud-provider-aws
124 | registryk8s: cluster-api-clusterctl
125 | rook/ceph: rook-ceph
126 | s3-controller: aws-s3-controller
127 | selenium/hub: docker-selenium-hub
128 | stakater/reloader: stakater-reloader
129 | static*: static:latest
130 | strimzi/kafka: strimzi-kafka
131 | strimzi/operator: strimzi-kafka-operator
132 | temporalio/admin-tools: temporal-admin-tools
133 | temporalio/server: temporal-server
134 | thingsboard/tb: thingsboard-tb-js-executor
135 | ubuntu: chainguard-base:latest
136 | upstream-image: dapr-sentry
137 | vault: vault-k8s
138 | victoriametrics/operator: victoriametrics-operator
139 | victoriametrics/victoria-metrics: victoriametrics-vmstorage
140 | vmware/kube-fluentd-operator: kube-logging-operator-fluentd
141 | wrouesnel/postgres_exporter: prometheus-postgres-exporter
142 | xpkg.upbound.io/crossplane-contrib/provider-keycloak: crossplane-keycloak
143 | packages:
144 | alpine: {}
145 | debian:
146 | awscli:
147 | - aws-cli
148 | build-essential:
149 | - build-base
150 | fonts-liberation:
151 | - font-liberation
152 | fonts-open-sans:
153 | - font-opensans
154 | fuse:
155 | - fuse2
156 | - fuse-common
157 | g++:
158 | - gcc
159 | gettext-base:
160 | - gettext
161 | git-delta:
162 | - delta
163 | gnupg2:
164 | - gnupg
165 | google-chrome-stable:
166 | - chromium
167 | libbz2-dev:
168 | - bzip2-dev
169 | libc-client-dev:
170 | - glibc-dev
171 | libc6-dev:
172 | - glibc-dev
173 | libcairo2:
174 | - cairo
175 | libcups2:
176 | - cups-libs
177 | libcurl4-openssl-dev:
178 | - curl-dev
179 | libgssapi-krb5-2:
180 | - krb5-libs
181 | libicu-dev:
182 | - icu-dev
183 | libkrb5-dev:
184 | - krb5-dev
185 | liblzma-dev:
186 | - xz-dev
187 | libmagic1:
188 | - libmagic
189 | - libmagic-dev
190 | libncurses5-dev:
191 | - ncurses
192 | libncursesw5-dev:
193 | - ncurses-dev
194 | libpq-dev:
195 | - postgresql-dev
196 | libpq5:
197 | - libpq
198 | librdkafka1:
199 | - librdkafka
200 | libreadline-dev:
201 | - readline
202 | libsqlite3-dev:
203 | - sqlite-libs
204 | libssl-dev:
205 | - libssl3
206 | libxi6:
207 | - libxi
208 | libxmlsec1:
209 | - xmlsec
210 | libxmlsec1-dev:
211 | - xmlsec-dev
212 | libxmlsec1-openssl:
213 | - xmlsec-openssl
214 | locales:
215 | - glibc-locales
216 | netbase:
217 | - wolfi-baselayout
218 | netcat-traditional:
219 | - netcat-openbsd
220 | pcre2-utils:
221 | - pcre2
222 | pkg-config:
223 | - pkgconf
224 | postgresql-client-14:
225 | - postgresql-14-client
226 | postgresql-contrib:
227 | - postgresql-14-contrib
228 | protobuf-compiler:
229 | - protobuf-c-compiler
230 | python3:
231 | - python-3
232 | python3-pip:
233 | - py3-pip
234 | python3-virtualenv:
235 | - py3-virtualenv
236 | python3-wheel:
237 | - py3-wheel
238 | python3-openssl:
239 | - py3-pyopenssl
240 | s3fs:
241 | - s3fs-fuse
242 | ssh:
243 | - openssh-client
244 | - openssh-server
245 | software-properties-common: []
246 | uuid-runtime:
247 | - util-linux-misc
248 | watch:
249 | - procps
250 | xfonts-utils:
251 | - font-util
252 | - mkfontscale
253 | - bdftopcf
254 | xz-utils:
255 | - xz
256 | zlib1g-dev:
257 | - zlib-dev
258 | fedora: {}
259 |
--------------------------------------------------------------------------------
/pkg/dfc/mappings.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "context"
10 | _ "embed"
11 | "fmt"
12 |
13 | "github.com/chainguard-dev/clog"
14 | "gopkg.in/yaml.v3"
15 | )
16 |
17 | //go:embed builtin-mappings.yaml
18 | var builtinMappingsYAMLBytes []byte
19 |
20 | // defaultGetDefaultMappings is the real implementation of GetDefaultMappings
21 | func defaultGetDefaultMappings(ctx context.Context, update bool) (MappingsConfig, error) {
22 | log := clog.FromContext(ctx)
23 | var mappings MappingsConfig
24 |
25 | // If update is requested, try to update the mappings first
26 | if update {
27 | // Set up update options
28 | updateOpts := UpdateOptions{}
29 | // Use the default URL
30 | updateOpts.MappingsURL = defaultMappingsURL
31 |
32 | if err := Update(ctx, updateOpts); err != nil {
33 | log.Warn("Failed to update mappings, will try to use existing mappings", "error", err)
34 | }
35 | }
36 |
37 | // Try to use XDG config mappings file if available
38 | xdgMappings, err := getMappingsConfig()
39 | if err != nil {
40 | return mappings, fmt.Errorf("checking XDG config mappings: %w", err)
41 | }
42 |
43 | var mappingsBytes []byte
44 | if xdgMappings != nil {
45 | log.Debug("Using mappings from XDG config directory")
46 | mappingsBytes = xdgMappings
47 | } else {
48 | // Fall back to embedded mappings
49 | log.Debug("Using embedded builtin mappings")
50 | mappingsBytes = builtinMappingsYAMLBytes
51 | }
52 |
53 | // Unmarshal the mappings
54 | if err := yaml.Unmarshal(mappingsBytes, &mappings); err != nil {
55 | return mappings, fmt.Errorf("unmarshalling mappings: %w", err)
56 | }
57 |
58 | return mappings, nil
59 | }
60 |
61 | // MergeMappings merges the base and overlay mappings
62 | // Any values in the overlay take precedence over the base
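    | //
    | // A minimal sketch of intended use (the overlay values here are
    | // illustrative, not taken from the builtin mappings):
    | //
    | //	base := MappingsConfig{Images: map[string]string{"ubuntu": "chainguard-base:latest"}}
    | //	overlay := MappingsConfig{Images: map[string]string{"ubuntu": "my-base:latest"}}
    | //	merged := MergeMappings(base, overlay)
    | //	// merged.Images["ubuntu"] == "my-base:latest" (overlay wins)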
63 | func MergeMappings(base, overlay MappingsConfig) MappingsConfig {
64 | result := MappingsConfig{
65 | Images: make(map[string]string),
66 | Packages: make(PackageMap),
67 | }
68 |
69 | // Copy base images
70 | for k, v := range base.Images {
71 | result.Images[k] = v
72 | }
73 |
74 | // Overlay with extra images
75 | for k, v := range overlay.Images {
76 | result.Images[k] = v
77 | }
78 |
79 | // Copy base packages for each distro
80 | for distro, packages := range base.Packages {
81 | if result.Packages[distro] == nil {
82 | result.Packages[distro] = make(map[string][]string)
83 | }
84 | for pkg, mappings := range packages {
85 | result.Packages[distro][pkg] = mappings
86 | }
87 | }
88 |
89 | // Overlay with extra packages
90 | for distro, packages := range overlay.Packages {
91 | if result.Packages[distro] == nil {
92 | result.Packages[distro] = make(map[string][]string)
93 | }
94 | for pkg, mappings := range packages {
95 | result.Packages[distro][pkg] = mappings
96 | }
97 | }
98 |
99 | return result
100 | }
101 |
--------------------------------------------------------------------------------
/pkg/dfc/shell.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "strings"
10 | )
11 |
12 | // ShellCommand represents a parsed shell command or group of commands
13 | type ShellCommand struct {
14 | Parts []*ShellPart // The parsed parts of this command
15 | }
16 |
17 | // ShellPart represents a single part of a shell command
18 | type ShellPart struct {
19 | ExtraPre string // Environment variable declarations and other command prefixes
20 | Command string // The command such as "apt-get"
21 | Args []string // All the args such as "install" "-y" "nano" "vim" (includes pipe character)
22 | Delimiter string // The delimiter for this part, such as "&&" or "||" or ";"
23 | }
24 |
25 | const partSeparator = " \\\n "
26 |
27 | // String converts a ShellCommand back to its string representation
28 | func (sc *ShellCommand) String() string {
29 | // If no parts, return "true" as fallback
30 | if len(sc.Parts) == 0 {
31 | return "true"
32 | }
33 |
34 | s := ""
35 | for i, part := range sc.Parts {
36 | if i != 0 {
37 | s += partSeparator
38 | }
39 | if part.ExtraPre != "" {
40 | s += part.ExtraPre + " "
41 | }
42 | s += part.Command
43 | if len(part.Args) > 0 {
44 | s += " " + strings.Join(part.Args, " ")
45 | }
46 | if part.Delimiter != "" {
47 | s += " " + part.Delimiter
48 | }
49 | }
50 | return s
51 | }
52 |
53 | // ParseMultilineShell parses a shell command into a structured representation
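    | //
    | // For example (mirroring the cases in shell_test.go):
    | //
    | //	cmd := ParseMultilineShell(`echo hello && echo world`)
    | //	// cmd.Parts[0]: Command "echo", Args ["hello"], Delimiter "&&"
    | //	// cmd.Parts[1]: Command "echo", Args ["world"]
    | //	// cmd.String() re-joins the parts with partSeparator between them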
54 | func ParseMultilineShell(raw string) *ShellCommand {
55 | if strings.TrimSpace(raw) == "" {
56 | return nil
57 | }
58 |
59 | // Remove comments and normalize whitespace
60 | cleaned := removeComments(raw)
61 | if strings.TrimSpace(cleaned) == "" {
62 | return nil
63 | }
64 |
65 | // Known delimiters; pipe ("|") is intentionally excluded so piped commands remain a single part
66 | delimiters := []string{"&&", "||", ";", "&"}
67 |
68 | var parts []*ShellPart
69 | remainingCmd := strings.TrimSpace(cleaned)
70 |
71 | for len(remainingCmd) > 0 {
72 | // Find next delimiter not inside quotes, parentheses, or subshells
73 | nextDelim, nextDelimPos := findNextDelimiter(remainingCmd, delimiters)
74 | if nextDelimPos == -1 {
75 | // No more delimiters, this is the last part
76 | part := parseShellPart(remainingCmd, "")
77 | parts = append(parts, part)
78 | break
79 | }
80 |
81 | // Split command into current part and remaining
82 | currentCmdRaw := strings.TrimSpace(remainingCmd[:nextDelimPos])
83 | part := parseShellPart(currentCmdRaw, nextDelim)
84 | parts = append(parts, part)
85 |
86 | // Move past the delimiter for next iteration
87 | remainingCmd = strings.TrimSpace(remainingCmd[nextDelimPos+len(nextDelim):])
88 | }
89 |
90 | return &ShellCommand{Parts: parts}
91 | }
92 |
93 | // removeComments removes all comments from the command string and normalizes newlines
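    | //
    | // For example (comment lines drop out and backslash continuations collapse to spaces):
    | //
    | //	removeComments("apt-get update \\\n  # refresh\n  && apt-get install -y curl")
    | //	// → "apt-get update && apt-get install -y curl"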
94 | func removeComments(input string) string {
95 | var result strings.Builder
96 | lines := strings.Split(input, "\n")
97 |
98 | for i, line := range lines {
99 | // Find comment position (if any)
100 | commentPos := -1
101 | inSingleQuote := false
102 | inDoubleQuote := false
103 |
104 | for j := 0; j < len(line); j++ {
105 | if line[j] == '\'' && !inDoubleQuote {
106 | inSingleQuote = !inSingleQuote
107 | continue
108 | }
109 | if line[j] == '"' && !inSingleQuote {
110 | inDoubleQuote = !inDoubleQuote
111 | continue
112 | }
113 |
114 | // Only treat # as comment marker if outside quotes
115 | if line[j] == '#' && !inSingleQuote && !inDoubleQuote {
116 | commentPos = j
117 | break
118 | }
119 | }
120 |
121 | // Process line with possible comment removal
122 | var processedLine string
123 | if commentPos >= 0 {
124 | processedLine = strings.TrimSpace(line[:commentPos])
125 | } else {
126 | processedLine = strings.TrimSpace(line)
127 | }
128 |
129 | if processedLine != "" {
130 | // Check if the line ends with a backslash (line continuation)
131 | if strings.HasSuffix(processedLine, "\\") && i < len(lines)-1 {
132 | // Add the line without the trailing backslash
133 | result.WriteString(strings.TrimSpace(processedLine[:len(processedLine)-1]))
134 | result.WriteString(" ") // Just add a space instead of a newline
135 | } else {
136 | result.WriteString(processedLine)
137 | result.WriteString(" ") // Add space to separate from next line
138 | }
139 | }
140 | }
141 |
142 | return strings.TrimSpace(result.String())
143 | }
144 |
145 | // findNextDelimiter finds the position of the next delimiter not inside quotes/parentheses
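    | //
    | // For example (the "&&" inside the double quotes is skipped):
    | //
    | //	findNextDelimiter(`echo "a && b" && echo c`, []string{"&&", "||", ";", "&"})
    | //	// → "&&", 14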
146 | func findNextDelimiter(cmd string, delimiters []string) (string, int) {
147 | inSingleQuote := false
148 | inDoubleQuote := false
149 | parenDepth := 0
150 | backtickDepth := 0
151 | subshellDepth := 0
152 |
153 | for i := 0; i < len(cmd); i++ {
154 | // Check for quote start/end
155 | if cmd[i] == '\'' && !inDoubleQuote {
156 | inSingleQuote = !inSingleQuote
157 | continue
158 | }
159 | if cmd[i] == '"' && !inSingleQuote {
160 | inDoubleQuote = !inDoubleQuote
161 | continue
162 | }
163 |
164 | // Check for parentheses
165 | if !inSingleQuote && !inDoubleQuote {
166 | if cmd[i] == '(' {
167 | parenDepth++
168 | continue
169 | }
170 | if cmd[i] == ')' && parenDepth > 0 {
171 | parenDepth--
172 | continue
173 | }
174 |
175 | // Check for backticks
176 | if cmd[i] == '`' {
177 | backtickDepth = 1 - backtickDepth // Toggle between 0 and 1
178 | continue
179 | }
180 |
181 | // Check for subshell $()
182 | if i < len(cmd)-1 && cmd[i] == '$' && cmd[i+1] == '(' {
183 | subshellDepth++
184 | i++ // Skip the next character
185 | continue
186 | }
187 | if cmd[i] == ')' && subshellDepth > 0 {
188 | subshellDepth--
189 | continue
190 | }
191 |
192 | // Skip comments
193 | if cmd[i] == '#' {
194 | break // Ignore everything until the end of this segment
195 | }
196 |
197 | // Only check for delimiters when not in any quotes or special sections
198 | if !inSingleQuote && !inDoubleQuote && parenDepth == 0 && backtickDepth == 0 && subshellDepth == 0 {
199 | for _, delim := range delimiters {
200 | if i+len(delim) <= len(cmd) && cmd[i:i+len(delim)] == delim {
201 | return delim, i
202 | }
203 | }
204 | }
205 | }
206 | }
207 |
208 | return "", -1
209 | }
210 |
211 | // parseShellPart parses a command part into command and args
212 | func parseShellPart(cmdPart string, delimiter string) *ShellPart {
213 | cmdPart = strings.TrimSpace(cmdPart)
214 |
215 | // Special handling for parenthesized commands
216 | if strings.HasPrefix(cmdPart, "(") && strings.HasSuffix(cmdPart, ")") {
217 | return &ShellPart{
218 | Command: cmdPart,
219 | Args: nil,
220 | Delimiter: delimiter,
221 | }
222 | }
223 |
224 | // Tokenize the command part, respecting quotes
225 | tokens := tokenize(cmdPart)
226 | if len(tokens) == 0 {
227 | return &ShellPart{
228 | Command: "",
229 | Delimiter: delimiter,
230 | }
231 | }
232 |
233 | // Find the actual command by skipping environment variable declarations
234 | commandIndex := findCommandIndex(tokens)
235 |
236 | // If we can't find a command after env vars, use all env vars as the command
237 | if commandIndex >= len(tokens) {
238 | return &ShellPart{
239 | Command: strings.Join(tokens, " "),
240 | Args: nil,
241 | Delimiter: delimiter,
242 | }
243 | }
244 |
245 | // Extract environment variables to ExtraPre and the actual command
246 | var extraPre string
247 | if commandIndex > 0 {
248 | extraPre = strings.Join(tokens[:commandIndex], " ")
249 | }
250 |
251 | return &ShellPart{
252 | ExtraPre: extraPre,
253 | Command: tokens[commandIndex],
254 | Args: tokens[commandIndex+1:],
255 | Delimiter: delimiter,
256 | }
257 | }
258 |
259 | // findCommandIndex finds the index of the first token that's not an environment variable declaration
260 | func findCommandIndex(tokens []string) int {
261 | for i, token := range tokens {
262 | // If it doesn't look like an env var assignment, consider it the command
263 | if !isEnvVarAssignment(token) {
264 | return i
265 | }
266 | }
267 | return len(tokens) // All tokens are env vars
268 | }
269 |
270 | // isEnvVarAssignment checks if a token is an environment variable assignment
271 | func isEnvVarAssignment(token string) bool {
272 | // Check for assignment pattern (VAR=value)
273 | for i, c := range token {
274 | if c == '=' {
275 | // Make sure there's at least one character before '='
276 | return i > 0
277 | }
278 | }
279 | return false
280 | }
281 |
282 | // tokenize splits a command into tokens, respecting quotes
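    | //
    | // For example (quotes are kept as part of the token, as the tests expect):
    | //
    | //	tokenize(`echo "hello world" done`)
    | //	// → []string{`echo`, `"hello world"`, `done`}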
283 | func tokenize(cmd string) []string {
284 | var tokens []string
285 | var currentToken strings.Builder
286 | inSingleQuote := false
287 | inDoubleQuote := false
288 | parenDepth := 0
289 | backtickDepth := 0
290 | subshellDepth := 0
291 | inToken := false
292 |
293 | for i := 0; i < len(cmd); i++ {
294 | char := cmd[i]
295 |
296 | // Handle quotes
297 | if char == '\'' && !inDoubleQuote {
298 | inSingleQuote = !inSingleQuote
299 | inToken = true
300 | currentToken.WriteByte(char)
301 | continue
302 | }
303 | if char == '"' && !inSingleQuote {
304 | inDoubleQuote = !inDoubleQuote
305 | inToken = true
306 | currentToken.WriteByte(char)
307 | continue
308 | }
309 |
310 | // Handle parentheses, backticks, and subshells
311 | if !inSingleQuote && !inDoubleQuote {
312 | if char == '(' {
313 | parenDepth++
314 | inToken = true
315 | currentToken.WriteByte(char)
316 | continue
317 | }
318 | if char == ')' && parenDepth > 0 {
319 | parenDepth--
320 | inToken = true
321 | currentToken.WriteByte(char)
322 | continue
323 | }
324 | if char == '`' {
325 | backtickDepth = 1 - backtickDepth
326 | inToken = true
327 | currentToken.WriteByte(char)
328 | continue
329 | }
330 | if i < len(cmd)-1 && char == '$' && cmd[i+1] == '(' {
331 | subshellDepth++
332 | inToken = true
333 | currentToken.WriteByte(char)
    | currentToken.WriteByte(cmd[i+1])
    | i++ // Skip the '(' so it is not also counted by the parenthesis check above
334 | continue
335 | }
336 | if char == ')' && subshellDepth > 0 {
337 | subshellDepth--
338 | inToken = true
339 | currentToken.WriteByte(char)
340 | continue
341 | }
342 | }
343 |
344 | // Handle spaces to separate tokens
345 | if char == ' ' || char == '\t' {
346 | if inSingleQuote || inDoubleQuote || parenDepth > 0 || backtickDepth > 0 || subshellDepth > 0 {
347 | // Space inside quotes or special constructs - keep it
348 | inToken = true
349 | currentToken.WriteByte(char)
350 | } else if inToken {
351 | // End of a token
352 | tokens = append(tokens, currentToken.String())
353 | currentToken.Reset()
354 | inToken = false
355 | }
356 | } else {
357 | // Regular character
358 | inToken = true
359 | currentToken.WriteByte(char)
360 | }
361 | }
362 |
363 | // Add the last token if any
364 | if inToken {
365 | tokens = append(tokens, currentToken.String())
366 | }
367 |
368 | return tokens
369 | }
370 |
--------------------------------------------------------------------------------
/pkg/dfc/shell_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "testing"
10 |
11 | "github.com/google/go-cmp/cmp"
12 | )
13 |
14 | func TestShellParsing(t *testing.T) {
15 | type testCase struct {
16 | name string
17 | raw string
18 | expected string
19 | wantCommand *ShellCommand
20 | }
21 | cases := []testCase{}
22 |
23 | for _, delimiter := range []string{"&&", "||", ";"} { // pipe ("|") is intentionally not a delimiter
24 | cases = append(cases, testCase{
25 | name: "basic " + delimiter,
26 | raw: `echo hello ` + delimiter + ` echo world`,
27 | expected: `echo hello ` + delimiter + partSeparator + `echo world`,
28 | wantCommand: &ShellCommand{
29 | Parts: []*ShellPart{
30 | {
31 | Command: "echo",
32 | Args: []string{"hello"},
33 | Delimiter: delimiter,
34 | },
35 | {
36 | Command: "echo",
37 | Args: []string{"world"},
38 | },
39 | },
40 | },
41 | })
42 |
43 | cases = append(cases, testCase{
44 | name: "spacing " + delimiter,
45 | raw: ` echo hello ` + delimiter + ` echo world `,
46 | expected: `echo hello ` + delimiter + partSeparator + `echo world`,
47 | wantCommand: &ShellCommand{
48 | Parts: []*ShellPart{
49 | {
50 | Command: "echo",
51 | Args: []string{"hello"},
52 | Delimiter: delimiter,
53 | },
54 | {
55 | Command: "echo",
56 | Args: []string{"world"},
57 | },
58 | },
59 | },
60 | })
61 |
62 | cases = append(cases, testCase{
63 | name: "quoted args " + delimiter,
64 | raw: `echo "hello notanarg" other ` + delimiter + ` echo world 'not an arg' other`,
65 | expected: `echo "hello notanarg" other ` + delimiter + partSeparator + `echo world 'not an arg' other`,
66 | wantCommand: &ShellCommand{
67 | Parts: []*ShellPart{
68 | {
69 | Command: "echo",
70 | Args: []string{`"hello notanarg"`, "other"},
71 | Delimiter: delimiter,
72 | },
73 | {
74 | Command: "echo",
75 | Args: []string{"world", `'not an arg'`, "other"},
76 | },
77 | },
78 | },
79 | })
80 |
81 | cases = append(cases, testCase{
82 | name: "parentheses section treated as single command " + delimiter,
83 | raw: `(echo "hello" && echo "bye") ` + delimiter + ` echo world`,
84 | expected: `(echo "hello" && echo "bye") ` + delimiter + partSeparator + `echo world`,
85 | wantCommand: &ShellCommand{
86 | Parts: []*ShellPart{
87 | {
88 | Command: `(echo "hello" && echo "bye")`,
89 | Delimiter: delimiter,
90 | },
91 | {
92 | Command: "echo",
93 | Args: []string{"world"},
94 | },
95 | },
96 | },
97 | })
98 |
99 | cases = append(cases, testCase{
100 | name: "subshell section treated as single arg " + delimiter,
101 | raw: `echo $(echo "hello") ` + delimiter + ` echo world`,
102 | expected: `echo $(echo "hello") ` + delimiter + partSeparator + `echo world`,
103 | wantCommand: &ShellCommand{
104 | Parts: []*ShellPart{
105 | {
106 | Command: `echo`,
107 | Args: []string{`$(echo "hello")`},
108 | Delimiter: delimiter,
109 | },
110 | {
111 | Command: "echo",
112 | Args: []string{"world"},
113 | },
114 | },
115 | },
116 | })
117 |
118 | cases = append(cases, testCase{
119 | name: "backtick section treated as single arg " + delimiter,
120 | raw: `echo ` + "`" + `echo "hello"` + "`" + ` ` + delimiter + ` echo world`,
121 | expected: `echo ` + "`" + `echo "hello"` + "`" + ` ` + delimiter + partSeparator + `echo world`,
122 | wantCommand: &ShellCommand{
123 | Parts: []*ShellPart{
124 | {
125 | Command: `echo`,
126 | Args: []string{"`" + `echo "hello"` + "`"},
127 | Delimiter: delimiter,
128 | },
129 | {
130 | Command: "echo",
131 | Args: []string{"world"},
132 | },
133 | },
134 | },
135 | })
136 |
137 | cases = append(cases, testCase{
138 | name: "delimiter inside quotes ignored " + delimiter,
139 | raw: `echo "hello notanarg ` + delimiter + `" other ` + delimiter + ` echo world 'not an arg ` + delimiter + `' other`,
140 | expected: `echo "hello notanarg ` + delimiter + `" other ` + delimiter + partSeparator + `echo world 'not an arg ` + delimiter + `' other`,
141 | wantCommand: &ShellCommand{
142 | Parts: []*ShellPart{
143 | {
144 | Command: "echo",
145 | Args: []string{`"hello notanarg ` + delimiter + `"`, "other"},
146 | Delimiter: delimiter,
147 | },
148 | {
149 | Command: "echo",
150 | Args: []string{"world", `'not an arg ` + delimiter + `'`, "other"},
151 | },
152 | },
153 | },
154 | })
155 |
156 | cases = append(cases, testCase{
157 | name: "env vars get preserved " + delimiter,
158 | raw: `A=1 B="2 ||" C= echo hello ` + delimiter + ` X=3 Y=4 Z="5 &&" echo world`,
159 | expected: `A=1 B="2 ||" C= echo hello ` + delimiter + partSeparator + `X=3 Y=4 Z="5 &&" echo world`,
160 | wantCommand: &ShellCommand{
161 | Parts: []*ShellPart{
162 | {
163 | ExtraPre: `A=1 B="2 ||" C=`,
164 | Command: `echo`,
165 | Args: []string{"hello"},
166 | Delimiter: delimiter,
167 | },
168 | {
169 | ExtraPre: `X=3 Y=4 Z="5 &&"`,
170 | Command: `echo`,
171 | Args: []string{"world"},
172 | },
173 | },
174 | },
175 | })
176 |
177 | cases = append(cases, testCase{
178 | name: "comments and whitespace get stripped " + delimiter,
179 | raw: "# comment before\n" + `echo hello ` + delimiter + ` echo world` + "\n# comment after\n",
180 | expected: `echo hello ` + delimiter + partSeparator + `echo world`,
181 | wantCommand: &ShellCommand{
182 | Parts: []*ShellPart{
183 | {
184 | Command: "echo",
185 | Args: []string{"hello"},
186 | Delimiter: delimiter,
187 | },
188 | {
189 | Command: "echo",
190 | Args: []string{"world"},
191 | },
192 | },
193 | },
194 | })
195 |
196 | cases = append(cases, testCase{
197 | name: "incomplete commands parsed correctly, doublequote " + delimiter,
198 | raw: `echo "hello world ` + delimiter + ` blah blah blah`,
199 | expected: `echo "hello world ` + delimiter + ` blah blah blah`,
200 | wantCommand: &ShellCommand{
201 | Parts: []*ShellPart{
202 | {
203 | Command: "echo",
204 | Args: []string{`"hello world ` + delimiter + ` blah blah blah`},
205 | },
206 | },
207 | },
208 | })
209 |
210 | cases = append(cases, testCase{
211 | name: "incomplete commands parsed correctly, singlequote " + delimiter,
212 | raw: `echo 'hello world ` + delimiter + ` blah blah blah`,
213 | expected: `echo 'hello world ` + delimiter + ` blah blah blah`,
214 | wantCommand: &ShellCommand{
215 | Parts: []*ShellPart{
216 | {
217 | Command: "echo",
218 | Args: []string{`'hello world ` + delimiter + ` blah blah blah`},
219 | },
220 | },
221 | },
222 | })
223 | }
224 |
225 | // Add specific test case for pipe commands
226 | cases = append(cases, testCase{
227 | name: "pipe-commands-as-single-command",
228 | raw: `apt-get -s dist-upgrade | grep "^Inst" | grep -i securi | awk -F " " {'print $2'} | xargs apt-get install --yes`,
229 | expected: `apt-get -s dist-upgrade | grep "^Inst" | grep -i securi | awk -F " " {'print $2'} | xargs apt-get install --yes`,
230 | wantCommand: &ShellCommand{
231 | Parts: []*ShellPart{
232 | {
233 | Command: "apt-get",
234 | Args: []string{"-s", "dist-upgrade", "|", "grep", `"^Inst"`, "|", "grep", "-i", "securi", "|", "awk", "-F", `" "`, "{'print $2'}", "|", "xargs", "apt-get", "install", "--yes"},
235 | },
236 | },
237 | },
238 | })
239 |
255 | cases = append(cases, testCase{
256 | name: "real world - django ",
257 | raw: `apt-get update \
258 | && apt-get install --assume-yes --no-install-recommends \
259 | g++ \
260 | gcc \
261 | libc6-dev \
262 | libpq-dev \
263 | zlib1g-dev \
264 | && python3 -m pip install --no-cache-dir -r ${REQ_FILE} \
265 | && apt-get purge --assume-yes --auto-remove \
266 | g++ \
267 | gcc \
268 | libc6-dev \
269 | libpq-dev \
270 | zlib1g-dev \
271 | && rm -rf /var/lib/apt/lists/*`,
272 | expected: `apt-get update && \
273 | apt-get install --assume-yes --no-install-recommends g++ gcc libc6-dev libpq-dev zlib1g-dev && \
274 | python3 -m pip install --no-cache-dir -r ${REQ_FILE} && \
275 | apt-get purge --assume-yes --auto-remove g++ gcc libc6-dev libpq-dev zlib1g-dev && \
276 | rm -rf /var/lib/apt/lists/*`,
277 | wantCommand: &ShellCommand{
278 | Parts: []*ShellPart{
279 | {
280 | Command: "apt-get",
281 | Args: []string{"update"},
282 | Delimiter: "&&",
283 | },
284 | {
285 | Command: "apt-get",
286 | Args: []string{"install", "--assume-yes", "--no-install-recommends", "g++", "gcc", "libc6-dev", "libpq-dev", "zlib1g-dev"},
287 | Delimiter: "&&",
288 | },
289 | {
290 | Command: "python3",
291 | Args: []string{"-m", "pip", "install", "--no-cache-dir", "-r", "${REQ_FILE}"},
292 | Delimiter: "&&",
293 | },
294 | {
295 | Command: "apt-get",
296 | Args: []string{"purge", "--assume-yes", "--auto-remove", "g++", "gcc", "libc6-dev", "libpq-dev", "zlib1g-dev"},
297 | Delimiter: "&&",
298 | },
299 | {
300 | Command: "rm",
301 | Args: []string{"-rf", "/var/lib/apt/lists/*"},
302 | },
303 | },
304 | },
305 | })
306 |
307 | cases = append(cases, testCase{
308 | name: "real world - inner comment stripped ",
309 | raw: `apt-get update \
310 | # some comment here
311 | # more comments here
312 | && X=1 Y='2' apt-get install -qy something \
313 | && apt-get remove -y somethingelse`,
314 | expected: `apt-get update && \
315 | X=1 Y='2' apt-get install -qy something && \
316 | apt-get remove -y somethingelse`,
317 | wantCommand: &ShellCommand{
318 | Parts: []*ShellPart{
319 | {
320 | Command: "apt-get",
321 | Args: []string{"update"},
322 | Delimiter: "&&",
323 | },
324 | {
325 | ExtraPre: `X=1 Y='2'`,
326 | Command: "apt-get",
327 | Args: []string{"install", "-qy", "something"},
328 | Delimiter: "&&",
329 | },
330 | {
331 | Command: "apt-get",
332 | Args: []string{"remove", "-y", "somethingelse"},
333 | },
334 | },
335 | },
336 | })
337 |
338 | for _, tt := range cases {
339 | t.Run(tt.name, func(t *testing.T) {
340 | got := ParseMultilineShell(tt.raw)
341 | if got == nil {
342 | t.Fatalf("%s: got nil shell command", tt.name)
343 | }
344 |
345 | if diff := cmp.Diff(tt.wantCommand, got); diff != "" {
346 | t.Errorf("%s: shell parse mismatch (-want, +got):\n%s\n", tt.name, diff)
347 | }
348 |
349 | // Make sure the command can reconstruct properly
350 | reconstructed := got.String()
351 | if diff := cmp.Diff(tt.expected, reconstructed); diff != "" {
352 | t.Errorf("%s: reconstructing shell (-want, +got):\n%s\n", tt.name, diff)
353 | }
354 | })
355 | }
356 | }
357 |
--------------------------------------------------------------------------------
/pkg/dfc/tar.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "strings"
10 | )
11 |
12 | // Command constants for tar commands
13 | const (
14 | CommandGNUTar = "tar"
15 | CommandBusyBoxTar = "tar"
16 | )
17 |
18 | // ConvertGNUTarToBusyboxTar converts a GNU tar command to the equivalent BusyBox tar command
19 | // BusyBox tar has fewer options and some different syntax compared to GNU tar
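    | //
    | // For example (mirroring the cases in tar_test.go):
    | //
    | //	in := &ShellPart{Command: "tar", Args: []string{"xzf", "archive.tar.gz"}}
    | //	out := ConvertGNUTarToBusyboxTar(in)
    | //	// out.Args == []string{"-x", "-z", "-f", "archive.tar.gz"}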
20 | func ConvertGNUTarToBusyboxTar(part *ShellPart) *ShellPart {
21 | if part.Command != CommandGNUTar {
22 | return part
23 | }
24 |
25 | // Create a new shell part with the same extra content and delimiter
26 | result := &ShellPart{
27 | ExtraPre: part.ExtraPre,
28 | Command: CommandBusyBoxTar,
29 | Delimiter: part.Delimiter,
30 | }
31 |
32 | // We'll use separate slices for options, files, and file option
33 | var options []string
34 | var files []string
35 | var hasFile bool
36 | var filename string
37 |
38 | i := 0
39 |
40 | // First pass to check for common options and gather information
41 | for i < len(part.Args) {
42 | arg := part.Args[i]
43 |
44 | // Handle the main operation flags (first argument usually)
45 | if i == 0 && !strings.HasPrefix(arg, "-") && len(arg) > 0 {
46 | // Convert combined options like "xvf" to individual options
47 | for _, c := range arg {
48 | switch c {
49 | case 'x':
50 | options = append(options, "-x")
51 | case 'c':
52 | options = append(options, "-c")
53 | case 'v':
54 | options = append(options, "-v")
55 | case 'f':
56 | hasFile = true
57 | // Skip adding -f here, we'll add it with the filename
58 | if i+1 < len(part.Args) {
59 | filename = part.Args[i+1]
60 | i++
61 | }
62 | case 'z':
63 | options = append(options, "-z")
64 | case 'j':
65 | options = append(options, "-j")
66 | default:
67 | // Pass through other single-letter options
68 | options = append(options, "-"+string(c))
69 | }
70 | }
71 | i++
72 | continue
73 | }
74 |
75 | // Handle --file=value format
76 | if strings.HasPrefix(arg, "--file=") {
77 | hasFile = true
78 | filename = arg[7:] // Extract part after --file=
79 | i++
80 | continue
81 | }
82 |
83 | // Handle long options and their short equivalents
84 | switch arg {
85 | // Extract operations
86 | case "--extract", "-x":
87 | options = append(options, "-x")
88 |
89 | // Create operations
90 | case "--create", "-c":
91 | options = append(options, "-c")
92 |
93 | // Verbose output
94 | case "--verbose", "-v":
95 | options = append(options, "-v")
96 |
97 | // File specification
98 | case "--file", "-f":
99 | hasFile = true
100 | if i+1 < len(part.Args) {
101 | filename = part.Args[i+1]
102 | i += 2
103 | continue
104 | }
105 |
106 | // Compress with gzip
107 | case "--gzip", "--gunzip", "-z":
108 | options = append(options, "-z")
109 |
110 | // Compress with bzip2
111 | case "--bzip2", "-j":
112 | options = append(options, "-j")
113 |
114 | // Change directory
115 | case "--directory", "-C":
116 | if i+1 < len(part.Args) {
117 | options = append(options, "-C", part.Args[i+1])
118 | i += 2
119 | continue
120 | }
121 |
122 | // Handle unsupported or ignored GNU tar options
123 | case "--same-owner", "--preserve-permissions", "--preserve-order",
124 | "--preserve", "--same-permissions", "--numeric-owner",
125 | "--overwrite", "--remove-files", "--ignore-failed-read":
126 | // These options are either default or not needed in BusyBox tar
127 | i++
128 | continue
129 |
130 | default:
131 | // Check if it's a long option we need to skip
132 | if strings.HasPrefix(arg, "--") {
133 | // Skip unknown long options
134 | i++
135 | continue
136 | }
137 |
138 | // If it doesn't start with -, it's probably a file or directory
139 | files = append(files, arg)
140 | }
141 | i++
142 | }
143 |
144 | // Build the final args in the correct order
145 | var resultArgs []string
146 |
147 | // First add all the options
148 | resultArgs = append(resultArgs, options...)
149 |
150 | // Then add the files list
151 | resultArgs = append(resultArgs, files...)
152 |
153 | // Finally add the file option at the end if present
154 | if hasFile && filename != "" {
155 | resultArgs = append(resultArgs, "-f", filename)
156 | }
157 |
158 | result.Args = resultArgs
159 | return result
160 | }
161 |
--------------------------------------------------------------------------------
/pkg/dfc/tar_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "testing"
10 | )
11 |
12 | func TestConvertGNUTarToBusyboxTar(t *testing.T) {
13 | testCases := []struct {
14 | name string
15 | input *ShellPart
16 | expected *ShellPart
17 | }{
18 | {
19 | name: "basic extract tar with short options",
20 | input: &ShellPart{
21 | Command: CommandGNUTar,
22 | Args: []string{"xf", "archive.tar"},
23 | },
24 | expected: &ShellPart{
25 | Command: CommandBusyBoxTar,
26 | Args: []string{"-x", "-f", "archive.tar"},
27 | },
28 | },
29 | {
30 | name: "create tar with verbose",
31 | input: &ShellPart{
32 | Command: CommandGNUTar,
33 | Args: []string{"cvf", "archive.tar", "file1", "file2"},
34 | },
35 | expected: &ShellPart{
36 | Command: CommandBusyBoxTar,
37 | Args: []string{"-c", "-v", "file1", "file2", "-f", "archive.tar"},
38 | },
39 | },
40 | {
41 | name: "extract with gzip",
42 | input: &ShellPart{
43 | Command: CommandGNUTar,
44 | Args: []string{"xzf", "archive.tar.gz"},
45 | },
46 | expected: &ShellPart{
47 | Command: CommandBusyBoxTar,
48 | Args: []string{"-x", "-z", "-f", "archive.tar.gz"},
49 | },
50 | },
51 | {
52 | name: "extract with bzip2",
53 | input: &ShellPart{
54 | Command: CommandGNUTar,
55 | Args: []string{"xjf", "archive.tar.bz2"},
56 | },
57 | expected: &ShellPart{
58 | Command: CommandBusyBoxTar,
59 | Args: []string{"-x", "-j", "-f", "archive.tar.bz2"},
60 | },
61 | },
62 | {
63 | name: "extract to specific directory",
64 | input: &ShellPart{
65 | Command: CommandGNUTar,
66 | Args: []string{"xf", "archive.tar", "-C", "/tmp/extract"},
67 | },
68 | expected: &ShellPart{
69 | Command: CommandBusyBoxTar,
70 | Args: []string{"-x", "-C", "/tmp/extract", "-f", "archive.tar"},
71 | },
72 | },
73 | {
74 | name: "with long options",
75 | input: &ShellPart{
76 | Command: CommandGNUTar,
77 | Args: []string{"--create", "--verbose", "--file=archive.tar", "file1", "file2"},
78 | },
79 | expected: &ShellPart{
80 | Command: CommandBusyBoxTar,
81 | Args: []string{"-c", "-v", "file1", "file2", "-f", "archive.tar"},
82 | },
83 | },
84 | {
85 | name: "skip unsupported GNU options",
86 | input: &ShellPart{
87 | Command: CommandGNUTar,
88 | Args: []string{"--extract", "--file", "archive.tar", "--same-owner", "--preserve-permissions", "file1"},
89 | },
90 | expected: &ShellPart{
91 | Command: CommandBusyBoxTar,
92 | Args: []string{"-x", "file1", "-f", "archive.tar"},
93 | },
94 | },
95 | {
96 | name: "preserves extra parts",
97 | input: &ShellPart{
98 | ExtraPre: "# Extract archive",
99 | Command: CommandGNUTar,
100 | Args: []string{"xf", "archive.tar"},
101 | Delimiter: "&&",
102 | },
103 | expected: &ShellPart{
104 | ExtraPre: "# Extract archive",
105 | Command: CommandBusyBoxTar,
106 | Args: []string{"-x", "-f", "archive.tar"},
107 | Delimiter: "&&",
108 | },
109 | },
110 | {
111 | name: "handles complex scenario",
112 | input: &ShellPart{
113 | Command: CommandGNUTar,
114 | Args: []string{"--extract", "--verbose", "--file", "archive.tar", "--directory", "/tmp", "--same-owner", "dir1", "file1"},
115 | },
116 | expected: &ShellPart{
117 | Command: CommandBusyBoxTar,
118 | Args: []string{"-x", "-v", "-C", "/tmp", "dir1", "file1", "-f", "archive.tar"},
119 | },
120 | },
121 | {
122 | name: "already busybox style - extract with individual options",
123 | input: &ShellPart{
124 | Command: CommandGNUTar,
125 | Args: []string{"-x", "-v", "-f", "archive.tar"},
126 | },
127 | expected: &ShellPart{
128 | Command: CommandBusyBoxTar,
129 | Args: []string{"-x", "-v", "-f", "archive.tar"},
130 | },
131 | },
132 | {
133 | name: "already busybox style - extract with directory option",
134 | input: &ShellPart{
135 | Command: CommandGNUTar,
136 | Args: []string{"-x", "-v", "-C", "/tmp", "-f", "archive.tar"},
137 | },
138 | expected: &ShellPart{
139 | Command: CommandBusyBoxTar,
140 | Args: []string{"-x", "-v", "-C", "/tmp", "-f", "archive.tar"},
141 | },
142 | },
143 | {
144 | name: "already busybox style - create with individual options",
145 | input: &ShellPart{
146 | Command: CommandGNUTar,
147 | Args: []string{"-c", "-v", "file1", "file2", "-f", "archive.tar"},
148 | },
149 | expected: &ShellPart{
150 | Command: CommandBusyBoxTar,
151 | Args: []string{"-c", "-v", "file1", "file2", "-f", "archive.tar"},
152 | },
153 | },
154 | {
155 | name: "already busybox style - extract with gzip",
156 | input: &ShellPart{
157 | Command: CommandGNUTar,
158 | Args: []string{"-x", "-z", "-f", "archive.tar.gz"},
159 | },
160 | expected: &ShellPart{
161 | Command: CommandBusyBoxTar,
162 | Args: []string{"-x", "-z", "-f", "archive.tar.gz"},
163 | },
164 | },
165 | }
166 |
167 | for _, tc := range testCases {
168 | t.Run(tc.name, func(t *testing.T) {
169 | result := ConvertGNUTarToBusyboxTar(tc.input)
170 |
171 | // Compare command
172 | if result.Command != tc.expected.Command {
173 | t.Errorf("Command: expected %q, got %q", tc.expected.Command, result.Command)
174 | }
175 |
176 | // Compare args
177 | if len(result.Args) != len(tc.expected.Args) {
178 | t.Errorf("Args length: expected %d, got %d\nExpected: %v\nGot: %v",
179 | len(tc.expected.Args), len(result.Args),
180 | tc.expected.Args, result.Args)
181 | } else {
182 | for i, arg := range tc.expected.Args {
183 | if result.Args[i] != arg {
184 | t.Errorf("Arg[%d]: expected %q, got %q", i, arg, result.Args[i])
185 | }
186 | }
187 | }
188 |
189 | // Compare ExtraPre and Delimiter
190 | if result.ExtraPre != tc.expected.ExtraPre {
191 | t.Errorf("ExtraPre: expected %q, got %q", tc.expected.ExtraPre, result.ExtraPre)
192 | }
193 | if result.Delimiter != tc.expected.Delimiter {
194 | t.Errorf("Delimiter: expected %q, got %q", tc.expected.Delimiter, result.Delimiter)
195 | }
196 | })
197 | }
198 | }
199 |
--------------------------------------------------------------------------------
/pkg/dfc/update.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Chainguard, Inc.
3 | SPDX-License-Identifier: Apache-2.0
4 | */
5 |
6 | package dfc
7 |
8 | import (
9 | "context"
10 | "crypto/sha256"
11 | "encoding/hex"
12 | "encoding/json"
13 | "fmt"
14 | "io"
15 | "net/http"
16 | "os"
17 | "path/filepath"
18 | "time"
19 |
20 | "github.com/adrg/xdg"
21 | "github.com/chainguard-dev/clog"
22 | )
23 |
24 | const (
25 | // defaultMappingsURL is the default URL for fetching mappings
26 | defaultMappingsURL = "https://raw.githubusercontent.com/chainguard-dev/dfc/refs/heads/main/pkg/dfc/builtin-mappings.yaml"
27 |
28 | // orgName is the organization name used in XDG paths
29 | orgName = "dev.chainguard.dfc"
30 | )
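    | 
    | // The on-disk layout produced by Update looks roughly like this
    | // (illustrative; the actual roots are resolved by the xdg library):
    | //
    | //	$XDG_CACHE_HOME/dev.chainguard.dfc/mappings/
    | //		oci-layout
    | //		index.json
    | //		blobs/sha256/<sha256 of the downloaded mappings>
    | //
    | // with $XDG_CONFIG_HOME/dev.chainguard.dfc/builtin-mappings.yaml kept as a
    | // symlink to the latest blob.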
31 |
32 | // UpdateOptions configures the update behavior
33 | type UpdateOptions struct {
34 | // UserAgent is the user agent string to use for update requests
35 | UserAgent string
36 |
37 | // MappingsURL is the URL to fetch the latest mappings from
38 | MappingsURL string
39 | }
40 |
41 | // ociLayout represents the oci-layout file
42 | type ociLayout struct {
43 | ImageLayoutVersion string `json:"imageLayoutVersion"`
44 | }
45 |
46 | // ociIndex represents the index.json file
47 | type ociIndex struct {
48 | SchemaVersion int `json:"schemaVersion"`
49 | MediaType string `json:"mediaType"`
50 | Manifests []ociDescriptor `json:"manifests"`
51 | }
52 |
53 | // ociDescriptor represents a descriptor in the index
54 | type ociDescriptor struct {
55 | MediaType string `json:"mediaType"`
56 | Digest string `json:"digest"`
57 | Size int64 `json:"size"`
58 | Annotations map[string]string `json:"annotations,omitempty"`
59 | }
60 |
61 | // getCacheDir returns the XDG cache directory for dfc using the xdg library
62 | func getCacheDir() string {
63 | // Use xdg library to get the cache directory path
64 | return filepath.Join(xdg.CacheHome, orgName, "mappings")
65 | }
66 |
67 | // getConfigDir returns the XDG config directory for dfc using the xdg library
68 | func getConfigDir() string {
69 | // Use xdg library to get the config directory
70 | return xdg.ConfigHome
71 | }
72 |
73 | // getMappingsConfigPath returns the path to the builtin-mappings.yaml file in XDG_CONFIG_HOME
74 | func getMappingsConfigPath() (string, error) {
75 | // Use xdg library's ConfigFile to get the proper location
76 | mappingsPath, err := xdg.ConfigFile(filepath.Join(orgName, "builtin-mappings.yaml"))
77 | if err != nil {
78 | return "", fmt.Errorf("getting mappings config path: %w", err)
79 | }
80 |
81 | // Ensure parent directory exists
82 | if err := os.MkdirAll(filepath.Dir(mappingsPath), 0755); err != nil {
83 | return "", fmt.Errorf("creating config directory: %w", err)
84 | }
85 |
86 | return mappingsPath, nil
87 | }
88 |
89 | // getMappingsConfig reads and returns the contents of the builtin-mappings.yaml file
90 | func getMappingsConfig() ([]byte, error) {
91 | mappingsPath, err := getMappingsConfigPath()
92 | if err != nil {
93 | return nil, err
94 | }
95 |
96 | // Check if the file exists
97 | if _, err := os.Stat(mappingsPath); err != nil {
98 | if os.IsNotExist(err) {
99 | // File doesn't exist, return nil with no error
100 | return nil, nil
101 | }
102 | return nil, fmt.Errorf("checking mappings file: %w", err)
103 | }
104 |
105 | // Read the mappings file
106 | data, err := os.ReadFile(mappingsPath)
107 | if err != nil {
108 | return nil, fmt.Errorf("reading mappings file: %w", err)
109 | }
110 |
111 | return data, nil
112 | }
113 |
114 | // initOCILayout initializes the OCI layout in the cache directory
115 | func initOCILayout(cacheDir string) error {
116 | // Create the blobs/sha256 directory
117 | blobsDir := filepath.Join(cacheDir, "blobs", "sha256")
118 | if err := os.MkdirAll(blobsDir, 0755); err != nil {
119 | return fmt.Errorf("creating blobs directory: %w", err)
120 | }
121 |
122 | // Create the oci-layout file
123 | layout := ociLayout{ImageLayoutVersion: "1.0.0"}
124 | layoutData, err := json.Marshal(layout)
125 | if err != nil {
126 | return fmt.Errorf("marshalling oci-layout: %w", err)
127 | }
128 |
129 | if err := os.WriteFile(filepath.Join(cacheDir, "oci-layout"), layoutData, 0600); err != nil {
130 | return fmt.Errorf("writing oci-layout file: %w", err)
131 | }
132 |
133 | // Create an empty index.json file
134 | index := ociIndex{
135 | SchemaVersion: 2,
136 | MediaType: "application/vnd.oci.image.index.v1+json",
137 | Manifests: []ociDescriptor{},
138 | }
139 |
140 | indexData, err := json.MarshalIndent(index, "", " ")
141 | if err != nil {
142 | return fmt.Errorf("marshalling index.json: %w", err)
143 | }
144 |
145 | if err := os.WriteFile(filepath.Join(cacheDir, "index.json"), indexData, 0600); err != nil {
146 | return fmt.Errorf("writing index.json file: %w", err)
147 | }
148 |
149 | return nil
150 | }
151 |
152 | // updateIndexJSON updates the index.json file with the new mapping blob
153 | func updateIndexJSON(cacheDir, digest string, size int64) error {
154 | // Read the current index.json
155 | indexPath := filepath.Join(cacheDir, "index.json")
156 | indexData, err := os.ReadFile(indexPath)
157 | if err != nil && !os.IsNotExist(err) {
158 | return fmt.Errorf("reading index.json: %w", err)
159 | }
160 |
161 | var index ociIndex
162 | if len(indexData) > 0 {
163 | if err := json.Unmarshal(indexData, &index); err != nil {
164 | return fmt.Errorf("unmarshalling index.json: %w", err)
165 | }
166 | } else {
167 | // Initialize a new index
168 | index = ociIndex{
169 | SchemaVersion: 2,
170 | MediaType: "application/vnd.oci.image.index.v1+json",
171 | Manifests: []ociDescriptor{},
172 | }
173 | }
174 |
175 | // Remove any existing entries with this digest
176 | filteredManifests := []ociDescriptor{}
177 | for _, manifest := range index.Manifests {
178 | // Skip if it has the same digest
179 | if manifest.Digest == digest {
180 | continue
181 | }
182 | filteredManifests = append(filteredManifests, manifest)
183 | }
184 |
185 | // Create a new descriptor for the mapping
186 | now := time.Now().UTC().Format(time.RFC3339)
187 |
188 | descriptor := ociDescriptor{
189 | MediaType: "application/yaml",
190 | Digest: digest,
191 | Size: size,
192 | Annotations: map[string]string{
193 | "vnd.chainguard.dfc.mappings.downloadedAt": now,
194 | },
195 | }
196 |
197 | // Add the new descriptor
198 | filteredManifests = append(filteredManifests, descriptor)
199 | index.Manifests = filteredManifests
200 |
201 | // Write the updated index.json
202 | updatedIndexData, err := json.MarshalIndent(index, "", " ")
203 | if err != nil {
204 | return fmt.Errorf("marshalling updated index.json: %w", err)
205 | }
206 |
207 | if err := os.WriteFile(indexPath, updatedIndexData, 0600); err != nil {
208 | return fmt.Errorf("writing updated index.json: %w", err)
209 | }
210 |
211 | return nil
212 | }
213 |
214 | // Update fetches the latest builtin mappings, caches them in an OCI layout under the XDG cache directory, and symlinks the active copy into the XDG config directory
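    | //
    | // A minimal sketch of intended use (error handling and the UserAgent value
    | // are illustrative):
    | //
    | //	opts := UpdateOptions{UserAgent: "dfc/1.0"}
    | //	if err := Update(ctx, opts); err != nil {
    | //		log.Warn("Failed to update mappings", "error", err)
    | //	}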
215 | func Update(ctx context.Context, opts UpdateOptions) error {
216 | log := clog.FromContext(ctx)
217 | log.Info("Checking for mappings update...")
218 |
219 | // Set default MappingsURL if not provided
220 | mappingsURL := opts.MappingsURL
221 | if mappingsURL == "" {
222 | mappingsURL = defaultMappingsURL
223 | }
224 |
225 | // Create a new HTTP request
226 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, mappingsURL, nil)
227 | if err != nil {
228 | return fmt.Errorf("creating request: %w", err)
229 | }
230 |
231 | // Set the User-Agent header
232 | userAgent := opts.UserAgent
233 | if userAgent == "" {
234 | userAgent = "dfc/dev"
235 | }
236 | req.Header.Set("User-Agent", userAgent)
237 |
238 | // Send the request
239 | log.Debug("Fetching mappings", "url", mappingsURL)
240 | client := http.DefaultClient
241 | resp, err := client.Do(req)
242 | if err != nil {
243 | return fmt.Errorf("fetching mappings: %w", err)
244 | }
245 | defer resp.Body.Close()
246 |
247 | // Check the response status
248 | if resp.StatusCode != http.StatusOK {
249 | return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
250 | }
251 |
252 | // Read the response body
253 | body, err := io.ReadAll(resp.Body)
254 | if err != nil {
255 | return fmt.Errorf("reading response body: %w", err)
256 | }
257 |
258 | // Calculate SHA256 hash
259 | hash := sha256.Sum256(body)
260 | hashString := hex.EncodeToString(hash[:])
261 | digestString := "sha256:" + hashString
262 |
263 | // Get the XDG cache directory
264 | cacheDir := getCacheDir()
265 |
266 | // Check if the cache directory exists, if not initialize the OCI layout
267 | if _, err := os.Stat(cacheDir); os.IsNotExist(err) {
268 | // Create the directory structure
269 | if err := os.MkdirAll(cacheDir, 0755); err != nil {
270 | return fmt.Errorf("creating cache directory structure: %w", err)
271 | }
272 |
273 | if err := initOCILayout(cacheDir); err != nil {
274 | return fmt.Errorf("initializing OCI layout: %w", err)
275 | }
276 | }
277 |
278 | // Check if we already have this mapping file
279 | blobPath := filepath.Join(cacheDir, "blobs", "sha256", hashString)
280 | if _, err := os.Stat(blobPath); err == nil {
281 | // Get the XDG config directory for the symlink
282 | configDir := getConfigDir()
283 |
284 | // Ensure the nested config directory exists
285 | nestedConfigDir := filepath.Join(configDir, orgName)
286 | if err := os.MkdirAll(nestedConfigDir, 0755); err != nil {
287 | return fmt.Errorf("creating nested config directory: %w", err)
288 | }
289 |
290 | // Check if the symlink exists and points to the correct file
291 | symlinkPath := filepath.Join(nestedConfigDir, "builtin-mappings.yaml")
292 | currentTarget, err := os.Readlink(symlinkPath)
293 | if err != nil || currentTarget != blobPath {
294 | // Remove existing symlink if it exists
295 | _ = os.Remove(symlinkPath)
296 | // Create new symlink
297 | if err := os.Symlink(blobPath, symlinkPath); err != nil {
298 | return fmt.Errorf("creating symlink: %w", err)
299 | }
300 | }
301 |
302 | log.Info("Already have latest mappings", "location", symlinkPath)
303 | } else {
304 | log.Info("Saving latest version of mappings", "location", blobPath)
305 |
306 | // Save the mapping file
307 | blobsDir := filepath.Join(cacheDir, "blobs", "sha256")
308 | if err := os.MkdirAll(blobsDir, 0755); err != nil {
309 | return fmt.Errorf("creating blobs directory: %w", err)
310 | }
311 |
312 | if err := os.WriteFile(blobPath, body, 0600); err != nil {
313 | return fmt.Errorf("writing mapping file: %w", err)
314 | }
315 |
316 | // Update the index.json file
317 | if err := updateIndexJSON(cacheDir, digestString, int64(len(body))); err != nil {
318 | return fmt.Errorf("updating index.json: %w", err)
319 | }
320 |
321 | // Get the XDG config directory for the symlink
322 | configDir := getConfigDir()
323 |
324 | // Ensure the nested config directory exists
325 | nestedConfigDir := filepath.Join(configDir, orgName)
326 | if err := os.MkdirAll(nestedConfigDir, 0755); err != nil {
327 | return fmt.Errorf("creating nested config directory: %w", err)
328 | }
329 |
330 | // Create or update the symlink to point to the latest mappings file
331 | symlinkPath := filepath.Join(nestedConfigDir, "builtin-mappings.yaml")
332 | 
333 | // Remove existing symlink if it exists
334 | _ = os.Remove(symlinkPath)
335 | // Create new symlink
336 | if err := os.Symlink(blobPath, symlinkPath); err != nil {
337 | return fmt.Errorf("creating symlink: %w", err)
338 | }
339 | log.Info("Created symlink to latest mappings", "location", symlinkPath)
340 | }
341 |
342 | log.Info("Mappings checksum", "sha256", hashString)
343 |
344 | return nil
345 | }
346 |
--------------------------------------------------------------------------------
/testdata/README.md:
--------------------------------------------------------------------------------
1 | ## Testdata
2 |
3 | This directory contains Dockerfiles copied from various open source projects.
4 | We use the before/after Dockerfile pairs in internal tests of the conversion logic.
5 |
6 | ### Get all test names
7 |
8 | ```sh
9 | TESTNAMES="$(find testdata/ | grep '\.before\.' | xargs -L 1 basename | sed 's/\.before\..*//' | sort)"
10 | ```
11 |
12 | Run something for each test name (print them all with `echo`):
13 | ```sh
14 | for NAME in $TESTNAMES; do echo $NAME; done
15 | ```
16 |
17 | ### Running dfc conversion on a test Dockerfile
18 |
19 | ```sh
20 | go run . testdata/$NAME.before.Dockerfile
21 | ```
22 |
23 | ### Regenerating expected conversion outputs (after files)
24 |
25 | Single (gcds-hugo):
26 |
27 | ```sh
28 | NAME=gcds-hugo && go run . --org chainguard-private testdata/$NAME.before.Dockerfile > testdata/$NAME.after.Dockerfile
29 | ```
30 |
31 | All:
32 |
33 | ```sh
34 | for NAME in $TESTNAMES; do go run . --org chainguard testdata/$NAME.before.Dockerfile > testdata/$NAME.after.Dockerfile; done
35 | ```
36 |
37 | ### Build the before Dockerfile
38 |
39 | For the original version of the Dockerfile (gcds-hugo):
40 | ```sh
41 | NAME=gcds-hugo WORKDIR=$([ -d testdata/$NAME ] && echo testdata/$NAME || echo .) && ( \
42 | set -x; docker build -t dfc-$NAME-before:dev -f testdata/$NAME.before.Dockerfile $WORKDIR)
43 | ```
44 |
45 | ### Build the after Dockerfile
46 |
47 | For the converted version of the Dockerfile (after dfc conversion has been applied, or the expected output):
48 |
49 | ```sh
50 | NAME=django WORKDIR=$([ -d testdata/$NAME ] && echo testdata/$NAME || echo .) && ( \
51 | set -x; docker build -t dfc-$NAME-after:dev -f testdata/$NAME.after.Dockerfile $WORKDIR)
52 | ```
53 |
54 | ### Create a new case from a file on disk
55 |
56 | ```sh
57 | NAME=my-image && cp path/to/Dockerfile testdata/$NAME.before.Dockerfile && ( \
58 | set -x; go run . testdata/$NAME.before.Dockerfile > testdata/$NAME.after.Dockerfile)
59 | ```
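    | 
    | ### Check a conversion against the expected output
    | 
    | A quick spot-check (illustrative; pass the same `--org` value that was used to generate the after file):
    | 
    | ```sh
    | diff <(go run . --org chainguard testdata/$NAME.before.Dockerfile) testdata/$NAME.after.Dockerfile
    | ```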
60 |
--------------------------------------------------------------------------------
/testdata/apt-add-repo.after.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM cgr.dev/ORG/chainguard-base:latest
2 | USER root
3 | RUN apk add --no-cache libreoffice
4 |
--------------------------------------------------------------------------------
/testdata/apt-add-repo.before.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:bookworm
2 | RUN apt-get update \
3 | && apt-get install -y software-properties-common=0.99.22.9 \
4 | && add-apt-repository ppa:libreoffice/libreoffice-still \
5 | && apt-get install -y libreoffice \
6 | && apt-get clean
--------------------------------------------------------------------------------
/testdata/canada-ca-tracker.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/canada-ca/tracker/blob/master/frontend/Dockerfile
2 |
3 | FROM cgr.dev/ORG/node:20.16-dev AS build-env
4 |
5 | WORKDIR /app
6 |
7 | # Copy in whatever isn't filtered by .dockerignore
8 | COPY . .
9 |
10 | RUN npm ci && npm run build && npm prune --production
11 |
12 | FROM cgr.dev/ORG/node:latest
13 |
14 | ENV HOST 0.0.0.0
15 | ENV PORT 3000
16 |
17 | WORKDIR /app
18 |
19 | COPY --from=build-env /app .
20 |
21 | ENV NODE_ENV production
22 | # https://github.com/webpack/webpack/issues/14532#issuecomment-947012063
23 | ENV NODE_OPTIONS=--openssl-legacy-provider
24 |
25 | USER nonroot
26 | EXPOSE 3000
27 |
28 | CMD ["index.js"]
29 |
--------------------------------------------------------------------------------
/testdata/canada-ca-tracker.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/canada-ca/tracker/blob/master/frontend/Dockerfile
2 |
3 | FROM node:20.16-alpine3.19 as build-env
4 |
5 | WORKDIR /app
6 |
7 | # Copy in whatever isn't filtered by .dockerignore
8 | COPY . .
9 |
10 | RUN npm ci && npm run build && npm prune --production
11 |
12 | FROM gcr.io/distroless/nodejs20-debian12
13 |
14 | ENV HOST 0.0.0.0
15 | ENV PORT 3000
16 |
17 | WORKDIR /app
18 |
19 | COPY --from=build-env /app .
20 |
21 | ENV NODE_ENV production
22 | # https://github.com/webpack/webpack/issues/14532#issuecomment-947012063
23 | ENV NODE_OPTIONS=--openssl-legacy-provider
24 |
25 | USER nonroot
26 | EXPOSE 3000
27 |
28 | CMD ["index.js"]
29 |
--------------------------------------------------------------------------------
/testdata/digest.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # just test that the digest is stripped
2 | FROM cgr.dev/ORG/python:3.12-dev
3 | USER root
4 |
5 | RUN apk add --no-cache gettext git libpq make rsync
6 |
--------------------------------------------------------------------------------
/testdata/digest.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # just test that the digest is stripped
2 | FROM python:3.12-slim-bookworm@sha256:a866731a6b71c4a194a845d86e06568725e430ed21821d0c52e4efb385cf6c6f
3 |
4 | RUN apt-get update \
5 | && apt-get install --assume-yes --no-install-recommends \
6 | gettext \
7 | git \
8 | libpq5 \
9 | make \
10 | rsync
11 |
--------------------------------------------------------------------------------
/testdata/distroless-go-integration/go.mod:
--------------------------------------------------------------------------------
1 | // from https://github.com/GoogleContainerTools/distroless/blob/main/examples/go/go.mod
2 |
3 | module github.com/GoogleContainerTools/distroless/examples/go
4 |
5 | go 1.18
6 |
--------------------------------------------------------------------------------
/testdata/distroless-go-integration/main.go:
--------------------------------------------------------------------------------
1 | // from https://github.com/GoogleContainerTools/distroless/blob/main/examples/go/main.go
2 |
3 | // Copyright 2017 Google Inc. All rights reserved.
4 |
5 | // Licensed under the Apache License, Version 2.0 (the "License");
6 | // you may not use this file except in compliance with the License.
7 | // You may obtain a copy of the License at
8 |
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | // Unless required by applicable law or agreed to in writing, software
12 | // distributed under the License is distributed on an "AS IS" BASIS,
13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | // See the License for the specific language governing permissions and
15 | // limitations under the License.
16 |
17 | package main
18 |
19 | import "fmt"
20 |
21 | func main() {
22 | fmt.Println("Hello, world!")
23 | }
24 |
--------------------------------------------------------------------------------
/testdata/distroless-go.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/GoogleContainerTools/distroless/blob/main/examples/go/Dockerfile
2 |
3 | FROM cgr.dev/ORG/go:1.22-dev AS build
4 |
5 | WORKDIR /go/src/app
6 | COPY . .
7 |
8 | RUN go mod download
9 | RUN go vet -v
10 | RUN go test -v
11 |
12 | RUN CGO_ENABLED=0 go build -o /go/bin/app
13 |
14 | FROM cgr.dev/ORG/static:latest
15 |
16 | COPY --from=build /go/bin/app /
17 | CMD ["/app"]
18 |
--------------------------------------------------------------------------------
/testdata/distroless-go.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/GoogleContainerTools/distroless/blob/main/examples/go/Dockerfile
2 |
3 | FROM golang:1.22 as build
4 |
5 | WORKDIR /go/src/app
6 | COPY . .
7 |
8 | RUN go mod download
9 | RUN go vet -v
10 | RUN go test -v
11 |
12 | RUN CGO_ENABLED=0 go build -o /go/bin/app
13 |
14 | FROM gcr.io/distroless/static-debian12
15 |
16 | COPY --from=build /go/bin/app /
17 | CMD ["/app"]
18 |
--------------------------------------------------------------------------------
/testdata/django-integration/requirements/common.txt:
--------------------------------------------------------------------------------
1 | # from https://github.com/django/djangoproject.com/blob/main/requirements/common.txt
2 | Babel==2.17.0
3 | django-contact-form==5.1.1
4 | django-countries==7.6.1
5 | django-hosts==5.1
6 | django-money==3.5.3
7 | django-push @ git+https://github.com/brutasse/django-push.git@22fda99641cfbd2f3075a723d92652a8e38220a5
8 | django-read-only==1.19.0
9 | django-recaptcha==4.0.0
10 | django-registration-redux==2.13
11 | Django==5.1.7
12 | docutils==0.21.2
13 | feedparser==6.0.11
14 | Jinja2==3.1.6
15 | libsass==0.23.0
16 | Markdown==3.7
17 | Pillow==11.1.0
18 | psycopg[c]==3.2.5
19 | Pygments==2.19.1
20 | pykismet3==0.1.1
21 | requests==2.32.3
22 | sorl-thumbnail==12.11.0
23 | Sphinx==8.1.3
24 | stripe==11.5.0
25 | time-machine==2.16.0
--------------------------------------------------------------------------------
/testdata/django-integration/requirements/prod.txt:
--------------------------------------------------------------------------------
1 | # from https://github.com/django/djangoproject.com/blob/main/requirements/prod.txt
2 | -r common.txt
3 | gunicorn==23.0.0
4 | redis==5.2.1
5 | sentry-sdk==2.22.0
--------------------------------------------------------------------------------
/testdata/django.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://github.com/django/djangoproject.com/blob/main/Dockerfile
2 |
3 | # pull official base image
4 | FROM cgr.dev/ORG/python:3.12-dev
5 | USER root
6 |
7 | # set work directory
8 | WORKDIR /usr/src/app
9 |
10 | # set environment variables
11 | ENV PYTHONDONTWRITEBYTECODE 1
12 | ENV PYTHONUNBUFFERED 1
13 |
14 | # install deb packages
15 | RUN apk add --no-cache gettext git libpq make rsync
16 |
17 | ARG REQ_FILE=requirements/prod.txt
18 |
19 | # install python dependencies
20 | COPY ./requirements ./requirements
21 | RUN apk add --no-cache gcc glibc-dev postgresql-dev zlib-dev && \
22 | python3 -m pip install --no-cache-dir -r ${REQ_FILE}
23 |
24 | # copy project
25 | COPY . .
26 |
27 | # ENTRYPOINT is specified only in the local docker-compose.yml to avoid
28 | # accidentally running it in deployed environments.
29 |
--------------------------------------------------------------------------------
/testdata/django.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://github.com/django/djangoproject.com/blob/main/Dockerfile
2 |
3 | # pull official base image
4 | FROM python:3.12-slim-bookworm@sha256:abc
5 |
6 | # set work directory
7 | WORKDIR /usr/src/app
8 |
9 | # set environment variables
10 | ENV PYTHONDONTWRITEBYTECODE 1
11 | ENV PYTHONUNBUFFERED 1
12 |
13 | # install deb packages
14 | RUN apt-get update \
15 | && apt-get install --assume-yes --no-install-recommends \
16 | gettext \
17 | git \
18 | libpq5 \
19 | make \
20 | rsync \
21 | && rm -rf /var/lib/apt/lists/*
22 |
23 | ARG REQ_FILE=requirements/prod.txt
24 |
25 | # install python dependencies
26 | COPY ./requirements ./requirements
27 | RUN apt-get update \
28 | && apt-get install --assume-yes --no-install-recommends \
29 | g++ \
30 | gcc \
31 | libc6-dev \
32 | libpq-dev \
33 | zlib1g-dev \
34 | && python3 -m pip install --no-cache-dir -r ${REQ_FILE} \
35 | && apt-get purge --assume-yes --auto-remove \
36 | g++ \
37 | gcc \
38 | libc6-dev \
39 | libpq-dev \
40 | zlib1g-dev \
41 | && rm -rf /var/lib/apt/lists/*
42 |
43 | # copy project
44 | COPY . .
45 |
46 | # ENTRYPOINT is specified only in the local docker-compose.yml to avoid
47 | # accidentally running it in deployed environments.
48 |
--------------------------------------------------------------------------------
/testdata/from-with-platform.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # Originally from https://github.com/chainguard-dev/dfc/issues/90
2 | FROM --platform=linux/amd64 cgr.dev/ORG/go:1.23-dev AS build
3 | USER root
4 | RUN apk add --no-cache make
5 | WORKDIR /usr/src/app
6 | COPY . .
7 | FROM --platform=linux/amd64 cgr.dev/ORG/chainguard-base:latest
8 | WORKDIR /usr/src/app
9 | EXPOSE 8880 8881 8882
10 | CMD ["./init.sh"]
11 |
--------------------------------------------------------------------------------
/testdata/from-with-platform.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # Originally from https://github.com/chainguard-dev/dfc/issues/90
2 | FROM --platform=linux/amd64 golang:1.23.8-bookworm AS build
3 | RUN apt-get update && apt-get install make -y
4 | WORKDIR /usr/src/app
5 | COPY . .
6 | FROM --platform=linux/amd64 ubuntu:latest
7 | WORKDIR /usr/src/app
8 | EXPOSE 8880 8881 8882
9 | CMD ["./init.sh"]
10 |
--------------------------------------------------------------------------------
/testdata/gcds-hugo.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/gccloudone/gcds-hugo/blob/main/.devcontainer/Dockerfile
2 |
3 | ARG NODE_VERSION=18
4 | FROM cgr.dev/ORG/node:${NODE_VERSION}-dev
5 | USER root
6 |
7 | ARG HUGO_VERSION=0.126.3
8 | ARG GO_VERSION=1.22.3
9 |
10 | RUN apk add --no-cache ca-certificates curl git make openssl
11 |
12 | RUN ARCH=$(uname -m) && \
13 | if [ "$ARCH" = "aarch64" ] ; \
14 | then ARCH=arm64 ; \
15 | else ARCH=amd64 ; \
16 | fi && \
17 | echo "Architecture: $ARCH" && \
18 | wget -O hugo_extended_${HUGO_VERSION}.tar.gz https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-${ARCH}.tar.gz && \
19 | tar -x -f hugo_extended_${HUGO_VERSION}.tar.gz && \
20 | mv hugo /usr/bin/hugo && \
21 | rm hugo_extended_${HUGO_VERSION}.tar.gz && \
22 | echo "Hugo ${HUGO_VERSION} installed" && \
23 | wget -O go${GO_VERSION}.linux-${ARCH}.tar.gz https://dl.google.com/go/go${GO_VERSION}.linux-${ARCH}.tar.gz && \
24 | tar -C /usr/local -xzf go${GO_VERSION}.linux-${ARCH}.tar.gz && \
25 | rm go${GO_VERSION}.linux-${ARCH}.tar.gz && \
26 | echo "Go ${GO_VERSION} installed"
27 |
28 | ENV PATH=$PATH:/usr/local/go/bin
29 |
30 | USER node
--------------------------------------------------------------------------------
/testdata/gcds-hugo.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # from https://github.com/gccloudone/gcds-hugo/blob/main/.devcontainer/Dockerfile
2 |
3 | ARG NODE_VERSION=18
4 | FROM node:${NODE_VERSION}
5 |
6 | ARG HUGO_VERSION=0.126.3
7 | ARG GO_VERSION=1.22.3
8 |
9 | RUN apt-get update && \
10 | apt-get install -y ca-certificates openssl git curl make && \
11 | rm -rf /var/lib/apt/lists/*
12 |
13 | RUN ARCH=$(uname -m) && \
14 | if [ "$ARCH" = "aarch64" ]; then ARCH=arm64; else ARCH=amd64; fi && \
15 | echo "Architecture: $ARCH" && \
16 | wget -O hugo_extended_${HUGO_VERSION}.tar.gz https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_linux-${ARCH}.tar.gz && \
17 | tar xf hugo_extended_${HUGO_VERSION}.tar.gz && \
18 | mv hugo /usr/bin/hugo && \
19 | rm hugo_extended_${HUGO_VERSION}.tar.gz && \
20 | echo "Hugo ${HUGO_VERSION} installed" && \
21 | wget -O go${GO_VERSION}.linux-${ARCH}.tar.gz https://dl.google.com/go/go${GO_VERSION}.linux-${ARCH}.tar.gz && \
22 | tar -C /usr/local -xzf go${GO_VERSION}.linux-${ARCH}.tar.gz && \
23 | rm go${GO_VERSION}.linux-${ARCH}.tar.gz && \
24 | echo "Go ${GO_VERSION} installed"
25 |
26 | ENV PATH=$PATH:/usr/local/go/bin
27 |
28 | USER node
--------------------------------------------------------------------------------
/testdata/golang-multi-stage.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # Created from patterns seen at https://github.com/AlphaWong/go-test-multi-stage-build
2 | # This Dockerfile demonstrates a multi-stage Go build with package optimization
3 |
4 | # Build stage
5 | FROM cgr.dev/ORG/go:1.20-dev AS builder
6 | USER root
7 |
8 | WORKDIR /app
9 |
10 | # Copy go.mod and go.sum files
11 | COPY go.mod go.sum ./
12 |
13 | # Download dependencies
14 | RUN go mod download
15 |
16 | # Copy source code
17 | COPY . .
18 |
19 | # Build the application
20 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o app .
21 |
22 | # Install UPX for binary compression (optional)
23 | RUN apk add --no-cache wget xz
24 | RUN wget -P /tmp/ https://github.com/upx/upx/releases/download/v3.95/upx-3.95-amd64_linux.tar.xz
25 | RUN tar -x -v -C /tmp -f /tmp/upx-3.95-amd64_linux.tar.xz
26 | RUN mv /tmp/upx-3.95-amd64_linux/upx /usr/local/bin/upx
27 |
28 | # Compress the binary to reduce size
29 | RUN upx --ultra-brute -qq app && \
30 | upx -t app
31 |
32 | # Final stage
33 | FROM cgr.dev/ORG/chainguard-base:latest
34 | USER root
35 |
36 | # Install any runtime dependencies
37 | RUN apk add --no-cache ca-certificates tzdata
38 |
39 | WORKDIR /app
40 |
41 | # Copy the binary from the builder stage
42 | COPY --from=builder /app/app .
43 |
44 | # Create a non-root user
45 | RUN adduser -D appuser
46 | USER appuser
47 |
48 | # Command to run
49 | ENTRYPOINT ["/app/app"]
--------------------------------------------------------------------------------
/testdata/golang-multi-stage.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # Created from patterns seen at https://github.com/AlphaWong/go-test-multi-stage-build
2 | # This Dockerfile demonstrates a multi-stage Go build with package optimization
3 |
4 | # Build stage
5 | FROM golang:1.20.1 AS builder
6 |
7 | WORKDIR /app
8 |
9 | # Copy go.mod and go.sum files
10 | COPY go.mod go.sum ./
11 |
12 | # Download dependencies
13 | RUN go mod download
14 |
15 | # Copy source code
16 | COPY . .
17 |
18 | # Build the application
19 | RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o app .
20 |
21 | # Install UPX for binary compression (optional)
22 | RUN apt-get update && apt-get install -y wget xz-utils
23 | RUN wget -P /tmp/ https://github.com/upx/upx/releases/download/v3.95/upx-3.95-amd64_linux.tar.xz
24 | RUN tar xvf /tmp/upx-3.95-amd64_linux.tar.xz -C /tmp
25 | RUN mv /tmp/upx-3.95-amd64_linux/upx /usr/local/bin/upx
26 |
27 | # Compress the binary to reduce size
28 | RUN upx --ultra-brute -qq app && \
29 | upx -t app
30 |
31 | # Final stage
32 | FROM alpine:3.17
33 |
34 | # Install any runtime dependencies
35 | RUN apk add --no-cache ca-certificates tzdata
36 |
37 | WORKDIR /app
38 |
39 | # Copy the binary from the builder stage
40 | COPY --from=builder /app/app .
41 |
42 | # Create a non-root user
43 | RUN adduser -D appuser
44 | USER appuser
45 |
46 | # Command to run
47 | ENTRYPOINT ["/app/app"]
--------------------------------------------------------------------------------
/testdata/kind.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://raw.githubusercontent.com/kubernetes-sigs/kind/refs/heads/main/images/base/Dockerfile
2 |
3 | # Copyright 2018 The Kubernetes Authors.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # kind node base image
18 | #
19 | # For systemd + docker configuration used below, see the following references:
20 | # https://systemd.io/CONTAINER_INTERFACE/
21 |
22 | # start from debian slim, this image is reasonably small as a starting point
23 | # for a kubernetes node image, it doesn't contain much (anything?) we don't need
24 | # this stage will install basic files and packages
25 | ARG BASE_IMAGE=cgr.dev/ORG/chainguard-base:latest
26 | FROM $BASE_IMAGE AS base
27 |
28 | # copy in static files
29 | # all scripts and directories are 0755 (rwx r-x r-x)
30 | # all non-scripts are 0644 (rw- r-- r--)
31 | COPY --chmod=0755 files/usr/local/bin/* /usr/local/bin/
32 |
33 | COPY --chmod=0644 files/kind/ /kind/
34 | # COPY only applies to files, not the directory itself, so the permissions are
35 | # fixed in RUN below with a chmod.
36 | COPY --chmod=0755 files/kind/bin/ /kind/bin/
37 |
38 | COPY --chmod=0644 files/LICENSES/* /LICENSES/*
39 | COPY --chmod=0644 files/etc/* /etc/
40 | COPY --chmod=0644 files/etc/containerd/* /etc/containerd/
41 | COPY --chmod=0644 files/etc/default/* /etc/default/
42 | COPY --chmod=0644 files/etc/sysctl.d/* /etc/sysctl.d/
43 | COPY --chmod=0644 files/etc/systemd/system/* /etc/systemd/system/
44 | COPY --chmod=0644 files/etc/systemd/system/kubelet.service.d/* /etc/systemd/system/kubelet.service.d/
45 |
46 | # Install dependencies, first from apt, then from release tarballs.
47 | # NOTE: we use one RUN to minimize layers.
48 | #
49 | # The base image already has a basic userspace + apt but we need to install more packages.
50 | # Packages installed are broken down into (each on a line):
51 | # - packages needed to run services (systemd)
52 | # - packages needed for kubernetes components
53 | # - packages needed for network-backed storage with kubernetes
54 | # - packages needed by the container runtime
55 | # - misc packages kind uses itself
56 | # - packages that provide semi-core kubernetes functionality
57 | # After installing packages we cleanup by:
58 | # - removing unwanted systemd services
59 | # - disabling kmsg in journald (these log entries would be confusing)
60 | #
61 | # Then we install containerd from our nightly build infrastructure, as this
62 | # builds for multiple architectures and allows us to upgrade to patched releases
63 | # more quickly.
64 | #
65 | # Next we download and extract crictl and CNI plugin binaries from upstream.
66 | #
67 | # Next we ensure the /etc/kubernetes/manifests directory exists. Normally
68 | # a kubeadm debian / rpm package would ensure that this exists but we install
69 | # freshly built binaries directly when we build the node image.
70 | #
71 | # Finally we adjust tempfiles cleanup to be 1 minute after "boot" instead of 15m
72 | # This is plenty after we've done initial setup for a node, but before we are
73 | # likely to try to export logs etc.
74 | RUN chmod 755 /kind/bin && \
75 | echo "Installing Packages ..." \
76 | && DEBIAN_FRONTEND=noninteractive clean-install \
77 | systemd \
78 | conntrack iptables nftables iproute2 ethtool util-linux mount kmod \
79 | libseccomp2 pigz fuse-overlayfs \
80 | nfs-common open-iscsi \
81 | bash ca-certificates curl jq procps \
82 | && find /lib/systemd/system/sysinit.target.wants/ -name "systemd-tmpfiles-setup.service" -delete \
83 | && rm -f /lib/systemd/system/multi-user.target.wants/* \
84 | && rm -f /etc/systemd/system/*.wants/* \
85 | && rm -f /lib/systemd/system/local-fs.target.wants/* \
86 | && rm -f /lib/systemd/system/sockets.target.wants/*udev* \
87 | && rm -f /lib/systemd/system/sockets.target.wants/*initctl* \
88 | && rm -f /lib/systemd/system/basic.target.wants/* \
89 | && echo "ReadKMsg=no" >> /etc/systemd/journald.conf \
90 | && ln -s "$(which systemd)" /sbin/init
91 |
92 | # NOTE: systemd-binfmt.service will register things into binfmt_misc which is kernel-global
93 | RUN echo "Enabling / Disabling services ... " \
94 | && systemctl enable kubelet.service \
95 | && systemctl enable containerd.service \
96 | && systemctl enable undo-mount-hacks.service \
97 | && systemctl mask systemd-binfmt.service
98 |
99 | RUN echo "Ensuring /etc/kubernetes/manifests" \
100 | && mkdir -p /etc/kubernetes/manifests
101 |
102 | # shared stage to setup go version for building binaries
103 | # NOTE we will be cross-compiling for performance reasons
104 | # This is also why we start again FROM the same base image but a different
105 | # platform and only the files needed for building
106 | # We will copy the built binaries from later stages to the final stage(s)
107 | FROM --platform=$BUILDPLATFORM $BASE_IMAGE AS go-build
108 | COPY --chmod=0755 files/usr/local/bin/* /usr/local/bin/
109 | COPY --chmod=0755 scripts/third_party/gimme/gimme /usr/local/bin/
110 | COPY --chmod=0755 scripts/target-cc /usr/local/bin/
111 | # tools needed at build-time only
112 | # first ensure we can install packages for both architectures
113 | RUN dpkg --add-architecture arm64 && dpkg --add-architecture amd64 \
114 | && clean-install bash ca-certificates curl git make pkg-config \
115 | crossbuild-essential-amd64 crossbuild-essential-arm64 \
116 | libseccomp-dev:amd64 libseccomp-dev:arm64
117 | # set by makefile to .go-version
118 | ARG GO_VERSION
119 | RUN eval "$(gimme "${GO_VERSION}")" \
120 | && export GOTOOLCHAIN="go${GO_VERSION}" \
121 | && GOBIN=/usr/local/bin go install github.com/google/go-licenses@latest
122 |
123 |
124 | # stage for building containerd
125 | FROM go-build AS build-containerd
126 | ARG TARGETARCH GO_VERSION
127 | ARG CONTAINERD_VERSION="v2.0.3"
128 | ARG CONTAINERD_CLONE_URL="https://github.com/containerd/containerd"
129 | # we don't build with optional snapshotters, we never select any of these
130 | # they're not ideal inside kind anyhow, and we save some disk space
131 | ARG BUILDTAGS="no_aufs no_zfs no_btrfs no_devmapper"
132 | RUN git clone --filter=tree:0 "${CONTAINERD_CLONE_URL}" /containerd \
133 | && cd /containerd \
134 | && git checkout "${CONTAINERD_VERSION}" \
135 | && eval "$(gimme "${GO_VERSION}")" \
136 | && export GOTOOLCHAIN="go${GO_VERSION}" \
137 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
138 | && make bin/ctr bin/containerd bin/containerd-shim-runc-v2 \
139 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES \
140 | ./cmd/ctr ./cmd/containerd ./cmd/containerd-shim-runc-v2
141 |
142 | # stage for building runc
143 | FROM go-build AS build-runc
144 | ARG TARGETARCH GO_VERSION
145 | ARG RUNC_VERSION="v1.2.5"
146 | ARG RUNC_CLONE_URL="https://github.com/opencontainers/runc"
147 | RUN git clone --filter=tree:0 "${RUNC_CLONE_URL}" /runc \
148 | && cd /runc \
149 | && git checkout "${RUNC_VERSION}" \
150 | && eval "$(gimme "${GO_VERSION}")" \
151 | && export GOTOOLCHAIN="go${GO_VERSION}" \
152 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
153 | && make runc \
154 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES .
155 |
156 | # stage for building crictl
157 | FROM go-build AS build-crictl
158 | ARG TARGETARCH GO_VERSION
159 | ARG CRI_TOOLS_CLONE_URL="https://github.com/kubernetes-sigs/cri-tools"
160 | ARG CRICTL_VERSION="v1.32.0"
161 | RUN git clone --filter=tree:0 "${CRI_TOOLS_CLONE_URL}" /cri-tools \
162 | && cd /cri-tools \
163 | && git checkout "${CRICTL_VERSION}" \
164 | && eval "$(gimme "${GO_VERSION}")" \
165 | && export GOTOOLCHAIN="go${GO_VERSION}" \
166 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
167 | && make BUILD_BIN_PATH=./build crictl \
168 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES ./cmd/crictl
169 |
170 | # stage for building cni-plugins
171 | FROM go-build AS build-cni
172 | ARG TARGETARCH GO_VERSION
173 | ARG CNI_PLUGINS_VERSION="v1.6.1"
174 | ARG CNI_PLUGINS_CLONE_URL="https://github.com/containernetworking/plugins"
175 | RUN git clone --filter=tree:0 "${CNI_PLUGINS_CLONE_URL}" /cni-plugins \
176 | && cd /cni-plugins \
177 | && git checkout "${CNI_PLUGINS_VERSION}" \
178 | && eval "$(gimme "${GO_VERSION}")" \
179 | && export GOTOOLCHAIN="go${GO_VERSION}" \
180 | && mkdir ./bin \
181 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=0 \
182 | && go build -o ./bin/host-local -mod=vendor ./plugins/ipam/host-local \
183 | && go build -o ./bin/loopback -mod=vendor ./plugins/main/loopback \
184 | && go build -o ./bin/ptp -mod=vendor ./plugins/main/ptp \
185 | && go build -o ./bin/portmap -mod=vendor ./plugins/meta/portmap \
186 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES \
187 | ./plugins/ipam/host-local \
188 | ./plugins/main/loopback ./plugins/main/ptp \
189 | ./plugins/meta/portmap
190 |
191 | # stage for building containerd-fuse-overlayfs
192 | FROM go-build AS build-fuse-overlayfs
193 | ARG TARGETARCH GO_VERSION
194 | ARG CONTAINERD_FUSE_OVERLAYFS_VERSION="v2.1.0"
195 | ARG CONTAINERD_FUSE_OVERLAYFS_CLONE_URL="https://github.com/containerd/fuse-overlayfs-snapshotter"
196 | RUN git clone --filter=tree:0 "${CONTAINERD_FUSE_OVERLAYFS_CLONE_URL}" /fuse-overlayfs-snapshotter \
197 | && cd /fuse-overlayfs-snapshotter \
198 | && git checkout "${CONTAINERD_FUSE_OVERLAYFS_VERSION}" \
199 | && eval "$(gimme "${GO_VERSION}")" \
200 | && export GOTOOLCHAIN="go${GO_VERSION}" \
201 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
202 | && make bin/containerd-fuse-overlayfs-grpc \
203 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES ./cmd/containerd-fuse-overlayfs-grpc
204 |
205 |
206 | # build final image layout from other stages
207 | FROM base AS build
208 | # copy over containerd build and install
209 | COPY --from=build-containerd /containerd/bin/containerd /usr/local/bin/
210 | COPY --from=build-containerd /containerd/bin/ctr /usr/local/bin/
211 | COPY --from=build-containerd /containerd/bin/containerd-shim-runc-v2 /usr/local/bin/
212 | RUN ctr oci spec \
213 | | jq '.hooks.createContainer[.hooks.createContainer| length] |= . + {"path": "/kind/bin/mount-product-files.sh"}' \
214 | | jq 'del(.process.rlimits)' \
215 | > /etc/containerd/cri-base.json \
216 | && containerd --version
217 | COPY --from=build-containerd /_LICENSES/* /LICENSES/
218 | # copy over runc build and install
219 | COPY --from=build-runc /runc/runc /usr/local/sbin/runc
220 | RUN runc --version
221 | COPY --from=build-runc /_LICENSES/* /LICENSES/
222 | # copy over crictl build and install
223 | COPY --from=build-crictl /cri-tools/build/crictl /usr/local/bin/
224 | COPY --from=build-crictl /_LICENSES/* /LICENSES/
225 | # copy over CNI plugins build and install
226 | RUN mkdir -p /opt/cni/bin
227 | COPY --from=build-cni /cni-plugins/bin/host-local /opt/cni/bin/
228 | COPY --from=build-cni /cni-plugins/bin/loopback /opt/cni/bin/
229 | COPY --from=build-cni /cni-plugins/bin/ptp /opt/cni/bin/
230 | COPY --from=build-cni /cni-plugins/bin/portmap /opt/cni/bin/
231 | COPY --from=build-cni /_LICENSES/* /LICENSES/
232 | # copy over containerd-fuse-overlayfs and install
233 | COPY --from=build-fuse-overlayfs /fuse-overlayfs-snapshotter/bin/containerd-fuse-overlayfs-grpc /usr/local/bin/
234 | COPY --from=build-fuse-overlayfs /_LICENSES/* /LICENSES/
235 |
236 | # squash down to one compressed layer, without any lingering whiteout files etc
237 | FROM scratch
238 | COPY --from=build / /
239 | # add metadata, must be done after the squashing
240 | # first tell systemd that it is in docker (it will check for the container env)
241 | # https://systemd.io/CONTAINER_INTERFACE/
242 | ENV container=docker
243 | # systemd exits on SIGRTMIN+3, not SIGTERM (which re-executes it)
244 | # https://bugzilla.redhat.com/show_bug.cgi?id=1201657
245 | STOPSIGNAL SIGRTMIN+3
246 | # NOTE: this is *only* for documentation, the entrypoint is overridden later
247 | ENTRYPOINT [ "/usr/local/bin/entrypoint", "/sbin/init" ]
248 |
--------------------------------------------------------------------------------
/testdata/kind.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://raw.githubusercontent.com/kubernetes-sigs/kind/refs/heads/main/images/base/Dockerfile
2 |
3 | # Copyright 2018 The Kubernetes Authors.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # kind node base image
18 | #
19 | # For systemd + docker configuration used below, see the following references:
20 | # https://systemd.io/CONTAINER_INTERFACE/
21 |
22 | # start from debian slim, this image is reasonably small as a starting point
23 | # for a kubernetes node image, it doesn't contain much (anything?) we don't need
24 | # this stage will install basic files and packages
25 | ARG BASE_IMAGE=debian:bookworm-slim
26 | FROM $BASE_IMAGE AS base
27 |
28 | # copy in static files
29 | # all scripts and directories are 0755 (rwx r-x r-x)
30 | # all non-scripts are 0644 (rw- r-- r--)
31 | COPY --chmod=0755 files/usr/local/bin/* /usr/local/bin/
32 |
33 | COPY --chmod=0644 files/kind/ /kind/
34 | # COPY only applies to files, not the directory itself, so the permissions are
35 | # fixed in RUN below with a chmod.
36 | COPY --chmod=0755 files/kind/bin/ /kind/bin/
37 |
38 | COPY --chmod=0644 files/LICENSES/* /LICENSES/*
39 | COPY --chmod=0644 files/etc/* /etc/
40 | COPY --chmod=0644 files/etc/containerd/* /etc/containerd/
41 | COPY --chmod=0644 files/etc/default/* /etc/default/
42 | COPY --chmod=0644 files/etc/sysctl.d/* /etc/sysctl.d/
43 | COPY --chmod=0644 files/etc/systemd/system/* /etc/systemd/system/
44 | COPY --chmod=0644 files/etc/systemd/system/kubelet.service.d/* /etc/systemd/system/kubelet.service.d/
45 |
46 | # Install dependencies, first from apt, then from release tarballs.
47 | # NOTE: we use one RUN to minimize layers.
48 | #
49 | # The base image already has a basic userspace + apt but we need to install more packages.
50 | # Packages installed are broken down into (each on a line):
51 | # - packages needed to run services (systemd)
52 | # - packages needed for kubernetes components
53 | # - packages needed for network-backed storage with kubernetes
54 | # - packages needed by the container runtime
55 | # - misc packages kind uses itself
56 | # - packages that provide semi-core kubernetes functionality
57 | # After installing packages we cleanup by:
58 | # - removing unwanted systemd services
59 | # - disabling kmsg in journald (these log entries would be confusing)
60 | #
61 | # Then we install containerd from our nightly build infrastructure, as this
62 | # builds for multiple architectures and allows us to upgrade to patched releases
63 | # more quickly.
64 | #
65 | # Next we download and extract crictl and CNI plugin binaries from upstream.
66 | #
67 | # Next we ensure the /etc/kubernetes/manifests directory exists. Normally
68 | # a kubeadm debian / rpm package would ensure that this exists but we install
69 | # freshly built binaries directly when we build the node image.
70 | #
71 | # Finally we adjust tempfiles cleanup to be 1 minute after "boot" instead of 15m
72 | # This is plenty after we've done initial setup for a node, but before we are
73 | # likely to try to export logs etc.
74 | RUN chmod 755 /kind/bin && \
75 | echo "Installing Packages ..." \
76 | && DEBIAN_FRONTEND=noninteractive clean-install \
77 | systemd \
78 | conntrack iptables nftables iproute2 ethtool util-linux mount kmod \
79 | libseccomp2 pigz fuse-overlayfs \
80 | nfs-common open-iscsi \
81 | bash ca-certificates curl jq procps \
82 | && find /lib/systemd/system/sysinit.target.wants/ -name "systemd-tmpfiles-setup.service" -delete \
83 | && rm -f /lib/systemd/system/multi-user.target.wants/* \
84 | && rm -f /etc/systemd/system/*.wants/* \
85 | && rm -f /lib/systemd/system/local-fs.target.wants/* \
86 | && rm -f /lib/systemd/system/sockets.target.wants/*udev* \
87 | && rm -f /lib/systemd/system/sockets.target.wants/*initctl* \
88 | && rm -f /lib/systemd/system/basic.target.wants/* \
89 | && echo "ReadKMsg=no" >> /etc/systemd/journald.conf \
90 | && ln -s "$(which systemd)" /sbin/init
91 |
92 | # NOTE: systemd-binfmt.service will register things into binfmt_misc which is kernel-global
93 | RUN echo "Enabling / Disabling services ... " \
94 | && systemctl enable kubelet.service \
95 | && systemctl enable containerd.service \
96 | && systemctl enable undo-mount-hacks.service \
97 | && systemctl mask systemd-binfmt.service
98 |
99 | RUN echo "Ensuring /etc/kubernetes/manifests" \
100 | && mkdir -p /etc/kubernetes/manifests
101 |
102 | # shared stage to setup go version for building binaries
103 | # NOTE we will be cross-compiling for performance reasons
104 | # This is also why we start again FROM the same base image but a different
105 | # platform and only the files needed for building
106 | # We will copy the built binaries from later stages to the final stage(s)
107 | FROM --platform=$BUILDPLATFORM $BASE_IMAGE AS go-build
108 | COPY --chmod=0755 files/usr/local/bin/* /usr/local/bin/
109 | COPY --chmod=0755 scripts/third_party/gimme/gimme /usr/local/bin/
110 | COPY --chmod=0755 scripts/target-cc /usr/local/bin/
111 | # tools needed at build-time only
112 | # first ensure we can install packages for both architectures
113 | RUN dpkg --add-architecture arm64 && dpkg --add-architecture amd64 \
114 | && clean-install bash ca-certificates curl git make pkg-config \
115 | crossbuild-essential-amd64 crossbuild-essential-arm64 \
116 | libseccomp-dev:amd64 libseccomp-dev:arm64
117 | # set by makefile to .go-version
118 | ARG GO_VERSION
119 | RUN eval "$(gimme "${GO_VERSION}")" \
120 | && export GOTOOLCHAIN="go${GO_VERSION}" \
121 | && GOBIN=/usr/local/bin go install github.com/google/go-licenses@latest
122 |
123 |
124 | # stage for building containerd
125 | FROM go-build AS build-containerd
126 | ARG TARGETARCH GO_VERSION
127 | ARG CONTAINERD_VERSION="v2.0.3"
128 | ARG CONTAINERD_CLONE_URL="https://github.com/containerd/containerd"
129 | # we don't build with optional snapshotters, we never select any of these
130 | # they're not ideal inside kind anyhow, and we save some disk space
131 | ARG BUILDTAGS="no_aufs no_zfs no_btrfs no_devmapper"
132 | RUN git clone --filter=tree:0 "${CONTAINERD_CLONE_URL}" /containerd \
133 | && cd /containerd \
134 | && git checkout "${CONTAINERD_VERSION}" \
135 | && eval "$(gimme "${GO_VERSION}")" \
136 | && export GOTOOLCHAIN="go${GO_VERSION}" \
137 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
138 | && make bin/ctr bin/containerd bin/containerd-shim-runc-v2 \
139 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES \
140 | ./cmd/ctr ./cmd/containerd ./cmd/containerd-shim-runc-v2
141 |
142 | # stage for building runc
143 | FROM go-build AS build-runc
144 | ARG TARGETARCH GO_VERSION
145 | ARG RUNC_VERSION="v1.2.5"
146 | ARG RUNC_CLONE_URL="https://github.com/opencontainers/runc"
147 | RUN git clone --filter=tree:0 "${RUNC_CLONE_URL}" /runc \
148 | && cd /runc \
149 | && git checkout "${RUNC_VERSION}" \
150 | && eval "$(gimme "${GO_VERSION}")" \
151 | && export GOTOOLCHAIN="go${GO_VERSION}" \
152 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
153 | && make runc \
154 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES .
155 |
156 | # stage for building crictl
157 | FROM go-build AS build-crictl
158 | ARG TARGETARCH GO_VERSION
159 | ARG CRI_TOOLS_CLONE_URL="https://github.com/kubernetes-sigs/cri-tools"
160 | ARG CRICTL_VERSION="v1.32.0"
161 | RUN git clone --filter=tree:0 "${CRI_TOOLS_CLONE_URL}" /cri-tools \
162 | && cd /cri-tools \
163 | && git checkout "${CRICTL_VERSION}" \
164 | && eval "$(gimme "${GO_VERSION}")" \
165 | && export GOTOOLCHAIN="go${GO_VERSION}" \
166 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
167 | && make BUILD_BIN_PATH=./build crictl \
168 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES ./cmd/crictl
169 |
170 | # stage for building cni-plugins
171 | FROM go-build AS build-cni
172 | ARG TARGETARCH GO_VERSION
173 | ARG CNI_PLUGINS_VERSION="v1.6.1"
174 | ARG CNI_PLUGINS_CLONE_URL="https://github.com/containernetworking/plugins"
175 | RUN git clone --filter=tree:0 "${CNI_PLUGINS_CLONE_URL}" /cni-plugins \
176 | && cd /cni-plugins \
177 | && git checkout "${CNI_PLUGINS_VERSION}" \
178 | && eval "$(gimme "${GO_VERSION}")" \
179 | && export GOTOOLCHAIN="go${GO_VERSION}" \
180 | && mkdir ./bin \
181 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=0 \
182 | && go build -o ./bin/host-local -mod=vendor ./plugins/ipam/host-local \
183 | && go build -o ./bin/loopback -mod=vendor ./plugins/main/loopback \
184 | && go build -o ./bin/ptp -mod=vendor ./plugins/main/ptp \
185 | && go build -o ./bin/portmap -mod=vendor ./plugins/meta/portmap \
186 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES \
187 | ./plugins/ipam/host-local \
188 | ./plugins/main/loopback ./plugins/main/ptp \
189 | ./plugins/meta/portmap
190 |
191 | # stage for building containerd-fuse-overlayfs
192 | FROM go-build AS build-fuse-overlayfs
193 | ARG TARGETARCH GO_VERSION
194 | ARG CONTAINERD_FUSE_OVERLAYFS_VERSION="v2.1.0"
195 | ARG CONTAINERD_FUSE_OVERLAYFS_CLONE_URL="https://github.com/containerd/fuse-overlayfs-snapshotter"
196 | RUN git clone --filter=tree:0 "${CONTAINERD_FUSE_OVERLAYFS_CLONE_URL}" /fuse-overlayfs-snapshotter \
197 | && cd /fuse-overlayfs-snapshotter \
198 | && git checkout "${CONTAINERD_FUSE_OVERLAYFS_VERSION}" \
199 | && eval "$(gimme "${GO_VERSION}")" \
200 | && export GOTOOLCHAIN="go${GO_VERSION}" \
201 | && export GOARCH=$TARGETARCH && export CC=$(target-cc) && export CGO_ENABLED=1 \
202 | && make bin/containerd-fuse-overlayfs-grpc \
203 | && GOARCH=$TARGETARCH go-licenses save --save_path=/_LICENSES ./cmd/containerd-fuse-overlayfs-grpc
204 |
205 |
206 | # build final image layout from other stages
207 | FROM base AS build
208 | # copy over containerd build and install
209 | COPY --from=build-containerd /containerd/bin/containerd /usr/local/bin/
210 | COPY --from=build-containerd /containerd/bin/ctr /usr/local/bin/
211 | COPY --from=build-containerd /containerd/bin/containerd-shim-runc-v2 /usr/local/bin/
212 | RUN ctr oci spec \
213 | | jq '.hooks.createContainer[.hooks.createContainer| length] |= . + {"path": "/kind/bin/mount-product-files.sh"}' \
214 | | jq 'del(.process.rlimits)' \
215 | > /etc/containerd/cri-base.json \
216 | && containerd --version
217 | COPY --from=build-containerd /_LICENSES/* /LICENSES/
218 | # copy over runc build and install
219 | COPY --from=build-runc /runc/runc /usr/local/sbin/runc
220 | RUN runc --version
221 | COPY --from=build-runc /_LICENSES/* /LICENSES/
222 | # copy over crictl build and install
223 | COPY --from=build-crictl /cri-tools/build/crictl /usr/local/bin/
224 | COPY --from=build-crictl /_LICENSES/* /LICENSES/
225 | # copy over CNI plugins build and install
226 | RUN mkdir -p /opt/cni/bin
227 | COPY --from=build-cni /cni-plugins/bin/host-local /opt/cni/bin/
228 | COPY --from=build-cni /cni-plugins/bin/loopback /opt/cni/bin/
229 | COPY --from=build-cni /cni-plugins/bin/ptp /opt/cni/bin/
230 | COPY --from=build-cni /cni-plugins/bin/portmap /opt/cni/bin/
231 | COPY --from=build-cni /_LICENSES/* /LICENSES/
232 | # copy over containerd-fuse-overlayfs and install
233 | COPY --from=build-fuse-overlayfs /fuse-overlayfs-snapshotter/bin/containerd-fuse-overlayfs-grpc /usr/local/bin/
234 | COPY --from=build-fuse-overlayfs /_LICENSES/* /LICENSES/
235 |
236 | # squash down to one compressed layer, without any lingering whiteout files etc
237 | FROM scratch
238 | COPY --from=build / /
239 | # add metadata, must be done after the squashing
240 | # first tell systemd that it is in docker (it will check for the container env)
241 | # https://systemd.io/CONTAINER_INTERFACE/
242 | ENV container=docker
243 | # systemd exits on SIGRTMIN+3, not SIGTERM (which re-executes it)
244 | # https://bugzilla.redhat.com/show_bug.cgi?id=1201657
245 | STOPSIGNAL SIGRTMIN+3
246 | # NOTE: this is *only* for documentation, the entrypoint is overridden later
247 | ENTRYPOINT [ "/usr/local/bin/entrypoint", "/sbin/init" ]
248 |
--------------------------------------------------------------------------------
/testdata/no-root.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # make sure USER root is not added unless it needs to be
2 | FROM cgr.dev/ORG/python:3.12-dev
3 |
4 | RUN echo "hello world"
5 |
--------------------------------------------------------------------------------
/testdata/no-root.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # make sure USER root is not added unless it needs to be
2 | FROM python:3.12-slim-bookworm
3 |
4 | RUN echo "hello world"
5 |
--------------------------------------------------------------------------------
/testdata/no-tag.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # see if latest-dev is used when no tag is specified
2 | FROM cgr.dev/ORG/python:latest-dev
3 | USER root
4 |
5 | RUN apk add --no-cache gettext git libpq make rsync
6 |
--------------------------------------------------------------------------------
/testdata/no-tag.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # see if latest-dev is used when no tag is specified
2 | FROM python
3 |
4 | RUN apt-get update \
5 | && apt-get install --assume-yes --no-install-recommends \
6 | gettext \
7 | git \
8 | libpq5 \
9 | make \
10 | rsync
11 |
--------------------------------------------------------------------------------
/testdata/nodejs-ubuntu.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # Based on patterns from https://gist.github.com/BretFisher/da34530726ff8076b83b583e527e91ed
2 | # This Dockerfile demonstrates a Node.js app with Ubuntu and apt-get packages
3 |
4 | FROM cgr.dev/ORG/chainguard-base:latest
5 | USER root
6 |
7 | # Set environment variables
8 | ENV DEBIAN_FRONTEND=noninteractive
9 | ENV NODE_VERSION=16.x
10 |
11 | # Update and install dependencies
12 | RUN apk add --no-cache build-base curl git gnupg python-3 wget
13 |
14 | # Add Node.js repository and install
15 | RUN curl -sL https://deb.nodesource.com/setup_${NODE_VERSION} | bash - && \
16 | apk add --no-cache nodejs && \
17 | npm install -g npm@latest
18 |
19 | # Add a non-root user
20 | RUN adduser --shell /bin/bash appuser
21 | WORKDIR /home/appuser/app
22 | RUN chown -R appuser:appuser /home/appuser
23 |
24 | # Switch to non-root user
25 | USER appuser
26 |
27 | # Copy application files
28 | COPY --chown=appuser:appuser package*.json ./
29 | RUN npm install
30 |
31 | # Copy the rest of the application
32 | COPY --chown=appuser:appuser . .
33 |
34 | # Expose port
35 | EXPOSE 3000
36 |
37 | # Start command
38 | CMD ["npm", "start"]
--------------------------------------------------------------------------------
/testdata/nodejs-ubuntu.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # Based on patterns from https://gist.github.com/BretFisher/da34530726ff8076b83b583e527e91ed
2 | # This Dockerfile demonstrates a Node.js app with Ubuntu and apt-get packages
3 |
4 | FROM ubuntu:20.04
5 |
6 | # Set environment variables
7 | ENV DEBIAN_FRONTEND=noninteractive
8 | ENV NODE_VERSION=16.x
9 |
10 | # Update and install dependencies
11 | RUN apt-get update && apt-get upgrade -y && \
12 | apt-get install -y curl wget gnupg git build-essential python3 && \
13 | apt-get clean && \
14 | rm -rf /var/lib/apt/lists/*
15 |
16 | # Add Node.js repository and install
17 | RUN curl -sL https://deb.nodesource.com/setup_${NODE_VERSION} | bash - && \
18 | apt-get install -y nodejs && \
19 | npm install -g npm@latest
20 |
21 | # Add a non-root user
22 | RUN useradd -m -s /bin/bash appuser
23 | WORKDIR /home/appuser/app
24 | RUN chown -R appuser:appuser /home/appuser
25 |
26 | # Switch to non-root user
27 | USER appuser
28 |
29 | # Copy application files
30 | COPY --chown=appuser:appuser package*.json ./
31 | RUN npm install
32 |
33 | # Copy the rest of the application
34 | COPY --chown=appuser:appuser . .
35 |
36 | # Expose port
37 | EXPOSE 3000
38 |
39 | # Start command
40 | CMD ["npm", "start"]
--------------------------------------------------------------------------------
/testdata/pipes.after.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM cgr.dev/ORG/python:3.9-dev
2 | USER root
3 |
4 | RUN echo "STEP 1" && \
5 | apk add --no-cache py3-pip py3-virtualenv python-3 && \
6 | echo "STEP 2" && \
7 | echo "STEP 3" && \
8 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ~/.cache ~/.npm
9 |
10 | RUN echo hello
11 |
12 | RUN echo hello && \
13 | echo goodbye
14 |
15 | RUN apk add --no-cache py3-pip py3-virtualenv python-3
16 |
17 | RUN true
18 |
--------------------------------------------------------------------------------
/testdata/pipes.before.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9.18-slim
2 |
3 | RUN apt-get update -q -q && \
4 | echo "STEP 1" && \
5 | apt-get install python3 python3-pip python3-virtualenv --yes && \
6 | echo "STEP 2" && \
7 | apt-get -s dist-upgrade | grep "^Inst" | grep -i securi | awk -F " " {'print $2'} | xargs apt-get install --yes && \
8 | echo "STEP 3" && \
9 | apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ~/.cache ~/.npm
10 |
11 | RUN apt-get update -q -q && echo hello
12 |
13 | RUN echo hello && apt-get update -q -q && echo goodbye
14 |
15 | RUN apt-get update -q -q && \
16 | apt-get install python3 python3-pip python3-virtualenv --yes && \
17 | apt-get -s dist-upgrade | grep "^Inst" | grep -i securi | awk -F " " {'print $2'} | xargs apt-get install --yes && \
18 | apt-get clean
19 |
20 | RUN apt-get update -q -q && apt-get clean
21 |
--------------------------------------------------------------------------------
/testdata/python-multi-stage.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://luis-sena.medium.com/creating-the-perfect-python-dockerfile-51bdec41f1c8
2 | # This Dockerfile demonstrates a multi-stage Python build with virtual environment
3 |
4 | # using ubuntu LTS version
5 | FROM cgr.dev/ORG/chainguard-base:latest AS builder-image
6 | USER root
7 |
8 | RUN apk add --no-cache build-base py3-pip py3-wheel python3.9 python3.9-dev python3.9-venv
9 |
10 | # create and activate virtual environment
11 | RUN python3.9 -m venv /opt/venv
12 | ENV PATH="/opt/venv/bin:$PATH"
13 |
14 | # install requirements
15 | COPY requirements.txt .
16 | RUN pip3 install --no-cache-dir -r requirements.txt
17 |
18 | FROM cgr.dev/ORG/chainguard-base:latest AS runner-image
19 | USER root
20 | RUN apk add --no-cache python3-venv python3.9
21 |
22 | COPY --from=builder-image /opt/venv /opt/venv
23 |
24 | # activate virtual environment
25 | ENV VIRTUAL_ENV=/opt/venv
26 | ENV PATH="/opt/venv/bin:$PATH"
27 |
28 | # Add non-root user
29 | RUN adduser appuser
30 | USER appuser
31 | WORKDIR /home/appuser
32 |
33 | # Copy application code
34 | COPY --chown=appuser:appuser . .
35 |
36 | CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/testdata/python-multi-stage.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://luis-sena.medium.com/creating-the-perfect-python-dockerfile-51bdec41f1c8
2 | # This Dockerfile demonstrates a multi-stage Python build with virtual environment
3 |
4 | # using ubuntu LTS version
5 | FROM ubuntu:20.04 AS builder-image
6 |
7 | RUN apt-get update && apt-get install --no-install-recommends -y python3.9 python3.9-dev python3.9-venv python3-pip python3-wheel build-essential && \
8 | apt-get clean && rm -rf /var/lib/apt/lists/*
9 |
10 | # create and activate virtual environment
11 | RUN python3.9 -m venv /opt/venv
12 | ENV PATH="/opt/venv/bin:$PATH"
13 |
14 | # install requirements
15 | COPY requirements.txt .
16 | RUN pip3 install --no-cache-dir -r requirements.txt
17 |
18 | FROM ubuntu:20.04 AS runner-image
19 | RUN apt-get update && apt-get install --no-install-recommends -y python3.9 python3-venv && \
20 | apt-get clean && rm -rf /var/lib/apt/lists/*
21 |
22 | COPY --from=builder-image /opt/venv /opt/venv
23 |
24 | # activate virtual environment
25 | ENV VIRTUAL_ENV=/opt/venv
26 | ENV PATH="/opt/venv/bin:$PATH"
27 |
28 | # Add non-root user
29 | RUN useradd --create-home appuser
30 | USER appuser
31 | WORKDIR /home/appuser
32 |
33 | # Copy application code
34 | COPY --chown=appuser:appuser . .
35 |
36 | CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/testdata/python-nodejs.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://stackoverflow.com/questions/56913746/how-to-install-python-on-nodejs-docker-image
2 | # This Dockerfile demonstrates a Node.js app that requires Python
3 |
4 | FROM cgr.dev/ORG/node:9-dev
5 | USER root
6 |
7 | # Update apt and install Python
8 | RUN : && \
9 | apk add --no-cache python
10 |
11 | WORKDIR /app
12 | COPY . /app
13 | RUN npm install
14 | EXPOSE 3000
15 | CMD ["node", "index.js"]
--------------------------------------------------------------------------------
/testdata/python-nodejs.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://stackoverflow.com/questions/56913746/how-to-install-python-on-nodejs-docker-image
2 | # This Dockerfile demonstrates a Node.js app that requires Python
3 |
4 | FROM node:9-slim
5 |
6 | # Update apt and install Python
7 | RUN apt-get update || : && apt-get install -y python
8 |
9 | WORKDIR /app
10 | COPY . /app
11 | RUN npm install
12 | EXPOSE 3000
13 | CMD ["node", "index.js"]
--------------------------------------------------------------------------------
/testdata/ruby-rails.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://lipanski.com/posts/dockerfile-ruby-best-practices
2 | # This Dockerfile demonstrates a Rails app with multi-stage build pattern
3 |
4 | # Start from a small, trusted base image with the version pinned down
5 | FROM cgr.dev/ORG/ruby:2.7-dev AS base
6 | USER root
7 |
8 | # Install system dependencies required both at runtime and build time
9 | # The image uses Postgres but you can swap it with mariadb-dev (for MySQL) or sqlite-dev
10 | RUN apk add --no-cache nodejs postgresql-dev tzdata yarn
11 |
12 | # This stage will be responsible for installing gems and npm packages
13 | FROM base AS dependencies
14 |
15 | # Install system dependencies required to build some Ruby gems (pg)
16 | RUN apk add --no-cache build-base
17 |
18 | COPY Gemfile Gemfile.lock ./
19 |
20 | # Install gems (excluding development/test dependencies)
21 | RUN bundle config set without "development test" && \
22 | bundle install --jobs=3 --retry=3
23 |
24 | COPY package.json yarn.lock ./
25 |
26 | # Install npm packages
27 | RUN yarn install --frozen-lockfile
28 |
29 | # We're back at the base stage
30 | FROM base
31 |
32 | # Create a non-root user to run the app and own app-specific files
33 | RUN adduser -D app
34 |
35 | # Switch to this user
36 | USER app
37 |
38 | # We'll install the app in this directory
39 | WORKDIR /home/app
40 |
41 | # Copy over gems from the dependencies stage
42 | COPY --from=dependencies /usr/local/bundle/ /usr/local/bundle/
43 |
44 | # Copy over npm packages from the dependencies stage
45 | # Note that we have to use `--chown` here
46 | COPY --chown=app --from=dependencies /node_modules/ node_modules/
47 |
48 | # Finally, copy over the code
49 | # This is where the .dockerignore file comes into play
50 | # Note that we have to use `--chown` here
51 | COPY --chown=app . ./
52 |
53 | # Install assets
54 | RUN RAILS_ENV=production SECRET_KEY_BASE=assets bundle exec rake assets:precompile
55 |
56 | # Launch the server
57 | CMD ["bundle", "exec", "rackup"]
--------------------------------------------------------------------------------
/testdata/ruby-rails.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # From https://lipanski.com/posts/dockerfile-ruby-best-practices
2 | # This Dockerfile demonstrates a Rails app with multi-stage build pattern
3 |
4 | # Start from a small, trusted base image with the version pinned down
5 | FROM ruby:2.7.1-alpine AS base
6 |
7 | # Install system dependencies required both at runtime and build time
8 | # The image uses Postgres but you can swap it with mariadb-dev (for MySQL) or sqlite-dev
9 | RUN apk add --update \
10 | postgresql-dev \
11 | tzdata \
12 | nodejs \
13 | yarn
14 |
15 | # This stage will be responsible for installing gems and npm packages
16 | FROM base AS dependencies
17 |
18 | # Install system dependencies required to build some Ruby gems (pg)
19 | RUN apk add --update build-base
20 |
21 | COPY Gemfile Gemfile.lock ./
22 |
23 | # Install gems (excluding development/test dependencies)
24 | RUN bundle config set without "development test" && \
25 | bundle install --jobs=3 --retry=3
26 |
27 | COPY package.json yarn.lock ./
28 |
29 | # Install npm packages
30 | RUN yarn install --frozen-lockfile
31 |
32 | # We're back at the base stage
33 | FROM base
34 |
35 | # Create a non-root user to run the app and own app-specific files
36 | RUN adduser -D app
37 |
38 | # Switch to this user
39 | USER app
40 |
41 | # We'll install the app in this directory
42 | WORKDIR /home/app
43 |
44 | # Copy over gems from the dependencies stage
45 | COPY --from=dependencies /usr/local/bundle/ /usr/local/bundle/
46 |
47 | # Copy over npm packages from the dependencies stage
48 | # Note that we have to use `--chown` here
49 | COPY --chown=app --from=dependencies /node_modules/ node_modules/
50 |
51 | # Finally, copy over the code
52 | # This is where the .dockerignore file comes into play
53 | # Note that we have to use `--chown` here
54 | COPY --chown=app . ./
55 |
56 | # Install assets
57 | RUN RAILS_ENV=production SECRET_KEY_BASE=assets bundle exec rake assets:precompile
58 |
59 | # Launch the server
60 | CMD ["bundle", "exec", "rackup"]
--------------------------------------------------------------------------------
/testdata/yum-dnf-flags.after.Dockerfile:
--------------------------------------------------------------------------------
1 | # Make sure when the -y flag is used before the install keyword
2 | # that conversion still occurs correctly
3 | FROM cgr.dev/ORG/chainguard-base:latest
4 | USER root
5 |
6 | RUN apk add --no-cache httpd php php-cli php-common
7 |
8 | RUN apk add --no-cache httpd php php-cli php-common
9 |
10 | RUN apk add --no-cache httpd php php-cli php-common
11 |
--------------------------------------------------------------------------------
/testdata/yum-dnf-flags.before.Dockerfile:
--------------------------------------------------------------------------------
1 | # Make sure when the -y flag is used before the install keyword
2 | # that conversion still occurs correctly
3 | FROM fedora:30
4 |
5 | RUN yum update -y && \
6 | yum -y install httpd php php-cli php-common && \
7 | yum clean all && \
8 | rm -rf /var/cache/yum/*
9 |
10 | RUN dnf update -y && \
11 | dnf -y install httpd php php-cli php-common && \
12 | dnf clean all
13 |
14 | RUN microdnf update -y && \
15 | microdnf -y install httpd php php-cli php-common && \
16 | microdnf clean all
17 |
--------------------------------------------------------------------------------