├── .devcontainer
└── devcontainer.json
├── .github
├── dependabot.yml
└── workflows
│ ├── ci.yaml
│ └── release.yaml
├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── buildinfo
└── version.go
├── cmd
└── envbuilder
│ └── main.go
├── devcontainer
├── devcontainer.go
├── devcontainer_test.go
├── features
│ ├── features.go
│ └── features_test.go
├── script.go
└── script_test.go
├── docs
├── build-secrets.md
├── caching.md
├── container-registry-auth.md
├── devcontainer-spec-support.md
├── docker.md
├── env-variables.md
├── git-auth.md
├── img
│ └── proxy.png
├── proxy.md
├── usage-with-coder.md
├── users.md
└── using-local-files.md
├── envbuilder.go
├── envbuilder_internal_test.go
├── examples
├── docker
│ ├── 01_dood
│ │ ├── Dockerfile
│ │ └── devcontainer.json
│ ├── 02_dind
│ │ ├── Dockerfile
│ │ ├── devcontainer.json
│ │ └── on-create.sh
│ ├── 03_dind_feature
│ │ ├── Dockerfile
│ │ ├── devcontainer.json
│ │ └── on-create.sh
│ └── 04_dind_rootless
│ │ ├── Dockerfile
│ │ ├── devcontainer.json
│ │ └── on-create.sh
└── kaniko-cache-warmer.sh
├── git
├── git.go
└── git_test.go
├── go.mod
├── go.sum
├── init.sh
├── integration
├── integration_test.go
└── testdata
│ └── blob-unknown
│ └── Dockerfile
├── internal
├── chmodfs
│ └── chmodfs.go
├── ebutil
│ ├── libs.go
│ ├── libs_amd64.go
│ ├── libs_arm.go
│ ├── libs_arm64.go
│ ├── mock_mounter_test.go
│ ├── remount.go
│ └── remount_internal_test.go
└── workingdir
│ ├── workingdir.go
│ └── workingdir_internal_test.go
├── log
├── coder.go
├── coder_internal_test.go
├── log.go
├── log_test.go
├── logrus.go
└── logrus_test.go
├── options
├── defaults.go
├── defaults_test.go
├── options.go
├── options_test.go
└── testdata
│ └── options.golden
├── scripts
├── Dockerfile
├── build.sh
├── check_fmt.sh
├── develop.sh
├── diagram-dark.png
├── diagram-dark.svg
├── diagram-light.png
├── diagram-light.svg
├── diagram.d2
├── diagram.sh
├── docsgen
│ └── main.go
├── lib.sh
└── version.sh
└── testutil
├── gittest
└── gittest.go
├── mwtest
└── auth_basic.go
└── registrytest
└── registrytest.go
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "envbuilder",
3 | "image": "mcr.microsoft.com/devcontainers/go:1.22",
4 | "features": {
5 | "ghcr.io/devcontainers/features/docker-in-docker": {}
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "gomod"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 | time: "06:00"
8 | timezone: "America/Chicago"
9 | commit-message:
10 | prefix: "chore"
11 | labels: ["dependencies"]
12 | ignore:
13 | # Ignore patch updates for all dependencies
14 | - dependency-name: "*"
15 | update-types:
16 | - version-update:semver-patch
17 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: ci
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 | workflow_dispatch:
11 |
12 | permissions:
13 | actions: none
14 | checks: none
15 | contents: read
16 | deployments: none
17 | issues: none
18 | pull-requests: none
19 | repository-projects: none
20 | security-events: none
21 | statuses: none
22 | # Necessary to push docker images to ghcr.io.
23 | packages: write
24 |
25 | # Cancel in-progress runs for pull requests when developers push
26 | # additional changes
27 | concurrency:
28 | group: ${{ github.workflow }}-${{ github.ref }}
29 | cancel-in-progress: ${{ github.event_name == 'pull_request' }}
30 |
31 | jobs:
32 | test:
33 | runs-on: ubuntu-latest
34 | steps:
35 | - name: Checkout
36 | uses: actions/checkout@v4
37 |
38 | - uses: actions/setup-go@v5
39 | with:
40 | go-version: "~1.22"
41 |
42 | - name: Download Go modules
43 | run: go mod download
44 |
45 | - name: Lint
46 | run: make -j lint
47 |
48 | - name: Test
49 | run: make test
50 | docs:
51 | runs-on: ubuntu-latest
52 | steps:
53 | - name: Checkout
54 | uses: actions/checkout@v4
55 |
56 | - uses: actions/setup-go@v5
57 | with:
58 | go-version: "~1.22"
59 |
60 | - name: Generate env vars docs
61 | run: make docs/env-variables.md
62 |
63 | - name: Check for unstaged files
64 | run: git diff --exit-code
65 | fmt:
66 | runs-on: ubuntu-latest
67 | steps:
68 | - name: Checkout
69 | uses: actions/checkout@v4
70 |
71 | - uses: actions/setup-go@v5
72 | with:
73 | go-version: "~1.22"
74 |
75 | - name: Check format
76 | run: ./scripts/check_fmt.sh
77 | build:
78 | runs-on: ubuntu-latest
79 | steps:
80 | - name: Checkout
81 | uses: actions/checkout@v4
82 | with:
83 | # Needed to get older tags
84 | fetch-depth: 0
85 |
86 | - uses: actions/setup-go@v5
87 | with:
88 | go-version: "~1.22"
89 |
90 | - name: Login to GitHub Container Registry
91 | if: github.event_name == 'push' && github.ref == 'refs/heads/main'
92 | uses: docker/login-action@v2
93 | with:
94 | registry: ghcr.io
95 | username: ${{ github.actor }}
96 | password: ${{ secrets.GITHUB_TOKEN }}
97 |
98 | # do not push images for pull requests
99 | - name: Build
100 | if: github.event_name == 'pull_request'
101 | run: |
102 | ./scripts/build.sh \
103 | --arch=amd64
104 |
105 | ./scripts/build.sh \
106 | --arch=arm64
107 |
108 | ./scripts/build.sh \
109 | --arch=arm
110 |
111 | - name: Build and Push
112 | if: github.event_name == 'push' && github.ref == 'refs/heads/main'
113 | run: |
114 | BASE=ghcr.io/coder/envbuilder-preview
115 |
116 | ./scripts/build.sh \
117 | --arch=amd64 \
118 | --arch=arm64 \
119 | --arch=arm \
120 | --base=$BASE \
121 | --push
122 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: release
2 | on:
3 | push:
4 | tags:
5 | - "v*"
6 |
7 | permissions:
8 | # Required to publish a release
9 | contents: write
10 | # Necessary to push docker images to ghcr.io.
11 | packages: write
12 | # Necessary for GCP authentication (https://github.com/google-github-actions/setup-gcloud#usage)
13 | id-token: write
14 |
15 | concurrency: ${{ github.workflow }}-${{ github.ref }}
16 |
17 | jobs:
18 | release:
19 | name: Build and publish
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 |
24 | # Workaround for actions/checkout#1467
25 | - name: Fetch tags
26 | run: |
27 | git fetch --tags --depth 1 --force
28 |
29 | - name: Echo Go Cache Paths
30 | id: go-cache-paths
31 | run: |
32 | echo "GOCACHE=$(go env GOCACHE)" >> ${{ runner.os == 'Windows' && '$env:' || '$' }}GITHUB_OUTPUT
33 | echo "GOMODCACHE=$(go env GOMODCACHE)" >> ${{ runner.os == 'Windows' && '$env:' || '$' }}GITHUB_OUTPUT
34 |
35 | - name: Go Build Cache
36 | uses: actions/cache@v3
37 | with:
38 | path: ${{ steps.go-cache-paths.outputs.GOCACHE }}
39 | key: ${{ runner.os }}-go-build-${{ hashFiles('**/go.**', '**.go') }}
40 |
41 | - uses: actions/setup-go@v5
42 | with:
43 | go-version: "~1.22"
44 |
45 | - name: Docker Login
46 | uses: docker/login-action@v2
47 | with:
48 | registry: ghcr.io
49 | username: ${{ github.actor }}
50 | password: ${{ secrets.GITHUB_TOKEN }}
51 |
52 | - name: Get version
53 | id: get-version
54 | env:
55 | ENVBUILDER_RELEASE: "t"
56 | run: |
57 | echo "ENVBUILDER_VERSION=$(./scripts/version.sh)" >> $GITHUB_OUTPUT
58 |
59 | - name: Build and Push
60 | env:
61 | VERSION: "${{ steps.get-version.outputs.ENVBUILDER_VERSION }}"
62 | BASE: "ghcr.io/coder/envbuilder"
63 | run: |
64 | ./scripts/build.sh \
65 | --arch=amd64 \
66 | --arch=arm64 \
67 | --arch=arm \
68 | --base=$BASE \
69 | --tag=$VERSION \
70 | --push
71 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | scripts/envbuilder-*
2 | .registry-cache
3 | **/.gen-golden
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
GOARCH := $(shell go env GOARCH)
PWD=$(shell pwd)

# Go sources excluding tests; used as prerequisites for lint/build targets.
GO_SRC_FILES := $(shell find . -type f -name '*.go' -not -name '*_test.go')
# Go test files. NOTE: previously this was `-not -name '*.go' -name '*_test.go'`,
# which can never match anything (every *_test.go file also matches *.go),
# so golden-file regeneration never triggered on test changes.
GO_TEST_FILES := $(shell find . -type f -name '*_test.go')
GOLDEN_FILES := $(shell find . -type f -name '*.golden')
SHELL_SRC_FILES := $(shell find . -type f -name '*.sh')
GOLANGCI_LINT_VERSION := v1.59.1
9 |
10 | fmt: $(shell find . -type f -name '*.go')
11 | go run mvdan.cc/gofumpt@v0.6.0 -l -w .
12 |
13 | .PHONY: lint
14 | lint: lint/go lint/shellcheck
15 |
16 | .PHONY: lint/go
17 | lint/go: $(GO_SRC_FILES)
18 | go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)
19 | golangci-lint run --timeout=10m
20 |
21 | .PHONY: lint/shellcheck
22 | lint/shellcheck: $(SHELL_SRC_FILES)
23 | echo "--- shellcheck"
24 | shellcheck --external-sources $(SHELL_SRC_FILES)
25 |
26 | develop:
27 | ./scripts/develop.sh
28 |
29 | build: scripts/envbuilder-$(GOARCH)
30 | ./scripts/build.sh
31 |
32 | .PHONY: gen
33 | gen: docs/env-variables.md update-golden-files
34 |
35 | .PHONY: update-golden-files
36 | update-golden-files: .gen-golden
37 |
38 | .gen-golden: $(GOLDEN_FILES) $(GO_SRC_FILES) $(GO_TEST_FILES)
39 | go test ./options -update
40 | @touch "$@"
41 |
42 | docs/env-variables.md: options/options.go options/options_test.go
43 | go run ./scripts/docsgen/main.go
44 |
45 | .PHONY: test
46 | test: test-registry
47 | go test -count=1 ./...
48 |
49 | test-race:
50 | go test -race -count=3 ./...
51 |
52 | .PHONY: update-kaniko-fork
53 | update-kaniko-fork:
54 | go mod edit -replace github.com/GoogleContainerTools/kaniko=github.com/coder/kaniko@main
55 | go mod tidy
56 |
57 | # Starts a local Docker registry on port 5000 with a local disk cache.
58 | .PHONY: test-registry
59 | test-registry: test-registry-container test-images-pull test-images-push
60 |
61 | .PHONY: test-registry-container
62 | test-registry-container: .registry-cache
63 | if ! curl -fsSL http://localhost:5000/v2/_catalog > /dev/null 2>&1; then \
64 | docker rm -f envbuilder-registry && \
65 | docker run -d -p 5000:5000 --name envbuilder-registry --volume $(PWD)/.registry-cache:/var/lib/registry registry:2; \
66 | fi
67 |
68 | # Pulls images referenced in integration tests and pushes them to the local cache.
69 | .PHONY: test-images-push
70 | test-images-push: .registry-cache/docker/registry/v2/repositories/envbuilder-test-alpine .registry-cache/docker/registry/v2/repositories/envbuilder-test-ubuntu .registry-cache/docker/registry/v2/repositories/envbuilder-test-codercom-code-server .registry-cache/docker/registry/v2/repositories/envbuilder-test-blob-unknown
71 |
72 | .PHONY: test-images-pull
73 | test-images-pull:
74 | docker pull alpine:latest
75 | docker tag alpine:latest localhost:5000/envbuilder-test-alpine:latest
76 | docker pull ubuntu:latest
77 | docker tag ubuntu:latest localhost:5000/envbuilder-test-ubuntu:latest
78 | docker pull codercom/code-server:latest
79 | docker tag codercom/code-server:latest localhost:5000/envbuilder-test-codercom-code-server:latest
80 | docker build -t localhost:5000/envbuilder-test-blob-unknown:latest -f integration/testdata/blob-unknown/Dockerfile integration/testdata/blob-unknown
81 |
82 | .registry-cache:
83 | mkdir -p .registry-cache && chmod -R ag+w .registry-cache
84 |
85 | .registry-cache/docker/registry/v2/repositories/envbuilder-test-alpine:
86 | docker push localhost:5000/envbuilder-test-alpine:latest
87 |
88 | .registry-cache/docker/registry/v2/repositories/envbuilder-test-ubuntu:
89 | docker push localhost:5000/envbuilder-test-ubuntu:latest
90 |
91 | .registry-cache/docker/registry/v2/repositories/envbuilder-test-codercom-code-server:
92 | docker push localhost:5000/envbuilder-test-codercom-code-server:latest
93 |
94 | .registry-cache/docker/registry/v2/repositories/envbuilder-test-blob-unknown:
95 | docker push localhost:5000/envbuilder-test-blob-unknown:latest
96 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | # Envbuilder
9 |
10 | 
11 |
12 | _(Video created using [asciinema](https://github.com/asciinema/asciinema) and [agg](https://github.com/asciinema/agg))_
13 |
14 | Build development environments from a Dockerfile on Docker, Kubernetes, and OpenShift. Allow developers to modify their environment in a tight feedback loop.
15 |
16 | - Supports [`devcontainer.json`](https://containers.dev/) and `Dockerfile`
17 | - Cache image layers with registries for speedy builds
18 | - Runs on Kubernetes, Docker, and OpenShift
19 |
20 | ## Getting Started
21 |
22 | The easiest way to get started is by running the `envbuilder` Docker container that clones a repository specified by `ENVBUILDER_GIT_URL`, builds the image from a Dockerfile or `devcontainer.json`, and runs the `$ENVBUILDER_INIT_SCRIPT` in the freshly built container.
23 |
24 | > **Tips**:
25 | > - The `/tmp/envbuilder` directory persists demo data between commands. You can choose a different directory if needed.
26 | > - To clone a different branch, you append it to `ENVBUILDER_GIT_URL` in the form `#refs/heads/my-branch`. For example: `https://github.com/coder/envbuilder-starter-devcontainer#refs/heads/boring-prompt`.
27 | ```bash
28 | docker run -it --rm \
29 | -v /tmp/envbuilder:/workspaces \
30 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder-starter-devcontainer \
31 | -e ENVBUILDER_INIT_SCRIPT=bash \
32 | ghcr.io/coder/envbuilder
33 | ```
34 |
35 | Edit `.devcontainer/Dockerfile` to add `htop`:
36 |
37 | ```bash
38 | vim .devcontainer/Dockerfile
39 | ```
40 |
41 | ```diff
42 | - RUN apt-get install vim sudo -y
43 | + RUN apt-get install vim sudo htop -y
44 | ```
45 |
46 | Exit the container and re-run the `docker run` command. After the build completes, `htop` should be available in the container! 🥳
47 |
48 | To explore more examples, tips, and advanced usage, check out the following guides:
49 |
50 | - [Using Local Files](./docs/using-local-files.md)
51 | - [Usage with Coder](./docs/usage-with-coder.md)
52 | - [Container Registry Authentication](./docs/container-registry-auth.md)
53 | - [Git Authentication](./docs/git-auth.md)
54 | - [Caching](./docs/caching.md)
55 | - [Custom Certificates & Proxies](./docs/proxy.md)
56 | - [Users](./docs/users.md)
57 |
58 | ## Setup Script
59 |
60 | The `ENVBUILDER_SETUP_SCRIPT` environment variable dynamically configures the user and init command (PID 1) after the container build process.
61 |
62 | > **Note**: `TARGET_USER` is passed to the setup script to specify who will execute `ENVBUILDER_INIT_COMMAND` (e.g., `code`).
63 |
64 | Write the following to `$ENVBUILDER_ENV` to shape the container's init process:
65 |
66 | - `TARGET_USER`: Identifies the `ENVBUILDER_INIT_COMMAND` executor (e.g., `root`).
67 | - `ENVBUILDER_INIT_COMMAND`: Defines the command executed by `TARGET_USER` (e.g. `/bin/bash`).
68 | - `ENVBUILDER_INIT_ARGS`: Arguments provided to `ENVBUILDER_INIT_COMMAND` (e.g., `-c 'sleep infinity'`).
69 |
70 | ```bash
71 | # init.sh - Change the init if systemd exists
72 | if command -v systemd >/dev/null; then
73 | echo "Hey 👋 $TARGET_USER"
74 | echo ENVBUILDER_INIT_COMMAND=systemd >> $ENVBUILDER_ENV
75 | else
76 | echo ENVBUILDER_INIT_COMMAND=bash >> $ENVBUILDER_ENV
77 | fi
78 |
79 | # Run envbuilder with the setup script
80 | docker run -it --rm \
81 | -v ./:/some-dir \
82 | -e ENVBUILDER_SETUP_SCRIPT=/some-dir/init.sh \
83 | ...
84 | ```
85 |
86 | ## Environment Variables
87 |
88 | You can see all the supported environment variables in [this document](./docs/env-variables.md).
89 |
90 | ### Development Containers
91 |
92 | [This document](./docs/devcontainer-spec-support.md) keeps track of what parts of the Dev Container specification Envbuilder currently supports.
93 |
94 | Feel free to [create a new issue](https://github.com/coder/envbuilder/issues/new) if you'd like Envbuilder to support a particular feature.
95 |
96 | ### Devfile
97 |
98 | > [Devfiles](https://devfile.io/) automate and simplify development by adopting existing devfiles available in the [public community registry](https://registry.devfile.io/viewer).
99 |
100 | Issue: [#113](https://github.com/coder/envbuilder/issues/113)
101 |
102 | ## Contributing
103 |
104 | Building `envbuilder` currently **requires** a Linux system.
105 |
106 | On macOS or Windows systems, we recommend using a VM or the provided `.devcontainer` for development.
107 |
108 | **Additional Requirements:**
109 |
110 | - `go 1.22`
111 | - `make`
112 | - Docker daemon (for running tests)
113 |
114 | **Makefile targets:**
115 |
116 | - `build`: Builds and tags `envbuilder:latest` for your current architecture.
117 | - `develop`: Runs `envbuilder:latest` against a sample Git repository.
118 | - `test`: Runs tests.
119 | - `test-registry`: Stands up a local registry for caching images used in tests.
120 | - `docs/env-variables.md`: Updates the [environment variables documentation](./docs/env-variables.md).
121 |
--------------------------------------------------------------------------------
/buildinfo/version.go:
--------------------------------------------------------------------------------
1 | package buildinfo
2 |
3 | import (
4 | "fmt"
5 | "runtime/debug"
6 | "sync"
7 |
8 | "golang.org/x/mod/semver"
9 | )
10 |
11 | const (
12 | noVersion = "v0.0.0"
13 | develPreRelease = "devel"
14 | )
15 |
16 | var (
17 | buildInfo *debug.BuildInfo
18 | buildInfoValid bool
19 | readBuildInfo sync.Once
20 |
21 | version string
22 | readVersion sync.Once
23 |
24 | // Injected with ldflags at build time
25 | tag string
26 | )
27 |
// revision returns the VCS revision recorded in the embedded build
// info (the "vcs.revision" setting), and whether it was present.
func revision() (string, bool) {
	return find("vcs.revision")
}
31 |
32 | func find(key string) (string, bool) {
33 | readBuildInfo.Do(func() {
34 | buildInfo, buildInfoValid = debug.ReadBuildInfo()
35 | })
36 | if !buildInfoValid {
37 | panic("could not read build info")
38 | }
39 | for _, setting := range buildInfo.Settings {
40 | if setting.Key != key {
41 | continue
42 | }
43 | return setting.Value, true
44 | }
45 | return "", false
46 | }
47 |
48 | // Version returns the semantic version of the build.
49 | // Use golang.org/x/mod/semver to compare versions.
50 | func Version() string {
51 | readVersion.Do(func() {
52 | revision, valid := revision()
53 | if valid {
54 | revision = "+" + revision[:7]
55 | }
56 | if tag == "" {
57 | // This occurs when the tag hasn't been injected,
58 | // like when using "go run".
59 | // -+
60 | version = fmt.Sprintf("%s-%s%s", noVersion, develPreRelease, revision)
61 | return
62 | }
63 | version = "v" + tag
64 | // The tag must be prefixed with "v" otherwise the
65 | // semver library will return an empty string.
66 | if semver.Build(version) == "" {
67 | version += revision
68 | }
69 | })
70 | return version
71 | }
72 |
--------------------------------------------------------------------------------
/cmd/envbuilder/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/url"
7 | "os"
8 | "slices"
9 | "strings"
10 |
11 | "github.com/coder/envbuilder/options"
12 |
13 | "github.com/coder/coder/v2/codersdk"
14 | "github.com/coder/envbuilder"
15 | "github.com/coder/envbuilder/log"
16 | "github.com/coder/serpent"
17 |
18 | // *Never* remove this. Certificates are not bundled as part
19 | // of the container, so this is necessary for all connections
20 | // to not be insecure.
21 | _ "github.com/breml/rootcerts"
22 | )
23 |
24 | func main() {
25 | cmd := envbuilderCmd()
26 | err := cmd.Invoke().WithOS().Run()
27 | if err != nil {
28 | _, _ = fmt.Fprintf(os.Stderr, "error: %v", err)
29 | os.Exit(1)
30 | }
31 | }
32 |
33 | func envbuilderCmd() serpent.Command {
34 | var o options.Options
35 | cmd := serpent.Command{
36 | Use: "envbuilder",
37 | Options: o.CLI(),
38 | Handler: func(inv *serpent.Invocation) error {
39 | o.SetDefaults()
40 | var preExecs []func()
41 | preExec := func() {
42 | for _, fn := range preExecs {
43 | fn()
44 | }
45 | preExecs = nil
46 | }
47 | defer preExec() // Ensure cleanup in case of error.
48 |
49 | o.Logger = log.New(os.Stderr, o.Verbose)
50 | if o.CoderAgentURL != "" {
51 | if o.CoderAgentToken == "" {
52 | return errors.New("CODER_AGENT_URL must be set if CODER_AGENT_TOKEN is set")
53 | }
54 | u, err := url.Parse(o.CoderAgentURL)
55 | if err != nil {
56 | return fmt.Errorf("unable to parse CODER_AGENT_URL as URL: %w", err)
57 | }
58 | coderLog, closeLogs, err := log.Coder(inv.Context(), u, o.CoderAgentToken)
59 | if err == nil {
60 | o.Logger = log.Wrap(o.Logger, coderLog)
61 | preExecs = append(preExecs, func() {
62 | closeLogs()
63 | })
64 | // This adds the envbuilder subsystem.
65 | // If telemetry is enabled in a Coder deployment,
66 | // this will be reported and help us understand
67 | // envbuilder usage.
68 | if !slices.Contains(o.CoderAgentSubsystem, string(codersdk.AgentSubsystemEnvbuilder)) {
69 | o.CoderAgentSubsystem = append(o.CoderAgentSubsystem, string(codersdk.AgentSubsystemEnvbuilder))
70 | _ = os.Setenv("CODER_AGENT_SUBSYSTEM", strings.Join(o.CoderAgentSubsystem, ","))
71 | }
72 | } else {
73 | // Failure to log to Coder should cause a fatal error.
74 | o.Logger(log.LevelError, "unable to send logs to Coder: %s", err.Error())
75 | }
76 | }
77 |
78 | if o.GitSSHPrivateKeyPath != "" && o.GitSSHPrivateKeyBase64 != "" {
79 | return errors.New("cannot have both GIT_SSH_PRIVATE_KEY_PATH and GIT_SSH_PRIVATE_KEY_BASE64 set")
80 | }
81 |
82 | if o.GetCachedImage {
83 | img, err := envbuilder.RunCacheProbe(inv.Context(), o)
84 | if err != nil {
85 | o.Logger(log.LevelError, "error: %s", err)
86 | return err
87 | }
88 | digest, err := img.Digest()
89 | if err != nil {
90 | return fmt.Errorf("get cached image digest: %w", err)
91 | }
92 | _, _ = fmt.Fprintf(inv.Stdout, "ENVBUILDER_CACHED_IMAGE=%s@%s\n", o.CacheRepo, digest.String())
93 | return nil
94 | }
95 |
96 | err := envbuilder.Run(inv.Context(), o, preExec)
97 | if err != nil {
98 | o.Logger(log.LevelError, "error: %s", err)
99 | }
100 | return err
101 | },
102 | }
103 | return cmd
104 | }
105 |
--------------------------------------------------------------------------------
/devcontainer/devcontainer_test.go:
--------------------------------------------------------------------------------
1 | package devcontainer_test
2 |
3 | import (
4 | "crypto/md5"
5 | "fmt"
6 | "io"
7 | "net/url"
8 | "os"
9 | "path/filepath"
10 | "strings"
11 | "testing"
12 |
13 | "github.com/coder/envbuilder/devcontainer"
14 | "github.com/coder/envbuilder/devcontainer/features"
15 | "github.com/coder/envbuilder/testutil/registrytest"
16 | "github.com/go-git/go-billy/v5/memfs"
17 | "github.com/google/go-containerregistry/pkg/name"
18 | v1 "github.com/google/go-containerregistry/pkg/v1"
19 | "github.com/google/go-containerregistry/pkg/v1/partial"
20 | "github.com/google/go-containerregistry/pkg/v1/remote"
21 | "github.com/google/go-containerregistry/pkg/v1/types"
22 | "github.com/stretchr/testify/require"
23 | )
24 |
25 | const workingDir = "/.envbuilder"
26 |
27 | func stubLookupEnv(string) (string, bool) {
28 | return "", false
29 | }
30 |
31 | func TestParse(t *testing.T) {
32 | t.Parallel()
33 | raw := `{
34 | "build": {
35 | "dockerfile": "Dockerfile",
36 | "context": ".",
37 | },
38 | // Comments here!
39 | "image": "codercom/code-server:latest"
40 | }`
41 | parsed, err := devcontainer.Parse([]byte(raw))
42 | require.NoError(t, err)
43 | require.Equal(t, "Dockerfile", parsed.Build.Dockerfile)
44 | }
45 |
46 | func TestCompileWithFeatures(t *testing.T) {
47 | t.Parallel()
48 | registry := registrytest.New(t)
49 | featureOne := registrytest.WriteContainer(t, registry, "coder/one:tomato", features.TarLayerMediaType, map[string]any{
50 | "install.sh": "hey",
51 | "devcontainer-feature.json": features.Spec{
52 | ID: "rust",
53 | Version: "tomato",
54 | Name: "Rust",
55 | Description: "Example description!",
56 | ContainerEnv: map[string]string{
57 | "TOMATO": "example",
58 | },
59 | },
60 | })
61 | featureTwo := registrytest.WriteContainer(t, registry, "coder/two:potato", features.TarLayerMediaType, map[string]any{
62 | "install.sh": "hey",
63 | "devcontainer-feature.json": features.Spec{
64 | ID: "go",
65 | Version: "potato",
66 | Name: "Go",
67 | Description: "Example description!",
68 | ContainerEnv: map[string]string{
69 | "POTATO": "example",
70 | },
71 | Options: map[string]features.Option{
72 | "version": {
73 | Type: "string",
74 | },
75 | },
76 | },
77 | })
78 |
79 | raw := `{
80 | "build": {
81 | "dockerfile": "Dockerfile",
82 | "context": ".",
83 | },
84 | // Comments here!
85 | "image": "localhost:5000/envbuilder-test-codercom-code-server:latest",
86 | "features": {
87 | "` + featureOne + `": {},
88 | "` + featureTwo + `": "potato"
89 | }
90 | }`
91 | dc, err := devcontainer.Parse([]byte(raw))
92 | require.NoError(t, err)
93 | fs := memfs.New()
94 |
95 | featureOneMD5 := md5.Sum([]byte(featureOne))
96 | featureOneDir := fmt.Sprintf("/.envbuilder/features/one-%x", featureOneMD5[:4])
97 | featureTwoMD5 := md5.Sum([]byte(featureTwo))
98 | featureTwoDir := fmt.Sprintf("/.envbuilder/features/two-%x", featureTwoMD5[:4])
99 |
100 | t.Run("WithoutBuildContexts", func(t *testing.T) {
101 | params, err := dc.Compile(fs, "", workingDir, "", "", false, stubLookupEnv)
102 | require.NoError(t, err)
103 |
104 | require.Equal(t, `FROM localhost:5000/envbuilder-test-codercom-code-server:latest
105 |
106 | USER root
107 | # Rust tomato - Example description!
108 | WORKDIR `+featureOneDir+`
109 | ENV TOMATO=example
110 | RUN _CONTAINER_USER="1000" _REMOTE_USER="1000" ./install.sh
111 | # Go potato - Example description!
112 | WORKDIR `+featureTwoDir+`
113 | ENV POTATO=example
114 | RUN VERSION="potato" _CONTAINER_USER="1000" _REMOTE_USER="1000" ./install.sh
115 | USER 1000`, params.DockerfileContent)
116 | })
117 |
118 | t.Run("WithBuildContexts", func(t *testing.T) {
119 | params, err := dc.Compile(fs, "", workingDir, "", "", true, stubLookupEnv)
120 | require.NoError(t, err)
121 |
122 | registryHost := strings.TrimPrefix(registry, "http://")
123 |
124 | require.Equal(t, `FROM scratch AS envbuilder_feature_one
125 | COPY --from=`+registryHost+`/coder/one / /
126 |
127 | FROM scratch AS envbuilder_feature_two
128 | COPY --from=`+registryHost+`/coder/two / /
129 |
130 | FROM localhost:5000/envbuilder-test-codercom-code-server:latest
131 |
132 | USER root
133 | # Rust tomato - Example description!
134 | WORKDIR /.envbuilder/features/one
135 | ENV TOMATO=example
136 | RUN --mount=type=bind,from=envbuilder_feature_one,target=/.envbuilder/features/one,rw _CONTAINER_USER="1000" _REMOTE_USER="1000" ./install.sh
137 | # Go potato - Example description!
138 | WORKDIR /.envbuilder/features/two
139 | ENV POTATO=example
140 | RUN --mount=type=bind,from=envbuilder_feature_two,target=/.envbuilder/features/two,rw VERSION="potato" _CONTAINER_USER="1000" _REMOTE_USER="1000" ./install.sh
141 | USER 1000`, params.DockerfileContent)
142 |
143 | require.Equal(t, map[string]string{
144 | registryHost + "/coder/one": featureOneDir,
145 | registryHost + "/coder/two": featureTwoDir,
146 | }, params.FeatureContexts)
147 | })
148 | }
149 |
150 | func TestCompileDevContainer(t *testing.T) {
151 | t.Parallel()
152 | t.Run("WithImage", func(t *testing.T) {
153 | t.Parallel()
154 | fs := memfs.New()
155 | dc := &devcontainer.Spec{
156 | Image: "localhost:5000/envbuilder-test-ubuntu:latest",
157 | }
158 | params, err := dc.Compile(fs, "", workingDir, "", "", false, stubLookupEnv)
159 | require.NoError(t, err)
160 | require.Equal(t, filepath.Join(workingDir, "Dockerfile"), params.DockerfilePath)
161 | require.Equal(t, workingDir, params.BuildContext)
162 | })
163 | t.Run("WithBuild", func(t *testing.T) {
164 | t.Parallel()
165 | fs := memfs.New()
166 | dc := &devcontainer.Spec{
167 | Build: devcontainer.BuildSpec{
168 | Dockerfile: "Dockerfile",
169 | Context: ".",
170 | Args: map[string]string{
171 | "ARG1": "value1",
172 | "ARG2": "${localWorkspaceFolderBasename}",
173 | },
174 | },
175 | }
176 | dcDir := "/workspaces/coder/.devcontainer"
177 | err := fs.MkdirAll(dcDir, 0o755)
178 | require.NoError(t, err)
179 | file, err := fs.OpenFile(filepath.Join(dcDir, "Dockerfile"), os.O_CREATE|os.O_WRONLY, 0o644)
180 | require.NoError(t, err)
181 | _, err = io.WriteString(file, "FROM localhost:5000/envbuilder-test-ubuntu:latest")
182 | require.NoError(t, err)
183 | _ = file.Close()
184 | params, err := dc.Compile(fs, dcDir, workingDir, "", "/var/workspace", false, stubLookupEnv)
185 | require.NoError(t, err)
186 | require.Equal(t, "ARG1=value1", params.BuildArgs[0])
187 | require.Equal(t, "ARG2=workspace", params.BuildArgs[1])
188 | require.Equal(t, filepath.Join(dcDir, "Dockerfile"), params.DockerfilePath)
189 | require.Equal(t, dcDir, params.BuildContext)
190 | })
191 | }
192 |
193 | func TestImageFromDockerfile(t *testing.T) {
194 | t.Parallel()
195 | for _, tc := range []struct {
196 | content string
197 | image string
198 | }{{
199 | content: "FROM ubuntu",
200 | image: "index.docker.io/library/ubuntu:latest",
201 | }, {
202 | content: "ARG VARIANT=bionic\nFROM ubuntu:$VARIANT",
203 | image: "index.docker.io/library/ubuntu:bionic",
204 | }, {
205 | content: "ARG VARIANT=\"3.10\"\nFROM mcr.microsoft.com/devcontainers/python:0-${VARIANT}",
206 | image: "mcr.microsoft.com/devcontainers/python:0-3.10",
207 | }, {
208 | content: "ARG VARIANT=\"3.10\"\nFROM mcr.microsoft.com/devcontainers/python:0-$VARIANT ",
209 | image: "mcr.microsoft.com/devcontainers/python:0-3.10",
210 | }} {
211 | tc := tc
212 | t.Run(tc.image, func(t *testing.T) {
213 | t.Parallel()
214 | ref, err := devcontainer.ImageFromDockerfile(tc.content)
215 | require.NoError(t, err)
216 | require.Equal(t, tc.image, ref.Name())
217 | })
218 | }
219 | }
220 |
221 | func TestUserFrom(t *testing.T) {
222 | t.Parallel()
223 |
224 | t.Run("Image", func(t *testing.T) {
225 | t.Parallel()
226 | registry := registrytest.New(t)
227 | image, err := partial.UncompressedToImage(emptyImage{configFile: &v1.ConfigFile{
228 | Config: v1.Config{
229 | User: "example",
230 | },
231 | }})
232 | require.NoError(t, err)
233 |
234 | parsed, err := url.Parse("http://" + registry)
235 | require.NoError(t, err)
236 | parsed.Path = "coder/test:latest"
237 | ref, err := name.ParseReference(strings.TrimPrefix(parsed.String(), "http://"))
238 | require.NoError(t, err)
239 | err = remote.Write(ref, image)
240 | require.NoError(t, err)
241 |
242 | user, err := devcontainer.UserFromImage(ref)
243 | require.NoError(t, err)
244 | require.Equal(t, "example", user)
245 | })
246 |
247 | t.Run("Dockerfile", func(t *testing.T) {
248 | t.Parallel()
249 | tests := []struct {
250 | name string
251 | content string
252 | user string
253 | }{
254 | {
255 | name: "Empty",
256 | content: "FROM scratch",
257 | user: "",
258 | },
259 | {
260 | name: "User",
261 | content: "FROM scratch\nUSER kyle",
262 | user: "kyle",
263 | },
264 | {
265 | name: "Env with default",
266 | content: "FROM scratch\nENV MYUSER=maf\nUSER ${MYUSER}",
267 | user: "${MYUSER}", // This should be "maf" but the current implementation doesn't support this.
268 | },
269 | {
270 | name: "Env var with default",
271 | content: "FROM scratch\nUSER ${MYUSER:-maf}",
272 | user: "${MYUSER:-maf}", // This should be "maf" but the current implementation doesn't support this.
273 | },
274 | {
275 | name: "Arg",
276 | content: "FROM scratch\nARG MYUSER\nUSER ${MYUSER}",
277 | user: "${MYUSER}", // This should be "" or populated but the current implementation doesn't support this.
278 | },
279 | {
280 | name: "Arg with default",
281 | content: "FROM scratch\nARG MYUSER=maf\nUSER ${MYUSER}",
282 | user: "${MYUSER}", // This should be "maf" but the current implementation doesn't support this.
283 | },
284 | }
285 | for _, tt := range tests {
286 | t.Run(tt.name, func(t *testing.T) {
287 | t.Parallel()
288 | user, err := devcontainer.UserFromDockerfile(tt.content)
289 | require.NoError(t, err)
290 | require.Equal(t, tt.user, user)
291 | })
292 | }
293 | })
294 |
295 | t.Run("Multi-stage", func(t *testing.T) {
296 | t.Parallel()
297 |
298 | registry := registrytest.New(t)
299 | for tag, user := range map[string]string{
300 | "one": "maf",
301 | "two": "fam",
302 | } {
303 | image, err := partial.UncompressedToImage(emptyImage{configFile: &v1.ConfigFile{
304 | Config: v1.Config{
305 | User: user,
306 | },
307 | }})
308 | require.NoError(t, err)
309 | parsed, err := url.Parse("http://" + registry)
310 | require.NoError(t, err)
311 | parsed.Path = "coder/test:" + tag
312 | ref, err := name.ParseReference(strings.TrimPrefix(parsed.String(), "http://"))
313 | fmt.Println(ref)
314 | require.NoError(t, err)
315 | err = remote.Write(ref, image)
316 | require.NoError(t, err)
317 | }
318 |
319 | tests := []struct {
320 | name string
321 | images map[string]string
322 | content string
323 | user string
324 | }{
325 | {
326 | name: "Single",
327 | content: "FROM coder/test:one",
328 | user: "maf",
329 | },
330 | {
331 | name: "Multi",
332 | content: "FROM ubuntu AS u\nFROM coder/test:two",
333 | user: "fam",
334 | },
335 | {
336 | name: "Multi-2",
337 | content: "FROM coder/test:two AS two\nUSER maffam\nFROM coder/test:one AS one",
338 | user: "maf",
339 | },
340 | {
341 | name: "Multi-3",
342 | content: "FROM coder/test:two AS two\nFROM coder/test:one AS one\nUSER fammaf",
343 | user: "fammaf",
344 | },
345 | {
346 | name: "Multi-4",
347 | content: `FROM ubuntu AS a
348 | USER root
349 | RUN useradd --create-home pickme
350 | USER pickme
351 | FROM a AS other
352 | USER root
353 | RUN useradd --create-home notme
354 | USER notme
355 | FROM a`,
356 | user: "pickme",
357 | },
358 | }
359 | for _, tt := range tests {
360 | t.Run(tt.name, func(t *testing.T) {
361 | t.Parallel()
362 |
363 | content := strings.ReplaceAll(tt.content, "coder/test", strings.TrimPrefix(registry, "http://")+"/coder/test")
364 |
365 | user, err := devcontainer.UserFromDockerfile(content)
366 | require.NoError(t, err)
367 | require.Equal(t, tt.user, user)
368 | })
369 | }
370 | })
371 | }
372 |
373 | type emptyImage struct {
374 | configFile *v1.ConfigFile
375 | }
376 |
377 | func (i emptyImage) MediaType() (types.MediaType, error) {
378 | return types.DockerManifestSchema2, nil
379 | }
380 |
381 | func (i emptyImage) RawConfigFile() ([]byte, error) {
382 | return partial.RawConfigFile(i)
383 | }
384 |
385 | func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) {
386 | return i.configFile, nil
387 | }
388 |
389 | func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) {
390 | return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h)
391 | }
392 |
--------------------------------------------------------------------------------
/devcontainer/features/features.go:
--------------------------------------------------------------------------------
1 | package features
2 |
3 | import (
4 | "archive/tar"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "os"
10 | "path/filepath"
11 | "regexp"
12 | "sort"
13 | "strconv"
14 | "strings"
15 |
16 | "github.com/go-git/go-billy/v5"
17 | "github.com/google/go-containerregistry/pkg/name"
18 | "github.com/google/go-containerregistry/pkg/v1/remote"
19 | "github.com/otiai10/copy"
20 | "github.com/tailscale/hujson"
21 | )
22 |
23 | func extractFromImage(fs billy.Filesystem, directory, reference string) error {
24 | ref, err := name.ParseReference(reference)
25 | if err != nil {
26 | return fmt.Errorf("parse feature ref %s: %w", reference, err)
27 | }
28 | image, err := remote.Image(ref)
29 | if err != nil {
30 | return fmt.Errorf("fetch feature image %s: %w", reference, err)
31 | }
32 | manifest, err := image.Manifest()
33 | if err != nil {
34 | return fmt.Errorf("fetch feature manifest %s: %w", reference, err)
35 | }
36 |
37 | var tarLayer *tar.Reader
38 | for _, manifestLayer := range manifest.Layers {
39 | if manifestLayer.MediaType != TarLayerMediaType {
40 | continue
41 | }
42 | layer, err := image.LayerByDigest(manifestLayer.Digest)
43 | if err != nil {
44 | return fmt.Errorf("fetch feature layer %s: %w", reference, err)
45 | }
46 | layerReader, err := layer.Uncompressed()
47 | if err != nil {
48 | return fmt.Errorf("uncompress feature layer %s: %w", reference, err)
49 | }
50 | tarLayer = tar.NewReader(layerReader)
51 | break
52 | }
53 | if tarLayer == nil {
54 | return fmt.Errorf("no tar layer found with media type %q: are you sure this is a devcontainer feature?", TarLayerMediaType)
55 | }
56 |
57 | for {
58 | header, err := tarLayer.Next()
59 | if err == io.EOF {
60 | break
61 | }
62 | if err != nil {
63 | return fmt.Errorf("read feature layer %s: %w", reference, err)
64 | }
65 | path := filepath.Join(directory, header.Name)
66 | switch header.Typeflag {
67 | case tar.TypeDir:
68 | err = fs.MkdirAll(path, 0o755)
69 | if err != nil {
70 | return fmt.Errorf("mkdir %s: %w", path, err)
71 | }
72 | case tar.TypeReg:
73 | outFile, err := fs.Create(path)
74 | if err != nil {
75 | return fmt.Errorf("create %s: %w", path, err)
76 | }
77 | _, err = io.Copy(outFile, tarLayer)
78 | if err != nil {
79 | return fmt.Errorf("copy %s: %w", path, err)
80 | }
81 | err = outFile.Close()
82 | if err != nil {
83 | return fmt.Errorf("close %s: %w", path, err)
84 | }
85 | default:
86 | return fmt.Errorf("unknown type %d in %s", header.Typeflag, path)
87 | }
88 | }
89 | return nil
90 | }
91 |
92 | // Extract unpacks the feature from the image and returns the
93 | // parsed specification.
94 | func Extract(fs billy.Filesystem, devcontainerDir, directory, reference string) (*Spec, error) {
95 | if strings.HasPrefix(reference, "./") {
96 | if err := copy.Copy(filepath.Join(devcontainerDir, reference), directory, copy.Options{
97 | PreserveTimes: true,
98 | PreserveOwner: true,
99 | OnSymlink: func(src string) copy.SymlinkAction {
100 | return copy.Shallow
101 | },
102 | OnError: func(src, dest string, err error) error {
103 | if err == nil {
104 | return nil
105 | }
106 | return fmt.Errorf("copy error: %q -> %q: %w", reference, directory, err)
107 | },
108 | }); err != nil {
109 | return nil, err
110 | }
111 | } else if err := extractFromImage(fs, directory, reference); err != nil {
112 | return nil, err
113 | }
114 |
115 | installScriptPath := filepath.Join(directory, "install.sh")
116 | _, err := fs.Stat(installScriptPath)
117 | if err != nil {
118 | if errors.Is(err, os.ErrNotExist) {
119 | return nil, errors.New("install.sh must be in the root of the feature")
120 | }
121 | return nil, fmt.Errorf("stat install.sh: %w", err)
122 | }
123 | chmodder, ok := fs.(interface {
124 | Chmod(name string, mode os.FileMode) error
125 | })
126 | if ok {
127 | // For some reason the filesystem abstraction doesn't support chmod.
128 | // https://github.com/src-d/go-billy/issues/56
129 | err = chmodder.Chmod(installScriptPath, 0o755)
130 | }
131 | if err != nil {
132 | return nil, fmt.Errorf("chmod install.sh: %w", err)
133 | }
134 | featureFile, err := fs.Open(filepath.Join(directory, "devcontainer-feature.json"))
135 | if err != nil {
136 | if errors.Is(err, os.ErrNotExist) {
137 | return nil, errors.New("devcontainer-feature.json must be in the root of the feature")
138 | }
139 | return nil, fmt.Errorf("open devcontainer-feature.json: %w", err)
140 | }
141 | defer featureFile.Close()
142 | featureFileBytes, err := io.ReadAll(featureFile)
143 | if err != nil {
144 | return nil, fmt.Errorf("read devcontainer-feature.json: %w", err)
145 | }
146 | standardizedFeatureFileBytes, err := hujson.Standardize(featureFileBytes)
147 | if err != nil {
148 | return nil, fmt.Errorf("standardize devcontainer-feature.json: %w", err)
149 | }
150 | var spec *Spec
151 | if err := json.Unmarshal(standardizedFeatureFileBytes, &spec); err != nil {
152 | return nil, fmt.Errorf("decode devcontainer-feature.json: %w", err)
153 | }
154 | // See https://containers.dev/implementors/features/#devcontainer-feature-json-properties
155 | if spec.ID == "" {
156 | return nil, errors.New(`devcontainer-feature.json: id is required`)
157 | }
158 | if spec.Version == "" {
159 | return nil, errors.New(`devcontainer-feature.json: version is required`)
160 | }
161 | if spec.Name == "" {
162 | return nil, errors.New(`devcontainer-feature.json: name is required`)
163 | }
164 |
165 | return spec, nil
166 | }
167 |
168 | const (
169 | TarLayerMediaType = "application/vnd.devcontainers.layer.v1+tar"
170 | )
171 |
172 | type Option struct {
173 | Type string `json:"type"` // "boolean" or "string"
174 | Proposals []string `json:"proposals"`
175 | Enum []string `json:"enum"`
176 | Default any `json:"default"` // boolean or string
177 | Description string `json:"description"`
178 | }
179 |
180 | type Spec struct {
181 | ID string `json:"id"`
182 | Version string `json:"version"`
183 | Name string `json:"name"`
184 | Description string `json:"description"`
185 | DocumentationURL string `json:"documentationURL"`
186 | LicenseURL string `json:"licenseURL"`
187 | Keywords []string `json:"keywords"`
188 | Options map[string]Option `json:"options"`
189 | ContainerEnv map[string]string `json:"containerEnv"`
190 | }
191 |
192 | // Extract unpacks the feature from the image and returns a set of lines
193 | // that should be appended to a Dockerfile to install the feature.
194 | func (s *Spec) Compile(featureRef, featureName, featureDir, containerUser, remoteUser string, useBuildContexts bool, options map[string]any) (string, string, error) {
195 | // TODO not sure how we figure out _(REMOTE|CONTAINER)_USER_HOME
196 | // as per the feature spec.
197 | // See https://containers.dev/implementors/features/#user-env-var
198 | var fromDirective string
199 | runDirective := []string{
200 | "_CONTAINER_USER=" + strconv.Quote(containerUser),
201 | "_REMOTE_USER=" + strconv.Quote(remoteUser),
202 | }
203 | for key, value := range s.Options {
204 | strValue := fmt.Sprint(value.Default)
205 | provided, ok := options[key]
206 | if ok {
207 | strValue = fmt.Sprint(provided)
208 | // delete so we can check if there are any unknown options
209 | delete(options, key)
210 | }
211 | runDirective = append(runDirective, fmt.Sprintf(`%s=%q`, convertOptionNameToEnv(key), strValue))
212 | }
213 | if len(options) > 0 {
214 | return "", "", fmt.Errorf("unknown option: %v", options)
215 | }
216 | // It's critical that the Dockerfile produced is deterministic,
217 | // regardless of map iteration order.
218 | sort.Strings(runDirective)
219 | // See https://containers.dev/implementors/features/#invoking-installsh
220 | if useBuildContexts {
221 | // Use a deterministic target directory to make the resulting Dockerfile cacheable
222 | featureDir = "/.envbuilder/features/" + featureName
223 | fromDirective = "FROM scratch AS envbuilder_feature_" + featureName + "\nCOPY --from=" + featureRef + " / /\n"
224 | runDirective = append([]string{"RUN", "--mount=type=bind,from=envbuilder_feature_" + featureName + ",target=" + featureDir + ",rw"}, runDirective...)
225 | } else {
226 | runDirective = append([]string{"RUN"}, runDirective...)
227 | }
228 | runDirective = append(runDirective, "./install.sh")
229 |
230 | comment := ""
231 | if s.Name != "" {
232 | comment += "# " + s.Name
233 | }
234 | if s.Version != "" {
235 | comment += " " + s.Version
236 | }
237 | if s.Description != "" {
238 | comment += " - " + s.Description
239 | }
240 | lines := []string{}
241 | if comment != "" {
242 | lines = append(lines, comment)
243 | }
244 | lines = append(lines, "WORKDIR "+featureDir)
245 | envKeys := make([]string, 0, len(s.ContainerEnv))
246 | for key := range s.ContainerEnv {
247 | envKeys = append(envKeys, key)
248 | }
249 | // It's critical that the Dockerfile produced is deterministic,
250 | // regardless of map iteration order.
251 | sort.Strings(envKeys)
252 | for _, key := range envKeys {
253 | lines = append(lines, fmt.Sprintf("ENV %s=%s", key, s.ContainerEnv[key]))
254 | }
255 | lines = append(lines, strings.Join(runDirective, " "))
256 |
257 | return fromDirective, strings.Join(lines, "\n"), nil
258 | }
259 |
260 | var (
261 | matchNonWords = regexp.MustCompile(`/[^\w_]/g`)
262 | matchPrefixDigitsAndUnderscores = regexp.MustCompile(`/^[\d_]+/g`)
263 | )
264 |
265 | // See https://containers.dev/implementors/features/#option-resolution
266 | func convertOptionNameToEnv(optionName string) string {
267 | optionName = matchNonWords.ReplaceAllString(optionName, "_")
268 | optionName = matchPrefixDigitsAndUnderscores.ReplaceAllString(optionName, "")
269 | return strings.ToUpper(optionName)
270 | }
271 |
--------------------------------------------------------------------------------
/devcontainer/features/features_test.go:
--------------------------------------------------------------------------------
1 | package features_test
2 |
3 | import (
4 | "strings"
5 | "testing"
6 |
7 | "github.com/coder/envbuilder/devcontainer/features"
8 | "github.com/coder/envbuilder/testutil/registrytest"
9 | "github.com/go-git/go-billy/v5/memfs"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
13 | func TestExtract(t *testing.T) {
14 | t.Parallel()
15 | t.Run("MissingMediaType", func(t *testing.T) {
16 | t.Parallel()
17 | registry := registrytest.New(t)
18 | ref := registrytest.WriteContainer(t, registry, "coder/test:latest", "some/type", nil)
19 | fs := memfs.New()
20 | _, err := features.Extract(fs, "", "/", ref)
21 | require.ErrorContains(t, err, "no tar layer found")
22 | })
23 | t.Run("MissingInstallScript", func(t *testing.T) {
24 | t.Parallel()
25 | registry := registrytest.New(t)
26 | ref := registrytest.WriteContainer(t, registry, "coder/test:latest", features.TarLayerMediaType, map[string]any{
27 | "devcontainer-feature.json": "{}",
28 | })
29 | fs := memfs.New()
30 | _, err := features.Extract(fs, "", "/", ref)
31 | require.ErrorContains(t, err, "install.sh")
32 | })
33 | t.Run("MissingFeatureFile", func(t *testing.T) {
34 | t.Parallel()
35 | registry := registrytest.New(t)
36 | ref := registrytest.WriteContainer(t, registry, "coder/test:latest", features.TarLayerMediaType, map[string]any{
37 | "install.sh": "hey",
38 | })
39 | fs := memfs.New()
40 | _, err := features.Extract(fs, "", "/", ref)
41 | require.ErrorContains(t, err, "devcontainer-feature.json")
42 | })
43 | t.Run("MissingFeatureProperties", func(t *testing.T) {
44 | t.Parallel()
45 | registry := registrytest.New(t)
46 | ref := registrytest.WriteContainer(t, registry, "coder/test:latest", features.TarLayerMediaType, map[string]any{
47 | "install.sh": "hey",
48 | "devcontainer-feature.json": features.Spec{},
49 | })
50 | fs := memfs.New()
51 | _, err := features.Extract(fs, "", "/", ref)
52 | require.ErrorContains(t, err, "id is required")
53 | })
54 | t.Run("Success", func(t *testing.T) {
55 | t.Parallel()
56 | registry := registrytest.New(t)
57 | ref := registrytest.WriteContainer(t, registry, "coder/test:latest", features.TarLayerMediaType, map[string]any{
58 | "install.sh": "hey",
59 | "devcontainer-feature.json": features.Spec{
60 | ID: "go",
61 | Version: "1.0.0",
62 | Name: "Go",
63 | },
64 | })
65 | fs := memfs.New()
66 | _, err := features.Extract(fs, "", "/", ref)
67 | require.NoError(t, err)
68 | })
69 | }
70 |
71 | func TestCompile(t *testing.T) {
72 | t.Parallel()
73 | t.Run("UnknownOption", func(t *testing.T) {
74 | t.Parallel()
75 | spec := &features.Spec{}
76 | _, _, err := spec.Compile("coder/test:latest", "test", "", "containerUser", "remoteUser", false, map[string]any{
77 | "unknown": "value",
78 | })
79 | require.ErrorContains(t, err, "unknown option")
80 | })
81 | t.Run("Basic", func(t *testing.T) {
82 | t.Parallel()
83 | spec := &features.Spec{}
84 | _, directive, err := spec.Compile("coder/test:latest", "test", "/", "containerUser", "remoteUser", false, nil)
85 | require.NoError(t, err)
86 | require.Equal(t, "WORKDIR /\nRUN _CONTAINER_USER=\"containerUser\" _REMOTE_USER=\"remoteUser\" ./install.sh", strings.TrimSpace(directive))
87 | })
88 | t.Run("ContainerEnv", func(t *testing.T) {
89 | t.Parallel()
90 | spec := &features.Spec{
91 | ContainerEnv: map[string]string{
92 | "FOO": "bar",
93 | },
94 | }
95 | _, directive, err := spec.Compile("coder/test:latest", "test", "/", "containerUser", "remoteUser", false, nil)
96 | require.NoError(t, err)
97 | require.Equal(t, "WORKDIR /\nENV FOO=bar\nRUN _CONTAINER_USER=\"containerUser\" _REMOTE_USER=\"remoteUser\" ./install.sh", strings.TrimSpace(directive))
98 | })
99 | t.Run("OptionsEnv", func(t *testing.T) {
100 | t.Parallel()
101 | spec := &features.Spec{
102 | Options: map[string]features.Option{
103 | "foo": {
104 | Default: "bar",
105 | },
106 | },
107 | }
108 | _, directive, err := spec.Compile("coder/test:latest", "test", "/", "containerUser", "remoteUser", false, nil)
109 | require.NoError(t, err)
110 | require.Equal(t, "WORKDIR /\nRUN FOO=\"bar\" _CONTAINER_USER=\"containerUser\" _REMOTE_USER=\"remoteUser\" ./install.sh", strings.TrimSpace(directive))
111 | })
112 | t.Run("BuildContext", func(t *testing.T) {
113 | t.Parallel()
114 | spec := &features.Spec{}
115 | fromDirective, runDirective, err := spec.Compile("coder/test:latest", "test", "/.envbuilder/feature/test-d8e8fc", "containerUser", "remoteUser", true, nil)
116 | require.NoError(t, err)
117 | require.Equal(t, "FROM scratch AS envbuilder_feature_test\nCOPY --from=coder/test:latest / /", strings.TrimSpace(fromDirective))
118 | require.Equal(t, "WORKDIR /.envbuilder/features/test\nRUN --mount=type=bind,from=envbuilder_feature_test,target=/.envbuilder/features/test,rw _CONTAINER_USER=\"containerUser\" _REMOTE_USER=\"remoteUser\" ./install.sh", strings.TrimSpace(runDirective))
119 | })
120 | }
121 |
--------------------------------------------------------------------------------
/devcontainer/script.go:
--------------------------------------------------------------------------------
1 | package devcontainer
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "os"
9 | "os/exec"
10 | "strings"
11 | "syscall"
12 |
13 | "golang.org/x/sync/errgroup"
14 | )
15 |
16 | type LifecycleScript struct {
17 | shellCommands map[string]string
18 | nonShellCommands map[string][]string
19 | }
20 |
21 | func (s *LifecycleScript) IsEmpty() bool {
22 | return len(s.shellCommands) == 0 && len(s.nonShellCommands) == 0
23 | }
24 |
25 | func (s *LifecycleScript) UnmarshalJSON(data []byte) error {
26 | var v any
27 | if err := json.Unmarshal(data, &v); err != nil {
28 | return err
29 | }
30 |
31 | switch v := v.(type) {
32 | case string:
33 | s.shellCommands = map[string]string{
34 | v: v,
35 | }
36 | case []any:
37 | args, err := argsFromUntypedSlice(v)
38 | if err != nil {
39 | return err
40 | }
41 | desc := strings.Join(args, " ")
42 | s.nonShellCommands = map[string][]string{
43 | desc: args,
44 | }
45 | case map[string]any:
46 | for desc, command := range v {
47 | switch command := command.(type) {
48 | case string:
49 | if s.shellCommands == nil {
50 | s.shellCommands = make(map[string]string, 1)
51 | }
52 | s.shellCommands[desc] = command
53 | case []any:
54 | args, err := argsFromUntypedSlice(command)
55 | if err != nil {
56 | return err
57 | }
58 | if s.nonShellCommands == nil {
59 | s.nonShellCommands = make(map[string][]string, 1)
60 | }
61 | s.nonShellCommands[desc] = args
62 | }
63 | }
64 | }
65 | return nil
66 | }
67 |
68 | func argsFromUntypedSlice(args []any) ([]string, error) {
69 | if len(args) == 0 {
70 | return nil, errors.New("empty command array")
71 | }
72 | s := make([]string, 0, len(args))
73 | for _, arg := range args {
74 | arg, ok := arg.(string)
75 | if !ok {
76 | return nil, fmt.Errorf("invalid command arg with non-string type: %v", arg)
77 | }
78 | s = append(s, arg)
79 | }
80 | return s, nil
81 | }
82 |
// Execute runs every lifecycle command in s concurrently as the given
// uid/gid and blocks until all of them have exited, returning the first
// non-nil error encountered.
//
// Child processes inherit the current environment and stdin/stdout/stderr.
//
// NOTE(review): ctx is accepted but never used — commands already running
// are not cancelled when the context is done; confirm whether cancellation
// should be wired in.
func (s *LifecycleScript) Execute(ctx context.Context, uid, gid int) error {
	// Shared attributes for every forked child: inherit our environment
	// and stdio, but run under the requested credentials.
	procAttr := &syscall.ProcAttr{
		Env:   os.Environ(),
		Files: []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()},
		Sys: &syscall.SysProcAttr{
			Credential: &syscall.Credential{
				Uid: uint32(uid),
				Gid: uint32(gid),
			},
		},
	}

	var eg errgroup.Group
	for desc, command := range s.shellCommands {
		// Copy loop variables so each goroutine captures its own values
		// (required before Go 1.22 loop-variable semantics).
		desc := desc
		command := command
		eg.Go(func() error {
			// Shell commands run via /bin/sh -c so shell syntax
			// (pipes, &&, variable expansion) works.
			pid, err := syscall.ForkExec("/bin/sh", []string{"/bin/sh", "-c", command}, procAttr)
			if err != nil {
				return fmt.Errorf("lifecycle command %q failed: %v", desc, err)
			}
			return waitForCommand(desc, pid)
		})
	}

	for desc, commandAndArgs := range s.nonShellCommands {
		desc := desc
		commandAndArgs := commandAndArgs
		eg.Go(func() error {
			// Exec-style commands bypass the shell; resolve the binary
			// via PATH first because ForkExec does not search PATH.
			path, err := exec.LookPath(commandAndArgs[0])
			if err != nil {
				return err
			}
			pid, err := syscall.ForkExec(path, commandAndArgs, procAttr)
			if err != nil {
				return fmt.Errorf("failed to exec lifecycle command %q: %v", desc, err)
			}
			return waitForCommand(desc, pid)
		})
	}

	// Wait for all commands; the first error (if any) is returned.
	return eg.Wait()
}
126 |
// waitForCommand blocks until the process identified by pid exits.
// It returns an error if the process cannot be found or waited on, or
// if it terminates with a non-zero exit status. desc is only used to
// label errors with the lifecycle command's description.
func waitForCommand(desc string, pid int) error {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return fmt.Errorf("failed to look up process for lifecycle command %q: %v", desc, err)
	}
	state, err := proc.Wait()
	if err != nil {
		return fmt.Errorf("failed to wait for lifecycle command %q: %v", desc, err)
	}
	if code := state.ExitCode(); code != 0 {
		return fmt.Errorf("lifecycle command %q failed with status %d", desc, code)
	}
	return nil
}
141 |
142 | // ScriptLines returns shell syntax for executing the commands in the
143 | // LifecycleScript.
144 | //
145 | // TODO: Technically the commands could be executed in parallel, but that would
146 | // add a bit of complexity to do portably.
147 | func (s *LifecycleScript) ScriptLines() string {
148 | var lines string
149 | for _, command := range s.shellCommands {
150 | lines += command + "\n"
151 | }
152 | for _, commandAndArgs := range s.nonShellCommands {
153 | // Quote the command arguments to prevent shell interpretation.
154 | quotedCommandAndArgs := make([]string, len(commandAndArgs))
155 | for i := range commandAndArgs {
156 | // Surround each argument with single quotes. If the
157 | // argument contains any single quotes, they are escaped
158 | // by replacing them with the sequence '"'"'. This
159 | // sequence ends the current single-quoted string,
160 | // starts and immediately ends a double-quoted string
161 | // containing a single quote, and then restarts the
162 | // single-quoted string. This approach works because in
163 | // shell syntax, adjacent strings are concatenated, so
164 | // 'arg'"'"'arg' is interpreted as arg'arg.
165 | quotedCommandAndArgs[i] = "'" + strings.ReplaceAll(commandAndArgs[i], "'", "'\"'\"'") + "'"
166 | }
167 | lines += strings.Join(quotedCommandAndArgs, " ") + "\n"
168 | }
169 | return lines
170 | }
171 |
--------------------------------------------------------------------------------
/devcontainer/script_test.go:
--------------------------------------------------------------------------------
1 | package devcontainer
2 |
3 | import (
4 | "encoding/json"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestUnmarshal(t *testing.T) {
11 | t.Parallel()
12 |
13 | tests := []struct {
14 | name string
15 | in string
16 | want LifecycleScript
17 | }{
18 | {
19 | name: "command string",
20 | in: `"echo hello"`,
21 | want: LifecycleScript{
22 | shellCommands: map[string]string{
23 | "echo hello": "echo hello",
24 | },
25 | },
26 | },
27 | {
28 | name: "command array",
29 | in: `["echo", "hello"]`,
30 | want: LifecycleScript{
31 | nonShellCommands: map[string][]string{
32 | "echo hello": {"echo", "hello"},
33 | },
34 | },
35 | },
36 | {
37 | name: "command map",
38 | in: `{"script 1": ["echo", "hello"], "script 2": ["echo", "world"], "script 3": "echo hello world"}`,
39 | want: LifecycleScript{
40 | shellCommands: map[string]string{
41 | "script 3": "echo hello world",
42 | },
43 | nonShellCommands: map[string][]string{
44 | "script 1": {"echo", "hello"},
45 | "script 2": {"echo", "world"},
46 | },
47 | },
48 | },
49 | }
50 |
51 | for _, tt := range tests {
52 | t.Run(tt.name, func(t *testing.T) {
53 | var got LifecycleScript
54 | if err := json.Unmarshal([]byte(tt.in), &got); err != nil {
55 | t.Fatal(err)
56 | }
57 | require.Equal(t, tt.want, got)
58 | })
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/docs/build-secrets.md:
--------------------------------------------------------------------------------
1 | # Build Secrets
2 |
3 | Envbuilder supports [build secrets](https://docs.docker.com/reference/dockerfile/#run---mounttypesecret). Build secrets are useful when you need to use sensitive information during the image build process and:
4 | * the secrets should not be present in the built image.
5 | * the secrets should not be accessible in the container after its build has concluded.
6 |
7 | If your Dockerfile contains directives of the form `RUN --mount=type=secret,...`, Envbuilder will attempt to mount build secrets as specified in the directive. Unlike the `docker build` command, Envbuilder does not support the `--secret` flag. Instead, Envbuilder collects build secrets from the `ENVBUILDER_BUILD_SECRETS` environment variable. These build secrets will not be present in any cached layers or images that are pushed to an image repository. Nor will they be available at run time.
8 |
9 | ## Example
10 |
11 | To illustrate build secrets in Envbuilder, let's build, push and run a container locally. These concepts will transfer to Kubernetes or other containerised environments. Note that this example is for illustrative purposes only and is not fit for production use. Production considerations are discussed in the next section.
12 |
13 | First, start a local docker registry, so that we can push and inspect the built image:
14 | ```bash
15 | docker run --rm -d -p 5000:5000 --name envbuilder-registry registry:2
16 | ```
17 |
18 | Then, prepare the files to build our container.
19 | ```bash
20 | mkdir test-build-secrets
21 | cd test-build-secrets
22 | cat << EOF > Dockerfile
23 | FROM alpine:latest
24 |
25 | RUN --mount=type=secret,id=TEST_BUILD_SECRET_A,env=TEST_BUILD_SECRET_A echo -n \$TEST_BUILD_SECRET_A | sha256sum > /foo_secret_hash.txt
26 | RUN --mount=type=secret,id=TEST_BUILD_SECRET_B,dst=/tmp/bar.secret cat /tmp/bar.secret | sha256sum > /bar_secret_hash.txt
27 | EOF
28 | cat << EOF > devcontainer.json
29 | {
30 | "build": {
31 | "dockerfile": "Dockerfile"
32 | }
33 | }
34 | EOF
35 | echo 'runtime-secret-a' > runtime-secret.txt
36 | ```
37 |
38 | The Dockerfile requires two build secrets: `TEST_BUILD_SECRET_A` and `TEST_BUILD_SECRET_B`. Their values are arbitrarily set to `secret-foo` and `secret-bar` by the command below. Building the container image writes the checksums for these secrets to disk. This illustrates that the secrets can be used in the build to enact side effects without exposing the secrets themselves.
39 |
40 | Execute the build using this command:
41 | ```bash
42 | docker run -it --rm \
43 | -e ENVBUILDER_BUILD_SECRETS='TEST_BUILD_SECRET_A=secret-foo,TEST_BUILD_SECRET_B=secret-bar' \
44 | -e ENVBUILDER_INIT_SCRIPT='/bin/sh' \
45 | -e ENVBUILDER_CACHE_REPO=$(docker inspect envbuilder-registry | jq -r '.[].NetworkSettings.IPAddress'):5000/test-container \
46 | -e ENVBUILDER_PUSH_IMAGE=1 \
47 | -v $PWD:/workspaces/empty \
48 | -v $PWD/runtime-secret.txt:/runtime-secret.txt \
49 | ghcr.io/coder/envbuilder:latest
50 | ```
51 |
52 | This will result in a shell session inside the built container.
53 | You can now verify three things:
54 |
55 | Firstly, the secrets provided to build are not available once the container is running. They are no longer on disk, nor are they in the process environment, or in `/proc/self/environ`:
56 | ```bash
57 | cat /proc/self/environ | tr '\0' '\n'
58 | printenv
59 | ```
60 | Expected output:
61 | ```bash
62 | /workspaces/empty # cat /proc/self/environ | tr '\0' '\n'
63 | HOSTNAME=c0b0ee3d5564
64 | SHLVL=2
65 | HOME=/root
66 | TERM=xterm
67 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
68 | DEVCONTAINER_CONFIG=/workspaces/empty/devcontainer.json
69 | ENVBUILDER=true
70 | TS_DEBUG_TRIM_WIREGUARD=false
71 | PWD=/workspaces/empty
72 | DEVCONTAINER=true
73 | /workspaces/empty # printenv
74 | HOSTNAME=c0b0ee3d5564
75 | SHLVL=2
76 | HOME=/root
77 | TERM=xterm
78 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
79 | DEVCONTAINER_CONFIG=/workspaces/empty/devcontainer.json
80 | ENVBUILDER=true
81 | TS_DEBUG_TRIM_WIREGUARD=false
82 | PWD=/workspaces/empty
83 | DEVCONTAINER=true
84 | /workspaces/empty #
85 | ```
86 |
87 | Secondly, the secrets were still useful during the build. The following commands show that the secrets had side effects inside the build, without remaining in the image:
88 | ```bash
89 | echo -n "secret-foo" | sha256sum
90 | cat /foo_secret_hash.txt
91 | echo -n "secret-bar" | sha256sum
92 | cat /bar_secret_hash.txt
93 | ```
94 |
95 | Notice that the first two checksums match and that the last two checksums match. Expected output:
96 | ```
97 | /workspaces/empty # echo -n "secret-foo" | sha256sum
98 | 9a888f08a057159d2ea8fb69d38c9a25e367d7ca3128035b7f6dee2ca988c3d8 -
99 | /workspaces/empty # cat /foo_secret_hash.txt
100 | 9a888f08a057159d2ea8fb69d38c9a25e367d7ca3128035b7f6dee2ca988c3d8 -
101 | /workspaces/empty # echo -n "secret-bar" | sha256sum
102 | fb1c9d1220e429b30c60d028b882f735b5af72d7b5496d9202737fe9f1d38289 -
103 | /workspaces/empty # cat /bar_secret_hash.txt
104 | fb1c9d1220e429b30c60d028b882f735b5af72d7b5496d9202737fe9f1d38289 -
105 | /workspaces/empty #
106 | ```
107 |
108 | Thirdly, the runtime secret that was mounted as a volume is still mounted into the container and accessible. This is why volumes are inappropriate analogues to native docker build secrets. However, notice further down that this runtime secret volume's contents are not present in the built image. It is therefore safe to mount a volume into envbuilder for use during runtime without fear that it will be present in the image that envbuilder builds.
109 |
110 | Finally, exit the container:
111 | ```bash
112 | exit
113 | ```
114 |
115 | ### Verifying that images are secret free
116 | To verify that the built image doesn't contain build secrets, run the following:
117 |
118 | ```bash
119 | docker pull localhost:5000/test-container:latest
120 | docker save -o test-container.tar localhost:5000/test-container
121 | mkdir -p test-container
122 | tar -xf test-container.tar -C test-container/
123 | cd test-container
124 | # Scan image layers for secrets:
125 | find . -type f | xargs tar -xOf 2>/dev/null | strings | grep -rn "secret-foo"
126 | find . -type f | xargs tar -xOf 2>/dev/null | strings | grep -rn "secret-bar"
127 | find . -type f | xargs tar -xOf 2>/dev/null | strings | grep -rn "runtime-secret"
128 | # Scan image manifests for secrets:
129 | find . -type f | xargs -n1 grep -rnI 'secret-foo'
130 | find . -type f | xargs -n1 grep -rnI 'secret-bar'
131 | find . -type f | xargs -n1 grep -rnI 'runtime-secret'
132 | cd ../
133 | ```
134 |
135 | The output of all find/grep commands should be empty.
136 | To verify that it scans correctly, replace "secret-foo" with "envbuilder" and rerun the commands. It should find strings related to Envbuilder that are not secrets.
137 |
138 | ### Cleanup
139 |
140 | Having verified that no secrets were included in the image, we can now delete the artifacts that we saved to disk and remove the containers.
141 | ```bash
142 | cd ../
143 | rm -r test-build-secrets
144 | docker stop envbuilder-registry
145 | ```
146 |
147 | ## Security and Production Use
148 | The example above ignores various security concerns for the sake of simple illustration. To use build secrets securely, consider these factors:
149 |
150 | ### Build Secret Purpose and Management
151 | Build secrets are meant for use cases where the secret should not be accessible from the built image, nor from the running container. If you need the secret at runtime, use a volume instead. Volumes that are mounted into a container will not be included in the final image, but still be available at runtime.
152 |
153 | Build secrets are only protected if they are not copied or moved from their location as designated in the `RUN` directive. If a build secret is used, care should be taken to ensure that it is not copied or otherwise persisted into an image layer beyond the control of Envbuilder.
154 |
155 | ### Who should be able to access build secrets, when and where?
156 | Anyone with sufficient access to attach directly to the container (eg. using `kubectl`), will be able to read build secrets if they attach to the container before it has concluded its build. Anyone with sufficient access to the platform that hosts the Envbuilder container will also be able to read these build secrets from where the platform stores them. This is true for other build systems, and containerised software in general.
157 |
158 | The secure way to use build secrets with Envbuilder is to deny users access to the platform that hosts Envbuilder. Only grant access to the Envbuilder container once it has concluded its build, using a trusted non-platform channel like ssh or the coder agent running inside the container. Once control has been handed to such a runtime container process, Envbuilder will have cleared all secrets that it set from the container.
159 |
160 | If secrets should be accessible at runtime, do not use build secrets. Rather, mount the secret data using a volume or environment variable. Envbuilder will not include mounted volumes in the image that it pushes to any cache repositories, but they will still be available to users that connect to the container.
161 |
162 | ### Container Management beyond Envbuilder's control
163 | Container orchestration systems mount certain artifacts into containers for various reasons. It is possible that some of these might grant indirect access to build secrets. Consider kubernetes. It will mount a service account token into running containers. Depending on the access granted to this service account token, it may be possible to read build secrets and other sensitive data using the kubernetes API. This should not be possible by default, but Envbuilder cannot provide such a guarantee.
164 |
165 | When building a system that uses Envbuilder, ensure that your platform does not expose unintended secret information to the container.
--------------------------------------------------------------------------------
/docs/caching.md:
--------------------------------------------------------------------------------
1 | # Layer Caching
2 |
3 | Cache layers in a container registry to speed up builds. To enable caching, [authenticate with your registry](#container-registry-authentication) and set the `ENVBUILDER_CACHE_REPO` environment variable.
4 |
5 | ```bash
6 | ENVBUILDER_CACHE_REPO=ghcr.io/coder/repo-cache
7 | ```
8 |
9 | To experiment without setting up a registry, use `ENVBUILDER_LAYER_CACHE_DIR`:
10 |
11 | ```bash
12 | docker run -it --rm \
13 | -v /tmp/envbuilder-cache:/cache \
14 | -e ENVBUILDER_LAYER_CACHE_DIR=/cache
15 | ...
16 | ```
17 |
18 | Each layer is stored in the registry as a separate image. The image tag is derived from a hash of the layer's contents, and this content-addressed tag is what envbuilder uses to look up and pull cached layers from the registry.
19 |
20 | The performance improvement of builds depends on the complexity of your
21 | Dockerfile. For
22 | [`coder/coder`](https://github.com/coder/coder/blob/main/dogfood/contents/Dockerfile),
23 | uncached builds take 36m while cached builds take 40s (~98% improvement).
24 |
25 | # Pushing the built image
26 |
27 | Set `ENVBUILDER_PUSH_IMAGE=1` to push the entire image to the cache repo
28 | in addition to individual layers. `ENVBUILDER_CACHE_REPO` **must** be set in
29 | order for this to work.
30 |
31 | > **Note:** this option forces Envbuilder to perform a "reproducible" build.
32 | > This will force timestamps for all newly added files to be set to the start of the UNIX epoch.
33 |
34 | # Probe Layer Cache
35 |
36 | To check for the presence of a pre-built image, set
37 | `ENVBUILDER_GET_CACHED_IMAGE=1`. Instead of building the image, this will
38 | perform a "dry-run" build of the image, consulting `ENVBUILDER_CACHE_REPO` for
39 | each layer.
40 |
41 | If any layer is found not to be present in the cache repo, envbuilder
42 | will exit with an error. Otherwise, the image will be emitted in the log output prefixed with the string
43 | `ENVBUILDER_CACHED_IMAGE=...`.
44 |
45 | # Image Caching
46 |
47 | When the base container is large, it can take a long time to pull the image from the registry. You can pre-pull the image into a read-only volume and mount it into the container to speed up builds.
48 |
49 | ```bash
50 | # Pull your base image from the registry to a local directory.
51 | docker run --rm \
52 | -v /tmp/kaniko-cache:/cache \
53 | gcr.io/kaniko-project/warmer:latest \
54 | --cache-dir=/cache \
55 | --image=
56 |
57 | # Run envbuilder with the local image cache.
58 | docker run -it --rm \
59 | -v /tmp/kaniko-cache:/image-cache:ro \
60 | -e ENVBUILDER_BASE_IMAGE_CACHE_DIR=/image-cache
61 | ```
62 |
63 | In Kubernetes, you can pre-populate a persistent volume with the same warmer image, then mount it into many workspaces with the [`ReadOnlyMany` access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).
64 |
65 | A sample script to pre-fetch a number of images can be viewed [here](../examples/kaniko-cache-warmer.sh). This can be run, for example, as a cron job to periodically fetch the latest versions of a number of base images.
66 |
--------------------------------------------------------------------------------
/docs/container-registry-auth.md:
--------------------------------------------------------------------------------
1 | # Container Registry Authentication
2 |
3 | envbuilder uses Kaniko to build containers. You should [follow their instructions](https://github.com/GoogleContainerTools/kaniko#pushing-to-different-registries) to create an authentication configuration.
4 |
5 | After you have a configuration that resembles the following:
6 |
7 | ```json
8 | {
9 | "auths": {
10 | "https://index.docker.io/v1/": {
11 | "auth": "base64-encoded-username-and-password"
12 | }
13 | }
14 | }
15 | ```
16 |
17 | `base64` encode the JSON and provide it to envbuilder as the
18 | `ENVBUILDER_DOCKER_CONFIG_BASE64` environment variable.
19 |
20 | Alternatively, the configuration file can be placed in `/.envbuilder/config.json`.
21 | The `DOCKER_CONFIG` environment variable can be used to define a custom path. The
22 | path must either be the path to a directory containing `config.json` or the full
23 | path to the JSON file itself.
24 |
25 | > [!NOTE] Providing the docker configuration through other means than the
26 | > `ENVBUILDER_DOCKER_CONFIG_BASE64` environment variable will leave the
27 | > configuration file in the container filesystem. This may be a security risk.
28 |
29 | When running `envbuilder` in Kubernetes, you can create an `ImagePullSecret` and
30 | pass it into the pod as a volume mount. This example will work for all registries.
31 |
32 | ```shell
33 | # Artifactory example
34 | kubectl create secret docker-registry regcred \
35 | --docker-server=my-artifactory.jfrog.io \
36 | --docker-username=read-only \
37 | --docker-password=secret-pass \
38 | --docker-email=me@example.com \
39 | -n coder
40 | ```
41 |
42 | ```hcl
43 | resource "kubernetes_deployment" "example" {
44 | metadata {
45 | namespace = "coder"
46 | }
47 | spec {
48 | spec {
49 | container {
50 | # Define the volumeMount with the pull credentials
51 | volume_mount {
52 | name = "docker-config-volume"
53 | mount_path = "/.envbuilder/config.json"
54 | sub_path = ".dockerconfigjson"
55 | }
56 | }
57 | # Define the volume which maps to the pull credentials
58 | volume {
59 | name = "docker-config-volume"
60 | secret {
61 | secret_name = "regcred"
62 | }
63 | }
64 | }
65 | }
66 | }
67 | ```
68 |
69 | ## Docker Hub
70 |
71 | Authenticate with `docker login` to generate `~/.docker/config.json`. Encode this file using the `base64` command:
72 |
73 | ```bash
74 | $ base64 -w0 ~/.docker/config.json
75 | ewoJImF1dGhzIjogewoJCSJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CgkJCSJhdXRoIjogImJhc2U2NCBlbmNvZGVkIHRva2VuIgoJCX0KCX0KfQo=
76 | ```
77 |
78 | Provide the encoded JSON config to envbuilder:
79 |
80 | ```env
81 | ENVBUILDER_DOCKER_CONFIG_BASE64=ewoJImF1dGhzIjogewoJCSJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOiB7CgkJCSJhdXRoIjogImJhc2U2NCBlbmNvZGVkIHRva2VuIgoJCX0KCX0KfQo=
82 | ```
83 |
84 | ## Docker-in-Docker
85 |
86 | See [here](./docker.md) for instructions on running Docker containers inside
87 | environments built by Envbuilder.
88 |
--------------------------------------------------------------------------------
/docs/docker.md:
--------------------------------------------------------------------------------
1 | # Docker inside Envbuilder
2 |
3 | There are a number of approaches you can use to have access to a Docker daemon
4 | from inside Envbuilder.
5 |
6 | > Note: some of the below methods involve setting `ENVBUILDER_INIT_SCRIPT` to
7 | > work around the lack of an init system inside the Docker container.
8 | > If you are attempting to use the below approaches with [Coder](https://github.com/coder/coder),
9 | > you may need to instead add the relevant content of the init script to your
10 | > agent startup script in your template.
11 | > For example:
12 | >
13 | > ```terraform
14 | > resource "coder_agent" "dev" {
15 | > ...
16 | > startup_script = <<-EOT
17 | > set -eux -o pipefail
18 | > nohup dockerd > /var/log/docker.log 2>&1 &
19 | > EOT
20 | > }
21 | > ```
22 |
23 | ## Docker Outside of Docker (DooD)
24 |
25 | **Security:** None
26 | **Convenience:** High
27 |
28 | This approach re-uses the host Docker socket and passes it inside the container.
29 | It is the simplest approach, but offers **no security** -- any process inside the
30 | container that can connect to the Docker socket will have access to the
31 | underlying host.
32 | Only use it if you are the only person using the Docker socket (for example, if
33 | you are experimenting on your own workstation).
34 |
35 | Example:
36 |
37 | ```console
38 | docker run -it --rm \
39 | -v /tmp/envbuilder:/workspaces \
40 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder \
41 | -e ENVBUILDER_DEVCONTAINER_DIR=/workspaces/envbuilder/examples/docker/01_dood \
42 | -e ENVBUILDER_INIT_SCRIPT=bash \
43 | -v /var/run/docker.sock:/var/run/docker.sock \
44 | ghcr.io/coder/envbuilder:latest
45 | ```
46 |
47 | ## Docker-in-Docker (DinD)
48 |
49 | **Security:** Low
50 | **Convenience:** High
51 |
52 | This approach entails running a Docker daemon inside the container.
53 | This requires a privileged container to run, and therefore has a wide potential
54 | attack surface.
55 |
56 | Example:
57 |
58 | > Note that due to a lack of init system, the Docker daemon
59 | > needs to be started separately inside the container. In this example, we
60 | > create a custom script to start the Docker daemon in the background and
61 | > call this entrypoint via the Devcontainer `onCreateCommand` lifecycle hook.
62 |
63 | ```console
64 | docker run -it --rm \
65 | --privileged \
66 | -v /tmp/envbuilder:/workspaces \
67 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder \
68 | -e ENVBUILDER_DEVCONTAINER_DIR=/workspaces/envbuilder/examples/docker/02_dind \
69 | -e ENVBUILDER_INIT_SCRIPT=bash \
70 | ghcr.io/coder/envbuilder:latest
71 | ```
72 |
73 | ### DinD via Devcontainer Feature
74 |
75 | The above can also be accomplished using the [`docker-in-docker` Devcontainer
76 | feature](https://github.com/devcontainers/features/tree/main/src/docker-in-docker).
77 |
78 | > Note: we still need the `onCreateCommand` to start Docker.
79 | > See
80 | > [here](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json#L65)
81 | > for more details.
82 | >
83 | > Known issue: `/run` does not get symlinked correctly to `/var/run`.
84 | > To work around this, we create the symlink manually before running
85 | > the script to start the Docker daemon.
86 |
87 | Example:
88 |
89 | ```console
90 | docker run -it --rm \
91 | --privileged \
92 | -v /tmp/envbuilder:/workspaces \
93 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder \
94 | -e ENVBUILDER_DEVCONTAINER_DIR=/workspaces/envbuilder/examples/docker/03_dind_feature \
95 | -e ENVBUILDER_INIT_SCRIPT=bash \
96 | ghcr.io/coder/envbuilder:latest
97 | ```
98 |
99 | ## Rootless DinD
100 |
101 | **Security:** Medium
102 | **Convenience:** Medium
103 |
104 | This approach runs a Docker daemon in _rootless_ mode.
105 | While this still requires a privileged container, this allows you to restrict
106 | usage of the `root` user inside the container, as the Docker daemon will be run
107 | under a "fake" root user (via `rootlesskit`). The user inside the workspace can
108 | then be a 'regular' user without root permissions.
109 |
110 | > Note: Once again, we use a custom entrypoint via `ENVBUILDER_INIT_SCRIPT` to
111 | > start the Docker daemon via `rootlesskit`.
112 |
113 | Example:
114 |
115 | ```console
116 | docker run -it --rm \
117 | --privileged \
118 | -v /tmp/envbuilder:/workspaces \
119 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder \
120 | -e ENVBUILDER_DEVCONTAINER_DIR=/workspaces/envbuilder/examples/docker/04_dind_rootless \
121 | -e ENVBUILDER_INIT_SCRIPT=/entrypoint.sh \
122 | ghcr.io/coder/envbuilder:latest
123 | ```
124 |
125 | ## Docker-in-Docker using Sysbox
126 |
127 | **Security:** High
128 | **Convenience:** Low for infra admins, high for users
129 |
130 | This approach requires installing the [`sysbox-runc` container
131 | runtime](https://github.com/nestybox/sysbox/blob/master/docs/user-guide/install-package.md).
132 | This is an alternative container runtime that provides additional benefits,
133 | including transparently enabling Docker inside workspaces. Most notably, it
134 | **does not require a privileged container**, so you can allow developers root
135 | access inside their workspaces, if required.
136 |
137 | Example:
138 |
139 | ```console
140 | docker run -it --rm \
141 | -v /tmp/envbuilder:/workspaces \
142 | -e ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder \
143 | -e ENVBUILDER_DEVCONTAINER_DIR=/workspaces/envbuilder/examples/docker/02_dind \
144 | -e ENVBUILDER_INIT_SCRIPT=/entrypoint.sh \
145 | --runtime sysbox-runc \
146 | ghcr.io/coder/envbuilder:latest
147 | ```
148 |
149 | For further information on Sysbox, please consult the [Sysbox
150 | Documentation](https://github.com/nestybox/sysbox/blob/master/docs/user-guide/README.md).
151 |
--------------------------------------------------------------------------------
/docs/env-variables.md:
--------------------------------------------------------------------------------
1 |
2 | # Environment Variables
3 |
4 | | Flag | Environment variable | Default | Description |
5 | | - | - | - | - |
6 | | `--setup-script` | `ENVBUILDER_SETUP_SCRIPT` | | The script to run before the init script. It runs as the root user regardless of the user specified in the devcontainer.json file. SetupScript is ran as the root user prior to the init script. It is used to configure envbuilder dynamically during the runtime. e.g. specifying whether to start systemd or tiny init for PID 1. |
7 | | `--init-script` | `ENVBUILDER_INIT_SCRIPT` | | The script to run to initialize the workspace. Default: `sleep infinity`. |
8 | | `--init-command` | `ENVBUILDER_INIT_COMMAND` | | The command to run to initialize the workspace. Default: `/bin/sh`. |
9 | | `--init-args` | `ENVBUILDER_INIT_ARGS` | | The arguments to pass to the init command. They are split according to /bin/sh rules with https://github.com/kballard/go-shellquote. |
10 | | `--cache-repo` | `ENVBUILDER_CACHE_REPO` | | The name of the container registry to push the cache image to. If this is empty, the cache will not be pushed. |
11 | | `--base-image-cache-dir` | `ENVBUILDER_BASE_IMAGE_CACHE_DIR` | | The path to a directory where the base image can be found. This should be a read-only directory solely mounted for the purpose of caching the base image. |
12 | | `--layer-cache-dir` | `ENVBUILDER_LAYER_CACHE_DIR` | | The path to a directory where built layers will be stored. This spawns an in-memory registry to serve the layers from. |
13 | | `--devcontainer-dir` | `ENVBUILDER_DEVCONTAINER_DIR` | | The path to the folder containing the devcontainer.json file that will be used to build the workspace and can either be an absolute path or a path relative to the workspace folder. If not provided, defaults to `.devcontainer`. |
14 | | `--devcontainer-json-path` | `ENVBUILDER_DEVCONTAINER_JSON_PATH` | | The path to a devcontainer.json file that is either an absolute path or a path relative to DevcontainerDir. This can be used in cases where one wants to substitute an edited devcontainer.json file for the one that exists in the repo. |
15 | | `--dockerfile-path` | `ENVBUILDER_DOCKERFILE_PATH` | | The relative path to the Dockerfile that will be used to build the workspace. This is an alternative to using a devcontainer that some might find simpler. |
16 | | `--build-context-path` | `ENVBUILDER_BUILD_CONTEXT_PATH` | | Can be specified when a DockerfilePath is specified outside the base WorkspaceFolder. This path MUST be relative to the WorkspaceFolder path into which the repo is cloned. |
17 | | `--cache-ttl-days` | `ENVBUILDER_CACHE_TTL_DAYS` | | The number of days to use cached layers before expiring them. Defaults to 7 days. |
18 | | `--docker-config-base64` | `ENVBUILDER_DOCKER_CONFIG_BASE64` | | The base64 encoded Docker config file that will be used to pull images from private container registries. When this is set, Docker configuration set via the DOCKER_CONFIG environment variable is ignored. |
19 | | `--fallback-image` | `ENVBUILDER_FALLBACK_IMAGE` | | Specifies an alternative image to use when neither an image is declared in the devcontainer.json file nor a Dockerfile is present. If there's a build failure (from a faulty Dockerfile) or a misconfiguration, this image will be the substitute. Set ExitOnBuildFailure to true to halt the container if the build faces an issue. |
20 | | `--exit-on-build-failure` | `ENVBUILDER_EXIT_ON_BUILD_FAILURE` | | Terminates the container upon a build failure. This is handy when preferring the FALLBACK_IMAGE in cases where no devcontainer.json or image is provided. However, it ensures that the container stops if the build process encounters an error. |
21 | | `--exit-on-push-failure` | `ENVBUILDER_EXIT_ON_PUSH_FAILURE` | | ExitOnPushFailure terminates the container upon a push failure. This is useful if failure to push the built image should abort execution and result in an error. |
22 | | `--force-safe` | `ENVBUILDER_FORCE_SAFE` | | Ignores any filesystem safety checks. This could cause serious harm to your system! This is used in cases where bypass is needed to unblock customers. |
23 | | `--insecure` | `ENVBUILDER_INSECURE` | | Bypass TLS verification when cloning and pulling from container registries. |
24 | | `--ignore-paths` | `ENVBUILDER_IGNORE_PATHS` | | The comma separated list of paths to ignore when building the workspace. |
25 | | `--build-secrets` | `ENVBUILDER_BUILD_SECRETS` | | The list of secret environment variables to use when building the image. |
26 | | `--skip-rebuild` | `ENVBUILDER_SKIP_REBUILD` | | Skip building if the MagicFile exists. This is used to skip building when a container is restarting. e.g. docker stop -> docker start This value can always be set to true - even if the container is being started for the first time. |
27 | | `--git-url` | `ENVBUILDER_GIT_URL` | | The URL of a Git repository containing a Devcontainer or Docker image to clone. This is optional. |
28 | | `--git-clone-depth` | `ENVBUILDER_GIT_CLONE_DEPTH` | | The depth to use when cloning the Git repository. |
29 | | `--git-clone-single-branch` | `ENVBUILDER_GIT_CLONE_SINGLE_BRANCH` | | Clone only a single branch of the Git repository. |
30 | | `--git-clone-thinpack` | `ENVBUILDER_GIT_CLONE_THINPACK` | `true` | Git clone with thin pack compatibility enabled, ensuring that even when thin pack compatibility is activated, it will not be turned on for the domain dev.azure.com. |
31 | | `--git-username` | `ENVBUILDER_GIT_USERNAME` | | The username to use for Git authentication. This is optional. |
32 | | `--git-password` | `ENVBUILDER_GIT_PASSWORD` | | The password to use for Git authentication. This is optional. |
33 | | `--git-ssh-private-key-path` | `ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH` | | Path to an SSH private key to be used for Git authentication. If this is set, then GIT_SSH_PRIVATE_KEY_BASE64 cannot be set. |
34 | | `--git-ssh-private-key-base64` | `ENVBUILDER_GIT_SSH_PRIVATE_KEY_BASE64` | | Base64 encoded SSH private key to be used for Git authentication. If this is set, then GIT_SSH_PRIVATE_KEY_PATH cannot be set. |
35 | | `--git-http-proxy-url` | `ENVBUILDER_GIT_HTTP_PROXY_URL` | | The URL for the HTTP proxy. This is optional. |
36 | | `--workspace-base-dir` | `ENVBUILDER_WORKSPACE_BASE_DIR` | `/workspaces` | The path under which workspaces will be placed when workspace folder option is not given. |
37 | | `--workspace-folder` | `ENVBUILDER_WORKSPACE_FOLDER` | | The path to the workspace folder that will be built. This is optional. Defaults to `[workspace base dir]/[name]` where name is the name of the repository or `empty`. |
38 | | `--ssl-cert-base64` | `ENVBUILDER_SSL_CERT_BASE64` | | The content of an SSL cert file. This is useful for self-signed certificates. |
39 | | `--export-env-file` | `ENVBUILDER_EXPORT_ENV_FILE` | | Optional file path to a .env file where envbuilder will dump environment variables from devcontainer.json and the built container image. |
40 | | `--post-start-script-path` | `ENVBUILDER_POST_START_SCRIPT_PATH` | | The path to a script that will be created by envbuilder based on the postStartCommand in devcontainer.json, if any is specified (otherwise the script is not created). If this is set, the specified InitCommand should check for the presence of this script and execute it after successful startup. |
41 | | `--coder-agent-url` | `CODER_AGENT_URL` | | URL of the Coder deployment. If CODER_AGENT_TOKEN is also set, logs from envbuilder will be forwarded here and will be visible in the workspace build logs. |
42 | | `--coder-agent-token` | `CODER_AGENT_TOKEN` | | Authentication token for a Coder agent. If this is set, then CODER_AGENT_URL must also be set. |
43 | | `--coder-agent-subsystem` | `CODER_AGENT_SUBSYSTEM` | | Coder agent subsystems to report when forwarding logs. The envbuilder subsystem is always included. |
44 | | `--push-image` | `ENVBUILDER_PUSH_IMAGE` | | Push the built image to a remote registry. This option forces a reproducible build. |
45 | | `--get-cached-image` | `ENVBUILDER_GET_CACHED_IMAGE` | | Print the digest of the cached image, if available. Exits with an error if not found. |
46 | | `--remote-repo-build-mode` | `ENVBUILDER_REMOTE_REPO_BUILD_MODE` | `false` | Use the remote repository as the source of truth when building the image. Enabling this option ignores user changes to local files and they will not be reflected in the image. This can be used to improve cache utilization when multiple users are working on the same repository. |
47 | | `--verbose` | `ENVBUILDER_VERBOSE` | | Enable verbose logging. |
48 |
--------------------------------------------------------------------------------
/docs/git-auth.md:
--------------------------------------------------------------------------------
1 | # Git Authentication
2 |
3 | Two methods of authentication are supported:
4 |
5 | ## HTTP Authentication
6 |
7 | If `ENVBUILDER_GIT_URL` starts with `http://` or `https://`, envbuilder will
8 | authenticate with `ENVBUILDER_GIT_USERNAME` and `ENVBUILDER_GIT_PASSWORD`, if set.
9 |
10 | For access token-based authentication, use the following schema (if empty, there's no need to provide the field):
11 |
12 | | Provider | `ENVBUILDER_GIT_USERNAME` | `ENVBUILDER_GIT_PASSWORD` |
13 | | ------------ | ------------------------- | ------------------------- |
14 | | GitHub | [access-token] | |
15 | | GitLab | oauth2 | [access-token] |
16 | | BitBucket | x-token-auth | [access-token] |
17 | | Azure DevOps | [access-token] | |
18 |
19 | If using envbuilder inside of [Coder](https://github.com/coder/coder), you can use the `coder_external_auth` Terraform resource to automatically provide this token on workspace creation:
20 |
21 | ```hcl
22 | data "coder_external_auth" "github" {
23 | id = "github"
24 | }
25 |
26 | resource "docker_container" "dev" {
27 |   env = [
28 |     "ENVBUILDER_GIT_USERNAME=${data.coder_external_auth.github.access_token}",
29 |   ]
30 | }
31 | ```
32 |
33 | ## SSH Authentication
34 |
35 | If `ENVBUILDER_GIT_URL` does not start with `http://` or `https://`,
36 | envbuilder will assume SSH authentication. You have the following options:
37 |
38 | 1. Public/Private key authentication: set `ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH` to the path of an
39 | SSH private key mounted inside the container. Envbuilder will use this SSH
40 | key to authenticate. Example:
41 |
42 | ```bash
43 | docker run -it --rm \
44 | -v /tmp/envbuilder:/workspaces \
45 | -e ENVBUILDER_GIT_URL=git@example.com:path/to/private/repo.git \
46 | -e ENVBUILDER_INIT_SCRIPT=bash \
47 | -e ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH=/.ssh/id_rsa \
48 | -v /home/user/id_rsa:/.ssh/id_rsa \
49 | ghcr.io/coder/envbuilder
50 | ```
51 |
52 | Alternatively, you can set `ENVBUILDER_GIT_SSH_PRIVATE_KEY_BASE64` to the
53 | base64-encoded content of your private key. Example:
54 |
55 | ```bash
56 | docker run -it --rm \
57 | -v /tmp/envbuilder:/workspaces \
58 | -e ENVBUILDER_GIT_URL=git@example.com:path/to/private/repo.git \
59 | -e ENVBUILDER_INIT_SCRIPT=bash \
60 | -e ENVBUILDER_GIT_SSH_PRIVATE_KEY_BASE64=$(base64 < ~/.ssh/id_ed25519) \
61 | ghcr.io/coder/envbuilder
62 | ```
63 |
64 | 1. Agent-based authentication: set `SSH_AUTH_SOCK` and mount in your agent socket, for example:
65 |
66 | ```bash
67 | docker run -it --rm \
68 | -v /tmp/envbuilder:/workspaces \
69 | -e ENVBUILDER_GIT_URL=git@example.com:path/to/private/repo.git \
70 | -e ENVBUILDER_INIT_SCRIPT=bash \
71 | -e SSH_AUTH_SOCK=/tmp/ssh-auth-sock \
72 | -v $SSH_AUTH_SOCK:/tmp/ssh-auth-sock \
73 | ghcr.io/coder/envbuilder
74 | ```
75 |
76 | > Note: by default, envbuilder will accept and log all host keys. If you need
77 | > strict host key checking, set `SSH_KNOWN_HOSTS` and mount in a `known_hosts`
78 | > file.
79 |
--------------------------------------------------------------------------------
/docs/img/proxy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coder/envbuilder/7eabaa4d876c3ce54f7271b6e68fea37b6719c66/docs/img/proxy.png
--------------------------------------------------------------------------------
/docs/proxy.md:
--------------------------------------------------------------------------------
1 | # How to run Envbuilder from behind a proxy
2 |
3 | Envbuilder can be used from behind transparent TLS proxies that would normally risk interrupting TLS verification.
4 |
5 | A summary of how to configure Envbuilder to run behind a HTTPS proxy is provided in the next section. Thereafter an illustrative example is provided that can be followed to prove the concept from first principles before applying it in production.
6 |
7 | ## Summary
8 | To use Envbuilder behind a proxy that requires a custom certificate, the following configuration needs to be applied. Note that if you are using Envbuilder in conjunction with Coder, set these variables on the Envbuilder container itself, not on the Coder agent.
9 |
10 | To configure Envbuilder to route HTTP traffic for git and the container registry to the proxy, do at least one of the following:
11 | * Set the `https_proxy=https://host:port` environment variable for the envbuilder container. This will also proxy traffic for other programs in the container that respect `https_proxy`. If you do set it, look into the related `http_proxy` and `no_proxy` environment variables. For compatibility, set these environment variables in [lowercase](https://about.gitlab.com/blog/2021/01/27/we-need-to-talk-no-proxy/#http_proxy-and-https_proxy).
12 | * Set the `ENVBUILDER_GIT_HTTP_PROXY_URL=https://host:port` environment variable. This will specifically route traffic for Envbuilder's git operations and communication with the container registry without affecting other programs inside the container.
13 |
14 | Once traffic is routed to the proxy, you will need to install the proxy's CA certificate into Envbuilder. To do this, you can do one of the following:
15 | * Set `ENVBUILDER_SSL_CERT_BASE64=...` to the base64 encoded value of your proxy's CA certificate. This will only apply to Envbuilder. Other TLS connections within your container will not be aware of this certificate.
16 | * Mount the certificate file into the Envbuilder container and then set `SSL_CERT_FILE=/path/to/cert.pem`.
17 | * Mount a directory containing all relevant CA certificates into the Envbuilder container and then set `SSL_CERT_DIR=/path/to/certs/`.
18 |
19 | ## Demonstration
20 | Envbuilder clones a repository that contains your `devcontainer.json` and optional `Dockerfile` so that it can build your container. If the clone is done using HTTPS, then TLS verification must succeed or be disabled. If a transparent HTTPS proxy is present, TLS verification will fail unless Envbuilder trusts the proxy’s certificate. Therefore, we need to configure Envbuilder to trust your proxy.
21 |
22 | For this example we'll use docker to show how to apply the configuration in practice. The configuration is applied using environment variables that can be set analogously in Kubernetes or elsewhere.
23 |
24 | Before we introduce an HTTPS proxy, let's prove that envbuilder runs normally. Run the following docker command to obtain a shell within an Envbuilder built environment:
25 | ```bash
26 | docker run -it --rm \
27 | -e ENVBUILDER_INIT_SCRIPT='/bin/sh' \
28 | -e ENVBUILDER_GIT_URL='https://github.com/coder/envbuilder.git' \
29 | ghcr.io/coder/envbuilder:latest
30 | ```
31 |
32 | Notice the log lines:
33 | ```
34 | #1: 📦 Cloning https://github.com/coder/envbuilder.git to /workspaces/envbuilder...
35 | ...
36 | #1: 📦 Cloned repository! [711.221369ms]
37 | ```
38 |
39 | After some time building, a shell will be presented inside the devcontainer environment specified in envbuilder's own repository. Assuming that envbuilder built and ran successfully, go ahead and exit the container:
40 | ```bash
41 | exit
42 | ```
43 |
44 | Let's now temporarily break Envbuilder by introducing a transparent TLS proxy to intercept traffic. To do this, we'll use [mitmproxy](https://mitmproxy.org/). Start mitmproxy in a container by running the following:
45 | ```bash
46 | docker run --rm -d --user $(id -u):$(id -g) --name mitmproxy -v ./certs:/home/mitmproxy/.mitmproxy -p 8080:8080 -p 127.0.0.1:8081:8081 mitmproxy/mitmproxy mitmweb --web-host 0.0.0.0 --set http2=false
47 | ```
48 |
49 | Notice that we disable HTTP2 on mitmproxy. This is because Envbuilder and mitmproxy do not seem to be able to negotiate which version of HTTP to use. mitmproxy interprets Envbuilder's HTTP1.1 request as an HTTP2 request and then fails to find the expected HTTP2 preamble (because there is no HTTP2 preamble in an HTTP1.1 request). If your production proxy exhibits this behavior, please file a GitHub issue.
50 |
51 | Confirm that mitmproxy is running and determine its IP address:
52 | ```bash
53 | docker inspect mitmproxy | jq -r '.[].NetworkSettings.IPAddress'
54 | ```
55 | yields:
56 | ```
57 | 172.17.0.2
58 | ```
59 |
60 | You may see a different IP address. If you do, use that wherever we use `172.17.0.2` below.
61 |
62 | A new directory called certs will be present in your current working directory. It will contain a CA certificate called `mitmproxy-ca-cert.pem`. This will be what we provide to Envbuilder to trust our proxy.
63 |
64 | To understand why certificate verification fails, inspect the certificates served by mitmproxy:
65 | ```bash
66 | openssl s_client -proxy localhost:8080 -servername github.com -connect github.com:443 | head -n 10
67 | ```
68 | In the output, notice that we are served a certificate that is ostensibly for github.com. However, its issuer common name is "mitmproxy" and s_client couldn't verify the certificate. This is because s_client can't find a CA certificate that trusts the certificate that was served by mitmproxy instead of the actual github.com certificate.
69 | ```
70 | depth=0 CN = github.com
71 | verify error:num=20:unable to get local issuer certificate
72 | verify return:1
73 | depth=0 CN = github.com
74 | verify error:num=21:unable to verify the first certificate
75 | verify return:1
76 | depth=0 CN = github.com
77 | verify return:1
78 | CONNECTED(00000003)
79 | ---
80 | Certificate chain
81 | 0 s:CN = github.com
82 | i:CN = mitmproxy, O = mitmproxy
83 | a:PKEY: rsaEncryption, 2048 (bit); sigalg: RSA-SHA256
84 | v:NotBefore: Nov 7 15:43:48 2024 GMT; NotAfter: Nov 9 15:43:48 2025 GMT
85 | ---
86 | Server certificate
87 | -----BEGIN CERTIFICATE-----
88 | ```
89 |
90 | Let's rerun Envbuilder using the proxy to see how it responds. To do this, we use the same command as before, except that we also set the `https_proxy` environment variable:
91 | ```bash
92 | docker run -it --rm \
93 | -e https_proxy=https://172.17.0.2:8080 \
94 | -e ENVBUILDER_INIT_SCRIPT='/bin/sh' \
95 | -e ENVBUILDER_GIT_URL='https://github.com/coder/envbuilder.git' \
96 | ghcr.io/coder/envbuilder:latest
97 | ```
98 | From the logs, notice that certificate verification fails. It fails because it doesn't trust the certificate that was provided by mitmproxy:
99 | ```
100 | Failed to clone repository: clone "https://github.com/coder/envbuilder.git": Get "https://github.com/coder/envbuilder.git/info/refs?service=git-upload-pack": proxyconnect tcp: tls: failed to verify certificate: x509: certificate signed by unknown authority
101 | ```
102 |
103 | To resolve this, we need to provide a CA certificate that Envbuilder can use to verify the server certificate served by mitmproxy, instead of GitHub’s. Envbuilder offers environment variables for this, as documented above. In this example, the CA certificate is saved in a directory. The simplest approach is to mount this directory as a volume in the Envbuilder container and use the `SSL_CERT_FILE` environment variable. The command to run Envbuilder is now:
104 | ```bash
105 | docker run -it --rm \
106 | -v $PWD/certs:/certs \
107 | -e SSL_CERT_FILE=/certs/mitmproxy-ca-cert.pem \
108 | -e https_proxy=https://172.17.0.2:8080 \
109 | -e ENVBUILDER_INIT_SCRIPT='/bin/sh' \
110 | -e ENVBUILDER_GIT_URL='https://github.com/coder/envbuilder.git' \
111 | ghcr.io/coder/envbuilder:latest
112 | ```
113 |
114 | As before, this command yields a shell inside an Envbuilder built environment. Feel free to test it and then exit the container. Assuming this worked, Envbuilder will have cloned a repository and built the relevant container using a proxy that required accepting a custom CA certificate.
115 |
116 | ### Bonus
117 | To prove that Envbuilder did in fact use the proxy, and also because it is interesting to observe, open `http://localhost:8081/` in your local browser and you will see the mitmproxy web interface. In the flow tab, there will be a list of all of the HTTP requests that were required to build the container. The first few requests will be those used to clone the Git repository. The rest will be the requests that were used to pull the devcontainer image.
118 |
119 | 
120 |
121 | ### Cleanup
122 | Once the demonstration has concluded, cleanup the artifacts that were used in the process:
123 | ```bash
124 | docker stop mitmproxy
125 | rm -r certs/
126 | ```
--------------------------------------------------------------------------------
/docs/usage-with-coder.md:
--------------------------------------------------------------------------------
1 | # Usage with Coder
2 |
3 | Coder provides sample
4 | [Docker](https://github.com/coder/coder/tree/main/examples/templates/devcontainer-docker)
5 | and
6 | [Kubernetes](https://github.com/coder/coder/tree/main/examples/templates/devcontainer-kubernetes)
7 | templates for use with Envbuilder. You can import these templates and modify them to fit
8 | your specific requirements.
9 |
10 | Below are some specific points to be aware of when using Envbuilder with a Coder
11 | deployment:
12 |
13 | - The `ENVBUILDER_INIT_SCRIPT` should execute `coder_agent.main.init_script` in
14 | order for you to be able to connect to your workspace.
15 | - In order for the Agent init script to be able to fetch the agent binary from
16 | your Coder deployment, the resulting Devcontainer must contain a download tool
17 | such as `curl`, `wget`, or `busybox`.
18 | - `CODER_AGENT_TOKEN` should be included in the environment variables for the
19 | Envbuilder container. You can also set `CODER_AGENT_URL` if required.
20 |
21 | ## Git Branch Selection
22 |
23 | Choose a branch using `ENVBUILDER_GIT_URL` with a _ref/heads_ reference. For instance:
24 |
25 | ```
26 | ENVBUILDER_GIT_URL=https://github.com/coder/envbuilder-starter-devcontainer/#refs/heads/my-feature-branch
27 | ```
28 |
--------------------------------------------------------------------------------
/docs/users.md:
--------------------------------------------------------------------------------
1 | # Root Privileges
2 |
3 | Envbuilder always expects to be run as `root` in its container, as building an image will most likely require root privileges. Once the image is built, Envbuilder will drop root privileges and `exec` `ENVBUILDER_INIT_COMMAND` / `ENVBUILDER_INIT_SCRIPT` as a non-root user.
4 |
5 | ## Choosing a target user
6 |
7 | Envbuilder will first attempt to switch to the `containerUser` defined in `devcontainer.json`.
8 | If this is not specified, it will look up the last `USER` directive from the specified `Dockerfile` or image.
9 | If no alternative user is specified, Envbuilder will fall back to `root`.
10 |
11 | When installing Devcontainer Features, Envbuilder will add a `USER ${remoteUser}` directive directly after the feature installation directives.
12 | If `remoteUser` is not defined, it will default to `containerUser`.
13 |
--------------------------------------------------------------------------------
/docs/using-local-files.md:
--------------------------------------------------------------------------------
1 | # Using local files
2 |
3 | If you don't have a remote Git repo or you want to quickly iterate with some
4 | local files, simply omit `ENVBUILDER_GIT_URL` and instead mount the directory
5 | containing your code to `/workspaces/empty` inside the Envbuilder container.
6 |
7 | For example:
8 |
9 | ```shell
10 | # Create a sample Devcontainer and Dockerfile in the current directory
11 | printf '{"build": { "dockerfile": "Dockerfile"}}' > devcontainer.json
12 | printf 'FROM debian:bookworm\nRUN apt-get update && apt-get install -y cowsay' > Dockerfile
13 |
14 | # Run envbuilder with the current directory mounted into `/workspaces/empty`.
15 | # The instructions to add /usr/games to $PATH have been omitted for brevity.
16 | docker run -it --rm -e ENVBUILDER_INIT_SCRIPT='bash' -v $PWD:/workspaces/empty ghcr.io/coder/envbuilder:latest
17 | ```
18 |
19 | Alternatively, if you prefer to mount your project files elsewhere, tell
20 | Envbuilder where to find them by specifying `ENVBUILDER_WORKSPACE_FOLDER`:
21 |
22 | ```shell
23 | docker run -it --rm -e ENVBUILDER_INIT_SCRIPT='bash ' -e ENVBUILDER_WORKSPACE_FOLDER=/src -v $PWD:/src ghcr.io/coder/envbuilder:latest
24 | ```
25 |
26 | By default, Envbuilder will look for a `devcontainer.json` or `Dockerfile` in
27 | both `${ENVBUILDER_WORKSPACE_FOLDER}` and `${ENVBUILDER_WORKSPACE_FOLDER}/.devcontainer`.
28 | You can control where it looks with `ENVBUILDER_DEVCONTAINER_DIR` if needed.
29 |
30 | ```shell
31 | ls build/
32 | Dockerfile devcontainer.json
33 | docker run -it --rm -e ENVBUILDER_INIT_SCRIPT='bash' -e ENVBUILDER_DEVCONTAINER_DIR=build -v $PWD:/src ghcr.io/coder/envbuilder:latest
34 | ```
35 |
--------------------------------------------------------------------------------
/envbuilder_internal_test.go:
--------------------------------------------------------------------------------
1 | package envbuilder
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/coder/envbuilder/options"
7 |
8 | "github.com/go-git/go-billy/v5/memfs"
9 | "github.com/stretchr/testify/assert"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
13 | func TestFindDevcontainerJSON(t *testing.T) {
14 | t.Parallel()
15 |
16 | defaultWorkspaceFolder := "/workspace"
17 |
18 | for _, tt := range []struct {
19 | name string
20 | workspaceFolder string
21 | }{
22 | {
23 | name: "Default",
24 | workspaceFolder: defaultWorkspaceFolder,
25 | },
26 | {
27 | name: "RepoMode",
28 | workspaceFolder: "/.envbuilder/repo",
29 | },
30 | } {
31 | t.Run(tt.name, func(t *testing.T) {
32 | t.Parallel()
33 |
34 | t.Run("empty filesystem", func(t *testing.T) {
35 | t.Parallel()
36 |
37 | // given
38 | fs := memfs.New()
39 |
40 | // when
41 | _, _, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
42 | Filesystem: fs,
43 | WorkspaceFolder: "/workspace",
44 | })
45 |
46 | // then
47 | require.Error(t, err)
48 | })
49 |
50 | t.Run("devcontainer.json is missing", func(t *testing.T) {
51 | t.Parallel()
52 |
53 | // given
54 | fs := memfs.New()
55 | err := fs.MkdirAll(tt.workspaceFolder+"/.devcontainer", 0o600)
56 | require.NoError(t, err)
57 |
58 | // when
59 | _, _, err = findDevcontainerJSON(tt.workspaceFolder, options.Options{
60 | Filesystem: fs,
61 | WorkspaceFolder: "/workspace",
62 | })
63 |
64 | // then
65 | require.Error(t, err)
66 | })
67 |
68 | t.Run("default configuration", func(t *testing.T) {
69 | t.Parallel()
70 |
71 | // given
72 | fs := memfs.New()
73 | err := fs.MkdirAll(tt.workspaceFolder+"/.devcontainer", 0o600)
74 | require.NoError(t, err)
75 | _, err = fs.Create(tt.workspaceFolder + "/.devcontainer/devcontainer.json")
76 | require.NoError(t, err)
77 |
78 | // when
79 | devcontainerPath, devcontainerDir, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
80 | Filesystem: fs,
81 | WorkspaceFolder: "/workspace",
82 | })
83 |
84 | // then
85 | require.NoError(t, err)
86 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer/devcontainer.json", devcontainerPath)
87 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer", devcontainerDir)
88 | })
89 |
90 | t.Run("overridden .devcontainer directory", func(t *testing.T) {
91 | t.Parallel()
92 |
93 | // given
94 | fs := memfs.New()
95 | err := fs.MkdirAll(tt.workspaceFolder+"/experimental-devcontainer", 0o600)
96 | require.NoError(t, err)
97 | _, err = fs.Create(tt.workspaceFolder + "/experimental-devcontainer/devcontainer.json")
98 | require.NoError(t, err)
99 |
100 | // when
101 | devcontainerPath, devcontainerDir, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
102 | Filesystem: fs,
103 | WorkspaceFolder: "/workspace",
104 | DevcontainerDir: "experimental-devcontainer",
105 | })
106 |
107 | // then
108 | require.NoError(t, err)
109 | assert.Equal(t, tt.workspaceFolder+"/experimental-devcontainer/devcontainer.json", devcontainerPath)
110 | assert.Equal(t, tt.workspaceFolder+"/experimental-devcontainer", devcontainerDir)
111 | })
112 |
113 | t.Run("overridden devcontainer.json path", func(t *testing.T) {
114 | t.Parallel()
115 |
116 | // given
117 | fs := memfs.New()
118 | err := fs.MkdirAll(tt.workspaceFolder+"/.devcontainer", 0o600)
119 | require.NoError(t, err)
120 | _, err = fs.Create(tt.workspaceFolder + "/.devcontainer/experimental.json")
121 | require.NoError(t, err)
122 |
123 | // when
124 | devcontainerPath, devcontainerDir, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
125 | Filesystem: fs,
126 | WorkspaceFolder: "/workspace",
127 | DevcontainerJSONPath: "experimental.json",
128 | })
129 |
130 | // then
131 | require.NoError(t, err)
132 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer/experimental.json", devcontainerPath)
133 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer", devcontainerDir)
134 | })
135 |
136 | t.Run("devcontainer.json in workspace root", func(t *testing.T) {
137 | t.Parallel()
138 |
139 | // given
140 | fs := memfs.New()
141 | err := fs.MkdirAll(tt.workspaceFolder+"", 0o600)
142 | require.NoError(t, err)
143 | _, err = fs.Create(tt.workspaceFolder + "/devcontainer.json")
144 | require.NoError(t, err)
145 |
146 | // when
147 | devcontainerPath, devcontainerDir, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
148 | Filesystem: fs,
149 | WorkspaceFolder: "/workspace",
150 | })
151 |
152 | // then
153 | require.NoError(t, err)
154 | assert.Equal(t, tt.workspaceFolder+"/devcontainer.json", devcontainerPath)
155 | assert.Equal(t, tt.workspaceFolder+"", devcontainerDir)
156 | })
157 |
158 | t.Run("devcontainer.json in subfolder of .devcontainer", func(t *testing.T) {
159 | t.Parallel()
160 |
161 | // given
162 | fs := memfs.New()
163 | err := fs.MkdirAll(tt.workspaceFolder+"/.devcontainer/sample", 0o600)
164 | require.NoError(t, err)
165 | _, err = fs.Create(tt.workspaceFolder + "/.devcontainer/sample/devcontainer.json")
166 | require.NoError(t, err)
167 |
168 | // when
169 | devcontainerPath, devcontainerDir, err := findDevcontainerJSON(tt.workspaceFolder, options.Options{
170 | Filesystem: fs,
171 | WorkspaceFolder: "/workspace",
172 | })
173 |
174 | // then
175 | require.NoError(t, err)
176 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer/sample/devcontainer.json", devcontainerPath)
177 | assert.Equal(t, tt.workspaceFolder+"/.devcontainer/sample", devcontainerDir)
178 | })
179 | })
180 | }
181 | }
182 |
--------------------------------------------------------------------------------
/examples/docker/01_dood/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:noble
2 | RUN apt-get update && apt-get install -y docker.io
--------------------------------------------------------------------------------
/examples/docker/01_dood/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "build": {
3 | "dockerfile": "Dockerfile"
4 | }
5 | }
--------------------------------------------------------------------------------
/examples/docker/02_dind/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:noble
2 |
3 | # Install Docker using Docker's convenience script.
4 | RUN apt-get update && \
5 | apt-get install -y curl sudo apt-transport-https && \
6 | curl -fsSL https://get.docker.com/ | sh -s -
7 |
8 | # The ubuntu:noble image includes a non-root user by default,
9 | # but it does not have sudo privileges. We need to set this up.
10 | # Note: we chown /var/run/docker.sock to the non-root user
11 | # in the onCreateCommand script. Ideally you would add the
12 | # non-root user to the docker group, but in this scenario
13 | # this is a 'single-user' environment. It also avoids us
14 | # having to run `newgrp docker`.
15 | RUN echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ubuntu
16 |
17 | # Add our onCreateCommand script.
18 | ADD on-create.sh /on-create.sh
19 |
20 | # Switch to the non-root user.
21 | USER ubuntu
22 |
23 | ENTRYPOINT ["bash"]
24 |
--------------------------------------------------------------------------------
/examples/docker/02_dind/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "build": {
3 | "dockerfile": "Dockerfile"
4 | },
5 | "onCreateCommand": "/on-create.sh"
6 | }
7 |
--------------------------------------------------------------------------------
/examples/docker/02_dind/on-create.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | # Start Docker in the background.
6 | sudo -u root /bin/sh -c 'nohup dockerd > /var/log/docker.log &'
7 |
8 | # Wait up to 10 seconds for Docker to start.
9 | for attempt in $(seq 1 10); do
10 | if [[ $attempt -eq 10 ]]; then
11 | echo "Failed to start Docker"
12 | exit 1
13 | fi
14 | if [[ ! -e /var/run/docker.sock ]]; then
15 | sleep 1
16 | else
17 | break
18 | fi
19 | done
20 |
21 | # Change the owner of the Docker socket so that the non-root user can use it.
22 | sudo chown ubuntu:docker /var/run/docker.sock
23 |
--------------------------------------------------------------------------------
/examples/docker/03_dind_feature/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:noble
2 |
3 | # Install some dependencies such as curl and sudo.
4 | # Also set up passwordless sudo for the ubuntu user.
5 | RUN apt-get update && \
6 | DEBIAN_FRONTEND=noninteractive apt-get install -y \
7 | curl \
8 | sudo \
9 | apt-transport-https && \
10 | echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ubuntu
11 |
12 | # Add our onCreateCommand script.
13 | ADD on-create.sh /on-create.sh
14 |
15 | # Switch to the non-root user.
16 | USER ubuntu
17 |
18 | # The devcontainer feature provides /usr/local/share/docker-init.sh
19 | # which will handle most of the steps of setting up Docker.
20 | # We can't put this in the entrypoint as it gets overridden, so
21 | # we call it in the on-create script.
22 | ENTRYPOINT ["bash"]
23 |
--------------------------------------------------------------------------------
/examples/docker/03_dind_feature/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "build": {
3 | "dockerfile": "Dockerfile"
4 | },
5 | "onCreateCommand": "/on-create.sh",
6 | "features": {
7 | "ghcr.io/devcontainers/features/docker-in-docker:2": {}
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/examples/docker/03_dind_feature/on-create.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | # Known issue: Kaniko does not symlink /run => /var/run properly.
6 | # This results in /var/run/ being owned by root:root which interferes
7 | # with accessing the Docker socket even if the permissions are set
8 | # correctly. Workaround: symlink it manually
9 | sudo ln -s /run /var/run
10 |
11 | # Run the docker init script. This needs to be
12 | # run as root. It will take care of starting the
13 | # daemon and adding the ubuntu user to the docker
14 | # group.
15 | sudo /usr/local/share/docker-init.sh
16 |
17 | # Change the owner of the Docker socket so that the non-root user can use it.
18 | sudo chown ubuntu:docker /var/run/docker.sock
19 |
--------------------------------------------------------------------------------
/examples/docker/04_dind_rootless/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:noble

# Based on UID of ubuntu user in container.
ENV XDG_RUNTIME_DIR=/run/user/1000
# XDG_RUNTIME_DIR is already absolute, so only the scheme's two slashes are
# needed here. (The previous `unix:///${XDG_RUNTIME_DIR}` expanded to a
# four-slash URL.)
ENV DOCKER_HOST=unix://${XDG_RUNTIME_DIR}/docker.sock

# Setup as root
USER root
RUN apt-get update && \
  # Install prerequisites
  apt-get install -y apt-transport-https curl iproute2 uidmap && \
  # Install Docker
  curl -fsSL https://get.docker.com/ | sh -s - && \
  # Add ubuntu user to docker group
  usermod -aG docker ubuntu && \
  # Create the XDG_RUNTIME_DIR for our user and set DOCKER_HOST
  mkdir -p ${XDG_RUNTIME_DIR} && \
  chown ubuntu:ubuntu ${XDG_RUNTIME_DIR}

# Setup rootless mode as the ubuntu user.
USER ubuntu
RUN dockerd-rootless-setuptool.sh install && \
  docker context use rootless && \
  mkdir -p /home/ubuntu/.local/share/docker

# Add our onCreateCommand script.
ADD on-create.sh /on-create.sh

ENTRYPOINT ["bash"]
--------------------------------------------------------------------------------
/examples/docker/04_dind_rootless/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "build": {
3 | "dockerfile": "Dockerfile"
4 | },
5 | "onCreateCommand": "/on-create.sh"
6 | }
7 |
--------------------------------------------------------------------------------
/examples/docker/04_dind_rootless/on-create.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -euo pipefail

# Start the rootless docker daemon as a non-root user.
# rootlesskit sets up user namespaces (slirp4netns networking, /etc and /run
# copied up) and runs dockerd inside them; output goes to a log file and the
# daemon is left running in the background via nohup.
nohup rootlesskit --net=slirp4netns --mtu=1500 --disable-host-loopback --port-driver=builtin --copy-up=/etc --copy-up=/run dockerd >"/tmp/dockerd-rootless.log" 2>&1 &
--------------------------------------------------------------------------------
/examples/kaniko-cache-warmer.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This is an example script to pull a number of images into the Kaniko cache
# to have them ready for consumption by envbuilder.
# Ref: https://github.com/coder/envbuilder/blob/main/README.md#image-caching

# Enable strict mode before doing anything else so that failures in the
# setup below (e.g. unset variables) are not silently ignored. Previously
# this was enabled only after the variables were initialized.
set -euo pipefail

# Docker volume used as the Kaniko cache; override via environment.
KANIKO_CACHE_VOLUME=${KANIKO_CACHE_VOLUME:-"kanikocache"}
# Images to pre-fetch into the cache.
IMAGES=(
  alpine:latest
  debian:latest
  ubuntu:latest
)

# Create the cache volume on first use.
if ! docker volume inspect "${KANIKO_CACHE_VOLUME}" > /dev/null 2>&1; then
  echo "Kaniko cache volume does not exist; creating it."
  docker volume create "${KANIKO_CACHE_VOLUME}"
fi

# Warm the cache one image at a time using the official Kaniko warmer.
for img in "${IMAGES[@]}"; do
  echo "Fetching image ${img} to kaniko cache ${KANIKO_CACHE_VOLUME}"
  docker run --rm \
    -v "${KANIKO_CACHE_VOLUME}:/cache" \
    gcr.io/kaniko-project/warmer:latest \
    --cache-dir=/cache \
    --image="${img}"
done
--------------------------------------------------------------------------------
/init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo hey there
4 | sleep 1
5 |
6 | echo INIT_COMMAND=/bin/sh >> "${ENVBUILDER_ENV}"
7 | echo INIT_ARGS="-c /bin/bash" >> "${ENVBUILDER_ENV}"
--------------------------------------------------------------------------------
/integration/testdata/blob-unknown/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine:latest

# This will produce an empty layer via Docker. It will allow us to test for a
# conflicting empty layer produced by Kaniko. This is to check against the
# BLOB_UNKNOWN error when trying to upload the built image to a registry and
# Kaniko having overwritten this blob with its own.
# (WORKDIR alone changes only image config/metadata, hence the empty layer.)
WORKDIR /home
8 |
--------------------------------------------------------------------------------
/internal/chmodfs/chmodfs.go:
--------------------------------------------------------------------------------
1 | package chmodfs
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/go-git/go-billy/v5"
7 | )
8 |
9 | func New(fs billy.Filesystem) billy.Filesystem {
10 | return &osfsWithChmod{
11 | Filesystem: fs,
12 | }
13 | }
14 |
// osfsWithChmod embeds a billy.Filesystem and overrides only Chmod,
// delegating it to the host OS.
type osfsWithChmod struct {
	billy.Filesystem
}

// Chmod changes the mode of the named file via os.Chmod. Note that name is
// passed to os.Chmod as-is, so it is resolved against the process working
// directory rather than the billy filesystem root.
func (fs *osfsWithChmod) Chmod(name string, mode os.FileMode) error {
	return os.Chmod(name, mode)
}
22 |
--------------------------------------------------------------------------------
/internal/ebutil/libs.go:
--------------------------------------------------------------------------------
1 | package ebutil
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | )
9 |
10 | // Container runtimes like NVIDIA mount individual libraries into the container
// (e.g. `libcuda.so.<version>`) and create symlinks for them
// (e.g. `libcuda.so.1`). This code helps with finding the right library
13 | // directory for the target Linux distribution as well as locating the symlinks.
14 | //
15 | // Please see [#143 (comment)] for further details.
16 | //
17 | // [#143 (comment)]: https://github.com/coder/envbuilder/issues/143#issuecomment-2192405828
18 |
// usrLibDir is the library directory used on non-Debian distributions.
// Based on https://github.com/NVIDIA/libnvidia-container/blob/v1.15.0/src/common.h#L29
const usrLibDir = "/usr/lib64"

// debianVersionFile exists on Debian and its derivatives; its presence is
// used to decide whether the multiarch library layout applies.
const debianVersionFile = "/etc/debian_version"
23 |
24 | // libraryDirectoryPath returns the library directory. It returns a multiarch
25 | // directory if the distribution is Debian or a derivative.
26 | //
27 | // Based on https://github.com/NVIDIA/libnvidia-container/blob/v1.15.0/src/nvc_container.c#L152-L165
28 | func libraryDirectoryPath(m mounter) (string, error) {
29 | // Debian and its derivatives use a multiarch directory scheme.
30 | if _, err := m.Stat(debianVersionFile); err != nil && !errors.Is(err, os.ErrNotExist) {
31 | return "", fmt.Errorf("check if debian: %w", err)
32 | } else if err == nil {
33 | return usrLibMultiarchDir, nil
34 | }
35 |
36 | return usrLibDir, nil
37 | }
38 |
39 | // libraryDirectorySymlinks returns a mapping of each library (basename) with a
40 | // list of their symlinks (basename). Libraries with no symlinks do not appear
41 | // in the mapping.
42 | func libraryDirectorySymlinks(m mounter, libDir string) (map[string][]string, error) {
43 | des, err := m.ReadDir(libDir)
44 | if err != nil {
45 | return nil, fmt.Errorf("read directory %s: %w", libDir, err)
46 | }
47 |
48 | libsSymlinks := make(map[string][]string)
49 | for _, de := range des {
50 | if de.IsDir() {
51 | continue
52 | }
53 |
54 | if de.Type()&os.ModeSymlink != os.ModeSymlink {
55 | // Not a symlink. Skip.
56 | continue
57 | }
58 |
59 | symlink := filepath.Join(libDir, de.Name())
60 | path, err := m.EvalSymlinks(symlink)
61 | if err != nil {
62 | return nil, fmt.Errorf("eval symlink %s: %w", symlink, err)
63 | }
64 |
65 | path = filepath.Base(path)
66 | if _, ok := libsSymlinks[path]; !ok {
67 | libsSymlinks[path] = make([]string, 0, 1)
68 | }
69 |
70 | libsSymlinks[path] = append(libsSymlinks[path], de.Name())
71 | }
72 |
73 | return libsSymlinks, nil
74 | }
75 |
76 | // moveLibSymlinks moves a list of symlinks from source to destination directory.
77 | func moveLibSymlinks(m mounter, symlinks []string, srcDir, destDir string) error {
78 | for _, l := range symlinks {
79 | oldpath := filepath.Join(srcDir, l)
80 | newpath := filepath.Join(destDir, l)
81 | if err := m.Rename(oldpath, newpath); err != nil {
82 | return fmt.Errorf("move symlink %s => %s: %w", oldpath, newpath, err)
83 | }
84 | }
85 | return nil
86 | }
87 |
--------------------------------------------------------------------------------
/internal/ebutil/libs_amd64.go:
--------------------------------------------------------------------------------
//go:build amd64

package ebutil

// Based on https://github.com/NVIDIA/libnvidia-container/blob/v1.15.0/src/common.h#L36

// usrLibMultiarchDir is the Debian/Ubuntu multiarch library directory on amd64.
const usrLibMultiarchDir = "/usr/lib/x86_64-linux-gnu"
8 |
--------------------------------------------------------------------------------
/internal/ebutil/libs_arm.go:
--------------------------------------------------------------------------------
//go:build arm

package ebutil

// This constant is for 64-bit systems. 32-bit ARM is not supported.
// If ever it becomes supported, it should be handled with a `usrLib32MultiarchDir` constant.
// NOTE(review): /var/empty appears chosen as an always-empty path so no
// symlinks are found on 32-bit ARM — confirm.
const usrLibMultiarchDir = "/var/empty"
8 |
--------------------------------------------------------------------------------
/internal/ebutil/libs_arm64.go:
--------------------------------------------------------------------------------
//go:build arm64

package ebutil

// Based on https://github.com/NVIDIA/libnvidia-container/blob/v1.15.0/src/common.h#L52

// usrLibMultiarchDir is the Debian/Ubuntu multiarch library directory on arm64.
const usrLibMultiarchDir = "/usr/lib/aarch64-linux-gnu"
8 |
--------------------------------------------------------------------------------
/internal/ebutil/mock_mounter_test.go:
--------------------------------------------------------------------------------
1 | // Code generated by MockGen. DO NOT EDIT.
2 | // Source: remount.go
3 | //
4 | // Generated by this command:
5 | //
6 | // mockgen -source=remount.go -package=ebutil -destination=mock_mounter_test.go -write_generate_directive
7 | //
8 |
9 | // Package ebutil is a generated GoMock package.
10 | package ebutil
11 |
12 | import (
13 | os "os"
14 | reflect "reflect"
15 |
16 | procfs "github.com/prometheus/procfs"
17 | gomock "go.uber.org/mock/gomock"
18 | )
19 |
20 | //go:generate mockgen -source=remount.go -package=ebutil -destination=mock_mounter_test.go -write_generate_directive
21 |
22 | // Mockmounter is a mock of mounter interface.
23 | type Mockmounter struct {
24 | ctrl *gomock.Controller
25 | recorder *MockmounterMockRecorder
26 | }
27 |
28 | // MockmounterMockRecorder is the mock recorder for Mockmounter.
29 | type MockmounterMockRecorder struct {
30 | mock *Mockmounter
31 | }
32 |
33 | // NewMockmounter creates a new mock instance.
34 | func NewMockmounter(ctrl *gomock.Controller) *Mockmounter {
35 | mock := &Mockmounter{ctrl: ctrl}
36 | mock.recorder = &MockmounterMockRecorder{mock}
37 | return mock
38 | }
39 |
40 | // EXPECT returns an object that allows the caller to indicate expected use.
41 | func (m *Mockmounter) EXPECT() *MockmounterMockRecorder {
42 | return m.recorder
43 | }
44 |
45 | // EvalSymlinks mocks base method.
46 | func (m *Mockmounter) EvalSymlinks(arg0 string) (string, error) {
47 | m.ctrl.T.Helper()
48 | ret := m.ctrl.Call(m, "EvalSymlinks", arg0)
49 | ret0, _ := ret[0].(string)
50 | ret1, _ := ret[1].(error)
51 | return ret0, ret1
52 | }
53 |
54 | // EvalSymlinks indicates an expected call of EvalSymlinks.
55 | func (mr *MockmounterMockRecorder) EvalSymlinks(arg0 any) *gomock.Call {
56 | mr.mock.ctrl.T.Helper()
57 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EvalSymlinks", reflect.TypeOf((*Mockmounter)(nil).EvalSymlinks), arg0)
58 | }
59 |
60 | // GetMounts mocks base method.
61 | func (m *Mockmounter) GetMounts() ([]*procfs.MountInfo, error) {
62 | m.ctrl.T.Helper()
63 | ret := m.ctrl.Call(m, "GetMounts")
64 | ret0, _ := ret[0].([]*procfs.MountInfo)
65 | ret1, _ := ret[1].(error)
66 | return ret0, ret1
67 | }
68 |
69 | // GetMounts indicates an expected call of GetMounts.
70 | func (mr *MockmounterMockRecorder) GetMounts() *gomock.Call {
71 | mr.mock.ctrl.T.Helper()
72 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMounts", reflect.TypeOf((*Mockmounter)(nil).GetMounts))
73 | }
74 |
75 | // MkdirAll mocks base method.
76 | func (m *Mockmounter) MkdirAll(arg0 string, arg1 os.FileMode) error {
77 | m.ctrl.T.Helper()
78 | ret := m.ctrl.Call(m, "MkdirAll", arg0, arg1)
79 | ret0, _ := ret[0].(error)
80 | return ret0
81 | }
82 |
83 | // MkdirAll indicates an expected call of MkdirAll.
84 | func (mr *MockmounterMockRecorder) MkdirAll(arg0, arg1 any) *gomock.Call {
85 | mr.mock.ctrl.T.Helper()
86 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MkdirAll", reflect.TypeOf((*Mockmounter)(nil).MkdirAll), arg0, arg1)
87 | }
88 |
89 | // Mount mocks base method.
90 | func (m *Mockmounter) Mount(arg0, arg1, arg2 string, arg3 uintptr, arg4 string) error {
91 | m.ctrl.T.Helper()
92 | ret := m.ctrl.Call(m, "Mount", arg0, arg1, arg2, arg3, arg4)
93 | ret0, _ := ret[0].(error)
94 | return ret0
95 | }
96 |
97 | // Mount indicates an expected call of Mount.
98 | func (mr *MockmounterMockRecorder) Mount(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call {
99 | mr.mock.ctrl.T.Helper()
100 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mount", reflect.TypeOf((*Mockmounter)(nil).Mount), arg0, arg1, arg2, arg3, arg4)
101 | }
102 |
103 | // OpenFile mocks base method.
104 | func (m *Mockmounter) OpenFile(arg0 string, arg1 int, arg2 os.FileMode) (*os.File, error) {
105 | m.ctrl.T.Helper()
106 | ret := m.ctrl.Call(m, "OpenFile", arg0, arg1, arg2)
107 | ret0, _ := ret[0].(*os.File)
108 | ret1, _ := ret[1].(error)
109 | return ret0, ret1
110 | }
111 |
112 | // OpenFile indicates an expected call of OpenFile.
113 | func (mr *MockmounterMockRecorder) OpenFile(arg0, arg1, arg2 any) *gomock.Call {
114 | mr.mock.ctrl.T.Helper()
115 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenFile", reflect.TypeOf((*Mockmounter)(nil).OpenFile), arg0, arg1, arg2)
116 | }
117 |
118 | // ReadDir mocks base method.
119 | func (m *Mockmounter) ReadDir(arg0 string) ([]os.DirEntry, error) {
120 | m.ctrl.T.Helper()
121 | ret := m.ctrl.Call(m, "ReadDir", arg0)
122 | ret0, _ := ret[0].([]os.DirEntry)
123 | ret1, _ := ret[1].(error)
124 | return ret0, ret1
125 | }
126 |
127 | // ReadDir indicates an expected call of ReadDir.
128 | func (mr *MockmounterMockRecorder) ReadDir(arg0 any) *gomock.Call {
129 | mr.mock.ctrl.T.Helper()
130 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*Mockmounter)(nil).ReadDir), arg0)
131 | }
132 |
133 | // Rename mocks base method.
134 | func (m *Mockmounter) Rename(arg0, arg1 string) error {
135 | m.ctrl.T.Helper()
136 | ret := m.ctrl.Call(m, "Rename", arg0, arg1)
137 | ret0, _ := ret[0].(error)
138 | return ret0
139 | }
140 |
141 | // Rename indicates an expected call of Rename.
142 | func (mr *MockmounterMockRecorder) Rename(arg0, arg1 any) *gomock.Call {
143 | mr.mock.ctrl.T.Helper()
144 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Rename", reflect.TypeOf((*Mockmounter)(nil).Rename), arg0, arg1)
145 | }
146 |
147 | // Stat mocks base method.
148 | func (m *Mockmounter) Stat(arg0 string) (os.FileInfo, error) {
149 | m.ctrl.T.Helper()
150 | ret := m.ctrl.Call(m, "Stat", arg0)
151 | ret0, _ := ret[0].(os.FileInfo)
152 | ret1, _ := ret[1].(error)
153 | return ret0, ret1
154 | }
155 |
156 | // Stat indicates an expected call of Stat.
157 | func (mr *MockmounterMockRecorder) Stat(arg0 any) *gomock.Call {
158 | mr.mock.ctrl.T.Helper()
159 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*Mockmounter)(nil).Stat), arg0)
160 | }
161 |
162 | // Unmount mocks base method.
163 | func (m *Mockmounter) Unmount(arg0 string, arg1 int) error {
164 | m.ctrl.T.Helper()
165 | ret := m.ctrl.Call(m, "Unmount", arg0, arg1)
166 | ret0, _ := ret[0].(error)
167 | return ret0
168 | }
169 |
170 | // Unmount indicates an expected call of Unmount.
171 | func (mr *MockmounterMockRecorder) Unmount(arg0, arg1 any) *gomock.Call {
172 | mr.mock.ctrl.T.Helper()
173 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmount", reflect.TypeOf((*Mockmounter)(nil).Unmount), arg0, arg1)
174 | }
175 |
--------------------------------------------------------------------------------
/internal/ebutil/remount.go:
--------------------------------------------------------------------------------
1 | package ebutil
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 | "path/filepath"
8 | "strings"
9 | "sync"
10 | "syscall"
11 |
12 | "github.com/coder/envbuilder/log"
13 | "github.com/hashicorp/go-multierror"
14 | "github.com/prometheus/procfs"
15 | )
16 |
17 | // TempRemount iterates through all read-only mounted filesystems, bind-mounts them at dest,
18 | // and unmounts them from their original source. All mount points underneath ignorePrefixes
19 | // will not be touched.
20 | //
21 | // Some container runtimes such as sysbox-runc will mount in `/lib/modules` read-only.
22 | // See https://github.com/nestybox/sysbox/issues/564
23 | // This trips us up because:
24 | // 1. We call a Kaniko library function `util.DeleteFilesystem` that does exactly what it says
25 | // on the tin. If this hits a read-only volume mounted in, unhappiness is the result.
26 | // 2. After deleting the filesystem and building the image, we extract it to the filesystem.
27 | // If some paths mounted in via volume are present at that time, unhappiness is also likely
28 | // to result -- especially in case of read-only mounts.
29 | //
30 | // To work around this we move the mounts out of the way temporarily by bind-mounting them
31 | // while we do our thing, and move them back when we're done.
32 | //
33 | // It is the responsibility of the caller to call the returned function
34 | // to restore the original mount points. If an error is encountered while attempting to perform
35 | // the operation, calling the returned function will make a best-effort attempt to restore
36 | // the original state.
37 | func TempRemount(logf log.Func, dest string, ignorePrefixes ...string) (restore func() error, err error,
38 | ) {
39 | return tempRemount(&realMounter{}, logf, dest, ignorePrefixes...)
40 | }
41 |
// tempRemount is the testable core of TempRemount; m abstracts all
// filesystem and mount syscalls so the logic can be exercised with a mock.
func tempRemount(m mounter, logf log.Func, base string, ignorePrefixes ...string) (restore func() error, err error) {
	mountInfos, err := m.GetMounts()
	if err != nil {
		return func() error { return nil }, fmt.Errorf("get mounts: %w", err)
	}

	libDir, err := libraryDirectoryPath(m)
	if err != nil {
		return func() error { return nil }, fmt.Errorf("get lib directory: %w", err)
	}

	// Record symlinks in the library directory up front so they can be
	// moved together with the library files they point at.
	// A missing library directory is tolerated (ErrNotExist).
	libsSymlinks, err := libraryDirectorySymlinks(m, libDir)
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		return func() error { return nil }, fmt.Errorf("read lib symlinks: %w", err)
	}

	// temp move of all ro mounts
	// mounts maps original mount point => temporary location under base.
	mounts := map[string]string{}
	var restoreOnce sync.Once
	var merr error
	// closer to attempt to restore original mount points
	// Safe to call multiple times; only the first call performs work, and
	// subsequent calls return the accumulated error from that first run.
	restore = func() error {
		restoreOnce.Do(func() {
			if len(mounts) == 0 {
				return
			}

			// NOTE(review): the library directory is re-resolved here,
			// presumably because the filesystem may have been replaced
			// between move and restore — confirm.
			newLibDir, err := libraryDirectoryPath(m)
			if err != nil {
				merr = multierror.Append(merr, fmt.Errorf("get new lib directory: %w", err))
				return
			}

			// Best-effort: keep restoring remaining mounts even if one fails,
			// accumulating errors in merr.
			for orig, moved := range mounts {
				logf(log.LevelDebug, "restore mount %s", orig)
				if err := remount(m, moved, orig, newLibDir, libsSymlinks); err != nil {
					merr = multierror.Append(merr, fmt.Errorf("restore mount: %w", err))
				}
			}
		})
		return merr
	}

outer:
	for _, mountInfo := range mountInfos {
		// TODO: do this for all mounts
		// Only read-only mounts are moved; read-write mounts are skipped.
		if _, ok := mountInfo.Options["ro"]; !ok {
			logf(log.LevelDebug, "skip rw mount %s", mountInfo.MountPoint)
			continue
		}

		for _, prefix := range ignorePrefixes {
			if strings.HasPrefix(mountInfo.MountPoint, prefix) {
				logf(log.LevelDebug, "skip mount %s under ignored prefix %s", mountInfo.MountPoint, prefix)
				continue outer
			}
		}

		src := mountInfo.MountPoint
		dest := filepath.Join(base, src)
		logf(log.LevelDebug, "temp remount %s", src)
		if err := remount(m, src, dest, libDir, libsSymlinks); err != nil {
			// Partial failure: the returned restore can undo the mounts
			// moved so far.
			return restore, fmt.Errorf("temp remount: %w", err)
		}

		mounts[src] = dest
	}

	return restore, nil
}
112 |
// remount bind-mounts src at dest and then unmounts src. When src is a single
// file whose destination directory is a known library directory, the
// destination is redirected into libDir, and any symlinks to the file (from
// libsSymlinks) are moved along with it.
func remount(m mounter, src, dest, libDir string, libsSymlinks map[string][]string) error {
	stat, err := m.Stat(src)
	if err != nil {
		return fmt.Errorf("stat %s: %w", src, err)
	}

	var destDir string
	if stat.IsDir() {
		destDir = dest
	} else {
		destDir = filepath.Dir(dest)
		if destDir == usrLibDir || destDir == usrLibMultiarchDir {
			// Restore mount to libDir
			destDir = libDir
			dest = filepath.Join(destDir, stat.Name())
		}
	}

	if err := m.MkdirAll(destDir, 0o750); err != nil {
		return fmt.Errorf("ensure path: %w", err)
	}

	if !stat.IsDir() {
		// A bind mount of a single file requires the target file to exist.
		f, err := m.OpenFile(dest, os.O_CREATE, 0o640)
		if err != nil {
			return fmt.Errorf("ensure file path: %w", err)
		}
		// This ensure the file is created, it will not be used. It can be closed immediately.
		f.Close()

		if symlinks, ok := libsSymlinks[stat.Name()]; ok {
			srcDir := filepath.Dir(src)
			if err := moveLibSymlinks(m, symlinks, srcDir, destDir); err != nil {
				return err
			}
		}
	}

	// Bind first, then unmount the original, so the data is reachable at
	// dest before it disappears from src.
	if err := m.Mount(src, dest, "bind", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind mount %s => %s: %w", src, dest, err)
	}

	if err := m.Unmount(src, 0); err != nil {
		return fmt.Errorf("unmount orig src %s: %w", src, err)
	}
	return nil
}
160 |
// mounter is an interface to system-level calls used by TempRemount.
// It exists so the mount/restore logic can be exercised with a mock
// (see mock_mounter_test.go).
type mounter interface {
	// GetMounts wraps procfs.GetMounts
	GetMounts() ([]*procfs.MountInfo, error)
	// Stat wraps os.Stat
	Stat(string) (os.FileInfo, error)
	// MkdirAll wraps os.MkdirAll
	MkdirAll(string, os.FileMode) error
	// OpenFile wraps os.OpenFile
	OpenFile(string, int, os.FileMode) (*os.File, error)
	// Mount wraps syscall.Mount
	Mount(string, string, string, uintptr, string) error
	// Unmount wraps syscall.Unmount
	Unmount(string, int) error
	// ReadDir wraps os.ReadDir
	ReadDir(string) ([]os.DirEntry, error)
	// EvalSymlinks wraps filepath.EvalSymlinks
	EvalSymlinks(string) (string, error)
	// Rename wraps os.Rename
	Rename(string, string) error
}
182 |
// realMounter implements mounter and actually does the thing.
// Every method is a direct delegation to the corresponding stdlib or
// procfs call; no additional logic lives here.
type realMounter struct{}

// Compile-time check that realMounter satisfies mounter.
var _ mounter = &realMounter{}

func (m *realMounter) Mount(src string, dest string, fstype string, flags uintptr, data string) error {
	return syscall.Mount(src, dest, fstype, flags, data)
}

func (m *realMounter) Unmount(tgt string, flags int) error {
	return syscall.Unmount(tgt, flags)
}

func (m *realMounter) GetMounts() ([]*procfs.MountInfo, error) {
	return procfs.GetMounts()
}

func (m *realMounter) MkdirAll(path string, perm os.FileMode) error {
	return os.MkdirAll(path, perm)
}

func (m *realMounter) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
	return os.OpenFile(name, flag, perm)
}

func (m *realMounter) Stat(path string) (os.FileInfo, error) {
	return os.Stat(path)
}

func (m *realMounter) ReadDir(name string) ([]os.DirEntry, error) {
	return os.ReadDir(name)
}

func (m *realMounter) EvalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}

func (m *realMounter) Rename(oldpath, newpath string) error {
	return os.Rename(oldpath, newpath)
}
223 |
--------------------------------------------------------------------------------
/internal/workingdir/workingdir.go:
--------------------------------------------------------------------------------
1 | package workingdir
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 | )
7 |
const (
	// defaultWorkingDirBase is the default working location for envbuilder.
	// This is a special directory that must not be modified by the user
	// or images. This is intentionally unexported.
	defaultWorkingDirBase = "/.envbuilder"

	// TempDir is a directory inside the build context inside which
	// we place files referenced by MagicDirectives.
	TempDir = ".envbuilder.tmp"
)

var (
	// Default is the default working directory for Envbuilder.
	// This defaults to /.envbuilder. It should only be used when Envbuilder
	// is known to be running as root inside a container.
	Default WorkingDir
	// Directives are directives automatically appended to Dockerfiles
	// when pushing the image. These directives allow the built image to be
	// 're-used'.
	// Format arguments: %[1]s is TempDir (source inside the build context),
	// %[2]s is defaultWorkingDirBase (destination inside the image).
	Directives = fmt.Sprintf(`
COPY --chmod=0755 %[1]s/envbuilder %[2]s/bin/envbuilder
COPY --chmod=0644 %[1]s/image %[2]s/image
USER root
WORKDIR /
ENTRYPOINT ["%[2]s/bin/envbuilder"]
`, TempDir, defaultWorkingDirBase)
)
35 |
// WorkingDir is a working directory for envbuilder. It
// will also be present in images built by envbuilder.
type WorkingDir struct {
	// base is the root path; when empty, Path falls back to
	// defaultWorkingDirBase, making the zero value usable.
	base string
}
41 |
42 | // At returns a WorkingDir rooted at filepath.Join(paths...)
43 | func At(paths ...string) WorkingDir {
44 | if len(paths) == 0 {
45 | return WorkingDir{}
46 | }
47 | return WorkingDir{base: filepath.Join(paths...)}
48 | }
49 |
50 | // Join returns the result of filepath.Join([m.Path, paths...]).
51 | func (m WorkingDir) Join(paths ...string) string {
52 | return filepath.Join(append([]string{m.Path()}, paths...)...)
53 | }
54 |
// Path returns the base path of the working directory.
func (m WorkingDir) Path() string {
	// Instead of the zero value, use defaultWorkingDirBase.
	if m.base == "" {
		return defaultWorkingDirBase
	}
	return m.base
}
63 |
// Built is a file that is created in the workspace
// when envbuilder has already been run. This is used
// to skip building when a container is restarting.
// e.g. docker stop -> docker start
func (m WorkingDir) Built() string {
	return m.Join("built")
}

// Image is a file that is created in the image when
// envbuilder has already been run. This is used to skip
// the destructive initial build step when 'resuming' envbuilder
// from a previously built image.
func (m WorkingDir) Image() string {
	return m.Join("image")
}

// Features returns the path of the directory inside the working
// directory that contains feature files.
func (m WorkingDir) Features() string {
	return m.Join("features")
}
84 |
--------------------------------------------------------------------------------
/internal/workingdir/workingdir_internal_test.go:
--------------------------------------------------------------------------------
1 | package workingdir
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/require"
7 | )
8 |
9 | func Test_WorkingDir(t *testing.T) {
10 | t.Parallel()
11 |
12 | t.Run("Default", func(t *testing.T) {
13 | t.Parallel()
14 | require.Equal(t, defaultWorkingDirBase+"/foo", Default.Join("foo"))
15 | require.Equal(t, defaultWorkingDirBase, Default.Path())
16 | require.Equal(t, defaultWorkingDirBase+"/built", Default.Built())
17 | require.Equal(t, defaultWorkingDirBase+"/image", Default.Image())
18 | })
19 |
20 | t.Run("ZeroValue", func(t *testing.T) {
21 | t.Parallel()
22 | var md WorkingDir
23 | require.Equal(t, defaultWorkingDirBase+"/foo", md.Join("foo"))
24 | require.Equal(t, defaultWorkingDirBase, md.Path())
25 | require.Equal(t, defaultWorkingDirBase+"/built", md.Built())
26 | require.Equal(t, defaultWorkingDirBase+"/image", md.Image())
27 | })
28 |
29 | t.Run("At", func(t *testing.T) {
30 | t.Parallel()
31 | tmpDir := t.TempDir()
32 | md := At(tmpDir)
33 | require.Equal(t, tmpDir+"/foo", md.Join("foo"))
34 | require.Equal(t, tmpDir, md.Path())
35 | require.Equal(t, tmpDir+"/built", md.Built())
36 | require.Equal(t, tmpDir+"/image", md.Image())
37 | })
38 | }
39 |
--------------------------------------------------------------------------------
/log/coder.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "net/url"
8 | "os"
9 | "sync"
10 | "time"
11 |
12 | "cdr.dev/slog"
13 | "cdr.dev/slog/sloggers/sloghuman"
14 | "github.com/coder/coder/v2/agent/proto"
15 | "github.com/coder/coder/v2/codersdk"
16 | "github.com/coder/coder/v2/codersdk/agentsdk"
17 | "github.com/coder/retry"
18 | "github.com/google/uuid"
19 | "golang.org/x/mod/semver"
20 | )
21 |
var (
	// We set a relatively high connection timeout for the initial connection.
	// There is an unfortunate race between the envbuilder container starting and the
	// associated provisioner job completing.
	rpcConnectTimeout = 30 * time.Second
	// logSendGracePeriod bounds how long the closers wait for queued logs
	// to be flushed before giving up.
	logSendGracePeriod = 10 * time.Second
	// minAgentAPIV2 is the minimum Coder version supporting Agent API v2;
	// older versions fall back to the deprecated PatchLogs endpoint.
	minAgentAPIV2 = "v2.9"
)
30 |
// Coder establishes a connection to the Coder instance located at coderURL and
// authenticates using token. It then establishes a dRPC connection to the Agent
// API and begins sending logs. If the version of Coder does not support the
// Agent API, it will fall back to using the PatchLogs endpoint. The closer is
// used to close the logger and to wait at most logSendGracePeriod for logs to
// be sent. Cancelling the context will close the logs immediately without
// waiting for logs to be sent.
func Coder(ctx context.Context, coderURL *url.URL, token string) (logger Func, closer func(), err error) {
	// To troubleshoot issues, we need some way of logging.
	metaLogger := slog.Make(sloghuman.Sink(os.Stderr))
	defer metaLogger.Sync()
	client := initClient(coderURL, token)
	bi, err := client.SDK.BuildInfo(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("get coder build version: %w", err)
	}
	// Versions older than minAgentAPIV2 only support the deprecated
	// PatchLogs endpoint.
	if semver.Compare(semver.MajorMinor(bi.Version), minAgentAPIV2) < 0 {
		metaLogger.Warn(ctx, "Detected Coder version incompatible with AgentAPI v2, falling back to deprecated API", slog.F("coder_version", bi.Version))
		logger, closer = sendLogsV1(ctx, client, metaLogger.Named("send_logs_v1"))
		return logger, closer, nil
	}

	// Create a new context so we can ensure the connection is torn down.
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		// Only cancel on error; on success the returned closer owns teardown.
		if err != nil {
			cancel()
		}
	}()
	// Note that ctx passed to initRPC will be inherited by the
	// underlying connection, nothing we can do about that here.
	dac, err := initRPC(ctx, client, metaLogger.Named("init_rpc"))
	if err != nil {
		// Logged externally
		return nil, nil, fmt.Errorf("init coder rpc client: %w", err)
	}
	ls := agentsdk.NewLogSender(metaLogger.Named("coder_log_sender"))
	metaLogger.Warn(ctx, "Sending logs via AgentAPI v2", slog.F("coder_version", bi.Version))
	logger, loggerCloser := sendLogsV2(ctx, dac, ls, metaLogger.Named("send_logs_v2"))
	var closeOnce sync.Once
	closer = func() {
		// Flush and stop the log sender before tearing down the connection.
		loggerCloser()

		closeOnce.Do(func() {
			// Typically cancel would be after Close, but we want to be
			// sure there's nothing that might block on Close.
			cancel()
			_ = dac.DRPCConn().Close()
		})
	}
	return logger, closer, nil
}
83 |
// coderLogSender is the log-sending interface consumed by sendLogsV2.
// It matches the methods used on the sender created by agentsdk.NewLogSender
// in Coder, presumably declared as an interface for testability.
type coderLogSender interface {
	Enqueue(uuid.UUID, ...agentsdk.Log)
	SendLoop(context.Context, agentsdk.LogDest) error
	Flush(uuid.UUID)
	WaitUntilEmpty(context.Context) error
}
90 |
91 | func initClient(coderURL *url.URL, token string) *agentsdk.Client {
92 | client := agentsdk.New(coderURL)
93 | client.SetSessionToken(token)
94 | return client
95 | }
96 |
97 | func initRPC(ctx context.Context, client *agentsdk.Client, l slog.Logger) (proto.DRPCAgentClient20, error) {
98 | var c proto.DRPCAgentClient20
99 | var err error
100 | retryCtx, retryCancel := context.WithTimeout(ctx, rpcConnectTimeout)
101 | defer retryCancel()
102 | attempts := 0
103 | for r := retry.New(100*time.Millisecond, time.Second); r.Wait(retryCtx); {
104 | attempts++
105 | // Maximize compatibility.
106 | c, err = client.ConnectRPC20(ctx)
107 | if err != nil {
108 | l.Debug(ctx, "Failed to connect to Coder", slog.F("error", err), slog.F("attempt", attempts))
109 | continue
110 | }
111 | break
112 | }
113 | if c == nil {
114 | return nil, err
115 | }
116 | return proto.NewDRPCAgentClient(c.DRPCConn()), nil
117 | }
118 |
// sendLogsV1 uses the PatchLogs endpoint to send logs.
// This is deprecated, but required for backward compatibility with older versions of Coder.
func sendLogsV1(ctx context.Context, client *agentsdk.Client, l slog.Logger) (logger Func, closer func()) {
	// nolint: staticcheck // required for backwards compatibility
	sendLog, flushAndClose := agentsdk.LogsSender(agentsdk.ExternalLogSourceID, client.PatchLogs, slog.Logger{})
	var mu sync.Mutex
	return func(lvl Level, msg string, args ...any) {
			log := agentsdk.Log{
				CreatedAt: time.Now(),
				Output:    fmt.Sprintf(msg, args...),
				Level:     codersdk.LogLevel(lvl),
			}
			// Serialize calls to sendLog; the mutex guards against
			// concurrent use of the shared sender.
			mu.Lock()
			defer mu.Unlock()
			if err := sendLog(ctx, log); err != nil {
				l.Warn(ctx, "failed to send logs to Coder", slog.Error(err))
			}
		}, func() {
			// Bound the final flush by logSendGracePeriod.
			ctx, cancel := context.WithTimeout(ctx, logSendGracePeriod)
			defer cancel()
			if err := flushAndClose(ctx); err != nil {
				l.Warn(ctx, "failed to flush logs", slog.Error(err))
			}
		}
}
144 |
// sendLogsV2 uses the v2 agent API to send logs. Only compatible with coder versions >= 2.9.
// The returned logger enqueues log lines; a background goroutine ships
// them to dest until the returned closer is called.
func sendLogsV2(ctx context.Context, dest agentsdk.LogDest, ls coderLogSender, l slog.Logger) (logger Func, closer func()) {
	sendCtx, sendCancel := context.WithCancel(ctx)
	done := make(chan struct{})
	uid := uuid.New()
	// Run the send loop in the background; it exits when sendCancel is
	// called by the closer (context.Canceled is expected then).
	go func() {
		defer close(done)
		if err := ls.SendLoop(sendCtx, dest); err != nil {
			if !errors.Is(err, context.Canceled) {
				l.Warn(ctx, "failed to send logs to Coder", slog.Error(err))
			}
		}
	}()

	var closeOnce sync.Once
	return func(l Level, msg string, args ...any) {
			ls.Enqueue(uid, agentsdk.Log{
				CreatedAt: time.Now(),
				Output:    fmt.Sprintf(msg, args...),
				Level:     codersdk.LogLevel(l),
			})
		}, func() {
			closeOnce.Do(func() {
				// Trigger a flush and wait for logs to be sent.
				ls.Flush(uid)
				ctx, cancel := context.WithTimeout(ctx, logSendGracePeriod)
				defer cancel()
				err := ls.WaitUntilEmpty(ctx)
				if err != nil {
					l.Warn(ctx, "log sender did not empty", slog.Error(err))
				}

				// Stop the send loop.
				sendCancel()
			})

			// Wait for the send loop to finish.
			// NOTE: deliberately outside closeOnce so every caller of the
			// closer blocks until the goroutine has exited.
			<-done
		}
}
185 |
--------------------------------------------------------------------------------
/log/coder_internal_test.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "math/rand"
8 | "net/http"
9 | "net/http/httptest"
10 | "net/url"
11 | "sync"
12 | "testing"
13 | "time"
14 |
15 | "cdr.dev/slog/sloggers/slogtest"
16 | "github.com/coder/coder/v2/agent/proto"
17 | "github.com/coder/coder/v2/codersdk"
18 | "github.com/coder/coder/v2/codersdk/agentsdk"
19 | "github.com/google/uuid"
20 | "github.com/stretchr/testify/assert"
21 | "github.com/stretchr/testify/require"
22 | )
23 |
// TestCoder exercises the Coder log destination against fake HTTP
// servers. Subtests reporting buildinfo version v2.8.9 (< 2.9) exercise
// the legacy V1 PatchLogs path; v2.9.0 exercises the V2 DRPC path.
func TestCoder(t *testing.T) {
	t.Parallel()

	t.Run("V1/OK", func(t *testing.T) {
		t.Parallel()

		token := uuid.NewString()
		gotLogs := make(chan struct{})
		var closeOnce sync.Once
		handler := func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/api/v2/buildinfo" {
				w.Header().Set("Content-Type", "application/json")
				_, _ = w.Write([]byte(`{"version": "v2.8.9"}`))
				return
			}
			// Signal once the first non-buildinfo request arrives.
			defer closeOnce.Do(func() { close(gotLogs) })
			tokHdr := r.Header.Get(codersdk.SessionTokenHeader)
			assert.Equal(t, token, tokHdr)
			req, ok := decodeV1Logs(t, w, r)
			if !ok {
				return
			}
			if assert.Len(t, req.Logs, 1) {
				assert.Equal(t, "hello world", req.Logs[0].Output)
				assert.Equal(t, codersdk.LogLevelInfo, req.Logs[0].Level)
			}
		}
		srv := httptest.NewServer(http.HandlerFunc(handler))
		defer srv.Close()

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		logger, _ := newCoderLogger(ctx, t, srv.URL, token)
		logger(LevelInfo, "hello %s", "world")
		<-gotLogs
	})

	t.Run("V1/Close", func(t *testing.T) {
		t.Parallel()

		var got []agentsdk.Log
		handler := func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/api/v2/buildinfo" {
				w.Header().Set("Content-Type", "application/json")
				_, _ = w.Write([]byte(`{"version": "v2.8.9"}`))
				return
			}
			req, ok := decodeV1Logs(t, w, r)
			if !ok {
				return
			}
			got = append(got, req.Logs...)
		}
		srv := httptest.NewServer(http.HandlerFunc(handler))
		defer srv.Close()

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		logger, closer := newCoderLogger(ctx, t, srv.URL, uuid.NewString())
		logger(LevelInfo, "1")
		logger(LevelInfo, "2")
		closer()
		// Logs written after close must be dropped.
		logger(LevelInfo, "3")
		require.Len(t, got, 2)
		assert.Equal(t, "1", got[0].Output)
		assert.Equal(t, "2", got[1].Output)
	})

	t.Run("V1/ErrUnauthorized", func(t *testing.T) {
		t.Parallel()

		token := uuid.NewString()
		authFailed := make(chan struct{})
		var closeOnce sync.Once
		handler := func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/api/v2/buildinfo" {
				w.Header().Set("Content-Type", "application/json")
				_, _ = w.Write([]byte(`{"version": "v2.8.9"}`))
				return
			}
			defer closeOnce.Do(func() { close(authFailed) })
			w.WriteHeader(http.StatusUnauthorized)
		}
		srv := httptest.NewServer(http.HandlerFunc(handler))
		defer srv.Close()

		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		u, err := url.Parse(srv.URL)
		require.NoError(t, err)
		log, _, err := Coder(ctx, u, token)
		require.NoError(t, err)
		// defer closeLog()
		log(LevelInfo, "hello %s", "world")
		<-authFailed
	})

	t.Run("V1/ErrNotCoder", func(t *testing.T) {
		t.Parallel()

		token := uuid.NewString()
		handlerCalled := make(chan struct{})
		var closeOnce sync.Once
		handler := func(w http.ResponseWriter, r *http.Request) {
			// Plain text (not JSON) buildinfo response should be rejected.
			defer closeOnce.Do(func() { close(handlerCalled) })
			_, _ = fmt.Fprintf(w, `hello world`)
		}
		srv := httptest.NewServer(http.HandlerFunc(handler))
		defer srv.Close()

		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		u, err := url.Parse(srv.URL)
		require.NoError(t, err)
		_, _, err = Coder(ctx, u, token)
		require.ErrorContains(t, err, "get coder build version")
		require.ErrorContains(t, err, "unexpected non-JSON response")
		<-handlerCalled
	})

	// In this test, we just fake out the DRPC server.
	t.Run("V2/OK", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		ld := &fakeLogDest{t: t}
		ls := agentsdk.NewLogSender(slogtest.Make(t, nil))
		logFunc, logsDone := sendLogsV2(ctx, ld, ls, slogtest.Make(t, nil))
		defer logsDone()

		// Send some logs
		for i := 0; i < 10; i++ {
			logFunc(LevelInfo, "info log %d", i+1)
		}

		// Cancel and wait for flush
		cancel()
		t.Logf("cancelled")
		logsDone()

		require.Len(t, ld.logs, 10)
	})

	// In this test, we just fake out the DRPC server.
	t.Run("V2/Close", func(t *testing.T) {
		t.Parallel()

		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		ld := &fakeLogDest{t: t}
		ls := agentsdk.NewLogSender(slogtest.Make(t, nil))
		logger, closer := sendLogsV2(ctx, ld, ls, slogtest.Make(t, nil))
		defer closer()

		logger(LevelInfo, "1")
		logger(LevelInfo, "2")
		closer()
		// Logs enqueued after close must not be delivered.
		logger(LevelInfo, "3")

		require.Len(t, ld.logs, 2)
	})

	// In this test, we validate that a 401 error on the initial connect
	// results in a retry. When envbuilder initially attempts to connect
	// using the Coder agent token, the workspace build may not yet have
	// completed.
	t.Run("V2/Retry", func(t *testing.T) {
		t.Parallel()
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		token := uuid.NewString()
		done := make(chan struct{})
		handlerSend := make(chan int)
		handler := func(w http.ResponseWriter, r *http.Request) {
			t.Logf("test handler: %s", r.URL.Path)
			if r.URL.Path == "/api/v2/buildinfo" {
				w.Header().Set("Content-Type", "application/json")
				_, _ = w.Write([]byte(`{"version": "v2.9.0"}`))
				return
			}
			// The test body scripts each response status via handlerSend.
			code := <-handlerSend
			t.Logf("test handler response: %d", code)
			w.WriteHeader(code)
		}
		srv := httptest.NewServer(http.HandlerFunc(handler))
		defer srv.Close()

		u, err := url.Parse(srv.URL)
		require.NoError(t, err)
		var connectError error
		go func() {
			defer close(handlerSend)
			defer close(done)
			_, _, connectError = Coder(ctx, u, token)
		}()

		// Initial: unauthorized
		handlerSend <- http.StatusUnauthorized
		// 2nd try: still unauthorized
		handlerSend <- http.StatusUnauthorized
		// 3rd try: authorized
		handlerSend <- http.StatusOK

		cancel()

		<-done
		require.ErrorContains(t, connectError, "failed to WebSocket dial")
		require.ErrorIs(t, connectError, context.Canceled)
	})
}
240 |
241 | //nolint:paralleltest // We need to replace a global timeout.
242 | func TestCoderRPCTimeout(t *testing.T) {
243 | // This timeout is picked with the current subtests in mind, it
244 | // should not be changed without good reason.
245 | testReplaceTimeout(t, &rpcConnectTimeout, 500*time.Millisecond)
246 |
247 | // In this test, we just stand up an endpoint that does not
248 | // do dRPC. We'll try to connect, fail to websocket upgrade
249 | // and eventually give up after rpcConnectTimeout.
250 | t.Run("V2/Err", func(t *testing.T) {
251 | t.Parallel()
252 |
253 | token := uuid.NewString()
254 | handlerDone := make(chan struct{})
255 | handlerWait := make(chan struct{})
256 | var closeOnce sync.Once
257 | handler := func(w http.ResponseWriter, r *http.Request) {
258 | if r.URL.Path == "/api/v2/buildinfo" {
259 | w.Header().Set("Content-Type", "application/json")
260 | _, _ = w.Write([]byte(`{"version": "v2.9.0"}`))
261 | return
262 | }
263 | defer closeOnce.Do(func() { close(handlerDone) })
264 | <-handlerWait
265 | w.WriteHeader(http.StatusOK)
266 | }
267 | srv := httptest.NewServer(http.HandlerFunc(handler))
268 | defer srv.Close()
269 |
270 | ctx, cancel := context.WithTimeout(context.Background(), rpcConnectTimeout/2)
271 | defer cancel()
272 | u, err := url.Parse(srv.URL)
273 | require.NoError(t, err)
274 | _, _, err = Coder(ctx, u, token)
275 | require.ErrorContains(t, err, "failed to WebSocket dial")
276 | require.ErrorIs(t, err, context.DeadlineExceeded)
277 | close(handlerWait)
278 | <-handlerDone
279 | })
280 |
281 | t.Run("V2/Timeout", func(t *testing.T) {
282 | t.Parallel()
283 |
284 | token := uuid.NewString()
285 | handlerDone := make(chan struct{})
286 | handlerWait := make(chan struct{})
287 | var closeOnce sync.Once
288 | handler := func(w http.ResponseWriter, r *http.Request) {
289 | if r.URL.Path == "/api/v2/buildinfo" {
290 | w.Header().Set("Content-Type", "application/json")
291 | _, _ = w.Write([]byte(`{"version": "v2.9.0"}`))
292 | return
293 | }
294 | defer closeOnce.Do(func() { close(handlerDone) })
295 | <-handlerWait
296 | w.WriteHeader(http.StatusOK)
297 | }
298 | srv := httptest.NewServer(http.HandlerFunc(handler))
299 | defer srv.Close()
300 |
301 | ctx, cancel := context.WithTimeout(context.Background(), rpcConnectTimeout*2)
302 | defer cancel()
303 | u, err := url.Parse(srv.URL)
304 | require.NoError(t, err)
305 | _, _, err = Coder(ctx, u, token)
306 | require.ErrorContains(t, err, "failed to WebSocket dial")
307 | require.ErrorIs(t, err, context.DeadlineExceeded)
308 | close(handlerWait)
309 | <-handlerDone
310 | })
311 | }
312 |
313 | func decodeV1Logs(t *testing.T, w http.ResponseWriter, r *http.Request) (agentsdk.PatchLogs, bool) {
314 | t.Helper()
315 | var req agentsdk.PatchLogs
316 | err := json.NewDecoder(r.Body).Decode(&req)
317 | if !assert.NoError(t, err) {
318 | http.Error(w, err.Error(), http.StatusBadRequest)
319 | return req, false
320 | }
321 | return req, true
322 | }
323 |
324 | func newCoderLogger(ctx context.Context, t *testing.T, us string, token string) (Func, func()) {
325 | t.Helper()
326 | u, err := url.Parse(us)
327 | require.NoError(t, err)
328 | logger, closer, err := Coder(ctx, u, token)
329 | require.NoError(t, err)
330 | t.Cleanup(closer)
331 | return logger, closer
332 | }
333 |
// fakeLogDest is an in-memory agentsdk.LogDest that records every batch
// of logs it receives.
type fakeLogDest struct {
	t    testing.TB
	logs []*proto.Log
}

// BatchCreateLogs appends the request's logs to d.logs and reports
// success. Not guarded by a mutex: tests stop the send loop before
// inspecting d.logs.
func (d *fakeLogDest) BatchCreateLogs(ctx context.Context, request *proto.BatchCreateLogsRequest) (*proto.BatchCreateLogsResponse, error) {
	d.t.Logf("got %d logs, ", len(request.Logs))
	d.logs = append(d.logs, request.Logs...)
	return &proto.BatchCreateLogsResponse{}, nil
}
344 |
345 | func testReplaceTimeout(t *testing.T, v *time.Duration, d time.Duration) {
346 | t.Helper()
347 | if isParallel(t) {
348 | t.Fatal("cannot replace timeout in parallel test")
349 | }
350 | old := *v
351 | *v = d
352 | t.Cleanup(func() { *v = old })
353 | }
354 |
// isParallel reports whether t has been marked parallel, by exploiting
// the fact that t.Setenv panics inside a parallel test.
func isParallel(t *testing.T) (ret bool) {
	t.Helper()
	defer func() {
		// recover() is non-nil only when Setenv panicked.
		ret = recover() != nil
	}()
	// Random variable name to avoid collisions.
	t.Setenv(fmt.Sprintf("__TEST_CHECK_IS_PARALLEL_%d", rand.Int()), "1")
	return false
}
368 |
--------------------------------------------------------------------------------
/log/log.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "io"
7 | "strings"
8 |
9 | "github.com/coder/coder/v2/codersdk"
10 | )
11 |
// Func is the logging function signature used throughout envbuilder:
// a printf-style message logged at the given level.
type Func func(l Level, msg string, args ...any)

// Level identifies the severity of a log line.
type Level string

// Below constants are the same as their codersdk equivalents.
const (
	LevelTrace = Level(codersdk.LogLevelTrace)
	LevelDebug = Level(codersdk.LogLevelDebug)
	LevelInfo  = Level(codersdk.LogLevelInfo)
	LevelWarn  = Level(codersdk.LogLevelWarn)
	LevelError = Level(codersdk.LogLevelError)
)
24 |
25 | // New logs to the provided io.Writer.
26 | func New(w io.Writer, verbose bool) Func {
27 | return func(l Level, msg string, args ...any) {
28 | if !verbose {
29 | switch l {
30 | case LevelDebug, LevelTrace:
31 | return
32 | }
33 | }
34 | _, _ = fmt.Fprintf(w, msg, args...)
35 | if !strings.HasSuffix(msg, "\n") {
36 | _, _ = fmt.Fprintf(w, "\n")
37 | }
38 | }
39 | }
40 |
41 | // Wrap wraps the provided LogFuncs into a single Func.
42 | func Wrap(fs ...Func) Func {
43 | return func(l Level, msg string, args ...any) {
44 | for _, f := range fs {
45 | f(l, msg, args...)
46 | }
47 | }
48 | }
49 |
50 | // Writer returns an io.Writer that logs all writes in a separate goroutine.
51 | // It is the responsibility of the caller to call the returned
52 | // function to stop the goroutine.
53 | func Writer(logf Func) (io.Writer, func()) {
54 | pipeReader, pipeWriter := io.Pipe()
55 | doneCh := make(chan struct{})
56 | go func() {
57 | defer pipeWriter.Close()
58 | defer pipeReader.Close()
59 | scanner := bufio.NewScanner(pipeReader)
60 | for {
61 | select {
62 | case <-doneCh:
63 | return
64 | default:
65 | if !scanner.Scan() {
66 | return
67 | }
68 | logf(LevelInfo, "%s", scanner.Text())
69 | }
70 | }
71 | }()
72 | closer := func() {
73 | close(doneCh)
74 | }
75 | return pipeWriter, closer
76 | }
77 |
--------------------------------------------------------------------------------
/log/log_test.go:
--------------------------------------------------------------------------------
1 | package log_test
2 |
3 | import (
4 | "strings"
5 | "testing"
6 |
7 | "github.com/coder/envbuilder/log"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | func Test_Verbose(t *testing.T) {
12 | t.Parallel()
13 |
14 | t.Run("true", func(t *testing.T) {
15 | var sb strings.Builder
16 | l := log.New(&sb, true)
17 | l(log.LevelDebug, "hello")
18 | l(log.LevelInfo, "world")
19 | require.Equal(t, "hello\nworld\n", sb.String())
20 | })
21 |
22 | t.Run("false", func(t *testing.T) {
23 | var sb strings.Builder
24 | l := log.New(&sb, false)
25 | l(log.LevelDebug, "hello")
26 | l(log.LevelInfo, "world")
27 | require.Equal(t, "world\n", sb.String())
28 | })
29 | }
30 |
--------------------------------------------------------------------------------
/log/logrus.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "io"
5 |
6 | "github.com/sirupsen/logrus"
7 | )
8 |
9 | // HijackLogrus hijacks the logrus logger and calls the callback for each log entry.
10 | // This is an abuse of logrus, the package that Kaniko uses, but it exposes
11 | // no other way to obtain the log entries.
12 | func HijackLogrus(lvl Level, callback func(entry *logrus.Entry)) {
13 | logrus.StandardLogger().SetOutput(io.Discard)
14 | logrus.StandardLogger().SetLevel(ToLogrus(lvl))
15 | logrus.StandardLogger().SetFormatter(&logrusFormatter{
16 | callback: callback,
17 | empty: []byte{},
18 | })
19 | }
20 |
// logrusFormatter implements logrus.Formatter. Instead of formatting
// entries, it forwards each one to callback and emits no output.
type logrusFormatter struct {
	callback func(entry *logrus.Entry)
	empty    []byte
}

// Format invokes the callback with the entry and returns an empty byte
// slice so logrus writes nothing itself.
func (f *logrusFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	f.callback(entry)
	return f.empty, nil
}
30 |
31 | func ToLogrus(lvl Level) logrus.Level {
32 | switch lvl {
33 | case LevelTrace:
34 | return logrus.TraceLevel
35 | case LevelDebug:
36 | return logrus.DebugLevel
37 | case LevelInfo:
38 | return logrus.InfoLevel
39 | case LevelWarn:
40 | return logrus.WarnLevel
41 | case LevelError:
42 | return logrus.ErrorLevel
43 | default:
44 | return logrus.InfoLevel
45 | }
46 | }
47 |
48 | func FromLogrus(lvl logrus.Level) Level {
49 | switch lvl {
50 | case logrus.TraceLevel:
51 | return LevelTrace
52 | case logrus.DebugLevel:
53 | return LevelDebug
54 | case logrus.InfoLevel:
55 | return LevelInfo
56 | case logrus.WarnLevel:
57 | return LevelWarn
58 | default: // Error, Fatal, Panic
59 | return LevelError
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/log/logrus_test.go:
--------------------------------------------------------------------------------
1 | package log_test
2 |
3 | import (
4 | "context"
5 | "testing"
6 | "time"
7 |
8 | "github.com/coder/envbuilder/log"
9 | "github.com/sirupsen/logrus"
10 | "github.com/stretchr/testify/require"
11 | )
12 |
// TestHijackLogrus_Info verifies that hijacking at LevelInfo drops trace
// and debug entries but forwards info/warn/error. Not parallel: it
// mutates the global logrus standard logger.
func TestHijackLogrus_Info(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	t.Cleanup(cancel)
	messages := make(chan *logrus.Entry)

	logf := func(entry *logrus.Entry) {
		t.Logf("got msg level: %s msg: %q", entry.Level, entry.Message)
		messages <- entry
	}

	log.HijackLogrus(log.LevelInfo, logf)

	done := make(chan struct{})
	go func() {
		defer close(done)
		// The following should be filtered out.
		logrus.Trace("Tracing!")
		logrus.Debug("Debugging!")
		// We should receive the below.
		logrus.Info("Testing!")
		logrus.Warn("Warning!")
		logrus.Error("Error!")
	}()

	require.Equal(t, "Testing!", rcvCtx(ctx, t, messages).Message)
	require.Equal(t, "Warning!", rcvCtx(ctx, t, messages).Message)
	require.Equal(t, "Error!", rcvCtx(ctx, t, messages).Message)
	<-done
}
42 |
// TestHijackLogrus_Debug verifies that hijacking at LevelDebug drops
// only trace entries. Not parallel: it mutates the global logrus
// standard logger.
func TestHijackLogrus_Debug(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	t.Cleanup(cancel)
	messages := make(chan *logrus.Entry)

	logf := func(entry *logrus.Entry) {
		t.Logf("got msg level: %s msg: %q", entry.Level, entry.Message)
		messages <- entry
	}

	log.HijackLogrus(log.LevelDebug, logf)

	done := make(chan struct{})
	go func() {
		defer close(done)
		// The following should be filtered out.
		logrus.Trace("Tracing!")
		// We should receive the below.
		logrus.Debug("Debugging!")
		logrus.Info("Testing!")
		logrus.Warn("Warning!")
		logrus.Error("Error!")
	}()

	require.Equal(t, "Debugging!", rcvCtx(ctx, t, messages).Message)
	require.Equal(t, "Testing!", rcvCtx(ctx, t, messages).Message)
	require.Equal(t, "Warning!", rcvCtx(ctx, t, messages).Message)
	require.Equal(t, "Error!", rcvCtx(ctx, t, messages).Message)
	<-done
}
73 |
// TestHijackLogrus_Error verifies that hijacking at LevelError forwards
// only error-level entries. Not parallel: it mutates the global logrus
// standard logger.
func TestHijackLogrus_Error(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	t.Cleanup(cancel)
	messages := make(chan *logrus.Entry)

	logf := func(entry *logrus.Entry) {
		t.Logf("got msg level: %s msg: %q", entry.Level, entry.Message)
		messages <- entry
	}

	log.HijackLogrus(log.LevelError, logf)

	done := make(chan struct{})
	go func() {
		defer close(done)
		// The following should be filtered out.
		logrus.Trace("Tracing!")
		logrus.Debug("Debugging!")
		logrus.Info("Testing!")
		logrus.Warn("Warning!")
		// We should receive the below.
		logrus.Error("Error!")
	}()

	require.Equal(t, "Error!", rcvCtx(ctx, t, messages).Message)
	<-done
}
101 |
102 | func rcvCtx[T any](ctx context.Context, t *testing.T, ch <-chan T) (v T) {
103 | t.Helper()
104 | select {
105 | case <-ctx.Done():
106 | t.Fatal("timeout")
107 | case v = <-ch:
108 | }
109 | return v
110 | }
111 |
--------------------------------------------------------------------------------
/options/defaults.go:
--------------------------------------------------------------------------------
1 | package options
2 |
3 | import (
4 | "fmt"
5 | "path"
6 | "strings"
7 |
8 | "github.com/go-git/go-billy/v5/osfs"
9 |
10 | giturls "github.com/chainguard-dev/git-urls"
11 | "github.com/coder/envbuilder/internal/chmodfs"
12 | "github.com/coder/envbuilder/internal/workingdir"
13 | )
14 |
15 | // DefaultWorkspaceFolder returns the default workspace folder
16 | // for a given repository URL.
17 | func DefaultWorkspaceFolder(workspacesFolder, repoURL string) string {
18 | // emptyWorkspaceDir is the path to a workspace that has
19 | // nothing going on... it's empty!
20 | emptyWorkspaceDir := workspacesFolder + "/empty"
21 |
22 | if repoURL == "" {
23 | return emptyWorkspaceDir
24 | }
25 | parsed, err := giturls.Parse(repoURL)
26 | if err != nil {
27 | return emptyWorkspaceDir
28 | }
29 | repo := path.Base(parsed.Path)
30 | // Giturls parsing never actually fails since ParseLocal never
31 | // errors and places the entire URL in the Path field. This check
32 | // ensures it's at least a Unix path containing forwardslash.
33 | if repo == repoURL || repo == "/" || repo == "." || repo == "" {
34 | return emptyWorkspaceDir
35 | }
36 | repo = strings.TrimSuffix(repo, ".git")
37 | return fmt.Sprintf("%s/%s", workspacesFolder, repo)
38 | }
39 |
// SetDefaults populates any unset fields of o with their default
// values. Fields already set by the caller are left untouched.
func (o *Options) SetDefaults() {
	// Temporarily removed these from the default settings to prevent conflicts
	// between current and legacy environment variables that add default values.
	// Once the legacy environment variables are phased out, this can be
	// reinstated to the previous default values.
	if len(o.IgnorePaths) == 0 {
		o.IgnorePaths = []string{
			"/var/run",
			// KinD adds these paths to pods, so ignore them by default.
			"/product_uuid", "/product_name",
		}
	}
	if o.InitScript == "" {
		o.InitScript = "sleep infinity"
	}
	if o.InitCommand == "" {
		o.InitCommand = "/bin/sh"
	}

	if o.Filesystem == nil {
		o.Filesystem = chmodfs.New(osfs.New("/"))
	}
	if o.WorkspaceBaseDir == "" {
		o.WorkspaceBaseDir = "/workspaces"
	}
	// NOTE: the WorkspaceFolder default derives from WorkspaceBaseDir,
	// so the base dir default above must be applied first.
	if o.WorkspaceFolder == "" {
		o.WorkspaceFolder = DefaultWorkspaceFolder(o.WorkspaceBaseDir, o.GitURL)
	}
	if o.BinaryPath == "" {
		o.BinaryPath = "/.envbuilder/bin/envbuilder"
	}
	if o.WorkingDirBase == "" {
		o.WorkingDirBase = workingdir.Default.Path()
	}
}
75 |
--------------------------------------------------------------------------------
/options/defaults_test.go:
--------------------------------------------------------------------------------
1 | package options_test
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/coder/envbuilder/internal/chmodfs"
7 | "github.com/go-git/go-billy/v5/osfs"
8 |
9 | "github.com/stretchr/testify/assert"
10 |
11 | "github.com/coder/envbuilder/options"
12 | "github.com/stretchr/testify/require"
13 | )
14 |
15 | func TestDefaultWorkspaceFolder(t *testing.T) {
16 | t.Parallel()
17 |
18 | successTests := []struct {
19 | name string
20 | baseDir string
21 | gitURL string
22 | expected string
23 | }{
24 | {
25 | name: "HTTP",
26 | baseDir: "/workspaces",
27 | gitURL: "https://github.com/coder/envbuilder.git",
28 | expected: "/workspaces/envbuilder",
29 | },
30 | {
31 | name: "SSH",
32 | baseDir: "/workspaces",
33 | gitURL: "git@github.com:coder/envbuilder.git",
34 | expected: "/workspaces/envbuilder",
35 | },
36 | {
37 | name: "username and password",
38 | baseDir: "/workspaces",
39 | gitURL: "https://username:password@github.com/coder/envbuilder.git",
40 | expected: "/workspaces/envbuilder",
41 | },
42 | {
43 | name: "trailing",
44 | baseDir: "/workspaces",
45 | gitURL: "https://github.com/coder/envbuilder.git/",
46 | expected: "/workspaces/envbuilder",
47 | },
48 | {
49 | name: "trailing-x2",
50 | baseDir: "/workspaces",
51 | gitURL: "https://github.com/coder/envbuilder.git//",
52 | expected: "/workspaces/envbuilder",
53 | },
54 | {
55 | name: "no .git",
56 | baseDir: "/workspaces",
57 | gitURL: "https://github.com/coder/envbuilder",
58 | expected: "/workspaces/envbuilder",
59 | },
60 | {
61 | name: "trailing no .git",
62 | baseDir: "/workspaces",
63 | gitURL: "https://github.com/coder/envbuilder/",
64 | expected: "/workspaces/envbuilder",
65 | },
66 | {
67 | name: "fragment",
68 | baseDir: "/workspaces",
69 | gitURL: "https://github.com/coder/envbuilder.git#feature-branch",
70 | expected: "/workspaces/envbuilder",
71 | },
72 | {
73 | name: "fragment-trailing",
74 | baseDir: "/workspaces",
75 | gitURL: "https://github.com/coder/envbuilder.git/#refs/heads/feature-branch",
76 | expected: "/workspaces/envbuilder",
77 | },
78 | {
79 | name: "fragment-trailing no .git",
80 | baseDir: "/workspaces",
81 | gitURL: "https://github.com/coder/envbuilder/#refs/heads/feature-branch",
82 | expected: "/workspaces/envbuilder",
83 | },
84 | {
85 | name: "space",
86 | baseDir: "/workspaces",
87 | gitURL: "https://github.com/coder/env%20builder.git",
88 | expected: "/workspaces/env builder",
89 | },
90 | {
91 | name: "Unix path",
92 | baseDir: "/workspaces",
93 | gitURL: "/repo",
94 | expected: "/workspaces/repo",
95 | },
96 | {
97 | name: "Unix subpath",
98 | baseDir: "/workspaces",
99 | gitURL: "/path/to/repo",
100 | expected: "/workspaces/repo",
101 | },
102 | {
103 | name: "empty",
104 | baseDir: "/workspaces",
105 | gitURL: "",
106 | expected: "/workspaces/empty",
107 | },
108 | {
109 | name: "non default workspaces folder",
110 | baseDir: "/foo",
111 | gitURL: "https://github.com/coder/envbuilder.git",
112 | expected: "/foo/envbuilder",
113 | },
114 | {
115 | name: "non default workspaces folder empty git URL",
116 | baseDir: "/foo",
117 | gitURL: "",
118 | expected: "/foo/empty",
119 | },
120 | }
121 | for _, tt := range successTests {
122 | t.Run(tt.name, func(t *testing.T) {
123 | dir := options.DefaultWorkspaceFolder(tt.baseDir, tt.gitURL)
124 | require.Equal(t, tt.expected, dir)
125 | })
126 | }
127 |
128 | invalidTests := []struct {
129 | name string
130 | invalidURL string
131 | }{
132 | {
133 | name: "simple text",
134 | invalidURL: "not a valid URL",
135 | },
136 | {
137 | name: "website URL",
138 | invalidURL: "www.google.com",
139 | },
140 | {
141 | name: "Unix root",
142 | invalidURL: "/",
143 | },
144 | {
145 | name: "Path consists entirely of slash",
146 | invalidURL: "//",
147 | },
148 | {
149 | name: "Git URL with no path",
150 | invalidURL: "http://127.0.0.1:41073",
151 | },
152 | }
153 | for _, tt := range invalidTests {
154 | t.Run(tt.name, func(t *testing.T) {
155 | dir := options.DefaultWorkspaceFolder("/workspaces", tt.invalidURL)
156 | require.Equal(t, "/workspaces/empty", dir)
157 | })
158 | }
159 | }
160 |
161 | func TestOptions_SetDefaults(t *testing.T) {
162 | t.Parallel()
163 |
164 | expected := options.Options{
165 | InitScript: "sleep infinity",
166 | InitCommand: "/bin/sh",
167 | IgnorePaths: []string{"/var/run", "/product_uuid", "/product_name"},
168 | Filesystem: chmodfs.New(osfs.New("/")),
169 | GitURL: "",
170 | WorkspaceBaseDir: "/workspaces",
171 | WorkspaceFolder: "/workspaces/empty",
172 | WorkingDirBase: "/.envbuilder",
173 | BinaryPath: "/.envbuilder/bin/envbuilder",
174 | }
175 |
176 | var actual options.Options
177 | actual.SetDefaults()
178 | assert.Equal(t, expected, actual)
179 | }
180 |
--------------------------------------------------------------------------------
/options/options_test.go:
--------------------------------------------------------------------------------
1 | package options_test
2 |
3 | import (
4 | "bytes"
5 | "flag"
6 | "os"
7 | "testing"
8 |
9 | "github.com/coder/envbuilder/options"
10 |
11 | "github.com/coder/serpent"
12 | "github.com/stretchr/testify/assert"
13 | "github.com/stretchr/testify/require"
14 | )
15 |
16 | // TestEnvOptionParsing tests that given environment variables of different types are handled as expected.
17 | func TestEnvOptionParsing(t *testing.T) {
18 | t.Run("string", func(t *testing.T) {
19 | const val = "setup.sh"
20 | t.Setenv(options.WithEnvPrefix("SETUP_SCRIPT"), val)
21 | o := runCLI()
22 | require.Equal(t, o.SetupScript, val)
23 | })
24 |
25 | t.Run("int", func(t *testing.T) {
26 | t.Setenv(options.WithEnvPrefix("CACHE_TTL_DAYS"), "7")
27 | o := runCLI()
28 | require.Equal(t, o.CacheTTLDays, int64(7))
29 | })
30 |
31 | t.Run("string array", func(t *testing.T) {
32 | t.Setenv(options.WithEnvPrefix("IGNORE_PATHS"), "/var,/temp")
33 | o := runCLI()
34 | require.Equal(t, o.IgnorePaths, []string{"/var", "/temp"})
35 | })
36 |
37 | t.Run("bool", func(t *testing.T) {
38 | t.Run("lowercase", func(t *testing.T) {
39 | t.Setenv(options.WithEnvPrefix("SKIP_REBUILD"), "true")
40 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_SINGLE_BRANCH"), "false")
41 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_THINPACK"), "false")
42 | o := runCLI()
43 | require.True(t, o.SkipRebuild)
44 | require.False(t, o.GitCloneSingleBranch)
45 | require.False(t, o.GitCloneThinPack)
46 | })
47 |
48 | t.Run("uppercase", func(t *testing.T) {
49 | t.Setenv(options.WithEnvPrefix("SKIP_REBUILD"), "TRUE")
50 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_SINGLE_BRANCH"), "FALSE")
51 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_THINPACK"), "FALSE")
52 | o := runCLI()
53 | require.True(t, o.SkipRebuild)
54 | require.False(t, o.GitCloneSingleBranch)
55 | require.False(t, o.GitCloneThinPack)
56 | })
57 |
58 | t.Run("numeric", func(t *testing.T) {
59 | t.Setenv(options.WithEnvPrefix("SKIP_REBUILD"), "1")
60 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_SINGLE_BRANCH"), "0")
61 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_THINPACK"), "0")
62 | o := runCLI()
63 | require.True(t, o.SkipRebuild)
64 | require.False(t, o.GitCloneSingleBranch)
65 | require.False(t, o.GitCloneThinPack)
66 | })
67 |
68 | t.Run("empty", func(t *testing.T) {
69 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_SINGLE_BRANCH"), "")
70 | t.Setenv(options.WithEnvPrefix("GIT_CLONE_THINPACK"), "")
71 | o := runCLI()
72 | require.False(t, o.GitCloneSingleBranch)
73 | require.True(t, o.GitCloneThinPack)
74 | })
75 | })
76 | }
77 |
78 | func TestLegacyEnvVars(t *testing.T) {
79 | legacyEnvs := map[string]string{
80 | "SETUP_SCRIPT": "./setup-legacy-script.sh",
81 | "INIT_SCRIPT": "./init-legacy-script.sh",
82 | "INIT_COMMAND": "/bin/zsh",
83 | "INIT_ARGS": "arg1 arg2",
84 | "CACHE_REPO": "example-cache-repo",
85 | "BASE_IMAGE_CACHE_DIR": "/path/to/base/image/cache",
86 | "LAYER_CACHE_DIR": "/path/to/layer/cache",
87 | "DEVCONTAINER_DIR": "/path/to/devcontainer/dir",
88 | "DEVCONTAINER_JSON_PATH": "/path/to/devcontainer.json",
89 | "DOCKERFILE_PATH": "/path/to/Dockerfile",
90 | "BUILD_CONTEXT_PATH": "/path/to/build/context",
91 | "CACHE_TTL_DAYS": "7",
92 | "DOCKER_CONFIG_BASE64": "base64encodedconfig",
93 | "FALLBACK_IMAGE": "fallback-image:latest",
94 | "EXIT_ON_BUILD_FAILURE": "true",
95 | "FORCE_SAFE": "true",
96 | "INSECURE": "true",
97 | "IGNORE_PATHS": "/var/run,/tmp",
98 | "SKIP_REBUILD": "true",
99 | "GIT_URL": "https://github.com/example/repo.git",
100 | "GIT_CLONE_DEPTH": "1",
101 | "GIT_CLONE_SINGLE_BRANCH": "true",
102 | "GIT_USERNAME": "gituser",
103 | "GIT_PASSWORD": "gitpassword",
104 | "GIT_SSH_PRIVATE_KEY_PATH": "/path/to/private/key",
105 | "GIT_HTTP_PROXY_URL": "http://proxy.example.com",
106 | "WORKSPACE_FOLDER": "/path/to/workspace/folder",
107 | "SSL_CERT_BASE64": "base64encodedcert",
108 | "EXPORT_ENV_FILE": "/path/to/export/env/file",
109 | "POST_START_SCRIPT_PATH": "/path/to/post/start/script",
110 | }
111 | for k, v := range legacyEnvs {
112 | t.Setenv(k, v)
113 | }
114 |
115 | o := runCLI()
116 |
117 | assert.Equal(t, legacyEnvs["SETUP_SCRIPT"], o.SetupScript)
118 | assert.Equal(t, legacyEnvs["INIT_SCRIPT"], o.InitScript)
119 | assert.Equal(t, legacyEnvs["INIT_COMMAND"], o.InitCommand)
120 | assert.Equal(t, legacyEnvs["INIT_ARGS"], o.InitArgs)
121 | assert.Equal(t, legacyEnvs["CACHE_REPO"], o.CacheRepo)
122 | assert.Equal(t, legacyEnvs["BASE_IMAGE_CACHE_DIR"], o.BaseImageCacheDir)
123 | assert.Equal(t, legacyEnvs["LAYER_CACHE_DIR"], o.LayerCacheDir)
124 | assert.Equal(t, legacyEnvs["DEVCONTAINER_DIR"], o.DevcontainerDir)
125 | assert.Equal(t, legacyEnvs["DEVCONTAINER_JSON_PATH"], o.DevcontainerJSONPath)
126 | assert.Equal(t, legacyEnvs["DOCKERFILE_PATH"], o.DockerfilePath)
127 | assert.Equal(t, legacyEnvs["BUILD_CONTEXT_PATH"], o.BuildContextPath)
128 | assert.Equal(t, int64(7), o.CacheTTLDays)
129 | assert.Equal(t, legacyEnvs["DOCKER_CONFIG_BASE64"], o.DockerConfigBase64)
130 | assert.Equal(t, legacyEnvs["FALLBACK_IMAGE"], o.FallbackImage)
131 | assert.Equal(t, true, o.ExitOnBuildFailure)
132 | assert.Equal(t, true, o.ForceSafe)
133 | assert.Equal(t, true, o.Insecure)
134 | assert.Equal(t, []string{"/var/run", "/tmp"}, o.IgnorePaths)
135 | assert.Equal(t, true, o.SkipRebuild)
136 | assert.Equal(t, legacyEnvs["GIT_URL"], o.GitURL)
137 | assert.Equal(t, int64(1), o.GitCloneDepth)
138 | assert.Equal(t, true, o.GitCloneSingleBranch)
139 | assert.Equal(t, legacyEnvs["GIT_USERNAME"], o.GitUsername)
140 | assert.Equal(t, legacyEnvs["GIT_PASSWORD"], o.GitPassword)
141 | assert.Equal(t, legacyEnvs["GIT_SSH_PRIVATE_KEY_PATH"], o.GitSSHPrivateKeyPath)
142 | assert.Equal(t, legacyEnvs["GIT_HTTP_PROXY_URL"], o.GitHTTPProxyURL)
143 | assert.Equal(t, legacyEnvs["WORKSPACE_FOLDER"], o.WorkspaceFolder)
144 | assert.Equal(t, legacyEnvs["SSL_CERT_BASE64"], o.SSLCertBase64)
145 | assert.Equal(t, legacyEnvs["EXPORT_ENV_FILE"], o.ExportEnvFile)
146 | assert.Equal(t, legacyEnvs["POST_START_SCRIPT_PATH"], o.PostStartScriptPath)
147 | }
148 |
// updateCLIOutputGoldenFiles indicates golden files should be updated.
// Pass -update to `go test` to regenerate testdata/options.golden.
var updateCLIOutputGoldenFiles = flag.Bool("update", false, "update options CLI output .golden files")
151 |
152 | // TestCLIOutput tests that the default CLI output is as expected.
153 | func TestCLIOutput(t *testing.T) {
154 | var o options.Options
155 | cmd := serpent.Command{
156 | Use: "envbuilder",
157 | Options: o.CLI(),
158 | Handler: func(inv *serpent.Invocation) error {
159 | return nil
160 | },
161 | }
162 |
163 | var b ioBufs
164 | i := cmd.Invoke("--help")
165 | i.Stdout = &b.Stdout
166 | i.Stderr = &b.Stderr
167 | i.Stdin = &b.Stdin
168 |
169 | err := i.Run()
170 | require.NoError(t, err)
171 |
172 | if *updateCLIOutputGoldenFiles {
173 | err = os.WriteFile("testdata/options.golden", b.Stdout.Bytes(), 0o644)
174 | require.NoError(t, err)
175 | t.Logf("updated golden file: testdata/options.golden")
176 | } else {
177 | golden, err := os.ReadFile("testdata/options.golden")
178 | require.NoError(t, err)
179 | require.Equal(t, string(golden), b.Stdout.String())
180 | }
181 | }
182 |
183 | func runCLI() options.Options {
184 | var o options.Options
185 | cmd := serpent.Command{
186 | Options: o.CLI(),
187 | Handler: func(inv *serpent.Invocation) error {
188 | return nil
189 | },
190 | }
191 |
192 | i := cmd.Invoke().WithOS()
193 | i.Args = []string{"--help"}
194 | fakeIO(i)
195 | err := i.Run()
196 | if err != nil {
197 | panic("failed to run CLI: " + err.Error())
198 | }
199 |
200 | return o
201 | }
202 |
// ioBufs holds in-memory buffers standing in for a command's standard
// input, output, and error streams during tests.
type ioBufs struct {
	Stdin  bytes.Buffer
	Stdout bytes.Buffer
	Stderr bytes.Buffer
}
208 |
209 | func fakeIO(i *serpent.Invocation) *ioBufs {
210 | var b ioBufs
211 | i.Stdout = &b.Stdout
212 | i.Stderr = &b.Stderr
213 | i.Stdin = &b.Stdin
214 | return &b
215 | }
216 |
--------------------------------------------------------------------------------
/options/testdata/options.golden:
--------------------------------------------------------------------------------
1 | USAGE:
2 | envbuilder
3 |
4 | OPTIONS:
5 | --base-image-cache-dir string, $ENVBUILDER_BASE_IMAGE_CACHE_DIR
6 | The path to a directory where the base image can be found. This should
7 | be a read-only directory solely mounted for the purpose of caching the
8 | base image.
9 |
10 | --build-context-path string, $ENVBUILDER_BUILD_CONTEXT_PATH
11 | Can be specified when a DockerfilePath is specified outside the base
12 | WorkspaceFolder. This path MUST be relative to the WorkspaceFolder
13 | path into which the repo is cloned.
14 |
15 | --build-secrets string-array, $ENVBUILDER_BUILD_SECRETS
16 | The list of secret environment variables to use when building the
17 | image.
18 |
19 | --cache-repo string, $ENVBUILDER_CACHE_REPO
20 | The name of the container registry to push the cache image to. If this
21 | is empty, the cache will not be pushed.
22 |
23 | --cache-ttl-days int, $ENVBUILDER_CACHE_TTL_DAYS
24 | The number of days to use cached layers before expiring them. Defaults
25 | to 7 days.
26 |
27 | --coder-agent-subsystem string-array, $CODER_AGENT_SUBSYSTEM
28 | Coder agent subsystems to report when forwarding logs. The envbuilder
29 | subsystem is always included.
30 |
31 | --coder-agent-token string, $CODER_AGENT_TOKEN
32 | Authentication token for a Coder agent. If this is set, then
33 | CODER_AGENT_URL must also be set.
34 |
35 | --coder-agent-url string, $CODER_AGENT_URL
36 | URL of the Coder deployment. If CODER_AGENT_TOKEN is also set, logs
37 | from envbuilder will be forwarded here and will be visible in the
38 | workspace build logs.
39 |
40 | --devcontainer-dir string, $ENVBUILDER_DEVCONTAINER_DIR
41 | The path to the folder containing the devcontainer.json file that will
42 | be used to build the workspace and can either be an absolute path or a
43 | path relative to the workspace folder. If not provided, defaults to
44 | `.devcontainer`.
45 |
46 | --devcontainer-json-path string, $ENVBUILDER_DEVCONTAINER_JSON_PATH
47 | The path to a devcontainer.json file that is either an absolute path
48 | or a path relative to DevcontainerDir. This can be used in cases where
49 | one wants to substitute an edited devcontainer.json file for the one
50 | that exists in the repo.
51 |
52 | --docker-config-base64 string, $ENVBUILDER_DOCKER_CONFIG_BASE64
53 | The base64 encoded Docker config file that will be used to pull images
54 | from private container registries. When this is set, Docker
55 | configuration set via the DOCKER_CONFIG environment variable is
56 | ignored.
57 |
58 | --dockerfile-path string, $ENVBUILDER_DOCKERFILE_PATH
59 | The relative path to the Dockerfile that will be used to build the
60 | workspace. This is an alternative to using a devcontainer that some
61 | might find simpler.
62 |
63 | --exit-on-build-failure bool, $ENVBUILDER_EXIT_ON_BUILD_FAILURE
64 | Terminates the container upon a build failure. This is handy when
65 | preferring the FALLBACK_IMAGE in cases where no devcontainer.json or
66 | image is provided. However, it ensures that the container stops if the
67 | build process encounters an error.
68 |
69 | --exit-on-push-failure bool, $ENVBUILDER_EXIT_ON_PUSH_FAILURE
70 | ExitOnPushFailure terminates the container upon a push failure. This
71 | is useful if failure to push the built image should abort execution
72 | and result in an error.
73 |
74 | --export-env-file string, $ENVBUILDER_EXPORT_ENV_FILE
75 | Optional file path to a .env file where envbuilder will dump
76 | environment variables from devcontainer.json and the built container
77 | image.
78 |
79 | --fallback-image string, $ENVBUILDER_FALLBACK_IMAGE
80 | Specifies an alternative image to use when neither an image is
81 | declared in the devcontainer.json file nor a Dockerfile is present. If
82 | there's a build failure (from a faulty Dockerfile) or a
83 | misconfiguration, this image will be the substitute. Set
84 | ExitOnBuildFailure to true to halt the container if the build faces an
85 | issue.
86 |
87 | --force-safe bool, $ENVBUILDER_FORCE_SAFE
88 | Ignores any filesystem safety checks. This could cause serious harm to
89 | your system! This is used in cases where bypass is needed to unblock
90 | customers.
91 |
92 | --get-cached-image bool, $ENVBUILDER_GET_CACHED_IMAGE
93 | Print the digest of the cached image, if available. Exits with an
94 | error if not found.
95 |
96 | --git-clone-depth int, $ENVBUILDER_GIT_CLONE_DEPTH
97 | The depth to use when cloning the Git repository.
98 |
99 | --git-clone-single-branch bool, $ENVBUILDER_GIT_CLONE_SINGLE_BRANCH
100 | Clone only a single branch of the Git repository.
101 |
102 | --git-clone-thinpack bool, $ENVBUILDER_GIT_CLONE_THINPACK (default: true)
103 | Git clone with thin pack compatibility enabled, ensuring that even
104 | when thin pack compatibility is activated,it will not be turned on for
105 | the domain dev.zaure.com.
106 |
107 | --git-http-proxy-url string, $ENVBUILDER_GIT_HTTP_PROXY_URL
108 | The URL for the HTTP proxy. This is optional.
109 |
110 | --git-password string, $ENVBUILDER_GIT_PASSWORD
111 | The password to use for Git authentication. This is optional.
112 |
113 | --git-ssh-private-key-base64 string, $ENVBUILDER_GIT_SSH_PRIVATE_KEY_BASE64
114 | Base64 encoded SSH private key to be used for Git authentication. If
115 | this is set, then GIT_SSH_PRIVATE_KEY_PATH cannot be set.
116 |
117 | --git-ssh-private-key-path string, $ENVBUILDER_GIT_SSH_PRIVATE_KEY_PATH
118 | Path to an SSH private key to be used for Git authentication. If this
119 | is set, then GIT_SSH_PRIVATE_KEY_BASE64 cannot be set.
120 |
121 | --git-url string, $ENVBUILDER_GIT_URL
122 | The URL of a Git repository containing a Devcontainer or Docker image
123 | to clone. This is optional.
124 |
125 | --git-username string, $ENVBUILDER_GIT_USERNAME
126 | The username to use for Git authentication. This is optional.
127 |
128 | --ignore-paths string-array, $ENVBUILDER_IGNORE_PATHS
129 | The comma separated list of paths to ignore when building the
130 | workspace.
131 |
132 | --init-args string, $ENVBUILDER_INIT_ARGS
133 | The arguments to pass to the init command. They are split according to
134 | /bin/sh rules with https://github.com/kballard/go-shellquote.
135 |
136 | --init-command string, $ENVBUILDER_INIT_COMMAND
137 | The command to run to initialize the workspace. Default: `/bin/sh`.
138 |
139 | --init-script string, $ENVBUILDER_INIT_SCRIPT
140 | The script to run to initialize the workspace. Default: `sleep
141 | infinity`.
142 |
143 | --insecure bool, $ENVBUILDER_INSECURE
144 | Bypass TLS verification when cloning and pulling from container
145 | registries.
146 |
147 | --layer-cache-dir string, $ENVBUILDER_LAYER_CACHE_DIR
148 | The path to a directory where built layers will be stored. This spawns
149 | an in-memory registry to serve the layers from.
150 |
151 | --post-start-script-path string, $ENVBUILDER_POST_START_SCRIPT_PATH
152 | The path to a script that will be created by envbuilder based on the
153 | postStartCommand in devcontainer.json, if any is specified (otherwise
154 | the script is not created). If this is set, the specified InitCommand
155 | should check for the presence of this script and execute it after
156 | successful startup.
157 |
158 | --push-image bool, $ENVBUILDER_PUSH_IMAGE
159 | Push the built image to a remote registry. This option forces a
160 | reproducible build.
161 |
162 | --remote-repo-build-mode bool, $ENVBUILDER_REMOTE_REPO_BUILD_MODE (default: false)
163 | Use the remote repository as the source of truth when building the
164 | image. Enabling this option ignores user changes to local files and
165 | they will not be reflected in the image. This can be used to improving
166 | cache utilization when multiple users are building working on the same
167 | repository.
168 |
169 | --setup-script string, $ENVBUILDER_SETUP_SCRIPT
170 | The script to run before the init script. It runs as the root user
171 | regardless of the user specified in the devcontainer.json file.
172 | SetupScript is ran as the root user prior to the init script. It is
173 | used to configure envbuilder dynamically during the runtime. e.g.
174 | specifying whether to start systemd or tiny init for PID 1.
175 |
176 | --skip-rebuild bool, $ENVBUILDER_SKIP_REBUILD
177 | Skip building if the MagicFile exists. This is used to skip building
178 | when a container is restarting. e.g. docker stop -> docker start This
179 | value can always be set to true - even if the container is being
180 | started for the first time.
181 |
182 | --ssl-cert-base64 string, $ENVBUILDER_SSL_CERT_BASE64
183 | The content of an SSL cert file. This is useful for self-signed
184 | certificates.
185 |
186 | --verbose bool, $ENVBUILDER_VERBOSE
187 | Enable verbose logging.
188 |
189 | --workspace-base-dir string, $ENVBUILDER_WORKSPACE_BASE_DIR (default: /workspaces)
190 | The path under which workspaces will be placed when workspace folder
191 | option is not given.
192 |
193 | --workspace-folder string, $ENVBUILDER_WORKSPACE_FOLDER
194 | The path to the workspace folder that will be built. This is optional.
195 | Defaults to `[workspace base dir]/[name]` where name is the name of
196 | the repository or `empty`.
197 |
198 |
--------------------------------------------------------------------------------
/scripts/Dockerfile:
--------------------------------------------------------------------------------
# Minimal runtime image: contains nothing but the envbuilder binary.
FROM scratch
# Set automatically by docker buildx for the target platform (amd64, arm64, ...).
ARG TARGETARCH

COPY envbuilder-${TARGETARCH} /.envbuilder/bin/envbuilder

# Kaniko working state is kept under /.envbuilder.
ENV KANIKO_DIR=/.envbuilder

ENTRYPOINT ["/.envbuilder/bin/envbuilder"]
9 |
--------------------------------------------------------------------------------
/scripts/build.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Builds the envbuilder binary for one or more architectures and packages
# the results into a (possibly multi-platform) Docker image via buildx.
#
# Flags:
#   --arch=<goarch>  architecture to build for (repeatable; defaults to host)
#   --push           push the image to the registry instead of loading locally
#   --base=<name>    image repository name (default: envbuilder)
#   --tag=<tag>      image tag (default: output of ./version.sh)

cd "$(dirname "${BASH_SOURCE[0]}")"
set -euo pipefail

archs=()
push=false
base="envbuilder"
tag=""

# Parse command-line flags.
for arg in "$@"; do
	if [[ $arg == --arch=* ]]; then
		arch="${arg#*=}"
		archs+=( "$arch" )
	elif [[ $arg == --push ]]; then
		push=true
	elif [[ $arg == --base=* ]]; then
		base="${arg#*=}"
	elif [[ $arg == --tag=* ]]; then
		tag="${arg#*=}"
	else
		echo "Unknown argument: $arg"
		exit 1
	fi
done

# Default to the host architecture when none was requested.
current=$(go env GOARCH)
if [ ${#archs[@]} -eq 0 ]; then
	echo "No architectures specified. Defaulting to $current..."
	archs=( "$current" )
fi

if [[ -z "${tag}" ]]; then
	tag=$(./version.sh)
fi

# We have to use docker buildx to tag multiple images with
# platforms tragically, so we have to create a builder.
BUILDER_NAME="envbuilder"
BUILDER_EXISTS=$(docker buildx ls | grep $BUILDER_NAME || true)

# If builder doesn't exist, create it
if [ -z "$BUILDER_EXISTS" ]; then
	echo "Creating dockerx builder $BUILDER_NAME..."
	docker buildx create --use --platform=linux/arm64,linux/amd64,linux/arm/v7 --name $BUILDER_NAME
else
	echo "Builder $BUILDER_NAME already exists. Using it."
fi

# Ensure the builder is bootstrapped and ready to use
docker buildx inspect --bootstrap &> /dev/null

# Embed the version string into the binary via the buildinfo package.
ldflags=(-X "'github.com/coder/envbuilder/buildinfo.tag=$tag'")

# Compile all requested architectures in parallel; `wait` collects them.
for arch in "${archs[@]}"; do
	echo "Building for $arch..."
	GOARCH=$arch CGO_ENABLED=0 go build -ldflags="${ldflags[*]}" -o "./envbuilder-${arch}" ../cmd/envbuilder &
done
wait

# Assemble buildx args: one --platform per arch, then --push or --load.
args=()
for arch in "${archs[@]}"; do
	args+=( --platform "linux/${arch}" )
done
if [ "$push" = true ]; then
	args+=( --push )
else
	args+=( --load )
fi

# coerce semver build tags into something docker won't complain about
# (docker tags may not contain '+').
tag="${tag//\+/-}"
docker buildx build --builder $BUILDER_NAME "${args[@]}" -t "${base}:${tag}" -t "${base}:latest" -f Dockerfile .

# Check if archs contains the current. If so, then output a message!
if [[ -z "${CI:-}" ]] && [[ " ${archs[*]} " =~ ${current} ]]; then
	docker tag "${base}:${tag}" envbuilder:latest
	echo "Tagged $current as ${base}:${tag} ${base}:latest!"
fi
80 |
--------------------------------------------------------------------------------
/scripts/check_fmt.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Fails with exit code 1 if any Go file is not gofumpt-formatted,
# printing the offending files and a diff of the required changes.

# Without this, a failure of the gofumpt invocation itself would leave
# $list empty and the script would exit 0, silently skipping the check.
set -euo pipefail

list="$(go run mvdan.cc/gofumpt@v0.6.0 -l .)"
if [[ -n $list ]]; then
	# printf is preferred over the non-portable `echo -n -e`.
	printf "error: The following files have changes:\n\n%s\n\nDiff:\n\n" "${list}"
	go run mvdan.cc/gofumpt@v0.6.0 -d .
	exit 1
fi
9 |
--------------------------------------------------------------------------------
/scripts/develop.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Builds envbuilder for the host architecture and runs it interactively
# against a sample repository, for quick local development loops.

cd "$(dirname "${BASH_SOURCE[0]}")"
set -euxo pipefail

./build.sh || exit 1

# Clone and build a real-world repo; use `bash` as the init script so
# the resulting environment can be inspected interactively.
docker run --rm -it \
	-e ENVBUILDER_GIT_URL=https://github.com/denoland/deno \
	-e ENVBUILDER_INIT_SCRIPT="bash" \
	envbuilder:latest
12 |
--------------------------------------------------------------------------------
/scripts/diagram-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coder/envbuilder/7eabaa4d876c3ce54f7271b6e68fea37b6719c66/scripts/diagram-dark.png
--------------------------------------------------------------------------------
/scripts/diagram-light.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coder/envbuilder/7eabaa4d876c3ce54f7271b6e68fea37b6719c66/scripts/diagram-light.png
--------------------------------------------------------------------------------
/scripts/diagram.d2:
--------------------------------------------------------------------------------
# Source for the workflow diagram; rendered by diagram.sh into the
# light/dark SVG and PNG assets in this directory.
direction: right

# Transparent background so the image blends into the docs page theme.
style: {
  fill: transparent
}

create: Create Workspace {
  shape: step
}

create -> "Code" -> "Edit Dockerfile" -> "Restart Workspace" -> Code
12 |
--------------------------------------------------------------------------------
/scripts/diagram.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Renders diagram.d2 into light- and dark-themed SVG and PNG images.
# Requires the d2 CLI to be installed.

cd "$(dirname "${BASH_SOURCE[0]}")"
set -euxo pipefail

formats=( svg png )
for format in "${formats[@]}"; do
	# -t selects the d2 theme: 1 for light, 200 for dark.
	d2 ./diagram.d2 --pad=32 -t 1 "./diagram-light.${format}"
	d2 ./diagram.d2 --pad=32 -t 200 "./diagram-dark.${format}"
done
11 |
--------------------------------------------------------------------------------
/scripts/docsgen/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path/filepath"
7 |
8 | "github.com/coder/envbuilder/options"
9 | )
10 |
11 | func main() {
12 | path := filepath.Join("docs", "env-variables.md")
13 | var options options.Options
14 | mkd := "\n# Environment Variables\n\n" + options.Markdown()
15 | err := os.WriteFile(path, []byte(mkd), 0o644)
16 | if err != nil {
17 | panic(err)
18 | }
19 | fmt.Printf("%s updated successfully with the latest flags!\n", path)
20 | }
21 |
--------------------------------------------------------------------------------
/scripts/lib.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Shared helpers for the scripts in this directory.
# This script is meant to be sourced by other scripts. To source this script:
#   # shellcheck source=scripts/lib.sh
#   source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"

set -euo pipefail

# Avoid sourcing this script multiple times to guard against when lib.sh
# is used by another sourced script, it can lead to confusing results.
if [[ ${SCRIPTS_LIB_IS_SOURCED:-0} == 1 ]]; then
	return
fi
# Do not export to avoid this value being inherited by non-sourced
# scripts.
SCRIPTS_LIB_IS_SOURCED=1

# We have to define realpath before these otherwise it fails on Mac's bash.
# BASH_SOURCE[1] is the script that sourced us; fall back to this file.
SCRIPT="${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}"
SCRIPT_DIR="$(realpath "$(dirname "$SCRIPT")")"

# project_root prints the absolute path of the repository root.
function project_root {
	# Nix sets $src in derivations!
	[[ -n "${src:-}" ]] && echo "$src" && return

	# Try to use `git rev-parse --show-toplevel` to find the project root.
	# If this directory is not a git repository, this command will fail.
	git rev-parse --show-toplevel 2>/dev/null && return
}

PROJECT_ROOT="$(cd "$SCRIPT_DIR" && realpath "$(project_root)")"

# cdroot changes directory to the root of the repository.
cdroot() {
	cd "$PROJECT_ROOT" || error "Could not change directory to '$PROJECT_ROOT'"
}

# log prints a message to stderr
log() {
	echo "$*" 1>&2
}

# error prints an error message and returns an error exit code.
error() {
	log "ERROR: $*"
	exit 1
}
48 |
--------------------------------------------------------------------------------
/scripts/version.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This script generates the version string used by Envbuilder, including for dev
# versions. Note: the version returned by this script will NOT include the "v"
# prefix that is included in the Git tag.
#
# If $ENVBUILDER_RELEASE is set to "true", the returned version will equal the
# current git tag. If the current commit is not tagged, this will fail.
#
# If $ENVBUILDER_RELEASE is not set, the returned version will always be a dev
# version.

set -euo pipefail
# shellcheck source=scripts/lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"
cdroot

# An explicit override short-circuits all git-based version detection.
if [[ -n "${ENVBUILDER_FORCE_VERSION:-}" ]]; then
	echo "${ENVBUILDER_FORCE_VERSION}"
	exit 0
fi

# To make contributing easier, if there are no tags, we'll use a default
# version.
tag_list=$(git tag)
if [[ -z ${tag_list} ]]; then
	log
	log "INFO(version.sh): It appears you've checked out a fork or shallow clone of Envbuilder."
	log "INFO(version.sh): By default GitHub does not include tags when forking."
	log "INFO(version.sh): We will use the default version 0.0.1 for this build."
	log "INFO(version.sh): To pull tags from upstream, use the following commands:"
	log "INFO(version.sh): - git remote add upstream https://github.com/coder/envbuilder.git"
	log "INFO(version.sh): - git fetch upstream"
	log
	last_tag="v0.0.1"
else
	current_commit=$(git rev-parse HEAD)
	# Try to find the last tag that contains the current commit
	last_tag=$(git tag --contains "$current_commit" --sort=-version:refname | head -n 1)
	# If there is no tag that contains the current commit,
	# get the latest tag sorted by semver.
	if [[ -z "${last_tag}" ]]; then
		last_tag=$(git tag --sort=-version:refname | head -n 1)
	fi
fi

version="${last_tag}"

# If the HEAD has extra commits since the last tag then we are in a dev version.
#
# Dev versions are denoted by the "-dev+" suffix with a trailing commit short
# SHA.
if [[ "${ENVBUILDER_RELEASE:-}" == *t* ]]; then
	# $last_tag will equal `git describe --always` if we currently have the tag
	# checked out.
	if [[ "${last_tag}" != "$(git describe --always)" ]]; then
		error "version.sh: the current commit is not tagged with an annotated tag"
	fi
else
	# Append the short SHA of HEAD to mark this as a dev build.
	rev=$(git log -1 --format='%h' HEAD)
	version+="+dev-${rev}"
	# If the git repo has uncommitted changes, mark the version string as 'dirty'.
	dirty_files=$(git ls-files --other --modified --exclude-standard)
	if [[ -n "${dirty_files}" ]]; then
		version+="-dirty"
	fi
fi

# Remove the "v" prefix.
echo "${version#v}"
71 |
--------------------------------------------------------------------------------
/testutil/gittest/gittest.go:
--------------------------------------------------------------------------------
1 | package gittest
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "log"
7 | "net"
8 | "net/http"
9 | "net/http/httptest"
10 | "os"
11 | "os/exec"
12 | "sync"
13 | "testing"
14 | "time"
15 |
16 | gossh "golang.org/x/crypto/ssh"
17 |
18 | "github.com/coder/envbuilder/testutil/mwtest"
19 | "github.com/gliderlabs/ssh"
20 | "github.com/go-git/go-billy/v5"
21 | "github.com/go-git/go-billy/v5/memfs"
22 | "github.com/go-git/go-git/v5"
23 | "github.com/go-git/go-git/v5/plumbing"
24 | "github.com/go-git/go-git/v5/plumbing/cache"
25 | "github.com/go-git/go-git/v5/plumbing/format/pktline"
26 | "github.com/go-git/go-git/v5/plumbing/object"
27 | "github.com/go-git/go-git/v5/plumbing/protocol/packp"
28 | "github.com/go-git/go-git/v5/plumbing/transport"
29 | "github.com/go-git/go-git/v5/plumbing/transport/server"
30 | "github.com/go-git/go-git/v5/storage/filesystem"
31 | "github.com/stretchr/testify/require"
32 | )
33 |
// Options configures the test git server created by CreateGitServer.
type Options struct {
	// Files maps repo-relative paths to file contents; each entry is
	// committed to the repository individually.
	Files map[string]string
	// Username and Password are the credentials enforced by the default
	// basic-auth middleware when AuthMW is nil.
	Username string
	Password string
	// AuthMW wraps the git HTTP handler; defaults to basic auth using
	// Username/Password.
	AuthMW func(http.Handler) http.Handler
	// TLS serves the repository over HTTPS when true.
	TLS bool
}
41 |
42 | // CreateGitServer creates a git repository with an in-memory filesystem
43 | // and serves it over HTTP using a httptest.Server.
44 | func CreateGitServer(t *testing.T, opts Options) *httptest.Server {
45 | t.Helper()
46 | if opts.AuthMW == nil {
47 | opts.AuthMW = mwtest.BasicAuthMW(opts.Username, opts.Password)
48 | }
49 | commits := make([]CommitFunc, 0)
50 | for path, content := range opts.Files {
51 | commits = append(commits, Commit(t, path, content, "my test commit"))
52 | }
53 | fs := memfs.New()
54 | _ = NewRepo(t, fs, commits...)
55 | if opts.TLS {
56 | return httptest.NewTLSServer(opts.AuthMW(NewServer(fs)))
57 | }
58 | return httptest.NewServer(opts.AuthMW(NewServer(fs)))
59 | }
60 |
// NewServer returns a http.Handler that serves a git repository.
// It's expected that the repository is already initialized by the caller.
// Only the smart-HTTP fetch protocol (git-upload-pack) is implemented;
// pushes are not supported.
func NewServer(fs billy.Filesystem) http.Handler {
	mux := http.NewServeMux()
	// Ref advertisement: the first request of a smart-HTTP fetch/clone.
	mux.HandleFunc("/info/refs", func(rw http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("service") != "git-upload-pack" {
			http.Error(rw, "only smart git", 403)
			return
		}
		rw.Header().Set("Content-Type", "application/x-git-upload-pack-advertisement")
		ep, err := transport.NewEndpoint("/")
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
		svr := server.NewServer(server.NewFilesystemLoader(fs))
		sess, err := svr.NewUploadPackSession(ep, nil)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
		ar, err := sess.AdvertisedReferencesContext(r.Context())
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
		// The smart protocol requires a "# service=..." banner followed
		// by a pkt-line flush before the refs themselves.
		ar.Prefix = [][]byte{
			[]byte("# service=git-upload-pack"),
			pktline.Flush,
		}
		err = ar.Encode(rw)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}
	})
	// Pack transfer: the client posts its wants/haves and receives a
	// packfile in response.
	mux.HandleFunc("/git-upload-pack", func(rw http.ResponseWriter, r *http.Request) {
		rw.Header().Set("content-type", "application/x-git-upload-pack-result")

		upr := packp.NewUploadPackRequest()
		err := upr.Decode(r.Body)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			return
		}

		ep, err := transport.NewEndpoint("/")
		if err != nil {
			http.Error(rw, err.Error(), 500)
			log.Println(err)
			return
		}
		ld := server.NewFilesystemLoader(fs)
		svr := server.NewServer(ld)
		sess, err := svr.NewUploadPackSession(ep, nil)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			log.Println(err)
			return
		}
		res, err := sess.UploadPack(r.Context(), upr)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			log.Println(err)
			return
		}

		err = res.Encode(rw)
		if err != nil {
			http.Error(rw, err.Error(), 500)
			log.Println(err)
			return
		}
	})
	return mux
}
137 |
// NewServerSSH starts an SSH server on a random localhost port that
// authenticates clients against the given public keys and executes the
// command each session requests (see handleSession). The listener is
// closed and the serve goroutine joined via t.Cleanup.
//
// NOTE(review): the fs parameter is not referenced in this function —
// presumably sessions operate on the process working directory instead;
// confirm whether it can be removed.
func NewServerSSH(t *testing.T, fs billy.Filesystem, pubkeys ...gossh.PublicKey) *transport.Endpoint {
	t.Helper()

	l, err := net.Listen("tcp", "localhost:0")
	require.NoError(t, err)
	t.Cleanup(func() { _ = l.Close() })

	srvOpts := []ssh.Option{
		// Accept only the configured public keys.
		ssh.PublicKeyAuth(func(ctx ssh.Context, key ssh.PublicKey) bool {
			for _, pk := range pubkeys {
				if ssh.KeysEqual(pk, key) {
					return true
				}
			}
			return false
		}),
	}

	done := make(chan struct{}, 1)
	go func() {
		_ = ssh.Serve(l, handleSession, srvOpts...)
		close(done)
	}()
	// Close the listener and wait for the serve goroutine to exit so it
	// does not outlive the test.
	t.Cleanup(func() {
		_ = l.Close()
		<-done
	})

	addr, ok := l.Addr().(*net.TCPAddr)
	require.True(t, ok)
	tr, err := transport.NewEndpoint(fmt.Sprintf("ssh://git@%s:%d/", addr.IP, addr.Port))
	require.NoError(t, err)
	return tr
}
172 |
173 | func handleSession(sess ssh.Session) {
174 | c := sess.Command()
175 | if len(c) < 1 {
176 | _, _ = fmt.Fprintf(os.Stderr, "invalid command: %q\n", c)
177 | }
178 |
179 | cmd := exec.Command(c[0], c[1:]...)
180 | stdout, err := cmd.StdoutPipe()
181 | if err != nil {
182 | _, _ = fmt.Fprintf(os.Stderr, "cmd stdout pipe: %s\n", err.Error())
183 | return
184 | }
185 |
186 | stdin, err := cmd.StdinPipe()
187 | if err != nil {
188 | _, _ = fmt.Fprintf(os.Stderr, "cmd stdin pipe: %s\n", err.Error())
189 | return
190 | }
191 |
192 | stderr, err := cmd.StderrPipe()
193 | if err != nil {
194 | _, _ = fmt.Fprintf(os.Stderr, "cmd stderr pipe: %s\n", err.Error())
195 | return
196 | }
197 |
198 | err = cmd.Start()
199 | if err != nil {
200 | _, _ = fmt.Fprintf(os.Stderr, "start cmd: %s\n", err.Error())
201 | return
202 | }
203 |
204 | go func() {
205 | defer stdin.Close()
206 | _, _ = io.Copy(stdin, sess)
207 | }()
208 |
209 | var wg sync.WaitGroup
210 | wg.Add(2)
211 |
212 | go func() {
213 | defer wg.Done()
214 | _, _ = io.Copy(sess.Stderr(), stderr)
215 | }()
216 |
217 | go func() {
218 | defer wg.Done()
219 | _, _ = io.Copy(sess, stdout)
220 | }()
221 |
222 | wg.Wait()
223 |
224 | if err := cmd.Wait(); err != nil {
225 | _, _ = fmt.Fprintf(os.Stderr, "wait cmd: %s\n", err.Error())
226 | }
227 | }
228 |
// CommitFunc applies a single commit to the given repository, using the
// provided filesystem as its worktree.
type CommitFunc func(billy.Filesystem, *git.Repository)
231 |
232 | // Commit is a test helper for committing a single file to a repo.
233 | func Commit(t *testing.T, path, content, msg string) CommitFunc {
234 | return func(fs billy.Filesystem, repo *git.Repository) {
235 | t.Helper()
236 | tree, err := repo.Worktree()
237 | require.NoError(t, err)
238 | WriteFile(t, fs, path, content)
239 | _, err = tree.Add(path)
240 | require.NoError(t, err)
241 | commit, err := tree.Commit(msg, &git.CommitOptions{
242 | Author: &object.Signature{
243 | Name: "Example",
244 | Email: "test@example.com",
245 | When: time.Now(),
246 | },
247 | })
248 | require.NoError(t, err)
249 | _, err = repo.CommitObject(commit)
250 | require.NoError(t, err)
251 | }
252 | }
253 |
254 | // NewRepo returns a new Git repository.
255 | func NewRepo(t *testing.T, fs billy.Filesystem, commits ...CommitFunc) *git.Repository {
256 | t.Helper()
257 | storage := filesystem.NewStorage(fs, cache.NewObjectLRU(cache.DefaultMaxSize))
258 | repo, err := git.Init(storage, fs)
259 | require.NoError(t, err)
260 |
261 | // This changes the default ref to main instead of master.
262 | h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.ReferenceName("refs/heads/main"))
263 | err = storage.SetReference(h)
264 | require.NoError(t, err)
265 |
266 | for _, commit := range commits {
267 | commit(fs, repo)
268 | }
269 | return repo
270 | }
271 |
272 | // WriteFile writes a file to the filesystem.
273 | func WriteFile(t *testing.T, fs billy.Filesystem, path, content string) {
274 | t.Helper()
275 | file, err := fs.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
276 | require.NoError(t, err)
277 | _, err = file.Write([]byte(content))
278 | require.NoError(t, err)
279 | err = file.Close()
280 | require.NoError(t, err)
281 | }
282 |
--------------------------------------------------------------------------------
/testutil/mwtest/auth_basic.go:
--------------------------------------------------------------------------------
1 | package mwtest
2 |
3 | import "net/http"
4 |
5 | func BasicAuthMW(username, password string) func(http.Handler) http.Handler {
6 | return func(next http.Handler) http.Handler {
7 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
8 | if username != "" || password != "" {
9 | authUser, authPass, ok := r.BasicAuth()
10 | if !ok || username != authUser || password != authPass {
11 | w.WriteHeader(http.StatusUnauthorized)
12 | return
13 | }
14 | }
15 | next.ServeHTTP(w, r)
16 | })
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/testutil/registrytest/registrytest.go:
--------------------------------------------------------------------------------
1 | package registrytest
2 |
import (
	"archive/tar"
	"bytes"
	"crypto"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"net/url"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/registry"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/types"
	"github.com/stretchr/testify/require"

	// needed by the registry
	_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
)
31 |
32 | // New starts a new Docker registry listening on localhost.
33 | // It will automatically shut down when the test finishes.
34 | // It will store data in memory.
35 | func New(t testing.TB, mws ...func(http.Handler) http.Handler) string {
36 | t.Helper()
37 | regHandler := registry.New(registry.WithBlobHandler(registry.NewInMemoryBlobHandler()))
38 | for _, mw := range mws {
39 | regHandler = mw(regHandler)
40 | }
41 | regSrv := httptest.NewServer(regHandler)
42 | t.Cleanup(func() { regSrv.Close() })
43 | regSrvURL, err := url.Parse(regSrv.URL)
44 | require.NoError(t, err)
45 | return fmt.Sprintf("localhost:%s", regSrvURL.Port())
46 | }
47 |
48 | // WriteContainer uploads a container to the registry server.
49 | // It returns the reference to the uploaded container.
50 | func WriteContainer(t *testing.T, serverURL, containerRef, mediaType string, files map[string]any) string {
51 | var buf bytes.Buffer
52 | hasher := crypto.SHA256.New()
53 | mw := io.MultiWriter(&buf, hasher)
54 | wtr := tar.NewWriter(mw)
55 | for name, content := range files {
56 | var data []byte
57 | switch content := content.(type) {
58 | case string:
59 | data = []byte(content)
60 | case []byte:
61 | data = content
62 | default:
63 | var err error
64 | data, err = json.Marshal(content)
65 | require.NoError(t, err)
66 | }
67 | err := wtr.WriteHeader(&tar.Header{
68 | Mode: 0o777,
69 | Name: name,
70 | Typeflag: tar.TypeReg,
71 | Size: int64(len(data)),
72 | })
73 | require.NoError(t, err)
74 | _, err = wtr.Write(data)
75 | require.NoError(t, err)
76 | }
77 |
78 | h := v1.Hash{
79 | Algorithm: "sha256",
80 | Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))),
81 | }
82 | layer, err := partial.UncompressedToLayer(&uncompressedLayer{
83 | diffID: h,
84 | mediaType: types.MediaType(mediaType),
85 | content: buf.Bytes(),
86 | })
87 | require.NoError(t, err)
88 |
89 | image, err := mutate.Append(empty.Image, mutate.Addendum{
90 | Layer: layer,
91 | History: v1.History{
92 | Author: "registrytest",
93 | Created: v1.Time{Time: time.Now()},
94 | Comment: "created by the registrytest package",
95 | CreatedBy: "registrytest",
96 | },
97 | })
98 | require.NoError(t, err)
99 |
100 | // url.Parse will interpret localhost:12345 as scheme localhost and host 12345
101 | // so we need to add a scheme to the URL
102 | if !strings.HasPrefix(serverURL, "http://") {
103 | serverURL = "http://" + serverURL
104 | }
105 | parsed, err := url.Parse(serverURL)
106 | require.NoError(t, err)
107 | parsed.Path = containerRef
108 | parsedStr := parsed.String()
109 |
110 | ref, err := name.ParseReference(strings.TrimPrefix(parsedStr, "http://"))
111 | require.NoError(t, err)
112 |
113 | err = remote.Write(ref, image)
114 | require.NoError(t, err)
115 |
116 | return ref.String()
117 | }
118 |
// uncompressedLayer implements partial.UncompressedLayer from raw bytes.
type uncompressedLayer struct {
	diffID    v1.Hash         // digest of the uncompressed content
	mediaType types.MediaType // media type reported for the layer
	content   []byte          // raw, uncompressed layer bytes
}
125 |
// DiffID implements partial.UncompressedLayer.
// It returns the precomputed digest of the uncompressed content.
func (ul *uncompressedLayer) DiffID() (v1.Hash, error) {
	return ul.diffID, nil
}
130 |
131 | // Uncompressed implements partial.UncompressedLayer
132 | func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) {
133 | return io.NopCloser(bytes.NewBuffer(ul.content)), nil
134 | }
135 |
// MediaType returns the media type of the layer, as supplied at
// construction time.
func (ul *uncompressedLayer) MediaType() (types.MediaType, error) {
	return ul.mediaType, nil
}
140 |
--------------------------------------------------------------------------------