├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   └── feature_request.yml
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── actions
│   │   ├── clean-runner
│   │   │   └── action.yaml
│   │   └── show-disk-usage
│   │       └── action.yaml
│   ├── dependabot.yml
│   └── workflows
│       ├── build.yaml
│       ├── ci.yaml
│       ├── cloc.yml
│       ├── codeql-analysis.yml
│       ├── commit-msg.yaml
│       ├── coverage.yaml
│       ├── dco.yml
│       ├── nightly.yaml
│       ├── release.yaml
│       ├── stale.yaml
│       └── sync-3rdparty-images.yaml
├── .gitignore
├── .golangci.yml
├── ADOPTERS.md
├── CODEOWNERS
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── MAINTAINERS.md
├── Makefile
├── README.md
├── SECURITY.md
├── build.yaml
├── cmd
│   └── stacker
│       ├── bom.go
│       ├── build.go
│       ├── check.go
│       ├── chroot.go
│       ├── clean.go
│       ├── convert.go
│       ├── gc.go
│       ├── grab.go
│       ├── inspect.go
│       ├── internal_go.go
│       ├── lxc-wrapper
│       │   ├── .gitignore
│       │   ├── Makefile
│       │   └── lxc-wrapper.c
│       ├── main.go
│       ├── main_embed.go
│       ├── main_noembed.go
│       ├── publish.go
│       ├── recursive-build.go
│       ├── unpriv-setup.go
│       └── validate.go
├── doc
│   ├── hacking.md
│   ├── install.md
│   ├── layer-merging.md
│   ├── running.md
│   ├── stacker_yaml.md
│   ├── talks
│   │   ├── FOSDEM_2019.pdf
│   │   ├── OSS_EU_2018.pdf
│   │   ├── OSS_NA_2018.pdf
│   │   └── stacker101
│   │       ├── 1.README.md
│   │       ├── 1.stacker.yaml
│   │       ├── 2.stacker.yaml
│   │       ├── 3.stacker.yaml
│   │       ├── 4.stacker.yaml
│   │       ├── Makefile
│   │       ├── README-TALK.txt
│   │       ├── arch.jpg
│   │       ├── flow3.png
│   │       ├── overview.png
│   │       ├── stacker101.md
│   │       ├── standards-bodies.md
│   │       └── standards.png
│   ├── tricks.md
│   └── tutorial.md
├── go.mod
├── go.sum
├── install-build-deps.sh
├── pkg
│   ├── container
│   │   ├── container.go
│   │   ├── idmap
│   │   │   └── idmap.go
│   │   └── userns.go
│   ├── embed-exec
│   │   └── embed-exec.go
│   ├── lib
│   │   ├── bom.go
│   │   ├── containers_storage
│   │   │   └── lib.go
│   │   ├── dag.go
│   │   ├── dag_test.go
│   │   ├── dir.go
│   │   ├── dir_test.go
│   │   ├── file.go
│   │   ├── file_test.go
│   │   ├── hash.go
│   │   ├── image.go
│   │   ├── image_test.go
│   │   └── version.go
│   ├── log
│   │   ├── log.go
│   │   └── log_test.go
│   ├── mtree
│   │   └── filter.go
│   ├── overlay
│   │   ├── metadata.go
│   │   ├── metadata_test.go
│   │   ├── overlay-dirs.go
│   │   ├── overlay.go
│   │   ├── pack.go
│   │   ├── pool.go
│   │   └── unpriv-setup.go
│   ├── stacker
│   │   ├── api.go
│   │   ├── base.go
│   │   ├── bom.go
│   │   ├── build.go
│   │   ├── cache.go
│   │   ├── cache_test.go
│   │   ├── check.go
│   │   ├── convert.go
│   │   ├── deps.go
│   │   ├── git.go
│   │   ├── grab.go
│   │   ├── import.go
│   │   ├── lock.go
│   │   ├── network.go
│   │   ├── publisher.go
│   │   ├── referrer.go
│   │   ├── referrer_test.go
│   │   └── storage.go
│   ├── storage
│   │   ├── storage.go
│   │   └── unpriv-setup.go
│   ├── test
│   │   └── cover.go
│   └── types
│       ├── config.go
│       ├── imagesource.go
│       ├── layer.go
│       ├── layer_bind_test.go
│       ├── layer_import_test.go
│       ├── layer_type.go
│       ├── stackerfile.go
│       ├── stackerfiles.go
│       ├── storage.go
│       └── types_test.go
├── test
│   ├── .gitignore
│   ├── annotations-namespace.bats
│   ├── annotations.bats
│   ├── args.bats
│   ├── asterisk.bats
│   ├── atomfs-erofs.bats
│   ├── atomfs-squashfs.bats
│   ├── basic.bats
│   ├── binds.bats
│   ├── bom.bats
│   ├── broken-link.bats
│   ├── build-only.bats
│   ├── built-type.bats
│   ├── caching.bats
│   ├── check.bats
│   ├── chroot.bats
│   ├── clean.bats
│   ├── config.bats
│   ├── convert.bats
│   ├── cp-not-required.bats
│   ├── dependency-order.bats
│   ├── dir-whiteout.bats
│   ├── dirlinks.bats
│   ├── docker-base.bats
│   ├── empty-layers.bats
│   ├── entrypoint.bats
│   ├── env.bats
│   ├── grab.bats
│   ├── gzip.bats
│   ├── helpers.bash
│   ├── import-http.bats
│   ├── import.bats
│   ├── invalid.bats
│   ├── labels.bats
│   ├── log.bats
│   ├── main.py
│   ├── multi-arch.bats
│   ├── multiple-output-types.bats
│   ├── oci-import.bats
│   ├── overlay-dirs.bats
│   ├── prerequisites.bats
│   ├── publish.bats
│   ├── setup_suite.bash
│   ├── squashfs.bats
│   ├── static-analysis.sh
│   ├── tmpfs.bats
│   ├── unprivileged.bats
│   └── whiteout.bats
└── tools
    └── oci-copy
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | name: Bug report
2 | description: File a bug report
3 | title: "Bug: "
4 | labels: bug
5 | body:
6 | - type: input
7 | attributes:
8 | label: "stacker version"
9 | placeholder: "v0.0.1 or commit hash"
10 | validations:
11 | required: true
12 |
13 | - type: textarea
14 | attributes:
15 | label: "Describe the bug"
16 | description: "Try to describe your issue/bug as well as you can, as that will help with speeding up fixing"
17 |
18 | - type: textarea
19 | attributes:
20 | label: "To reproduce"
21 | description: Steps to reproduce the behavior
22 | value: |
23 | 1. Configuration
24 | 2. Client tool used
25 | 3. Seen error
26 |
27 | - type: textarea
28 | attributes:
29 | label: "Expected behavior"
30 | description: "A clear and concise description of what you expected to happen."
31 |
32 | - type: textarea
33 | attributes:
34 | label: "Screenshots"
35 | description: "If applicable, add screenshots to help explain your problem."
36 |
37 | - type: textarea
38 | attributes:
39 | label: "Additional context"
40 | description: "Add any other context about the problem here."
41 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Request a feature
3 | title: "Feat: "
4 | labels: feature
5 | body:
6 | - type: textarea
7 | attributes:
8 | label: "Is your feature request related to a problem? Please describe."
9 | description: "A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]"
10 |
11 | - type: textarea
12 | attributes:
13 | label: "Describe the solution you'd like"
14 | description: "A clear and concise description of what you want to happen."
15 |
16 | - type: textarea
17 | attributes:
18 | label: "Describe alternatives you've considered"
19 | description: "A clear and concise description of any alternative solutions or features you've considered."
20 |
21 | - type: textarea
22 | attributes:
23 | label: "Additional context"
24 | description: "Add any other context or screenshots about the feature request here."
25 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
6 | **What type of PR is this?**
7 |
8 |
15 |
16 | **Which issue does this PR fix**:
17 |
18 |
19 | **What does this PR do / Why do we need it**:
20 |
21 |
22 | **If an issue # is not available, please add repro steps and logs showing the issue**:
23 |
24 |
25 | **Testing done on this change**:
26 |
30 |
31 | **Automation added to e2e**:
32 |
36 |
37 | **Will this break upgrades or downgrades?**
38 |
39 |
40 | **Does this PR introduce any user-facing change?**:
41 |
46 |
47 | ```release-note
48 |
49 | ```
50 |
51 | By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
52 |
--------------------------------------------------------------------------------
/.github/actions/clean-runner/action.yaml:
--------------------------------------------------------------------------------
1 | name: 'Clean runner'
2 | description: 'Remove unneeded tooling'
3 | runs:
4 | using: "composite"
5 | steps:
6 | - shell: bash
7 | run: |
8 | # To free up ~15 GB of disk space
9 | sudo rm -rf /opt/ghc
10 | sudo rm -rf /usr/local/share/boost
11 | sudo rm -rf /usr/local/lib/android
12 | sudo rm -rf /usr/share/dotnet
13 |
--------------------------------------------------------------------------------
/.github/actions/show-disk-usage/action.yaml:
--------------------------------------------------------------------------------
1 | name: 'Show disk usage'
2 | description: 'Show information about disk usage'
3 | runs:
4 | using: "composite"
5 | steps:
6 | - shell: bash
7 | run: |
8 | cd $GITHUB_WORKSPACE
9 | set -x
10 | df -h
11 | sudo ls -lRh /tmp/* || true
12 | sudo du -sh /tmp || true
13 | sudo du -sh /tmp/* || true
14 | sudo find /tmp/ -size +5M | sudo xargs ls -lh
15 | sudo du -sh ./* || true
16 | sudo find ./ -size +5M | xargs ls -lh
17 | sudo du -sh /var/
18 | sudo du -sh /var/lib/docker/
19 | sudo du -sh /home/runner/work/
20 | set +x
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "gomod" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 | - package-ecosystem: "github-actions" # See documentation for possible values
13 | directory: "/" # Location of package manifests
14 | schedule:
15 | interval: "weekly"
16 |
--------------------------------------------------------------------------------
/.github/workflows/build.yaml:
--------------------------------------------------------------------------------
1 | name: Reusable stacker build
2 | on:
3 | workflow_call:
4 | inputs:
5 | # note >-, args needs to be strings to be used as inputs
6 | # for the reusable build.yaml workflow
7 | go-version:
8 | required: false
9 | type: string
10 | description: 'Stringified JSON object listing go versions'
11 | default: >-
12 | ["1.22.x", "1.23.x"]
13 | privilege-level:
14 | required: false
15 | type: string
16 | description: 'Stringified JSON object listing stacker privilege-level'
17 | default: >-
18 | ["unpriv", "priv"]
19 | build-id:
20 | required: false
21 | type: string
22 | description: 'build-id'
23 | default: "${{ github.sha }}"
24 | slow-test:
25 | required: false
26 | type: boolean
27 | description: 'Should slow tests be run?'
28 | default: true
29 | secrets:
30 | codecov_token:
31 | required: true
32 |
33 | jobs:
34 | build:
35 | runs-on: ubuntu-24.04
36 | services:
37 | registry:
38 | image: ghcr.io/project-stacker/registry:2
39 | ports:
40 | - 5000:5000
41 | strategy:
42 | matrix:
43 | go-version: ${{fromJson(inputs.go-version)}}
44 | privilege-level: ${{fromJson(inputs.privilege-level)}}
45 | name: "golang ${{ matrix.go-version }} privilege ${{ matrix.privilege-level }}"
46 | steps:
47 | - uses: actions/checkout@v3
48 | - name: Clean disk space
49 | uses: ./.github/actions/clean-runner
50 | - uses: benjlevesque/short-sha@v2.1
51 | id: short-sha
52 | - name: Set up golang ${{ matrix.go-version }}
53 | uses: actions/setup-go@v5
54 | with:
55 | go-version: ${{ matrix.go-version }}
56 | - name: Setup Environment
57 | run: |
58 | gopath=$PWD/.build/gopath
59 | echo "GOPATH=$gopath" >> $GITHUB_ENV
60 | echo "GOCACHE=$gopath/gocache" >> $GITHUB_ENV
61 | echo "PATH=$gopath/bin:$PATH" >> $GITHUB_ENV
62 | echo "SLOW_TEST=${{inputs.slow-test}}" >> $GITHUB_ENV
63 | echo "STACKER_DOCKER_BASE=oci:$PWD/.build/oci-clone:" >> $GITHUB_ENV
64 |
65 | echo "PWD=$PWD"
66 | cat "$GITHUB_ENV"
67 | - name: install dependencies
68 | run: |
69 | ./install-build-deps.sh
70 | echo "running kernel is: $(uname -a)"
71 | - name: docker-clone
72 | run: |
73 | make docker-clone "STACKER_DOCKER_BASE=docker://ghcr.io/project-stacker/" CLONE_D="$PWD/.build/oci-clone"
74 | - name: Go-download
75 | run: |
76 | make go-download
77 | - name: Show disk usage before building the binaries
78 | uses: ./.github/actions/show-disk-usage
79 | - name: Build-level1
80 | run: |
81 | make show-info
82 | make stacker-dynamic VERSION_FULL=${{ inputs.build-id }}
83 | - name: Build
84 | run: |
85 | make stacker VERSION_FULL=${{ inputs.build-id }}
86 | env:
87 | REGISTRY_URL: localhost:5000
88 | ZOT_HOST: localhost
89 | ZOT_PORT: 8080
90 | - name: Show disk usage before running the tests
91 | if: always()
92 | uses: ./.github/actions/show-disk-usage
93 | - name: Test
94 | run: |
95 | make check VERSION_FULL=${{ inputs.build-id }} PRIVILEGE_LEVEL=${{ matrix.privilege-level }}
96 | env:
97 | REGISTRY_URL: localhost:5000
98 | ZOT_HOST: localhost
99 | ZOT_PORT: 8080
100 | - name: Show disk usage after running the tests
101 | if: always()
102 | uses: ./.github/actions/show-disk-usage
103 | - name: Upload code coverage
104 | uses: codecov/codecov-action@v5
105 | with:
106 | token: ${{ secrets.codecov_token }}
107 | - name: Upload artifacts
108 | uses: actions/upload-artifact@v4
109 | if: ${{ (matrix.privilege-level == 'priv') && (matrix.go-version == '1.23.x') }}
110 | with:
111 | # if there is more than 1 go-version, we would need to account for that here.
112 | name: binary
113 | path: stacker
114 | if-no-files-found: error
115 | - uses: actions/cache@v3
116 | id: restore-build
117 | with:
118 | path: stacker
119 | key: ${{ inputs.build-id }}
120 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: ci
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | build:
13 | uses: ./.github/workflows/build.yaml
14 | with:
15 | slow-test: false
16 | secrets:
17 | codecov_token: ${{ secrets.CODECOV_TOKEN }}
18 | coverage:
19 | uses: ./.github/workflows/coverage.yaml
20 | with:
21 | slow-test: false
22 | secrets:
23 | codecov_token: ${{ secrets.CODECOV_TOKEN }}
24 |
--------------------------------------------------------------------------------
/.github/workflows/cloc.yml:
--------------------------------------------------------------------------------
1 | name: "Lines of code statistics"
2 | on:
3 | push:
4 | branches:
5 | - main
6 | pull_request:
7 | branches:
8 | - main
9 | release:
10 | types:
11 | - published
12 |
13 | permissions: read-all
14 |
15 | jobs:
16 | loc:
17 | name: Lines of code
18 | runs-on: ubuntu-latest
19 | steps:
20 | - name: Install go
21 | uses: actions/setup-go@v5
22 | with:
23 | go-version: 1.22.x
24 | - name: Check out source code
25 | uses: actions/checkout@v3
26 | - name: Install dependencies
27 | run: |
28 | cd $GITHUB_WORKSPACE
29 | go install github.com/hhatto/gocloc/cmd/gocloc@latest
30 | - name: All sources
31 | run: |
32 | cd $GITHUB_WORKSPACE
33 | gocloc .
34 | - name: All sources (except tests)
35 | run: |
36 | cd $GITHUB_WORKSPACE
37 | gocloc --not-match='.*_test.go' .
38 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ main ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ main ]
20 | schedule:
21 | - cron: '17 11 * * 0'
22 |
23 | permissions: read-all
24 |
25 | jobs:
26 | analyze:
27 | name: Analyze
28 | runs-on: ubuntu-latest
29 | permissions:
30 | actions: read
31 | contents: read
32 | security-events: write
33 | strategy:
34 | fail-fast: false
35 | matrix:
36 | language: [ 'go' ]
37 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
38 | # Learn more:
39 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
40 | env:
41 | CGO_ENABLED: 0
42 | GOFLAGS: "-tags=exclude_graphdriver_btrfs,exclude_graphdriver_devicemapper,containers_image_openpgp,osusergo,netgo"
43 |
44 | steps:
45 | - name: Checkout repository
46 | uses: actions/checkout@v3
47 |
48 | - name: Install go
49 | uses: actions/setup-go@v5
50 | with:
51 | go-version: 1.22.x
52 |
53 | # Initializes the CodeQL tools for scanning.
54 | - name: Initialize CodeQL
55 | uses: github/codeql-action/init@v3
56 | with:
57 | languages: ${{ matrix.language }}
58 | # If you wish to specify custom queries, you can do so here or in a config file.
59 | # By default, queries listed here will override any specified in a config file.
60 | # Prefix the list here with "+" to use these queries and those in the config file.
61 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
62 |
63 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
64 | # If this step fails, then you should remove it and run the build manually (see below)
65 | - name: Autobuild
66 | uses: github/codeql-action/autobuild@v3
67 |
68 | # ℹ️ Command-line programs to run using the OS shell.
69 | # 📚 https://git.io/JvXDl
70 |
71 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
72 | # and modify them (or add more) to build your code if your project
73 | # uses a compiled language
74 | #- run: |
75 | # make bootstrap
76 | # make release
77 |
78 | - name: Perform CodeQL Analysis
79 | uses: github/codeql-action/analyze@v3
80 |
--------------------------------------------------------------------------------
/.github/workflows/commit-msg.yaml:
--------------------------------------------------------------------------------
1 | name: 'Check commit message style'
2 | on:
3 | pull_request:
4 | types:
5 | - opened
6 | - edited
7 | - reopened
8 | - synchronize
9 | push:
10 | branches:
11 | - main
12 |
13 | jobs:
14 | check-commit-message-style:
15 | name: Check commit message style
16 | runs-on: ubuntu-latest
17 | steps:
18 | - name: Checkout
19 | uses: actions/checkout@v3
20 | - name: Check Commit Type
21 | uses: gsactions/commit-message-checker@v2
22 | with:
23 | pattern: '^((build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test)(\(.+\))?(!)?(: (.*\s*)*))'
24 | flags: 'gm'
25 | error: 'Your first line has to follow the Conventional Commits specification.'
26 | excludeDescription: 'true' # optional: this excludes the description body of a pull request
27 | excludeTitle: 'true' # optional: this excludes the title of a pull request
28 | checkAllCommitMessages: 'true'
29 | accessToken: ${{ secrets.GITHUB_TOKEN }}
30 | - name: Check Line Length
31 | uses: gsactions/commit-message-checker@v2
32 | with:
33 | pattern: '^[^#].{1,74}'
34 | error: 'The maximum line length of 74 characters is exceeded.'
35 | excludeDescription: 'true' # optional: this excludes the description body of a pull request
36 | excludeTitle: 'true' # optional: this excludes the title of a pull request
37 | checkAllCommitMessages: 'true' # optional: this checks all commits associated with a pull request
38 | accessToken: ${{ secrets.GITHUB_TOKEN }} # github access token is only required if checkAllCommitMessages is true
39 |
--------------------------------------------------------------------------------
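For reference, a commit message that satisfies both checks above (a Conventional Commits type prefix and lines of at most 74 characters) and carries the sign-off that the DCO workflow below verifies might look like this; it is a hypothetical example, not one taken from this repository's history:

```
fix(overlay): preserve whiteouts when repacking layers

Without this, deleted files could reappear in derived images because
the repack step dropped the whiteout entries.

Signed-off-by: Jane Doe <jane@example.com>
```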
/.github/workflows/coverage.yaml:
--------------------------------------------------------------------------------
1 | name: Reusable stacker build for coverage
2 | on:
3 | workflow_call:
4 | inputs:
5 | # note >-, args needs to be strings to be used as inputs
6 | # for the reusable build.yaml workflow
7 | go-version:
8 | required: false
9 | type: string
10 | description: 'Stringified JSON object listing go versions'
11 | default: >-
12 | ["1.22.x", "1.23.x"]
13 | privilege-level:
14 | required: false
15 | type: string
16 | description: 'Stringified JSON object listing stacker privilege-level'
17 | default: >-
18 | ["unpriv", "priv"]
19 | build-id:
20 | required: false
21 | type: string
22 | description: 'build-id'
23 | default: "${{ github.sha }}"
24 | slow-test:
25 | required: false
26 | type: boolean
27 | description: 'Should slow tests be run?'
28 | default: true
29 | secrets:
30 | codecov_token:
31 | required: true
32 |
33 | jobs:
34 | build:
35 | runs-on: ubuntu-24.04
36 | services:
37 | registry:
38 | image: ghcr.io/project-stacker/registry:2
39 | ports:
40 | - 5000:5000
41 | strategy:
42 | matrix:
43 | go-version: ${{fromJson(inputs.go-version)}}
44 | privilege-level: ${{fromJson(inputs.privilege-level)}}
45 | name: "golang ${{ matrix.go-version }} privilege ${{ matrix.privilege-level }}"
46 | steps:
47 | - uses: actions/checkout@v3
48 | - name: Clean disk space
49 | uses: ./.github/actions/clean-runner
50 | - uses: benjlevesque/short-sha@v2.1
51 | id: short-sha
52 | - name: Set up golang ${{ matrix.go-version }}
53 | uses: actions/setup-go@v5
54 | with:
55 | go-version: ${{ matrix.go-version }}
56 | - name: Setup Environment
57 | run: |
58 | gopath=$PWD/.build/gopath
59 | echo "GOPATH=$gopath" >> $GITHUB_ENV
60 | echo "GOCACHE=$gopath/gocache" >> $GITHUB_ENV
61 | echo "PATH=$gopath/bin:$PATH" >> $GITHUB_ENV
62 | echo "SLOW_TEST=${{inputs.slow-test}}" >> $GITHUB_ENV
63 | echo "STACKER_DOCKER_BASE=oci:$PWD/.build/oci-clone:" >> $GITHUB_ENV
64 | GOCOVERDIR=$(mktemp -d)
65 | echo "GOCOVERDIR=$GOCOVERDIR" >> $GITHUB_ENV
66 | echo "PWD=$PWD"
67 | cat "$GITHUB_ENV"
68 | - name: install dependencies
69 | run: |
70 | ./install-build-deps.sh
71 | echo "running kernel is: $(uname -a)"
72 | - name: docker-clone
73 | run: |
74 | make docker-clone "STACKER_DOCKER_BASE=docker://ghcr.io/project-stacker/" CLONE_D="$PWD/.build/oci-clone"
75 | - name: Go-download
76 | run: |
77 | make go-download
78 | - name: Show disk usage before building the binaries
79 | uses: ./.github/actions/show-disk-usage
80 | - name: Build-level1
81 | run: |
82 | make show-info
83 | make stacker-dynamic VERSION_FULL=${{ inputs.build-id }}
84 | - name: Show disk usage before running the tests
85 | if: always()
86 | uses: ./.github/actions/show-disk-usage
87 | - name: Build and test
88 | run: |
89 | make check-cov GOCOVERDIR=$GOCOVERDIR PRIVILEGE_LEVEL=${{ matrix.privilege-level }}
90 | go tool covdata textfmt -i $GOCOVERDIR -o coverage-${{ matrix.privilege-level }}.txt
91 | go tool covdata percent -i $GOCOVERDIR
92 | ls -altR $GOCOVERDIR
93 | env:
94 | REGISTRY_URL: localhost:5000
95 | ZOT_HOST: localhost
96 | ZOT_PORT: 8080
97 | - name: Show disk usage after running the tests
98 | if: always()
99 | uses: ./.github/actions/show-disk-usage
100 | - name: Upload code coverage
101 | uses: codecov/codecov-action@v5
102 | with:
103 | token: ${{ secrets.codecov_token }}
104 | files: coverage-${{ matrix.privilege-level}}.txt
105 | - name: Upload artifacts
106 | uses: actions/upload-artifact@v4
107 | if: ${{ (matrix.privilege-level == 'priv') && (matrix.go-version == '1.23.x') }}
108 | with:
109 | # if there is more than 1 go-version, we would need to account for that here.
110 | name: binary-cov
111 | path: stacker
112 | if-no-files-found: error
113 | - uses: actions/cache@v3
114 | id: restore-build
115 | with:
116 | path: stacker
117 | key: ${{ inputs.build-id }}
118 |
--------------------------------------------------------------------------------
/.github/workflows/dco.yml:
--------------------------------------------------------------------------------
1 | # .github/workflows/dco.yml
2 | name: DCO
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 |
8 | permissions: read-all
9 |
10 | jobs:
11 | check:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3
15 | - name: Set up Python 3.x
16 | uses: actions/setup-python@v5
17 | with:
18 | python-version: '3.x'
19 | - name: Check DCO
20 | env:
21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22 | run: |
23 | pip3 install -U dco-check
24 | dco-check
25 |
--------------------------------------------------------------------------------
/.github/workflows/nightly.yaml:
--------------------------------------------------------------------------------
1 | name: "nightly"
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: '0 0 * * *'
7 |
8 | jobs:
9 | build:
10 | uses: ./.github/workflows/build.yaml
11 | secrets:
12 | codecov_token: ${{ secrets.CODECOV_TOKEN }}
13 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: "tagged-release"
2 |
3 | on:
4 | release:
5 | types:
6 | - published
7 |
8 | jobs:
9 | build-id:
10 | runs-on: ubuntu-latest
11 | outputs:
12 | build-id: ${{steps.build-id.outputs.build-id}}
13 | steps:
14 | - uses: actions/checkout@v3
15 | - uses: benjlevesque/short-sha@v2.1
16 | id: short-sha
17 | - id: build-id
18 | run: echo "build-id=${{ github.event.release.tag_name }}-${{ steps.short-sha.outputs.sha }}" >> "$GITHUB_OUTPUT"
19 | ci:
20 | uses: ./.github/workflows/build.yaml
21 | needs: build-id
22 | with:
23 | # note >-, args needs to be strings to be used as inputs
24 | # for the reusable build.yaml workflow
25 | go-version: >-
26 | ["1.22.x"]
27 | privilege-level: >-
28 | ["priv"]
29 | build-id: "${{needs.build-id.outputs.build-id}}"
30 | secrets:
31 | codecov_token: ${{ secrets.CODECOV_TOKEN }}
32 | release:
33 | name: "Tagged Release"
34 | runs-on: ubuntu-24.04
35 | # needs ci for the cached stacker binary
36 | needs: [build-id, ci]
37 | steps:
38 | - uses: actions/cache@v3
39 | id: restore-build
40 | with:
41 | path: stacker
42 | key: ${{needs.build-id.outputs.build-id}}
43 | - if: github.event_name == 'release' && github.event.action == 'published'
44 | name: Publish artifacts on releases
45 | uses: svenstaro/upload-release-action@v2
46 | with:
47 | repo_token: ${{ secrets.GITHUB_TOKEN }}
48 | file: stacker
49 | tag: ${{ github.ref }}
50 | overwrite: true
51 | file_glob: true
52 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yaml:
--------------------------------------------------------------------------------
1 | name: 'Close stale issues and PRs'
2 | on:
3 | schedule:
4 | - cron: '30 1 * * *'
5 |
6 | permissions:
7 | contents: read
8 | issues: write
9 | pull-requests: write
10 |
11 | jobs:
12 | stale:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/stale@v9
16 | with:
17 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
18 | stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
19 | close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
20 | close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
21 | days-before-issue-stale: 30
22 | days-before-pr-stale: 45
23 | days-before-issue-close: 5
24 | days-before-pr-close: 10
25 | stale-issue-label: 'no-issue-activity'
26 | exempt-issue-labels: 'awaiting-approval,work-in-progress'
27 | stale-pr-label: 'no-pr-activity'
28 | exempt-pr-labels: 'awaiting-approval,work-in-progress'
29 | only-labels: 'awaiting-feedback,awaiting-answers'
30 |
--------------------------------------------------------------------------------
/.github/workflows/sync-3rdparty-images.yaml:
--------------------------------------------------------------------------------
1 | name: 'Sync images and artifacts to ghcr'
2 | on:
3 | schedule:
4 | - cron: '30 1 * * *'
5 | push:
6 | branches:
7 | - main
8 | workflow_dispatch:
9 |
10 | permissions: read-all
11 |
12 | jobs:
13 | sync:
14 | name: "images"
15 | permissions:
16 | contents: read
17 | packages: write
18 | runs-on: ubuntu-latest
19 | steps:
20 | - name: Log in to GitHub Docker Registry
21 | uses: docker/login-action@v3
22 | with:
23 | registry: ghcr.io
24 | username: ${{ github.actor }}
25 | password: ${{ secrets.GITHUB_TOKEN }}
26 | - name: Tag and push to ghcr
27 | run: |
28 | set -e
29 | for n in ubuntu:latest alpine:edge alpine:3.19 centos:latest busybox:latest; do
30 | dest="ghcr.io/${{ github.repository_owner }}/$n"
31 | docker trust inspect "$n"
32 | docker pull public.ecr.aws/docker/library/$n
33 | docker tag public.ecr.aws/docker/library/$n "$dest"
34 | docker push $dest
35 | done
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swp
2 | *.stacker
3 | /stacker
4 | /stacker-dynamic
5 | .build
6 | coverage.txt
7 | hack/
8 |
9 | # IDEs
10 | .vscode
11 | .idea
12 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | linters:
3 | disable:
4 | - errcheck
5 | - staticcheck
6 | exclusions:
7 | generated: lax
8 | presets:
9 | - comments
10 | - common-false-positives
11 | - legacy
12 | - std-error-handling
13 | rules:
14 | - path: (.+)\.go$
15 | text: PAXRecords
16 | - path: (.+)\.go$
17 | text: os.SEEK_
18 | paths:
19 | - third_party$
20 | - builtin$
21 | - examples$
22 | formatters:
23 | exclusions:
24 | generated: lax
25 | paths:
26 | - third_party$
27 | - builtin$
28 | - examples$
29 |
--------------------------------------------------------------------------------
/ADOPTERS.md:
--------------------------------------------------------------------------------
1 | # Adopters
2 |
3 | 1. Cisco Systems, Inc.
4 | 2. AppDynamics (acquired by Cisco Systems, Inc.)
5 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This is a comment.
2 | # Each line is a file pattern followed by one or more owners.
3 |
4 | * @rchincha @smoser @hallyn @peusebiu
5 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Community Code of Conduct
2 |
3 | stacker follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
4 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | See [Contributing Guide](https://stackerbuild.io/developer_guide/CONTRIBUTING/) for instructions and guidelines.
2 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # stacker Maintainers
2 |
3 | ## Maintainers
4 |
5 | | Maintainer | Github ID | Affiliation |
6 | | --------------- | --------- | ----------- |
7 | | Ramkumar Chinchani | [rchincha](https://github.com/rchincha) | [Cisco Systems](https://www.cisco.com) |
8 | | Scott Moser | [smoser](https://github.com/smoser) | [Cisco Systems](https://www.cisco.com) |
9 | | Serge Hallyn | [hallyn](https://github.com/hallyn) | [Cisco Systems](https://www.cisco.com) |
10 | | Petu Constantin Eusebiu | [peusebiu](https://github.com/peusebiu) | [Cisco Systems](https://www.cisco.com) |
11 | | Tycho Andersen | [tych0](https://github.com/tych0) | [Netflix](https://www.netflix.com) |
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | # stacker [](https://github.com/project-stacker/stacker/actions/workflows/ci.yaml) [](https://codecov.io/github/project-stacker/stacker) [](https://github.com/project-stacker/stacker/actions?query=workflow%3ACodeQL) [](https://pkg.go.dev/stackerbuild.io/stacker) [](https://github.com/project-stacker/stacker/actions/workflows/nightly.yaml)
7 |
8 | Stacker is a tool for building OCI images natively via a declarative YAML format.
9 |
10 | ## Features
11 |
12 | * Single binary
13 | * Rootless builds
14 | * Hermetically sealed builds using LXC containers
15 | * Also available as a [GitHub action](https://github.com/project-stacker/stacker-build-push-action)
16 |
17 | ### Installation
18 |
19 | Stacker has various [build](doc/install.md) and [runtime](doc/running.md)
20 | dependencies.
21 |
22 | ### Hacking
23 |
24 | See the [hacking](doc/hacking.md) guide for tips on hacking/debugging stacker.
25 |
26 | ### Usage
27 |
28 | See the [tutorial](doc/tutorial.md) for a short introduction to how to use stacker.
29 |
30 | See the [`stacker.yaml` specification](doc/stacker_yaml.md) for full details on
31 | the `stacker.yaml` file format.
32 |
33 | Additionally, there are some [tips and tricks](doc/tricks.md) for common usage.
34 |
35 | ### TODO / Roadmap
36 |
37 | * Upstream something to containers/image that allows for automatic detection
38 | of compression
39 | * Design/implement OCIv2 drafts + final spec when it comes out
40 |
41 | ### Conference Talks
42 |
43 | * An Operator Centric Way to Update Application Containers FOSDEM 2019
44 | * [video](https://archive.fosdem.org/2019/schedule/event/containers_atomfs/)
45 | * [slides](doc/talks/FOSDEM_2019.pdf)
46 | * Building OCI Images without Privilege OSS EU 2018
47 | * [slides](doc/talks/OSS_EU_2018.pdf)
48 | * Building OCI Images without Privilege OSS NA 2018
49 | * [slides](doc/talks/OSS_NA_2018.pdf)
50 |
51 | (Note that despite the similarity in name of the 2018 talks, the content is
52 | mostly disjoint; I need to be more creative with naming.)
53 |
54 | ### License
55 |
56 | `stacker` is released under the [Apache License, Version 2.0](LICENSE), and is:
57 |
58 | Copyright (C) 2017-2022 Cisco Systems, Inc. and contributors
59 |
--------------------------------------------------------------------------------
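To make the declarative YAML format mentioned in the README concrete, here is a minimal, hypothetical `stacker.yaml`; the target name, base image URL, and packages are placeholders, and `doc/stacker_yaml.md` (plus this repository's own `build.yaml`) remains the authoritative reference for the syntax:

```yaml
# One build target named "web", based on an image pulled from a registry.
web:
  from:
    type: docker
    url: docker://ghcr.io/project-stacker/ubuntu:latest
  run: |
    # commands run inside the build container for this layer
    apt-get update
    apt-get install -y nginx
```

Running `stacker build -f stacker.yaml` (see `cmd/stacker/build.go`) would then emit an OCI layout containing a `web` tag.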
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | ------- | ------------------ |
7 | | > 1.0.x | :white_check_mark: |
8 | | < 1.0.0 | :x: |
9 |
10 | ## Reporting a Vulnerability
11 |
12 | When a vulnerability is found, please *DO NOT* file a public issue.
13 | Instead, send an email to one of the core [maintainers](MAINTAINERS.md) and
14 | await acknowledgement. Normally, we expect to resolve the issue within 60 days.
15 | However, should there be an exception, the team will reach out with next steps.
16 |
--------------------------------------------------------------------------------
/build.yaml:
--------------------------------------------------------------------------------
1 | build-env:
2 | build_only: true
3 | from:
4 | type: docker
5 | url: ${{STACKER_BUILD_BASE_IMAGE}}
6 | imports:
7 | - https://github.com/json-c/json-c/archive/refs/tags/json-c-0.16-20220414.tar.gz
8 | - https://gitlab.com/cryptsetup/cryptsetup/-/archive/v2.6.0/cryptsetup-v2.6.0.tar.gz
9 | - https://github.com/lvmteam/lvm2/archive/refs/tags/v2_03_18.tar.gz
10 | run: |
11 | #!/bin/sh -ex
12 | # libapparmor is only in testing
13 | head -n1 /etc/apk/repositories | sed 's/main/testing/g' >> /etc/apk/repositories
14 |
15 | apk add git findutils go automake autoconf make gcc libtool \
16 | acl-dev acl-static build-base \
17 | libseccomp-dev libseccomp-static \
18 | libcap-dev libcap-static \
19 | libapparmor-dev \
20 | zlib-static lz4-static \
21 | zstd-dev zstd-static \
22 | xz \
23 | gettext-dev \
24 | lvm2-dev util-linux-dev \
25 | linux-headers \
26 | util-linux-static \
27 | po4a
28 |
29 | # json-c doesn't ship static libraries in alpine
30 | apk add cmake
31 | tar -xf /stacker/imports/json-c-*
32 | cd json-c-*
33 | mkdir build
34 | cd build
35 | cmake ..
36 | make -j$(grep -c processor /proc/cpuinfo) install
37 | cd /
38 |
39 | # build libdevmapper.a
40 | tar -xf /stacker/imports/v2_03_18.tar.gz
41 | cd lvm2-*
42 | ./configure --enable-static_link
43 | make install_device-mapper
44 | cd /
45 |
46 | # build static cryptsetup without all the command line tools
47 | apk add gettext gettext-dev zlib-static lz4-static openssl-dev \
48 | openssl-libs-static popt-dev bash
49 | tar -xf /stacker/imports/cryptsetup*
50 | cd cryptsetup*
51 | ./autogen.sh
52 | CFLAGS="-D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE" \
53 | ./configure --enable-static \
54 | --disable-cryptsetup --disable-veritysetup --disable-integritysetup \
55 | --disable-nls --disable-ssh-token \
56 | --disable-asciidoc
57 | make -j$(grep -c processor /proc/cpuinfo) install
58 | cd /
59 |
60 | # build lxc
61 | apk add meson ninja docbook2x docbook2x-doc curl
62 | git clone --depth 1 ${{LXC_CLONE_URL}} -b ${{LXC_BRANCH}}
63 | mkdir /etc/default
64 | cd lxc
65 | meson setup -Dprefix=/usr -Dman=false -Dsd-bus=disabled -Dinit-script=sysvinit build
66 | meson compile -C build
67 | meson install -C build
68 | cd /
69 |
70 | # build lzma
71 | git clone -b v5.2.6 https://github.com/xz-mirror/xz.git
72 | cd xz
73 | ./autogen.sh
74 | ./configure --enable-static --enable-shared --prefix=/usr
75 | make -j$(grep -c processor /proc/cpuinfo) install
76 | cd /
77 |
78 | build:
79 | build_only: true
80 | from:
81 | type: built
82 | tag: build-env
83 | binds:
84 | - . -> /stacker-tree
85 | - ${{BUILD_D}} -> /build
86 | run: |
87 | #!/bin/sh -ex
88 | # golang wants somewhere to put its garbage
89 | export HOME=/root
90 | export LXC_VERSION=$(git -C /lxc rev-parse HEAD)
91 | export VERSION_FULL=${{VERSION_FULL}}
92 |
93 | # apk go pkg doesn't seem to have the standard go.env which would set GOTOOLCHAIN=auto.
94 | # therefore it defaults to 'local', which ignores the `toolchain` line in go.mod
95 | # let's re-set it here so we get the right toolchain version as specified in go.mod
96 | export GOTOOLCHAIN=auto
97 |
98 | cd /stacker-tree
99 | make BUILD_D=/build show-info
100 | make BUILD_D=/build -C cmd/stacker/lxc-wrapper clean
101 | if [ x${{WITH_COV}} = x"yes" ]; then
102 | make BUILD_D=/build stacker-static-cov
103 | else
104 | make -C /stacker-tree stacker-static
105 | fi
106 |
--------------------------------------------------------------------------------
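The `${{...}}` placeholders in the file above (`STACKER_BUILD_BASE_IMAGE`, `LXC_CLONE_URL`, `LXC_BRANCH`, `BUILD_D`, `WITH_COV`, `VERSION_FULL`) are resolved at build time through the `--substitute`/`--substitute-file` flags described in `cmd/stacker/build.go` below. A substitute file uses the 'FOO: bar' YAML format; the values here are purely illustrative, not the ones the Makefile actually passes:

```yaml
# Hypothetical substitutions for build.yaml; adjust to your environment.
STACKER_BUILD_BASE_IMAGE: docker://ghcr.io/project-stacker/alpine:edge
LXC_CLONE_URL: https://github.com/lxc/lxc
LXC_BRANCH: main
BUILD_D: /tmp/stacker-build
WITH_COV: "no"
VERSION_FULL: v1.0.0-dev
```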
/cmd/stacker/bom.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "path"
6 | "path/filepath"
7 |
8 | "github.com/pkg/errors"
9 | cli "github.com/urfave/cli/v2"
10 | "stackerbuild.io/stacker-bom/pkg/bom"
11 | "stackerbuild.io/stacker-bom/pkg/distro"
12 | "stackerbuild.io/stacker-bom/pkg/fs"
13 | "stackerbuild.io/stacker/pkg/types"
14 | )
15 |
16 | var bomCmd = cli.Command{
17 | Name: "bom",
18 | Usage: "work with a software bill of materials (BOM)",
19 | Subcommands: []*cli.Command{
20 | &cli.Command{
21 | Name: "build",
22 | Action: doBomBuild,
23 | },
24 | &cli.Command{
25 | Name: "discover",
26 | Action: doBomDiscover,
27 | },
28 | &cli.Command{
29 | Name: "generate",
30 | Action: doBomGenerate,
31 | },
32 | &cli.Command{
33 | Name: "verify",
34 | Action: doBomVerify,
35 | },
36 | },
37 | }
38 |
39 | func doBomDiscover(ctx *cli.Context) error {
40 | author := "stacker-internal"
41 | org := "stacker-internal"
42 |
43 | if err := fs.Discover(author, org, types.InternalStackerDir+"/artifacts/installed-packages.json"); err != nil {
44 | return nil
45 | }
46 |
47 | return nil
48 | }
49 |
50 | func doBomGenerate(ctx *cli.Context) error {
51 | if ctx.Args().Len() != 1 {
52 | return errors.Errorf("wrong number of args for umount")
53 | }
54 |
55 | input := ctx.Args().Get(0)
56 |
57 | author := "stacker-internal"
58 | org := "stacker-internal"
59 | lic := "unknown"
60 |
61 | if err := distro.ParsePackage(input, author, org, lic, fmt.Sprintf("%s/artifacts/%s.json",
62 | types.InternalStackerDir, filepath.Base(input))); err != nil {
63 | return nil
64 | }
65 |
66 | return nil
67 | }
68 |
69 | // build/roll your own sbom document for a particular dest (file/dir)
70 | // by specifying details such as author, org, license, etc.
71 | func doBomBuild(ctx *cli.Context) error {
72 | if ctx.Args().Len() < 7 {
73 | return errors.Errorf("wrong number of args")
74 | }
75 |
76 | dest := ctx.Args().Get(0)
77 | author := ctx.Args().Get(1)
78 | org := ctx.Args().Get(2)
79 | license := ctx.Args().Get(3)
80 | pkgname := ctx.Args().Get(4)
81 | pkgversion := ctx.Args().Get(5)
82 | paths := []string{}
83 | for i := 6; i < ctx.Args().Len(); i++ {
84 | paths = append(paths, ctx.Args().Get(i))
85 | }
86 | out := path.Join(dest, fmt.Sprintf("doc-%s.spdx.json", pkgname))
87 | name := fmt.Sprintf("doc-%s", pkgname)
88 |
89 | return fs.BuildPackage(name, author, org, license, pkgname, pkgversion, paths, out)
90 | }
91 |
92 | func doBomVerify(ctx *cli.Context) error {
93 | if ctx.Args().Len() != 5 {
94 | return errors.Errorf("wrong number of args")
95 | }
96 |
97 | dest := ctx.Args().Get(0)
98 | namespace := ctx.Args().Get(1)
99 | name := ctx.Args().Get(2)
100 | author := ctx.Args().Get(3)
101 | org := ctx.Args().Get(4)
102 |
103 | // first merge all individual sbom artifacts that may have been generated
104 | iDir := types.InternalStackerDir
105 | if err := bom.MergeDocuments(iDir+"/artifacts", namespace, name, author, org, dest); err != nil {
106 | return err
107 | }
108 |
109 | // check against inventory
110 | if err := fs.GenerateInventory("/",
111 | []string{"/proc", "/sys", "/dev", "/etc/resolv.conf", iDir},
112 | iDir+"/artifacts/inventory.json"); err != nil {
113 | return err
114 | }
115 |
116 | return fs.Verify(dest, iDir+"/artifacts/inventory.json", "")
117 | }
118 |
--------------------------------------------------------------------------------
/cmd/stacker/build.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 |
6 | cli "github.com/urfave/cli/v2"
7 | "machinerun.io/atomfs/pkg/verity"
8 | "stackerbuild.io/stacker/pkg/stacker"
9 | "stackerbuild.io/stacker/pkg/types"
10 | )
11 |
12 | var buildCmd = cli.Command{
13 | Name: "build",
14 | Usage: "builds a new OCI image from a stacker yaml file",
15 | Action: doBuild,
16 | Flags: initBuildFlags(),
17 | Before: beforeBuild,
18 | }
19 |
20 | func initBuildFlags() []cli.Flag {
21 | return append(
22 | initCommonBuildFlags(),
23 | &cli.StringFlag{
24 | Name: "stacker-file",
25 | Aliases: []string{"f"},
26 | Usage: "the input stackerfile",
27 | Value: "stacker.yaml",
28 | })
29 | }
30 |
31 | func initCommonBuildFlags() []cli.Flag {
32 | return []cli.Flag{
33 | &cli.BoolFlag{
34 | Name: "no-cache",
35 | Usage: "don't use the previous build cache",
36 | },
37 | &cli.StringSliceFlag{
38 | Name: "substitute",
39 | Usage: "variable substitution in stackerfiles, FOO=bar format",
40 | },
41 | &cli.StringFlag{
42 | Name: "substitute-file",
43 | Usage: "file containing variable substitution in stackerfiles, 'FOO: bar' yaml format",
44 | },
45 | &cli.StringFlag{
46 | Name: "on-run-failure",
47 | Usage: "command to run inside container if run fails (useful for inspection)",
48 | },
49 | &cli.BoolFlag{
50 | Name: "shell-fail",
51 | Usage: fmt.Sprintf("exec %s inside the container if run fails (alias for --on-run-failure=%s)", stacker.DefaultShell, stacker.DefaultShell),
52 | },
53 | &cli.StringSliceFlag{
54 | Name: "layer-type",
55 | Usage: "set the output layer type (supported values: tar, squashfs, erofs); can be supplied multiple times",
56 | Value: cli.NewStringSlice("tar"),
57 | },
58 | &cli.BoolFlag{
59 | Name: "no-verity",
60 | Usage: "do not append dm-verity data to fs archives",
61 | Aliases: []string{"no-squashfs-verity"},
62 | },
63 | &cli.BoolFlag{
64 | Name: "require-hash",
65 | Usage: "require all remote imports to have a hash provided in stackerfiles",
66 | },
67 | &cli.BoolFlag{
68 | Name: "order-only",
69 | Usage: "show the build order without running the actual build",
70 | },
71 | &cli.StringFlag{
72 | Name: "annotations-namespace",
73 | Usage: "set OCI annotations namespace in the OCI image manifest",
74 | Value: "io.stackeroci",
75 | },
76 | }
77 | }
78 |
79 | func beforeBuild(ctx *cli.Context) error {
80 | // Validate build failure arguments
81 | err := validateBuildFailureFlags(ctx)
82 | if err != nil {
83 | return err
84 | }
85 |
86 | // Validate layer type
87 | err = validateLayerTypeFlags(ctx)
88 | if err != nil {
89 | return err
90 | }
91 | return nil
92 | }
93 |
94 | func newBuildArgs(ctx *cli.Context) (stacker.BuildArgs, error) {
95 | args := stacker.BuildArgs{
96 | Config: config,
97 | NoCache: ctx.Bool("no-cache"),
98 | Substitute: ctx.StringSlice("substitute"),
99 | SubstituteFile: ctx.String("substitute-file"),
100 | OnRunFailure: ctx.String("on-run-failure"),
101 | OrderOnly: ctx.Bool("order-only"),
102 | HashRequired: ctx.Bool("require-hash"),
103 | Progress: shouldShowProgress(ctx),
104 | AnnotationsNamespace: ctx.String("annotations-namespace"),
105 | }
106 | var err error
107 | verity := verity.VerityMetadata(!ctx.Bool("no-verity"))
108 | args.LayerTypes, err = types.NewLayerTypes(ctx.StringSlice("layer-type"), verity)
109 | return args, err
110 | }
111 |
112 | func doBuild(ctx *cli.Context) error {
113 | args, err := newBuildArgs(ctx)
114 | if err != nil {
115 | return err
116 | }
117 |
118 | builder := stacker.NewBuilder(&args)
119 | return builder.BuildMultiple([]string{ctx.String("stacker-file")})
120 | }
121 |
--------------------------------------------------------------------------------
/cmd/stacker/check.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io/fs"
5 | "os"
6 | "os/exec"
7 | "path/filepath"
8 |
9 | "github.com/pkg/errors"
10 | "github.com/pkg/xattr"
11 | cli "github.com/urfave/cli/v2"
12 | "stackerbuild.io/stacker/pkg/log"
13 | "stackerbuild.io/stacker/pkg/overlay"
14 | "stackerbuild.io/stacker/pkg/stacker"
15 | )
16 |
17 | var checkCmd = cli.Command{
18 | Name: "check",
19 | Usage: "checks that all runtime required things (like kernel features) are present",
20 | Action: doCheck,
21 | }
22 |
23 | func doCheck(ctx *cli.Context) error {
24 |
25 | kernel, err := stacker.KernelInfo()
26 | if err != nil {
27 | return errors.Wrapf(err, "couldn't get kernel info")
28 | }
29 |
30 | log.Infof("os/kernel: %s", kernel)
31 |
32 | if err := os.MkdirAll(config.RootFSDir, 0700); err != nil {
33 | return errors.Wrapf(err, "couldn't create rootfs dir for testing")
34 | }
35 |
36 | // internally there are many checks to avoid symlinks
37 | evalp, err := filepath.EvalSymlinks(config.RootFSDir)
38 | if err != nil {
39 | return errors.Wrapf(err, "%s: unable to evaluate path for symlinks", config.RootFSDir)
40 | }
41 |
42 | if evalp != config.RootFSDir {
43 | return errors.Errorf("%s: roots dir (--roots-dir) path uses symbolic links, use %q instead", config.RootFSDir, evalp)
44 | }
45 |
46 | // not all underlying filesystems are compatible
47 | fstype, err := stacker.MountInfo(config.RootFSDir)
48 | if err != nil {
49 | return errors.Wrapf(err, "%s: couldn't get fs type", config.RootFSDir)
50 | }
51 |
52 | log.Infof("%s %s", config.RootFSDir, fstype)
53 |
54 | if fstype == "NFS(6969)" {
55 | return errors.Errorf("roots dir (--roots-dir) path %s is not supported on NFS.", config.RootFSDir)
56 | }
57 |
58 | if e := verifyNewUIDMap(ctx); e != nil {
59 | return e
60 | }
61 |
62 | switch config.StorageType {
63 | case "overlay":
64 | return overlay.Check(config)
65 | default:
66 | return errors.Errorf("invalid storage type %v", config.StorageType)
67 | }
68 | }
69 |
70 | func verifyNewUIDMap(ctx *cli.Context) error {
71 | binFile, err := exec.LookPath("newuidmap")
72 | if err != nil {
73 | return errors.Wrapf(err, "newuidmap not found in path")
74 | }
75 |
76 | fileInfo, err := os.Stat(binFile)
77 | if err != nil {
78 | return errors.Wrapf(err, "couldn't stat file: %s", binFile)
79 | }
80 |
81 | if fileInfo.Mode()&0111 == 0 {
82 | return errors.Errorf("%s is not executable", binFile)
83 | }
84 |
85 | if fileInfo.Mode()&fs.ModeSetuid != 0 {
86 | // setuid-root is present, we are good!
87 | return nil
88 | }
89 |
90 | if e := checkForCap(binFile, "security.capability"); e != nil {
91 | return errors.Wrapf(e, "%s does not have either setuid-root or security caps", binFile)
92 | }
93 |
94 | return nil
95 | }
96 |
97 | func checkForCap(f string, cap string) error {
98 | caps, e := xattr.List(f)
99 | if e != nil {
100 | return errors.Errorf("could not read caps of %s", f)
101 | }
102 |
103 | for _, fcap := range caps {
104 | if fcap == cap {
105 | return nil
106 | }
107 | }
108 |
109 | return errors.Errorf("no security cap")
110 | }
111 |
--------------------------------------------------------------------------------
/cmd/stacker/chroot.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/pkg/errors"
8 | cli "github.com/urfave/cli/v2"
9 | "stackerbuild.io/stacker/pkg/container"
10 | "stackerbuild.io/stacker/pkg/log"
11 | "stackerbuild.io/stacker/pkg/stacker"
12 | "stackerbuild.io/stacker/pkg/types"
13 | )
14 |
15 | var chrootCmd = cli.Command{
16 | Name: "chroot",
17 | Usage: "run a command in a chroot",
18 | Aliases: []string{"exec"},
19 | Action: doChroot,
20 | Flags: []cli.Flag{
21 | &cli.StringFlag{
22 | Name: "stacker-file",
23 | Aliases: []string{"f"},
24 | Usage: "the input stackerfile",
25 | Value: "stacker.yaml",
26 | },
27 | &cli.StringSliceFlag{
28 | Name: "substitute",
29 | Usage: "variable substitution in stackerfiles, FOO=bar format",
30 | },
31 | },
32 | ArgsUsage: fmt.Sprintf(`[tag] [cmd]
33 |
34 | <tag> is the built tag in the stackerfile to chroot to, or the first tag if
35 | none is specified.
36 |
37 | <cmd> is the command to run, or %s if none is specified. To specify cmd,
38 | you must specify a tag.`, stacker.DefaultShell),
39 | }
40 |
41 | func doChroot(ctx *cli.Context) error {
42 | s, locks, err := stacker.NewStorage(config)
43 | if err != nil {
44 | return err
45 | }
46 | defer locks.Unlock()
47 |
48 | tag := ""
49 | if ctx.Args().Len() > 0 {
50 | tag = ctx.Args().Get(0)
51 | }
52 |
53 | cmd := []string{stacker.DefaultShell}
54 |
55 | if ctx.Args().Len() > 1 {
56 | cmd[0] = ctx.Args().Get(1)
57 | }
58 |
59 | file := ctx.String("f")
60 | _, err = os.Stat(file)
61 | if err != nil {
62 | if !os.IsNotExist(err) {
63 | return errors.Wrapf(err, "couldn't access %s", file)
64 | }
65 |
66 | log.Infof("couldn't find stacker file, chrooting to %s as best effort", tag)
67 | c, err := container.New(config, tag)
68 | if err != nil {
69 | return err
70 | }
71 | defer c.Close()
72 | return c.Execute(cmd, os.Stdin)
73 | }
74 | sf, err := types.NewStackerfile(file, false, ctx.StringSlice("substitute"))
75 | if err != nil {
76 | return err
77 | }
78 |
79 | if tag == "" {
80 | tag = sf.FileOrder[0]
81 | }
82 |
83 | layer, ok := sf.Get(tag)
84 | if !ok {
85 | return errors.Errorf("no layer %s in stackerfile", tag)
86 | }
87 |
88 | name, cleanup, err := s.TemporaryWritableSnapshot(tag)
89 | if err != nil {
90 | return err
91 | }
92 | defer cleanup()
93 |
94 | log.Infof("This chroot is temporary, any changes will be destroyed when it exits.")
95 | c, err := container.New(config, name)
96 | if err != nil {
97 | return err
98 | }
99 | defer c.Close()
100 |
101 | err = stacker.SetupBuildContainerConfig(config, s, c, types.InternalStackerDir, name)
102 | if err != nil {
103 | return err
104 | }
105 | err = stacker.SetupLayerConfig(config, c, layer, types.InternalStackerDir, name)
106 | if err != nil {
107 | return err
108 | }
109 |
110 | return c.Execute(cmd, os.Stdin)
111 | }
112 |
--------------------------------------------------------------------------------
/cmd/stacker/clean.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/pkg/errors"
7 | cli "github.com/urfave/cli/v2"
8 | "stackerbuild.io/stacker/pkg/log"
9 | "stackerbuild.io/stacker/pkg/stacker"
10 | )
11 |
12 | var cleanCmd = cli.Command{
13 | Name: "clean",
14 | Usage: "cleans up after a `stacker build`",
15 | Action: doClean,
16 | Flags: []cli.Flag{
17 | &cli.BoolFlag{
18 | Name: "all",
19 | Usage: "no-op; this used to do soemthing, and is left in for compatibility",
20 | },
21 | },
22 | }
23 |
24 | func doClean(ctx *cli.Context) error {
25 | fail := false
26 |
27 | if _, err := os.Stat(config.RootFSDir); !os.IsNotExist(err) {
28 | s, locks, err := stacker.NewStorage(config)
29 | if err != nil {
30 | return err
31 | }
32 | err = s.Clean()
33 | if err != nil {
34 | log.Infof("problem cleaning roots %v", err)
35 | fail = true
36 | }
37 | locks.Unlock()
38 | }
39 |
40 | if err := os.RemoveAll(config.OCIDir); err != nil {
41 | log.Infof("problem cleaning oci dir %v", err)
42 | fail = true
43 | }
44 |
45 | if err := os.RemoveAll(config.StackerDir); err != nil {
46 | if !os.IsNotExist(err) {
47 | log.Infof("error deleting stacker dir: %v", err)
48 | fail = true
49 | }
50 | }
51 |
52 | if fail {
53 | return errors.Errorf("cleaning failed")
54 | }
55 |
56 | return nil
57 | }
58 |
--------------------------------------------------------------------------------
/cmd/stacker/convert.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "log"
5 |
6 | cli "github.com/urfave/cli/v2"
7 | "stackerbuild.io/stacker/pkg/stacker"
8 | )
9 |
10 | var convertCmd = cli.Command{
11 | Name: "convert",
12 | Usage: "converts a Dockerfile into a stacker yaml file (experimental, best-effort)",
13 | Action: doConvert,
14 | Flags: initConvertFlags(),
15 | Before: beforeConvert,
16 | }
17 |
18 | func initConvertFlags() []cli.Flag {
19 | return append(
20 | initCommonConvertFlags(),
21 | &cli.StringFlag{
22 | Name: "docker-file",
23 | Aliases: []string{"i"},
24 | Usage: "the input Dockerfile",
25 | Value: "Dockerfile",
26 | },
27 | &cli.StringFlag{
28 | Name: "output-file",
29 | Aliases: []string{"o"},
30 | Usage: "the output stacker file",
31 | Value: "stacker.yaml",
32 | },
33 | &cli.StringFlag{
34 | Name: "substitute-file",
35 | Aliases: []string{"s"},
36 | Usage: "the output file containing detected substitutions",
37 | Value: "stacker-subs.yaml",
38 | },
39 | )
40 | }
41 |
42 | func initCommonConvertFlags() []cli.Flag {
43 | return []cli.Flag{}
44 | }
45 |
46 | func beforeConvert(ctx *cli.Context) error {
47 | // Validate build failure arguments
48 |
49 | return nil
50 | }
51 |
52 | func newConvertArgs(ctx *cli.Context) (stacker.ConvertArgs, error) {
53 | args := stacker.ConvertArgs{
54 | Config: config,
55 | Progress: shouldShowProgress(ctx),
56 | InputFile: ctx.String("docker-file"),
57 | OutputFile: ctx.String("output-file"),
58 | SubstituteFile: ctx.String("substitute-file"),
59 | }
60 | return args, nil
61 | }
62 |
63 | func doConvert(ctx *cli.Context) error {
64 | args, err := newConvertArgs(ctx)
65 | if err != nil {
66 | return err
67 | }
68 |
69 | converter := stacker.NewConverter(&args)
70 | if err = converter.Convert(); err != nil {
71 | log.Fatalf("conversion failed: %e", err)
72 | }
73 |
74 | return nil
75 | }
76 |
--------------------------------------------------------------------------------
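To illustrate the mapping `convert` aims for: given a trivial Dockerfile consisting of `FROM ubuntu:latest` and a `RUN apt-get install -y curl` step, a hand-written stacker equivalent would look roughly like the sketch below (`FROM` becomes `from:` and `RUN` becomes `run:`); the converter's actual target naming and the substitutions it records in `stacker-subs.yaml` may differ:

```yaml
# Hand-written equivalent of a minimal Dockerfile; not the converter's literal output.
app:
  from:
    type: docker
    url: docker://docker.io/library/ubuntu:latest
  run: |
    apt-get update
    apt-get install -y curl
```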
/cmd/stacker/gc.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | cli "github.com/urfave/cli/v2"
5 | "stackerbuild.io/stacker/pkg/stacker"
6 | )
7 |
8 | var gcCmd = cli.Command{
9 | Name: "gc",
10 | Usage: "gc unused OCI imports/outputs snapshots",
11 | Action: doGC,
12 | }
13 |
14 | func doGC(ctx *cli.Context) error {
15 | s, locks, err := stacker.NewStorage(config)
16 | if err != nil {
17 | return err
18 | }
19 | defer locks.Unlock()
20 | return s.GC()
21 | }
22 |
--------------------------------------------------------------------------------
/cmd/stacker/grab.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "strings"
6 |
7 | "github.com/pkg/errors"
8 | cli "github.com/urfave/cli/v2"
9 | "stackerbuild.io/stacker/pkg/stacker"
10 | )
11 |
12 | var grabCmd = cli.Command{
13 | Name: "grab",
14 | Usage: "grabs a file from the layer's filesystem",
15 | Action: doGrab,
16 | ArgsUsage: `<tag>:<path>
17 |
18 | <tag> is the tag in a built stacker image to extract the file from.
19 |
20 | <path> is the path to extract (relative to /) in the image's rootfs.`,
21 | }
22 |
23 | func doGrab(ctx *cli.Context) error {
24 | s, locks, err := stacker.NewStorage(config)
25 | if err != nil {
26 | return err
27 | }
28 | defer locks.Unlock()
29 |
30 | parts := strings.SplitN(ctx.Args().First(), ":", 2)
31 | if len(parts) < 2 {
32 | return errors.Errorf("invalid grab argument: %s", ctx.Args().First())
33 | }
34 |
35 | name, cleanup, err := s.TemporaryWritableSnapshot(parts[0])
36 | if err != nil {
37 | return err
38 | }
39 | defer cleanup()
40 |
41 | cwd, err := os.Getwd()
42 | if err != nil {
43 | return err
44 | }
45 |
46 | return stacker.Grab(config, s, name, parts[1], cwd, "", nil, -1, -1)
47 | }
48 |
--------------------------------------------------------------------------------
/cmd/stacker/inspect.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 |
8 | "github.com/dustin/go-humanize"
9 | ispec "github.com/opencontainers/image-spec/specs-go/v1"
10 | "github.com/opencontainers/umoci"
11 | "github.com/opencontainers/umoci/oci/casext"
12 | "github.com/pkg/errors"
13 | cli "github.com/urfave/cli/v2"
14 | stackeroci "machinerun.io/atomfs/pkg/oci"
15 | )
16 |
17 | var inspectCmd = cli.Command{
18 | Name: "inspect",
19 | Usage: "print the json representation of an OCI image",
20 | Action: doInspect,
21 | Flags: []cli.Flag{},
22 | ArgsUsage: `[tag]
23 |
24 | [tag] is the tag in the stackerfile to inspect. If none is supplied, inspect
25 | prints the information on all tags.`,
26 | }
27 |
28 | func doInspect(ctx *cli.Context) error {
29 | oci, err := umoci.OpenLayout(config.OCIDir)
30 | if err != nil {
31 | return err
32 | }
33 | defer oci.Close()
34 |
35 | arg := ctx.Args().Get(0)
36 | if arg != "" {
37 | return renderManifest(oci, arg)
38 | }
39 |
40 | tags, err := oci.ListReferences(context.Background())
41 | if err != nil {
42 | return err
43 | }
44 |
45 | for _, t := range tags {
46 | err = renderManifest(oci, t)
47 | if err != nil {
48 | return err
49 | }
50 | }
51 |
52 | return nil
53 | }
54 |
55 | func renderManifest(oci casext.Engine, name string) error {
56 | man, err := stackeroci.LookupManifest(oci, name)
57 | if err != nil {
58 | return err
59 | }
60 |
61 | fmt.Printf("%s\n", name)
62 | for i, l := range man.Layers {
63 | fmt.Printf("\tlayer %d: %s... (%s, %s)\n", i, l.Digest.Encoded()[:12], humanize.Bytes(uint64(l.Size)), l.MediaType)
64 | }
65 |
66 | if len(man.Annotations) > 0 {
67 | fmt.Printf("Annotations:\n")
68 | for k, v := range man.Annotations {
69 | fmt.Printf(" %s: %s\n", k, v)
70 | }
71 | }
72 |
73 | configBlob, err := oci.FromDescriptor(context.Background(), man.Config)
74 | if err != nil {
75 | return err
76 | }
77 |
78 | if configBlob.Descriptor.MediaType != ispec.MediaTypeImageConfig {
79 | return errors.Errorf("bad image config type: %s", configBlob.Descriptor.MediaType)
80 | }
81 |
82 | config := configBlob.Data.(ispec.Image)
83 |
84 | fmt.Printf("Image config:\n")
85 | pretty, err := json.MarshalIndent(config, "", " ")
86 | if err != nil {
87 | return err
88 | }
89 | fmt.Println(string(pretty))
90 | return nil
91 | }
92 |
--------------------------------------------------------------------------------
/cmd/stacker/lxc-wrapper/.gitignore:
--------------------------------------------------------------------------------
1 | lxc-wrapper
2 |
--------------------------------------------------------------------------------
/cmd/stacker/lxc-wrapper/Makefile:
--------------------------------------------------------------------------------
1 | clean:
2 | -rm lxc-wrapper
3 |
--------------------------------------------------------------------------------
/cmd/stacker/main_embed.go:
--------------------------------------------------------------------------------
1 | //go:build !skipembed
2 |
3 | package main
4 |
5 | import "embed"
6 |
7 | //go:embed lxc-wrapper/lxc-wrapper
8 | var embeddedFS embed.FS
9 |
10 | const hasEmbedded = true
11 |
--------------------------------------------------------------------------------
/cmd/stacker/main_noembed.go:
--------------------------------------------------------------------------------
1 | //go:build skipembed
2 |
3 | package main
4 |
5 | import "embed"
6 |
7 | var embeddedFS embed.FS
8 |
9 | const hasEmbedded = false
10 |
--------------------------------------------------------------------------------
/cmd/stacker/recursive-build.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | cli "github.com/urfave/cli/v2"
5 | "stackerbuild.io/stacker/pkg/lib"
6 | "stackerbuild.io/stacker/pkg/stacker"
7 | )
8 |
9 | const stackerFilePathRegex = "\\/stacker.yaml$"
10 |
11 | var recursiveBuildCmd = cli.Command{
12 | Name: "recursive-build",
13 | Usage: "finds stacker yaml files under a directory and builds all OCI layers they define",
14 | Action: doRecursiveBuild,
15 | Flags: initRecursiveBuildFlags(),
16 | Before: beforeRecursiveBuild,
17 | }
18 |
19 | func initRecursiveBuildFlags() []cli.Flag {
20 | return append(
21 | initCommonBuildFlags(),
22 | &cli.StringFlag{
23 | Name: "stacker-file-pattern",
24 | Aliases: []string{"p"},
25 | Usage: "regex pattern to use when searching for stackerfile paths",
26 | Value: stackerFilePathRegex,
27 | },
28 | &cli.StringFlag{
29 | Name: "search-dir",
30 | Aliases: []string{"d"},
31 | Usage: "directory under which to search for stackerfiles to build",
32 | Value: ".",
33 | })
34 | }
35 |
36 | func beforeRecursiveBuild(ctx *cli.Context) error {
37 |
38 | // Validate build failure arguments
39 | err := validateBuildFailureFlags(ctx)
40 | if err != nil {
41 | return err
42 | }
43 |
44 | // Validate layer type
45 | err = validateLayerTypeFlags(ctx)
46 | if err != nil {
47 | return err
48 | }
49 |
50 | // Validate search arguments
51 | err = validateFileSearchFlags(ctx)
52 | if err != nil {
53 | return err
54 | }
55 |
56 | return nil
57 | }
58 |
59 | func doRecursiveBuild(ctx *cli.Context) error {
60 | args, err := newBuildArgs(ctx)
61 | if err != nil {
62 | return err
63 | }
64 |
65 | stackerFiles, err := lib.FindFiles(ctx.String("search-dir"), ctx.String("stacker-file-pattern"))
66 | if err != nil {
67 | return err
68 | }
69 |
70 | builder := stacker.NewBuilder(&args)
71 | return builder.BuildMultiple(stackerFiles)
72 | }
73 |
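
A minimal sketch (not part of the repo) of how the default `stacker-file-pattern` regex above behaves; `lib.FindFiles` compiles the pattern and applies the same `MatchString` test to every non-directory path it walks under `search-dir`:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // default pattern from recursive-build.go: any path ending in "/stacker.yaml"
        re := regexp.MustCompile(`\/stacker.yaml$`)

        fmt.Println(re.MatchString("/src/app/stacker.yaml"))     // true
        fmt.Println(re.MatchString("/src/app/stacker.yaml.bak")) // false: "$" anchors the match to the end
        fmt.Println(re.MatchString("/src/my-stacker.yaml"))      // false: no '/' directly before "stacker.yaml"
    }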
--------------------------------------------------------------------------------
/cmd/stacker/unpriv-setup.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "strconv"
7 |
8 | "github.com/pkg/errors"
9 | cli "github.com/urfave/cli/v2"
10 | "stackerbuild.io/stacker/pkg/stacker"
11 | )
12 |
13 | var unprivSetupCmd = cli.Command{
14 | Name: "unpriv-setup",
15 | Usage: "do the necessary unprivileged setup for stacker build to work without root",
16 | Action: doUnprivSetup,
17 | Before: beforeUnprivSetup,
18 | Flags: []cli.Flag{
19 | &cli.StringFlag{
20 | Name: "uid",
21 | Usage: "the user to do setup for (defaults to $SUDO_UID from env)",
22 | Value: os.Getenv("SUDO_UID"),
23 | },
24 | &cli.StringFlag{
25 | Name: "gid",
26 | Usage: "the group to do setup for (defaults to $SUDO_GID from env)",
27 | Value: os.Getenv("SUDO_GID"),
28 | },
29 | &cli.StringFlag{
30 | Name: "username",
31 | Usage: "the username to do setup for (defaults to $SUDO_USER from env)",
32 | Value: os.Getenv("SUDO_USER"),
33 | },
34 | },
35 | }
36 |
37 | func beforeUnprivSetup(ctx *cli.Context) error {
38 | if ctx.String("uid") == "" {
39 | return errors.Errorf("please specify --uid or run unpriv-setup with sudo")
40 | }
41 |
42 | if ctx.String("gid") == "" {
43 | return errors.Errorf("please specify --gid or run unpriv-setup with sudo")
44 | }
45 |
46 | if ctx.String("username") == "" {
47 | return errors.Errorf("please specify --username or run unpriv-setup with sudo")
48 | }
49 |
50 | return nil
51 | }
52 |
53 | func recursiveChown(dir string, uid int, gid int) error {
54 | return filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
55 | if err != nil {
56 | return err
57 | }
58 |
59 | return os.Chown(p, uid, gid)
60 | })
61 | }
62 |
63 | func doUnprivSetup(ctx *cli.Context) error {
64 | _, err := os.Stat(config.StackerDir)
65 | if err == nil {
66 | return errors.Errorf("stacker dir %s already exists, aborting setup", config.StackerDir)
67 | }
68 |
69 | uid, err := strconv.Atoi(ctx.String("uid"))
70 | if err != nil {
71 | return errors.Wrapf(err, "couldn't convert uid %s", ctx.String("uid"))
72 | }
73 |
74 | gid, err := strconv.Atoi(ctx.String("gid"))
75 | if err != nil {
76 | return errors.Wrapf(err, "couldn't convert gid %s", ctx.String("gid"))
77 | }
78 |
79 | err = os.MkdirAll(config.StackerDir, 0755)
80 | if err != nil {
81 | return err
82 | }
83 |
84 | err = os.MkdirAll(config.RootFSDir, 0755)
85 | if err != nil {
86 | return err
87 | }
88 |
89 | username := ctx.String("username")
90 |
91 | err = stacker.UnprivSetup(config, username, uid, gid)
92 | if err != nil {
93 | return err
94 | }
95 |
96 | err = recursiveChown(config.StackerDir, uid, gid)
97 | if err != nil {
98 | return err
99 | }
100 |
101 | return recursiveChown(config.RootFSDir, uid, gid)
102 | }
103 |
--------------------------------------------------------------------------------
/cmd/stacker/validate.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "regexp"
6 | "strings"
7 |
8 | "github.com/pkg/errors"
9 | cli "github.com/urfave/cli/v2"
10 | "stackerbuild.io/stacker/pkg/stacker"
11 | )
12 |
13 | /*
14 | check that the --roots-dir name doesn't contain ':', as it will interfere with
15 | overlay mount options, which use ':' as a separator
16 | */
18 | func validateRootsDirName(rootsDir string) error {
19 | if strings.Contains(rootsDir, ":") {
20 | return errors.Errorf("using ':' in the name of --roots-dir (%s) is forbidden due to overlay constraints", rootsDir)
21 | }
22 |
23 | return nil
24 | }
25 |
26 | func validateBuildFailureFlags(ctx *cli.Context) error {
27 | if ctx.Bool("shell-fail") {
28 | askedFor := ctx.String("on-run-failure")
29 | if askedFor != "" && askedFor != stacker.DefaultShell {
30 | return errors.Errorf("--shell-fail is incompatible with --on-run-failure=%s", askedFor)
31 | }
32 | err := ctx.Set("on-run-failure", stacker.DefaultShell)
33 | if err != nil {
34 | return err
35 | }
36 | }
37 |
38 | return nil
39 | }
40 |
41 | func validateLayerTypeFlags(ctx *cli.Context) error {
42 | layerTypes := ctx.StringSlice("layer-type")
43 | if len(layerTypes) == 0 {
44 | return errors.Errorf("must specify at least one output --layer-type")
45 | }
46 |
47 | for _, layerType := range layerTypes {
48 | switch layerType {
49 | case "tar":
50 | break
51 | case "squashfs":
52 | break
53 | case "erofs":
54 | break
55 | default:
56 | return errors.Errorf("unknown layer type: %s", layerType)
57 | }
58 | }
59 |
60 | return nil
61 | }
62 |
63 | func validateFileSearchFlags(ctx *cli.Context) error {
64 | // Use the current working directory if base search directory is "."
65 | if ctx.String("search-dir") == "." {
66 | wd, err := os.Getwd()
67 | if err != nil {
68 | return err
69 | }
70 | err = ctx.Set("search-dir", wd)
71 | if err != nil {
72 | return err
73 | }
74 | }
75 |
76 | // Ensure the base search directory exists
77 | if _, err := os.Lstat(ctx.String("search-dir")); err != nil {
78 | return err
79 | }
80 |
81 | // Ensure the stacker-file-pattern variable compiles as a regex
82 | if _, err := regexp.Compile(ctx.String("stacker-file-pattern")); err != nil {
83 | return err
84 | }
85 |
86 | return nil
87 | }
88 |
--------------------------------------------------------------------------------
/doc/install.md:
--------------------------------------------------------------------------------
1 | ## Building and Installing Stacker
2 |
3 | ### Go Dependency
4 |
5 | Stacker requires at least go 1.20.
6 |
7 | #### Ubuntu 22.04
8 |
9 | On Ubuntu 22.04 you can install Go using the instructions at:
10 | https://github.com/golang/go/wiki/Ubuntu
11 |
12 | #### Fedora 31
13 |
14 | On Fedora 31 you can install Go with the following command:
15 |
16 | sudo dnf install golang
17 |
18 | #### Other Distributions
19 |
20 | If Go is not already packaged for your Linux distribution, you can get the
21 | latest Go version here:
22 | https://golang.org/dl/#stable
23 |
24 | Go can be installed using the instructions on the official Go website:
25 | https://golang.org/doc/install#install
26 |
27 | ### Other Dependencies
28 |
29 | The other build dependencies can be satisfied by running:
30 |
31 | #### **Ubuntu 22.04**
32 |
33 |
34 | This script will install the required library and build-time dependencies.
35 | Once installed, it will prepare the system for the build by fetching golang
36 | tools, downloading go modules, and preparing a mirror of remote OCI images.
37 |
38 |
39 | sudo ./install-build-deps.sh
40 |
41 | **To run `make check` you will also need:**
42 |
43 | **umoci** - https://github.com/opencontainers/umoci
44 |
45 | Since the path **/usr/local** is owned by root, when you reach the step to run **make install**, you will need to run it with **sudo**.
46 |
47 | `make check` requires the **golangci-lint** binary to be present in $GOPATH/bin
48 |
49 | Some tests run with elevated privileges and use git, so git will complain that the stacker folder is unsafe because it is owned by your user. To avoid this, tell git to treat the folder as safe by adding the following to your git config file (**.gitconfig**), using the path to your local stacker checkout. For example:
50 |
51 | [safe]
52 | directory = /home/chofnar/github/stacker
53 |
54 |
55 | #### **Fedora 31**
56 |
57 | The other build dependencies can be satisfied with the following command and
58 | packages:
59 |
60 | sudo dnf install lxc-devel libcap-devel libacl-devel gpgme-devel
61 | sudo dnf install bats jq
62 |
63 | ### Building the Stacker Binary
64 |
65 | Finally, once you have the build dependencies, stacker can be built with a
66 | simple `make stacker`. The stacker binary will be output as `./stacker`.
67 |
--------------------------------------------------------------------------------
/doc/running.md:
--------------------------------------------------------------------------------
1 | ## Runtime environment
2 |
3 | Stacker execs various tools in order to accomplish its goals.
4 |
5 | For example, in order to generate squashfs images, the `mksquashfs` binary
6 | needs to be present in `$PATH`.
7 |
8 | `stacker` builds things in the host's network namespace, re-exports any of
9 | `HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY` and their lowercase counterparts inside
10 | the environment, and bind mounts in the host's /etc/resolv.conf. This means
11 | that the network experience inside the container should be identical to the
12 | network experience that is on the host. Since stacker is only used for building
13 | images, this is safe and most intuitive for users on corporate networks with
14 | complicated proxy and other setups. However, it does mean that packaging that
15 | expects to be able to modify things in `/sys` will fail, since `/sys` is bind
16 | mounted from the host's `/sys` (sysfs cannot be mounted in a network namespace
17 | that a user doesn't own).
18 |
19 | When running as an unprivileged user, stacker will attempt to run things inside
20 | a user namespace owned by the user that executed the command, and will try to
21 | map 65k user and group ids to meet the POSIX standard. This means that
22 | `/etc/sub{u,g}id` should be configured with enough uids to map things
23 | correctly. This configuration can be done automatically via `stacker
24 | unpriv-setup`. See below for discussion on unprivileged use with particular
25 | storage backends.
26 |
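For reference, a minimal sketch of the resulting `/etc/subuid` entry (the matching
`/etc/subgid` entry looks the same); the format is `name:first-id:count`, and the
exact starting id varies per system:

    youruser:100000:65536
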
27 | ### What's inside the container
28 |
29 | Note that unlike other container tools, stacker generally assumes what's inside
30 | the container is a "sane" rootfs, i.e. it can exec `sh` to implement the `run:`
31 | section.
32 |
33 | ### The overlay filesystem
34 |
35 | Stacker cannot itself be backed by an underlying overlayfs, since stacker needs
36 | to create whiteout files, and the kernel (rightfully) forbids manual creation
37 | of whiteout files on overlay filesystems.
38 |
39 | There are no additional userspace dependencies required to use the
40 | overlayfs backend.
41 |
42 | #### The overlay backend and the kernel
43 |
44 | For privileged use, the overlayfs backend should work on any reasonably recent
45 | kernel (say >= 4.4).
46 |
47 | For unprivileged use, the overlayfs backend requires one fairly new kernel
48 | change, a3c751a50fe6 ("vfs: allow unprivileged whiteout creation"). This is
49 | available in all kernels >= 5.8, and may be backported to some distribution
50 | kernels. It also requires that unprivileged users be able to mount overlay
51 | filesystems, something which is allowed in Ubuntu kernels and in upstream
52 | kernels as of 459c7c565ac3 ("ovl: unprivieged mounts"), which was released in
53 | 5.11.
54 |
55 | Stacker has checks to ensure that it can run with all these environment
56 | requirements, and will fail fast if it can't do something it should be able to
57 | do.
58 |
--------------------------------------------------------------------------------
/doc/talks/FOSDEM_2019.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/FOSDEM_2019.pdf
--------------------------------------------------------------------------------
/doc/talks/OSS_EU_2018.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/OSS_EU_2018.pdf
--------------------------------------------------------------------------------
/doc/talks/OSS_NA_2018.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/OSS_NA_2018.pdf
--------------------------------------------------------------------------------
/doc/talks/stacker101/1.README.md:
--------------------------------------------------------------------------------
1 | # README for container 1
2 |
3 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/1.stacker.yaml:
--------------------------------------------------------------------------------
1 | test:
2 | from:
3 | type: scratch
4 | import:
5 | - path: 1.README.md
6 | dest: /
7 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/2.stacker.yaml:
--------------------------------------------------------------------------------
1 | test:
2 | from:
3 | type: docker
4 | url: docker://$YOUR_REGISTRY:5000/c3/minbase@sha256:af4c87a154aea6c5d6fd578f234c70d1f31d42609ddd096f212cbb9d8dbde9be
5 | run: |
6 | echo "fun" > what-is-hacking
7 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/3.stacker.yaml:
--------------------------------------------------------------------------------
1 | test:
2 | from:
3 | type: docker
4 | url: docker://$YOUR_REGISTRY:5000/c3/minbase:1.0.72
5 | import:
6 | - path: example.tgz
7 | run: |
8 | tar zxvf /stacker/imports/example.tgz example.stacker.yaml
9 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/4.stacker.yaml:
--------------------------------------------------------------------------------
1 | build_it:
2 | from:
3 | type: docker
4 | url: docker://$YOUR_REGISTRY/c3/godev:${{C3_VERSION}}
5 | build_only: true
6 | run: |
7 | git clone $url foo
8 | cd foo
9 | make install
10 |
11 | finalcontainer:
12 | from: ...
13 | import:
14 | - path: stacker://build_it/usr/bin/it
15 | dest: /usr/bin/it
16 | run: |
17 | # do whatever else here, no need to copy the imports because we used 'dest' above
18 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/Makefile:
--------------------------------------------------------------------------------
1 |
2 | setup: exampletarball
3 |
4 | exampletarball:
5 | tar zcvf example.tgz *.stacker.yaml
6 |
7 |
8 | %: %.stacker.yaml exampletarball
9 | stacker --oci-dir $@ build $(BARG) -f $<
10 | tree -h $@
11 | @echo "press enter to run oci-viewer"
12 | @read a
13 | ~/bin/ociv $@
14 |
15 | clean:
16 | rm -rf 1 2 3 example.tgz
17 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/README-TALK.txt:
--------------------------------------------------------------------------------
1 | This talk uses `patat`, https://github.com/jaspervdj/patat which is a markdown
2 | based presentation tool that supports images on some terminals (like MacOS'
3 | iTerm2) and supports running code during the presentation to generate output,
4 | which is used here to display the listings of stacker.yaml examples that are
5 | kept separate to allow building them with the included makefile.
6 |
7 | -mike
8 |
--------------------------------------------------------------------------------
/doc/talks/stacker101/arch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/stacker101/arch.jpg
--------------------------------------------------------------------------------
/doc/talks/stacker101/flow3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/stacker101/flow3.png
--------------------------------------------------------------------------------
/doc/talks/stacker101/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/stacker101/overview.png
--------------------------------------------------------------------------------
/doc/talks/stacker101/standards-bodies.md:
--------------------------------------------------------------------------------
1 | ---
2 | marp: true
3 | theme: gaia
4 | ---
5 |
6 | # Open Standards For Datacenter Software
7 |
8 | Ramkumar Chinchani
9 | rchincha@cisco.com
10 |
11 | ---
12 |
13 | # Why Open Standards?
14 |
15 | * Avoid vendor lock-in
16 | * Large ecosystem
17 | * Pace of innovation
18 |
19 | ---
20 |
21 | 
22 |
23 | ---
24 |
25 | # Regulatory Requirements
26 |
27 | * [Executive Order on Improving the Nation’s Cybersecurity](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/)
28 | * software bill of materials (SBOM) - SPDX
29 |
30 | * [FedRamp](https://www.fedramp.gov)
31 | * [FEDRAMP VULNERABILITY SCANNING REQUIREMENTS FOR CONTAINERS](https://www.fedramp.gov/assets/resources/documents/Vulnerability_Scanning_Requirements_for_Containers.pdf)
32 |
33 | * [NIST](https://www.nist.gov/)
34 | * [NIST Special Publication 800-190/Application Container Security Guide](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-190.pdf)
35 |
36 | ---
37 |
38 | # `CNCF` Ecosystem
39 | https://cncf.landscape2.io/
40 |
41 | 
42 |
43 | ---
44 |
45 | # `OCI` "Standards"
46 |
47 | * _image spec_
48 | * https://github.com/opencontainers/image-spec
49 | * _runtime spec_
50 | * https://github.com/opencontainers/runtime-spec
51 | * _distribution spec_
52 | * https://github.com/opencontainers/distribution-spec
53 |
54 | ---
55 |
56 | # `OCI` Ecosystem
57 |
58 | | Purpose | Redhat | Microsoft | Google | Docker| Cisco |
59 | | --- | --- | --- | --- | --- | --- |
60 | | Build | `buildah` | | `bazel` | `buildx` | `stacker` |
61 | | Push/pull | `skopeo` | `oras` | `crane` | _`docker`_ | |
62 | | Run | `podman` | | | `docker` | |
63 | | Sign | `cosign` | `notation` | `cosign` | `notaryv1` | |
64 | | Registry | _`quay`_ | `acr` | _`gar`_ | _`distribution`_ | `zot` |
65 |
66 | ---
67 |
68 | # `CNCF` Meets `OCI`
69 |
70 | 
71 |
72 | ---
73 |
74 | # Putting Everything Together
75 |
76 | 
--------------------------------------------------------------------------------
/doc/talks/stacker101/standards.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-stacker/stacker/613a7e3ec90b630a208755b6d1ab4fbec5823bce/doc/talks/stacker101/standards.png
--------------------------------------------------------------------------------
/doc/tricks.md:
--------------------------------------------------------------------------------
1 | ## Tips and Tricks
2 |
3 | #### Building a layer from scratch
4 |
5 | There are a couple of cases where it may be useful to build a layer from
6 | scratch. For example, to derive a new base install of an OS or to build a
7 | "tarball" type image which just carries data and will not actually be run by a
8 | container runtime.
9 |
10 | The way to accomplish this in stacker is to use a build only layer (i.e. a
11 | layer that does not get emitted into the final OCI image, perhaps containing
12 | assets or something that will be used by the final container).
13 |
14 | The best way to accomplish this is as follows:
15 |
16 | build:
17 | from:
18 | type: docker
19 | url: docker://ubuntu:latest
20 | run: |
21 | touch /tmp/first
22 | touch /tmp/second
23 | tar -C /tmp -cv -f /contents.tar first second
24 | build_only: true
25 | contents:
26 | from:
27 | type: tar
28 | url: stacker://build/contents.tar
29 |
30 | Or e.g. to bootstrap a base layer for CentOS 7:
31 |
32 | build:
33 | from:
34 | type: docker
35 | url: docker://ubuntu:latest
36 | run: |
37 | yum -y --installroot=/rootfs --nogpgcheck install
38 | tar -C rootfs -zcf /rootfs.tar .
39 | build_only: true
40 | contents:
41 | from:
42 | type: tar
43 | url: stacker://build/rootfs.tar
44 |
45 | These work by creating the base for the system in a build container with all
46 | the utilities available needed to manipulate that base, and then asking stacker
47 | to create a layer based on this tarball, without actually running anything
48 | inside of the layer (which means e.g. absence of a shell or libc or whatever is
49 | fine).
50 |
51 | Another way to accomplish something similar is to use a [distroless](https://github.com/GoogleContainerTools/distroless) layer:
52 |
53 | build:
54 | from:
55 | type: docker
56 | url: docker://ubuntu:latest
57 | binds:
58 | - /tmp/dir_to_overlay -> /dir_to_overlay
59 | run: |
60 | touch /dir_to_overlay/binaryfile
61 | build_only: true
62 | contents:
63 | from:
64 | type: docker
65 | url: docker://gcr.io/distroless/base
66 | overlay_dirs:
67 | - source: /tmp/dir_to_overlay
68 | dest: /dir_to_overlay
69 |
70 | You can use the first layer as a build environment and copy your binary into a bind-mounted folder; then use `overlay_dirs` with that same folder to place the binary in the distroless layer.
--------------------------------------------------------------------------------
/install-build-deps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -o pipefail
3 | set -o errexit
4 |
5 | installdeps_fedora() {
6 | sudo dnf install \
7 | jq \
8 | lxc-devel \
9 | libcap-devel \
10 | libacl-devel
11 | # skopeo deps
12 | sudo dnf install \
13 | gpgme-devel \
14 | libassuan-devel \
15 | btrfs-progs-devel \
16 | device-mapper-devel
17 | if ! command -v go 2>/dev/null; then
18 | sudo dnf install golang
19 | go version
20 | fi
21 | sudo dnf install bsdtar
22 | }
23 |
24 | installdeps_ubuntu() {
25 | PKGS=(
26 | build-essential
27 | cryptsetup-bin
28 | jq
29 | libacl1-dev
30 | libcap-dev
31 | libcryptsetup-dev
32 | libdevmapper-dev
33 | liblxc-dev
34 | libpam0g-dev
35 | libseccomp-dev
36 | libselinux1-dev
37 | libssl-dev
38 | libzstd-dev
39 | lxc-dev
40 | lxc-utils
41 | parallel
42 | pkg-config
43 | squashfs-tools
44 | squashfuse
45 | libarchive-tools
46 | shellcheck
47 | erofs-utils
48 | erofsfuse
49 | )
50 |
51 | case "$VERSION_ID" in
52 | 22.04)
53 | sudo add-apt-repository -y ppa:project-machine/squashfuse
54 | ;;
55 | 24.04)
56 | # lp:2080069
57 | # temporarily add puzzleos/dev to pickup lxc-dev package which
58 | # provides static liblxc.a
59 | sudo add-apt-repository -y ppa:puzzleos/dev
60 |
61 | # allow array to expand again
62 | #shellcheck disable=2206
63 | PKGS=( ${PKGS[*]} libsystemd-dev )
64 |
65 | # 24.04 has additional apparmor restrictions, probably doesn't apply
66 | # for root in github VM but developers will run into this
67 | enable_userns
68 | ;;
69 | esac
70 |
71 | # allow array to expand
72 | #shellcheck disable=2206
73 | sudo apt -yy install ${PKGS[*]}
74 |
75 | # Work around an Ubuntu packaging bug. Fixed in 23.04 onward.
76 | if [ "$VERSION_ID" != "24.04" ]; then
77 | sudo sed -i 's/#define LXC_DEVEL 1/#define LXC_DEVEL 0/' /usr/include/lxc/version.h
78 | fi
79 |
80 | # skopeo deps
81 | sudo apt -yy install \
82 | libgpgme-dev \
83 | libassuan-dev \
84 | libbtrfs-dev \
85 | libdevmapper-dev \
86 | pkg-config
87 | if ! command -v go 2>/dev/null; then
88 | sudo apt -yy install golang-go
89 | go version
90 | fi
91 |
92 | # cloud kernels, like linux-azure, don't include erofs in the linux-modules package and instead put it in linux-modules-extra
93 | if ! modinfo erofs &>/dev/null; then
94 | sudo apt -yy install linux-modules-extra-$(uname -r)
95 | fi
96 | }
97 |
98 | enable_userns() {
99 | SYSCTL_USERNS="/etc/sysctl.d/00-enable-userns.conf"
100 | if ! [ -s "${SYSCTL_USERNS}" ]; then
101 | echo "Add kernel tunables to enable user namespaces in $SYSCTL_USERNS "
101 | cat <<EOF | sudo tee "${SYSCTL_USERNS}"
--------------------------------------------------------------------------------
/pkg/lib/bom.go:
--------------------------------------------------------------------------------
58 | if len(opts.ignorePatterns) > 0 {
59 | builderOpts.IgnorePatterns = opts.ignorePatterns
60 | }
61 | doc, err := builder.Generate(builderOpts)
62 | if err != nil {
63 | return errors.Errorf("generating doc: %v", err)
64 | }
65 |
66 | var renderer serialize.Serializer
67 | if opts.format == "json" {
68 | renderer = &serialize.JSON{}
69 | } else {
70 | renderer = &serialize.TagValue{}
71 | }
72 |
73 | markup, err := renderer.Serialize(doc)
74 | if err != nil {
75 | return errors.Errorf("serializing document: %v", err)
76 | }
77 | if opts.outputFile == "" {
78 | } else {
79 | if err := os.WriteFile(opts.outputFile, []byte(markup), 0o664); err != nil { //nolint:gosec // G306: Expect WriteFile
80 | return errors.Errorf("writing SBOM: %v", err)
81 | }
82 | }
83 | // Export the SBOM as in-toto provenance
84 | if opts.provenancePath != "" {
85 | if err := doc.WriteProvenanceStatement(
86 | spdx.DefaultProvenanceOptions, opts.provenancePath,
87 | ); err != nil {
88 | return errors.Errorf("writing SBOM as provenance statement: %v", err)
89 | }
90 | }
91 |
92 | return nil
93 | }
94 |
95 | type GenerateBOMOpts struct {
96 | Path string
97 | Dest string
98 | }
99 |
100 | func GenerateBOM(opts GenerateBOMOpts) error {
101 | log.SetOutput(io.Discard)
102 | err := generateBOM(&generateOptions{directories: []string{opts.Path}, outputFile: opts.Dest})
103 | return err
104 | }
105 |
--------------------------------------------------------------------------------
/pkg/lib/containers_storage/lib.go:
--------------------------------------------------------------------------------
1 | // containers/image/storage has a dependency on libdevmapper.so; having this in
2 | // its own package allows downstream users to import it if they want to use it,
3 | // but means they can also avoid importing it if they don't want to add this
4 | // dependency.
5 | package containers_storage
6 |
7 | import (
8 | "github.com/containers/image/v5/storage"
9 | "stackerbuild.io/stacker/pkg/lib"
10 | )
11 |
12 | func init() {
13 | lib.RegisterURLScheme("containers-storage", storage.Transport.ParseReference)
14 | }
15 |
--------------------------------------------------------------------------------
/pkg/lib/dag_test.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | import (
4 | "testing"
5 |
6 | . "github.com/smartystreets/goconvey/convey"
7 | )
8 |
9 | func TestDag(t *testing.T) {
10 | Convey("Create a dag", t, func() {
11 | dag := NewDAG()
12 | So(dag, ShouldNotBeNil)
13 |
14 | // Assert that only unique vertices can be created
15 | So(dag.AddVertex("shirt", 1), ShouldBeNil)
16 | So(dag.AddVertex("tie", 2), ShouldBeNil)
17 | So(dag.AddVertex("belt", 3), ShouldBeNil)
18 | So(dag.AddVertex("pants", 4), ShouldBeNil)
19 | So(dag.AddVertex("jacket", 5), ShouldBeNil)
20 | So(dag.AddVertex("shirt", 6), ShouldBeError)
21 |
22 | // Assert that edges can be created to only existing vertices
23 | So(dag.AddDependencies("tie", "shirt"), ShouldBeNil)
24 | So(dag.AddDependencies("jacket", "tie", "belt"), ShouldBeNil)
25 | So(dag.AddDependencies("belt", "pants"), ShouldBeNil)
26 | So(dag.AddDependencies("shirt", "does_not_exist"), ShouldBeError)
27 | So(dag.AddDependencies("does_not_exist", "shirt"), ShouldBeError)
28 |
29 | // Assert that cycles cannot happen
30 | So(dag.AddDependencies("shirt", "shirt"), ShouldBeError)
31 | So(dag.AddDependencies("shirt", "jacket"), ShouldBeError)
32 |
33 | // Check if the vertex can be retrieved
34 | So(dag.GetValue("shirt"), ShouldEqual, 1)
35 | So(dag.GetValue("unknown_key"), ShouldBeNil)
36 |
37 | // Check if the vertex can be set
38 | So(dag.SetValue("shirt", 9), ShouldBeNil)
39 | So(dag.GetValue("shirt"), ShouldEqual, 9)
40 | So(dag.SetValue("unknown_key", 99), ShouldNotBeNil)
41 |
42 | expectedSortedNodes := []Vertex{
43 | {"pants", 4},
44 | {"belt", 3},
45 | {"shirt", 9},
46 | {"tie", 2},
47 | {"jacket", 5},
48 | }
49 | sortedNodes := dag.Sort()
50 | So(sortedNodes, ShouldResemble, expectedSortedNodes)
51 |
52 | // Add and remove a vertex
53 | So(dag.AddVertex("hat", 100), ShouldBeNil)
54 | So(dag.RemoveVertex("hat"), ShouldBeNil)
55 | So(dag.GetValue("hat"), ShouldBeNil)
56 | So(dag.RemoveVertex("unknown_thing"), ShouldBeError)
57 | })
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/lib/dir.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | import (
4 | "io/fs"
5 | "os"
6 | "path"
7 | "strconv"
8 | "strings"
9 |
10 | "github.com/pkg/errors"
11 | )
12 |
13 | // DirCopy copies a whole directory recursively
14 | func DirCopy(dest string, source string) error {
15 |
16 | var err error
17 | var fds []os.DirEntry
18 | var srcinfo os.FileInfo
19 |
20 | if srcinfo, err = os.Stat(source); err != nil {
21 | return errors.Wrapf(err, "Couldn't stat %s", source)
22 | }
23 |
24 | linkFI, err := os.Lstat(source)
25 | if err != nil {
26 | return errors.Wrapf(err, "Couldn't stat link %s", source)
27 | }
28 |
29 | // in case the dir is a symlink
30 | if linkFI.Mode()&os.ModeSymlink != 0 {
31 | target, err := os.Readlink(source)
32 | if err != nil {
33 | return errors.Wrapf(err, "Couldn't read link %s", source)
34 | }
35 |
36 | return os.Symlink(target, dest)
37 | }
38 |
39 | dirMode := srcinfo.Mode()
40 |
41 | if err = os.MkdirAll(dest, dirMode); err != nil {
42 | return errors.Wrapf(err, "Couldn't mkdir %s", dest)
43 | }
44 |
45 | if fds, err = os.ReadDir(source); err != nil {
46 | return errors.Wrapf(err, "Couldn't read dir %s", source)
47 | }
48 |
49 | for _, fd := range fds {
50 | srcfp := path.Join(source, fd.Name())
51 | dstfp := path.Join(dest, fd.Name())
52 |
53 | if fd.IsDir() {
54 | if err = DirCopy(dstfp, srcfp); err != nil {
55 | return err
56 | }
57 | } else {
58 | if err = FileCopyNoPerms(dstfp, srcfp); err != nil {
59 | return err
60 | }
61 | }
62 | }
63 | return nil
64 | }
65 |
66 | // CopyThing copies either a dir or file to the target.
67 | func CopyThing(srcpath, destpath string) error {
68 | srcInfo, err := os.Lstat(srcpath)
69 | if err != nil {
70 | return errors.WithStack(err)
71 | }
72 |
73 | if srcInfo.IsDir() {
74 | return DirCopy(destpath, srcpath)
75 | } else {
76 | return FileCopy(destpath, srcpath, nil, -1, -1)
77 | }
78 | }
79 |
80 | // Chmod changes file permissions
81 | func Chmod(mode, destpath string) error {
82 | destInfo, err := os.Lstat(destpath)
83 | if err != nil {
84 | return errors.WithStack(err)
85 | }
86 |
87 | if destInfo.IsDir() {
88 | return errors.WithStack(os.ErrInvalid)
89 | }
90 |
91 | if destInfo.Mode()&os.ModeSymlink != 0 {
92 | return errors.WithStack(os.ErrInvalid)
93 | }
94 |
95 | // read as an octal value
96 | iperms, err := strconv.ParseUint(mode, 8, 32)
97 | if err != nil {
98 | return errors.WithStack(err)
99 | }
100 |
101 | return os.Chmod(destpath, fs.FileMode(iperms))
102 | }
103 |
104 | // Chown changes file ownership
105 | func Chown(owner, destpath string) error {
106 | destInfo, err := os.Lstat(destpath)
107 | if err != nil {
108 | return errors.WithStack(err)
109 | }
110 |
111 | if destInfo.IsDir() {
112 | return errors.WithStack(os.ErrInvalid)
113 | }
114 |
115 | owns := strings.Split(owner, ":")
116 | if len(owns) > 2 {
117 | return errors.WithStack(os.ErrInvalid)
118 | }
119 |
120 | uid, err := strconv.ParseInt(owns[0], 10, 32)
121 | if err != nil {
122 | return errors.WithStack(err)
123 | }
124 |
125 | gid := int64(-1)
126 | if len(owns) > 1 {
127 | gid, err = strconv.ParseInt(owns[1], 10, 32)
128 | if err != nil {
129 | return errors.WithStack(err)
130 | }
131 | }
132 |
133 | return os.Lchown(destpath, int(uid), int(gid))
134 | }
135 |
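
A minimal usage sketch (not part of the repo) of the helpers above: `Chmod` takes the mode first as an octal string, and `Chown` parses its first argument as `uid` or `uid:gid` (the group defaults to -1, i.e. unchanged). The path below is hypothetical:

    package main

    import (
        "log"

        "stackerbuild.io/stacker/pkg/lib"
    )

    func main() {
        // illustrative only; the target file is hypothetical
        if err := lib.Chmod("0644", "/tmp/example.txt"); err != nil {
            log.Fatal(err)
        }
        // "1000:1000" -> uid 1000, gid 1000; "1000" alone leaves the group unchanged (gid -1)
        if err := lib.Chown("1000:1000", "/tmp/example.txt"); err != nil {
            log.Fatal(err)
        }
    }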
--------------------------------------------------------------------------------
/pkg/lib/dir_test.go:
--------------------------------------------------------------------------------
1 | package lib_test
2 |
3 | import (
4 | "os"
5 | "path"
6 | "testing"
7 |
8 | . "github.com/smartystreets/goconvey/convey"
9 | "stackerbuild.io/stacker/pkg/lib"
10 | )
11 |
12 | func TestDir(t *testing.T) {
13 | Convey("IsSymLink", t, func() {
14 | src, err := os.CreateTemp("", "src")
15 | So(err, ShouldBeNil)
16 | So(src, ShouldNotBeNil)
17 | defer os.Remove(src.Name())
18 |
19 | _, err = src.Write([]byte("hello world!"))
20 | So(err, ShouldBeNil)
21 |
22 | ok, _ := lib.IsSymlink(src.Name())
23 | So(ok, ShouldBeFalse)
24 | })
25 |
26 | Convey("DirCopy", t, func() {
27 | src, err := os.CreateTemp("", "src")
28 | So(err, ShouldBeNil)
29 | So(src, ShouldNotBeNil)
30 | defer os.Remove(src.Name())
31 |
32 | _, err = src.Write([]byte("hello world!"))
33 | So(err, ShouldBeNil)
34 |
35 | dest, err := os.MkdirTemp("", "dest")
36 | So(err, ShouldBeNil)
37 | So(dest, ShouldNotBeNil)
38 | defer os.Remove(dest)
39 |
40 | err = lib.DirCopy(path.Dir(src.Name()), dest)
41 | So(err, ShouldBeNil)
42 | })
43 |
44 | Convey("CopyThing", t, func() {
45 | src, err := os.CreateTemp("", "src")
46 | So(err, ShouldBeNil)
47 | So(src, ShouldNotBeNil)
48 | defer os.Remove(src.Name())
49 |
50 | _, err = src.Write([]byte("hello world!"))
51 | So(err, ShouldBeNil)
52 |
53 | dest, err := os.CreateTemp("", "dest")
54 | So(err, ShouldBeNil)
55 | So(dest, ShouldNotBeNil)
56 | defer os.Remove(dest.Name())
57 |
58 | err = lib.CopyThing(src.Name(), dest.Name())
59 | So(err, ShouldBeNil)
60 | })
61 |
62 | Convey("Chmod", t, func() {
63 | src, err := os.CreateTemp("", "src")
64 | So(err, ShouldBeNil)
65 | So(src, ShouldNotBeNil)
66 | defer os.Remove(src.Name())
67 |
68 | _, err = src.Write([]byte("hello world!"))
69 | So(err, ShouldBeNil)
70 |
71 | err = lib.Chmod("644", src.Name())
72 | So(err, ShouldBeNil)
73 | })
74 | }
75 |
--------------------------------------------------------------------------------
/pkg/lib/file.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | import (
4 | "io"
5 | "io/fs"
6 | "os"
7 | "path/filepath"
8 | "regexp"
9 |
10 | "github.com/pkg/errors"
11 | )
12 |
13 | const GidEmpty, UidEmpty = -1, -1
14 |
15 | func FileCopy(dest string, source string, mode *fs.FileMode, uid, gid int) error {
16 | os.RemoveAll(dest)
17 |
18 | linkFI, err := os.Lstat(source)
19 | if err != nil {
20 | return errors.Wrapf(err, "Couldn't stat link %s", source)
21 | }
22 |
23 | // If it's a link, it might be broken. In any case, just copy it.
24 | if linkFI.Mode()&os.ModeSymlink != 0 {
25 | target, err := os.Readlink(source)
26 | if err != nil {
27 | return errors.Wrapf(err, "Couldn't read link %s", source)
28 | }
29 |
30 | if err = os.Symlink(target, dest); err != nil {
31 | return errors.Wrapf(err, "Couldn't symlink %s->%s", source, target)
32 | }
33 |
34 | if err = os.Lchown(dest, uid, gid); err != nil {
35 | return errors.Wrapf(err, "Couldn't set symlink ownership %s", dest)
36 | }
37 |
38 | return nil
39 | }
40 |
41 | s, err := os.Open(source)
42 | if err != nil {
43 | return errors.Wrapf(err, "Couldn't open file %s", source)
44 | }
45 | defer s.Close()
46 |
47 | fi, err := s.Stat()
48 | if err != nil {
49 | return errors.Wrapf(err, "Couldn't stat file %s", source)
50 | }
51 |
52 | d, err := os.Create(dest)
53 | if err != nil {
54 | return errors.Wrapf(err, "Couldn't create file %s", dest)
55 | }
56 | defer d.Close()
57 |
58 | if mode != nil {
59 | err = d.Chmod(*mode)
60 | } else {
61 | err = d.Chmod(fi.Mode())
62 | }
63 | if err != nil {
64 | return errors.Wrapf(err, "Couldn't chmod file %s", source)
65 | }
66 |
67 | err = d.Chown(uid, gid)
68 | if err != nil {
69 | return errors.Wrapf(err, "Couldn't chown file %s", source)
70 | }
71 |
72 | _, err = io.Copy(d, s)
73 | return err
74 | }
75 |
76 | func FileCopyNoPerms(dest string, source string) error {
77 | return FileCopy(dest, source, nil, UidEmpty, GidEmpty)
78 | }
79 |
80 | // FindFiles searches for paths matching a particular regex under a given folder
81 | func FindFiles(base, pattern string) ([]string, error) {
82 | var err error
83 | var paths []string
84 |
85 | re, err := regexp.Compile(pattern)
86 | if err != nil {
87 | return nil, err
88 | }
89 |
90 | visit := func(path string, info os.FileInfo, err error) error {
91 |
92 | if err != nil {
93 | return err
94 | }
95 |
96 | if info.IsDir() {
97 | return nil
98 | }
99 |
100 | matched := re.MatchString(path)
101 |
102 | if matched {
103 | paths = append(paths, path)
104 | }
105 |
106 | return nil
107 | }
108 |
109 | // Note symlinks are not followed by walk implementation
110 | err = filepath.Walk(base, visit)
111 |
112 | return paths, err
113 | }
114 |
115 | func IsSymlink(path string) (bool, error) {
116 | statInfo, err := os.Lstat(path)
117 | if err != nil {
118 | return false, err
119 | }
120 | return (statInfo.Mode() & os.ModeSymlink) != 0, nil
121 | }
122 |
123 | func PathExists(path string) bool {
124 | statInfo, err := os.Stat(path)
125 | if statInfo == nil {
126 | isLink, err := IsSymlink(path)
127 | if err != nil {
128 | return false
129 | }
130 | return isLink
131 | }
132 | if err != nil && os.IsNotExist(err) {
133 | return false
134 | }
135 | return true
136 | }
137 |
--------------------------------------------------------------------------------
/pkg/lib/file_test.go:
--------------------------------------------------------------------------------
1 | package lib_test
2 |
3 | import (
4 | "io/fs"
5 | "os"
6 | "path"
7 | "testing"
8 |
9 | . "github.com/smartystreets/goconvey/convey"
10 | "stackerbuild.io/stacker/pkg/lib"
11 | )
12 |
13 | func TestFile(t *testing.T) {
14 | Convey("FileCopy", t, func() {
15 | src, err := os.CreateTemp("", "src")
16 | So(err, ShouldBeNil)
17 | So(src, ShouldNotBeNil)
18 | defer os.Remove(src.Name())
19 |
20 | _, err = src.Write([]byte("hello world!"))
21 | So(err, ShouldBeNil)
22 |
23 | Convey("With defaults", func() {
24 | dest, err := os.CreateTemp("", "dest")
25 | So(err, ShouldBeNil)
26 | defer os.Remove(dest.Name())
27 |
28 | err = lib.FileCopyNoPerms(dest.Name(), src.Name())
29 | So(err, ShouldBeNil)
30 | })
31 |
32 | Convey("With non-default mode", func() {
33 | dest, err := os.CreateTemp("", "dest")
34 | So(err, ShouldBeNil)
35 | defer os.Remove(dest.Name())
36 |
37 | mode := fs.FileMode(0644)
38 | err = lib.FileCopy(dest.Name(), src.Name(), &mode, lib.UidEmpty, lib.GidEmpty)
39 | So(err, ShouldBeNil)
40 | })
41 | })
42 |
43 | Convey("FindFiles", t, func() {
44 | tdir, err := os.MkdirTemp("", "find-files-test-*")
45 | So(err, ShouldBeNil)
46 | So(tdir, ShouldNotBeNil)
47 | defer os.RemoveAll(tdir)
48 |
49 | src, err := os.CreateTemp(tdir, "src")
50 | So(err, ShouldBeNil)
51 | So(src, ShouldNotBeNil)
52 | defer os.Remove(src.Name())
53 |
54 | files, err := lib.FindFiles(path.Dir(src.Name()), ".*")
55 | So(err, ShouldBeNil)
56 | So(files, ShouldNotBeEmpty)
57 | })
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/lib/hash.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 |
8 | "github.com/minio/sha256-simd"
9 | "github.com/opencontainers/go-digest"
10 | "github.com/pkg/errors"
11 | )
12 |
13 | func HashFile(path string, includeMode bool) (string, error) {
14 | h := sha256.New()
15 | f, err := os.Open(path)
16 | if err != nil {
17 | return "", errors.Wrapf(err, "couldn't open %s for hashing", path)
18 | }
19 | defer f.Close()
20 |
21 | _, err = io.Copy(h, f)
22 | if err != nil {
23 | return "", errors.Wrapf(err, "couldn't copy %s for hashing", path)
24 | }
25 |
26 | if includeMode {
27 | // Include file mode when computing the hash
28 | // In general we want to do this, but not all external
29 | // tooling includes it, so we can't compare it with the hash
30 | // in the reply of a HTTP HEAD call
31 |
32 | fi, err := f.Stat()
33 | if err != nil {
34 | return "", errors.Wrapf(err, "couldn't stat %s for hashing", path)
35 | }
36 |
37 | _, err = h.Write([]byte(fmt.Sprintf("%v", fi.Mode())))
38 | if err != nil {
39 | return "", errors.Wrapf(err, "couldn't write mode")
40 | }
41 | }
42 |
43 | d := digest.NewDigest("sha256", h)
44 | return d.String(), nil
45 | }
46 |
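
A small usage sketch (not part of the repo): the result is a `sha256:<hex>` digest string, and because `includeMode` folds the file mode into the hash, the two calls below will generally differ for identical contents. The path is hypothetical:

    package main

    import (
        "fmt"
        "log"

        "stackerbuild.io/stacker/pkg/lib"
    )

    func main() {
        withMode, err := lib.HashFile("/tmp/example.txt", true)
        if err != nil {
            log.Fatal(err)
        }
        contentOnly, err := lib.HashFile("/tmp/example.txt", false)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(withMode)    // e.g. "sha256:..." including the file mode
        fmt.Println(contentOnly) // plain content digest, comparable to an external sha256 of the file
    }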
--------------------------------------------------------------------------------
/pkg/lib/image_test.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "path"
8 | "testing"
9 | "time"
10 |
11 | ispec "github.com/opencontainers/image-spec/specs-go/v1"
12 | "github.com/opencontainers/umoci"
13 | "github.com/opencontainers/umoci/mutate"
14 | "github.com/opencontainers/umoci/oci/casext"
15 | "github.com/stretchr/testify/assert"
16 | "machinerun.io/atomfs/pkg/squashfs"
17 | "machinerun.io/atomfs/pkg/verity"
18 | )
19 |
20 | func createImage(dir string, tag string) error {
21 | imageRoot := path.Join(dir, "oci")
22 |
23 | var oci casext.Engine
24 | _, err := os.Stat(imageRoot)
25 | if err != nil {
26 | oci, err = umoci.CreateLayout(imageRoot)
27 | } else {
28 | oci, err = umoci.OpenLayout(imageRoot)
29 | }
30 | if err != nil {
31 | return err
32 | }
33 | defer oci.Close()
34 |
35 | err = umoci.NewImage(oci, tag)
36 | if err != nil {
37 | return err
38 | }
39 |
40 | descPaths, err := oci.ResolveReference(context.Background(), tag)
41 | if err != nil {
42 | return err
43 | }
44 |
45 | mutator, err := mutate.New(oci, descPaths[0])
46 | if err != nil {
47 | return err
48 | }
49 |
50 | // need *something* in the layer, why not just recursively include the
51 | // OCI image for maximum confusion :)
52 | layer, mediaType, _, err := squashfs.MakeSquashfs(dir, path.Join(dir, "oci"), nil, verity.VerityMetadataMissing)
53 | if err != nil {
54 | return err
55 | }
56 |
57 | now := time.Now()
58 | history := &ispec.History{
59 | Created: &now,
60 | CreatedBy: fmt.Sprintf("stacker test suite %s", tag),
61 | EmptyLayer: false,
62 | }
63 | _, err = mutator.Add(context.Background(), mediaType, layer, history, mutate.NoopCompressor, nil)
64 | if err != nil {
65 | return err
66 | }
67 |
68 | _, err = mutator.Commit(context.Background())
69 | if err != nil {
70 | return err
71 | }
72 |
73 | return oci.GC(context.Background())
74 | }
75 |
76 | func TestImageCompressionCopy(t *testing.T) {
77 | assert := assert.New(t)
78 | dir, err := os.MkdirTemp("", "stacker-compression-copy-test")
79 | assert.NoError(err)
80 | defer os.RemoveAll(dir)
81 |
82 | assert.NoError(createImage(dir, "foo"))
83 |
84 | assert.NoError(ImageCopy(ImageCopyOpts{
85 | Src: fmt.Sprintf("oci:%s/oci:foo", dir),
86 | Dest: fmt.Sprintf("oci:%s/oci2:foo", dir),
87 | }))
88 |
89 | origBlobs, err := os.ReadDir(fmt.Sprintf("%s/oci/blobs/sha256/", dir))
90 | assert.NoError(err)
91 | copiedBlobs, err := os.ReadDir(fmt.Sprintf("%s/oci2/blobs/sha256/", dir))
92 | assert.NoError(err)
93 |
94 | for i := range origBlobs {
95 | // could check the hashes too, but containers/image doesn't
96 | // generally break that :)
97 | assert.Equal(origBlobs[i].Name(), copiedBlobs[i].Name())
98 | }
99 | }
100 |
101 | func TestForceManifestTypeOption(t *testing.T) {
102 | assert := assert.New(t)
103 | dir, err := os.MkdirTemp("", "stacker-force-manifesttype-test")
104 | assert.NoError(err)
105 | defer os.RemoveAll(dir)
106 |
107 | assert.NoError(createImage(dir, "foo"))
108 |
109 | assert.NoError(ImageCopy(ImageCopyOpts{
110 | Src: fmt.Sprintf("oci:%s/oci:foo", dir),
111 | Dest: fmt.Sprintf("oci:%s/oci2:foo", dir),
112 | ForceManifestType: ispec.MediaTypeImageManifest,
113 | }))
114 |
115 | assert.Error(ImageCopy(ImageCopyOpts{
116 | Src: fmt.Sprintf("oci:%s/oci:foo", dir),
117 | Dest: fmt.Sprintf("oci:%s/oci2:foo", dir),
118 | ForceManifestType: "test",
119 | }))
120 | }
121 |
122 | func TestOldManifestReallyRemoved(t *testing.T) {
123 | assert := assert.New(t)
124 | dir, err := os.MkdirTemp("", "stacker-compression-copy-test")
125 | assert.NoError(err)
126 | defer os.RemoveAll(dir)
127 |
128 | assert.NoError(createImage(dir, "a"))
129 | assert.NoError(createImage(dir, "b"))
130 |
131 | assert.NoError(ImageCopy(ImageCopyOpts{
132 | Src: fmt.Sprintf("oci:%s/oci:a", dir),
133 | Dest: fmt.Sprintf("oci:%s/oci2:tag", dir),
134 | }))
135 | assert.NoError(ImageCopy(ImageCopyOpts{
136 | Src: fmt.Sprintf("oci:%s/oci:b", dir),
137 | Dest: fmt.Sprintf("oci:%s/oci2:tag", dir),
138 | }))
139 |
140 | oci, err := umoci.OpenLayout(path.Join(dir, "oci2"))
141 | assert.NoError(err)
142 | defer oci.Close()
143 |
144 | ctx := context.Background()
145 |
146 | index, err := oci.GetIndex(ctx)
147 | assert.NoError(err)
148 | assert.Len(index.Manifests, 1)
149 | }
150 |
--------------------------------------------------------------------------------
/pkg/lib/version.go:
--------------------------------------------------------------------------------
1 | package lib
2 |
3 | var (
4 | StackerVersion = ""
5 | LXCVersion = ""
6 | )
7 |
--------------------------------------------------------------------------------
/pkg/log/log.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "time"
7 |
8 | "github.com/apex/log"
9 | )
10 |
11 | var thisIsAStackerLog struct{}
12 |
13 | func addStackerLogSentinel(e *log.Entry) *log.Entry {
14 | return e.WithField("isStacker", &thisIsAStackerLog)
15 | }
16 |
17 | func isStackerLog(e *log.Entry) bool {
18 | v, ok := e.Fields["isStacker"]
19 | return ok && v == &thisIsAStackerLog
20 | }
21 |
22 | type stackerLogFilterer struct {
23 | underlying log.Handler
24 | }
25 |
26 | func (h stackerLogFilterer) HandleLog(e *log.Entry) error {
27 | if !isStackerLog(e) {
28 | return nil
29 | }
30 |
31 | delete(e.Fields, "isStacker")
32 |
33 | return h.underlying.HandleLog(e)
34 | }
35 |
36 | func FilterNonStackerLogs(handler log.Handler, level log.Level) {
37 | log.SetHandler(stackerLogFilterer{handler})
38 | log.SetLevel(level)
39 | }
40 |
41 | func Debugf(msg string, v ...interface{}) {
42 | addStackerLogSentinel(log.NewEntry(log.Log.(*log.Logger))).Debugf(msg, v...)
43 | }
44 |
45 | func Infof(msg string, v ...interface{}) {
46 | addStackerLogSentinel(log.NewEntry(log.Log.(*log.Logger))).Infof(msg, v...)
47 | }
48 |
49 | func Warnf(msg string, v ...interface{}) {
50 | addStackerLogSentinel(log.NewEntry(log.Log.(*log.Logger))).Warnf(msg, v...)
51 | }
52 |
53 | func Errorf(msg string, v ...interface{}) {
54 | addStackerLogSentinel(log.NewEntry(log.Log.(*log.Logger))).Errorf(msg, v...)
55 | }
56 |
57 | func Fatalf(msg string, v ...interface{}) {
58 | addStackerLogSentinel(log.NewEntry(log.Log.(*log.Logger))).Fatalf(msg, v...)
59 | }
60 |
61 | type TextHandler struct {
62 | out io.StringWriter
63 | timestamp bool
64 | }
65 |
66 | func NewTextHandler(out io.StringWriter, timestamp bool) log.Handler {
67 | return &TextHandler{out, timestamp}
68 | }
69 |
70 | func (th *TextHandler) HandleLog(e *log.Entry) error {
71 | if th.timestamp {
72 | _, err := th.out.WriteString(fmt.Sprintf("%s ", e.Timestamp.Format(time.RFC3339)))
73 | if err != nil {
74 | return err
75 | }
76 | }
77 |
78 | _, err := th.out.WriteString(e.Message)
79 | if err != nil {
80 | return err
81 | }
82 |
83 | for _, name := range e.Fields.Names() {
84 | _, err = th.out.WriteString(fmt.Sprintf(" %s=%s", name, e.Fields.Get(name)))
85 | if err != nil {
86 | return err
87 | }
88 | }
89 |
90 | _, err = th.out.WriteString("\n")
91 | if err != nil {
92 | return err
93 | }
94 |
95 | return nil
96 | }
97 |
--------------------------------------------------------------------------------
/pkg/log/log_test.go:
--------------------------------------------------------------------------------
1 | package log_test
2 |
3 | import (
4 | "os"
5 |
6 | "testing"
7 |
8 | . "github.com/smartystreets/goconvey/convey"
9 | "stackerbuild.io/stacker/pkg/log"
10 | )
11 |
12 | func TestLog(t *testing.T) {
13 | Convey("With timestamps", t, func() {
14 | handler := log.NewTextHandler(os.Stderr, true)
15 | So(handler, ShouldNotBeNil)
16 |
17 | So(func() { log.Debugf("debug msg") }, ShouldNotPanic)
18 | So(func() { log.Infof("info msg") }, ShouldNotPanic)
19 | So(func() { log.Errorf("error msg") }, ShouldNotPanic)
20 |
21 | So(func() { log.FilterNonStackerLogs(handler, 1) }, ShouldNotPanic)
22 |
23 | So(func() { log.Debugf("debug msg") }, ShouldNotPanic)
24 | So(func() { log.Infof("info msg") }, ShouldNotPanic)
25 | So(func() { log.Errorf("error msg") }, ShouldNotPanic)
26 | })
27 |
28 | Convey("Without timestamps", t, func() {
29 | handler := log.NewTextHandler(os.Stderr, false)
30 | So(handler, ShouldNotBeNil)
31 |
32 | So(func() { log.Debugf("debug msg") }, ShouldNotPanic)
33 | So(func() { log.Infof("info msg") }, ShouldNotPanic)
34 | So(func() { log.Errorf("error msg") }, ShouldNotPanic)
35 |
36 | So(func() { log.FilterNonStackerLogs(handler, 1) }, ShouldNotPanic)
37 |
38 | So(func() { log.Debugf("debug msg") }, ShouldNotPanic)
39 | So(func() { log.Infof("info msg") }, ShouldNotPanic)
40 | So(func() { log.Errorf("error msg") }, ShouldNotPanic)
41 | })
42 | }
43 |
--------------------------------------------------------------------------------
/pkg/mtree/filter.go:
--------------------------------------------------------------------------------
1 | package mtree
2 |
3 | import (
4 | "github.com/opencontainers/umoci/pkg/mtreefilter"
5 | )
6 |
7 | var (
8 | // Stacker does a mkdir /stacker for bind mounting in imports and such.
9 | // Unfortunately, this causes the mtime on the directory to be changed,
10 | // and go-mtree picks that up as a diff and always generates it. Let's
11 | // mask this out. This of course prevents stuff like `chmod 0444 /` or
12 | // similar, but that's not a very common use case.
13 | LayerGenerationIgnoreRoot mtreefilter.FilterFunc = func(path string) bool {
14 | // the paths are supplied relative to the filter dir, so '.' is root.
15 | return path != "."
16 | }
17 | )
18 |
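
A tiny illustrative sketch (not part of the repo, and assuming the usual `stackerbuild.io/stacker/pkg/...` import path): the filter returns false only for the root entry, so only root-level metadata diffs get masked:

    package main

    import (
        "fmt"

        "stackerbuild.io/stacker/pkg/mtree"
    )

    func main() {
        fmt.Println(mtree.LayerGenerationIgnoreRoot("."))                // false: changes to "/" itself are masked
        fmt.Println(mtree.LayerGenerationIgnoreRoot("./etc/os-release")) // true: every other path is kept
    }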
--------------------------------------------------------------------------------
/pkg/overlay/metadata_test.go:
--------------------------------------------------------------------------------
1 | package overlay
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/mitchellh/hashstructure"
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestOverlayMetadataChanged(t *testing.T) {
11 | assert := assert.New(t)
12 |
13 | // see TestCacheEntryChanged for a full explanation, but if you need to
14 | // bump this, you should bump the cache version as well since things
15 | // may not be transferrable across versions.
16 | h, err := hashstructure.Hash(overlayMetadata{}, nil)
17 | assert.NoError(err)
18 |
19 | assert.Equal(uint64(0x7267149f94b38b4b), h)
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/overlay/pool.go:
--------------------------------------------------------------------------------
1 | package overlay
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "github.com/pkg/errors"
8 | )
9 |
10 | var ThreadPoolCancelled = errors.Errorf("thread pool cancelled")
11 |
12 | type ThreadPool struct {
13 | ctx context.Context
14 | cancel context.CancelFunc
15 | n int
16 | tasks chan func(context.Context) error
17 | err error
18 | }
19 |
20 | func NewThreadPool(n int) *ThreadPool {
21 | ctx, cancel := context.WithCancel(context.Background())
22 | return &ThreadPool{ctx, cancel, n, make(chan func(context.Context) error, 1000), nil}
23 | }
24 |
25 | func (tp *ThreadPool) Add(f func(context.Context) error) {
26 | tp.tasks <- f
27 | }
28 |
29 | func (tp *ThreadPool) DoneAddingJobs() {
30 | close(tp.tasks)
31 | }
32 |
33 | func (tp *ThreadPool) Run() error {
34 | wg := sync.WaitGroup{}
35 | wg.Add(tp.n)
36 | for i := 0; i < tp.n; i++ {
37 | go func(i int) {
38 | defer wg.Done()
39 | for {
40 | select {
41 | case <-tp.ctx.Done():
42 | return
43 | case f, ok := <-tp.tasks:
44 | if !ok {
45 | return
46 | }
47 |
48 | err := f(tp.ctx)
49 | if err != nil && err != ThreadPoolCancelled {
50 | tp.err = err
51 | tp.cancel()
52 | return
53 | }
54 | }
55 | }
56 | }(i)
57 | }
58 |
59 | wg.Wait()
60 | return tp.err
61 | }
62 |
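A rough usage sketch for the pool above (not from the repo; it assumes only the exported API in this file — NewThreadPool, Add, DoneAddingJobs, Run, and ThreadPoolCancelled):

package main

import (
	"context"
	"fmt"

	"stackerbuild.io/stacker/pkg/overlay"
)

func main() {
	pool := overlay.NewThreadPool(4)

	for i := 0; i < 10; i++ {
		i := i
		pool.Add(func(ctx context.Context) error {
			// real callers do per-layer work here; honoring ctx lets a
			// failure in one worker cancel the rest promptly.
			select {
			case <-ctx.Done():
				return overlay.ThreadPoolCancelled
			default:
				fmt.Println("job", i)
				return nil
			}
		})
	}

	pool.DoneAddingJobs()
	if err := pool.Run(); err != nil {
		fmt.Println("pool failed:", err)
	}
}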
--------------------------------------------------------------------------------
/pkg/overlay/unpriv-setup.go:
--------------------------------------------------------------------------------
1 | package overlay
2 |
3 | import (
4 | "stackerbuild.io/stacker/pkg/types"
5 | )
6 |
7 | func UnprivSetup(config types.StackerConfig, uid, gid int) error {
8 | return Check(config)
9 | }
10 |
--------------------------------------------------------------------------------
/pkg/stacker/api.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import "fmt"
4 |
5 | const (
6 | GitVersionAnnotation = "%s.stacker.git_version"
7 | StackerContentsAnnotation = "%s.stacker.stacker_yaml"
8 | StackerVersionAnnotation = "%s.stacker.stacker_version"
9 | )
10 |
11 | func getGitVersionAnnotation(namespace string) string {
12 | return fmt.Sprintf(GitVersionAnnotation, namespace)
13 | }
14 |
15 | func getStackerContentsAnnotation(namespace string) string {
16 | return fmt.Sprintf(StackerContentsAnnotation, namespace)
17 | }
18 |
19 | func getStackerVersionAnnotation(namespace string) string {
20 | return fmt.Sprintf(StackerVersionAnnotation, namespace)
21 | }
22 |
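For illustration only, with the default io.stackeroci namespace exercised in test/annotations-namespace.bats, the helpers above expand to keys like:

// getGitVersionAnnotation("io.stackeroci")      == "io.stackeroci.stacker.git_version"
// getStackerContentsAnnotation("io.stackeroci") == "io.stackeroci.stacker.stacker_yaml"
// getStackerVersionAnnotation("io.stackeroci")  == "io.stackeroci.stacker.stacker_version"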
--------------------------------------------------------------------------------
/pkg/stacker/bom.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "io/fs"
7 | "os"
8 | "path"
9 | "path/filepath"
10 |
11 | "github.com/pkg/errors"
12 | "stackerbuild.io/stacker/pkg/container"
13 | "stackerbuild.io/stacker/pkg/log"
14 | "stackerbuild.io/stacker/pkg/types"
15 | )
16 |
17 | // build for all pkgs and then merge
18 | func BuildLayerArtifacts(sc types.StackerConfig, storage types.Storage, l types.Layer,
19 | tag string, pkg types.Package,
20 | ) error {
21 | name, cleanup, err := storage.TemporaryWritableSnapshot(tag)
22 | if err != nil {
23 | return err
24 | }
25 | defer cleanup()
26 |
27 | c, err := container.New(sc, name)
28 | if err != nil {
29 | return err
30 | }
31 | defer c.Close()
32 |
33 | inDir := types.InternalStackerDir
34 | err = SetupBuildContainerConfig(sc, storage, c, inDir, name)
35 | if err != nil {
36 | log.Errorf("build container %v", err)
37 | return err
38 | }
39 |
40 | err = SetupLayerConfig(sc, c, l, inDir, tag)
41 | if err != nil {
42 | return err
43 | }
44 |
45 | cmd := []string{filepath.Join(inDir, types.BinStacker)}
46 |
47 | if sc.Debug {
48 | cmd = append(cmd, "--debug")
49 | }
50 |
51 | cmd = append(cmd, "bom", "build", filepath.Join(inDir, "artifacts"),
52 | l.Annotations[types.AuthorAnnotation],
53 | l.Annotations[types.OrgAnnotation],
54 | l.Annotations[types.LicenseAnnotation],
55 | pkg.Name, pkg.Version)
56 | cmd = append(cmd, pkg.Paths...)
57 | err = c.Execute(cmd, os.Stdin)
58 | if err != nil {
59 | return err
60 | }
61 |
62 | return nil
63 | }
64 |
65 | func VerifyLayerArtifacts(sc types.StackerConfig, storage types.Storage, l types.Layer, tag string) error {
66 | name, cleanup, err := storage.TemporaryWritableSnapshot(tag)
67 | if err != nil {
68 | return err
69 | }
70 | defer cleanup()
71 |
72 | c, err := container.New(sc, name)
73 | if err != nil {
74 | return err
75 | }
76 | defer c.Close()
77 |
78 | inDir := types.InternalStackerDir
79 | err = SetupBuildContainerConfig(sc, storage, c, inDir, name)
80 | if err != nil {
81 | log.Errorf("build container %v", err)
82 | return err
83 | }
84 |
85 | err = SetupLayerConfig(sc, c, l, inDir, tag)
86 | if err != nil {
87 | return err
88 | }
89 |
90 | cmd := []string{filepath.Join(inDir, types.BinStacker)}
91 |
92 | if sc.Debug {
93 | cmd = append(cmd, "--debug")
94 | }
95 |
96 | cmd = append(cmd, "bom", "verify",
97 | fmt.Sprintf(types.InternalStackerDir+"/artifacts/%s.json", tag),
98 | l.Bom.Namespace,
99 | tag, l.Annotations[types.AuthorAnnotation], l.Annotations[types.OrgAnnotation])
100 |
101 | err = c.Execute(cmd, os.Stdin)
102 | if err != nil {
103 | return err
104 | }
105 |
106 | return nil
107 | }
108 |
109 | func ImportArtifacts(sc types.StackerConfig, src types.ImageSource, name string) error {
110 | if src.Type == types.BuiltLayer {
111 | // if a bom is available, add it here so it can be merged
112 | srcpath := path.Join(sc.StackerDir, "artifacts", src.Tag, fmt.Sprintf("%s.json", src.Tag))
113 |
114 | _, err := os.Lstat(srcpath)
115 | if err != nil && errors.Is(err, fs.ErrNotExist) {
116 | return nil
117 | }
118 |
119 | log.Infof("importing sbom from %s", srcpath)
120 |
121 | dstfp, err := os.CreateTemp(path.Join(sc.StackerDir, "artifacts", name), fmt.Sprintf("%s-*.json", src.Tag))
122 | if err != nil {
123 | return err
124 | }
125 | defer dstfp.Close()
126 |
127 | srcfp, err := os.Open(srcpath)
128 | if err != nil {
129 | return err
130 | }
131 | defer srcfp.Close()
132 |
133 | if _, err := io.Copy(dstfp, srcfp); err != nil {
134 | return err
135 | }
136 | }
137 |
138 | return nil
139 | }
140 |
--------------------------------------------------------------------------------
/pkg/stacker/cache_test.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "os"
5 | "path"
6 | "testing"
7 |
8 | "github.com/mitchellh/hashstructure"
9 | ispec "github.com/opencontainers/image-spec/specs-go/v1"
10 | "github.com/opencontainers/umoci"
11 | "github.com/opencontainers/umoci/oci/casext"
12 | "github.com/stretchr/testify/assert"
13 | "stackerbuild.io/stacker/pkg/types"
14 | )
15 |
16 | func TestLayerHashing(t *testing.T) {
17 | dir, err := os.MkdirTemp("", "stacker_cache_test")
18 | if err != nil {
19 | t.Fatalf("couldn't create temp dir %v", err)
20 | }
21 | defer os.RemoveAll(dir)
22 |
23 | config := types.StackerConfig{
24 | StackerDir: dir,
25 | RootFSDir: dir,
26 | }
27 |
28 | layerBases := path.Join(config.StackerDir, "layer-bases")
29 | err = os.MkdirAll(layerBases, 0755)
30 | if err != nil {
31 | t.Fatalf("couldn't mkdir for layer bases %v", err)
32 | }
33 |
34 | oci, err := umoci.CreateLayout(path.Join(layerBases, "oci"))
35 | if err != nil {
36 | t.Fatalf("couldn't create OCI layout: %v", err)
37 | }
38 | defer oci.Close()
39 |
40 | err = umoci.NewImage(oci, "centos")
41 | if err != nil {
42 | t.Fatalf("couldn't create fake centos image %v", err)
43 | }
44 |
45 | stackerYaml := path.Join(dir, "stacker.yaml")
46 | err = os.WriteFile(stackerYaml, []byte(`
47 | foo:
48 | from:
49 | type: docker
50 | url: docker://centos:latest
51 | run: zomg
52 | build_only: true
53 | `), 0644)
54 | if err != nil {
55 | t.Fatalf("couldn't write stacker yaml %v", err)
56 | }
57 |
58 | sf, err := types.NewStackerfile(stackerYaml, false, nil)
59 | if err != nil {
60 | t.Fatalf("couldn't read stacker file %v", err)
61 | }
62 |
63 | cache, err := OpenCache(config, casext.Engine{}, types.StackerFiles{"dummy": sf})
64 | if err != nil {
65 | t.Fatalf("couldn't open cache %v", err)
66 | }
67 |
68 | // fake a successful build for a build-only layer
69 | err = os.MkdirAll(path.Join(dir, "foo"), 0755)
70 | if err != nil {
71 | t.Fatalf("couldn't fake successful build %v", err)
72 | }
73 |
74 | err = cache.Put("foo", map[types.LayerType]ispec.Descriptor{})
75 | if err != nil {
76 | t.Fatalf("couldn't put to cache %v", err)
77 | }
78 |
79 | // change the layer, but look it up under the same name, to make sure
80 | // the layer itself is hashed
81 | stackerYaml = path.Join(dir, "stacker.yaml")
82 | err = os.WriteFile(stackerYaml, []byte(`
83 | foo:
84 | from:
85 | type: docker
86 | url: docker://centos:latest
87 | run: zomg meshuggah rocks
88 | build_only: true
89 | `), 0644)
90 | if err != nil {
91 | t.Fatalf("couldn't write stacker yaml %v", err)
92 | }
93 |
94 | sf, err = types.NewStackerfile(stackerYaml, false, nil)
95 | if err != nil {
96 | t.Fatalf("couldn't read stacker file %v", err)
97 | }
98 |
99 | // ok, now re-load the persisted cache
100 | cache, err = OpenCache(config, casext.Engine{}, types.StackerFiles{"dummy": sf})
101 | if err != nil {
102 | t.Fatalf("couldn't re-load cache %v", err)
103 | }
104 |
105 | _, ok, err := cache.Lookup("foo")
106 | if err != nil {
107 | t.Errorf("lookup failed %v", err)
108 | }
109 | if ok {
110 | t.Errorf("found cached entry when I shouldn't have?")
111 | }
112 | }
113 |
114 | func TestCacheEntryChanged(t *testing.T) {
115 | assert := assert.New(t)
116 |
117 | h, err := hashstructure.Hash(CacheEntry{}, nil)
118 | assert.NoError(err)
119 |
120 | // The goal here is to make sure that the types of things in CacheEntry
121 | // haven't changed; if they have (aka this test fails), you should do
122 | // currentCacheVersion++, since stackers with an old cache will be
123 | // invalid with your current patch.
124 | //
125 | // This test works because the type information is included in the
126 | // hashstructure hash above, so using a zero valued CacheEntry is
127 | // enough to capture changes in types.
128 | assert.Equal(uint64(0xa26696f335211127), h)
129 | }
130 |
--------------------------------------------------------------------------------
/pkg/stacker/check.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "fmt"
5 |
6 | "golang.org/x/sys/unix"
7 | )
8 |
9 | func KernelInfo() (string, error) {
10 | utsname := unix.Utsname{}
11 | if err := unix.Uname(&utsname); err != nil {
12 | return "", err
13 | }
14 |
15 | return fmt.Sprintf("%s %s %s", string(utsname.Sysname[:]), string(utsname.Release[:]), string(utsname.Version[:])), nil
16 | }
17 |
18 | func MountInfo(path string) (string, error) {
19 | // from /usr/include/linux/magic.h
20 | var fstypeMap = map[int64]string{
21 | 0xadf5: "ADFS",
22 | 0xadff: "AFFS",
23 | 0x5346414F: "AFS",
24 | 0x0187: "AUTOFS",
25 | 0x73757245: "CODA",
26 | 0x28cd3d45: "CRAMFS",
27 | 0x453dcd28: "CRAMFS_WEND",
28 | 0x64626720: "DEBUGFS",
29 | 0x73636673: "SECURITYFS",
30 | 0xf97cff8c: "SELINUX",
31 | 0x43415d53: "SMACK",
32 | 0x858458f6: "RAMFS",
33 | 0x01021994: "TMPFS",
34 | 0x958458f6: "HUGETLBFS",
35 | 0x73717368: "SQUASHFS",
36 | 0xf15f: "ECRYPTFS",
37 | 0x414A53: "EFS",
38 | 0xE0F5E1E2: "EROFS_V1",
39 | 0xEF53: "EXT2",
40 | 0xabba1974: "XENFS",
41 | 0x9123683E: "BTRFS",
42 | 0x3434: "NILFS",
43 | 0xF2F52010: "F2FS",
44 | 0xf995e849: "HPFS",
45 | 0x9660: "ISOFS",
46 | 0x72b6: "JFFS2",
47 | 0x58465342: "XFS",
48 | 0x6165676C: "PSTOREFS",
49 | 0xde5e81e4: "EFIVARFS",
50 | 0x00c0ffee: "HOSTFS",
51 | 0x794c7630: "OVERLAYFS",
52 | 0x137F: "MINIX",
53 | 0x138F: "MINIX2",
54 | 0x2468: "MINIX2",
55 | 0x2478: "MINIX22",
56 | 0x4d5a: "MINIX3",
57 | 0x4d44: "MSDOS",
58 | 0x564c: "NCP",
59 | 0x6969: "NFS",
60 | 0x7461636f: "OCFS2",
61 | 0x9fa1: "OPENPROM",
62 | 0x002f: "QNX4",
63 | 0x68191122: "QNX6",
64 | 0x6B414653: "AFS_FS",
65 | 0x52654973: "REISERFS",
66 | 0x517B: "SMB",
67 | 0x27e0eb: "CGROUP",
68 | 0x63677270: "CGROUP2",
69 | 0x7655821: "RDTGROUP",
70 | 0x57AC6E9D: "STACK_END",
71 | 0x74726163: "TRACEFS",
72 | 0x01021997: "V9FS",
73 | 0x62646576: "BDEVFS",
74 | 0x64646178: "DAXFS",
75 | 0x42494e4d: "BINFMTFS",
76 | 0x1cd1: "DEVPTS",
77 | 0x6c6f6f70: "BINDERFS",
78 | 0xBAD1DEA: "FUTEXFS",
79 | 0x50495045: "PIPEFS",
80 | 0x9fa0: "PROC",
81 | 0x534F434B: "SOCKFS",
82 | 0x62656572: "SYSFS",
83 | 0x9fa2: "USBDEVICE",
84 | 0x11307854: "MTD_INODE_FS",
85 | 0x09041934: "ANON_INODE_FS",
86 | 0x73727279: "BTRFS_TEST",
87 | 0x6e736673: "NSFS",
88 | 0xcafe4a11: "BPF_FS",
89 | 0x5a3c69f0: "AAFS",
90 | 0x5a4f4653: "ZONEFS",
91 | 0x15013346: "UDF",
92 | 0x13661366: "BALLOON_KVM",
93 | 0x58295829: "ZSMALLOC",
94 | 0x444d4142: "DMA_BUF",
95 | 0x454d444d: "DEVMEM",
96 | 0x33: "Z3FOLD",
97 | 0xc7571590: "PPC_CMM",
98 | 0x5345434d: "SECRETMEM",
99 | 0x6a656a62: "SHIFTFS",
100 | }
101 |
102 | st := unix.Statfs_t{}
103 | if err := unix.Statfs(path, &st); err != nil {
104 | return "", err
105 | }
106 |
107 | fstype, ok := fstypeMap[st.Type]
108 | if !ok {
109 | fstype = "unknown"
110 | }
111 |
112 | // lookup fs type in /usr/include/linux/magic.h
113 | return fmt.Sprintf("%s(%x)", fstype, st.Type), nil
114 | }
115 |
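A minimal usage sketch (not from the repo; Linux only, and it uses just the two exported helpers in this file):

package main

import (
	"fmt"

	"stackerbuild.io/stacker/pkg/stacker"
)

func main() {
	kernel, err := stacker.KernelInfo()
	if err == nil {
		fmt.Println("kernel:", kernel) // e.g. "Linux 6.1.0 #1 SMP ..."
	}

	// Reports the filesystem backing a path, e.g. "TMPFS(1021994)" or
	// "OVERLAYFS(794c7630)"; unknown magic numbers come back as "unknown(<hex>)".
	fsinfo, err := stacker.MountInfo("/tmp")
	if err == nil {
		fmt.Println("/tmp is on:", fsinfo)
	}
}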
--------------------------------------------------------------------------------
/pkg/stacker/deps.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "sort"
5 | "stackerbuild.io/stacker/pkg/lib"
6 | "stackerbuild.io/stacker/pkg/types"
7 | )
8 |
9 | // StackerFilesDAG processes the dependencies between different stacker recipes
10 | type StackerFilesDAG struct {
11 | dag lib.Graph
12 | }
13 |
14 | // NewStackerFilesDAG properly initializes a StackerFilesDAG
15 | func NewStackerFilesDAG(sfMap types.StackerFiles) (*StackerFilesDAG, error) {
16 | dag := lib.NewDAG()
17 |
18 | // The DAG.Sort() method uses a topological sort which, for acyclic
19 | // graphs, returns an order that depends on which node it starts from. To
20 | // ensure we build the same graph every time, sort the list of input files
21 | // so we get the same starting node for DAG.Sort(), giving a consistent build order.
22 | keys := make([]string, 0, len(sfMap))
23 | for k := range sfMap {
24 | keys = append(keys, k)
25 | }
26 | sort.Strings(keys)
27 |
28 | // Add vertices to dag
29 | for _, path := range keys {
30 | sf := sfMap[path]
31 | // Add a vertex for every StackerFile object
32 | err := dag.AddVertex(path, sf)
33 | if err != nil {
34 | return nil, err
35 | }
36 | }
37 |
38 | // Update the dependencies in the dag
39 | for path, sf := range sfMap {
40 | prerequisites, err := sf.Prerequisites()
41 | if err != nil {
42 | return nil, err
43 | }
44 |
45 | for _, depPath := range prerequisites {
46 | err := dag.AddDependencies(path, depPath)
47 | if err != nil {
48 | return nil, err
49 | }
50 | }
51 | }
52 |
53 | p := StackerFilesDAG{
54 | dag: dag,
55 | }
56 | return &p, nil
57 | }
58 |
59 | func (d *StackerFilesDAG) GetStackerFile(path string) *types.Stackerfile {
60 | value := d.dag.GetValue(path)
61 | return value.(*types.Stackerfile)
62 | }
63 |
64 | // Sort provides a serial build order for the stacker files
65 | func (d *StackerFilesDAG) Sort() []string {
66 | var order []string
67 |
68 | // Use dag.Sort() to ensure we always process targets in order of their dependencies
69 | for _, i := range d.dag.Sort() {
70 | path := i.Key.(string)
71 | order = append(order, path)
72 | }
73 |
74 | return order
75 | }
76 |
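A sketch of the intended call pattern (hypothetical recipe path; NewStackerFiles comes from pkg/types further down in this dump):

package main

import (
	"fmt"

	"stackerbuild.io/stacker/pkg/stacker"
	"stackerbuild.io/stacker/pkg/types"
)

func main() {
	// load the recipes (and, transitively, their prerequisites)
	sfm, err := types.NewStackerFiles([]string{"app/stacker.yaml"}, false, nil)
	if err != nil {
		panic(err)
	}

	dag, err := stacker.NewStackerFilesDAG(sfm)
	if err != nil {
		panic(err)
	}

	// Sort() yields a serial build order in which every prerequisite
	// comes before the file that depends on it.
	for _, path := range dag.Sort() {
		fmt.Println("build", path)
	}
}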
--------------------------------------------------------------------------------
/pkg/stacker/git.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "os/exec"
5 | "strings"
6 |
7 | "github.com/apex/log"
8 | )
9 |
10 | // gitHash returns the commit hash of HEAD (optionally shortened), much like git describe --always.
11 | func gitHash(path string, short bool) (string, error) {
12 |
13 | // Get hash
14 | args := []string{"-C", path, "rev-parse", "HEAD"}
15 | if short {
16 | args = []string{"-C", path, "rev-parse", "--short", "HEAD"}
17 | }
18 | output, err := exec.Command("git", args...).CombinedOutput()
19 | if err != nil {
20 | return "", err
21 | }
22 |
23 | return strings.TrimSpace(string(output)), nil
24 | }
25 |
26 | // GitVersion generates a version string similar to what git describe --always
27 | // does, with -dirty on the end if the git repo had local changes.
28 | func GitVersion(path string) (string, error) {
29 |
30 | var vers string
31 | // Obtain commit hash
32 | args := []string{"-C", path, "describe", "--tags"}
33 | output, err := exec.Command("git", args...).CombinedOutput()
34 | if err == nil {
35 | vers = strings.TrimSpace(string(output))
36 | } else {
37 | log.Debug("'git describe --tags' failed, falling back to hash")
38 | vers, err = gitHash(path, false)
39 | if err != nil {
40 | return "", err
41 | }
42 | }
43 |
44 | // Check if there are local changes
45 | args = []string{"-C", path, "status", "--porcelain", "--untracked-files=no"}
46 | output, err = exec.Command("git", args...).CombinedOutput()
47 | if err != nil {
48 | return "", err
49 | }
50 |
51 | if len(output) == 0 {
52 | // Commit is clean, no local changes found
53 | return vers, nil
54 | }
55 |
56 | return vers + "-dirty", nil
57 | }
58 |
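A small usage sketch for the exported helper above (not from the repo):

package main

import (
	"fmt"

	"stackerbuild.io/stacker/pkg/stacker"
)

func main() {
	// For a checkout with tags this prints something like "v1.0.0" or
	// "v1.0.0-5-gdeadbee"; without tags it falls back to the commit hash,
	// and "-dirty" is appended when there are local changes.
	vers, err := stacker.GitVersion(".")
	if err != nil {
		fmt.Println("not a git repo?", err)
		return
	}
	fmt.Println(vers)
}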
--------------------------------------------------------------------------------
/pkg/stacker/grab.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "fmt"
5 | "io/fs"
6 | "path"
7 | "path/filepath"
8 |
9 | "stackerbuild.io/stacker/pkg/container"
10 | "stackerbuild.io/stacker/pkg/types"
11 | )
12 |
13 | func Grab(sc types.StackerConfig, storage types.Storage, name string, source string, targetDir string,
14 | idest string, mode *fs.FileMode, uid, gid int,
15 | ) error {
16 | c, err := container.New(sc, name)
17 | if err != nil {
18 | return err
19 | }
20 | defer c.Close()
21 |
22 | err = SetupBuildContainerConfig(sc, storage, c, types.InternalStackerDir, name)
23 | if err != nil {
24 | return err
25 | }
26 |
27 | idestdir := filepath.Join(types.InternalStackerDir, "grab")
28 | err = c.BindMount(targetDir, idestdir, "")
29 | if err != nil {
30 | return err
31 | }
32 |
33 | bcmd := []string{filepath.Join(types.InternalStackerDir, types.BinStacker), "internal-go"}
34 |
35 | iDestName := filepath.Join(idestdir, path.Base(source))
36 | if idest == "" || source[len(source)-1:] != "/" {
37 | err = c.Execute(append(bcmd, "cp", source, iDestName), nil)
38 | } else {
39 | err = c.Execute(append(bcmd, "cp", source, idestdir+"/"), nil)
40 | }
41 | if err != nil {
42 | return err
43 | }
44 |
45 | if mode != nil {
46 | err = c.Execute(append(bcmd, "chmod", fmt.Sprintf("%o", *mode), iDestName), nil)
47 | if err != nil {
48 | return err
49 | }
50 | }
51 |
52 | if uid > 0 {
53 | owns := fmt.Sprintf("%d", uid)
54 | if gid > 0 {
55 | owns += fmt.Sprintf(":%d", gid)
56 | }
57 |
58 | err = c.Execute(append(bcmd, "chown", owns, iDestName), nil)
59 | if err != nil {
60 | return err
61 | }
62 | }
63 |
64 | return nil
65 | }
66 |
--------------------------------------------------------------------------------
/pkg/stacker/lock.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "bytes"
5 | "os"
6 | "path"
7 | "strconv"
8 | "strings"
9 | "syscall"
10 |
11 | "github.com/pkg/errors"
12 | "stackerbuild.io/stacker/pkg/types"
13 | )
14 |
15 | func findLock(st *syscall.Stat_t) error {
16 | content, err := os.ReadFile("/proc/locks")
17 | if err != nil {
18 | return errors.Wrapf(err, "failed to read locks file")
19 | }
20 |
21 | for _, line := range strings.Split(string(content), "\n") {
22 | if len(line) == 0 {
23 | continue
24 | }
25 |
26 | fields := strings.Fields(line)
27 | if len(fields) < 8 {
28 | return errors.Errorf("invalid lock file entry %s", line)
29 | }
30 |
31 | entries := strings.Split(fields[5], ":")
32 | if len(entries) != 3 {
33 | return errors.Errorf("invalid lock file field %s", fields[5])
34 | }
35 |
36 | /*
37 | * XXX: the kernel prints "fd:01:$ino" for some (all?) locks,
38 |  * even though the man page says we should be able to use fields
39 |  * 0 and 1 as the major and minor device numbers. Let's just
40 |  * ignore these.
41 | */
42 |
43 | ino, err := strconv.ParseUint(entries[2], 10, 64)
44 | if err != nil {
45 | return errors.Wrapf(err, "invalid ino %s", entries[2])
46 | }
47 |
48 | if st.Ino != ino {
49 | continue
50 | }
51 |
52 | pid := fields[4]
53 | content, err := os.ReadFile(path.Join("/proc", pid, "cmdline"))
54 | if err != nil {
55 | return errors.Errorf("lock owned by pid %s", pid)
56 | }
57 |
58 | content = bytes.Replace(content, []byte{0}, []byte{' '}, -1)
59 | return errors.Errorf("lock owned by pid %s (%s)", pid, string(content))
60 | }
61 |
62 | return errors.Errorf("couldn't find who owns the lock")
63 | }
64 |
65 | func acquireLock(p string) (*os.File, error) {
66 | lockfile, err := os.Create(p)
67 | if err != nil {
68 | return nil, errors.Wrapf(err, "couldn't create lockfile %s", p)
69 | }
70 |
71 | lockMode := syscall.LOCK_EX
72 |
73 | lockErr := syscall.Flock(int(lockfile.Fd()), lockMode|syscall.LOCK_NB)
74 | if lockErr == nil {
75 | return lockfile, nil
76 | }
77 |
78 | fi, err := lockfile.Stat()
79 | lockfile.Close()
80 | if err != nil {
81 | return nil, errors.Wrapf(err, "couldn't lock or stat lockfile %s", p)
82 | }
83 |
84 | owner := findLock(fi.Sys().(*syscall.Stat_t))
85 | return nil, errors.Wrapf(lockErr, "couldn't acquire lock on %s: %v", p, owner)
86 | }
87 |
88 | const lockPath = ".lock"
89 |
90 | type StackerLocks struct {
91 | stackerDir, rootsDir *os.File
92 | }
93 |
94 | func (ls *StackerLocks) Unlock() {
95 | // TODO: it would be good to lock the OCI dir here, because I can
96 | // imagine two people trying to output stuff to the same directory.
97 | // However, that screws with umoci, because it sees an empty dir as an
98 | // invalid image. The bug we're trying to fix right now is
99 | // multiple invocations on a roots dir, so this is good
100 | // enough.
101 | for _, lock := range []*os.File{ls.stackerDir, ls.rootsDir} {
102 | if lock != nil {
103 | lock.Close()
104 | }
105 | }
106 | ls.stackerDir = nil
107 | ls.rootsDir = nil
108 | }
109 |
110 | func lock(config types.StackerConfig) (*StackerLocks, error) {
111 | ls := &StackerLocks{}
112 |
113 | var err error
114 | ls.stackerDir, err = acquireLock(path.Join(config.StackerDir, lockPath))
115 | if err != nil {
116 | return nil, err
117 | }
118 |
119 | ls.rootsDir, err = acquireLock(path.Join(config.RootFSDir, lockPath))
120 | if err != nil {
121 | ls.Unlock()
122 | return nil, err
123 | }
124 |
125 | return ls, nil
126 | }
127 |
--------------------------------------------------------------------------------
/pkg/stacker/network.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "io"
5 | "io/fs"
6 | "net/http"
7 | "net/url"
8 | "os"
9 | "path"
10 | "strconv"
11 | "strings"
12 |
13 | "github.com/cheggaaa/pb/v3"
14 | "github.com/pkg/errors"
15 | "stackerbuild.io/stacker/pkg/lib"
16 | "stackerbuild.io/stacker/pkg/log"
17 | )
18 |
19 | // Download fetches a URL with caching support in the specified cache dir.
20 | func Download(cacheDir string, url string, progress bool, expectedHash, remoteHash, remoteSize string,
21 | idest string, mode *fs.FileMode, uid, gid int,
22 | ) (string, error) {
23 | var name string
24 | if idest != "" && idest[len(idest)-1:] != "/" {
25 | name = path.Join(cacheDir, path.Base(idest))
26 | } else {
27 | name = path.Join(cacheDir, path.Base(url))
28 | }
29 |
30 | if fi, err := os.Stat(name); err == nil {
31 | // If we couldn't get the remote hash, fall back to the cached copy of the import
32 | if remoteHash == "" {
33 | log.Infof("Couldn't obtain file info of %s, using cached copy", url)
34 | return name, nil
35 | }
36 | // File is found in cache
37 | // need to check if cache is valid before using it
38 | localHash, err := lib.HashFile(name, false)
39 | if err != nil {
40 | return "", err
41 | }
42 | localHash = strings.TrimPrefix(localHash, "sha256:")
43 | localSize := strconv.FormatInt(fi.Size(), 10)
44 | log.Debugf("Local file: hash: %s length: %s", localHash, localSize)
45 |
46 | if localHash == remoteHash {
47 | // Cached file has same hash as the remote file
48 | log.Infof("matched hash of %s, using cached copy", url)
49 | return name, nil
50 | } else if localSize == remoteSize {
51 | // Cached file has same content length as the remote file
52 | log.Infof("matched content length of %s, taking a leap of faith and using cached copy", url)
53 | return name, nil
54 | }
55 | // Cached file has a different hash from the remote one
56 | // Need to cleanup
57 | err = os.RemoveAll(name)
58 | if err != nil {
59 | return "", err
60 | }
61 | } else if !os.IsNotExist(err) {
62 | // File is not found in cache but there are other errors
63 | return "", err
64 | }
65 |
66 | // File is not in cache
67 | // it wasn't there in the first place or it was cleaned up
68 | out, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0644)
69 | if err != nil {
70 | return "", err
71 | }
72 | defer out.Close()
73 |
74 | log.Infof("downloading %v", url)
75 |
76 | resp, err := http.Get(url)
77 | if err != nil {
78 | os.RemoveAll(name)
79 | return "", err
80 | }
81 | defer resp.Body.Close()
82 |
83 | if resp.StatusCode != 200 {
84 | os.RemoveAll(name)
85 | return "", errors.Errorf("couldn't download %s: %s", url, resp.Status)
86 | }
87 |
88 | source := resp.Body
89 | if progress {
90 | bar := pb.New(int(resp.ContentLength)).Set(pb.Bytes, true)
91 | bar.Start()
92 | source = bar.NewProxyReader(source)
93 | defer bar.Finish()
94 | }
95 |
96 | _, err = io.Copy(out, source)
97 |
98 | if err != nil {
99 | return "", err
100 | }
101 |
102 | downloadHash, err := lib.HashFile(name, false)
103 | if err != nil {
104 | return "", err
105 | }
106 |
107 | if expectedHash != "" {
108 | log.Infof("Checking shasum of downloaded file")
109 | downloadHash = strings.TrimPrefix(downloadHash, "sha256:")
110 | log.Debugf("Downloaded file hash: %s", downloadHash)
111 |
112 | if downloadHash != remoteHash {
113 | log.Warnf("Downloaded file hash %q does not match hash from HTTP header %q", downloadHash, remoteHash)
114 | }
115 |
116 | if expectedHash != downloadHash {
117 | os.RemoveAll(name)
118 | return "", errors.Errorf("Downloaded file hash does not match. Expected: %s Actual: %s", expectedHash, downloadHash)
119 | }
120 | }
121 |
122 | if mode != nil {
123 | err = out.Chmod(*mode)
124 | if err != nil {
125 | return "", errors.Wrapf(err, "Couldn't chmod file %s", name)
126 | }
127 | }
128 |
129 | err = out.Chown(uid, gid)
130 | if err != nil {
131 | return "", errors.Wrapf(err, "Couldn't chown file %s", name)
132 | }
133 |
134 | return name, err
135 | }
136 |
138 | // getHttpFileInfo returns the hash and content size of a file stored on a web server
138 | func getHttpFileInfo(remoteURL string) (string, string, error) {
139 |
140 | // Verify URL scheme
141 | u, err := url.Parse(remoteURL)
142 | if err != nil {
143 | return "", "", err
144 | }
145 | if u.Scheme != "http" && u.Scheme != "https" {
146 | return "", "", errors.Errorf("cannot obtain content info for non HTTP URL: (%s)", remoteURL)
147 | }
148 |
149 | // Make a HEAD call on remote URL
150 | resp, err := http.Head(remoteURL)
151 | if err != nil {
152 | return "", "", err
153 | }
154 | defer resp.Body.Close()
155 |
156 | // Get file info from header
157 | // If the hash is not present this is an empty string
158 | hash := resp.Header.Get("X-Checksum-Sha256")
159 | length := resp.Header.Get("Content-Length")
160 |
161 | return hash, length, nil
162 | }
163 |
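A hedged usage sketch for the exported Download helper (hypothetical URL and cache directory; empty expectedHash/remoteHash/remoteSize mean nothing is known about the remote file, nil mode skips the chmod, and -1/-1 leaves ownership as-is):

package main

import (
	"fmt"
	"os"

	"stackerbuild.io/stacker/pkg/stacker"
)

func main() {
	cacheDir := "/tmp/stacker-cache" // hypothetical
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		panic(err)
	}

	// hypothetical URL; show a progress bar while downloading
	name, err := stacker.Download(cacheDir, "https://example.com/rootfs.tar",
		true, "", "", "", "", nil, -1, -1)
	if err != nil {
		fmt.Println("download failed:", err)
		return
	}
	fmt.Println("cached at", name)
}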
--------------------------------------------------------------------------------
/pkg/stacker/referrer_test.go:
--------------------------------------------------------------------------------
1 | package stacker
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
8 | func TestDistspecURLParsing(t *testing.T) {
9 | cases := map[string]*distspecUrl{
10 | "docker://alpine:latest": &distspecUrl{Scheme: "docker", Host: "docker.io", Tag: "latest", Path: "/library/alpine"},
11 | "docker://localhost:8080/alpine:latest": &distspecUrl{Scheme: "docker", Host: "localhost:8080", Tag: "latest", Path: "/alpine"},
12 | "docker://localhost:8080/a/b/c/alpine:latest": &distspecUrl{Scheme: "docker", Host: "localhost:8080", Tag: "latest", Path: "/a/b/c/alpine"},
13 | "docker://alpine": &distspecUrl{Scheme: "docker", Host: "docker.io", Tag: "latest", Path: "/alpine"},
14 | }
15 |
16 | for input, expected := range cases {
17 | result, err := parseDistSpecUrl(input)
18 | if err != nil {
19 | t.Fatalf("Unable to parse url %s: %s", input, err)
20 | }
21 |
22 | if !reflect.DeepEqual(*expected, result) {
23 | t.Fatalf("%s: Incorrect result expected != found: %v != %v",
24 | input, *expected, result)
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/pkg/storage/storage.go:
--------------------------------------------------------------------------------
1 | // common code used by storage backends
2 | package storage
3 |
4 | import (
5 | "github.com/pkg/errors"
6 | "stackerbuild.io/stacker/pkg/types"
7 | )
8 |
9 | // FindFirstBaseInOutput finds the highest base in the dependency tree that is
10 | // present in the output (i.e. it skips build-only layers).
11 | func FindFirstBaseInOutput(name string, sfm types.StackerFiles) (string, types.Layer, bool, error) {
12 | // We need to copy any base OCI layers to the output dir, since they
13 | // may not have been copied before and the final `umoci repack` expects
14 | // them to be there.
15 | base, ok := sfm.LookupLayerDefinition(name)
16 | if !ok {
17 | return "", types.Layer{}, false, errors.Errorf("couldn't find layer %s", name)
18 | }
19 | baseTag := name
20 | var err error
21 |
22 | // first, go all the way to the first layer that's not a built type
23 | for {
24 | if base.From.Type != types.BuiltLayer {
25 | break
26 | }
27 |
28 | baseTag, err = base.From.ParseTag()
29 | if err != nil {
30 | return "", types.Layer{}, false, err
31 | }
32 |
33 | base, ok = sfm.LookupLayerDefinition(baseTag)
34 | if !ok {
35 | return "", types.Layer{}, false, errors.Errorf("missing base layer: %s?", baseTag)
36 | }
37 |
38 | // if the base was emitted to the output, return that
39 | if !base.BuildOnly {
40 | return baseTag, base, true, nil
41 | }
42 | }
43 |
44 | // if this is from something in the OCI cache, we can use that
45 | if types.IsContainersImageLayer(base.From.Type) {
46 | return baseTag, base, true, nil
47 | }
48 |
49 | // otherwise, we didn't find anything
50 | return "", types.Layer{}, false, nil
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/storage/unpriv-setup.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "os/exec"
7 | "strconv"
8 | "strings"
9 |
10 | "github.com/pkg/errors"
11 | "stackerbuild.io/stacker/pkg/log"
12 | )
13 |
14 | func warnAboutNewuidmap() {
15 | _, err := exec.LookPath("newuidmap")
16 | if err != nil {
17 | log.Infof("WARNING: no newuidmap binary present. LXC will not work correctly.")
18 | }
19 |
20 | _, err = exec.LookPath("newgidmap")
21 | if err != nil {
22 | log.Infof("WARNING: no newgidmap binary present. LXC will not work correctly.")
23 | }
24 | }
25 |
26 | func addSpecificEntries(file string, name string, currentId int) error {
27 | content, err := os.ReadFile(file)
28 | if err != nil && !os.IsNotExist(err) {
29 | return errors.Wrapf(err, "couldn't read %s", file)
30 | }
31 |
32 | maxAlloc := 100 * 1000
33 |
34 | for _, line := range strings.Split(string(content), "\n") {
35 | if line == "" {
36 | continue
37 | }
38 |
39 | parts := strings.Split(line, ":")
40 | if parts[0] == name {
41 | return nil
42 | }
43 |
44 | if len(parts) != 3 {
45 | return errors.Errorf("invalid %s entry: %s", file, line)
46 | }
47 |
48 | thisAlloc, err := strconv.Atoi(parts[1])
49 | if err != nil {
50 | return errors.Wrapf(err, "invalid %s entry: %s", file, line)
51 | }
52 |
53 | size, err := strconv.Atoi(parts[2])
54 | if err != nil {
55 | return errors.Wrapf(err, "invalid %s entry: %s", file, line)
56 | }
57 |
58 | if thisAlloc+size > maxAlloc {
59 | maxAlloc = thisAlloc + size
60 | }
61 |
62 | }
63 |
64 | // newuidmap (and thus lxc-usernsexec, or more generally liblxc) will
65 | // complain if the current uid is in the subuid allocation. So if it
66 | // is, let's just advance the subuid allocation another 65536 uids. We
67 | // don't need to check if this overlaps again, since we know that
68 | // maxAlloc was the highest existing allocation.
69 | if maxAlloc <= currentId && currentId < maxAlloc+65536 {
70 | maxAlloc += 65536
71 | }
72 |
73 | withNewEntry := append(content, []byte(fmt.Sprintf("%s:%d:65536\n", name, maxAlloc))...)
74 | err = os.WriteFile(file, withNewEntry, 0644)
75 | return errors.Wrapf(err, "couldn't write %s", file)
76 | }
77 |
78 | func addEtcEntriesIfNecessary(username string, uid int, gid int) error {
79 | err := addSpecificEntries("/etc/subuid", username, uid)
80 | if err != nil {
81 | return err
82 | }
83 |
84 | err = addSpecificEntries("/etc/subgid", username, gid)
85 | if err != nil {
86 | return err
87 | }
88 |
89 | return nil
90 | }
91 |
92 | func UidmapSetup(username string, uid, gid int) error {
93 | warnAboutNewuidmap()
94 | return addEtcEntriesIfNecessary(username, uid, gid)
95 | }
96 |
--------------------------------------------------------------------------------
/pkg/test/cover.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import "os"
4 |
5 | const CoverageBindPath = "/stacker/.coverage"
6 |
7 | func IsCoverageEnabled() bool {
8 | _, ok := os.LookupEnv("GOCOVERDIR")
9 | return ok
10 | }
11 |
12 | func GetCoverageDir() string {
13 | val, ok := os.LookupEnv("GOCOVERDIR")
14 | if ok {
15 | return val
16 | }
17 |
18 | return ""
19 | }
20 |
--------------------------------------------------------------------------------
/pkg/types/config.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "embed"
5 | "fmt"
6 | "path"
7 | )
8 |
9 | // StackerConfig is a struct that contains global (or widely used) stacker
10 | // config options.
11 | type StackerConfig struct {
12 | WorkDir string `yaml:"work_dir,omitempty"`
13 | StackerDir string `yaml:"stacker_dir"`
14 | OCIDir string `yaml:"oci_dir"`
15 | RootFSDir string `yaml:"rootfs_dir"`
16 | Debug bool `yaml:"-"`
17 | StorageType string `yaml:"-"`
18 |
19 | // EmbeddedFS should contain a (statically linked) lxc-wrapper binary
20 | // (built from cmd/lxc-wrapper/lxc-wrapper.c) at
21 | // lxc-wrapper/lxc-wrapper.
22 | EmbeddedFS embed.FS `yaml:"-"`
23 | }
24 |
25 | // Substitutions - return an array of substitutions for StackerFiles
26 | func (sc *StackerConfig) Substitutions() []string {
27 | return []string{
28 | fmt.Sprintf("STACKER_ROOTFS_DIR=%s", sc.RootFSDir),
29 | fmt.Sprintf("STACKER_STACKER_DIR=%s", sc.StackerDir),
30 | fmt.Sprintf("STACKER_OCI_DIR=%s", sc.OCIDir),
31 | fmt.Sprintf("STACKER_WORK_DIR=%s", sc.WorkDir),
32 | }
33 | }
34 |
35 | func (sc *StackerConfig) CacheFile() string {
36 | return path.Join(sc.StackerDir, "build.cache")
37 | }
38 |
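A quick illustration of what Substitutions exposes (hypothetical paths; these appear to be the STACKER_* placeholders usable via the same ${{...}} substitution syntax seen in the .bats recipes later in this dump):

package main

import (
	"fmt"

	"stackerbuild.io/stacker/pkg/types"
)

func main() {
	sc := types.StackerConfig{
		WorkDir:    "/work", // hypothetical paths
		StackerDir: "/work/.stacker",
		OCIDir:     "/work/oci",
		RootFSDir:  "/work/roots",
	}

	// prints: STACKER_ROOTFS_DIR=/work/roots, STACKER_STACKER_DIR=/work/.stacker,
	// STACKER_OCI_DIR=/work/oci, STACKER_WORK_DIR=/work
	for _, s := range sc.Substitutions() {
		fmt.Println(s)
	}

	fmt.Println(sc.CacheFile()) // /work/.stacker/build.cache
}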
--------------------------------------------------------------------------------
/pkg/types/imagesource.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "fmt"
5 | "path"
6 | "reflect"
7 | "strings"
8 |
9 | "github.com/pkg/errors"
10 | )
11 |
12 | // dockerishUrl represents a URL that looks like docker://image:tag; as of go
13 | // 1.12.9 these are no longer parsed correctly via the url.Parse() function,
14 | // since it complains about :tag not being a valid int (i.e. port number).
15 | type dockerishUrl struct {
16 | Scheme string
17 | Host string
18 | Tag string
19 | Path string
20 | }
21 |
22 | func NewDockerishUrl(thing string) (dockerishUrl, error) {
23 | parts := strings.SplitN(thing, "://", 2)
24 |
25 | if len(parts) < 2 {
26 | return dockerishUrl{Path: thing}, nil
27 | }
28 |
29 | url := dockerishUrl{Scheme: parts[0]}
30 | pathSplit := strings.SplitN(parts[1], "/", 2)
31 |
32 | url.Host = pathSplit[0]
33 | if len(pathSplit) == 2 {
34 | url.Path = "/" + pathSplit[1]
35 | }
36 |
37 | tagSplit := strings.SplitN(url.Host, ":", 2)
38 | if len(tagSplit) == 2 {
39 | url.Tag = tagSplit[1]
40 | }
41 |
42 | return url, nil
43 | }
44 |
45 | type ImageSource struct {
46 | Type string `yaml:"type" json:"type"`
47 | Url string `yaml:"url" json:"url,omitempty"`
48 | Tag string `yaml:"tag" json:"tag,omitempty"`
49 | Insecure bool `yaml:"insecure" json:"insecure,omitempty"`
50 | }
51 |
52 | func NewImageSource(containersImageString string) (*ImageSource, error) {
53 | ret := &ImageSource{}
54 | if strings.HasPrefix(containersImageString, "oci:") {
55 | ret.Type = OCILayer
56 | ret.Url = containersImageString[len("oci:"):]
57 | return ret, nil
58 | }
59 |
60 | url, err := NewDockerishUrl(containersImageString)
61 | if err != nil {
62 | return nil, err
63 | }
64 |
65 | switch url.Scheme {
66 | case "docker":
67 | ret.Type = DockerLayer
68 | ret.Url = containersImageString
69 | default:
70 | return nil, errors.Errorf("unknown image source type: %s", containersImageString)
71 | }
72 |
73 | return ret, nil
74 | }
75 |
76 | // Returns a URL that can be passed to github.com/containers/image handling
77 | // code.
78 | func (is *ImageSource) ContainersImageURL() (string, error) {
79 | switch is.Type {
80 | case DockerLayer:
81 | return is.Url, nil
82 | case OCILayer:
83 | return fmt.Sprintf("oci:%s", is.Url), nil
84 | default:
85 | return "", errors.Errorf("can't get containers/image url for source type: %s", is.Type)
86 | }
87 | }
88 |
89 | func (is *ImageSource) ParseTag() (string, error) {
90 | switch is.Type {
91 | case BuiltLayer:
92 | return is.Tag, nil
93 | case DockerLayer:
94 | url, err := NewDockerishUrl(is.Url)
95 | if err != nil {
96 | return "", err
97 | }
98 |
99 | if url.Path != "" {
100 | return path.Base(strings.Split(url.Path, ":")[0]), nil
101 | }
102 |
103 | // skopeo allows docker://centos:latest or
104 | // docker://docker.io/centos:latest; if we don't have a
105 | // url path, let's use the host as the image tag
106 | return strings.Split(url.Host, ":")[0], nil
107 | case OCILayer:
108 | pieces := strings.SplitN(is.Url, ":", 2)
109 | if len(pieces) != 2 {
110 | return "", errors.Errorf("bad OCI tag: %s", is.Type)
111 | }
112 |
113 | return pieces[1], nil
114 | default:
115 | return "", errors.Errorf("unsupported type: %s", is.Type)
116 | }
117 | }
118 |
119 | var (
120 | imageSourceFields []string
121 | )
122 |
123 | func init() {
124 | imageSourceFields = []string{}
125 | imageSourceType := reflect.TypeOf(ImageSource{})
126 | for i := 0; i < imageSourceType.NumField(); i++ {
127 | tag := imageSourceType.Field(i).Tag.Get("yaml")
128 | imageSourceFields = append(imageSourceFields, tag)
129 | }
130 | }
131 |
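A short sketch of the parsing behavior the dockerishUrl comment describes (not from the repo; values chosen to mirror the docker:// forms used elsewhere in this dump):

package main

import (
	"fmt"

	"stackerbuild.io/stacker/pkg/types"
)

func main() {
	u, _ := types.NewDockerishUrl("docker://localhost:8080/a/b/alpine:edge")
	fmt.Println(u.Scheme, u.Host, u.Path) // docker localhost:8080 /a/b/alpine:edge

	src, err := types.NewImageSource("docker://centos:latest")
	if err != nil {
		panic(err)
	}
	// with no URL path, ParseTag falls back to the host portion
	tag, _ := src.ParseTag()
	fmt.Println(tag) // centos
}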
--------------------------------------------------------------------------------
/pkg/types/layer_bind_test.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "encoding/json"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "gopkg.in/yaml.v2"
9 | )
10 |
11 | func TestUnmarshalBindsYamlAndJSON(t *testing.T) {
12 | assert := assert.New(t)
13 | tables := []struct {
14 | desc string
15 | yblob string
16 | jblob string
17 | expected Binds
18 | errstr string
19 | }{
20 | {desc: "proper array of source/dest bind allowed",
21 | yblob: "- source: src1\n dest: dest1\n",
22 | jblob: `[{"source": "src1", "dest": "dest1"}]`,
23 | expected: Binds{
24 | Bind{Source: "src1", Dest: "dest1"},
25 | }},
26 | {desc: "array of bind ascii art",
27 | yblob: "- src1 -> dest1\n- src2 -> dest2",
28 | jblob: `["src1 -> dest1", "src2 -> dest2"]`,
29 | expected: Binds{
30 | Bind{Source: "src1", Dest: "dest1"},
31 | Bind{Source: "src2", Dest: "dest2"},
32 | }},
33 | {desc: "example mixed valid ascii art and dict",
34 | yblob: "- src1 -> dest1\n- source: src2\n dest: dest2\n",
35 | jblob: `["src1 -> dest1", {"source": "src2", "dest": "dest2"}]`,
36 | expected: Binds{
37 | Bind{Source: "src1", Dest: "dest1"},
38 | Bind{Source: "src2", Dest: "dest2"},
39 | }},
40 | // golang encoding/json is case insensitive
41 | {desc: "capital Source/Dest is not allowed as yaml",
42 | yblob: "- Source: src1\n Dest: dest1\n",
43 | expected: Binds{},
44 | errstr: "xpected 'bind'"},
45 | {desc: "source is required",
46 | yblob: "- Dest: dest1\n",
47 | jblob: `[{"Dest": "dest1"}]`,
48 | expected: Binds{},
49 | errstr: "xpected 'bind'"},
50 | {desc: "must be an array",
51 | yblob: "source: src1\ndest: dest1\n",
52 | jblob: `{"source": "src1", "dest": "dest1"}`,
53 | expected: Binds{},
54 | errstr: "unmarshal"},
55 | }
56 | var err error
57 | found := Binds{}
58 | for _, t := range tables {
59 | err = yaml.Unmarshal([]byte(t.yblob), &found)
60 | if t.errstr == "" {
61 | if !assert.NoError(err, t.desc) {
62 | continue
63 | }
64 | assert.Equal(t.expected, found)
65 | } else {
66 | assert.ErrorContains(err, t.errstr, t.desc)
67 | }
68 | }
69 |
70 | for _, t := range tables {
71 | if t.jblob == "" {
72 | continue
73 | }
74 | err = json.Unmarshal([]byte(t.jblob), &found)
75 | if t.errstr == "" {
76 | if !assert.NoError(err, t.desc) {
77 | continue
78 | }
79 | assert.Equal(t.expected, found)
80 | } else {
81 | assert.ErrorContains(err, t.errstr, t.desc)
82 | }
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/pkg/types/layer_import_test.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "io/fs"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "gopkg.in/yaml.v2"
9 | )
10 |
11 | // the empty/unset Uid/Gid
12 | const eUGid = -1
13 |
14 | func modePtr(mode int) *fs.FileMode {
15 | m := fs.FileMode(mode)
16 | return &m
17 | }
18 |
19 | func TestGetImportFromInterface(t *testing.T) {
20 | assert := assert.New(t)
21 | hash1 := "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"
22 | tables := []struct {
23 | desc string
24 | val interface{}
25 | expected Import
26 | errstr string
27 | }{
28 | {desc: "basic string",
29 | val: "/path/to/file",
30 | expected: Import{Path: "/path/to/file", Uid: eUGid, Gid: eUGid}},
31 | {desc: "relative string",
32 | val: "path/to/file",
33 | expected: Import{Path: "path/to/file", Uid: eUGid, Gid: eUGid}},
34 | {desc: "dict no dest",
35 | val: map[interface{}]interface{}{
36 | "path": "/path/to/file",
37 | "hash": hash1,
38 | },
39 | expected: Import{Path: "/path/to/file", Dest: "", Hash: hash1, Uid: eUGid, Gid: eUGid}},
40 | {desc: "dest cannot be relative",
41 | val: map[interface{}]interface{}{
42 | "path": "src1",
43 | "dest": "dest1",
44 | },
45 | errstr: "cannot be relative",
46 | },
47 | {desc: "uid cannot be negative",
48 | val: map[interface{}]interface{}{
49 | "path": "src1",
50 | "uid": -2,
51 | },
52 | errstr: "cannot be negative",
53 | },
54 | {desc: "gid cannot be negative",
55 | val: map[interface{}]interface{}{
56 | "path": "src1",
57 | "gid": -2,
58 | },
59 | errstr: "cannot be negative",
60 | },
61 | {desc: "gid must be an int",
62 | val: map[interface{}]interface{}{
63 | "path": "src1",
64 | "gid": "100",
65 | },
66 | errstr: "not an integer",
67 | },
68 | {desc: "mode present",
69 | val: map[interface{}]interface{}{
70 | "path": "src1",
71 | "mode": 0755,
72 | },
73 | expected: Import{Path: "src1", Dest: "", Mode: modePtr(0755), Uid: eUGid, Gid: eUGid}},
74 | {desc: "path must be present",
75 | val: map[interface{}]interface{}{
76 | "uid": 0,
77 | "dest": "/path/to/file",
78 | },
79 | errstr: "No 'path' entry found",
80 | },
81 | {desc: "bad type - list",
82 | val: []interface{}{"foo", "bar"},
83 | errstr: "could not read imports entry",
84 | },
85 | {desc: "bad type - non-string-keys",
86 | val: map[interface{}]interface{}{
87 | 1: "target",
88 | },
89 | errstr: "is not a string",
90 | },
91 | {desc: "bad type - path",
92 | val: map[interface{}]interface{}{
93 | "path": 1111,
94 | },
95 | errstr: "is not a string",
96 | },
97 | }
98 |
99 | var found Import
100 | var err error
101 | for _, t := range tables {
102 | found, err = getImportFromInterface(t.val)
103 | if t.errstr == "" {
104 | assert.NoError(err, t.desc)
105 | assert.Equal(t.expected, found, t.desc)
106 | } else {
107 | assert.ErrorContains(err, t.errstr, t.desc)
108 | }
109 | }
110 | }
111 |
112 | func TestUnmarshalImports(t *testing.T) {
113 | assert := assert.New(t)
114 | tables := []struct {
115 | desc string
116 | yblob string
117 | expected Imports
118 | errstr string
119 | }{
120 | {desc: "import can be a singular string",
121 | yblob: "f1",
122 | expected: Imports{
123 | Import{Path: "f1", Uid: eUGid, Gid: eUGid},
124 | }},
125 | {desc: "import might be present and explicit null",
126 | yblob: "null",
127 | expected: nil},
128 | {desc: "imports should not be a dict",
129 | yblob: "path: /path/to/file\ndest: /path/to/dest\n",
130 | expected: Imports{},
131 | errstr: "xpected an array"},
132 | {desc: "example valid mixed string and dict",
133 | yblob: "- f1\n- path: f2\n",
134 | expected: Imports{
135 | Import{Path: "f1", Uid: eUGid, Gid: eUGid},
136 | Import{Path: "f2", Uid: eUGid, Gid: eUGid},
137 | }},
138 | }
139 | var err error
140 | found := Imports{}
141 | for _, t := range tables {
142 | err = yaml.Unmarshal([]byte(t.yblob), &found)
143 | if t.errstr == "" {
144 | if !assert.NoError(err, t.desc) {
145 | continue
146 | }
147 | assert.Equal(t.expected, found)
148 | } else {
149 | assert.ErrorContains(err, t.errstr, t.desc)
150 | }
151 | }
152 | }
153 |
--------------------------------------------------------------------------------
/pkg/types/layer_type.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "strings"
7 |
8 | ispec "github.com/opencontainers/image-spec/specs-go/v1"
9 | "github.com/pkg/errors"
10 | "machinerun.io/atomfs/pkg/erofs"
11 | "machinerun.io/atomfs/pkg/squashfs"
12 | "machinerun.io/atomfs/pkg/verity"
13 | )
14 |
15 | var ErrEmptyLayers = errors.New("empty layers")
16 |
17 | type LayerType struct {
18 | Type string
19 | Verity verity.VerityMetadata
20 | }
21 |
22 | func (lt LayerType) String() string {
23 | if lt.Verity {
24 | return fmt.Sprintf("%s+verity", lt.Type)
25 | }
26 | return lt.Type
27 | }
28 |
29 | func (lt LayerType) MarshalText() ([]byte, error) {
30 | return []byte(fmt.Sprintf("%s+%v", lt.Type, lt.Verity)), nil
31 | }
32 |
33 | func (lt *LayerType) UnmarshalText(text []byte) error {
34 | fields := strings.Split(string(text), "+")
35 | if len(fields) > 2 {
36 | return errors.Errorf("invalid layer type %s", string(text))
37 | }
38 |
39 | lt.Type = fields[0]
40 | if len(fields) == 1 {
41 | return nil
42 | }
43 |
44 | result, err := strconv.ParseBool(fields[1])
45 | if err != nil {
46 | return errors.Wrapf(err, "bad verity bool: %s", fields[1])
47 | }
48 |
49 | lt.Verity = verity.VerityMetadata(result)
50 |
51 | return nil
52 | }
53 |
54 | func NewLayerType(lt string, verity verity.VerityMetadata) (LayerType, error) {
55 | switch lt {
56 | case "squashfs", "erofs":
57 | return LayerType{Type: lt, Verity: verity}, nil
58 | case "tar":
59 | return LayerType{Type: lt}, nil
60 | default:
61 | return LayerType{}, errors.Errorf("invalid layer type: %s", lt)
62 | }
63 | }
64 |
65 | func NewLayerTypeManifest(manifest ispec.Manifest) (LayerType, error) {
66 | if len(manifest.Layers) == 0 {
67 | return NewLayerType("tar", verity.VerityMetadataMissing)
68 | }
69 |
70 | _, verityMetadataPresent := manifest.Layers[0].Annotations[verity.VerityRootHashAnnotation]
71 |
72 | switch manifest.Layers[0].MediaType {
73 | case squashfs.BaseMediaTypeLayerSquashfs:
74 | // older stackers generated media types without compression information
75 | fallthrough
76 | case squashfs.GenerateSquashfsMediaType(squashfs.GzipCompression):
77 | fallthrough
78 | case squashfs.GenerateSquashfsMediaType(squashfs.ZstdCompression):
79 | return NewLayerType("squashfs", verity.VerityMetadata(verityMetadataPresent))
80 | case erofs.BaseMediaTypeLayerErofs:
81 | // older stackers generated media types without compression information
82 | fallthrough
83 | case erofs.GenerateErofsMediaType(erofs.LZ4HCCompression):
84 | fallthrough
85 | case erofs.GenerateErofsMediaType(erofs.LZ4Compression):
86 | fallthrough
87 | case erofs.GenerateErofsMediaType(erofs.ZstdCompression):
88 | return NewLayerType("erofs", verity.VerityMetadata(verityMetadataPresent))
89 | case ispec.MediaTypeImageLayerGzip:
90 | fallthrough
91 | case ispec.MediaTypeImageLayer:
92 | return NewLayerType("tar", verity.VerityMetadataMissing)
93 | default:
94 | return LayerType{}, errors.Errorf("invalid layer type %s", manifest.Layers[0].MediaType)
95 | }
96 | }
97 |
98 | func NewLayerTypes(lts []string, verity verity.VerityMetadata) ([]LayerType, error) {
99 | ret := []LayerType{}
100 | for _, lt := range lts {
101 | hoisted, err := NewLayerType(lt, verity)
102 | if err != nil {
103 | return nil, err
104 | }
105 |
106 | ret = append(ret, hoisted)
107 | }
108 |
109 | return ret, nil
110 | }
111 |
112 | func (lt LayerType) LayerName(tag string) string {
113 | if lt.Type == "tar" {
114 | return tag
115 | }
116 |
117 | return fmt.Sprintf("%s-%s", tag, lt.Type)
118 | }
119 |
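A short round-trip sketch for the text encoding above (uses only the exported helpers in this file; verity.VerityMetadata is a bool-like type from machinerun.io/atomfs):

package main

import (
	"fmt"

	"machinerun.io/atomfs/pkg/verity"
	"stackerbuild.io/stacker/pkg/types"
)

func main() {
	lt, err := types.NewLayerType("squashfs", verity.VerityMetadata(true))
	if err != nil {
		panic(err)
	}

	text, _ := lt.MarshalText()
	fmt.Println(string(text))        // "squashfs+true"
	fmt.Println(lt.String())         // "squashfs+verity"
	fmt.Println(lt.LayerName("app")) // "app-squashfs"

	var parsed types.LayerType
	if err := parsed.UnmarshalText(text); err != nil {
		panic(err)
	}
	fmt.Println(parsed == lt) // true
}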
--------------------------------------------------------------------------------
/pkg/types/stackerfiles.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "path/filepath"
5 |
6 | "github.com/pkg/errors"
7 | "stackerbuild.io/stacker/pkg/log"
8 | )
9 |
10 | // Logic for working with multiple StackerFiles
11 | type StackerFiles map[string]*Stackerfile
12 |
13 | // NewStackerFiles reads multiple Stackerfiles from a list of paths and applies substitutions
14 | // It adds the Stackerfiles mentioned in the prerequisite paths to the results
15 | func NewStackerFiles(paths []string, validateHash bool, substituteVars []string) (StackerFiles, error) {
16 | sfm := make(map[string]*Stackerfile, len(paths))
17 |
18 | // Iterate over list of paths to stackerfiles
19 | for _, path := range paths {
20 | log.Debugf("initializing stacker recipe: %s", path)
21 |
22 | // Read this stackerfile
23 | sf, err := NewStackerfile(path, validateHash, substituteVars)
24 | if err != nil {
25 | return nil, err
26 | }
27 |
28 | // Add using absolute path to make sure the entries are unique
29 | absPath, err := filepath.Abs(path)
30 | if err != nil {
31 | return nil, err
32 | }
33 | if _, ok := sfm[absPath]; !ok {
34 | sfm[absPath] = sf
35 | }
36 |
37 | // Determine correct path of prerequisites
38 | prerequisites, err := sf.Prerequisites()
39 | if err != nil {
40 | return nil, err
41 | }
42 |
43 | // Need to also add stackerfile dependencies of this stackerfile to the map of stackerfiles
44 | depStackerFiles, err := NewStackerFiles(prerequisites, validateHash, substituteVars)
45 | if err != nil {
46 | return nil, err
47 | }
48 | for depPath, depStackerFile := range depStackerFiles {
49 | sfm[depPath] = depStackerFile
50 | }
51 | }
52 |
53 | // now, make sure output layer names are unique
54 | names := map[string]string{}
55 | for path, sf := range sfm {
56 | for _, layerName := range sf.FileOrder {
57 | if otherFile, ok := names[layerName]; ok {
58 | return nil, errors.Errorf("duplicate layer name: both %s and %s have %s", otherFile, path, layerName)
59 | }
60 |
61 | names[layerName] = path
62 | }
63 | }
64 |
65 | return sfm, nil
66 | }
67 |
68 | // LookupLayerDefinition searches for the Layer entry within the Stackerfiles
69 | func (sfm StackerFiles) LookupLayerDefinition(name string) (Layer, bool) {
70 | // Search for the layer in all of the stackerfiles
71 | for _, sf := range sfm {
72 | l, found := sf.Get(name)
73 | if found {
74 | return l, true
75 | }
76 | }
77 | return Layer{}, false
78 | }
79 |
--------------------------------------------------------------------------------
/pkg/types/storage.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | type Storage interface {
4 | // Name of this storage driver (e.g. "overlay")
5 | Name() string
6 |
7 | // Create does the initial work to create a storage tag to be used
8 | // in later operations.
9 | Create(path string) error
10 |
11 | // SetupEmptyRootfs() sets up an empty rootfs for contents to be
12 | // written in (e.g. if it's a base tar file to be extracted).
13 | SetupEmptyRootfs(name string) error
14 |
15 | // Snapshot "copies" (maybe in a fs-specific fast way) one tag to
16 | // another; snapshots should be readonly or not generally modifiable.
17 | Snapshot(source string, target string) error
18 |
19 | // Restore is like snapshot (in fact, the implementations may be the
20 | // same), but marks the result as writable.
21 | Restore(source string, target string) error
22 |
23 | // Delete a storage tag.
24 | Delete(path string) error
25 |
26 | // Test if a storage tag exists.
27 | Exists(thing string) bool
28 |
29 | // Create a temporary writable snapshot of the source, returning the
30 | // snapshot's tag and a cleanup function.
31 | TemporaryWritableSnapshot(source string) (string, func(), error)
32 |
33 | // Clean the storage: do unmounting, delete all caches/tags, etc.
34 | Clean() error
35 |
36 | // GC any storage that's no longer relevant for the layers in the
37 | // layer-bases cache or output directory (note that this implies a GC
38 | // of those OCI dirs as well).
39 | GC() error
40 |
41 | // Unpack unpacks the specified tag from the layer-bases OCI cache
42 | // into the specified "name" (working dir), whatever that means
43 | // for this storage.
44 | //
45 | // Unpack can do fancy things like using previously cached unpacks to
46 | // speed things up, etc.
47 | Unpack(tag, name string) error
48 |
49 | // Repack repacks the specified working dir into the specified OCI dir.
50 | Repack(name string, layer Layer, layerTypes []LayerType, sfm StackerFiles) error
51 |
52 | // GetLXCRootfsConfig returns the string that should be set as
53 | // lxc.rootfs.path in the LXC container's config.
54 | GetLXCRootfsConfig(name string) (string, error)
55 |
56 | // TarExtractLocation returns the location that a tar-based rootfs
57 | // should be extracted to
58 | TarExtractLocation(name string) string
59 |
60 | // Add overlay_dirs into overlay metadata so that later we can mount them
61 | // in the lxc container, works only for storage-type 'overlay'
62 | SetOverlayDirs(name string, overlayDirs []OverlayDir, layerTypes []LayerType) error
63 | }
64 |
--------------------------------------------------------------------------------
/test/.gitignore:
--------------------------------------------------------------------------------
1 | centos
2 | ubuntu
3 |
--------------------------------------------------------------------------------
/test/annotations-namespace.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "namespace arg works" {
12 | cat > stacker.yaml <<"EOF"
13 | thing:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: ls
18 | EOF
19 | stacker build --annotations-namespace=namespace.example --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
20 | [ "$status" -eq 0 ]
21 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
22 | namespace=$(cat oci/blobs/sha256/$manifest | jq -r .annotations | cut -f1 -d:)
23 | [[ "$namespace" == *"namespace.example"* ]]
24 | }
25 |
26 | @test "default namespace arg works" {
27 | cat > stacker.yaml <<"EOF"
28 | thing:
29 | from:
30 | type: oci
31 | url: ${{BUSYBOX_OCI}}
32 | run: ls
33 | EOF
34 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
35 | [ "$status" -eq 0 ]
36 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
37 | namespace=$(cat oci/blobs/sha256/$manifest | jq -r .annotations | cut -f1 -d:)
38 | [[ "$namespace" == *"io.stackeroci"* ]]
39 | }
40 |
--------------------------------------------------------------------------------
/test/annotations.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "annotations work" {
12 | cat > stacker.yaml <<"EOF"
13 | thing:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: ls
18 | annotations:
19 | a.b.c.key: val
20 | EOF
21 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
22 | [ "$status" -eq 0 ]
23 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
24 | cat oci/blobs/sha256/$manifest | jq .
25 | key=$(cat oci/blobs/sha256/$manifest | jq -r .annotations | cut -f1 -d:)
26 | echo $key
27 | val=$(cat oci/blobs/sha256/$manifest | jq -r .annotations | cut -f2 -d:)
28 | echo $val
29 | [[ "$key" == *"a.b.c.key"* ]]
30 | [[ "$val" == *"val"* ]]
31 | }
32 |
--------------------------------------------------------------------------------
/test/args.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "workdir args" {
12 | cat > stacker.yaml <<"EOF"
13 | parent:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | child:
18 | from:
19 | type: built
20 | tag: parent
21 | run: |
22 | echo hello world
23 | EOF
24 | # check defaults
25 | tmpdir=$(mktemp -d)
26 | chmod -R a+rwx $tmpdir
27 | stacker --work-dir $tmpdir build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
28 | [ -d $tmpdir ]
29 | [ -d $tmpdir/.stacker ]
30 | [ -d $tmpdir/roots ]
31 | [ -d $tmpdir/oci ]
32 | rm -rf $tmpdir
33 |
34 | # check overrides
35 | tmpdir=$(mktemp -d)
36 | chmod -R a+rwx $tmpdir
37 | stackerdir=$(mktemp -d)
38 | chmod -R a+rwx $stackerdir
39 | stacker --work-dir $tmpdir --stacker-dir $stackerdir build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
40 | [ -d $tmpdir ]
41 | [ ! -d $tmpdir/.stacker ]
42 | [ -d $tmpdir/roots ]
43 | [ -d $tmpdir/oci ]
44 | [ -d $stackerdir ]
45 | rm -rf $tmpdir
46 | rm -rf $stackerdir
47 | }
48 |
--------------------------------------------------------------------------------
/test/asterisk.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "wildcards work in run section" {
12 | cat > stacker.yaml <<"EOF"
13 | a:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: |
18 | mkdir /mybin
19 | cp /bin/* /mybin
20 | EOF
21 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
22 | umoci unpack --image oci:a dest
23 | [ "$status" -eq 0 ]
24 |
25 |
26 | for i in dest/rootfs/bin/*; do
27 | stat dest/rootfs/mybin/$(basename $i)
28 | done
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/test/binds.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "bind as string slice" {
12 | cat > stacker.yaml <<"EOF"
13 | bind-test:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | binds:
18 | - ${{bind_path}} -> /root/tree1/foo
19 | run: |
20 | touch /root/tree1/foo/bar
21 | EOF
22 | mkdir -p tree1/foo
23 |
24 | # since we are creating the directory as
25 | # real root and then `touch`-ing a file
26 | # from inside the user NS, others need
27 | # rw permission on it
28 | chmod +666 tree1/foo
29 |
30 | bind_path=$(realpath tree1/foo)
31 |
32 | out=$(stacker build --substitute bind_path=${bind_path} --substitute BUSYBOX_OCI=$BUSYBOX_OCI)
33 |
34 | [[ "${out}" =~ ^(.*filesystem bind-test built successfully)$ ]]
35 |
36 | stat tree1/foo/bar
37 | }
38 |
39 | @test "bind as struct" {
40 | cat > stacker.yaml <<"EOF"
41 | bind-test:
42 | from:
43 | type: oci
44 | url: ${{BUSYBOX_OCI}}
45 | binds:
46 | - source: ${{bind_path1}}
47 | dest: /root/tree1/foo
48 | - source: ${{bind_path2}}
49 | run: |
50 | touch /root/tree1/foo/bar
51 | [ -f "${{bind_path2}}/file1" ]
52 | EOF
53 | mkdir -p tree1/foo tree2/bar
54 | touch tree2/bar/file1
55 |
56 | # since we are creating the directory as
57 | # real root and then `touch`-ing a file
58 | # from inside the user NS, others need
59 | # rw permission on it
60 | chmod +666 tree1/foo
61 |
62 | bind_path1=$(realpath tree1/foo)
63 | bind_path2=$(realpath tree2/bar)
64 |
65 | out=$(stacker build \
66 | "--substitute=bind_path1=${bind_path1}" \
67 | "--substitute=bind_path2=${bind_path2}" \
68 | "--substitute=BUSYBOX_OCI=$BUSYBOX_OCI" ) || {
69 | printf "%s\n" "$out" 1>&2
70 | exit 1
71 | }
72 | [[ "${out}" =~ ^(.*filesystem bind-test built successfully)$ ]]
73 |
74 | stat tree1/foo/bar
75 | }
76 |
77 | @test "fail on missing bind source" {
78 | cat > stacker.yaml <<"EOF"
79 | hello-binds:
80 | from:
81 | type: docker
82 | url: ${{BUSYBOX_OCI}}
83 | binds:
84 | - thats_no_dir -> /mydir
85 | run: |
86 | mkdir -p /hello-stacker-app
87 | echo 'echo "Hello Stacker!"' > /hello-stacker-app/hello.sh
88 | EOF
89 |
90 | bad_stacker build -f stacker.yaml --substitute=BUSYBOX_OCI="$BUSYBOX_OCI"
91 | }
92 |
--------------------------------------------------------------------------------
/test/broken-link.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | rm -rf dir || true
9 | cleanup
10 | }
11 |
12 | @test "importing broken symlink is ok" {
13 | cat > stacker.yaml <<"EOF"
14 | broken_link:
15 | from:
16 | type: oci
17 | url: ${{BUSYBOX_OCI}}
18 | imports:
19 | - dir
20 | run: cp -a /stacker/imports/dir/testln /testln
21 | EOF
22 | mkdir -p dir
23 | ln -s broken dir/testln
24 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
25 | umoci unpack --image oci:broken_link dest
26 | [ "$status" -eq 0 ]
27 |
28 | [ -L dest/rootfs/testln ]
29 | }
30 |
--------------------------------------------------------------------------------
/test/built-type.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "built type layers are restored correctly" {
12 | cat > stacker.yaml <<"EOF"
13 | parent:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: |
18 | touch /root/parent
19 | cat /proc/self/mountinfo
20 | child:
21 | from:
22 | type: built
23 | tag: parent
24 | run: |
25 | cat /proc/self/mountinfo
26 | touch /root/child
27 | EOF
28 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
29 |
30 | umoci --log=debug unpack --image oci:parent dest/parent
31 | [ "$status" -eq 0 ]
32 | [ -f dest/parent/rootfs/root/parent ]
33 |
34 | umoci --log info unpack --image oci:child dest/child # say my name say my name
35 | [ "$status" -eq 0 ]
36 | [ -f dest/child/rootfs/root/child ]
37 | }
38 |
--------------------------------------------------------------------------------
/test/check.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | @test "stacker check is reasonable priv overlay" {
4 | require_privilege priv
5 | stacker check
6 | }
7 |
8 | @test "stacker check is reasonable unpriv overlay" {
9 | require_privilege unpriv
10 |
11 | # if we don't have overlay support, stacker check should fail, otherwise it
12 | # should succeed
13 | run sudo -u $SUDO_USER "${ROOT_DIR}/stacker" --debug internal-go testsuite-check-overlay
14 | if [ "$status" -eq 50 ]; then
15 | bad_stacker check
16 | else
17 | stacker check
18 | fi
19 | }
20 |
--------------------------------------------------------------------------------
/test/chroot.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | rm -rf recursive bing.ico || true
10 | }
11 |
12 | @test "chroot goes to a reasonable place" {
13 | cat > stacker.yaml <<"EOF"
14 | thing:
15 | from:
16 | type: oci
17 | url: ${{BUSYBOX_OCI}}
18 | run: touch /test
19 | EOF
20 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
21 | echo "[ -f /test ]" | stacker chroot --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
22 | }
23 |
--------------------------------------------------------------------------------
/test/clean.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "clean of unpriv overlay works" {
12 | cat > stacker.yaml <<"EOF"
13 | test:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | EOF
18 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
19 | stacker clean
20 | }
21 |
--------------------------------------------------------------------------------
/test/config.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | rm -rf *-oci *-stacker *-roots || true
10 | }
11 |
12 | @test "config args work" {
13 | require_privilege priv
14 |
15 | local tmpd=$(pwd)
16 | echo "tmpd $tmpd"
17 | cat > stacker.yaml <<"EOF"
18 | test:
19 | from:
20 | type: oci
21 | url: ${{BUSYBOX_OCI}}
22 | EOF
23 |
24 | stacker "--oci-dir=$tmpd/args-oci" "--stacker-dir=$tmpd/args-stacker" \
25 | "--roots-dir=$tmpd/args-roots" build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
26 | [ -d "$tmpd/args-oci" ]
27 | [ -d "$tmpd/args-stacker" ]
28 | }
29 |
30 | @test "config file works" {
31 | require_privilege priv
32 |
33 | local tmpd=$(pwd)
34 | echo "tmpd $tmpd"
35 | find $tmpd
36 | cat > stacker.yaml <<"EOF"
37 | test:
38 | from:
39 | type: oci
40 | url: ${{BUSYBOX_OCI}}
41 | EOF
42 | cat > "$tmpd/config.yaml" < "$expected"
71 |
72 | # shellcheck disable=SC1039
73 | cat > "$stacker_yaml" <<"EOF"
74 | my-build:
75 | build_only: true
76 | from:
77 | type: oci
78 | url: ${{BUSYBOX_OCI}}
79 | run: |
80 | #!/bin/sh
81 | set -e
82 | outd=/my-publish
83 | rm -Rf "$outd"
84 | mkdir -p "$outd"
85 | cd "$outd"
86 | cat > content.txt < "$config_yaml" < Dockerfile < stacker.yaml < stacker.yaml < stacker.yaml <<"EOF"
26 | test:
27 | from:
28 | type: oci
29 | tag: ${{BUSYBOX_OCI}}
30 | imports:
31 | - stacker://foo/bar
32 | - stacker://baz/foo
33 | EOF
34 | bad_stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
35 | echo "${output}" | grep "couldn't find dependencies for test: stacker://foo/bar, stacker://baz/foo"
36 | }
37 |
38 | @test "stacker:// style nesting w/ type built works" {
39 | cat > stacker.yaml <<"EOF"
13 | parent:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: |
18 | mkdir /dir
19 | ln -s /dir /link
20 | child:
21 | from:
22 | type: built
23 | tag: parent
24 | run: |
25 | touch /link/test
26 | EOF
27 | stacker --storage-type=overlay build --layer-type=squashfs --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
28 |
29 | manifest=$(cat oci/index.json | jq -r .manifests[1].digest | cut -f2 -d:)
30 | layer1=$(cat oci/blobs/sha256/$manifest | jq -r .layers[1].digest | cut -f2 -d:)
31 | layer2=$(cat oci/blobs/sha256/$manifest | jq -r .layers[2].digest | cut -f2 -d:)
32 |
33 | echo layer1 $layer1
34 | echo layer2 $layer2
35 | ls -al roots
36 | ls -al roots/*/overlay/
37 | [ -h roots/sha256_$layer1/overlay/link ]
38 | [ -d roots/sha256_$layer1/overlay/dir ]
39 |
40 | [ ! -f roots/sha256_$layer2/overlay/link ]
41 | [ -d roots/sha256_$layer2/overlay/dir ]
42 | [ -f roots/sha256_$layer2/overlay/dir/test ]
43 | }
44 |
--------------------------------------------------------------------------------
/test/docker-base.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "importing from a docker hub" {
12 | cat > stacker.yaml <<"EOF"
13 | base:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | cmd: foo
18 | layer1:
19 | from:
20 | type: built
21 | tag: base
22 | entrypoint: bar
23 | layer2:
24 | from:
25 | type: built
26 | tag: layer1
27 | full_command: baz
28 | EOF
29 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
30 |
31 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
32 | config=$(cat oci/blobs/sha256/$manifest | jq -r .config.digest | cut -f2 -d:)
33 | [ "$(cat oci/blobs/sha256/$config | jq -r '.config.Cmd | join("")')" = "foo" ]
34 |
35 | manifest=$(cat oci/index.json | jq -r .manifests[1].digest | cut -f2 -d:)
36 | config=$(cat oci/blobs/sha256/$manifest | jq -r .config.digest | cut -f2 -d:)
37 | [ "$(cat oci/blobs/sha256/$config | jq -r '.config.Entrypoint | join("")')" = "bar" ]
38 |
39 | manifest=$(cat oci/index.json | jq -r .manifests[2].digest | cut -f2 -d:)
40 | config=$(cat oci/blobs/sha256/$manifest | jq -r .config.digest | cut -f2 -d:)
41 | [ "$(cat oci/blobs/sha256/$config | jq -r '.config.Cmd')" = "null" ]
42 | [ "$(cat oci/blobs/sha256/$config | jq -r '.config.Entrypoint | join("")')" = "baz" ]
43 | }
44 |
--------------------------------------------------------------------------------
/test/env.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "/stacker is ro" {
12 | mkdir -p .stacker/imports/test
13 | touch .stacker/imports/test/foo
14 | chmod -R 777 .stacker/imports
15 |
16 | cat > stacker.yaml <<"EOF"
17 | test:
18 | from:
19 | type: oci
20 | url: ${{BUSYBOX_OCI}}
21 | run: |
22 | # make sure that /stacker is readonly
23 | grep "/stacker" /proc/mounts | grep "[[:space:]]ro[[:space:],]"
24 |
25 | # make sure stacker deleted the non-import
26 | [ ! -f /stacker/foo ]
27 | EOF
28 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
29 | }
30 |
31 | @test "two stackers can't run at the same time" {
32 | cat > stacker.yaml <<"EOF"
33 | test:
34 | from:
35 | type: oci
36 | url: ${{BUSYBOX_OCI}}
37 | run: |
38 | echo hello world
39 | EOF
40 | mkdir -p roots .stacker
41 | touch roots/.lock .stacker/.lock
42 | chmod 777 -R roots .stacker
43 |
44 | (
45 | flock 9
46 | bad_stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
47 | echo "${output}" | grep "couldn't acquire lock"
48 | ) 9 stacker.yaml <<"EOF"
15 | layer1:
16 | from:
17 | type: oci
18 | url: ${{BUSYBOX_OCI}}
19 | imports:
20 | - myfile.txt
21 | run: |
22 | cp /stacker/imports/myfile.txt /my-file
23 | EOF
24 | startdir="$PWD"
25 | wkdir="$PWD/work-dir"
26 | bdir="$PWD/build-dir"
27 | grabdir="$PWD/grab-dir"
28 | mkdir "$wkdir" "$grabdir" "$bdir"
29 | give_user_ownership "$wkdir" "$grabdir" "$bdir"
30 |
31 | echo "hello world" > myfile.txt
32 | expected_sha=$(sha myfile.txt)
33 |
34 | cd "$bdir"
35 | stacker "--work-dir=$wkdir" build "--stacker-file=$startdir/stacker.yaml" --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
36 | dir_is_empty . ||
37 | test_error "build dir had unexpected files: $_RET_EXTRA"
38 |
39 | cd "$grabdir"
40 | stacker "--work-dir=$wkdir" grab layer1:/my-file
41 | [ -f my-file ]
42 | found_sha=$(sha my-file)
43 | [ "${expected_sha}" = "${found_sha}" ]
44 |
45 | dir_has_only . my-file ||
46 | test_error "grab produced extra files." \
47 | "missing=${_RET_MISSING} extra=${_RET_EXTRA}"
48 | }
49 |
50 |
--------------------------------------------------------------------------------
/test/gzip.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "import various sizes" {
12 | test_copy_buffer_size 512k tar
13 | test_copy_buffer_size 2m tar
14 | test_copy_buffer_size 512k tar.gz
15 | test_copy_buffer_size 2m tar.gz
16 | }
17 |
--------------------------------------------------------------------------------
/test/import-http.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | mkdir -p reference
6 | mkdir -p dest
7 | rm -f nm_orig
8 | wget http://network-test.debian.org/nm -O reference/nm_orig
9 | mkdir img
10 | # Separate the base layer download from the download of the test file for imports:
11 | # we want network access for the base image download in img/stacker1.yaml,
12 | # but the network disconnected for the test file download
13 | cat > img/stacker1.yaml <<"EOF"
14 | busybox_base:
15 | from:
16 | type: oci
17 | url: ${{BUSYBOX_OCI}}
18 | run: |
19 | ls
20 | EOF
21 | cat > img/stacker1.yaml <<"EOF"
78 | busybox_base:
79 | from:
80 | type: oci
81 | url: ${{BUSYBOX_OCI}}
82 | imports:
83 | - path: https://www.cisco.com/favicon.ico
84 | dest: /dest/icon
85 | run: |
86 | [ -f /dest/icon ]
87 | [ ! -f /dest/favicon.ico ]
88 | [ ! -f /stacker/favicon.ico ]
89 | EOF
90 | # Build base image
91 | stacker build -f img/stacker1.yaml --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
92 | umoci ls --layout oci
93 | }
94 |
95 | # Ideally there would be tests for cache hits/misses against servers that provide a hash
96 |
--------------------------------------------------------------------------------
/test/invalid.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "bad stacker:// import" {
12 | cat > stacker.yaml <<"EOF"
13 | bad:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | imports:
18 | - stacker://idontexist/file
19 | EOF
20 | bad_stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
21 | }
22 |
23 | @test "invalid yaml entry" {
24 | cat > stacker.yaml <<"EOF"
25 | foo:
26 | notanentry:
27 | foo: bar
28 | EOF
29 | bad_stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
30 | }
31 |
32 | @test "missing tag for base layer of type built" {
33 | cat > stacker1.yaml <<"EOF"
34 | layer1:
35 | from:
36 | type: oci
37 | url: ${{BUSYBOX_OCI}}
38 | EOF
39 | cat > stacker2.yaml <<"EOF"
40 | config:
41 | prerequisites:
42 | - stacker1.yaml
43 | layer2:
44 | from:
45 | type: built
46 | url: ${{BUSYBOX_OCI}}
47 | EOF
48 | bad_stacker build -f stacker2.yaml --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
49 | }
50 |
--------------------------------------------------------------------------------
/test/labels.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "generate_labels generates oci labels" {
12 | cat > stacker.yaml <<"EOF"
13 | label:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | generate_labels: |
18 | echo -n "rocks" > /stacker/oci-labels/meshuggah
19 | EOF
20 |
21 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
22 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
23 | config=$(cat oci/blobs/sha256/$manifest | jq -r .config.digest | cut -f2 -d:)
24 | [ "$(cat "oci/blobs/sha256/$config" | jq -r .config.Labels.meshuggah)" = "rocks" ]
25 | }
26 |
--------------------------------------------------------------------------------
/test/log.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | rm logfile || true
10 | }
11 |
12 | @test "log --debug works" {
13 | # debug is passed by default in the tests
14 | stacker build --help
15 | echo "$output" | grep "stacker version"
16 | }
17 |
18 | @test "--debug and --quiet together fail" {
19 | bad_stacker --quiet build --help
20 | }
21 |
22 | @test "--quiet works" {
23 | run "${ROOT_DIR}/stacker" --quiet build --help
24 | [ -z "$(echo "$output" | grep "stacker version")" ]
25 | }
26 |
27 | @test "--log-file works" {
28 | stacker --log-file=logfile build --help
29 | grep "stacker version" logfile
30 | }
31 |
32 | @test "--progress works" {
33 | cat > stacker.yaml <<"EOF"
34 | test:
35 | from:
36 | type: oci
37 | url: ${{BUSYBOX_OCI}}
38 | EOF
39 |
40 | stacker --progress build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
41 | echo "$output" | grep "Copying blob"
42 | }
43 |
44 | @test "no progress when not attached to a terminal" {
45 | cat > stacker.yaml <<"EOF"
46 | test:
47 | from:
48 | type: oci
49 | url: ${{BUSYBOX_OCI}}
50 | EOF
51 |
52 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
53 | [ -z "$(echo "$output" | grep "Copying blob")" ]
54 | }
55 |
--------------------------------------------------------------------------------
/test/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | import argparse
4 | import glob
5 | import multiprocessing
6 | import os
7 | import subprocess
8 | import sys
9 |
10 | priv_levels=("priv", "unpriv")
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument("--privilege-level", choices=priv_levels)
14 | parser.add_argument("--jobs", type=int, default=multiprocessing.cpu_count())
15 | parser.add_argument("tests", nargs="*", default=glob.glob("./test/*.bats"))
16 |
17 | options = parser.parse_args()
18 |
19 | priv_to_test=priv_levels
20 |
21 | if options.privilege_level is not None:
22 | priv_to_test = [options.privilege_level]
23 |
24 | for priv in priv_to_test:
25 | cmd = ["bats", "--jobs", str(options.jobs), "--tap", "--timing"]
26 | cmd.extend(options.tests)
27 |
28 | env = os.environ.copy()
29 | env["PRIVILEGE_LEVEL"] = priv
30 |
31 | print("running tests in modes:", priv)
32 | try:
33 | subprocess.check_call(cmd, env=env)
34 | except subprocess.CalledProcessError:
35 | print("tests in modes:", priv, "failed")
36 | sys.exit(1)
37 |
--------------------------------------------------------------------------------
/test/multi-arch.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "multi-arch/os support" {
12 | cat > stacker.yaml <<"EOF"
13 | busybox:
14 | os: darwin
15 | arch: arm64
16 | from:
17 | type: oci
18 | url: ${{BUSYBOX_OCI}}
19 | imports:
20 | - https://www.cisco.com/favicon.ico
21 | EOF
22 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
23 |
24 | # check OCI image generation
25 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
26 | layer=$(cat oci/blobs/sha256/$manifest | jq -r .layers[0].digest)
27 | config=$(cat oci/blobs/sha256/$manifest | jq -r .config.digest | cut -f2 -d:)
28 | [ "$(cat oci/blobs/sha256/$config | jq -r '.architecture')" = "arm64" ]
29 | [ "$(cat oci/blobs/sha256/$config | jq -r '.os')" = "darwin" ]
30 | }
31 |
32 | @test "multi-arch/os bad config fails" {
33 | cat > stacker.yaml <<"EOF"
34 | busybox:
35 | os:
36 | from:
37 | type: oci
38 | url: ${{BUSYBOX_OCI}}
39 | imports:
40 | - https://www.cisco.com/favicon.ico
41 | EOF
42 | bad_stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
43 | [ "$status" -eq 1 ]
44 | }
45 |
--------------------------------------------------------------------------------
/test/oci-import.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "oci input and output directories can be the same" {
12 | cat > stacker.yaml <<"EOF"
13 | test:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: |
18 | echo hello world
19 | EOF
20 |
21 | mkdir -p roots
22 | mount -t tmpfs -o size=1G tmpfs roots
23 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
24 | }
25 |
--------------------------------------------------------------------------------
/test/unprivileged.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "file with chmod 000 works" {
12 | cat > stacker.yaml <<"EOF"
13 | parent:
14 | from:
15 | type: oci
16 | url: ${{BUSYBOX_OCI}}
17 | run: |
18 | touch /etc/000
19 | chmod 000 /etc/000
20 | child:
21 | from:
22 | type: oci
23 | url: ${{BUSYBOX_OCI}}
24 | run: |
25 | echo "zomg" > /etc/000
26 | chmod 000 /etc/000
27 | EOF
28 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
29 | umoci unpack --image oci:parent parent
30 | [ -f parent/rootfs/etc/000 ]
31 | [ "$(stat --format="%a" parent/rootfs/etc/000)" = "0" ]
32 |
33 | umoci unpack --image oci:child child
34 | [ -f child/rootfs/etc/000 ]
35 | [ "$(stat --format="%a" child/rootfs/etc/000)" = "0" ]
36 | [ "$(cat child/rootfs/etc/000)" = "zomg" ]
37 | }
38 |
39 | @test "unprivileged stacker" {
40 | cat > stacker.yaml <<"EOF"
41 | busybox:
42 | from:
43 | type: oci
44 | url: ${{BUSYBOX_OCI}}
45 | imports:
46 | - https://www.cisco.com/favicon.ico
47 | run: |
48 | cp /stacker/imports/favicon.ico /favicon.ico
49 | layer1:
50 | from:
51 | type: built
52 | tag: busybox
53 | run:
54 | - rm /favicon.ico
55 | EOF
56 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
57 | umoci unpack --image oci:busybox busybox
58 | [ "$(sha .stacker/imports/busybox/favicon.ico)" == "$(sha busybox/rootfs/favicon.ico)" ]
59 | umoci unpack --image oci:layer1 layer1
60 | [ ! -f layer1/rootfs/favicon.ico ]
61 | }
62 |
63 | @test "unprivileged read-only imports can be re-cached" {
64 | require_privilege unpriv
65 |
66 | sudo -s -u $SUDO_USER <<"EOF"
67 | mkdir -p import
68 | touch import/this
69 | chmod -w import
70 | EOF
71 |
72 | cat > stacker.yaml <<"EOF"
73 | busybox:
74 | from:
75 | type: oci
76 | url: ${{BUSYBOX_OCI}}
77 | imports:
78 | - import
79 | EOF
80 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
81 | ls -al import import/*
82 | echo that | sudo -u $SUDO_USER tee import/this
83 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
84 | }
85 |
86 | @test "/stacker in unprivileged mode gets deleted" {
87 | require_privilege unpriv
88 |
89 | sudo -s -u $SUDO_USER <<"EOF"
90 | touch first
91 | touch second
92 | EOF
93 |
94 | cat > stacker.yaml <<"EOF"
95 | base:
96 | from:
97 | type: oci
98 | url: ${{BUSYBOX_OCI}}
99 | imports:
100 | - first
101 | - second
102 | run: |
103 | ls -alh /stacker/imports
104 | tar -C /stacker/imports -cv -f /base.tar.gz first second
105 | next:
106 | from:
107 | type: tar
108 | url: stacker://base/base.tar.gz
109 | EOF
110 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
111 |
112 | umoci unpack --image oci:base base
113 | [ ! -d base/rootfs/stacker ]
114 |
115 | umoci unpack --image oci:next next
116 | [ -f next/rootfs/first ]
117 | [ -f next/rootfs/second ]
118 | [ ! -d next/rootfs/stacker ]
119 | }
120 |
121 | @test "stacker switching privilege modes fails" {
122 | require_privilege unpriv
123 |
124 | cat > stacker.yaml <<"EOF"
125 | base:
126 | from:
127 | type: oci
128 | url: ${{BUSYBOX_OCI}}
129 | imports:
130 | - test
131 | run: cat /stacker/imports/test
132 | EOF
133 | echo unpriv | sudo -s -u $SUDO_USER tee test
134 | stacker build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
135 | echo priv > test
136 |
137 | # always run as privileged...
138 | run "${ROOT_DIR}/stacker" --debug build --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
139 | echo $output
140 | [ "$status" -ne 0 ]
141 | }
142 |
143 | @test "underlying layer output conversion happens in a user namespace" {
144 | cat > stacker.yaml <<"EOF"
145 | image:
146 | from:
147 | type: oci
148 | url: ${{BUSYBOX_OCI}}
149 | EOF
150 |
151 | stacker build --layer-type squashfs --substitute BUSYBOX_OCI=${BUSYBOX_OCI}
152 | manifest=$(cat oci/index.json | jq -r .manifests[0].digest | cut -f2 -d:)
153 | layer0=$(cat oci/blobs/sha256/$manifest | jq -r .layers[0].digest | cut -f2 -d:)
154 |
155 | mkdir layer0
156 | mount -t squashfs oci/blobs/sha256/$layer0 layer0
157 | echo "mount has uid $(stat --format "%u" layer0/bin/mount)"
158 | [ "$(stat --format "%u" layer0/bin/mount)" = "0" ]
159 | }
160 |
--------------------------------------------------------------------------------
/test/whiteout.bats:
--------------------------------------------------------------------------------
1 | load helpers
2 |
3 | function setup() {
4 | stacker_setup
5 | }
6 |
7 | function teardown() {
8 | cleanup
9 | }
10 |
11 | @test "test not adding extraneous whiteouts" {
12 | cat > stacker.yaml < stacker.yaml <&2; }
15 |
16 | fail() { [ $# -eq 0 ] || stderr "$@"; exit 1; }
17 |
18 | vr() {
19 | stderr "$" "$@"
20 | "$@" && return 0
21 | fail "FAIL[$?]: $*"
22 | }
23 |
24 | [ "$1" = "-h" ] || [ "$1" = "--help" ] && { Usage; exit 0; }
25 | [ $# -ge 2 ] || {
26 | Usage 1>&2;
27 | fail "Got $# args, expected 2 or more";
28 | }
29 |
30 | oci_d="$1"
31 | shift
32 |
33 | command -v skopeo >/dev/null 2>&1 ||
34 | fail "no 'skopeo' in PATH"
35 |
36 | mkdir -p "$oci_d" || fail "failed to create dir '$oci_d'"
37 | for url in "$@"; do
38 | name=${url##*/};
39 | vr skopeo copy --retry-times=3 "$url" "oci:${oci_d}:$name" ||
40 | fail "Failed to copy '$url' to 'oci:${oci_d}:$name'"
41 | done
42 | exit 0
43 |
--------------------------------------------------------------------------------