├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── bug_report.md.license │ ├── feature_request.md │ └── feature_request.md.license ├── PULL_REQUEST_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md.license ├── renovate.json5 └── workflows │ ├── backport.yml │ ├── ci.yml │ ├── commands.yml │ ├── promote.yaml │ └── tag.yaml ├── .gitignore ├── .gitmodules ├── .golangci.yml ├── CODEOWNERS ├── LICENSE ├── Makefile ├── OWNERS.md ├── README.md ├── catalog-info.yaml ├── cmd └── uptest │ └── main.go ├── design ├── design-doc-uptest-improvements-and-increasing-test-coverage.md └── one-pager-considerations-for-changing-test-framework-of-uptest.md ├── docs └── integrating-uptest-for-e2e-testing.md ├── go.mod ├── go.sum ├── go.sum.license ├── hack ├── check_endpoints.sh └── patch.sh ├── internal ├── config │ ├── builder.go │ └── config.go ├── prepare.go ├── templates │ ├── 00-apply.yaml.tmpl │ ├── 00-apply.yaml.tmpl.license │ ├── 01-update.yaml.tmpl │ ├── 01-update.yaml.tmpl.license │ ├── 02-import.yaml.tmpl │ ├── 02-import.yaml.tmpl.license │ ├── 03-delete.yaml.tmpl │ ├── 03-delete.yaml.tmpl.license │ ├── embed.go │ ├── renderer.go │ └── renderer_test.go ├── tester.go └── version │ └── version.go └── pkg └── runner.go /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Help us diagnose and fix bugs in Uptest 4 | labels: bug,needs:triage 5 | title: 6 | --- 7 | 13 | 14 | ### What happened? 15 | 19 | 20 | 21 | ### How can we reproduce it? 22 | 27 | 28 | ### What environment did it happen in? 
29 | 30 | * Uptest Version: 31 | 32 | 42 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Help us make Uptest more useful 4 | labels: enhancement,needs:triage 5 | title: 6 | --- 7 | 13 | 14 | ### What problem are you facing? 15 | 16 | 21 | 22 | ### How could Uptest help solve your problem? 23 | 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 4 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | ### Description of your changes 10 | 11 | 20 | Fixes # 21 | 22 | I have: 23 | 24 | - [ ] Run `make reviewable test` to ensure this PR is ready for review. 
25 | 26 | ### How has this code been tested 27 | 28 | 33 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 4 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ], 6 | // The maximum number of PRs to be created in parallel 7 | "prConcurrentLimit": 5, 8 | // The branches renovate should target 9 | "baseBranches": ["main"], 10 | "ignorePaths": ["design/**"], 11 | "postUpdateOptions": ["gomodTidy"], 12 | // By default renovate will auto detect whether semantic commits have been used 13 | // in the recent history and comply with that, we explicitly disable it 14 | "semanticCommits": "disabled", 15 | // All PRs should have a label 16 | "labels": ["automated"], 17 | "regexManagers": [ 18 | { 19 | // We want a PR to bump Go versions used through env variables in any Github 20 | // Actions, taking it from the official Github repository. 21 | "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], 22 | "matchStrings": [ 23 | "GO_VERSION: '(?.*?)'\\n" 24 | ], 25 | "datasourceTemplate": "golang-version", 26 | "depNameTemplate": "golang" 27 | }, { 28 | // We want a PR to bump golangci-lint versions used through env variables in 29 | // any Github Actions, taking it from the official Github repository tags. 
30 | "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], 31 | "matchStrings": [ 32 | "GOLANGCI_VERSION: '(?.*?)'\\n" 33 | ], 34 | "datasourceTemplate": "github-tags", 35 | "depNameTemplate": "golangci/golangci-lint" 36 | } 37 | ], 38 | // PackageRules disabled below should be enabled in case of vulnerabilities 39 | "vulnerabilityAlerts": { 40 | "enabled": true 41 | }, 42 | "packageRules": [ 43 | { 44 | // We need to ignore k8s.io/client-go older versions as they switched to 45 | // semantic version and old tags are still available in the repo. 46 | "matchDatasources": [ 47 | "go" 48 | ], 49 | "matchDepNames": [ 50 | "k8s.io/client-go" 51 | ], 52 | "allowedVersions": "<1.0" 53 | }, { 54 | // We want a single PR for all the patches bumps of kubernetes related 55 | // dependencies, as most of the times these are all strictly related. 56 | "matchDatasources": [ 57 | "go" 58 | ], 59 | "groupName": "kubernetes patches", 60 | "matchUpdateTypes": [ 61 | "patch", 62 | "digest" 63 | ], 64 | "matchPackagePrefixes": [ 65 | "k8s.io", 66 | "sigs.k8s.io" 67 | ] 68 | }, { 69 | // We want dedicated PRs for each minor and major bumps to kubernetes related 70 | // dependencies. 71 | "matchDatasources": [ 72 | "go" 73 | ], 74 | "matchUpdateTypes": [ 75 | "major", 76 | "minor" 77 | ], 78 | "matchPackagePrefixes": [ 79 | "k8s.io", 80 | "sigs.k8s.io" 81 | ] 82 | }, { 83 | // We want dedicated PRs for each bump to non-kubernetes Go dependencies, but 84 | // only if there are known vulnerabilities in the current version. 85 | "matchDatasources": [ 86 | "go" 87 | ], 88 | "matchPackagePatterns": [ 89 | "*" 90 | ], 91 | "enabled": false, 92 | "excludePackagePrefixes": [ 93 | "k8s.io", 94 | "sigs.k8s.io" 95 | ], 96 | "matchUpdateTypes": [ 97 | "major" 98 | ] 99 | }, { 100 | // We want a single PR for all minor and patch bumps to non-kubernetes Go 101 | // dependencies, but only if there are known vulnerabilities in the current 102 | // version. 
103 | "matchDatasources": [ 104 | "go" 105 | ], 106 | "matchPackagePatterns": [ 107 | "*" 108 | ], 109 | "enabled": false, 110 | "excludePackagePrefixes": [ 111 | "k8s.io", 112 | "sigs.k8s.io" 113 | ], 114 | "matchUpdateTypes": [ 115 | "minor", 116 | "patch", 117 | "digest" 118 | ], 119 | "groupName": "all non-major go dependencies" 120 | }, { 121 | // We want a single PR for all minor and patch bumps of Github Actions 122 | "matchDepTypes": [ 123 | "action" 124 | ], 125 | "matchUpdateTypes": [ 126 | "minor", 127 | "patch" 128 | ], 129 | "groupName": "all non-major github action", 130 | "pinDigests": true 131 | },{ 132 | // We want dedicated PRs for each major bump to Github Actions 133 | "matchDepTypes": [ 134 | "action" 135 | ], 136 | "pinDigests": true 137 | } 138 | ] 139 | } -------------------------------------------------------------------------------- /.github/workflows/backport.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | name: Backport 6 | 7 | on: 8 | # NOTE(negz): This is a risky target, but we run this action only when and if 9 | # a PR is closed, then filter down to specifically merged PRs. We also don't 10 | # invoke any scripts, etc from within the repo. I believe the fact that we'll 11 | # be able to review PRs before this runs makes this fairly safe. 12 | # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/ 13 | pull_request_target: 14 | types: [closed] 15 | # See also commands.yml for the /backport triggered variant of this workflow. 16 | 17 | jobs: 18 | # NOTE(negz): I tested many backport GitHub actions before landing on this 19 | # one. Many do not support merge commits, or do not support pull requests with 20 | # more than one commit. This one does. 
It also handily links backport PRs with 21 | # new PRs, and provides commentary and instructions when it can't backport. 22 | # The main gotchas with this action are that it _only_ supports merge commits, 23 | # and that PRs _must_ be labelled before they're merged to trigger a backport. 24 | open-pr: 25 | runs-on: ubuntu-22.04 26 | if: github.event.pull_request.merged 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 30 | with: 31 | fetch-depth: 0 32 | 33 | - name: Open Backport PR 34 | uses: zeebe-io/backport-action@2ee900dc92632adf994f8e437b6d16840fd61f58 # v0.0.9 35 | with: 36 | github_token: ${{ secrets.GITHUB_TOKEN }} 37 | github_workspace: ${{ github.workspace }} 38 | version: v0.0.8 39 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | name: CI 6 | 7 | on: 8 | push: 9 | branches: 10 | - main 11 | - release-* 12 | pull_request: {} 13 | workflow_dispatch: {} 14 | 15 | env: 16 | # Common versions 17 | GO_VERSION: '1.23' 18 | GOLANGCI_VERSION: 'v1.61.0' 19 | DOCKER_BUILDX_VERSION: 'v0.8.2' 20 | 21 | # Common users. We can't run a step 'if secrets.XXX != ""' but we can run a 22 | # step 'if env.XXX' != ""', so we copy these to succinctly test whether 23 | # credentials have been provided before trying to run steps that need them. 
24 | UPBOUND_MARKETPLACE_PUSH_ROBOT_USR: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} 25 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 26 | jobs: 27 | detect-noop: 28 | runs-on: ubuntu-22.04 29 | outputs: 30 | noop: ${{ steps.noop.outputs.should_skip }} 31 | steps: 32 | - name: Detect No-op Changes 33 | id: noop 34 | uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0 35 | with: 36 | github_token: ${{ secrets.GITHUB_TOKEN }} 37 | paths_ignore: '["**.md", "**.png", "**.jpg"]' 38 | do_not_skip: '["workflow_dispatch", "schedule", "push"]' 39 | 40 | lint: 41 | runs-on: ubuntu-22.04 42 | needs: detect-noop 43 | if: needs.detect-noop.outputs.noop != 'true' 44 | 45 | steps: 46 | - name: Checkout 47 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 48 | with: 49 | submodules: true 50 | 51 | - name: Setup Go 52 | uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3 53 | with: 54 | go-version: ${{ env.GO_VERSION }} 55 | 56 | - name: Find the Go Build Cache 57 | id: go 58 | run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT 59 | 60 | - name: Cache the Go Build Cache 61 | uses: actions/cache@v4 62 | with: 63 | path: ${{ steps.go.outputs.cache }} 64 | key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }} 65 | restore-keys: ${{ runner.os }}-build-lint- 66 | 67 | - name: Download Go Modules 68 | run: make modules.download modules.check 69 | 70 | # We could run 'make lint' but we prefer this action because it leaves 71 | # 'annotations' (i.e. it comments on PRs to point out linter violations). 
72 | - name: Lint 73 | uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3 74 | with: 75 | version: ${{ env.GOLANGCI_VERSION }} 76 | 77 | check-diff: 78 | runs-on: ubuntu-22.04 79 | needs: detect-noop 80 | if: needs.detect-noop.outputs.noop != 'true' 81 | 82 | steps: 83 | - name: Checkout 84 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 85 | with: 86 | submodules: true 87 | 88 | - name: Setup Go 89 | uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3 90 | with: 91 | go-version: ${{ env.GO_VERSION }} 92 | 93 | - name: Install goimports 94 | run: | 95 | cd /tmp 96 | go install golang.org/x/tools/cmd/goimports@v0.1.12 97 | 98 | - name: Find the Go Build Cache 99 | id: go 100 | run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT 101 | 102 | - name: Cache the Go Build Cache 103 | uses: actions/cache@v4 104 | with: 105 | path: ${{ steps.go.outputs.cache }} 106 | key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} 107 | restore-keys: ${{ runner.os }}-build-check-diff- 108 | 109 | - name: Download Go Modules 110 | run: make modules.download modules.check 111 | 112 | - name: Check Diff 113 | id: check-diff 114 | run: | 115 | make check-diff 116 | 117 | - name: Show diff 118 | if: failure() && steps.check-diff.outcome == 'failure' 119 | run: git diff 120 | 121 | unit-tests: 122 | runs-on: ubuntu-22.04 123 | needs: detect-noop 124 | if: needs.detect-noop.outputs.noop != 'true' 125 | 126 | steps: 127 | - name: Checkout 128 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 129 | with: 130 | submodules: true 131 | 132 | - name: Fetch History 133 | run: git fetch --prune --unshallow 134 | 135 | - name: Setup Go 136 | uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3 137 | with: 138 | go-version: ${{ env.GO_VERSION }} 139 | 140 | - name: Find the Go Build Cache 141 | id: go 142 | run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT 143 | 144 | - 
name: Cache the Go Build Cache 145 | uses: actions/cache@v4 146 | with: 147 | path: ${{ steps.go.outputs.cache }} 148 | key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }} 149 | restore-keys: ${{ runner.os }}-build-unit-tests- 150 | 151 | - name: Download Go Modules 152 | run: make modules.download modules.check 153 | 154 | - name: Run Unit Tests 155 | run: make -j2 test 156 | 157 | - name: Publish Unit Test Coverage 158 | uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d # v3 159 | with: 160 | flags: unittests 161 | file: _output/tests/linux_amd64/coverage.txt 162 | 163 | publish-artifacts: 164 | runs-on: ubuntu-22.04 165 | needs: detect-noop 166 | if: needs.detect-noop.outputs.noop != 'true' 167 | 168 | steps: 169 | - name: Setup QEMU 170 | uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2 171 | with: 172 | platforms: all 173 | 174 | - name: Setup Docker Buildx 175 | uses: docker/setup-buildx-action@885d1462b80bc1c1c7f0b00334ad271f09369c55 # v2 176 | with: 177 | version: ${{ env.DOCKER_BUILDX_VERSION }} 178 | install: true 179 | 180 | - name: Checkout 181 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 182 | with: 183 | submodules: true 184 | 185 | - name: Fetch History 186 | run: git fetch --prune --unshallow 187 | 188 | - name: Setup Go 189 | uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3 190 | with: 191 | go-version: ${{ env.GO_VERSION }} 192 | 193 | - name: Find the Go Build Cache 194 | id: go 195 | run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT 196 | 197 | - name: Cache the Go Build Cache 198 | uses: actions/cache@v4 199 | with: 200 | path: ${{ steps.go.outputs.cache }} 201 | key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }} 202 | restore-keys: ${{ runner.os }}-build-publish-artifacts- 203 | 204 | - name: Download Go Modules 205 | run: make modules.download modules.check 206 | 207 | - name: Build Artifacts 208 | run: 
make -j2 build.all 209 | env: 210 | # We're using docker buildx, which doesn't actually load the images it 211 | # builds by default. Specifying --load does so. 212 | BUILD_ARGS: "--load" 213 | 214 | - name: Publish Artifacts to GitHub 215 | uses: actions/upload-artifact@v4 216 | with: 217 | name: output 218 | path: _output/** 219 | 220 | - name: Publish Artifacts to S3 221 | run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/} 222 | if: env.AWS_ACCESS_KEY_ID != '' 223 | env: 224 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 225 | AWS_DEFAULT_REGION: us-east-1 226 | 227 | - name: Promote Artifacts in S3 228 | if: github.ref == 'refs/heads/main' && env.AWS_ACCESS_KEY_ID != '' 229 | run: make -j2 promote 230 | env: 231 | BRANCH_NAME: main 232 | CHANNEL: main 233 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 234 | AWS_DEFAULT_REGION: us-east-1 235 | -------------------------------------------------------------------------------- /.github/workflows/commands.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | name: Comment Commands 6 | 7 | on: issue_comment 8 | 9 | jobs: 10 | backport: 11 | runs-on: ubuntu-22.04 12 | if: github.event.issue.pull_request && startsWith(github.event.comment.body, '/backport') 13 | steps: 14 | - name: Extract Command 15 | id: command 16 | uses: xt0rted/slash-command-action@865ee04a1dfc8aa2571513eee8e84b5377153511 # v1 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | command: backport 20 | reaction: "true" 21 | reaction-type: "eyes" 22 | allow-edits: "false" 23 | permission-level: write 24 | 25 | - name: Checkout 26 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 27 | with: 28 | fetch-depth: 0 29 | 30 | - name: Open Backport PR 31 | uses: zeebe-io/backport-action@2ee900dc92632adf994f8e437b6d16840fd61f58 # v0.0.9 32 | with: 33 | 
github_token: ${{ secrets.GITHUB_TOKEN }} 34 | github_workspace: ${{ github.workspace }} 35 | version: v0.0.4 36 | -------------------------------------------------------------------------------- /.github/workflows/promote.yaml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | name: Promote 6 | 7 | on: 8 | workflow_dispatch: 9 | inputs: 10 | version: 11 | description: 'Release version (e.g. v0.1.0)' 12 | required: true 13 | channel: 14 | description: 'Release channel' 15 | required: true 16 | default: 'alpha' 17 | 18 | env: 19 | # Common versions 20 | GO_VERSION: '1.23' 21 | 22 | # Common users. We can't run a step 'if secrets.XXX != ""' but we can run 23 | # a step 'if env.XXX' != ""', so we copy these to succinctly test whether 24 | # credentials have been provided before trying to run steps that need them. 25 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 26 | 27 | jobs: 28 | promote-artifacts: 29 | runs-on: ubuntu-22.04 30 | 31 | steps: 32 | - name: Checkout 33 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 34 | with: 35 | submodules: true 36 | 37 | - name: Setup Go 38 | uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3 39 | with: 40 | go-version: ${{ env.GO_VERSION }} 41 | 42 | - name: Fetch History 43 | run: git fetch --prune --unshallow 44 | 45 | - name: Promote Artifacts in S3 46 | if: env.AWS_ACCESS_KEY_ID != '' 47 | run: make -j2 promote BRANCH_NAME=${GITHUB_REF##*/} 48 | env: 49 | VERSION: ${{ github.event.inputs.version }} 50 | CHANNEL: ${{ github.event.inputs.channel }} 51 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 52 | AWS_DEFAULT_REGION: us-east-1 53 | -------------------------------------------------------------------------------- /.github/workflows/tag.yaml: -------------------------------------------------------------------------------- 1 
| # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | name: Tag 6 | 7 | on: 8 | workflow_dispatch: 9 | inputs: 10 | version: 11 | description: 'Release version (e.g. v0.1.0)' 12 | required: true 13 | message: 14 | description: 'Tag message' 15 | required: true 16 | 17 | jobs: 18 | create-tag: 19 | runs-on: ubuntu-22.04 20 | 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4 24 | 25 | - name: Create Tag 26 | uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1 27 | with: 28 | version: ${{ github.event.inputs.version }} 29 | message: ${{ github.event.inputs.message }} 30 | token: ${{ secrets.GITHUB_TOKEN }} 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | /.cache 6 | /.work 7 | /_output 8 | cover.out 9 | /vendor 10 | /.vendor-new 11 | .DS_Store 12 | 13 | # ignore IDE folders 14 | .vscode/ 15 | .idea/ 16 | 17 | # packages 18 | *.xpkg -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | [submodule "build"] 6 | path = build 7 | url = https://github.com/crossplane/build 8 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | run: 6 | deadline: 10m 7 | 8 | output: 9 | # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is 
"colored-line-number" 10 | formats: colored-line-number 11 | 12 | linters-settings: 13 | errcheck: 14 | # report about not checking of errors in type assetions: `a := b.(MyStruct)`; 15 | # default is false: such cases aren't reported by default. 16 | check-type-assertions: false 17 | 18 | # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; 19 | # default is false: such cases aren't reported by default. 20 | check-blank: false 21 | 22 | # [deprecated] comma-separated list of pairs of the form pkg:regex 23 | # the regex is used to ignore names within pkg. (default "fmt:.*"). 24 | # see https://github.com/kisielk/errcheck#the-deprecated-method for details 25 | exclude-functions: fmt:.*,io/ioutil:^Read.* 26 | 27 | govet: 28 | # report about shadowed variables 29 | check-shadowing: false 30 | 31 | gofmt: 32 | # simplify code: gofmt with `-s` option, true by default 33 | simplify: true 34 | 35 | goimports: 36 | # put imports beginning with prefix after 3rd-party packages; 37 | # it's a comma-separated list of prefixes 38 | local-prefixes: github.com/crossplane/uptest 39 | 40 | gocyclo: 41 | # minimal code complexity to report, 30 by default (but we recommend 10-20) 42 | min-complexity: 10 43 | 44 | maligned: 45 | # print struct with more effective memory layout or not, false by default 46 | suggest-new: true 47 | 48 | dupl: 49 | # tokens count to trigger issue, 150 by default 50 | threshold: 100 51 | 52 | goconst: 53 | # minimal length of string constant, 3 by default 54 | min-len: 3 55 | # minimal occurrences count to trigger, 3 by default 56 | min-occurrences: 5 57 | 58 | lll: 59 | # tab width in spaces. Default to 1. 60 | tab-width: 1 61 | 62 | unused: 63 | # treat code as a program (not a library) and report unused exported identifiers; default is false. 64 | # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: 65 | # if it's called for subdir of a project it can't find funcs usages. 
All text editor integrations 66 | # with golangci-lint call it on a directory with the changed file. 67 | check-exported: false 68 | 69 | unparam: 70 | # Inspect exported functions, default is false. Set to true if no external program/library imports your code. 71 | # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: 72 | # if it's called for subdir of a project it can't find external interfaces. All text editor integrations 73 | # with golangci-lint call it on a directory with the changed file. 74 | check-exported: false 75 | 76 | nakedret: 77 | # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 78 | max-func-lines: 30 79 | 80 | prealloc: 81 | # XXX: we don't recommend using this linter before doing performance profiling. 82 | # For most programs usage of prealloc will be a premature optimization. 83 | 84 | # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. 85 | # True by default. 86 | simple: true 87 | range-loops: true # Report preallocation suggestions on range loops, true by default 88 | for-loops: false # Report preallocation suggestions on for loops, false by default 89 | 90 | gocritic: 91 | # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks. 92 | # Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags". 
93 | enabled-tags: 94 | - performance 95 | 96 | settings: # settings passed to gocritic 97 | captLocal: # must be valid enabled check name 98 | paramsOnly: true 99 | rangeValCopy: 100 | sizeThreshold: 32 101 | 102 | nolintlint: 103 | require-explanation: true 104 | require-specific: true 105 | 106 | linters: 107 | enable: 108 | - gosimple 109 | - staticcheck 110 | - unused 111 | - govet 112 | - gocyclo 113 | - gocritic 114 | - goconst 115 | - goimports 116 | - gofmt # We enable this as well as goimports for its simplify mode. 117 | - prealloc 118 | - revive 119 | - unconvert 120 | - misspell 121 | - nakedret 122 | - nolintlint 123 | 124 | presets: 125 | - bugs 126 | - unused 127 | fast: false 128 | 129 | 130 | issues: 131 | # Excluding files 132 | exclude-files: 133 | - "zz_\\..+\\.go$" 134 | 135 | # Excluding configuration per-path and per-linter 136 | exclude-rules: 137 | # Exclude some linters from running on tests files. 138 | - path: _test(ing)?\.go 139 | linters: 140 | - gocyclo 141 | - errcheck 142 | - dupl 143 | - gosec 144 | - scopelint 145 | - unparam 146 | 147 | # Ease some gocritic warnings on test files. 148 | - path: _test\.go 149 | text: "(unnamedResult|exitAfterDefer)" 150 | linters: 151 | - gocritic 152 | 153 | # These are performance optimisations rather than style issues per se. 154 | # They warn when function arguments or range values copy a lot of memory 155 | # rather than using a pointer. 156 | - text: "(hugeParam|rangeValCopy):" 157 | linters: 158 | - gocritic 159 | 160 | # This "TestMain should call os.Exit to set exit code" warning is not clever 161 | # enough to notice that we call a helper method that calls os.Exit. 162 | - text: "SA3000:" 163 | linters: 164 | - staticcheck 165 | 166 | - text: "k8s.io/api/core/v1" 167 | linters: 168 | - goimports 169 | 170 | # This is a "potential hardcoded credentials" warning. 
It's triggered by 171 | # any variable with 'secret' in the name, and thus hits a lot of false 172 | # positives in Kubernetes land where a Secret is an object type. 173 | - text: "G101:" 174 | linters: 175 | - gosec 176 | - gas 177 | 178 | # This is an 'errors unhandled' warning that duplicates errcheck. 179 | - text: "G104:" 180 | linters: 181 | - gosec 182 | - gas 183 | 184 | # Independently from option `exclude` we use default exclude patterns, 185 | # it can be disabled by this option. To list all 186 | # excluded by default patterns execute `golangci-lint run --help`. 187 | # Default value for this option is true. 188 | exclude-use-default: false 189 | 190 | # Show only new issues: if there are unstaged changes or untracked files, 191 | # only those changes are analyzed, else only changes in HEAD~ are analyzed. 192 | # It's a super-useful option for integration of golangci-lint into existing 193 | # large codebase. It's not practical to fix all existing issues at the moment 194 | # of integration: much better don't allow issues in new code. 195 | # Default is false. 196 | new: false 197 | 198 | # Maximum issues count per one linter. Set to 0 to disable. Default is 50. 199 | max-per-linter: 0 200 | 201 | # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 202 | max-same-issues: 0 203 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | # This file controls automatic PR reviewer assignment. 
See the following docs: 6 | # 7 | # * https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners 8 | # * https://docs.github.com/en/organizations/organizing-members-into-teams/managing-code-review-settings-for-your-team 9 | # 10 | # The goal of this file is for most PRs to automatically and fairly have one 11 | # maintainer set as PR reviewers. All maintainers have permission to approve 12 | # and merge PRs. All PRs must be approved by at least one maintainer before being merged. 13 | # 14 | # Where possible, prefer explicitly specifying a maintainer who is a subject 15 | # matter expert for a particular part of the codebase rather than using the 16 | # @upbound/team-extensions group. 17 | # 18 | # See also OWNERS.md for governance details 19 | 20 | # Fallback owners 21 | * @ulucinar @sergenyalcin 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 10 | 11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 12 | 13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
14 | 15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 16 | 17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 18 | 19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 20 | 21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 22 | 23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 24 | 25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 26 | 27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 28 | 29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 30 | 31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 32 | 33 | 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 34 | 35 | (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and 36 | 37 | (b) You must cause any modified files to carry prominent notices stating that You changed the files; and 38 | 39 | (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 40 | 41 | (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 42 | 43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 44 | 45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 46 | 47 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 48 | 49 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 50 | 51 | 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 52 | 53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 54 | 55 | END OF TERMS AND CONDITIONS 56 | 57 | APPENDIX: How to apply the Apache License to your work. 58 | 59 | To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
60 | 61 | Copyright [yyyy] [name of copyright owner] 62 | 63 | Licensed under the Apache License, Version 2.0 (the "License"); 64 | you may not use this file except in compliance with the License. 65 | You may obtain a copy of the License at 66 | 67 | 68 | 69 | Unless required by applicable law or agreed to in writing, software 70 | distributed under the License is distributed on an "AS IS" BASIS, 71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 72 | See the License for the specific language governing permissions and 73 | limitations under the License. 74 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2023 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | # Project Setup 6 | PROJECT_NAME := uptest 7 | PROJECT_REPO := github.com/crossplane/$(PROJECT_NAME) 8 | 9 | PLATFORMS ?= linux_amd64 linux_arm64 darwin_amd64 darwin_arm64 10 | 11 | # -include will silently skip missing files, which allows us 12 | # to load those files with a target in the Makefile. If only 13 | # "include" was used, the make command would fail and refuse 14 | # to run a target until the include commands succeeded. 15 | -include build/makelib/common.mk 16 | 17 | # ==================================================================================== 18 | # Setup Output 19 | S3_BUCKET ?= crossplane.uptest.releases 20 | -include build/makelib/output.mk 21 | 22 | # ==================================================================================== 23 | # Setup Go 24 | GO_REQUIRED_VERSION = 1.21 25 | # GOLANGCILINT_VERSION is inherited from build submodule by default. 26 | # Uncomment below if you need to override the version. 
27 | GOLANGCILINT_VERSION ?= 1.61.0 28 | 29 | GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/uptest 30 | GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.Version=$(VERSION) 31 | GO_SUBDIRS += cmd internal 32 | GO111MODULE = on 33 | -include build/makelib/golang.mk 34 | 35 | # ==================================================================================== 36 | # Targets 37 | 38 | # run `make help` to see the targets and options 39 | 40 | # We want submodules to be set up the first time `make` is run. 41 | # We manage the build/ folder and its Makefiles as a submodule. 42 | # The first time `make` is run, the includes of build/*.mk files will 43 | # all fail, and this target will be run. The next time, the default as defined 44 | # by the includes will be run instead. 45 | fallthrough: submodules 46 | @echo Initial setup complete. Running make again . . . 47 | @make 48 | 49 | # Update the submodules, such as the common build scripts. 50 | submodules: 51 | @git submodule sync 52 | @git submodule update --init --recursive 53 | 54 | .PHONY: submodules fallthrough 55 | 56 | -include build/makelib/k8s_tools.mk 57 | -include build/makelib/controlplane.mk 58 | 59 | uptest: 60 | @echo "Running uptest" 61 | @printenv 62 | 63 | # NOTE(hasheddan): the build submodule currently overrides XDG_CACHE_HOME in 64 | # order to force the Helm 3 to use the .work/helm directory. This causes Go on 65 | # Linux machines to use that directory as the build cache as well. We should 66 | # adjust this behavior in the build submodule because it is also causing Linux 67 | # users to duplicate their build cache, but for now we just make it easier to 68 | # identify its location in CI so that we cache between builds. 
69 | go.cachedir: 70 | @go env GOCACHE 71 | 72 | go.mod.cachedir: 73 | @go env GOMODCACHE 74 | -------------------------------------------------------------------------------- /OWNERS.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # OWNERS 8 | 9 | This page lists all maintainers for **this** repository. Each repository in the 10 | [Crossplane organization](https://github.com/crossplane/) will list their 11 | repository maintainers in their own `OWNERS.md` file. 12 | 13 | ## Maintainers 14 | 15 | * Alper Ulucinar ([ulucinar](https://github.com/ulucinar)) 16 | * Sergen Yalcin ([sergenyalcin](https://github.com/sergenyalcin)) 17 | 18 | See [CODEOWNERS](./CODEOWNERS) for automatic PR assignment. 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # UPTEST 2 | 3 | _Note:_ The `uptest` tool used to live in the https://github.com/upbound/uptest 4 | repository. We have moved it to this repository 5 | (https://github.com/crossplane/uptest) and kept the old repository instead of 6 | renaming it because the provider reusable CI workflows and the other CI tooling 7 | such as `crddiff` or `updoc` still reside in there. 8 | 9 | The end to end integration testing tool for Crossplane providers and configurations. 10 | 11 | Uptest comes as a binary which can be installed from the releases section. It runs end-to-end tests 12 | by applying the provided examples and waiting for the expected conditions. Other than that, it enables templating to 13 | insert dynamic values into the examples and supports running scripts as hooks just before and right after applying 14 | the examples. 15 | 16 | ## Usage 17 | 18 | ```shell 19 | $ uptest e2e --help 20 | usage: uptest e2e [] [] 21 | 22 | Run e2e tests for manifests by applying them to a control plane and waiting until a given condition is met. 
23 | 24 | Flags: 25 | --help Show context-sensitive help (also try --help-long and --help-man). 26 | --data-source="" File path of data source that will be used for injection some values. 27 | --setup-script="" Script that will be executed before running tests. 28 | --teardown-script="" Script that will be executed after running tests. 29 | --default-timeout=1200 Default timeout in seconds for the test. Timeout could be overridden per resource using 30 | "uptest.upbound.io/timeout" annotation. 31 | --default-conditions="Ready" Comma separated list of default conditions to wait for a successful test. Conditions could be 32 | overridden per resource using "uptest.upbound.io/conditions" annotation. 33 | 34 | Args: 35 | [] List of manifests. Value of this option will be used to trigger/configure the tests.The possible usage: 36 | 'provider-aws/examples/s3/bucket.yaml,provider-gcp/examples/storage/bucket.yaml': The comma separated resources 37 | are used as test inputs. If this option is not set, 'MANIFEST_LIST' env var is used as default. 38 | ``` 39 | 40 | Uptest expects a running control-plane (a.k.a. k8s + crossplane) where required providers are running and/or required 41 | configuration was applied. 42 | 43 | Example run: 44 | 45 | ```shell 46 | uptest e2e examples/user.yaml,examples/bucket.yaml --setup-script="test/hooks/setup.sh" 47 | ``` 48 | 49 | ### Injecting Dynamic Values (and Datasource) 50 | 51 | Uptest supports injecting dynamic values into the examples by using a data source. The data source is a yaml file 52 | storing key-value pairs.
The values can be used in the examples by using the following syntax: 53 | 54 | ``` 55 | ${data.key} 56 | ``` 57 | 58 | Example data source file content: 59 | 60 | ```yaml 61 | aws_account_id: 123456789012 62 | aws_region: us-east-1 63 | ``` 64 | 65 | Example manifest: 66 | 67 | ```yaml 68 | apiVersion: athena.aws.upbound.io/v1beta1 69 | kind: DataCatalog 70 | metadata: 71 | labels: 72 | testing.upbound.io/example-name: example 73 | name: example 74 | spec: 75 | forProvider: 76 | description: Example Athena data catalog 77 | parameters: 78 | function: arn:aws:lambda:${data.aws_region}:${data.aws_account_id}:function:upbound-example-function 79 | region: us-west-1 80 | tags: 81 | Name: example-athena-data-catalog 82 | type: LAMBDA 83 | ``` 84 | 85 | Uptest also supports generating random strings as follows: 86 | 87 | ``` 88 | ${Rand.RFC1123Subdomain} 89 | ``` 90 | 91 | Example Manifest: 92 | 93 | ```yaml 94 | apiVersion: s3.aws.upbound.io/v1beta1 95 | kind: Bucket 96 | metadata: 97 | name: ${Rand.RFC1123Subdomain} 98 | labels: 99 | testing.upbound.io/example-name: s3 100 | spec: 101 | forProvider: 102 | region: us-west-1 103 | objectLockEnabled: true 104 | tags: 105 | Name: SampleBucket 106 | ``` 107 | 108 | ### Hooks 109 | 110 | There are 6 types of hooks that can be used to customize the test flow: 111 | 112 | 1. `setup-script`: This hook will be executed before running the test case. It is useful to set up the control plane 113 | before running the tests. For example, you can use it to create a provider config and your cloud credentials. This 114 | can be configured via `--setup-script` flag as a relative path to where uptest is executed. 115 | 2. `teardown-script`: This hook will be executed after running the test case. This can be configured via 116 | `--teardown-script` flag as a relative path to where uptest is executed. 117 | 3. `pre-assert-hook`: This hook will be executed before running the assertions and after applying a specific manifest.
118 | This can be configured via `uptest.upbound.io/pre-assert-hook` annotation on the manifest as a relative path to the 119 | manifest file. 120 | 4. `post-assert-hook`: This hook will be executed after running the assertions. This can be configured via 121 | `uptest.upbound.io/post-assert-hook` annotation on the manifest as a relative path to the manifest file. 122 | 5. `pre-delete-hook`: This hook will be executed just before deleting the resource. This can be configured via 123 | `uptest.upbound.io/pre-delete-hook` annotation on the manifest as a relative path to the manifest file. 124 | 6. `post-delete-hook`: This hook will be executed right after the resource is deleted. This can be configured via 125 | `uptest.upbound.io/post-delete-hook` annotation on the manifest as a relative path to the manifest file. 126 | 127 | > All hooks need to be executables, please make sure to set the executable bit on your scripts, e.g. with `chmod +x`. 128 | 129 | ### Troubleshooting 130 | 131 | Uptest uses [Chainsaw](https://github.com/kyverno/chainsaw) under the hood and generates `chainsaw` test cases based on the provided input. 132 | You can render and inspect the generated chainsaw test cases by using the uptest 133 | `--render-only` flag and checking the output directory. For example: 134 | 135 | ```shell 136 | uptest e2e examples/kcl/network-xr.yaml --setup-script=test/setup.sh --render-only 137 | 138 | 2024/11/01 22:20:46 Skipping update step because the root resource does not exist 139 | 2024/11/01 22:20:46 Written test files: /var/folders/sx/0tlfb9ys20bbqnszv3lw12m40000gn/T/uptest-e2e 140 | 141 | ls -1 /var/folders/sx/0tlfb9ys20bbqnszv3lw12m40000gn/T/uptest-e2e/case/ 142 | 00-apply.yaml 143 | 02-import.yaml 144 | 03-delete.yaml 145 | test-input.yaml 146 | ``` 147 | 148 | ## Report a Bug 149 | 150 | For filing bugs, suggesting improvements, or requesting new features, please 151 | open an [issue](https://github.com/crossplane/uptest/issues).
152 | 153 | ## Licensing 154 | 155 | Uptest is under the Apache 2.0 license. 156 | -------------------------------------------------------------------------------- /catalog-info.yaml: -------------------------------------------------------------------------------- 1 | # SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | # 3 | # SPDX-License-Identifier: CC0-1.0 4 | 5 | apiVersion: backstage.io/v1alpha1 6 | kind: Component 7 | metadata: 8 | name: uptest 9 | description: "End to end integration testing for Crossplane Providers and Configurations" 10 | links: 11 | - url: https://github.com/crossplane/uptest/blob/main/README.md 12 | title: Uptest readme 13 | annotations: 14 | github.com/project-slug: crossplane/uptest 15 | spec: 16 | type: service 17 | lifecycle: production 18 | owner: team-extensions 19 | -------------------------------------------------------------------------------- /cmd/uptest/main.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // main package for the uptest tooling. 6 | package main 7 | 8 | import ( 9 | "os" 10 | "path/filepath" 11 | "strings" 12 | 13 | "gopkg.in/alecthomas/kingpin.v2" 14 | 15 | "github.com/crossplane/uptest/pkg" 16 | ) 17 | 18 | var ( 19 | app = kingpin.New("uptest", "Automated Test Tool for Upbound Official Providers").DefaultEnvars() 20 | // e2e command (single command is preserved for backward compatibility) 21 | // and we may have further commands in the future. 22 | e2e = app.Command("e2e", "Run e2e tests for manifests by applying them to a control plane and waiting until a given condition is met.") 23 | ) 24 | 25 | var ( 26 | manifestList = e2e.Arg("manifest-list", "List of manifests. 
Value of this option will be used to trigger/configure the tests."+ 27 | "The possible usage:\n"+ 28 | "'provider-aws/examples/s3/bucket.yaml,provider-gcp/examples/storage/bucket.yaml': "+ 29 | "The comma separated resources are used as test inputs.\n"+ 30 | "If this option is not set, 'MANIFEST_LIST' env var is used as default.").Envar("MANIFEST_LIST").String() 31 | dataSourcePath = e2e.Flag("data-source", "File path of data source that will be used for injection some values.").Envar("UPTEST_DATASOURCE_PATH").Default("").String() 32 | setupScript = e2e.Flag("setup-script", "Script that will be executed before running tests.").Default("").String() 33 | teardownScript = e2e.Flag("teardown-script", "Script that will be executed after running tests.").Default("").String() 34 | 35 | defaultTimeout = e2e.Flag("default-timeout", "Default timeout in seconds for the test.\n"+ 36 | "Timeout could be overridden per resource using \"uptest.upbound.io/timeout\" annotation.").Default("1200s").Duration() 37 | defaultConditions = e2e.Flag("default-conditions", "Comma separated list of default conditions to wait for a successful test.\n"+ 38 | "Conditions could be overridden per resource using \"uptest.upbound.io/conditions\" annotation.").Default("Ready").String() 39 | 40 | skipDelete = e2e.Flag("skip-delete", "Skip the delete step of the test.").Default("false").Bool() 41 | testDir = e2e.Flag("test-directory", "Directory where chainsaw test case will be generated and executed.").Envar("UPTEST_TEST_DIR").Default(filepath.Join(os.TempDir(), "uptest-e2e")).String() 42 | onlyCleanUptestResources = e2e.Flag("only-clean-uptest-resources", "While deletion step, only clean resources that were created by uptest").Default("false").Bool() 43 | 44 | renderOnly = e2e.Flag("render-only", "Only render test files. Do not run the tests.").Default("false").Bool() 45 | logCollectInterval = e2e.Flag("log-collect-interval", "Specifies the interval duration for collecting logs. 
"+ 46 | "The duration should be provided in a format understood by the tool, such as seconds (s), minutes (m), or hours (h). For example, '30s' for 30 seconds, '5m' for 5 minutes, or '1h' for one hour.").Default("30s").Duration() 47 | skipUpdate = e2e.Flag("skip-update", "Skip the update step of the test.").Default("false").Bool() 48 | skipImport = e2e.Flag("skip-import", "Skip the import step of the test.").Default("false").Bool() 49 | ) 50 | 51 | func main() { 52 | if kingpin.MustParse(app.Parse(os.Args[1:])) == e2e.FullCommand() { 53 | e2eTests() 54 | } 55 | } 56 | 57 | func e2eTests() { 58 | cd, err := os.Getwd() 59 | if err != nil { 60 | kingpin.FatalIfError(err, "cannot get current directory") 61 | } 62 | 63 | list := strings.Split(*manifestList, ",") 64 | examplePaths := make([]string, 0, len(list)) 65 | for _, e := range list { 66 | if e == "" { 67 | continue 68 | } 69 | examplePaths = append(examplePaths, filepath.Join(cd, filepath.Clean(e))) 70 | } 71 | if len(examplePaths) == 0 { 72 | kingpin.Fatalf("No manifest to test provided.") 73 | } 74 | 75 | setupPath := "" 76 | if *setupScript != "" { 77 | setupPath, err = filepath.Abs(*setupScript) 78 | if err != nil { 79 | kingpin.FatalIfError(err, "cannot get absolute path of setup script") 80 | } 81 | } 82 | 83 | teardownPath := "" 84 | if *teardownScript != "" { 85 | teardownPath, err = filepath.Abs(*teardownScript) 86 | if err != nil { 87 | kingpin.FatalIfError(err, "cannot get absolute path of teardown script") 88 | } 89 | } 90 | 91 | builder := pkg.NewAutomatedTestBuilder() 92 | automatedTest := builder. 93 | SetManifestPaths(examplePaths). 94 | SetDataSourcePath(*dataSourcePath). 95 | SetSetupScriptPath(setupPath). 96 | SetTeardownScriptPath(teardownPath). 97 | SetDefaultConditions(strings.Split(*defaultConditions, ",")). 98 | SetDefaultTimeout(*defaultTimeout). 99 | SetDirectory(*testDir). 100 | SetSkipDelete(*skipDelete). 101 | SetSkipUpdate(*skipUpdate). 102 | SetSkipImport(*skipImport). 
103 | SetOnlyCleanUptestResources(*onlyCleanUptestResources). 104 | SetRenderOnly(*renderOnly). 105 | SetLogCollectionInterval(*logCollectInterval). 106 | Build() 107 | 108 | kingpin.FatalIfError(pkg.RunTest(automatedTest), "cannot run e2e tests successfully") 109 | } 110 | -------------------------------------------------------------------------------- /design/design-doc-uptest-improvements-and-increasing-test-coverage.md: -------------------------------------------------------------------------------- 1 | # Uptest Improvements and Increasing Test Coverage 2 | 3 | * Owner: Sergen Yalcin (@sergenyalcin) 4 | * Reviewers: Uptest Maintainers 5 | * Status: Accepted 6 | 7 | ## Background 8 | 9 | Uptest is a tool for testing and validating Crossplane providers. This tool is 10 | utilized to test the behavior of Crossplane controllers, resource management 11 | operations, and other Crossplane components. Uptest can simulate the creation, 12 | update, import, deletion, and other operations of Crossplane resources. 13 | 14 | The primary goal of Uptest is to facilitate the testing process of Crossplane 15 | resources. It integrates seamlessly with Crossplane and provides a testing 16 | infrastructure that enables users to create and run test scenarios for 17 | validating the reliability, functionality, and performance of Crossplane 18 | resources. 19 | 20 | Uptest was developed as a tool designed to run e2e tests in the GitHub Actions 21 | environment. Then, various improvements were made to ensure it ran in local 22 | environments. However, the tool was not considered a standalone project in the 23 | first place. Uptest was designed to run tests in more controlled environments 24 | (for example, a Kind cluster created from scratch) rather than running tests on 25 | any arbitrary Kubernetes cluster, and has evolved to become a standalone project 26 | over time. Today Uptest is being evaluated as a tool for users to integrate into 27 | their Crossplane development pipelines. 
28 | 29 | As a result, it is necessary to work on various enhancements for Uptest to 30 | continue its development as a more powerful and independent tool. In this 31 | document, evaluations will be made on the present and future of Uptest and the 32 | aspects to be developed. 33 | 34 | In its current form, Uptest offers its users an extensive test framework. This 35 | framework allows us to test and validate many different MRs simultaneously and 36 | seamlessly. 37 | 38 | For Crossplane providers, it is not enough to see `Ready: True` in the status of 39 | an MR. `Late-initialization` that occurs after the resource is `Ready` or the 40 | resource is not stable and is subjected to a continuous update loop, are 41 | actually situations that do not affect the `Ready` state of the resource but 42 | affect its lifecycle. 43 | 44 | To overcome some of the problems hidden behind this `Ready` condition, we use 45 | the `UpToDate` condition, which is only used in tests and activated by a 46 | specific annotation. This condition makes sure that after the resource is 47 | `Ready`, the `Late-Initialization` step is done, and the resource is not stuck 48 | in any update loop. 49 | 50 | For some MRs, it is vital that their names (or some identifier field values) are 51 | randomized. If the name of a resource is not unique, this will cause a conflict, 52 | and the resource cannot be created, or an existing resource could be updated. 53 | Two main issues need to be addressed to avoid this conflict. One is that some 54 | resources expect a universally unique name to be identified. In other words, we 55 | are talking about a name that will be unique on all cloud providers regardless 56 | of account or organization. The other issue is the need for uniqueness on a 57 | smaller scale, that is, within the account or organization used. At the end of 58 | the day, there is a need to generate random strings for some fields of some 59 | resources. 
Currently, there is only support for this in one format and only for 60 | some fields. 61 | 62 | Test cases that initially included only the `Apply` and `Delete` steps now also 63 | include the `Update` and `Import` steps. These new steps are very important in 64 | terms of increasing the test coverage of the lifecycle of resources. The 65 | `Import` step is also an important coverage point in terms of testing whether 66 | the external name configuration of the resources is done correctly. The 67 | validations performed during the transition to the new provider architecture, 68 | especially the new `Import` step, played a critical role in detecting many bugs 69 | and problems early. 70 | 71 | ## Goals 72 | 73 | - Increase Uptest's test capabilities by accommodating different test scenarios. 74 | - Make Uptest capable of running tests on arbitrary clusters. 75 | - Improve uptest documentation, such as user guides, technical documents, etc. 76 | - In cases where Uptest tests fail, debugging/logging should be improved to 77 | allow the user to understand the problem easily. 78 | - Increasing the configurability of Uptest by introducing parametric options to 79 | the CLI. 80 | 81 | ## Proposal 82 | 83 | The proposals below are suggested to achieve the above goals by making various 84 | improvements to the existing Uptest tool. The main goal is to make Uptest a 85 | more stable and inclusive standalone tool at the end of the day. 86 | 87 | ## Increased test capabilities 88 | 89 | Increasing the capabilities of Uptest will help us in the process of evolving it 90 | into a standalone tool both for test coverage and future use cases. 91 | 92 | ### Provider upgrade testing 93 | 94 | When Uptest first appeared, it was mainly concerned with testing whether a 95 | resource worked properly. Today, it has become an end-to-end testing solution. 96 | Especially when many validations are needed, such as the transition to a new 97 | architecture or before a release. 
In this context, it is valuable that Uptest 98 | also handles some end-to-end cases. 99 | 100 | Uptest tests some basic general steps in each case, such as setting up 101 | Crossplane and provider and testing ProviderConfig (Secret source). However, the 102 | `Upgrade` test, which is one of the main paths, can also be considered an 103 | end-to-end test in this context. Running an automated test along with the 104 | manual tests will increase the confidence level of developers. Roughly, the 105 | `Upgrade` test can look like this: 106 | 107 | - Installation of Source Package (parameterized) 108 | - Provisioning of Resources (several centralized resources and packages can be 109 | selected) 110 | - Upgrade to Target Package (parameterized) 111 | - Check that installed Resources are not affected 112 | - Provisioning of new resources (several centralized resources and packages can 113 | be selected) 114 | 115 | A subcommand called `upgrade` can be used to run these tests: 116 | 117 | ```shell 118 | uptest upgrade --source v1.0.0 --target v1.1.0 --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" 119 | ``` 120 | 121 | Testing a scenario like this would be especially valuable in terms of increasing 122 | coverage and improving confidence levels ahead of the provider releases. 123 | 124 | ### Diff tests - Release testing for providers 125 | Before release, it may be valuable to look at the differences with the previous 126 | version and test the resources that are thought to affect the lifecycle. Some 127 | different tools can be used to automate this process. For example, the output of 128 | the [crd-diff](https://github.com/upbound/official-providers-ci/tree/main/cmd/crddiff) 129 | tool can be used to detect field changes in CRDs. Additionally, resources that 130 | have undergone configuration changes can be examined by parsing the git output. 
131 | 132 | For example, before the `1.3.0` release, the difference between the `1.2.0` 133 | release and the `1.3.0` release can be examined as described above, the affected 134 | resources can be identified, and Uptest jobs can be triggered on them. 135 | 136 | ```shell 137 | # Different diff sources can be supported 138 | uptest diff-test --source v1.2.0 --diff-source git --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" 139 | ``` 140 | 141 | ### Connection details tests 142 | 143 | `Connection Details`, are one of the key points Crossplane provides when a 144 | provider creates a managed resource. The resource can create resource-specific 145 | details including usernames, passwords, or connection details such as an IP 146 | address. Such details are vital for the user to access and use the provisioned 147 | resource. Uptest does not perform any tests on `Connection Details` of such 148 | resources today. 149 | 150 | For example, when a `Cluster` is provisioned through `provider-gcp`, the 151 | connection details of this cluster are stored in a secret. Whether the value in 152 | this secret is properly populated or not is of the same importance as whether 153 | the resource is Ready or not. 154 | 155 | For this reason, a test step that checks the `Connection Details` can be added 156 | for resources. These test steps can be manipulated with various hooks. 157 | Basically, the CLI of the relevant provider can be used here. At this point, it 158 | should be noted that this will be a custom step for frequently used resources 159 | rather than a generic step. 160 | 161 | The annotations in the example manifests manage the `Import` and `Update` steps. 162 | It would be appropriate to consider `Connection Details` as a similar step and 163 | manage it through annotations for the desired resources. As a default behavior, 164 | the `Connection Details` step will not run. 
This step can be executed if the 165 | annotation is set in the related example. The secret field values to be checked 166 | in the related annotation can be specified. For example: 167 | 168 | ```yaml 169 | apiVersion: rds.aws.upbound.io/v1beta1 170 | kind: Cluster 171 | metadata: 172 | name: example 173 | annotations: 174 | uptest.upbound.io/connection-details: "endpoint,master_username,port" 175 | meta.upbound.io/example-id: rds/v1beta1/cluster 176 | spec: 177 | forProvider: 178 | region: us-west-1 179 | engine: aurora-postgresql 180 | masterUsername: cpadmin 181 | autoGeneratePassword: true 182 | masterPasswordSecretRef: 183 | name: sample-cluster-password 184 | namespace: upbound-system 185 | key: password 186 | skipFinalSnapshot: true 187 | writeConnectionSecretToRef: 188 | name: sample-rds-cluster-secret 189 | namespace: upbound-system 190 | ``` 191 | 192 | Related Issue: https://github.com/upbound/official-providers-ci/issues/82 193 | 194 | ### ProviderConfig Coverage 195 | 196 | Uptest only uses the `Secret` source from the `ProviderConfig` in its tests. 197 | However, Crossplane providers allow many different provider configuration 198 | mechanisms (`IRSA`, `WebIdentitiy`, etc.). For this reason, changes made in 199 | this context are tested manually and there is difficulty in preparing the 200 | environments locally. Testing different `ProviderConfig` sources will 201 | significantly increase provider test coverage. It will also improve the ability 202 | to test changes locally. 203 | 204 | By default, `Secret` source is still used, but a specific provider config 205 | manifest can be applied to the cluster via a CLI flag: 206 | 207 | ```shell 208 | uptest e2e --provider-config="examples/irsa-config.yaml" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" 209 | ``` 210 | 211 | ### More Comprehensive Test Assertions 212 | 213 | `Uptest` focuses on the status conditions of the Crossplane resources. 
For 214 | example, during a test of MR, Uptest checks the `UpToDate` condition and does 215 | not look at the fields of the created resources. Doing more comprehensive 216 | assertions like comparing the values of the fields in the spec and status of MRs 217 | and validating patch steps for Compositions will increase the test coverage. 218 | 219 | Comparisons can be made here using Crossplane's `fieldpath` library. The set of 220 | fields in `status.AtProvider` has, with recent changes, become a set that 221 | includes those in `spec.ForProvider`. In this context, comparisons can be made 222 | using a go tool written using the capabilities of the `fieldpath` library. 223 | 224 | ```go 225 | // ... 226 | 227 | pv, err := fieldpath.PaveObject(mg) 228 | if err != nil { 229 | return nil, errors.Wrap(err, "cannot pave the managed resource") 230 | } 231 | 232 | specV, err := pv.GetValue("spec.forProvider") 233 | if err != nil { 234 | return nil, errors.Wrap(err, "cannot get spec.forProvider value from paved object") 235 | } 236 | specM, ok := specV.(map[string]any) 237 | if !ok { 238 | return nil, errors.Wrap(err, "spec.forProvider must be a map") 239 | } 240 | 241 | statusV, err := pv.GetValue("status.atProvider") 242 | if err != nil { 243 | return nil, errors.Wrap(err, "cannot get status.atProvider value from paved object") 244 | } 245 | statusM, ok := statusV.(map[string]any) 246 | if !ok { 247 | return nil, errors.Wrap(err, "status.atProvider must be a map") 248 | } 249 | 250 | for key, value := range specM { 251 | // Recursively compare the spec fields with status fields 252 | // ... 253 | } 254 | 255 | // ... 256 | ``` 257 | 258 | Related Issue: https://github.com/upbound/official-providers-ci/issues/175 259 | 260 | ### Mocking Providers 261 | 262 | Uptest provisions physical resources while running tests on providers. In some 263 | cases, users may want to run their tests on a mock system. 
Mocking providers is 264 | not directly the subject of Uptest, but enabling Uptest to run against existing 265 | mock infrastructures will be beneficial. 266 | 267 | For example, [Localstack](https://github.com/localstack/localstack) is a cloud 268 | service emulator that runs in a single container on your laptop or in your CI 269 | environment. With LocalStack, you can run your AWS applications or Lambdas 270 | entirely on your local machine without connecting to a remote cloud provider. 271 | 272 | Currently, there is an ability to use LocalStack in `provider-aws`. If 273 | `ProviderConfig` is configured properly, it will be possible to perform the 274 | relevant tests in a mocked way. Therefore, increasing the `ProviderConfig` 275 | coverage mentioned before and even allowing custom configurations is key to 276 | unlocking this capability. 277 | 278 | ```shell 279 | uptest e2e --provider-config="examples/localstack-config.yaml" --data-source="${UPTEST_DATASOURCE_PATH}" --setup-script=cluster/test/setup.sh --default-conditions="Test" 280 | ``` 281 | 282 | ### Debugging Improvements 283 | 284 | Debugging is of great importance for tests that fail. Uptest has added many 285 | debugging stages with the latest developments. For example, printing resource 286 | yaml outputs to the screen at regular intervals during testing is an example of 287 | this. However, this also creates noise from time to time. It is important to 288 | regulate the log frequency and review the logs collected after and during the 289 | test. In this way, it is valuable to both easily understand the root cause of a 290 | test failure and to provide the opportunity for rapid 291 | intervention without waiting for the test to be completed. 
There are some open 292 | issues about this debugging: 293 | 294 | - Using `crossplane beta trace` instead of `kubectl` for collecting debugging 295 | information: 296 | https://github.com/upbound/official-providers-ci/issues/177 297 | - Decreasing the log noise in Import step: 298 | https://github.com/upbound/official-providers-ci/issues/168 299 | - Exposing Kind cluster for faster development cycle: 300 | https://github.com/upbound/official-providers-ci/issues/4 301 | 302 | ### Creating End User Documentation 303 | 304 | Detailed documentation in which Uptest's use cases and instructions are 305 | summarized is needed by end users. The main purpose of this type of document is 306 | to explain how to use the tool. 307 | -------------------------------------------------------------------------------- /design/one-pager-considerations-for-changing-test-framework-of-uptest.md: -------------------------------------------------------------------------------- 1 | # Considerations for Changing Test Framework of Uptest 2 | 3 | * Owner: Sergen Yalcin (@sergenyalcin) 4 | * Reviewers: Uptest Maintainers 5 | * Status: Accepted 6 | 7 | ## Background 8 | 9 | Uptest is a tool for testing and validating Crossplane providers. This tool is 10 | utilized to test the behavior of Crossplane controllers, resource management 11 | operations, and other Crossplane components. Uptest can simulate the creation, 12 | update, import, deletion, and other operations of Crossplane resources. 13 | 14 | When `Uptest` was first written, it used [kuttl](https://github.com/kudobuilder/kuttl) 15 | as the underlying test framework in order to have good declarative testing 16 | capabilities. Over time, it was realized that there are better and more 17 | compatible alternatives and a perception that `kuttl` isn't being actively 18 | maintained, so it was decided to evaluate an alternative underlying framework. 
19 | 20 | [kuttl](https://github.com/kudobuilder/kuttl) provides a declarative approach to 21 | test Kubernetes Operators. `kuttl` is designed for testing operators, however it 22 | can declaratively test any kubernetes objects. 23 | 24 | When starting the `Uptest` effort, we considered a few different alternatives 25 | and `kuttl`'s capabilities were appropriate for our assertion aims even though 26 | it missed some points. Today, we consider changing the underlying test framework 27 | tool because of the perception of `kuttl` not being actively maintained and 28 | other frameworks offering superior capabilities. 29 | 30 | ## Goals 31 | 32 | Decide on a more comprehensive underlying test framework to meet the current 33 | and [future requirements](https://github.com/crossplane/uptest/pull/10/files) of 34 | Uptest. 35 | 36 | ## Proposal - Switching to `chainsaw` 37 | 38 | [chainsaw](https://github.com/kyverno/chainsaw) provides a declarative approach 39 | to test Kubernetes operators and controllers. While Chainsaw is designed for 40 | testing operators and controllers, it can declaratively test any Kubernetes 41 | objects. Chainsaw is an open-source tool that was initially developed for 42 | defining and running Kyverno end-to-end tests. The tool has Apache-2.0 license. 43 | 44 | In addition to providing similar functionality provided by `kuttl`, it also 45 | offers better logs, config maps assertions, 46 | [assertions trees](https://kyverno.io/blog/2023/12/13/kyverno-chainsaw-exploring-the-power-of-assertion-trees/) 47 | and many more things. The fact that it is well-maintained, and has the 48 | capability for migration from `kuttl` makes it an attractive option. 49 | 50 | `chainsaw` shares similar concepts with `kuttl`. In this way, we do not have to 51 | make major changes to the templates. 52 | 53 | ```yaml 54 | apiVersion: chainsaw.kyverno.io/v1alpha1 55 | kind: Test 56 | metadata: 57 | name: example 58 | spec: 59 | steps: 60 | - try: 61 | # ... 
62 | - apply: 63 | file: my-configmap.yaml 64 | # ... 65 | ``` 66 | 67 | ```yaml 68 | apiVersion: chainsaw.kyverno.io/v1alpha1 69 | kind: Test 70 | metadata: 71 | name: example 72 | spec: 73 | steps: 74 | - try: 75 | # ... 76 | - command: 77 | entrypoint: echo 78 | args: 79 | - hello chainsaw 80 | # ... 81 | ``` 82 | 83 | Also provides logical assertion statements: 84 | 85 | ```yaml 86 | apiVersion: chainsaw.kyverno.io/v1alpha1 87 | kind: Test 88 | metadata: 89 | name: example 90 | spec: 91 | steps: 92 | - try: 93 | # ... 94 | - assert: 95 | resource: 96 | apiVersion: v1 97 | kind: Deployment 98 | metadata: 99 | name: foo 100 | spec: 101 | (replicas > 3): true 102 | # ... 103 | ``` 104 | 105 | Resource Template support is another important requirement for Uptest: 106 | 107 | ```yaml 108 | apiVersion: chainsaw.kyverno.io/v1alpha1 109 | kind: Test 110 | metadata: 111 | name: template 112 | spec: 113 | template: true 114 | steps: 115 | - assert: 116 | resource: 117 | # apiVersion, kind, name, namespace and labels are considered for templating 118 | apiVersion: v1 119 | kind: ConfigMap 120 | metadata: 121 | name: ($namespace) 122 | # other fields are not (they are part of the assertion tree) 123 | data: 124 | foo: ($namespace) 125 | ``` 126 | 127 | Related Issue: https://github.com/upbound/official-providers-ci/issues/179 128 | 129 | In short, `chainsaw` is a more capable and well-maintained framework than 130 | `kuttl` and switching to it will better suit Uptest's future requirements. 131 | 132 | ## Alternative Considered 133 | 134 | ### Using the `crossplane-e2e-framework` 135 | 136 | [crossplane-e2e-framework](https://github.com/crossplane/crossplane/tree/master/test/e2e) 137 | is a [k8s-e2e-framework](https://pkg.go.dev/sigs.k8s.io/e2e-framework)-based 138 | test framework that provides a baseline for simulating the real-world use to 139 | exercise all of `Crossplane`'s functionality. 140 | 141 | `e2e-framework` is a tool that allows tests to be written in Go. 
Additionally, 142 | one of its advantages is that it works with familiar conventions in the 143 | environment we use. On the other hand, these types of utilities can be used when 144 | writing tests, thanks to their functions specific to the Crossplane ecosystem. 145 | However, this will mean changing the entire `Uptest` code-base currently used. 146 | In this context, it should be taken into consideration that such a change would 147 | be quite large. 148 | 149 | As mentioned in the [discussion](https://github.com/crossplane-contrib/provider-argocd/pull/89#issuecomment-2016655783), 150 | to clarify the use cases of `Uptest` and `e2e-framework`, it might be good to 151 | strengthen the documentation of the tools. One could also write a guideline that 152 | directly compares the two tools and discusses their capabilities and use cases. 153 | This way, the end user can more easily decide when to use `Uptest` and when to 154 | use `e2e-framework`. 155 | 156 | ### Writing an Underlying Test Framework From Scratch 157 | 158 | Writing such a tool where all the steps in the test pipeline are modular, using 159 | the existing Go libraries, has advantages and disadvantages. One of the most 160 | important advantages of writing such a tool is that it can be developed 161 | completely according to our environment and requirements, and since it is 162 | written in accordance with our test scenarios, it can be easily integrated into 163 | GitHub Actions and other pipeline elements (example generation). With this tool, 164 | which can run with different configurations for different test scenarios, it 165 | will be possible to handle cases that we can predict for now (and will come to 166 | us in the future). 167 | 168 | However, the time it takes to write such a tool is also important. Maybe it 169 | won't take long to reveal the general outline of the tool, but as I mentioned 170 | above, it may take some time for it to be configurable for different scenarios. 
171 | In this context, if this option is selected, it would be appropriate to first 172 | create the tool in general outline and then integrate it into various scenarios 173 | (iteratively) to speed up the process. -------------------------------------------------------------------------------- /docs/integrating-uptest-for-e2e-testing.md: -------------------------------------------------------------------------------- 1 | # Integrating Uptest for End to End Testing 2 | 3 | In this tutorial, we will integrate [uptest](https://github.com/crossplane/uptest) to a Github repository to automate end to end 4 | testing managed resources. While we will use a `Provider` repository as an example, the process will be almost identical 5 | for a `Configuration` repository. 6 | 7 | Starting with a provider repository with no end to end testing capability, we will end up having: 8 | - A make target to locally test examples end to end 9 | - A GitHub action triggered for PRs whenever a comment as `/test-examples=` is left 10 | 11 | ## Setting up the Make targets 12 | 13 | 1. Go to the [demo repository](https://github.com/upbound/demo-uptest-integration) which contains a GitHub provider 14 | generated using upjet and hit the `Use this template` button to initialize your demo repository under your own 15 | GitHub organization. 16 | 1. Clone your demo repository on your local and `cd` into the root directory. 17 | 2. Initialize build submodule with 18 | 19 | ```bash 20 | make submodules 21 | ``` 22 | 23 | 4. First we will add a simple setup script that will deploy a secret and a provider config for our provider. 24 | 25 | ```bash 26 | mkdir -p cluster/test 27 | touch cluster/test/setup.sh 28 | chmod +x cluster/test/setup.sh 29 | 30 | cat < cluster/test/setup.sh 31 | #!/usr/bin/env bash 32 | set -aeuo pipefail 33 | 34 | echo "Running setup.sh" 35 | echo "Creating cloud credential secret..." 
36 | \${KUBECTL} -n upbound-system create secret generic provider-secret --from-literal=credentials="{\"token\":\"\${UPTEST_CLOUD_CREDENTIALS}\"}" \ 37 | --dry-run=client -o yaml | \${KUBECTL} apply -f - 38 | 39 | echo "Waiting until provider is healthy..." 40 | \${KUBECTL} wait provider.pkg --all --for condition=Healthy --timeout 5m 41 | 42 | echo "Waiting for all pods to come online..." 43 | \${KUBECTL} -n upbound-system wait --for=condition=Available deployment --all --timeout=5m 44 | 45 | echo "Creating a default provider config..." 46 | cat < Note: If you're following this tutorial for a `Configuration` repository, you will need to add 80 | > `local.xpkg.deploy.configuration.$(PROJECT_NAME)` instead of `local.xpkg.deploy.provider.$(PROJECT_NAME)` to the 81 | > `e2e` target. 82 | 83 | 6. Commit the changes we did so far. 84 | 85 | ```bash 86 | git add Makefile cluster/test/setup.sh 87 | git commit -m "Add uptest and e2e targets" 88 | ``` 89 | 90 | ## Testing Locally 91 | 92 | 1. Generate a [Personal Access Token](https://github.com/settings/tokens/new) for your Github account with 93 | `repo/public_repo` and `delete_repo` scopes. 94 | 2. Run the following: 95 | 96 | ```bash 97 | export UPTEST_CLOUD_CREDENTIALS= 98 | UPTEST_EXAMPLE_LIST=examples/repository/repository.yaml make e2e 99 | ``` 100 | 101 | You should see a `PASS` at the end of logs indicating everything worked fine. 102 | 103 | ## Adding the GitHub workflow 104 | 105 | Now we have things working locally, let's add a GitHub workflow to automate end to end testing with CI. 106 | 107 | 1. 
Run the following to add the GitHub workflow definition which will be triggered for `issue_comment` events and will call 108 | uptests reusable workflow: 109 | 110 | ```bash 111 | cat < .github/workflows/e2e.yaml 112 | name: End to End Testing 113 | 114 | on: 115 | issue_comment: 116 | types: [created] 117 | 118 | jobs: 119 | e2e: 120 | uses: upbound/uptest/.github/workflows/pr-comment-trigger.yml@main 121 | secrets: 122 | UPTEST_CLOUD_CREDENTIALS: \${{ secrets.UPTEST_CLOUD_CREDENTIALS }} 123 | UPTEST_DATASOURCE: \${{ secrets.UPTEST_DATASOURCE }} 124 | EOF 125 | ``` 126 | 127 | > See [Injecting Dynamic Values (and Datasource)](../README.md#injecting-dynamic-values-and-datasource) for more 128 | > details on `UPTEST_DATASOURCE` secret. 129 | 130 | 1. Commit and push to the `main` branch of the repository. 131 | 132 | ``` 133 | git add .github/workflows/e2e.yaml 134 | git commit -s -m "Add e2e workflow" 135 | git push origin main 136 | ``` 137 | 138 | 3. Lastly, we need to add a Repository Secret with our GitHub token. 139 | 1. Go to your repository Settings in GitHub UI. 140 | 2. On the left side, select `Secrets` -> `Actions` under `Security` section. 141 | 3. Hit `New repository secret` 142 | 4. Enter `UPTEST_CLOUD_CREDENTIALS` as `Name` and your GitHub Token as `Secret` and hit `Add secret` 143 | 144 | ## Testing via CI 145 | 146 | We are now ready to test our changes end to end via GitHub Actions. We will try that out by opening a test PR. 147 | 148 | 1. Go to the `examples/repository/repository.yaml` file and make some wording changes in `description` field, e.g. 149 | Add `CI -` as a prefix. 150 | 2. Create a PR with that change. 151 | 3. Add the following comment on the PR: 152 | 153 | ``` 154 | /test-examples="examples/repository/repository.yaml" 155 | ``` 156 | 157 | 4. Check the Actions and follow how end to end testing goes. 
158 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | module github.com/crossplane/uptest 6 | 7 | go 1.21 8 | 9 | require ( 10 | github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f 11 | github.com/google/go-cmp v0.5.9 12 | gopkg.in/alecthomas/kingpin.v2 v2.2.6 13 | k8s.io/apimachinery v0.26.3 14 | sigs.k8s.io/yaml v1.3.0 15 | ) 16 | 17 | require ( 18 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect 19 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | github.com/emicklei/go-restful/v3 v3.9.0 // indirect 22 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect 23 | github.com/go-logr/logr v1.2.3 // indirect 24 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 25 | github.com/go-openapi/jsonreference v0.20.0 // indirect 26 | github.com/go-openapi/swag v0.21.1 // indirect 27 | github.com/gogo/protobuf v1.3.2 // indirect 28 | github.com/golang/protobuf v1.5.2 // indirect 29 | github.com/google/gnostic v0.6.9 // indirect 30 | github.com/google/gofuzz v1.2.0 // indirect 31 | github.com/imdario/mergo v0.3.12 // indirect 32 | github.com/josharian/intern v1.0.0 // indirect 33 | github.com/json-iterator/go v1.1.12 // indirect 34 | github.com/mailru/easyjson v0.7.7 // indirect 35 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 36 | github.com/modern-go/reflect2 v1.0.2 // indirect 37 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 38 | github.com/pkg/errors v0.9.1 // indirect 39 | github.com/stretchr/testify v1.8.1 // indirect 40 | golang.org/x/net v0.7.0 // indirect 41 | golang.org/x/oauth2 v0.1.0 // indirect 42 | golang.org/x/sys 
v0.5.0 // indirect 43 | golang.org/x/term v0.5.0 // indirect 44 | golang.org/x/text v0.7.0 // indirect 45 | golang.org/x/time v0.3.0 // indirect 46 | google.golang.org/appengine v1.6.7 // indirect 47 | google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect 48 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 49 | gopkg.in/inf.v0 v0.9.1 // indirect 50 | gopkg.in/yaml.v2 v2.4.0 // indirect 51 | gopkg.in/yaml.v3 v3.0.1 // indirect 52 | k8s.io/api v0.26.3 // indirect 53 | k8s.io/client-go v0.26.3 // indirect 54 | k8s.io/klog/v2 v2.80.1 // indirect 55 | k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect 56 | k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect 57 | sigs.k8s.io/controller-runtime v0.14.6 // indirect 58 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect 59 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 60 | ) 61 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 4 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 5 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= 6 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 7 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= 8 | github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= 9 | github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 10 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 11 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 12 | github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 13 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 14 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 15 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 16 | github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= 17 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 18 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 19 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 20 | github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 21 | github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 22 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 23 | github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f h1:wDRr6gaoiQstEdddrn0B5SSSgzdXreOQAbdmRH+9JeI= 24 | github.com/crossplane/crossplane-runtime v0.20.0-rc.0.0.20230406155702-4e1673b7141f/go.mod h1:ebtUpmconMy8RKUEhrCXTUFSOpfGQqbKM2E+rjCCYJo= 25 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 26 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 27 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 28 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 29 | github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= 30 | github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 31 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 32 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 33 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 34 | github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 35 | github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= 36 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 37 | github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= 38 | github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= 39 | github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= 40 | github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 41 | github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= 42 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 43 | github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 44 | github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= 45 | github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 46 | github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= 47 | github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= 48 | 
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 49 | github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= 50 | github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 51 | github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= 52 | github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= 53 | github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 54 | github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= 55 | github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= 56 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 57 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 58 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 59 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 60 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 61 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 62 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 63 | github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 64 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 65 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 66 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 67 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 68 | 
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 69 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 70 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 71 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 72 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 73 | github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 74 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 75 | github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= 76 | github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= 77 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 78 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 79 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 80 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 81 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 82 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 83 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 84 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 85 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 86 | github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 87 | github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 88 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 89 | github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 90 | github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= 91 | github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= 92 | github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 93 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 94 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 95 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 96 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 97 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 98 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 99 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 100 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 101 | github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= 102 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 103 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 104 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 105 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 106 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 107 | github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 108 | github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 109 | github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 110 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 111 
| github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 112 | github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= 113 | github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 114 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 115 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 116 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 117 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 118 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 119 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 120 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 121 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 122 | github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= 123 | github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= 124 | github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= 125 | github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= 126 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 127 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 128 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 129 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 130 | github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 131 | github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= 132 | github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= 133 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 134 | github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= 135 | github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= 136 | github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= 137 | github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= 138 | github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= 139 | github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= 140 | github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 141 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 142 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 143 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 144 | github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= 145 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 146 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 147 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 148 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 149 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 150 | github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 151 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 152 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 153 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 154 | github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= 155 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 156 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 157 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= 158 | github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 159 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 160 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 161 | go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= 162 | go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= 163 | go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= 164 | go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= 165 | go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= 166 | go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= 167 | go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= 168 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 169 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 170 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 171 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 172 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 173 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 174 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 175 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 176 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 177 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 178 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 179 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 180 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 181 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 182 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 183 | golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 184 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 185 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 186 | golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 187 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 188 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 189 | golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 190 | golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= 191 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 192 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 193 | golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 194 | golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= 195 | golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= 196 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 197 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 198 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 199 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 200 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 201 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 202 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 203 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 204 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 205 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 206 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 207 | golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 208 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 209 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 210 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 211 | golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= 212 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 213 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 214 | golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= 215 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 216 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 217 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 218 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 219 | golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 220 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 221 | golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= 222 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 223 | golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 224 | golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 225 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 226 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 227 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 228 | 
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 229 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 230 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 231 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 232 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 233 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 234 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 235 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 236 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 237 | gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= 238 | gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= 239 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 240 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 241 | google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= 242 | google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 243 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 244 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 245 | google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 246 | google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 247 | google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 248 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 249 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 250 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= 251 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 252 | google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 253 | google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 254 | google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 255 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 256 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 257 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 258 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 259 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 260 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 261 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 262 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 263 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 264 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 265 | google.golang.org/protobuf 
v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 266 | google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 267 | google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= 268 | google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 269 | gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= 270 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 271 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 272 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 273 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 274 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 275 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 276 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 277 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 278 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 279 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 280 | gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 281 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 282 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 283 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 284 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 285 | gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 286 | gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 287 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 288 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 289 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 290 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 291 | k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= 292 | k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= 293 | k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= 294 | k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= 295 | k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= 296 | k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= 297 | k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= 298 | k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= 299 | k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= 300 | k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 301 | k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= 302 | k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= 303 | k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= 304 | k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 305 | sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= 306 | 
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= 307 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= 308 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= 309 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= 310 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= 311 | sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= 312 | sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= 313 | -------------------------------------------------------------------------------- /go.sum.license: -------------------------------------------------------------------------------- 1 | 2 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 3 | 4 | SPDX-License-Identifier: CC0-1.0 5 | -------------------------------------------------------------------------------- /hack/check_endpoints.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function check_endpoints { 4 | endpoints=( $("${KUBECTL}" -n "${CROSSPLANE_NAMESPACE}" get endpoints --no-headers | grep 'provider-' | awk '{print $1}') ) 5 | for endpoint in ${endpoints[@]}; do 6 | port=$(${KUBECTL} -n "${CROSSPLANE_NAMESPACE}" get endpoints "$endpoint" -o jsonpath='{.subsets[*].ports[0].port}') 7 | if [[ -z "${port}" ]]; then 8 | echo "$endpoint - No served ports" 9 | return 1 10 | else 11 | echo "$endpoint - Ports present" 12 | fi 13 | done 14 | } 15 | 16 | attempt=1 17 | max_attempts=10 18 | while [[ $attempt -le $max_attempts ]]; do 19 | if check_endpoints; then 20 | exit 0 21 | else 22 | printf "Retrying... 
(%d/%d)\n" "$attempt" "$max_attempts" >&2 23 | fi 24 | ((attempt++)) 25 | sleep 5 26 | done 27 | exit 1 28 | -------------------------------------------------------------------------------- /hack/patch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function patch { 4 | kindgroup=$1; 5 | name=$2; 6 | if ${KUBECTL} --subresource=status patch "$kindgroup/$name" --type=merge -p '{"status":{"conditions":[]}}' ; then 7 | return 0; 8 | else 9 | return 1; 10 | fi; 11 | }; 12 | 13 | 14 | kindgroup=$1; 15 | name=$2; 16 | attempt=1; 17 | max_attempts=10; 18 | while [[ $attempt -le $max_attempts ]]; do 19 | if patch "$kindgroup" "$name"; then 20 | echo "Successfully patched $kindgroup/$name"; 21 | ${KUBECTL} annotate "$kindgroup/$name" uptest-old-id=$(${KUBECTL} get "$kindgroup/$name" -o=jsonpath='{.status.atProvider.id}') --overwrite; 22 | break; 23 | else 24 | printf "Retrying... (%d/%d) for %s/%s\n" "$attempt" "$max_attempts" "$kindgroup" "$name" >&2; 25 | fi; 26 | ((attempt++)); 27 | sleep 5; 28 | done; 29 | if [[ $attempt -gt $max_attempts ]]; then 30 | echo "Failed to patch $kindgroup/$name after $max_attempts attempts"; 31 | exit 1; 32 | fi; 33 | exit 0; 34 | -------------------------------------------------------------------------------- /internal/config/builder.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2025 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package config contains configuration options for configuring uptest runtime. 6 | package config 7 | 8 | import ( 9 | "time" 10 | ) 11 | 12 | // Builder is a struct that helps construct an AutomatedTest instance step-by-step. 13 | type Builder struct { 14 | test AutomatedTest 15 | } 16 | 17 | // NewBuilder initializes and returns a new Builder instance. 
18 | func NewBuilder() *Builder { 19 | return &Builder{ 20 | test: AutomatedTest{}, 21 | } 22 | } 23 | 24 | // SetDirectory sets the directory path for the AutomatedTest and returns the Builder. 25 | func (b *Builder) SetDirectory(directory string) *Builder { 26 | b.test.Directory = directory 27 | return b 28 | } 29 | 30 | // SetManifestPaths sets the paths of the manifest files for the AutomatedTest and returns the Builder. 31 | func (b *Builder) SetManifestPaths(manifestPaths []string) *Builder { 32 | b.test.ManifestPaths = manifestPaths 33 | return b 34 | } 35 | 36 | // SetDataSourcePath sets the data source path for the AutomatedTest and returns the Builder. 37 | func (b *Builder) SetDataSourcePath(dataSourcePath string) *Builder { 38 | b.test.DataSourcePath = dataSourcePath 39 | return b 40 | } 41 | 42 | // SetSetupScriptPath sets the setup script path for the AutomatedTest and returns the Builder. 43 | func (b *Builder) SetSetupScriptPath(setupScriptPath string) *Builder { 44 | b.test.SetupScriptPath = setupScriptPath 45 | return b 46 | } 47 | 48 | // SetTeardownScriptPath sets the teardown script path for the AutomatedTest and returns the Builder. 49 | func (b *Builder) SetTeardownScriptPath(teardownScriptPath string) *Builder { 50 | b.test.TeardownScriptPath = teardownScriptPath 51 | return b 52 | } 53 | 54 | // SetDefaultTimeout sets the default timeout duration for the AutomatedTest and returns the Builder. 55 | func (b *Builder) SetDefaultTimeout(defaultTimeout time.Duration) *Builder { 56 | b.test.DefaultTimeout = defaultTimeout 57 | return b 58 | } 59 | 60 | // SetDefaultConditions sets the default conditions for the AutomatedTest and returns the Builder. 61 | func (b *Builder) SetDefaultConditions(defaultConditions []string) *Builder { 62 | b.test.DefaultConditions = defaultConditions 63 | return b 64 | } 65 | 66 | // SetSkipDelete sets whether the AutomatedTest should skip resource deletion and returns the Builder. 
67 | func (b *Builder) SetSkipDelete(skipDelete bool) *Builder { 68 | b.test.SkipDelete = skipDelete 69 | return b 70 | } 71 | 72 | // SetSkipUpdate sets whether the AutomatedTest should skip resource updates and returns the Builder. 73 | func (b *Builder) SetSkipUpdate(skipUpdate bool) *Builder { 74 | b.test.SkipUpdate = skipUpdate 75 | return b 76 | } 77 | 78 | // SetSkipImport sets whether the AutomatedTest should skip resource imports and returns the Builder. 79 | func (b *Builder) SetSkipImport(skipImport bool) *Builder { 80 | b.test.SkipImport = skipImport 81 | return b 82 | } 83 | 84 | // SetOnlyCleanUptestResources sets whether the AutomatedTest should clean up only test-specific resources and returns the Builder. 85 | func (b *Builder) SetOnlyCleanUptestResources(onlyCleanUptestResources bool) *Builder { 86 | b.test.OnlyCleanUptestResources = onlyCleanUptestResources 87 | return b 88 | } 89 | 90 | // SetRenderOnly sets whether the AutomatedTest should only render outputs without execution and returns the Builder. 91 | func (b *Builder) SetRenderOnly(renderOnly bool) *Builder { 92 | b.test.RenderOnly = renderOnly 93 | return b 94 | } 95 | 96 | // SetLogCollectionInterval sets the interval for log collection during the AutomatedTest and returns the Builder. 97 | func (b *Builder) SetLogCollectionInterval(logCollectionInterval time.Duration) *Builder { 98 | b.test.LogCollectionInterval = logCollectionInterval 99 | return b 100 | } 101 | 102 | // Build finalizes and returns the constructed AutomatedTest instance. 
103 | func (b *Builder) Build() *AutomatedTest { 104 | return &b.test 105 | } 106 | -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package config contains configuration options for configuring uptest runtime. 6 | package config 7 | 8 | import ( 9 | "time" 10 | 11 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 12 | ) 13 | 14 | const ( 15 | // AnnotationKeyTimeout defines a test time for the annotated resource. 16 | AnnotationKeyTimeout = "uptest.upbound.io/timeout" 17 | // AnnotationKeyConditions defines the list of status conditions to 18 | // assert on the tested resource. 19 | AnnotationKeyConditions = "uptest.upbound.io/conditions" 20 | // AnnotationKeyPreAssertHook defines the path to a pre-assert 21 | // hook script to be executed before the resource is tested. 22 | AnnotationKeyPreAssertHook = "uptest.upbound.io/pre-assert-hook" 23 | // AnnotationKeyPostAssertHook defines the path to a post-assert 24 | // hook script to be executed after the resource is tested. 25 | AnnotationKeyPostAssertHook = "uptest.upbound.io/post-assert-hook" 26 | // AnnotationKeyPreDeleteHook defines the path to a pre-delete 27 | // hook script to be executed before the tested resource is deleted. 28 | AnnotationKeyPreDeleteHook = "uptest.upbound.io/pre-delete-hook" 29 | // AnnotationKeyPostDeleteHook defines the path to a post-delete 30 | // hook script to be executed after the tested resource is deleted. 
31 | AnnotationKeyPostDeleteHook = "uptest.upbound.io/post-delete-hook" 32 | // AnnotationKeyUpdateParameter defines the update parameter that will be 33 | // used during the update step 34 | AnnotationKeyUpdateParameter = "uptest.upbound.io/update-parameter" 35 | // AnnotationKeyExampleID is id of example that populated from example 36 | // manifest. This information will be used for determining the root resource 37 | AnnotationKeyExampleID = "meta.upbound.io/example-id" 38 | // AnnotationKeyDisableImport determines whether the Import 39 | // step of the resource to be tested will be executed or not. 40 | AnnotationKeyDisableImport = "uptest.upbound.io/disable-import" 41 | ) 42 | 43 | // AutomatedTest represents an automated test of resource example 44 | // manifests to be run with uptest. 45 | type AutomatedTest struct { 46 | Directory string 47 | 48 | ManifestPaths []string 49 | DataSourcePath string 50 | 51 | SetupScriptPath string 52 | TeardownScriptPath string 53 | 54 | DefaultTimeout time.Duration 55 | DefaultConditions []string 56 | 57 | SkipDelete bool 58 | SkipUpdate bool 59 | SkipImport bool 60 | 61 | OnlyCleanUptestResources bool 62 | 63 | RenderOnly bool 64 | LogCollectionInterval time.Duration 65 | } 66 | 67 | // Manifest represents a resource loaded from an example resource manifest file. 68 | type Manifest struct { 69 | FilePath string 70 | Object *unstructured.Unstructured 71 | YAML string 72 | } 73 | 74 | // TestCase represents a test-case to be run by chainsaw. 75 | type TestCase struct { 76 | Timeout time.Duration 77 | SetupScriptPath string 78 | TeardownScriptPath string 79 | SkipUpdate bool 80 | SkipImport bool 81 | 82 | OnlyCleanUptestResources bool 83 | 84 | TestDirectory string 85 | } 86 | 87 | // Resource represents a Kubernetes object to be tested and asserted 88 | // by uptest. 
89 | type Resource struct { 90 | Name string 91 | Namespace string 92 | KindGroup string 93 | YAML string 94 | APIVersion string 95 | Kind string 96 | 97 | Timeout time.Duration 98 | Conditions []string 99 | PreAssertScriptPath string 100 | PostAssertScriptPath string 101 | PreDeleteScriptPath string 102 | PostDeleteScriptPath string 103 | 104 | UpdateParameter string 105 | UpdateAssertKey string 106 | UpdateAssertValue string 107 | 108 | SkipImport bool 109 | 110 | Root bool 111 | } 112 | -------------------------------------------------------------------------------- /internal/prepare.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package internal implements the uptest runtime for running 6 | // automated tests using resource example manifests 7 | // using chainsaw. 8 | package internal 9 | 10 | import ( 11 | "bytes" 12 | "fmt" 13 | "io" 14 | "log" 15 | "math/rand" 16 | "os" 17 | "path/filepath" 18 | "regexp" 19 | "strings" 20 | 21 | "github.com/crossplane/crossplane-runtime/pkg/errors" 22 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 23 | kyaml "k8s.io/apimachinery/pkg/util/yaml" 24 | "sigs.k8s.io/yaml" 25 | 26 | "github.com/crossplane/uptest/internal/config" 27 | ) 28 | 29 | var ( 30 | charset = []rune("abcdefghijklmnopqrstuvwxyz0123456789") 31 | 32 | dataSourceRegex = regexp.MustCompile(`\${data\.(.*?)}`) 33 | randomStrRegex = regexp.MustCompile(`\${Rand\.(.*?)}`) 34 | 35 | caseDirectory = "case" 36 | ) 37 | 38 | type injectedManifest struct { 39 | Path string 40 | Manifest string 41 | } 42 | 43 | // PreparerOption is a functional option type for configuring a Preparer. 44 | type PreparerOption func(*Preparer) 45 | 46 | // WithDataSource is a functional option that sets the data source path for the Preparer. 
47 | func WithDataSource(path string) PreparerOption { 48 | return func(p *Preparer) { 49 | p.dataSourcePath = path 50 | } 51 | } 52 | 53 | // WithTestDirectory is a functional option that sets the test directory for the Preparer. 54 | func WithTestDirectory(path string) PreparerOption { 55 | return func(p *Preparer) { 56 | p.testDirectory = path 57 | } 58 | } 59 | 60 | // NewPreparer creates a new Preparer instance with the provided test file paths and optional configurations. 61 | // It applies any provided PreparerOption functions to customize the Preparer. 62 | func NewPreparer(testFilePaths []string, opts ...PreparerOption) *Preparer { 63 | p := &Preparer{ 64 | testFilePaths: testFilePaths, 65 | testDirectory: os.TempDir(), // Default test directory is the system's temporary directory. 66 | } 67 | // Apply each provided option to configure the Preparer. 68 | for _, f := range opts { 69 | f(p) 70 | } 71 | return p 72 | } 73 | 74 | // Preparer represents a structure used to prepare testing environments or configurations. 75 | type Preparer struct { 76 | testFilePaths []string // Paths to the test files. 77 | dataSourcePath string // Path to the data source file. 78 | testDirectory string // Directory where tests will be executed. 79 | } 80 | 81 | // PrepareManifests prepares and processes manifests from test files. 82 | // It performs the following steps: 83 | // 1. Cleans and recreates the case directory. 84 | // 2. Injects variables into test files. 85 | // 3. Decodes, processes, and validates each manifest file, skipping any that require manual intervention. 86 | // 4. Returns the processed manifests or an error if any step fails. 87 | // 88 | //nolint:gocyclo // This function is not complex, gocyclo threshold was reached due to the error handling. 
89 | func (p *Preparer) PrepareManifests() ([]config.Manifest, error) { 90 | caseDirectory := filepath.Join(p.testDirectory, caseDirectory) 91 | if err := os.RemoveAll(caseDirectory); err != nil { 92 | return nil, errors.Wrapf(err, "cannot clean directory %s", caseDirectory) 93 | } 94 | if err := os.MkdirAll(caseDirectory, os.ModePerm); err != nil { //nolint:gosec // directory permissions are not critical here 95 | return nil, errors.Wrapf(err, "cannot create directory %s", caseDirectory) 96 | } 97 | 98 | injectedFiles, err := p.injectVariables() 99 | if err != nil { 100 | return nil, errors.Wrap(err, "cannot inject variables") 101 | } 102 | 103 | manifests := make([]config.Manifest, 0, len(injectedFiles)) 104 | for _, data := range injectedFiles { 105 | decoder := kyaml.NewYAMLOrJSONDecoder(bytes.NewBufferString(data.Manifest), 1024) 106 | for { 107 | u := &unstructured.Unstructured{} 108 | if err := decoder.Decode(&u); err != nil { 109 | if errors.Is(err, io.EOF) { 110 | break 111 | } 112 | return nil, errors.Wrap(err, "cannot decode manifest") 113 | } 114 | if u != nil { 115 | if v, ok := u.GetAnnotations()["upjet.upbound.io/manual-intervention"]; ok { 116 | log.Printf("Skipping %s with name %s since it requires the following manual intervention: %s\n", u.GroupVersionKind().String(), u.GetName(), v) 117 | continue 118 | } 119 | y, err := yaml.Marshal(u) 120 | if err != nil { 121 | return nil, errors.Wrapf(err, "cannot marshal manifest for \"%s/%s\"", u.GetObjectKind(), u.GetName()) 122 | } 123 | manifests = append(manifests, config.Manifest{ 124 | FilePath: data.Path, 125 | Object: u, 126 | YAML: string(y), 127 | }) 128 | } 129 | } 130 | } 131 | return manifests, nil 132 | } 133 | 134 | func (p *Preparer) injectVariables() ([]injectedManifest, error) { 135 | dataSourceMap := make(map[string]string) 136 | if p.dataSourcePath != "" { 137 | dataSource, err := os.ReadFile(p.dataSourcePath) 138 | if err != nil { 139 | return nil, errors.Wrap(err, "cannot read data 
source file") 140 | } 141 | if err := yaml.Unmarshal(dataSource, &dataSourceMap); err != nil { 142 | return nil, errors.Wrap(err, "cannot prepare data source map") 143 | } 144 | } 145 | 146 | inputs := make([]injectedManifest, len(p.testFilePaths)) 147 | for i, f := range p.testFilePaths { 148 | manifestData, err := os.ReadFile(filepath.Clean(f)) 149 | if err != nil { 150 | return nil, errors.Wrapf(err, "cannot read %s", f) 151 | } 152 | inputs[i] = injectedManifest{ 153 | Path: f, 154 | Manifest: p.injectValues(string(manifestData), dataSourceMap), 155 | } 156 | } 157 | return inputs, nil 158 | } 159 | 160 | func (p *Preparer) injectValues(manifestData string, dataSourceMap map[string]string) string { 161 | // Inject data source values such as tenantID, objectID, accountID 162 | dataSourceKeys := dataSourceRegex.FindAllStringSubmatch(manifestData, -1) 163 | for _, dataSourceKey := range dataSourceKeys { 164 | if v, ok := dataSourceMap[dataSourceKey[1]]; ok { 165 | manifestData = strings.ReplaceAll(manifestData, dataSourceKey[0], v) 166 | } 167 | } 168 | // Inject random strings 169 | randomKeys := randomStrRegex.FindAllStringSubmatch(manifestData, -1) 170 | for _, randomKey := range randomKeys { 171 | switch randomKey[1] { 172 | case "RFC1123Subdomain": 173 | r := generateRFC1123SubdomainCompatibleString() 174 | manifestData = strings.Replace(manifestData, randomKey[0], r, 1) 175 | default: 176 | continue 177 | } 178 | } 179 | return manifestData 180 | } 181 | 182 | func generateRFC1123SubdomainCompatibleString() string { 183 | s := make([]rune, 8) 184 | for i := range s { 185 | s[i] = charset[rand.Intn(len(charset))] //nolint:gosec // no need for crypto/rand here 186 | } 187 | return fmt.Sprintf("op-%s", string(s)) 188 | } 189 | -------------------------------------------------------------------------------- /internal/templates/00-apply.yaml.tmpl: -------------------------------------------------------------------------------- 1 | # This file belongs to the 
resource apply step. 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: apply 6 | spec: 7 | timeouts: 8 | apply: {{ .TestCase.Timeout }} 9 | assert: {{ .TestCase.Timeout }} 10 | exec: {{ .TestCase.Timeout }} 11 | steps: 12 | {{- if .TestCase.SetupScriptPath }} 13 | - name: Run Setup Script 14 | description: Setup the test environment by running the setup script. 15 | try: 16 | - command: 17 | entrypoint: {{ .TestCase.SetupScriptPath }} 18 | {{- end }} 19 | - name: Apply Resources 20 | description: Apply resources to the cluster. 21 | try: 22 | - apply: 23 | file: {{ .TestCase.TestDirectory }} 24 | - script: 25 | content: | 26 | echo "Runnning annotation script" 27 | {{- range $i, $resource := .Resources }} 28 | {{- if eq $resource.KindGroup "secret." -}} 29 | {{continue}} 30 | {{- end -}} 31 | {{- if not $resource.Namespace }} 32 | ${KUBECTL} annotate {{ $resource.KindGroup }}/{{ $resource.Name }} upjet.upbound.io/test=true --overwrite 33 | {{- end }} 34 | {{- end }} 35 | - name: Assert Status Conditions 36 | description: | 37 | Assert applied resources. First, run the pre-assert script if exists. 38 | Then, check the status conditions. Finally run the post-assert script if it 39 | exists. 40 | try: 41 | {{- range $resource := .Resources }} 42 | {{- if eq $resource.KindGroup "secret." 
-}} 43 | {{continue}} 44 | {{- end -}} 45 | {{- if $resource.PreAssertScriptPath }} 46 | - command: 47 | entrypoint: {{ $resource.PreAssertScriptPath }} 48 | {{- end }} 49 | - assert: 50 | resource: 51 | apiVersion: {{ $resource.APIVersion }} 52 | kind: {{ $resource.Kind }} 53 | metadata: 54 | name: {{ $resource.Name }} 55 | {{- if $resource.Namespace }} 56 | namespace: {{ $resource.Namespace }} 57 | {{- end }} 58 | status: 59 | {{- range $condition := $resource.Conditions }} 60 | ((conditions[?type == '{{ $condition }}'])[0]): 61 | status: "True" 62 | {{- end }} 63 | {{- if $resource.PostAssertScriptPath }} 64 | - command: 65 | entrypoint: {{ $resource.PostAssertScriptPath }} 66 | {{- end }} 67 | {{- end }} 68 | -------------------------------------------------------------------------------- /internal/templates/00-apply.yaml.tmpl.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 -------------------------------------------------------------------------------- /internal/templates/01-update.yaml.tmpl: -------------------------------------------------------------------------------- 1 | # This file belongs to the resource update step. 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: update 6 | spec: 7 | timeouts: 8 | apply: {{ .TestCase.Timeout }} 9 | assert: {{ .TestCase.Timeout }} 10 | exec: {{ .TestCase.Timeout }} 11 | steps: 12 | - name: Update Root Resource 13 | description: | 14 | Update the root resource by using the specified update-parameter in annotation. 15 | Before updating the resources, the status conditions are cleaned. 16 | try: 17 | {{- range $resource := .Resources }} 18 | {{- if eq $resource.KindGroup "secret." 
-}} 19 | {{continue}} 20 | {{- end -}} 21 | {{- if not $resource.Namespace }} 22 | {{- if $resource.Root }} 23 | - script: 24 | content: | 25 | ${KUBECTL} --subresource=status patch {{ $resource.KindGroup }}/{{ $resource.Name }} --type=merge -p '{"status":{"conditions":[]}}' 26 | ${KUBECTL} patch {{ $resource.KindGroup }}/{{ $resource.Name }} --type=merge -p '{"spec":{"forProvider":{{ $resource.UpdateParameter }}}}' 27 | {{- end }} 28 | {{- end }} 29 | {{- end }} 30 | - name: Assert Updated Resource 31 | description: | 32 | Assert update operation. Firstly check the status conditions. Then assert 33 | the updated field in status.atProvider. 34 | {{- range $resource := .Resources }} 35 | {{- if eq $resource.KindGroup "secret." -}} 36 | {{continue}} 37 | {{- end -}} 38 | {{- if not $resource.Namespace }} 39 | {{- if $resource.Root }} 40 | try: 41 | - assert: 42 | resource: 43 | apiVersion: {{ $resource.APIVersion }} 44 | kind: {{ $resource.Kind }} 45 | metadata: 46 | name: {{ $resource.Name }} 47 | status: 48 | {{- range $condition := $resource.Conditions }} 49 | ((conditions[?type == '{{ $condition }}'])[0]): 50 | status: "True" 51 | {{- end }} 52 | - script: 53 | content: ${KUBECTL} get {{ $resource.KindGroup }}/{{ $resource.Name }} -o=jsonpath='{.status.atProvider{{ $resource.UpdateAssertKey }}}' | grep -q "^{{ $resource.UpdateAssertValue }}$" 54 | {{- end }} 55 | {{- end }} 56 | {{- end }} 57 | -------------------------------------------------------------------------------- /internal/templates/01-update.yaml.tmpl.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 -------------------------------------------------------------------------------- /internal/templates/02-import.yaml.tmpl: -------------------------------------------------------------------------------- 1 | # This file belongs to the resource import step. 
2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: import 6 | spec: 7 | timeouts: 8 | apply: {{ .TestCase.Timeout }} 9 | assert: {{ .TestCase.Timeout }} 10 | exec: {{ .TestCase.Timeout }} 11 | steps: 12 | - name: Remove State 13 | description: | 14 | Removes the resource statuses from MRs and controllers. For controllers 15 | the scale down&up was applied. For MRs status conditions are patched. 16 | Also, for the assertion step, the ID before import was stored in the 17 | uptest-old-id annotation. 18 | try: 19 | - script: 20 | content: | 21 | {{- range $resource := .Resources }} 22 | {{- if eq $resource.KindGroup "secret." -}} 23 | {{continue}} 24 | {{- end -}} 25 | {{- if not $resource.Namespace }} 26 | ${KUBECTL} annotate {{ $resource.KindGroup }}/{{ $resource.Name }} crossplane.io/paused=true --overwrite 27 | {{- end }} 28 | {{- end }} 29 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 30 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 31 | - sleep: 32 | duration: 10s 33 | - script: 34 | content: | 35 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 36 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 37 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 38 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 39 | /tmp/check_endpoints.sh 40 | {{- range $resource := .Resources }} 41 | {{- if eq $resource.KindGroup "secret." 
-}} 42 | {{continue}} 43 | {{- end -}} 44 | {{- if not $resource.Namespace }} 45 | /tmp/patch.sh {{ $resource.KindGroup }} {{ $resource.Name }} 46 | {{- end }} 47 | {{- end }} 48 | {{- range $resource := .Resources }} 49 | {{- if eq $resource.KindGroup "secret." -}} 50 | {{continue}} 51 | {{- end -}} 52 | {{- if not $resource.Namespace }} 53 | ${KUBECTL} annotate {{ $resource.KindGroup }}/{{ $resource.Name }} --all crossplane.io/paused=false --overwrite 54 | {{- end }} 55 | {{- end }} 56 | - name: Assert Status Conditions and IDs 57 | description: | 58 | Assert imported resources. Firstly check the status conditions. Then 59 | compare the stored ID and the new populated ID. For successful test, 60 | the ID must be the same. 61 | try: 62 | {{- range $resource := .Resources }} 63 | {{- if eq $resource.KindGroup "secret." -}} 64 | {{continue}} 65 | {{- end -}} 66 | {{- if not $resource.Namespace }} 67 | - assert: 68 | resource: 69 | apiVersion: {{ $resource.APIVersion }} 70 | kind: {{ $resource.Kind }} 71 | metadata: 72 | name: {{ $resource.Name }} 73 | status: 74 | {{- range $condition := $resource.Conditions }} 75 | ((conditions[?type == '{{ $condition }}'])[0]): 76 | status: "True" 77 | {{- end }} 78 | {{- end }} 79 | {{- if not (or $resource.Namespace $resource.SkipImport) }} 80 | - assert: 81 | timeout: 1m 82 | resource: 83 | apiVersion: {{ $resource.APIVersion }} 84 | kind: {{ $resource.Kind }} 85 | metadata: 86 | name: {{ $resource.Name }} 87 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 88 | {{- end }} 89 | {{- end }} 90 | -------------------------------------------------------------------------------- /internal/templates/02-import.yaml.tmpl.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 -------------------------------------------------------------------------------- 
/internal/templates/03-delete.yaml.tmpl: -------------------------------------------------------------------------------- 1 | # This file belongs to the resource delete step. 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: delete 6 | spec: 7 | timeouts: 8 | exec: {{ .TestCase.Timeout }} 9 | steps: 10 | - name: Delete Resources 11 | description: Delete resources. If needs ordered deletion, the pre-delete scripts were used. 12 | try: 13 | - script: 14 | content: | 15 | {{- range $resource := .Resources }} 16 | {{- if eq $resource.KindGroup "secret." -}} 17 | {{continue}} 18 | {{- end -}} 19 | {{- if $resource.PreDeleteScriptPath }} 20 | {{ $resource.PreDeleteScriptPath }} 21 | {{- end }} 22 | {{- if $resource.Namespace }} 23 | ${KUBECTL} delete {{ $resource.KindGroup }}/{{ $resource.Name }} --wait=false --namespace {{ $resource.Namespace }} --ignore-not-found 24 | {{- else }} 25 | ${KUBECTL} delete {{ $resource.KindGroup }}/{{ $resource.Name }} --wait=false --ignore-not-found 26 | {{- end }} 27 | {{- if $resource.PostDeleteScriptPath }} 28 | {{ $resource.PostDeleteScriptPath }} 29 | {{- end }} 30 | {{- end }} 31 | - name: Assert Deletion 32 | description: Assert deletion of resources. 33 | try: 34 | {{- range $resource := .Resources }} 35 | {{- if eq $resource.KindGroup "secret." 
-}} 36 | {{continue}} 37 | {{- end }} 38 | - script: 39 | content: | 40 | ${KUBECTL} wait {{ if $resource.Namespace }}--namespace {{ $resource.Namespace }} {{ end }}--for=delete {{ $resource.KindGroup }}/{{ $resource.Name }} --timeout {{ $.TestCase.Timeout }} 41 | {{- end }} 42 | {{- if not .TestCase.OnlyCleanUptestResources }} 43 | - script: 44 | content: | 45 | ${KUBECTL} wait managed --all --for=delete --timeout -1s 46 | {{- end }} 47 | {{- if .TestCase.TeardownScriptPath }} 48 | - command: 49 | entrypoint: {{ .TestCase.TeardownScriptPath }} 50 | {{- end }} 51 | -------------------------------------------------------------------------------- /internal/templates/03-delete.yaml.tmpl.license: -------------------------------------------------------------------------------- 1 | SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | 3 | SPDX-License-Identifier: CC0-1.0 -------------------------------------------------------------------------------- /internal/templates/embed.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | package templates 6 | 7 | import _ "embed" 8 | 9 | // inputFileTemplate is the template for the input file. 10 | // 11 | //go:embed 00-apply.yaml.tmpl 12 | var inputFileTemplate string 13 | 14 | // updateFileTemplate is the template for the update file. 15 | // 16 | //go:embed 01-update.yaml.tmpl 17 | var updateFileTemplate string 18 | 19 | // deleteFileTemplate is the template for the import file. 20 | // 21 | //go:embed 02-import.yaml.tmpl 22 | var importFileTemplate string 23 | 24 | // deleteFileTemplate is the template for the delete file. 
25 | // 26 | //go:embed 03-delete.yaml.tmpl 27 | var deleteFileTemplate string 28 | -------------------------------------------------------------------------------- /internal/templates/renderer.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package templates contains utilities for rendering chainsaw test cases using 6 | // the templates contained in the package. 7 | package templates 8 | 9 | import ( 10 | "strings" 11 | "text/template" 12 | 13 | "github.com/crossplane/crossplane-runtime/pkg/errors" 14 | 15 | "github.com/crossplane/uptest/internal/config" 16 | ) 17 | 18 | var fileTemplates = map[string]string{ 19 | "00-apply.yaml": inputFileTemplate, 20 | "01-update.yaml": updateFileTemplate, 21 | "02-import.yaml": importFileTemplate, 22 | "03-delete.yaml": deleteFileTemplate, 23 | } 24 | 25 | // Render renders the specified list of resources as a test case 26 | // with the specified configuration. 
27 | func Render(tc *config.TestCase, resources []config.Resource, skipDelete bool) (map[string]string, error) { 28 | data := struct { 29 | Resources []config.Resource 30 | TestCase config.TestCase 31 | }{ 32 | Resources: resources, 33 | TestCase: *tc, 34 | } 35 | 36 | res := make(map[string]string, len(fileTemplates)) 37 | for name, tmpl := range fileTemplates { 38 | // Skip templates with names starting with "01-" if skipUpdate is true 39 | if tc.SkipUpdate && strings.HasPrefix(name, "01-") { 40 | continue 41 | } 42 | // Skip templates with names starting with "02-" if skipImport is true 43 | if tc.SkipImport && strings.HasPrefix(name, "02-") { 44 | continue 45 | } 46 | // Skip templates with names starting with "03-" if skipDelete is true 47 | if skipDelete && strings.HasPrefix(name, "03-") { 48 | continue 49 | } 50 | 51 | t, err := template.New(name).Parse(tmpl) 52 | if err != nil { 53 | return nil, errors.Wrapf(err, "cannot parse template %q", name) 54 | } 55 | 56 | var b strings.Builder 57 | if err := t.Execute(&b, data); err != nil { 58 | return nil, errors.Wrapf(err, "cannot execute template %q", name) 59 | } 60 | res[name] = b.String() 61 | } 62 | 63 | return res, nil 64 | } 65 | -------------------------------------------------------------------------------- /internal/templates/renderer_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | package templates 6 | 7 | import ( 8 | "testing" 9 | "time" 10 | 11 | "github.com/crossplane/crossplane-runtime/pkg/test" 12 | "github.com/google/go-cmp/cmp" 13 | 14 | "github.com/crossplane/uptest/internal/config" 15 | ) 16 | 17 | const ( 18 | bucketManifest = `apiVersion: s3.aws.crossplane.io/v1beta1 19 | kind: Bucket 20 | metadata: 21 | name: test-bucket 22 | spec: 23 | deletionPolicy: Delete 24 | ` 25 | 26 | claimManifest = `apiVersion: 
gcp.platformref.upbound.io/v1alpha1 27 | kind: Cluster 28 | metadata: 29 | name: test-cluster-claim 30 | namespace: upbound-system 31 | spec: 32 | parameters: 33 | nodes: 34 | count: 1 35 | size: small 36 | ` 37 | 38 | secretManifest = `apiVersion: v1 39 | kind: Secret 40 | metadata: 41 | name: test-secret 42 | namespace: upbound-system 43 | type: Opaque 44 | data: 45 | key: dmFsdWU= 46 | ` 47 | ) 48 | 49 | func TestRender(t *testing.T) { 50 | type args struct { 51 | tc *config.TestCase 52 | resources []config.Resource 53 | } 54 | type want struct { 55 | out map[string]string 56 | err error 57 | } 58 | tests := map[string]struct { 59 | args args 60 | want want 61 | }{ 62 | "SuccessSingleResource": { 63 | args: args{ 64 | tc: &config.TestCase{ 65 | SetupScriptPath: "/tmp/setup.sh", 66 | Timeout: 10 * time.Minute, 67 | TestDirectory: "/tmp/test-input.yaml", 68 | }, 69 | resources: []config.Resource{ 70 | { 71 | Name: "example-bucket", 72 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 73 | Kind: "Bucket", 74 | KindGroup: "s3.aws.upbound.io", 75 | YAML: bucketManifest, 76 | Conditions: []string{"Test"}, 77 | }, 78 | }, 79 | }, 80 | want: want{ 81 | out: map[string]string{ 82 | "00-apply.yaml": `# This file belongs to the resource apply step. 83 | apiVersion: chainsaw.kyverno.io/v1alpha1 84 | kind: Test 85 | metadata: 86 | name: apply 87 | spec: 88 | timeouts: 89 | apply: 10m0s 90 | assert: 10m0s 91 | exec: 10m0s 92 | steps: 93 | - name: Run Setup Script 94 | description: Setup the test environment by running the setup script. 95 | try: 96 | - command: 97 | entrypoint: /tmp/setup.sh 98 | - name: Apply Resources 99 | description: Apply resources to the cluster. 
100 | try: 101 | - apply: 102 | file: /tmp/test-input.yaml 103 | - script: 104 | content: | 105 | echo "Runnning annotation script" 106 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 107 | - name: Assert Status Conditions 108 | description: | 109 | Assert applied resources. First, run the pre-assert script if exists. 110 | Then, check the status conditions. Finally run the post-assert script if it 111 | exists. 112 | try: 113 | - assert: 114 | resource: 115 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 116 | kind: Bucket 117 | metadata: 118 | name: example-bucket 119 | status: 120 | ((conditions[?type == 'Test'])[0]): 121 | status: "True" 122 | `, 123 | "01-update.yaml": `# This file belongs to the resource update step. 124 | apiVersion: chainsaw.kyverno.io/v1alpha1 125 | kind: Test 126 | metadata: 127 | name: update 128 | spec: 129 | timeouts: 130 | apply: 10m0s 131 | assert: 10m0s 132 | exec: 10m0s 133 | steps: 134 | - name: Update Root Resource 135 | description: | 136 | Update the root resource by using the specified update-parameter in annotation. 137 | Before updating the resources, the status conditions are cleaned. 138 | try: 139 | - name: Assert Updated Resource 140 | description: | 141 | Assert update operation. Firstly check the status conditions. Then assert 142 | the updated field in status.atProvider. 143 | `, 144 | "02-import.yaml": `# This file belongs to the resource import step. 145 | apiVersion: chainsaw.kyverno.io/v1alpha1 146 | kind: Test 147 | metadata: 148 | name: import 149 | spec: 150 | timeouts: 151 | apply: 10m0s 152 | assert: 10m0s 153 | exec: 10m0s 154 | steps: 155 | - name: Remove State 156 | description: | 157 | Removes the resource statuses from MRs and controllers. For controllers 158 | the scale down&up was applied. For MRs status conditions are patched. 159 | Also, for the assertion step, the ID before import was stored in the 160 | uptest-old-id annotation. 
161 | try: 162 | - script: 163 | content: | 164 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 165 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 166 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 167 | - sleep: 168 | duration: 10s 169 | - script: 170 | content: | 171 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 172 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 173 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 174 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 175 | /tmp/check_endpoints.sh 176 | /tmp/patch.sh s3.aws.upbound.io example-bucket 177 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 178 | - name: Assert Status Conditions and IDs 179 | description: | 180 | Assert imported resources. Firstly check the status conditions. Then 181 | compare the stored ID and the new populated ID. For successful test, 182 | the ID must be the same. 
183 | try: 184 | - assert: 185 | resource: 186 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 187 | kind: Bucket 188 | metadata: 189 | name: example-bucket 190 | status: 191 | ((conditions[?type == 'Test'])[0]): 192 | status: "True" 193 | - assert: 194 | timeout: 1m 195 | resource: 196 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 197 | kind: Bucket 198 | metadata: 199 | name: example-bucket 200 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 201 | `, 202 | "03-delete.yaml": `# This file belongs to the resource delete step. 203 | apiVersion: chainsaw.kyverno.io/v1alpha1 204 | kind: Test 205 | metadata: 206 | name: delete 207 | spec: 208 | timeouts: 209 | exec: 10m0s 210 | steps: 211 | - name: Delete Resources 212 | description: Delete resources. If needs ordered deletion, the pre-delete scripts were used. 213 | try: 214 | - script: 215 | content: | 216 | ${KUBECTL} delete s3.aws.upbound.io/example-bucket --wait=false --ignore-not-found 217 | - name: Assert Deletion 218 | description: Assert deletion of resources. 219 | try: 220 | - script: 221 | content: | 222 | ${KUBECTL} wait --for=delete s3.aws.upbound.io/example-bucket --timeout 10m0s 223 | - script: 224 | content: | 225 | ${KUBECTL} wait managed --all --for=delete --timeout -1s 226 | `, 227 | }, 228 | }, 229 | }, 230 | "SuccessSingleResourceWithNoSetupScript": { 231 | args: args{ 232 | tc: &config.TestCase{ 233 | Timeout: 10 * time.Minute, 234 | TestDirectory: "/tmp/test-input.yaml", 235 | }, 236 | resources: []config.Resource{ 237 | { 238 | Name: "example-bucket", 239 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 240 | Kind: "Bucket", 241 | KindGroup: "s3.aws.upbound.io", 242 | YAML: bucketManifest, 243 | Conditions: []string{"Test"}, 244 | }, 245 | }, 246 | }, 247 | want: want{ 248 | out: map[string]string{ 249 | "00-apply.yaml": `# This file belongs to the resource apply step. 
250 | apiVersion: chainsaw.kyverno.io/v1alpha1 251 | kind: Test 252 | metadata: 253 | name: apply 254 | spec: 255 | timeouts: 256 | apply: 10m0s 257 | assert: 10m0s 258 | exec: 10m0s 259 | steps: 260 | - name: Apply Resources 261 | description: Apply resources to the cluster. 262 | try: 263 | - apply: 264 | file: /tmp/test-input.yaml 265 | - script: 266 | content: | 267 | echo "Runnning annotation script" 268 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 269 | - name: Assert Status Conditions 270 | description: | 271 | Assert applied resources. First, run the pre-assert script if exists. 272 | Then, check the status conditions. Finally run the post-assert script if it 273 | exists. 274 | try: 275 | - assert: 276 | resource: 277 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 278 | kind: Bucket 279 | metadata: 280 | name: example-bucket 281 | status: 282 | ((conditions[?type == 'Test'])[0]): 283 | status: "True" 284 | `, 285 | "01-update.yaml": `# This file belongs to the resource update step. 286 | apiVersion: chainsaw.kyverno.io/v1alpha1 287 | kind: Test 288 | metadata: 289 | name: update 290 | spec: 291 | timeouts: 292 | apply: 10m0s 293 | assert: 10m0s 294 | exec: 10m0s 295 | steps: 296 | - name: Update Root Resource 297 | description: | 298 | Update the root resource by using the specified update-parameter in annotation. 299 | Before updating the resources, the status conditions are cleaned. 300 | try: 301 | - name: Assert Updated Resource 302 | description: | 303 | Assert update operation. Firstly check the status conditions. Then assert 304 | the updated field in status.atProvider. 305 | `, 306 | "02-import.yaml": `# This file belongs to the resource import step. 
307 | apiVersion: chainsaw.kyverno.io/v1alpha1 308 | kind: Test 309 | metadata: 310 | name: import 311 | spec: 312 | timeouts: 313 | apply: 10m0s 314 | assert: 10m0s 315 | exec: 10m0s 316 | steps: 317 | - name: Remove State 318 | description: | 319 | Removes the resource statuses from MRs and controllers. For controllers 320 | the scale down&up was applied. For MRs status conditions are patched. 321 | Also, for the assertion step, the ID before import was stored in the 322 | uptest-old-id annotation. 323 | try: 324 | - script: 325 | content: | 326 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 327 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 328 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 329 | - sleep: 330 | duration: 10s 331 | - script: 332 | content: | 333 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 334 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 335 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 336 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 337 | /tmp/check_endpoints.sh 338 | /tmp/patch.sh s3.aws.upbound.io example-bucket 339 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 340 | - name: Assert Status Conditions and IDs 341 | description: | 342 | Assert imported resources. Firstly check the status conditions. Then 343 | compare the stored ID and the new populated ID. 
For successful test, 344 | the ID must be the same. 345 | try: 346 | - assert: 347 | resource: 348 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 349 | kind: Bucket 350 | metadata: 351 | name: example-bucket 352 | status: 353 | ((conditions[?type == 'Test'])[0]): 354 | status: "True" 355 | - assert: 356 | timeout: 1m 357 | resource: 358 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 359 | kind: Bucket 360 | metadata: 361 | name: example-bucket 362 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 363 | `, 364 | "03-delete.yaml": `# This file belongs to the resource delete step. 365 | apiVersion: chainsaw.kyverno.io/v1alpha1 366 | kind: Test 367 | metadata: 368 | name: delete 369 | spec: 370 | timeouts: 371 | exec: 10m0s 372 | steps: 373 | - name: Delete Resources 374 | description: Delete resources. If needs ordered deletion, the pre-delete scripts were used. 375 | try: 376 | - script: 377 | content: | 378 | ${KUBECTL} delete s3.aws.upbound.io/example-bucket --wait=false --ignore-not-found 379 | - name: Assert Deletion 380 | description: Assert deletion of resources. 
381 | try: 382 | - script: 383 | content: | 384 | ${KUBECTL} wait --for=delete s3.aws.upbound.io/example-bucket --timeout 10m0s 385 | - script: 386 | content: | 387 | ${KUBECTL} wait managed --all --for=delete --timeout -1s 388 | `, 389 | }, 390 | }, 391 | }, 392 | "SuccessMultipleResource": { 393 | args: args{ 394 | tc: &config.TestCase{ 395 | Timeout: 10 * time.Minute, 396 | SetupScriptPath: "/tmp/setup.sh", 397 | TeardownScriptPath: "/tmp/teardown.sh", 398 | TestDirectory: "/tmp/test-input.yaml", 399 | }, 400 | resources: []config.Resource{ 401 | { 402 | YAML: bucketManifest, 403 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 404 | Kind: "Bucket", 405 | Name: "example-bucket", 406 | KindGroup: "s3.aws.upbound.io", 407 | PreAssertScriptPath: "/tmp/bucket/pre-assert.sh", 408 | PostDeleteScriptPath: "/tmp/bucket/post-delete.sh", 409 | Conditions: []string{"Test"}, 410 | }, 411 | { 412 | YAML: claimManifest, 413 | APIVersion: "cluster.gcp.platformref.upbound.io/v1alpha1", 414 | Kind: "Cluster", 415 | Name: "test-cluster-claim", 416 | KindGroup: "cluster.gcp.platformref.upbound.io", 417 | Namespace: "upbound-system", 418 | PostAssertScriptPath: "/tmp/claim/post-assert.sh", 419 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 420 | Conditions: []string{"Ready", "Synced"}, 421 | }, 422 | { 423 | YAML: secretManifest, 424 | Name: "test-secret", 425 | KindGroup: "secret.", 426 | Namespace: "upbound-system", 427 | }, 428 | }, 429 | }, 430 | want: want{ 431 | out: map[string]string{ 432 | "00-apply.yaml": `# This file belongs to the resource apply step. 433 | apiVersion: chainsaw.kyverno.io/v1alpha1 434 | kind: Test 435 | metadata: 436 | name: apply 437 | spec: 438 | timeouts: 439 | apply: 10m0s 440 | assert: 10m0s 441 | exec: 10m0s 442 | steps: 443 | - name: Run Setup Script 444 | description: Setup the test environment by running the setup script. 
445 | try: 446 | - command: 447 | entrypoint: /tmp/setup.sh 448 | - name: Apply Resources 449 | description: Apply resources to the cluster. 450 | try: 451 | - apply: 452 | file: /tmp/test-input.yaml 453 | - script: 454 | content: | 455 | echo "Runnning annotation script" 456 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 457 | - name: Assert Status Conditions 458 | description: | 459 | Assert applied resources. First, run the pre-assert script if exists. 460 | Then, check the status conditions. Finally run the post-assert script if it 461 | exists. 462 | try: 463 | - command: 464 | entrypoint: /tmp/bucket/pre-assert.sh 465 | - assert: 466 | resource: 467 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 468 | kind: Bucket 469 | metadata: 470 | name: example-bucket 471 | status: 472 | ((conditions[?type == 'Test'])[0]): 473 | status: "True" 474 | - assert: 475 | resource: 476 | apiVersion: cluster.gcp.platformref.upbound.io/v1alpha1 477 | kind: Cluster 478 | metadata: 479 | name: test-cluster-claim 480 | namespace: upbound-system 481 | status: 482 | ((conditions[?type == 'Ready'])[0]): 483 | status: "True" 484 | ((conditions[?type == 'Synced'])[0]): 485 | status: "True" 486 | - command: 487 | entrypoint: /tmp/claim/post-assert.sh 488 | `, 489 | "01-update.yaml": `# This file belongs to the resource update step. 490 | apiVersion: chainsaw.kyverno.io/v1alpha1 491 | kind: Test 492 | metadata: 493 | name: update 494 | spec: 495 | timeouts: 496 | apply: 10m0s 497 | assert: 10m0s 498 | exec: 10m0s 499 | steps: 500 | - name: Update Root Resource 501 | description: | 502 | Update the root resource by using the specified update-parameter in annotation. 503 | Before updating the resources, the status conditions are cleaned. 504 | try: 505 | - name: Assert Updated Resource 506 | description: | 507 | Assert update operation. Firstly check the status conditions. Then assert 508 | the updated field in status.atProvider. 
509 | `, 510 | "02-import.yaml": `# This file belongs to the resource import step. 511 | apiVersion: chainsaw.kyverno.io/v1alpha1 512 | kind: Test 513 | metadata: 514 | name: import 515 | spec: 516 | timeouts: 517 | apply: 10m0s 518 | assert: 10m0s 519 | exec: 10m0s 520 | steps: 521 | - name: Remove State 522 | description: | 523 | Removes the resource statuses from MRs and controllers. For controllers 524 | the scale down&up was applied. For MRs status conditions are patched. 525 | Also, for the assertion step, the ID before import was stored in the 526 | uptest-old-id annotation. 527 | try: 528 | - script: 529 | content: | 530 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 531 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 532 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 533 | - sleep: 534 | duration: 10s 535 | - script: 536 | content: | 537 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 538 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 539 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 540 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 541 | /tmp/check_endpoints.sh 542 | /tmp/patch.sh s3.aws.upbound.io example-bucket 543 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 544 | - name: Assert Status Conditions and IDs 545 | description: | 546 | Assert imported resources. Firstly check the status conditions. 
Then 547 | compare the stored ID and the new populated ID. For successful test, 548 | the ID must be the same. 549 | try: 550 | - assert: 551 | resource: 552 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 553 | kind: Bucket 554 | metadata: 555 | name: example-bucket 556 | status: 557 | ((conditions[?type == 'Test'])[0]): 558 | status: "True" 559 | - assert: 560 | timeout: 1m 561 | resource: 562 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 563 | kind: Bucket 564 | metadata: 565 | name: example-bucket 566 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 567 | `, 568 | "03-delete.yaml": `# This file belongs to the resource delete step. 569 | apiVersion: chainsaw.kyverno.io/v1alpha1 570 | kind: Test 571 | metadata: 572 | name: delete 573 | spec: 574 | timeouts: 575 | exec: 10m0s 576 | steps: 577 | - name: Delete Resources 578 | description: Delete resources. If needs ordered deletion, the pre-delete scripts were used. 579 | try: 580 | - script: 581 | content: | 582 | ${KUBECTL} delete s3.aws.upbound.io/example-bucket --wait=false --ignore-not-found 583 | /tmp/bucket/post-delete.sh 584 | /tmp/claim/pre-delete.sh 585 | ${KUBECTL} delete cluster.gcp.platformref.upbound.io/test-cluster-claim --wait=false --namespace upbound-system --ignore-not-found 586 | - name: Assert Deletion 587 | description: Assert deletion of resources. 
588 | try: 589 | - script: 590 | content: | 591 | ${KUBECTL} wait --for=delete s3.aws.upbound.io/example-bucket --timeout 10m0s 592 | - script: 593 | content: | 594 | ${KUBECTL} wait --namespace upbound-system --for=delete cluster.gcp.platformref.upbound.io/test-cluster-claim --timeout 10m0s 595 | - script: 596 | content: | 597 | ${KUBECTL} wait managed --all --for=delete --timeout -1s 598 | - command: 599 | entrypoint: /tmp/teardown.sh 600 | `, 601 | }, 602 | }, 603 | }, 604 | } 605 | for name, tc := range tests { 606 | t.Run(name, func(t *testing.T) { 607 | got, err := Render(tc.args.tc, tc.args.resources, false) 608 | if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { 609 | t.Errorf("Render(...): -want error, +got error:\n%s", diff) 610 | } 611 | if diff := cmp.Diff(tc.want.out, got); diff != "" { 612 | t.Errorf("Render(...): -want, +got:\n%s", diff) 613 | } 614 | }) 615 | } 616 | } 617 | 618 | func TestRenderWithSkipDelete(t *testing.T) { 619 | type args struct { 620 | tc *config.TestCase 621 | resources []config.Resource 622 | } 623 | type want struct { 624 | out map[string]string 625 | err error 626 | } 627 | tests := map[string]struct { 628 | args args 629 | want want 630 | }{ 631 | "SuccessSingleResource": { 632 | args: args{ 633 | tc: &config.TestCase{ 634 | SetupScriptPath: "/tmp/setup.sh", 635 | Timeout: 10 * time.Minute, 636 | TestDirectory: "/tmp/test-input.yaml", 637 | }, 638 | resources: []config.Resource{ 639 | { 640 | Name: "example-bucket", 641 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 642 | Kind: "Bucket", 643 | KindGroup: "s3.aws.upbound.io", 644 | YAML: bucketManifest, 645 | Conditions: []string{"Test"}, 646 | }, 647 | }, 648 | }, 649 | want: want{ 650 | out: map[string]string{ 651 | "00-apply.yaml": `# This file belongs to the resource apply step. 
652 | apiVersion: chainsaw.kyverno.io/v1alpha1 653 | kind: Test 654 | metadata: 655 | name: apply 656 | spec: 657 | timeouts: 658 | apply: 10m0s 659 | assert: 10m0s 660 | exec: 10m0s 661 | steps: 662 | - name: Run Setup Script 663 | description: Setup the test environment by running the setup script. 664 | try: 665 | - command: 666 | entrypoint: /tmp/setup.sh 667 | - name: Apply Resources 668 | description: Apply resources to the cluster. 669 | try: 670 | - apply: 671 | file: /tmp/test-input.yaml 672 | - script: 673 | content: | 674 | echo "Runnning annotation script" 675 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 676 | - name: Assert Status Conditions 677 | description: | 678 | Assert applied resources. First, run the pre-assert script if exists. 679 | Then, check the status conditions. Finally run the post-assert script if it 680 | exists. 681 | try: 682 | - assert: 683 | resource: 684 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 685 | kind: Bucket 686 | metadata: 687 | name: example-bucket 688 | status: 689 | ((conditions[?type == 'Test'])[0]): 690 | status: "True" 691 | `, 692 | "01-update.yaml": `# This file belongs to the resource update step. 693 | apiVersion: chainsaw.kyverno.io/v1alpha1 694 | kind: Test 695 | metadata: 696 | name: update 697 | spec: 698 | timeouts: 699 | apply: 10m0s 700 | assert: 10m0s 701 | exec: 10m0s 702 | steps: 703 | - name: Update Root Resource 704 | description: | 705 | Update the root resource by using the specified update-parameter in annotation. 706 | Before updating the resources, the status conditions are cleaned. 707 | try: 708 | - name: Assert Updated Resource 709 | description: | 710 | Assert update operation. Firstly check the status conditions. Then assert 711 | the updated field in status.atProvider. 712 | `, 713 | "02-import.yaml": `# This file belongs to the resource import step. 
714 | apiVersion: chainsaw.kyverno.io/v1alpha1 715 | kind: Test 716 | metadata: 717 | name: import 718 | spec: 719 | timeouts: 720 | apply: 10m0s 721 | assert: 10m0s 722 | exec: 10m0s 723 | steps: 724 | - name: Remove State 725 | description: | 726 | Removes the resource statuses from MRs and controllers. For controllers 727 | the scale down&up was applied. For MRs status conditions are patched. 728 | Also, for the assertion step, the ID before import was stored in the 729 | uptest-old-id annotation. 730 | try: 731 | - script: 732 | content: | 733 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 734 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 735 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 736 | - sleep: 737 | duration: 10s 738 | - script: 739 | content: | 740 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 741 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 742 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 743 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 744 | /tmp/check_endpoints.sh 745 | /tmp/patch.sh s3.aws.upbound.io example-bucket 746 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 747 | - name: Assert Status Conditions and IDs 748 | description: | 749 | Assert imported resources. Firstly check the status conditions. Then 750 | compare the stored ID and the new populated ID. 
For successful test, 751 | the ID must be the same. 752 | try: 753 | - assert: 754 | resource: 755 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 756 | kind: Bucket 757 | metadata: 758 | name: example-bucket 759 | status: 760 | ((conditions[?type == 'Test'])[0]): 761 | status: "True" 762 | - assert: 763 | timeout: 1m 764 | resource: 765 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 766 | kind: Bucket 767 | metadata: 768 | name: example-bucket 769 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 770 | `, 771 | }, 772 | }, 773 | }, 774 | "SkipImport": { 775 | args: args{ 776 | tc: &config.TestCase{ 777 | Timeout: 10 * time.Minute, 778 | SetupScriptPath: "/tmp/setup.sh", 779 | TeardownScriptPath: "/tmp/teardown.sh", 780 | TestDirectory: "/tmp/test-input.yaml", 781 | }, 782 | resources: []config.Resource{ 783 | { 784 | YAML: bucketManifest, 785 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 786 | Kind: "Bucket", 787 | Name: "example-bucket", 788 | KindGroup: "s3.aws.upbound.io", 789 | PreAssertScriptPath: "/tmp/bucket/pre-assert.sh", 790 | PostDeleteScriptPath: "/tmp/bucket/post-delete.sh", 791 | SkipImport: true, 792 | Conditions: []string{"Test"}, 793 | }, 794 | { 795 | YAML: claimManifest, 796 | Name: "test-cluster-claim", 797 | APIVersion: "cluster.gcp.platformref.upbound.io/v1alpha1", 798 | Kind: "Cluster", 799 | KindGroup: "cluster.gcp.platformref.upbound.io", 800 | Namespace: "upbound-system", 801 | PostAssertScriptPath: "/tmp/claim/post-assert.sh", 802 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 803 | Conditions: []string{"Ready", "Synced"}, 804 | }, 805 | { 806 | YAML: secretManifest, 807 | Name: "test-secret", 808 | KindGroup: "secret.", 809 | Namespace: "upbound-system", 810 | }, 811 | }, 812 | }, 813 | want: want{ 814 | out: map[string]string{ 815 | "00-apply.yaml": `# This file belongs to the resource apply step. 
816 | apiVersion: chainsaw.kyverno.io/v1alpha1 817 | kind: Test 818 | metadata: 819 | name: apply 820 | spec: 821 | timeouts: 822 | apply: 10m0s 823 | assert: 10m0s 824 | exec: 10m0s 825 | steps: 826 | - name: Run Setup Script 827 | description: Setup the test environment by running the setup script. 828 | try: 829 | - command: 830 | entrypoint: /tmp/setup.sh 831 | - name: Apply Resources 832 | description: Apply resources to the cluster. 833 | try: 834 | - apply: 835 | file: /tmp/test-input.yaml 836 | - script: 837 | content: | 838 | echo "Runnning annotation script" 839 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 840 | - name: Assert Status Conditions 841 | description: | 842 | Assert applied resources. First, run the pre-assert script if exists. 843 | Then, check the status conditions. Finally run the post-assert script if it 844 | exists. 845 | try: 846 | - command: 847 | entrypoint: /tmp/bucket/pre-assert.sh 848 | - assert: 849 | resource: 850 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 851 | kind: Bucket 852 | metadata: 853 | name: example-bucket 854 | status: 855 | ((conditions[?type == 'Test'])[0]): 856 | status: "True" 857 | - assert: 858 | resource: 859 | apiVersion: cluster.gcp.platformref.upbound.io/v1alpha1 860 | kind: Cluster 861 | metadata: 862 | name: test-cluster-claim 863 | namespace: upbound-system 864 | status: 865 | ((conditions[?type == 'Ready'])[0]): 866 | status: "True" 867 | ((conditions[?type == 'Synced'])[0]): 868 | status: "True" 869 | - command: 870 | entrypoint: /tmp/claim/post-assert.sh 871 | `, 872 | "01-update.yaml": `# This file belongs to the resource update step. 
873 | apiVersion: chainsaw.kyverno.io/v1alpha1 874 | kind: Test 875 | metadata: 876 | name: update 877 | spec: 878 | timeouts: 879 | apply: 10m0s 880 | assert: 10m0s 881 | exec: 10m0s 882 | steps: 883 | - name: Update Root Resource 884 | description: | 885 | Update the root resource by using the specified update-parameter in annotation. 886 | Before updating the resources, the status conditions are cleaned. 887 | try: 888 | - name: Assert Updated Resource 889 | description: | 890 | Assert update operation. Firstly check the status conditions. Then assert 891 | the updated field in status.atProvider. 892 | `, 893 | "02-import.yaml": `# This file belongs to the resource import step. 894 | apiVersion: chainsaw.kyverno.io/v1alpha1 895 | kind: Test 896 | metadata: 897 | name: import 898 | spec: 899 | timeouts: 900 | apply: 10m0s 901 | assert: 10m0s 902 | exec: 10m0s 903 | steps: 904 | - name: Remove State 905 | description: | 906 | Removes the resource statuses from MRs and controllers. For controllers 907 | the scale down&up was applied. For MRs status conditions are patched. 908 | Also, for the assertion step, the ID before import was stored in the 909 | uptest-old-id annotation. 
910 | try: 911 | - script: 912 | content: | 913 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 914 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 915 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 916 | - sleep: 917 | duration: 10s 918 | - script: 919 | content: | 920 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 921 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 922 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 923 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 924 | /tmp/check_endpoints.sh 925 | /tmp/patch.sh s3.aws.upbound.io example-bucket 926 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 927 | - name: Assert Status Conditions and IDs 928 | description: | 929 | Assert imported resources. Firstly check the status conditions. Then 930 | compare the stored ID and the new populated ID. For successful test, 931 | the ID must be the same. 
932 | try: 933 | - assert: 934 | resource: 935 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 936 | kind: Bucket 937 | metadata: 938 | name: example-bucket 939 | status: 940 | ((conditions[?type == 'Test'])[0]): 941 | status: "True" 942 | `, 943 | }, 944 | }, 945 | }, 946 | "SuccessMultipleResource": { 947 | args: args{ 948 | tc: &config.TestCase{ 949 | Timeout: 10 * time.Minute, 950 | SetupScriptPath: "/tmp/setup.sh", 951 | TeardownScriptPath: "/tmp/teardown.sh", 952 | TestDirectory: "/tmp/test-input.yaml", 953 | }, 954 | resources: []config.Resource{ 955 | { 956 | YAML: bucketManifest, 957 | APIVersion: "bucket.s3.aws.upbound.io/v1alpha1", 958 | Kind: "Bucket", 959 | Name: "example-bucket", 960 | KindGroup: "s3.aws.upbound.io", 961 | PreAssertScriptPath: "/tmp/bucket/pre-assert.sh", 962 | PostDeleteScriptPath: "/tmp/bucket/post-delete.sh", 963 | Conditions: []string{"Test"}, 964 | }, 965 | { 966 | YAML: claimManifest, 967 | APIVersion: "cluster.gcp.platformref.upbound.io/v1alpha1", 968 | Kind: "Cluster", 969 | Name: "test-cluster-claim", 970 | KindGroup: "cluster.gcp.platformref.upbound.io", 971 | Namespace: "upbound-system", 972 | PostAssertScriptPath: "/tmp/claim/post-assert.sh", 973 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 974 | Conditions: []string{"Ready", "Synced"}, 975 | }, 976 | { 977 | YAML: secretManifest, 978 | Name: "test-secret", 979 | KindGroup: "secret.", 980 | Namespace: "upbound-system", 981 | }, 982 | }, 983 | }, 984 | want: want{ 985 | out: map[string]string{ 986 | "00-apply.yaml": `# This file belongs to the resource apply step. 987 | apiVersion: chainsaw.kyverno.io/v1alpha1 988 | kind: Test 989 | metadata: 990 | name: apply 991 | spec: 992 | timeouts: 993 | apply: 10m0s 994 | assert: 10m0s 995 | exec: 10m0s 996 | steps: 997 | - name: Run Setup Script 998 | description: Setup the test environment by running the setup script. 
999 | try: 1000 | - command: 1001 | entrypoint: /tmp/setup.sh 1002 | - name: Apply Resources 1003 | description: Apply resources to the cluster. 1004 | try: 1005 | - apply: 1006 | file: /tmp/test-input.yaml 1007 | - script: 1008 | content: | 1009 | echo "Runnning annotation script" 1010 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket upjet.upbound.io/test=true --overwrite 1011 | - name: Assert Status Conditions 1012 | description: | 1013 | Assert applied resources. First, run the pre-assert script if exists. 1014 | Then, check the status conditions. Finally run the post-assert script if it 1015 | exists. 1016 | try: 1017 | - command: 1018 | entrypoint: /tmp/bucket/pre-assert.sh 1019 | - assert: 1020 | resource: 1021 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 1022 | kind: Bucket 1023 | metadata: 1024 | name: example-bucket 1025 | status: 1026 | ((conditions[?type == 'Test'])[0]): 1027 | status: "True" 1028 | - assert: 1029 | resource: 1030 | apiVersion: cluster.gcp.platformref.upbound.io/v1alpha1 1031 | kind: Cluster 1032 | metadata: 1033 | name: test-cluster-claim 1034 | namespace: upbound-system 1035 | status: 1036 | ((conditions[?type == 'Ready'])[0]): 1037 | status: "True" 1038 | ((conditions[?type == 'Synced'])[0]): 1039 | status: "True" 1040 | - command: 1041 | entrypoint: /tmp/claim/post-assert.sh 1042 | `, 1043 | "01-update.yaml": `# This file belongs to the resource update step. 1044 | apiVersion: chainsaw.kyverno.io/v1alpha1 1045 | kind: Test 1046 | metadata: 1047 | name: update 1048 | spec: 1049 | timeouts: 1050 | apply: 10m0s 1051 | assert: 10m0s 1052 | exec: 10m0s 1053 | steps: 1054 | - name: Update Root Resource 1055 | description: | 1056 | Update the root resource by using the specified update-parameter in annotation. 1057 | Before updating the resources, the status conditions are cleaned. 1058 | try: 1059 | - name: Assert Updated Resource 1060 | description: | 1061 | Assert update operation. Firstly check the status conditions. 
Then assert 1062 | the updated field in status.atProvider. 1063 | `, 1064 | "02-import.yaml": `# This file belongs to the resource import step. 1065 | apiVersion: chainsaw.kyverno.io/v1alpha1 1066 | kind: Test 1067 | metadata: 1068 | name: import 1069 | spec: 1070 | timeouts: 1071 | apply: 10m0s 1072 | assert: 10m0s 1073 | exec: 10m0s 1074 | steps: 1075 | - name: Remove State 1076 | description: | 1077 | Removes the resource statuses from MRs and controllers. For controllers 1078 | the scale down&up was applied. For MRs status conditions are patched. 1079 | Also, for the assertion step, the ID before import was stored in the 1080 | uptest-old-id annotation. 1081 | try: 1082 | - script: 1083 | content: | 1084 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket crossplane.io/paused=true --overwrite 1085 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=0 --timeout 10s 1086 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=0 1087 | - sleep: 1088 | duration: 10s 1089 | - script: 1090 | content: | 1091 | ${KUBECTL} scale deployment crossplane -n ${CROSSPLANE_NAMESPACE} --replicas=1 --timeout 10s 1092 | ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} get deploy --no-headers -o custom-columns=":metadata.name" | grep "provider-" | xargs ${KUBECTL} -n ${CROSSPLANE_NAMESPACE} scale deploy --replicas=1 1093 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/check_endpoints.sh -o /tmp/check_endpoints.sh && chmod +x /tmp/check_endpoints.sh 1094 | curl -sL https://raw.githubusercontent.com/crossplane/uptest/main/hack/patch.sh -o /tmp/patch.sh && chmod +x /tmp/patch.sh 1095 | /tmp/check_endpoints.sh 1096 | /tmp/patch.sh s3.aws.upbound.io example-bucket 1097 | ${KUBECTL} annotate s3.aws.upbound.io/example-bucket --all crossplane.io/paused=false --overwrite 1098 | - name: Assert Status Conditions and 
IDs 1099 | description: | 1100 | Assert imported resources. Firstly check the status conditions. Then 1101 | compare the stored ID and the new populated ID. For successful test, 1102 | the ID must be the same. 1103 | try: 1104 | - assert: 1105 | resource: 1106 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 1107 | kind: Bucket 1108 | metadata: 1109 | name: example-bucket 1110 | status: 1111 | ((conditions[?type == 'Test'])[0]): 1112 | status: "True" 1113 | - assert: 1114 | timeout: 1m 1115 | resource: 1116 | apiVersion: bucket.s3.aws.upbound.io/v1alpha1 1117 | kind: Bucket 1118 | metadata: 1119 | name: example-bucket 1120 | ("status.atProvider.id" == "metadata.annotations.uptest-old-id"): true 1121 | `, 1122 | }, 1123 | }, 1124 | }, 1125 | "SuccessClaim": { 1126 | args: args{ 1127 | tc: &config.TestCase{ 1128 | Timeout: 10 * time.Minute, 1129 | SetupScriptPath: "/tmp/setup.sh", 1130 | TeardownScriptPath: "/tmp/teardown.sh", 1131 | TestDirectory: "/tmp/test-input.yaml", 1132 | SkipUpdate: true, 1133 | SkipImport: true, 1134 | }, 1135 | resources: []config.Resource{ 1136 | { 1137 | YAML: claimManifest, 1138 | APIVersion: "cluster.gcp.platformref.upbound.io/v1alpha1", 1139 | Kind: "Cluster", 1140 | Name: "test-cluster-claim", 1141 | KindGroup: "cluster.gcp.platformref.upbound.io", 1142 | Namespace: "upbound-system", 1143 | PostAssertScriptPath: "/tmp/claim/post-assert.sh", 1144 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 1145 | Conditions: []string{"Ready", "Synced"}, 1146 | }, 1147 | }, 1148 | }, 1149 | want: want{ 1150 | out: map[string]string{ 1151 | "00-apply.yaml": `# This file belongs to the resource apply step. 1152 | apiVersion: chainsaw.kyverno.io/v1alpha1 1153 | kind: Test 1154 | metadata: 1155 | name: apply 1156 | spec: 1157 | timeouts: 1158 | apply: 10m0s 1159 | assert: 10m0s 1160 | exec: 10m0s 1161 | steps: 1162 | - name: Run Setup Script 1163 | description: Setup the test environment by running the setup script. 
1164 | try: 1165 | - command: 1166 | entrypoint: /tmp/setup.sh 1167 | - name: Apply Resources 1168 | description: Apply resources to the cluster. 1169 | try: 1170 | - apply: 1171 | file: /tmp/test-input.yaml 1172 | - script: 1173 | content: | 1174 | echo "Runnning annotation script" 1175 | - name: Assert Status Conditions 1176 | description: | 1177 | Assert applied resources. First, run the pre-assert script if exists. 1178 | Then, check the status conditions. Finally run the post-assert script if it 1179 | exists. 1180 | try: 1181 | - assert: 1182 | resource: 1183 | apiVersion: cluster.gcp.platformref.upbound.io/v1alpha1 1184 | kind: Cluster 1185 | metadata: 1186 | name: test-cluster-claim 1187 | namespace: upbound-system 1188 | status: 1189 | ((conditions[?type == 'Ready'])[0]): 1190 | status: "True" 1191 | ((conditions[?type == 'Synced'])[0]): 1192 | status: "True" 1193 | - command: 1194 | entrypoint: /tmp/claim/post-assert.sh 1195 | `, 1196 | }, 1197 | }, 1198 | }, 1199 | "SuccessClaimAndXR": { 1200 | args: args{ 1201 | tc: &config.TestCase{ 1202 | Timeout: 10 * time.Minute, 1203 | SetupScriptPath: "/tmp/setup.sh", 1204 | TeardownScriptPath: "/tmp/teardown.sh", 1205 | TestDirectory: "/tmp/test-input.yaml", 1206 | SkipUpdate: true, 1207 | SkipImport: true, 1208 | }, 1209 | resources: []config.Resource{ 1210 | { 1211 | YAML: claimManifest, 1212 | APIVersion: "cluster.gcp.platformref.upbound.io/v1alpha1", 1213 | Kind: "Cluster", 1214 | Name: "test-cluster-claim", 1215 | KindGroup: "cluster.gcp.platformref.upbound.io", 1216 | Namespace: "upbound-system", 1217 | PostAssertScriptPath: "/tmp/claim/post-assert.sh", 1218 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 1219 | Conditions: []string{"Ready", "Synced"}, 1220 | }, 1221 | { 1222 | YAML: claimManifest, 1223 | APIVersion: "xnetwork.gcp.platformref.upbound.io/v1alpha1", 1224 | Kind: "XNetwork", 1225 | Name: "test-network-xr", 1226 | KindGroup: "xnetwork.gcp.platformref.upbound.io", 1227 | PostAssertScriptPath: 
"/tmp/claim/post-assert.sh", 1228 | PreDeleteScriptPath: "/tmp/claim/pre-delete.sh", 1229 | Conditions: []string{"Ready", "Synced"}, 1230 | }, 1231 | }, 1232 | }, 1233 | want: want{ 1234 | out: map[string]string{ 1235 | "00-apply.yaml": `# This file belongs to the resource apply step. 1236 | apiVersion: chainsaw.kyverno.io/v1alpha1 1237 | kind: Test 1238 | metadata: 1239 | name: apply 1240 | spec: 1241 | timeouts: 1242 | apply: 10m0s 1243 | assert: 10m0s 1244 | exec: 10m0s 1245 | steps: 1246 | - name: Run Setup Script 1247 | description: Setup the test environment by running the setup script. 1248 | try: 1249 | - command: 1250 | entrypoint: /tmp/setup.sh 1251 | - name: Apply Resources 1252 | description: Apply resources to the cluster. 1253 | try: 1254 | - apply: 1255 | file: /tmp/test-input.yaml 1256 | - script: 1257 | content: | 1258 | echo "Runnning annotation script" 1259 | ${KUBECTL} annotate xnetwork.gcp.platformref.upbound.io/test-network-xr upjet.upbound.io/test=true --overwrite 1260 | - name: Assert Status Conditions 1261 | description: | 1262 | Assert applied resources. First, run the pre-assert script if exists. 1263 | Then, check the status conditions. Finally run the post-assert script if it 1264 | exists. 
1265 | try: 1266 | - assert: 1267 | resource: 1268 | apiVersion: cluster.gcp.platformref.upbound.io/v1alpha1 1269 | kind: Cluster 1270 | metadata: 1271 | name: test-cluster-claim 1272 | namespace: upbound-system 1273 | status: 1274 | ((conditions[?type == 'Ready'])[0]): 1275 | status: "True" 1276 | ((conditions[?type == 'Synced'])[0]): 1277 | status: "True" 1278 | - command: 1279 | entrypoint: /tmp/claim/post-assert.sh 1280 | - assert: 1281 | resource: 1282 | apiVersion: xnetwork.gcp.platformref.upbound.io/v1alpha1 1283 | kind: XNetwork 1284 | metadata: 1285 | name: test-network-xr 1286 | status: 1287 | ((conditions[?type == 'Ready'])[0]): 1288 | status: "True" 1289 | ((conditions[?type == 'Synced'])[0]): 1290 | status: "True" 1291 | - command: 1292 | entrypoint: /tmp/claim/post-assert.sh 1293 | `, 1294 | }, 1295 | }, 1296 | }, 1297 | } 1298 | for name, tc := range tests { 1299 | t.Run(name, func(t *testing.T) { 1300 | got, err := Render(tc.args.tc, tc.args.resources, true) 1301 | if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { 1302 | t.Errorf("Render(...): -want error, +got error:\n%s", diff) 1303 | } 1304 | if diff := cmp.Diff(tc.want.out, got); diff != "" { 1305 | t.Errorf("Render(...): -want, +got:\n%s", diff) 1306 | } 1307 | }) 1308 | } 1309 | } 1310 | -------------------------------------------------------------------------------- /internal/tester.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | package internal 6 | 7 | import ( 8 | "bufio" 9 | "context" 10 | "encoding/json" 11 | "fmt" 12 | "io/fs" 13 | "log" 14 | "os" 15 | "os/exec" 16 | "path/filepath" 17 | "strconv" 18 | "strings" 19 | "sync" 20 | "time" 21 | 22 | "github.com/crossplane/crossplane-runtime/pkg/errors" 23 | 24 | "github.com/crossplane/uptest/internal/config" 25 | "github.com/crossplane/uptest/internal/templates" 26 | ) 
27 | 28 | var testFiles = []string{ 29 | "00-apply.yaml", 30 | "01-update.yaml", 31 | "02-import.yaml", 32 | "03-delete.yaml", 33 | } 34 | 35 | // NewTester returns a Tester object. 36 | func NewTester(ms []config.Manifest, opts *config.AutomatedTest) *Tester { 37 | return &Tester{ 38 | options: opts, 39 | manifests: ms, 40 | } 41 | } 42 | 43 | // Tester is responsible preparing and storing the test data&configurations, 44 | // and executing the tests. 45 | type Tester struct { 46 | options *config.AutomatedTest 47 | manifests []config.Manifest 48 | } 49 | 50 | // ExecuteTests execute tests via chainsaw. 51 | func (t *Tester) ExecuteTests() error { 52 | if err := writeTestFile(t.manifests, t.options.Directory); err != nil { 53 | return errors.Wrap(err, "cannot write test manifest files") 54 | } 55 | 56 | resources, timeout, err := t.writeChainsawFiles() 57 | if err != nil { 58 | return errors.Wrap(err, "cannot write chainsaw test files") 59 | } 60 | 61 | log.Printf("Written test files: %s\n", t.options.Directory) 62 | 63 | if t.options.RenderOnly { 64 | return nil 65 | } 66 | 67 | log.Println("Running chainsaw tests at " + t.options.Directory) 68 | startTime := time.Now() 69 | for _, tf := range testFiles { 70 | if !checkFileExists(filepath.Join(t.options.Directory, caseDirectory, tf)) { 71 | log.Println("Skipping test " + tf) 72 | continue 73 | } 74 | if err := executeSingleTestFile(t, tf, timeout-time.Since(startTime), resources); err != nil { 75 | return errors.Wrap(err, "cannot execute test "+tf) 76 | } 77 | } 78 | return nil 79 | } 80 | 81 | func executeSingleTestFile(t *Tester, tf string, timeout time.Duration, resources []config.Resource) error { 82 | chainsawCommand := fmt.Sprintf(`"${CHAINSAW}" test --test-dir %s --test-file %s --skip-delete --parallel 1 2>&1`, 83 | filepath.Clean(filepath.Join(t.options.Directory, caseDirectory)), 84 | filepath.Clean(tf)) 85 | 86 | ctx, cancel := context.WithTimeout(context.Background(), timeout) 87 | defer cancel() 88 | 
89 | cmd := exec.CommandContext(ctx, "bash", "-c", chainsawCommand) // #nosec G204 90 | stdout, _ := cmd.StdoutPipe() 91 | if err := cmd.Start(); err != nil { 92 | return errors.Wrapf(err, "cannot start chainsaw: %s", chainsawCommand) 93 | } 94 | 95 | // Start ticker for kubectl command every 30 seconds 96 | ticker := time.NewTicker(t.options.LogCollectionInterval) 97 | done := make(chan bool) 98 | defer func() { 99 | ticker.Stop() 100 | close(done) 101 | }() 102 | 103 | var mutex sync.Mutex 104 | go logCollector(done, ticker, &mutex, resources) 105 | 106 | sc := bufio.NewScanner(stdout) 107 | for sc.Scan() { 108 | mutex.Lock() 109 | log.Println(sc.Text()) 110 | mutex.Unlock() 111 | } 112 | if sc.Err() != nil { 113 | return errors.Wrap(sc.Err(), "cannot scan output") 114 | } 115 | if err := cmd.Wait(); err != nil { 116 | return errors.Wrapf(err, "cannot wait for chainsaw: %s", chainsawCommand) 117 | } 118 | 119 | return nil 120 | } 121 | 122 | func logCollector(done chan bool, ticker *time.Ticker, mutex sync.Locker, resources []config.Resource) { 123 | for { 124 | select { 125 | case <-done: 126 | return 127 | case <-ticker.C: 128 | mutex.Lock() 129 | for _, r := range resources { 130 | // During the setup script is running, the crossplane command 131 | // is failing because of the resource not found error. 132 | // We do not want to show this error to the user because it 133 | // is a noise and temporary one. 134 | // The error output was redirected to a file. 
135 | traceCmd := exec.Command("bash", "-c", fmt.Sprintf(`"${CROSSPLANE_CLI}" beta trace %s %s -o wide 2>>/tmp/uptest_crossplane_temp_errors.log`, r.KindGroup, r.Name)) //nolint:gosec // Disabling gosec to allow dynamic shell command execution 136 | output, err := traceCmd.CombinedOutput() 137 | if err == nil { 138 | log.Printf("crossplane trace logs %s\n%s\n", time.Now(), string(output)) 139 | } 140 | } 141 | mutex.Unlock() 142 | } 143 | } 144 | } 145 | 146 | func (t *Tester) prepareConfig() (*config.TestCase, []config.Resource, error) { //nolint:gocyclo // TODO: can we break this? 147 | tc := &config.TestCase{ 148 | Timeout: t.options.DefaultTimeout, 149 | SetupScriptPath: t.options.SetupScriptPath, 150 | TeardownScriptPath: t.options.TeardownScriptPath, 151 | OnlyCleanUptestResources: t.options.OnlyCleanUptestResources, 152 | TestDirectory: "test-input.yaml", 153 | } 154 | examples := make([]config.Resource, 0, len(t.manifests)) 155 | 156 | rootFound := false 157 | for _, m := range t.manifests { 158 | obj := m.Object 159 | groupVersionKind := obj.GroupVersionKind() 160 | apiVersion, kind := groupVersionKind.ToAPIVersionAndKind() 161 | kg := strings.ToLower(groupVersionKind.Kind + "." 
+ groupVersionKind.Group) 162 | 163 | example := config.Resource{ 164 | Name: obj.GetName(), 165 | Namespace: obj.GetNamespace(), 166 | KindGroup: kg, 167 | YAML: m.YAML, 168 | Timeout: t.options.DefaultTimeout, 169 | Conditions: t.options.DefaultConditions, 170 | APIVersion: apiVersion, 171 | Kind: kind, 172 | } 173 | 174 | var err error 175 | annotations := obj.GetAnnotations() 176 | if v, ok := annotations[config.AnnotationKeyTimeout]; ok { 177 | d, err := strconv.Atoi(v) 178 | if err != nil { 179 | return nil, nil, errors.Wrap(err, "timeout value is not valid") 180 | } 181 | example.Timeout = time.Duration(d) * time.Second 182 | if example.Timeout > tc.Timeout { 183 | tc.Timeout = example.Timeout 184 | } 185 | } 186 | 187 | if v, ok := annotations[config.AnnotationKeyConditions]; ok { 188 | example.Conditions = strings.Split(v, ",") 189 | } 190 | 191 | if v, ok := annotations[config.AnnotationKeyPreAssertHook]; ok { 192 | example.PreAssertScriptPath, err = filepath.Abs(filepath.Join(filepath.Dir(m.FilePath), filepath.Clean(v))) 193 | if err != nil { 194 | return nil, nil, errors.Wrap(err, "cannot find absolute path for pre assert hook") 195 | } 196 | } 197 | 198 | if v, ok := annotations[config.AnnotationKeyPostAssertHook]; ok { 199 | example.PostAssertScriptPath, err = filepath.Abs(filepath.Join(filepath.Dir(m.FilePath), filepath.Clean(v))) 200 | if err != nil { 201 | return nil, nil, errors.Wrap(err, "cannot find absolute path for post assert hook") 202 | } 203 | } 204 | 205 | if v, ok := annotations[config.AnnotationKeyPreDeleteHook]; ok { 206 | example.PreDeleteScriptPath, err = filepath.Abs(filepath.Join(filepath.Dir(m.FilePath), filepath.Clean(v))) 207 | if err != nil { 208 | return nil, nil, errors.Wrap(err, "cannot find absolute path for pre delete hook") 209 | } 210 | } 211 | 212 | if v, ok := annotations[config.AnnotationKeyPostDeleteHook]; ok { 213 | example.PostDeleteScriptPath, err = filepath.Abs(filepath.Join(filepath.Dir(m.FilePath), 
filepath.Clean(v))) 214 | if err != nil { 215 | return nil, nil, errors.Wrap(err, "cannot find absolute path for post delete hook") 216 | } 217 | } 218 | 219 | updateParameter, ok := annotations[config.AnnotationKeyUpdateParameter] 220 | if !ok { 221 | updateParameter = os.Getenv("UPTEST_UPDATE_PARAMETER") 222 | } 223 | if updateParameter != "" { 224 | example.UpdateParameter = updateParameter 225 | var data map[string]interface{} 226 | if err := json.Unmarshal([]byte(updateParameter), &data); err != nil { 227 | return nil, nil, errors.Wrapf(err, "cannot unmarshal JSON object: %s", updateParameter) 228 | } 229 | example.UpdateAssertKey, example.UpdateAssertValue = convertToJSONPath(data, "") 230 | } 231 | disableImport, ok := annotations[config.AnnotationKeyDisableImport] 232 | if ok && disableImport == "true" { 233 | example.SkipImport = true 234 | } 235 | 236 | if exampleID, ok := annotations[config.AnnotationKeyExampleID]; ok { 237 | if exampleID == strings.ToLower(fmt.Sprintf("%s/%s/%s", strings.Split(groupVersionKind.Group, ".")[0], groupVersionKind.Version, groupVersionKind.Kind)) { 238 | if disableImport == "true" { 239 | log.Println("Skipping import step because the root resource has disable import annotation") 240 | tc.SkipImport = true 241 | } 242 | if updateParameter == "" || obj.GetNamespace() != "" { 243 | log.Println("Skipping update step because the root resource does not have the update parameter") 244 | tc.SkipUpdate = true 245 | } 246 | example.Root = true 247 | rootFound = true 248 | } 249 | } 250 | 251 | examples = append(examples, example) 252 | } 253 | 254 | if !rootFound { 255 | log.Println("Skipping update step because the root resource does not exist") 256 | tc.SkipUpdate = true 257 | } 258 | if t.options.SkipUpdate { 259 | log.Println("Skipping update step because the skip-delete option is set to true") 260 | tc.SkipUpdate = true 261 | } 262 | if t.options.SkipImport { 263 | log.Println("Skipping import step because the skip-import option 
is set to true") 264 | tc.SkipImport = true 265 | } 266 | 267 | return tc, examples, nil 268 | } 269 | 270 | func (t *Tester) writeChainsawFiles() ([]config.Resource, time.Duration, error) { 271 | tc, examples, err := t.prepareConfig() 272 | if err != nil { 273 | return nil, 0, errors.Wrap(err, "cannot build examples config") 274 | } 275 | 276 | files, err := templates.Render(tc, examples, t.options.SkipDelete) 277 | if err != nil { 278 | return nil, 0, errors.Wrap(err, "cannot render chainsaw templates") 279 | } 280 | 281 | for k, v := range files { 282 | if err := os.WriteFile(filepath.Join(filepath.Join(t.options.Directory, caseDirectory), k), []byte(v), fs.ModePerm); err != nil { 283 | return nil, 0, errors.Wrapf(err, "cannot write file %q", k) 284 | } 285 | } 286 | 287 | return examples, tc.Timeout, nil 288 | } 289 | 290 | func writeTestFile(manifests []config.Manifest, directory string) error { 291 | file, err := os.Create(filepath.Clean(filepath.Join(directory, caseDirectory, "test-input.yaml"))) 292 | if err != nil { 293 | return err 294 | } 295 | defer file.Close() //nolint:errcheck // Ignoring error on file close as any failures do not impact the functionality and are logged at a higher level. 296 | 297 | writer := bufio.NewWriter(file) 298 | for _, manifest := range manifests { 299 | if _, err := writer.WriteString("---\n"); err != nil { 300 | return errors.Wrap(err, "cannot write the manifest delimiter") 301 | } 302 | if _, err = writer.WriteString(manifest.YAML + "\n"); err != nil { 303 | return errors.Wrap(err, "cannot write the manifest content") 304 | } 305 | } 306 | return writer.Flush() 307 | } 308 | 309 | func convertToJSONPath(data map[string]interface{}, currentPath string) (string, string) { 310 | for key, value := range data { 311 | newPath := currentPath + "." 
+ key 312 | switch v := value.(type) { 313 | case map[string]interface{}: 314 | return convertToJSONPath(v, newPath) 315 | default: 316 | return newPath, fmt.Sprintf("%v", v) 317 | } 318 | } 319 | return currentPath, "" 320 | } 321 | 322 | func checkFileExists(filePath string) bool { 323 | _, err := os.Stat(filePath) 324 | return !errors.Is(err, os.ErrNotExist) 325 | } 326 | -------------------------------------------------------------------------------- /internal/version/version.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2024 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package version contains the version of provider-aws repo 6 | package version 7 | 8 | // Version will be overridden with the current version at build time using the -X linker flag 9 | var Version = "0.0.0" 10 | -------------------------------------------------------------------------------- /pkg/runner.go: -------------------------------------------------------------------------------- 1 | // SPDX-FileCopyrightText: 2025 The Crossplane Authors 2 | // 3 | // SPDX-License-Identifier: CC0-1.0 4 | 5 | // Package pkg contains configuration options for configuring uptest runtime. 
6 | package pkg 7 | 8 | import ( 9 | "log" 10 | "os" 11 | 12 | "github.com/crossplane/crossplane-runtime/pkg/errors" 13 | 14 | "github.com/crossplane/uptest/internal" 15 | "github.com/crossplane/uptest/internal/config" 16 | ) 17 | 18 | // RunTest runs the specified automated test 19 | func RunTest(o *config.AutomatedTest) error { 20 | if !o.RenderOnly { 21 | defer func() { 22 | if err := os.RemoveAll(o.Directory); err != nil { 23 | log.Printf("Cannot clean the test directory: %s\n", err.Error()) 24 | } 25 | }() 26 | } 27 | 28 | // Read examples and inject data source values to manifests 29 | manifests, err := internal.NewPreparer(o.ManifestPaths, internal.WithDataSource(o.DataSourcePath), internal.WithTestDirectory(o.Directory)).PrepareManifests() 30 | if err != nil { 31 | return errors.Wrap(err, "cannot prepare manifests") 32 | } 33 | 34 | // Prepare assert environment and run tests 35 | if err := internal.NewTester(manifests, o).ExecuteTests(); err != nil { 36 | return errors.Wrap(err, "cannot execute tests") 37 | } 38 | 39 | return nil 40 | } 41 | 42 | // NewAutomatedTestBuilder returns a Builder for AutomatedTest object 43 | func NewAutomatedTestBuilder() *config.Builder { 44 | return config.NewBuilder() 45 | } 46 | --------------------------------------------------------------------------------