├── .github ├── dependabot.yml └── workflows │ ├── ci-test.yml │ ├── codeql-analysis.yml │ ├── dependency-review.yml │ └── release.yml ├── .gitignore ├── .goreleaser.yml ├── .krew.yaml ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── assets ├── deployment-replicaset.png ├── pod.png ├── service.png └── statefulset.png ├── cmd ├── main.go └── main_test.go ├── deploy └── krew │ └── plugin.yaml ├── go.mod ├── go.sum ├── pkg ├── input │ └── input.go └── plugin │ ├── plugin.go │ ├── render_engine.go │ ├── render_engine_test.go │ ├── renderable.go │ ├── template_functions_dynamic.go │ ├── template_functions_static.go │ ├── template_functions_static_test.go │ ├── templates │ ├── CronJob.tmpl │ ├── DaemonSet.tmpl │ ├── Deployment.tmpl │ ├── Event.tmpl │ ├── HorizontalPodAutoscaler.tmpl │ ├── Ingress.tmpl │ ├── Job.tmpl │ ├── Lease.tmpl │ ├── NamespaceConfig.tmpl │ ├── Node.tmpl │ ├── PersistentVolume.tmpl │ ├── PersistentVolumeClaim.tmpl │ ├── Pod.tmpl │ ├── ReplicaSet.tmpl │ ├── ResourceQuota.tmpl │ ├── Service.tmpl │ ├── StatefulSet.tmpl │ └── common.tmpl │ └── templates_common_test.go ├── staticcheck.conf └── tests ├── artifacts ├── README.md ├── cr-dbconn-mymysql-deleted.out ├── cr-dbconn-mymysql-deleted.yaml ├── cr-dbconn-mymysql.out ├── cr-dbconn-mymysql.yaml ├── crd-dbconn.out ├── crd-dbconn.yaml ├── cronjob-regular-active.out ├── cronjob-regular-active.yaml ├── cronjob-regular-new.out ├── cronjob-regular-new.yaml ├── cronjob-regular-scheduled-and-active.out ├── cronjob-regular-scheduled-and-active.yaml ├── cronjob-regular-scheduled.out ├── cronjob-regular-scheduled.yaml ├── deployment-healthy.out ├── deployment-healthy.yaml ├── deployment-initial-progressing.out ├── deployment-initial-progressing.yaml ├── deployment-new.out ├── deployment-new.yaml ├── deployment-non-existing-image.out ├── deployment-non-existing-image.yaml ├── deployment-ongoing-rollout.out ├── deployment-ongoing-rollout.yaml ├── 
deployment-progressing.out ├── deployment-progressing.yaml ├── deployment-unavailable-replicas.out ├── deployment-unavailable-replicas.yaml ├── ingress-regular.out ├── ingress-regular.yaml ├── ingress-with-problems.out ├── ingress-with-problems.yaml ├── job-active.out ├── job-active.yaml ├── job-complete.out ├── job-complete.yaml ├── job-new.out ├── job-new.yaml ├── multiple-2-pods-docs.out ├── multiple-2-pods-docs.yaml ├── multiple-2-pods-list.out ├── multiple-2-pods-list.yaml ├── node-aks.out ├── node-aks.yaml ├── node-and-service.out ├── node-and-service.yaml ├── node-minikube-with-metrics.out ├── node-minikube-with-metrics.yaml ├── node-minikube.out ├── node-minikube.yaml ├── pod-deleted-due-to-missing-container.out ├── pod-deleted-due-to-missing-container.yaml ├── pod-job-completed.out ├── pod-job-completed.yaml ├── pod-marked-for-deletion-completed.out ├── pod-marked-for-deletion-completed.yaml ├── pod-marked-for-deletion.out ├── pod-marked-for-deletion.yaml ├── pod-missing-pvc.out ├── pod-missing-pvc.yaml ├── pod-non-existing-image.out ├── pod-non-existing-image.yaml ├── pod-pending-scheduled.out ├── pod-pending-scheduled.yaml ├── pod-pending-waiting-container.out ├── pod-pending-waiting-container.yaml ├── pod-pending.out ├── pod-pending.yaml ├── pod-standalone-ineractive.out ├── pod-standalone-ineractive.yaml ├── pod-standalone.out ├── pod-standalone.yaml ├── pod-with-metrics-and-events.out ├── pod-with-metrics-and-events.yaml ├── rs-all-replicas-ready.out ├── rs-all-replicas-ready.yaml ├── rs-no-ready-replicas.out ├── rs-no-ready-replicas.yaml ├── rs-non-existing-image.out ├── rs-non-existing-image.yaml ├── rs-not-all-replicas-ready.out ├── rs-not-all-replicas-ready.yaml ├── rs-ongoing-rollout.out ├── rs-ongoing-rollout.yaml ├── rs-replicas-0.out ├── rs-replicas-0.yaml ├── rs-superseeded.out ├── rs-superseeded.yaml ├── service-clusterip-missing-endpoint.out ├── service-clusterip-missing-endpoint.yaml ├── service-clusterip-multiport-with-endpoints.out ├── 
service-clusterip-multiport-with-endpoints.yaml ├── service-clusterip-with-endpoint.out ├── service-clusterip-with-endpoint.yaml ├── service-clusterip-with-no-endpoint.out ├── service-clusterip-with-no-endpoint.yaml ├── service-with-not-ready-addresses.out ├── service-with-not-ready-addresses.yaml ├── sts-inital-rollout-done.out ├── sts-inital-rollout-done.yaml ├── sts-new.out ├── sts-new.yaml ├── sts-ongoing-update-rollout-with-diff.out ├── sts-ongoing-update-rollout-with-diff.yaml ├── sts-ongoing-update-rollout.out ├── sts-ongoing-update-rollout.yaml ├── sts-stuck-initial-rollout.out ├── sts-stuck-initial-rollout.yaml ├── sts-suspended.out └── sts-suspended.yaml └── e2e-artifacts ├── sts-with-ingress.pod.out └── sts-with-ingress.yaml /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | reviewers: 9 | - bergerx 10 | assignees: 11 | - bergerx 12 | -------------------------------------------------------------------------------- /.github/workflows/ci-test.yml: -------------------------------------------------------------------------------- 1 | name: ci-test 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@master 15 | with: 16 | fetch-depth: 0 17 | - name: Setup Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version-file: 'go.mod' 21 | - run: make test 22 | - name: Start minikube 23 | uses: medyagh/setup-minikube@latest 24 | - run: ASSUME_MINIKUBE_IS_CONFIGURED=true make test-e2e -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this 
workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '27 1 * * 2' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.github/workflows/dependency-review.yml: -------------------------------------------------------------------------------- 1 | # Dependency Review Action 2 | # 3 | # This Action will scan dependency manifest files that change as part of a Pull Request, surfacing known-vulnerable versions of the packages declared or updated in the PR. Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable packages will be blocked from merging. 
4 | # 5 | # Source repository: https://github.com/actions/dependency-review-action 6 | # Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement 7 | name: 'Dependency Review' 8 | on: [pull_request] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | dependency-review: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: 'Checkout Repository' 18 | uses: actions/checkout@v3 19 | - name: 'Dependency Review' 20 | uses: actions/dependency-review-action@v1 21 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | branches-ignore: 6 | - '**' 7 | tags: 8 | - 'v*.*.*' 9 | 10 | jobs: 11 | goreleaser: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@master 16 | with: 17 | fetch-depth: 0 18 | - name: Setup Go 19 | uses: actions/setup-go@v5 20 | with: 21 | go-version-file: 'go.mod' 22 | - name: GoReleaser 23 | uses: goreleaser/goreleaser-action@v5 24 | with: 25 | version: latest 26 | args: release --clean 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | - name: Update new version in krew-index 30 | uses: rajatjindal/krew-release-bot@v0.0.46 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | cover.out 13 | cover.html 14 | 15 | bin/ 16 | dist/ 17 | 18 | .DS_Store 19 | .idea/ 20 | 
-------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | project_name: status 2 | version: 2 3 | release: 4 | github: 5 | owner: bergerx 6 | name: kubectl-status 7 | before: 8 | hooks: 9 | - go generate ./... 10 | - go mod tidy 11 | builds: 12 | - id: status 13 | goos: 14 | - linux 15 | - windows 16 | - darwin 17 | goarch: 18 | - amd64 19 | - arm64 20 | env: 21 | - CGO_ENABLED=0 22 | - GO111MODULE=on 23 | main: cmd/main.go 24 | ldflags: -s -w 25 | -X main.version={{ .Summary }} 26 | archives: 27 | - id: status 28 | builds: 29 | - status 30 | name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}" 31 | format_overrides: 32 | - goos: windows 33 | format: zip 34 | -------------------------------------------------------------------------------- /.krew.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: krew.googlecontainertools.github.com/v1alpha2 2 | kind: Plugin 3 | metadata: 4 | name: status 5 | spec: 6 | version: {{ .TagName }} 7 | platforms: 8 | - selector: 9 | matchLabels: 10 | os: linux 11 | arch: amd64 12 | {{addURIAndSha "https://github.com/bergerx/kubectl-status/releases/download/{{ .TagName }}/status_linux_amd64.tar.gz" .TagName }} 13 | files: 14 | - from: "./status" 15 | to: "." 16 | - from: LICENSE 17 | to: "." 18 | bin: "status" 19 | - selector: 20 | matchLabels: 21 | os: linux 22 | arch: arm64 23 | {{addURIAndSha "https://github.com/bergerx/kubectl-status/releases/download/{{ .TagName }}/status_linux_arm64.tar.gz" .TagName }} 24 | files: 25 | - from: "./status" 26 | to: "." 27 | - from: LICENSE 28 | to: "." 29 | bin: "status" 30 | - selector: 31 | matchLabels: 32 | os: darwin 33 | arch: amd64 34 | {{addURIAndSha "https://github.com/bergerx/kubectl-status/releases/download/{{ .TagName }}/status_darwin_amd64.tar.gz" .TagName }} 35 | files: 36 | - from: "./status" 37 | to: "." 
38 | - from: LICENSE 39 | to: "." 40 | bin: "status" 41 | - selector: 42 | matchLabels: 43 | os: darwin 44 | arch: arm64 45 | {{addURIAndSha "https://github.com/bergerx/kubectl-status/releases/download/{{ .TagName }}/status_darwin_arm64.tar.gz" .TagName }} 46 | files: 47 | - from: "./status" 48 | to: "." 49 | - from: LICENSE 50 | to: "." 51 | bin: "status" 52 | - selector: 53 | matchLabels: 54 | os: windows 55 | arch: amd64 56 | {{addURIAndSha "https://github.com/bergerx/kubectl-status/releases/download/{{ .TagName }}/status_windows_amd64.zip" .TagName }} 57 | files: 58 | - from: "/status.exe" 59 | to: "." 60 | - from: LICENSE 61 | to: "." 62 | bin: "status.exe" 63 | shortDescription: Show status details of a given resource. 64 | homepage: https://github.com/bergerx/kubectl-status 65 | caveats: | 66 | Usage: 67 | $ kubectl status 68 | 69 | For additional options: 70 | $ kubectl status --help 71 | or https://github.com/bergerx/kubectl-status/blob/master/doc/USAGE.md 72 | 73 | description: | 74 | Show status details of a given resource. Most useful when debugging Pod issues. 75 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | 5 | - repo: meta 6 | hooks: 7 | - id: check-useless-excludes 8 | 9 | - repo: https://github.com/pre-commit/pre-commit-hooks 10 | rev: v3.2.0 11 | hooks: 12 | - id: trailing-whitespace 13 | - id: mixed-line-ending 14 | - id: check-yaml 15 | args: [--allow-multiple-documents] 16 | exclude: ^.krew.yaml$ 17 | - id: check-added-large-files 18 | 19 | - repo: local 20 | hooks: 21 | 22 | # Usual pre-commit install only installs the pre-commit hook, trying to get 23 | # them installed through this hook. 
24 | - id: install-pre-push-hook 25 | name: install-pre-push-hook 26 | entry: pre-commit install -t pre-push 27 | language: system 28 | always_run: true 29 | pass_filenames: false 30 | 31 | - id: make-test 32 | name: make test 33 | entry: make test 34 | language: system 35 | always_run: true 36 | pass_filenames: false 37 | 38 | - id: make-test-e2e 39 | name: make test-e2e 40 | stages: [push] 41 | entry: make test-e2e 42 | language: system 43 | always_run: true 44 | pass_filenames: false 45 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct - kubectl-status 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make 6 | participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, 7 | disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, 8 | socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 
9 | 10 | ## Our Standards 11 | 12 | Examples of behavior that contributes to a positive environment for our community include: 13 | 14 | * Demonstrating empathy and kindness toward other people 15 | * Being respectful of differing opinions, viewpoints, and experiences 16 | * Giving and gracefully accepting constructive feedback 17 | * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience 18 | * Focusing on what is best not just for us as individuals, but for the overall community 19 | 20 | Examples of unacceptable behavior include: 21 | 22 | * The use of sexualized language or imagery, and sexual attention or advances 23 | * Trolling, insulting or derogatory comments, and personal or political attacks 24 | * Public or private harassment 25 | * Publishing others' private information, such as a physical or email address, without their explicit permission 26 | * Other conduct which could reasonably be considered inappropriate in a professional setting 27 | 28 | ## Our Responsibilities 29 | 30 | Project maintainers are responsible for clarifying and enforcing our standards of acceptable behavior and will take 31 | appropriate and fair corrective action in response to any instances of unacceptable behavior. 32 | 33 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, 34 | issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any 35 | contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 36 | 37 | ## Scope 38 | 39 | This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing 40 | the community in public spaces. 
Examples of representing our community include using an official e-mail address, posting 41 | via an official social media account, or acting as an appointed representative at an online or offline event. 42 | 43 | ## Enforcement 44 | 45 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible 46 | for enforcement at <>. All complaints will be reviewed and investigated promptly and fairly. 47 | 48 | All community leaders are obligated to respect the privacy and security of the reporter of any incident. 49 | 50 | ## Attribution 51 | 52 | This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org/), version 53 | [1.4](https://www.contributor-covenant.org/version/1/4/code-of-conduct/code_of_conduct.md) and 54 | [2.0](https://www.contributor-covenant.org/version/2/0/code_of_conduct/code_of_conduct.md), and was generated 55 | by [contributing-gen](https://github.com/bttger/contributing-gen). -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | export GO111MODULE=on 3 | 4 | .DEFAULT_GOAL := bin 5 | 6 | .PHONY: test 7 | test: vet staticcheck 8 | go test ./... 9 | 10 | .PHONY: test-e2e 11 | test-e2e: vet staticcheck 12 | # using count to prevent caching 13 | RUN_E2E_TESTS=true go test -v ./... -count=1 -run 'TestE2E*' 14 | 15 | .PHONY: bin 16 | bin: fmt vet staticcheck 17 | mkdir -p bin 18 | goreleaser build --single-target --skip=validate --clean 19 | ln -Ffs ../dist/status_$$(go env GOOS)_$$(go env GOARCH)_v1/status bin/ 20 | 21 | .PHONY: fmt 22 | fmt: 23 | go fmt ./... 24 | 25 | .PHONY: vet 26 | vet: 27 | go vet ./... 28 | 29 | .PHONY: staticcheck 30 | staticcheck: 31 | go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 ./... 
32 | 33 | .PHONY: clean 34 | clean: 35 | @rm -fv bin/status 36 | @rm -fvr dist 37 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kubectl status 2 | 3 | A `kubectl` plugin to print a human-friendly output that focuses on the status fields of the resources in kubernetes. 4 | 5 | Just a different representation of the kubernetes resources (next to `get` and `describe`). 6 | 7 | This plugin uses templates for well-known API conventions and has support for hardcoded resources. Not all resources are 8 | fully supported. 9 | 10 | - [Installation](#installation) 11 | * [Upgrade](#upgrade) 12 | - [Demo](#demo) 13 | - [Features](#features) 14 | - [Usage](#usage) 15 | - [Development](#development) 16 | * [Guidelines](./CONTRIBUTING.md#general-guidelines) 17 | - [License](#license) 18 | 19 | ## Installation 20 | 21 | You can install `kubectl status` using the [Krew](https://github.com/kubernetes-sigs/krew), the package manager for 22 | kubectl plugins. 
23 | 24 | After you [install Krew](https://krew.sigs.k8s.io/docs/user-guide/setup/install/), just run: 25 | 26 | ```bash 27 | kubectl krew install status 28 | kubectl status --help 29 | ``` 30 | 31 | ### Upgrade 32 | 33 | Assuming you installed using [Krew](https://github.com/kubernetes-sigs/krew): 34 | 35 | ```bash 36 | kubectl krew upgrade status 37 | ``` 38 | 39 | ## Demo 40 | 41 | Example Pod: 42 | ![pod](assets/pod.png) 43 | 44 | Example StatefulSet: 45 | ![statefulset](assets/statefulset.png) 46 | 47 | Example Deployment and ReplicaSet 48 | ![deployment-replicaset](assets/deployment-replicaset.png) 49 | 50 | Example Service: 51 | ![service](assets/service.png) 52 | 53 | ## Features 54 | 55 | * aims for ease of understanding the status of a given resource, 56 | * aligned with other kubectl cli subcommand usages (just like `kubectl get` or `kubectl describe`), 57 | * uses colors extensively for a better look and feel experience, while a white-ish output means everything is ok, 58 | red-ish output strongly indicates something wrong, 59 | * erroneous/impacting states are explicit and obvious, 60 | * explicit messages for not-so-easy-to-understand status (e.g., ongoing rollout), 61 | * goes the extra mile for better expressing the status (e.g., show spec diff for ongoing rollouts), 62 | * compact, non-extensive output to keep it sharp, 63 | * no external dependencies, doesn't shell out, and so doesn't depend on client/workstation configuration 64 | 65 | ## Usage 66 | 67 | In most cases, replacing a `kubectl get ...` with a `kubectl status ...` would be sufficient. 
68 | 69 | Examples: 70 | 71 | ```bash 72 | kubectl status pods # Show status of all pods in the current namespace 73 | kubectl status pods --all-namespaces # Show status of all pods in all namespaces 74 | kubectl status deploy,sts # Show status of all Deployments and StatefulSets in the current namespace 75 | kubectl status nodes # Show status of all nodes 76 | kubectl status pod my-pod1 my-pod2 # Show status of some pods 77 | kubectl status pod/my-pod1 pod/my-pod2 # Same with previous 78 | kubectl status svc/my-svc1 pod/my-pod2 # Show status of various resources 79 | kubectl status deployment my-dep # Show status of a particular deployment 80 | kubectl status deployments.v1.apps # Show deployments in the "v1" version of the "apps" API group. 81 | kubectl status node -l node-role.kubernetes.io/master # Show status of nodes marked as master 82 | ``` 83 | 84 | ## Development 85 | 86 | Please see [CONTRIBUTING.md](./CONTRIBUTING.md) file for development related documents. 87 | 88 | ## License 89 | 90 | Apache 2.0. See [LICENSE](./LICENSE). 
91 | -------------------------------------------------------------------------------- /assets/deployment-replicaset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bergerx/kubectl-status/45b5e07e7c3dfc5d8150fe3518240cb2684cceee/assets/deployment-replicaset.png -------------------------------------------------------------------------------- /assets/pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bergerx/kubectl-status/45b5e07e7c3dfc5d8150fe3518240cb2684cceee/assets/pod.png -------------------------------------------------------------------------------- /assets/service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bergerx/kubectl-status/45b5e07e7c3dfc5d8150fe3518240cb2684cceee/assets/service.png -------------------------------------------------------------------------------- /assets/statefulset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bergerx/kubectl-status/45b5e07e7c3dfc5d8150fe3518240cb2684cceee/assets/statefulset.png -------------------------------------------------------------------------------- /deploy/krew/plugin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: krew.googlecontainertools.github.com/v1alpha2 2 | kind: Plugin 3 | metadata: 4 | name: status 5 | spec: 6 | version: "v0.2.1" 7 | platforms: 8 | - selector: 9 | matchLabels: 10 | os: linux 11 | arch: amd64 12 | uri: https://github.com/bergerx/kubectl-status/releases/download/v0.2.1/status_linux_amd64.tar.gz 13 | sha256: "a94c1d064261e0fccf40b72ce4791d3f2e48d8affd06763b918547756663e2cd" 14 | files: 15 | - from: "./status" 16 | to: "." 17 | - from: LICENSE 18 | to: "." 
19 | bin: "status" 20 | - selector: 21 | matchLabels: 22 | os: darwin 23 | arch: amd64 24 | uri: https://github.com/bergerx/kubectl-status/releases/download/v0.2.1/status_darwin_amd64.tar.gz 25 | sha256: "7eada7ad8626ec3ccae536e6bd48342d3e21e7d61f52fa80b4cee60223c8b3ad" 26 | files: 27 | - from: "./status" 28 | to: "." 29 | - from: LICENSE 30 | to: "." 31 | bin: "status" 32 | - selector: 33 | matchLabels: 34 | os: windows 35 | arch: amd64 36 | uri: https://github.com/bergerx/kubectl-status/releases/download/v0.2.1/status_windows_amd64.zip 37 | sha256: "54a160cace548dec20b5844042ec49f8fc277ed219bdb15c81335931c960bd1d" 38 | files: 39 | - from: "/status.exe" 40 | to: "." 41 | - from: LICENSE 42 | to: "." 43 | bin: "status.exe" 44 | shortDescription: Show status details of a given resource. 45 | homepage: https://github.com/bergerx/kubectl-status 46 | caveats: | 47 | Usage: 48 | $ kubectl status 49 | 50 | For additional options: 51 | $ kubectl status --help 52 | or https://github.com/bergerx/kubectl-status/blob/master/doc/USAGE.md 53 | 54 | description: | 55 | Show status details of a given resource. Most useful when debugging Pod issues. 
56 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/bergerx/kubectl-status 2 | 3 | go 1.22.4 4 | 5 | require ( 6 | github.com/dustin/go-humanize v1.0.1 7 | github.com/fatih/color v1.17.0 8 | github.com/go-sprout/sprout v0.4.1 9 | github.com/ivanpirog/coloredcobra v1.0.1 10 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 11 | github.com/spf13/cast v1.7.0 12 | github.com/spf13/cobra v1.8.1 13 | github.com/spf13/pflag v1.0.5 14 | github.com/spf13/viper v1.19.0 15 | github.com/stretchr/testify v1.10.0 16 | golang.org/x/sys v0.22.0 17 | k8s.io/api v0.31.3 18 | k8s.io/apimachinery v0.31.3 19 | k8s.io/cli-runtime v0.31.3 20 | k8s.io/client-go v0.31.3 21 | k8s.io/klog/v2 v2.130.1 22 | k8s.io/kubectl v0.31.3 23 | sigs.k8s.io/cli-utils v0.37.2 24 | ) 25 | 26 | require ( 27 | dario.cat/mergo v1.0.0 // indirect 28 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect 29 | github.com/MakeNowJust/heredoc v1.0.0 // indirect 30 | github.com/Masterminds/semver/v3 v3.2.1 // indirect 31 | github.com/blang/semver/v4 v4.0.0 // indirect 32 | github.com/chai2010/gettext-go v1.0.2 // indirect 33 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 34 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 35 | github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect 36 | github.com/fatih/camelcase v1.0.0 // indirect 37 | github.com/fsnotify/fsnotify v1.7.0 // indirect 38 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 39 | github.com/go-errors/errors v1.4.2 // indirect 40 | github.com/go-logr/logr v1.4.2 // indirect 41 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 42 | github.com/go-openapi/jsonreference v0.20.2 // indirect 43 | github.com/go-openapi/swag v0.22.4 // indirect 44 | github.com/gogo/protobuf v1.3.2 // indirect 45 | 
github.com/golang/protobuf v1.5.4 // indirect 46 | github.com/google/btree v1.0.1 // indirect 47 | github.com/google/gnostic-models v0.6.8 // indirect 48 | github.com/google/go-cmp v0.6.0 // indirect 49 | github.com/google/gofuzz v1.2.0 // indirect 50 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 51 | github.com/google/uuid v1.6.0 // indirect 52 | github.com/gorilla/websocket v1.5.0 // indirect 53 | github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect 54 | github.com/hashicorp/hcl v1.0.0 // indirect 55 | github.com/imdario/mergo v0.3.13 // indirect 56 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 57 | github.com/josharian/intern v1.0.0 // indirect 58 | github.com/json-iterator/go v1.1.12 // indirect 59 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect 60 | github.com/magiconair/properties v1.8.7 // indirect 61 | github.com/mailru/easyjson v0.7.7 // indirect 62 | github.com/mattn/go-colorable v0.1.13 // indirect 63 | github.com/mattn/go-isatty v0.0.20 // indirect 64 | github.com/mitchellh/copystructure v1.2.0 // indirect 65 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect 66 | github.com/mitchellh/mapstructure v1.5.0 // indirect 67 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 68 | github.com/moby/spdystream v0.4.0 // indirect 69 | github.com/moby/term v0.5.0 // indirect 70 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 71 | github.com/modern-go/reflect2 v1.0.2 // indirect 72 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 73 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 74 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect 75 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect 76 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect 77 | github.com/pkg/errors v0.9.1 // indirect 78 | github.com/russross/blackfriday/v2 
v2.1.0 // indirect 79 | github.com/sagikazarmark/locafero v0.4.0 // indirect 80 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect 81 | github.com/sourcegraph/conc v0.3.0 // indirect 82 | github.com/spf13/afero v1.11.0 // indirect 83 | github.com/subosito/gotenv v1.6.0 // indirect 84 | github.com/x448/float16 v0.8.4 // indirect 85 | github.com/xlab/treeprint v1.2.0 // indirect 86 | go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect 87 | go.uber.org/multierr v1.11.0 // indirect 88 | golang.org/x/crypto v0.25.0 // indirect 89 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect 90 | golang.org/x/net v0.26.0 // indirect 91 | golang.org/x/oauth2 v0.21.0 // indirect 92 | golang.org/x/sync v0.7.0 // indirect 93 | golang.org/x/term v0.22.0 // indirect 94 | golang.org/x/text v0.16.0 // indirect 95 | golang.org/x/time v0.5.0 // indirect 96 | google.golang.org/protobuf v1.34.2 // indirect 97 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 98 | gopkg.in/inf.v0 v0.9.1 // indirect 99 | gopkg.in/ini.v1 v1.67.0 // indirect 100 | gopkg.in/yaml.v2 v2.4.0 // indirect 101 | gopkg.in/yaml.v3 v3.0.1 // indirect 102 | k8s.io/component-base v0.31.3 // indirect 103 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect 104 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect 105 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 106 | sigs.k8s.io/kustomize/api v0.17.2 // indirect 107 | sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect 108 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 109 | sigs.k8s.io/yaml v1.4.0 // indirect 110 | ) 111 | -------------------------------------------------------------------------------- /pkg/plugin/plugin.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | _ "unsafe" // required for using go:linkname in the file 8 | 9 | "github.com/fatih/color" 10 | 
"github.com/spf13/viper" 11 | "k8s.io/apimachinery/pkg/api/meta" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/watch" 14 | "k8s.io/cli-runtime/pkg/genericiooptions" 15 | "k8s.io/cli-runtime/pkg/resource" 16 | watchtools "k8s.io/client-go/tools/watch" 17 | "k8s.io/klog/v2" 18 | "k8s.io/kubectl/pkg/cmd/util" 19 | "k8s.io/kubectl/pkg/util/interrupt" 20 | 21 | "github.com/bergerx/kubectl-status/pkg/input" 22 | ) 23 | 24 | func errorPrintf(wr io.Writer, format string, a ...interface{}) { 25 | _, _ = color.New(color.BgRed, color.FgHiWhite).Printf(format, a...) 26 | _, _ = fmt.Fprintln(wr) 27 | } 28 | 29 | func Run(f util.Factory, streams genericiooptions.IOStreams, args []string) error { 30 | klog.V(5).InfoS("All config settings", "settings", viper.AllSettings()) 31 | if viper.Get("color") == "always" { 32 | color.NoColor = false 33 | } else if viper.Get("color") == "never" { 34 | color.NoColor = true 35 | } 36 | repo, err := input.NewResourceRepo(f) 37 | if err != nil { 38 | klog.V(2).ErrorS(err, "Error creating repo") 39 | return err 40 | } 41 | engine, err := newRenderEngine(streams) 42 | if err != nil { 43 | klog.V(2).ErrorS(err, "Error creating engine") 44 | return err 45 | } 46 | klog.V(5).InfoS("Created engine", "engine", engine) 47 | results := repo.CLIQueryResults(args) 48 | count := 0 49 | err = results.Visit(func(resourceInfo *resource.Info, err error) error { 50 | count += 1 51 | klog.V(5).InfoS("Processing resource", "item", count, "resource", resourceInfo) 52 | processObj(resourceInfo.Object, engine, repo) 53 | return err 54 | }) 55 | klog.V(5).InfoS("Processed matching resources", "count", count) 56 | if err != nil { 57 | klog.V(1).ErrorS(err, "Error querying resources") 58 | return err 59 | } 60 | isWatch := viper.GetBool("watch") 61 | if !isWatch && count == 0 { 62 | return fmt.Errorf("no resources found") 63 | } 64 | if viper.GetBool("watch") { 65 | return runWatch(results, engine, repo) 66 | } 67 | return nil 68 | } 69 | 70 | 
func runWatch(results *resource.Result, engine *renderEngine, repo *input.ResourceRepo) error { 71 | color.HiYellow("\nPrinted all existing resource statuses, starting to watch. Switching to shallow mode during watch!\n\n") 72 | viper.Set("shallow", true) 73 | viper.Set("watching", true) 74 | klog.V(5).InfoS("Will run watch") 75 | obj, err := results.Object() 76 | if err != nil { 77 | klog.V(1).ErrorS(err, "Failed to get results object") 78 | return err 79 | } 80 | rv, err := meta.NewAccessor().ResourceVersion(obj) 81 | if err != nil { 82 | klog.V(1).ErrorS(err, "Watch failed to obtain resource version for list") 83 | return err 84 | } 85 | klog.V(5).InfoS("Starting watch with a specific resource version", "rv", rv) 86 | w, err := results.Watch(rv) 87 | if err != nil { 88 | klog.V(1).ErrorS(err, "Can't start watch") 89 | return err 90 | } 91 | ctx, cancel := context.WithCancel(context.Background()) 92 | defer cancel() 93 | intr := interrupt.New(nil, cancel) 94 | _ = intr.Run(func() error { 95 | _, err := watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) { 96 | klog.V(5).InfoS("Processing watch event", "e", e) 97 | processObj(e.Object, engine, repo) 98 | return false, nil 99 | }) 100 | klog.V(1).ErrorS(err, "Watch failed", "obj", obj) 101 | return err 102 | }) 103 | return nil 104 | } 105 | 106 | func processObj(obj runtime.Object, engine *renderEngine, repo *input.ResourceRepo) { 107 | streams := engine.ioStreams 108 | _, _ = fmt.Fprintf(streams.Out, "\n") 109 | out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) 110 | if err != nil { 111 | errorPrintf(streams.ErrOut, "Failed to decode obj=%s: %s", obj, err) 112 | return 113 | } 114 | r := newRenderableObject(out, engine, repo) 115 | err = r.render(streams.Out) 116 | if err != nil { 117 | _, _ = fmt.Fprintf(streams.ErrOut, "\n") 118 | errorPrintf(streams.ErrOut, "Failed to render: %s", err) 119 | return 120 | } 121 | _, _ = fmt.Fprintf(streams.Out, "\n") 122 | } 123 | 
--------------------------------------------------------------------------------
/pkg/plugin/render_engine.go:
--------------------------------------------------------------------------------
package plugin

import (
	"embed"
	"os"
	"path/filepath"
	"text/template"

	"github.com/go-sprout/sprout"
	"k8s.io/cli-runtime/pkg/genericiooptions"
	"k8s.io/klog/v2"
)

// templatesFS embeds all builtin templates under pkg/plugin/templates into
// the binary.
//
//go:embed templates
var templatesFS embed.FS

// renderEngine provides methods to build kubernetes api queries from provided cli options.
// Also holds the parsed templates.
type renderEngine struct {
	ioStreams genericiooptions.IOStreams // destination streams for rendered output and errors
	template.Template                    // all parsed templates (embedded plus user provided)
}

// newRenderEngine parses all templates once and returns an engine bound to
// the given IO streams. Returns an error only when template parsing fails.
func newRenderEngine(streams genericiooptions.IOStreams) (*renderEngine, error) {
	klog.V(5).InfoS("Creating new render engine instance...")
	tmpl, err := getTemplate()
	if err != nil {
		klog.V(3).ErrorS(err, "Error parsing templates")
		return nil, err
	}
	return &renderEngine{
		streams,
		*tmpl,
	}, nil
}

// We don't overlay templates dynamically, we use them all in all cases, this may be inefficient and changing this
// could be beneficial in the future. But we parse them all once and re-use again for all template executions.
func getTemplate() (*template.Template, error) {
	klog.V(5).InfoS("Creating new template instance...")
	// Function map: sprout's generic helpers plus this plugin's own helpers.
	tmpl := template.
		New("templates").
		Funcs(sprout.TxtFuncMap()).
44 | Funcs(funcMap()) 45 | return parseTemplates(tmpl) 46 | } 47 | 48 | func parseTemplates(tmpl *template.Template) (*template.Template, error) { 49 | klog.V(5).InfoS("parsing templates from the embedded template fs ...") 50 | parsedTemplates, err := tmpl.ParseFS(templatesFS, "templates/*.tmpl") 51 | if err != nil { 52 | klog.V(3).ErrorS(err, "Error parsing some templates") 53 | return nil, err 54 | } 55 | homeDir, err := os.UserHomeDir() 56 | if err != nil { 57 | klog.V(3).ErrorS(err, "error getting user home dir, ignoring") 58 | } 59 | templatesDir := filepath.Join(homeDir, ".kubectl-status", "templates") 60 | parsedTemplatesWithLocalTemplates, err := parsedTemplates.ParseGlob(filepath.Join(templatesDir, "*.tmpl")) 61 | if err != nil { 62 | klog.V(1).ErrorS(err, "Error parsing user provided templates, ignoring user provided templates") 63 | } else { 64 | parsedTemplates = parsedTemplatesWithLocalTemplates 65 | } 66 | klog.V(5).InfoS("Finished parsing all embedded template fs files.") 67 | return parsedTemplates, nil 68 | } 69 | 70 | // Use kind name if such template exists in templates, else returnDefaultResource 71 | func findTemplateName(tmpl template.Template, kind string) string { 72 | if tmpl.Lookup(kind) == nil { 73 | return "DefaultResource" 74 | } 75 | return kind 76 | } 77 | -------------------------------------------------------------------------------- /pkg/plugin/render_engine_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestGetTemplate(t *testing.T) { 8 | t.Run("templates are parsable", func(t *testing.T) { 9 | _, err := getTemplate() 10 | if err != nil { 11 | t.Errorf("getTemplate() error = %v", err) 12 | return 13 | } 14 | }) 15 | } 16 | -------------------------------------------------------------------------------- /pkg/plugin/renderable.go: -------------------------------------------------------------------------------- 1 | package 
plugin

import (
	"bytes"
	"fmt"
	"io"

	"github.com/fatih/color"
	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"

	"github.com/bergerx/kubectl-status/pkg/input"
)

// newRenderableObject wraps an unstructured object map together with the
// render engine, the resource repo and the global viper config, so templates
// can both inspect the object and issue follow-up queries against the API.
func newRenderableObject(obj map[string]interface{}, engine *renderEngine, repo *input.ResourceRepo) RenderableObject {
	r := RenderableObject{
		Unstructured: unstructured.Unstructured{Object: obj},
		engine:       engine,
		repo:         repo,
		Config:       viper.GetViper(),
	}
	return r
}

// RenderableObject is the object passed to the templates, also provides methods to run queries against Kubernetes API.
// It is an unstructured.Unstructured (so it has the Object field that keeps the Object) but there are numerous helper
// methods that already help with the templates.
type RenderableObject struct {
	unstructured.Unstructured
	engine *renderEngine       // parsed templates and the IO streams used for rendering
	repo   *input.ResourceRepo // used by templates to run follow-up API queries
	Config *viper.Viper        // global CLI configuration, exposed to templates as .Config
}

// KStatus return a Result object of kstatus for the object.
func (r RenderableObject) KStatus() *kstatus.Result {
	result, err := kstatus.Compute(&r.Unstructured)
	if err != nil {
		// Non-fatal: log and return whatever kstatus produced (may be nil).
		klog.V(2).ErrorS(err, "kstatus.Compute failed", "r", r)
	}
	return result
}

// newRenderableObject wraps obj with the same engine and repo as r.
func (r RenderableObject) newRenderableObject(obj map[string]interface{}) RenderableObject {
	return newRenderableObject(obj, r.engine, r.repo)
}

// String returns "Kind/name", or "Kind/name[namespace]" for namespaced objects.
func (r RenderableObject) String() string {
	kindAndName := fmt.Sprintf("%s/%s", r.Kind(), r.Name())
	if namespace := r.Namespace(); namespace != "" {
		kindAndName = fmt.Sprintf("%s[%s]", kindAndName, namespace)
	}
	return kindAndName
}

// Kind returns the object's kind, or "" when the field is absent.
// NOTE(review): the unchecked assertion panics if "kind" is present but not a
// string -- assumed to always hold for objects from the API server; confirm.
func (r RenderableObject) Kind() (kind string) {
	if x := r.Object["kind"]; x != nil {
		kind = x.(string)
	}
	return
}

// Spec returns .spec as a map, or nil when absent.
func (r RenderableObject) Spec() (spec map[string]interface{}) {
	if x := r.Object["spec"]; x != nil {
		spec = x.(map[string]interface{})
	}
	return
}

// Status returns .status as a map, or nil when absent.
func (r RenderableObject) Status() (status map[string]interface{}) {
	if x := r.Object["status"]; x != nil {
		status = x.(map[string]interface{})
	}
	return
}

// Metadata returns .metadata as a map, or nil when absent.
func (r RenderableObject) Metadata() (metadata map[string]interface{}) {
	if x := r.Object["metadata"]; x != nil {
		metadata = x.(map[string]interface{})
	}
	return
}

// Annotations returns .metadata.annotations as a map, or nil when absent.
func (r RenderableObject) Annotations() (annotations map[string]interface{}) {
	if x := r.Metadata()["annotations"]; x != nil {
		annotations = x.(map[string]interface{})
	}
	return
}

// Labels returns .metadata.labels as a map, or nil when absent.
func (r RenderableObject) Labels() (labels map[string]interface{}) {
	if x := r.Metadata()["labels"]; x != nil {
		labels = x.(map[string]interface{})
	}
	return
}

// Name returns .metadata.name via the embedded Unstructured accessor.
func (r RenderableObject) Name() string {
	return r.GetName()
}

// Namespace returns .metadata.namespace via the embedded Unstructured accessor.
func (r RenderableObject) Namespace() string {
	return r.GetNamespace()
}

func (r
RenderableObject) StatusConditions() (conditions []interface{}) {
	// Returns .status.conditions as a raw list, or nil when absent.
	if x := r.Status()["conditions"]; x != nil {
		conditions = x.([]interface{})
	}
	return
}

// render renders r into wr using the template named after the object's kind,
// falling back to the "DefaultResource" template when no such template exists.
func (r RenderableObject) render(wr io.Writer) error {
	klog.V(5).InfoS("called render, calling findTemplateName", "r", r)
	templateName := findTemplateName(r.engine.Template, r.Kind())
	klog.V(5).InfoS("calling executeTemplate on renderable", "r", r, "templateName", templateName)
	err := r.executeTemplate(wr, templateName, r)
	if err != nil {
		klog.V(3).ErrorS(err, "error on executeTemplate", "r", r)
	}
	return err
}

// renderString is like render but captures the output into a string.
func (r RenderableObject) renderString() (string, error) {
	klog.V(5).InfoS("called renderString", "r", r)
	var buffer bytes.Buffer
	err := r.render(&buffer)
	return buffer.String(), err
}

// renderTemplate executes the named template with data and returns its
// output. Errors are logged and also returned alongside any partial output.
func (r RenderableObject) renderTemplate(templateName string, data interface{}) (string, error) {
	var buffer bytes.Buffer
	klog.V(5).InfoS("called renderTemplate, calling ExecuteTemplate",
		"r", r, "templateName", templateName, "data", data)
	err := r.executeTemplate(&buffer, templateName, data)
	if err != nil {
		klog.V(3).ErrorS(err, "error executing template",
			"r", r, "templateName", templateName)
	}
	return buffer.String(), err
}

// executeTemplate runs the named template, deduplicating top-level objects by
// UID: when data is a RenderableObject whose kind matches the template name
// and its UID was seen before, a short "already printed" note is written
// instead of rendering again. The dedup skip is disabled while watching and
// under the test-hack flag.
// NOTE(review): checkAdd mutates renderedUIDs before the watching/test-hack
// flags are evaluated (&& short-circuits left to right), so UIDs are recorded
// even when the skip is disabled -- assumed intentional; confirm.
func (r RenderableObject) executeTemplate(wr io.Writer, name string, data any) error {
	target, ok := data.(RenderableObject)
	if ok && target.Kind() == name && renderedUIDs.checkAdd(target.GetUID()) && !viper.GetBool("watching") && !viper.GetBool("test-hack") {
		klog.V(3).InfoS("skip rendering of the RenderableObject as its already rendered",
			"r", r, "templateName", name)
		_, _ = color.New(color.FgWhite).Fprintf(wr, "%s is already printed", target.String())
		return nil
	}
	return r.engine.ExecuteTemplate(wr, name, data)
}

type
uidSet map[types.UID]struct{} 158 | 159 | func (s uidSet) checkAdd(uid types.UID) bool { 160 | _, exists := s[uid] 161 | if !exists { 162 | s[uid] = struct{}{} 163 | } 164 | return exists 165 | } 166 | 167 | var renderedUIDs = make(uidSet) 168 | -------------------------------------------------------------------------------- /pkg/plugin/template_functions_static_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | var ( 9 | emptyMap = map[string]interface{}{} 10 | searchForMap = map[string]interface{}{ 11 | "searchKey1": "searchVal1", 12 | "searchKey2": "searchVal2", 13 | } 14 | nonMatchingValueMap = map[string]interface{}{ 15 | "searchKey": "searchValDoesntMatch", 16 | } 17 | nonMatchingKeyMap = map[string]interface{}{ 18 | "searchKeyDoesntMatch": "searchVal", 19 | } 20 | matchingSuperSetMap1 = map[string]interface{}{ 21 | "searchKey1": "searchVal1", 22 | "searchKey2": "searchVal2", 23 | "otherKey1": "doestMatter1", 24 | } 25 | matchingSuperSetMap2 = map[string]interface{}{ 26 | "searchKey1": "searchVal1", 27 | "searchKey2": "searchVal2", 28 | "otherKey2": "doestMatter2", 29 | } 30 | nestedSearchForMap = map[string]interface{}{ 31 | "outerKey.innerKey.searchKey1": "searchVal1", 32 | "outerKey.innerKey.searchKey2": "searchVal2", 33 | } 34 | matchingNestedMap = map[string]interface{}{ 35 | "outerKey": map[string]interface{}{ 36 | "innerKey": matchingSuperSetMap1, 37 | "otherKey": "doesntMatter", 38 | }, 39 | } 40 | nonMatchingMiddleKeyNestedMap = map[string]interface{}{ 41 | "outerKey": matchingSuperSetMap1, 42 | } 43 | ) 44 | 45 | func TestGetMatchingItemInMapList(t *testing.T) { 46 | type args struct { 47 | searchFor map[string]interface{} 48 | mapList []interface{} 49 | } 50 | tests := []struct { 51 | name string 52 | args args 53 | wantItem map[string]interface{} 54 | }{ 55 | { 56 | name: "one-to-one maps", 57 | args: args{ 58 | searchFor: 
searchForMap, 59 | mapList: []interface{}{searchForMap}, 60 | }, 61 | wantItem: searchForMap, 62 | }, { 63 | name: "key exists but value doesn't match", 64 | args: args{ 65 | searchFor: searchForMap, 66 | mapList: []interface{}{nonMatchingValueMap}, 67 | }, 68 | wantItem: nil, 69 | }, { 70 | name: "search key doesnt exist in mapList", 71 | args: args{ 72 | searchFor: searchForMap, 73 | mapList: []interface{}{nonMatchingKeyMap}, 74 | }, 75 | wantItem: nil, 76 | }, { 77 | name: "empty mapList", 78 | args: args{ 79 | searchFor: searchForMap, 80 | mapList: []interface{}{emptyMap}, 81 | }, 82 | wantItem: nil, 83 | }, { 84 | name: "empty searchFor", 85 | args: args{ 86 | searchFor: emptyMap, 87 | mapList: []interface{}{searchForMap}, 88 | }, 89 | wantItem: nil, 90 | }, { 91 | name: "searchFor is subset", 92 | args: args{ 93 | searchFor: searchForMap, 94 | mapList: []interface{}{nonMatchingKeyMap, nonMatchingValueMap, matchingSuperSetMap1}, 95 | }, 96 | wantItem: matchingSuperSetMap1, 97 | }, { 98 | name: "multiple matches should return first match", 99 | args: args{ 100 | searchFor: searchForMap, 101 | mapList: []interface{}{nonMatchingKeyMap, nonMatchingValueMap, matchingSuperSetMap2, matchingSuperSetMap1}, 102 | }, 103 | wantItem: matchingSuperSetMap2, 104 | }, { 105 | name: "nested map is subset", 106 | args: args{ 107 | searchFor: nestedSearchForMap, 108 | mapList: []interface{}{nonMatchingKeyMap, nonMatchingValueMap, matchingSuperSetMap1, matchingNestedMap}, 109 | }, 110 | wantItem: matchingNestedMap, 111 | }, { 112 | name: "nested map missing key", 113 | args: args{ 114 | searchFor: nestedSearchForMap, 115 | mapList: []interface{}{nonMatchingKeyMap}, 116 | }, 117 | wantItem: nil, 118 | }, { 119 | name: "nested map missing middle key", 120 | args: args{ 121 | searchFor: nestedSearchForMap, 122 | mapList: []interface{}{nonMatchingMiddleKeyNestedMap}, 123 | }, 124 | wantItem: nil, 125 | }, 126 | } 127 | for _, tt := range tests { 128 | t.Run(tt.name, func(t 
*testing.T) { 129 | if gotItem := getMatchingItemInMapList(tt.args.searchFor, tt.args.mapList); !reflect.DeepEqual(gotItem, tt.wantItem) { 130 | t.Errorf("getMatchingItemInMapList() = %v, want %v", gotItem, tt.wantItem) 131 | } 132 | }) 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /pkg/plugin/templates/CronJob.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "CronJob" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- if .Status.lastScheduleTime }}, last ran at {{ .Status.lastScheduleTime }} ({{ .Status.lastScheduleTime | colorAgo }} ago) 5 | {{- else }} 6 | {{- "Not yet scheduled" | yellow | bold | nindent 2 }} 7 | {{- end }} 8 | {{- with .Status.active }} 9 | {{- range . }} 10 | {{- "Active" | green | nindent 2 }}: {{ .kind | bold }}/{{ .name }} is running. 11 | {{- end }} 12 | {{- end }} 13 | {{- template "kstatus_summary" . }} 14 | {{- template "finalizer_details_on_termination" . }} 15 | {{- template "application_details" . }} 16 | {{- template "recent_updates" . }} 17 | {{- template "events" . }} 18 | {{- template "owners" . }} 19 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/DaemonSet.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "DaemonSet" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- template "observed_generation_summary" . }} 7 | {{- template "application_details" . }} 8 | {{- template "daemonset_replicas_status" . }} 9 | {{- template "conditions_summary" . }} 10 | {{- $rolloutStatus := .RolloutStatus . 
}} 11 | {{- if not $rolloutStatus.done }} 12 | {{- "Ongoing Rollout" | yellow | bold | redBoldIf $rolloutStatus.error | nindent 2 }} 13 | {{- with $rolloutStatus.message }}: {{ . | yellow }}{{ end }} 14 | {{- with $rolloutStatus.error }}: Error: {{ . | red }}{{ end }} 15 | {{- end }} 16 | {{- template "recent_daemonset_rollouts" . }} 17 | {{- template "recent_updates" . }} 18 | {{- template "events" . }} 19 | {{- template "owners" . }} 20 | {{- end -}} 21 | 22 | {{- define "daemonset_replicas_status" }} 23 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 24 | {{- if .Status.desiredNumberScheduled }} 25 | {{- printf "desired:%d" .Status.desiredNumberScheduled | bold | nindent 2 }} 26 | {{- if hasKey .Status "currentNumberScheduled" }}, {{ printf "current:%d" .Status.currentNumberScheduled | redBoldIf (not ( eq .Status.desiredNumberScheduled .Status.currentNumberScheduled)) }}{{ end }} 27 | {{- if hasKey .Status "numberAvailable" }}, {{ printf "available:%d" .Status.numberAvailable | redBoldIf (not ( eq .Status.desiredNumberScheduled .Status.numberAvailable)) }}{{ end }} 28 | {{- if hasKey .Status "numberReady" }}, {{ printf "ready:%d" .Status.numberReady | redBoldIf (not (eq .Status.desiredNumberScheduled .Status.numberReady)) }}{{ end }} 29 | {{- if hasKey .Status "updatedNumberScheduled" }}, {{ printf "updated:%d" .Status.updatedNumberScheduled | redBoldIf (not (eq .Status.desiredNumberScheduled .Status.updatedNumberScheduled)) }}{{ end }} 30 | {{- with .Status.numberMisscheduled }}{{ "numberMisscheduled" | red | bold }}:{{ . }}{{- end }} 31 | {{- else }} 32 | {{- "Fault!" | red | bold | nindent 2 }}: The total number of nodes that should be running the daemon pod is not yet set (".status.desiredNumberScheduled"). 
33 | {{- end }} 34 | {{- end -}} 35 | 36 | {{- define "recent_daemonset_rollouts" }} 37 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 38 | {{- $sectionHeader := false }} 39 | {{- $previousRevision := "" }} 40 | {{- range .KubeGetByLabelsMap .Namespace "controllerrevisions" .Labels }} 41 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 42 | {{- if eq (index .Metadata.ownerReferences 0).name $.Name }} 43 | {{- if not $sectionHeader }} 44 | {{- "Rollouts:" | nindent 2 }} 45 | {{- template "rollout_diffs_flag_help" $ }} 46 | {{- $sectionHeader = true }} 47 | {{- end }} 48 | {{- "" | nindent 4 }} 49 | {{- with .Metadata.creationTimestamp }}{{ . | colorAgo }} ago{{ end }} used {{ .Kind | bold }}/{{ .Name }}. 50 | {{- if and $previousRevision ($.Config.GetBool "include-rollout-diffs") }} 51 | {{- with $.KubeGetUnifiedDiffString "ControllerRevision" $.Namespace $previousRevision.Name .Name }} 52 | {{- . | markRed "^-.*" | markGreen "^\\+.*" | nindent 6 }} 53 | {{- end }} 54 | {{- end }} 55 | {{- $previousRevision = . }} 56 | {{- end }} 57 | {{- end }} 58 | {{- end }} 59 | -------------------------------------------------------------------------------- /pkg/plugin/templates/Deployment.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Deployment" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }}{{- with index .Annotations "deployment.kubernetes.io/revision" }} rev:{{.}}{{ end }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- template "observed_generation_summary" . }} 7 | {{- template "application_details" . 
}} 8 | {{- $injectedStatus := .Status | default dict }} 9 | {{- $_ := set $injectedStatus "replicas" ( $injectedStatus.replicas | default 0 ) }} 10 | {{- $_ := set $injectedStatus "readyReplicas" ( $injectedStatus.readyReplicas | default 0) }} 11 | {{- $_ := set $injectedStatus "availableReplicas" ( $injectedStatus.availableReplicas | default 0 ) }} 12 | {{- $_ := set $injectedStatus "updatedReplicas" ( $injectedStatus.updatedReplicas | default 0 ) }} 13 | {{- $_ := set .Object "status" $injectedStatus }} 14 | {{- template "replicas_status" . }} 15 | {{- template "conditions_summary" . }} 16 | {{- template "suspended" . }} 17 | {{- $rolloutStatus := .RolloutStatus . }} 18 | {{- if not $rolloutStatus.done }} 19 | {{- "Ongoing Rollout" | yellow | bold | redBoldIf $rolloutStatus.error | nindent 2 }} 20 | {{- with $rolloutStatus.message }}: {{ . | yellow }}{{ end }} 21 | {{- with $rolloutStatus.error }}: Error: {{ . | red }}{{ end }} 22 | {{- end }} 23 | {{- if not .Status.readyReplicas }} 24 | {{- "Outage" | red | bold | nindent 2 }}: Deployment has no Ready replicas. 25 | {{- else if ne .Status.replicas .Status.readyReplicas }} 26 | {{- if $rolloutStatus.done }} 27 | {{- "Not Ready Replicas" | yellow | bold | nindent 2 }}: {{ sub .Status.replicas .Status.readyReplicas }} replicas are not Ready. 28 | {{- end }} 29 | {{- else if .Status.unavailableReplicas }} 30 | {{- if $rolloutStatus.done }} 31 | {{- "Unavailable Replicas" | yellow | bold | nindent 2 }}: {{ .status.unavailableReplicas }} replicas are not Available. 32 | {{- end }} 33 | {{- end }} 34 | {{- template "recent_deployment_rollouts" . }} 35 | {{- template "recent_updates" . }} 36 | {{- template "events" . }} 37 | {{- template "owners" . 
}} 38 | {{- end }} 39 | 40 | {{- define "recent_deployment_rollouts" }} 41 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 42 | {{- $deploymentRevision := index .Annotations "deployment.kubernetes.io/revision" }} 43 | {{- $sectionHeader := false }} 44 | {{- $previousReplicaSet := "" }} 45 | {{- range .KubeGet .Namespace "ReplicaSets" }} 46 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 47 | {{- if eq (index .Metadata.ownerReferences 0).name $.Name }} 48 | {{- if not $sectionHeader }} 49 | {{- "Rollouts:" | nindent 2}} 50 | {{- template "rollout_diffs_flag_help" $ }} 51 | {{- $sectionHeader = true }} 52 | {{- end }} 53 | {{- with .Metadata.creationTimestamp }}{{ . | colorAgo | nindent 4 }} ago{{ end }}, managed by {{ .Kind | bold }}/{{ .Name }} 54 | {{- $replicationSetRevision := index .Annotations "deployment.kubernetes.io/revision" }} 55 | {{- $activeReplicaSet := eq $deploymentRevision $replicationSetRevision }} 56 | {{- if .Status.replicas }}, has {{ .Status.replicas }} replicas 57 | {{- if not $activeReplicaSet }}, {{ "abandoning" | red }}{{ end }} 58 | {{- else }} 59 | {{- if $activeReplicaSet }}, {{ "current revision but doesn't yet have any replicas" | red }}{{ end }} 60 | {{- end }} 61 | {{- with index .Annotations "deployment.kubernetes.io/revision-history" }} 62 | {{- "" | nindent 6 }} Used at revisions: {{ . }},{{ $replicationSetRevision}} 63 | {{- if $activeReplicaSet }} ({{ "current revision" | green }}) {{ end }} 64 | {{- end }} 65 | {{- if and $previousReplicaSet ($.Config.GetBool "include-rollout-diffs") }} 66 | {{- with $.KubeGetUnifiedDiffString "ReplicaSet" $.Namespace $previousReplicaSet.Name .Name }} 67 | {{- "Diff" | bold | nindent 6 }}: 68 | {{- . | markRed "^-.*" | markGreen "^\\+.*" | nindent 6 }} 69 | {{- end }} 70 | {{- end }} 71 | {{- $previousReplicaSet = . 
}} 72 | {{- end }} 73 | {{- end }} 74 | {{- end }} 75 | -------------------------------------------------------------------------------- /pkg/plugin/templates/Event.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Event" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{ template "event" .Unstructured.Object }} 5 | {{- end }} 6 | 7 | {{- define "event" }} 8 | {{- list .reportingInstance .source.host .reportingComponent .source.component .source.fieldPath | compact | uniq | join "," | bold }} 9 | {{- if eq .type "Warning" }} {{ .reason | red | bold }}{{ else }} {{ .reason | bold }}{{ end -}} 10 | {{- with .action}} {{ . }}{{end}} 11 | {{- with .involvedObject }} involving {{ "" }} 12 | {{- with .kind }}{{. | bold }}/{{ end }} 13 | {{- with .name }}{{ . }}{{ end }} 14 | {{- with .fieldPath }}[{{ . }}]{{ end }} 15 | {{- with .namespace }} (in {{ . }}){{ end }} 16 | {{- end }} 17 | {{- with .lastTimestamp }} {{ . | colorAgo }} ago{{ end }} 18 | {{- if gt (.count | int) 1 -}} 19 | {{- if .firstTimestamp }} (x{{ .count }} over {{ .firstTimestamp | colorAgo }}) 20 | {{- else }} (x{{ .count }}) 21 | {{- end -}} 22 | {{- end -}} 23 | {{- " " }}{{ .message }} 24 | {{- end -}} 25 | -------------------------------------------------------------------------------- /pkg/plugin/templates/HorizontalPodAutoscaler.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "HorizontalPodAutoscaler" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} last scale was {{ .Status.lastScaleTime | colorAgo }} ago 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . 
}} 6 | {{- "current" | bold | nindent 2 }} replicas:{{ .Status.currentReplicas }}/({{ .Spec.minReplicas | default "1" }}-{{ .Spec.maxReplicas }}) 7 | {{- if .Status.currentCPUUtilizationPercentage }} CPUUtilisation: {{ .Status.currentCPUUtilizationPercentage | toString | redIf (ge .Status.currentCPUUtilizationPercentage .Spec.targetCPUUtilizationPercentage) }}%/{{ .Spec.targetCPUUtilizationPercentage }}%{{ end }} 8 | {{- if (ne .Status.currentReplicas .Status.desiredReplicas) }}, {{ "desired" | red | bold}}: {{ .Status.currentReplicas }} --> {{ .Status.desiredReplicas }}{{ end }} 9 | {{- template "application_details" . }} 10 | {{- template "recent_updates" . }} 11 | {{- template "events" . }} 12 | {{- template "owners" . }} 13 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/Ingress.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Ingress" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- template "load_balancer_ingress" . }} 7 | {{- template "application_details" . }} 8 | {{- $missingSvcs := list }} 9 | {{- $unhealthySvcs := list }} 10 | {{- range .Spec.rules }} 11 | {{- range .http.paths }} 12 | {{- $ingSvcName := coalesce .backend.service.name .backend.serviceName }} 13 | {{- $ingSvcPortNumber := coalesce .backend.service.port.number .backend.servicePort }} 14 | {{- $ingSvcPortName := coalesce .backend.service.port.name .backend.serviceName }} 15 | {{- $ingSvcPort := coalesce $ingSvcPortNumber $ingSvcPortName }} 16 | {{- if not (and $ingSvcName $ingSvcPort) }} 17 | {{- "Ingress has no service reference" | yellow | bold | nindent 2 }}: Ingress doesn't have either a name ({{ $ingSvcName }}) or a port ({{ $ingSvcPort }}). 
18 | {{- else }} 19 | {{- $svc := $.KubeGetFirst $.Namespace "Service" $ingSvcName }} 20 | {{- if not $svc }} 21 | {{- if not ($missingSvcs | has $ingSvcName) }} 22 | {{- "Service" | bold | nindent 2 }}/{{ $ingSvcName }} {{ "doesn't exist" | red | bold }}, but it's referenced in Ingress. 23 | {{- $missingSvcs = append $missingSvcs $ingSvcName }} 24 | {{- end }} 25 | {{- else }} 26 | {{- $portDefinedInSvc := false }} 27 | {{- range $svc.Spec.ports }} 28 | {{- if or (eq $ingSvcPortName .name) (eq ($ingSvcPortNumber | int) (.port | int)) }} 29 | {{- $portDefinedInSvc = true }} 30 | {{- end }} 31 | {{- end }} 32 | {{- if not $portDefinedInSvc }} 33 | {{- "Service port doesnt exist" | red | bold | nindent 2 }}: {{ "Service" | bold }}/{{ $ingSvcName }}:{{ $ingSvcPort }} referenced in ingress, but Service doesn't have that port defined. 34 | {{- else }} 35 | {{- $ep := $.KubeGetFirst $.Namespace "EndPoints" $ingSvcName }} 36 | {{- if not $ep.Object }} 37 | {{ "Service" | bold | nindent 2 }}/{{ $ingSvcName }} is {{ "experiencing outage" | red | bold }}, it doesn't have the corresponding {{ "EndPoints" | bold }}/{{ $ingSvcName }}. 38 | {{- else }} 39 | {{- $addressCount := 0 }} 40 | {{- range $ep.Object.subsets }} 41 | {{- range .addresses }} 42 | {{- $addressCount = add1 $addressCount }} 43 | {{- end }} 44 | {{- end }} 45 | {{- if not $addressCount }} 46 | {{- if not ($unhealthySvcs | has $ingSvcName) }} 47 | {{- "Service" | bold | nindent 2 }}/{{ $ingSvcName }} is {{ "experiencing outage" | red | bold }}, it doesn't have any Healthy endpoints. 48 | {{- $unhealthySvcs = append $unhealthySvcs $ingSvcName }} 49 | {{- end }} 50 | {{- else }} 51 | {{- "Service" | bold | nindent 2 }}/{{ $ingSvcName }}:{{ $ingSvcPort }} has {{ printf "%d" $addressCount | green }} endpoints. 52 | {{- end }} 53 | {{- end }} 54 | {{- end }} 55 | {{- end }} 56 | {{- end }} 57 | {{- end }} 58 | {{- end }} 59 | {{- template "recent_updates" . }} 60 | {{- template "events" . 
}} 61 | {{- template "owners" . }} 62 | {{- end -}} 63 | 64 | {{- define "load_balancer_ingress" }} 65 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 66 | {{- with .Status.loadBalancer.ingress }} 67 | {{- if or (index . 0).hostname (index . 0).ip }} 68 | {{- with (index . 0).hostname }} {{ "LoadBalancer" | green }}:{{ . }}{{ end }} 69 | {{- with (index . 0).ip }} {{ "LoadBalancer" | green }}:{{ . }}{{ end }} 70 | {{- else }} {{ "Pending LoadBalancer" | red | bold }} 71 | {{- end }} 72 | {{- end }} 73 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/Job.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Job" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- /* See https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#parallel-jobs */ -}} 5 | {{- if eq (coalesce .Spec.completions .Spec.parallelism 1 | toString) "1" }} 6 | {{- template "job_non_parallel" . }} 7 | {{- else if .Spec.completions }} 8 | {{- /* TODO: handle "fixed completion count jobs" better */ -}} 9 | {{- template "job_parallel" . }} 10 | {{- else if .Spec.parallelism }} 11 | {{- /* TODO: handle "work queue jobs" better */ -}} 12 | {{- template "job_parallel" . }} 13 | {{- end }} 14 | {{- template "kstatus_summary" . }} 15 | {{- template "finalizer_details_on_termination" . }} 16 | {{- template "conditions_summary" . }} 17 | {{- template "application_details" . }} 18 | {{- template "recent_updates" . }} 19 | {{- template "events" . }} 20 | {{- template "owners" . 
}} 21 | {{- end -}} 22 | 23 | {{- define "job_non_parallel" }} 24 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 25 | {{- if .Status.active }}, {{ "Active" | yellow | bold }}{{ end }} 26 | {{- if .Status.succeeded }}, {{ "Succeeded" | green }}{{ end }} 27 | {{- if .Status.failed }}, {{ "Failed" | red | bold }}{{ end }} 28 | {{- end -}} 29 | 30 | {{- define "job_parallel" }} 31 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 32 | {{- if .Status.active }}, {{ "Active" | yellow | bold }}{{ end }} 33 | {{- if .Status.failed }}, {{ "Failed" | red | bold }} {{ .Status.failed }}/{{ .Spec.backoffLimit }} times.{{ end }} 34 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/Lease.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Lease" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject */ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "application_details" . }} 5 | {{- template "owners" . }} 6 | {{- "Lease" | bold | nindent 2 }} 7 | {{- with .Spec.acquireTime }} acquired {{ . | colorAgo }} ago{{ end }} 8 | {{- with .Spec.renewTime }} renewed {{ . | colorAgo }} ago{{ end }} 9 | {{- with .Spec.leaseDurationSeconds }} being hold for {{ . | duration }}{{ end }} 10 | {{- with .Spec.holderIdentity }} by {{ . | bold }}{{ end }} 11 | {{- with .Spec.leaseTransitions }}{{ if gt . 0 }}{{ printf "Transitioned %d times" . | yellow | nindent 4 }}{{ end }}{{ end }} 12 | {{- with .Metadata.labels }}{{with index . "kubernetes.io/hostname" }} on host {{ . | bold }}{{ end }}{{ end }} 13 | {{- template "recent_updates" . }} 14 | {{- template "events" . 
}} 15 | {{- end }} -------------------------------------------------------------------------------- /pkg/plugin/templates/NamespaceConfig.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "NamespaceConfig" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "observed_generation_summary" . }} 5 | {{- template "application_details" . }} 6 | {{- template "kstatus_summary" . }} 7 | {{- template "finalizer_details_on_termination" . }} 8 | {{- template "conditions_summary" . }} 9 | {{- range $resource, $valueList := .Status.lockedResourceStatuses }} 10 | {{- $resource | nindent 2 }}: {{ range $status := $valueList }}{{ template "condition_summary" . }}{{- end }} 11 | {{- end }} 12 | {{- template "recent_updates" . }} 13 | {{- template "events" . }} 14 | {{- template "owners" . }} 15 | {{- end }} -------------------------------------------------------------------------------- /pkg/plugin/templates/PersistentVolume.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "PersistentVolume" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- "PV" | nindent 2 }} is {{- with .Status.phase }} {{ . | colorKeyword }}{{ end }} 7 | {{- with .Spec.storageClassName }} managed by {{ "StorageClass" | bold }}/{{ . }}{{ end }} 8 | {{- with index .Annotations "kubernetes.io/createdby" }} created by {{ . | bold }}{{ end }} 9 | {{- with index .Annotations "pv.kubernetes.io/provisioned-by" }} provisioned by {{ . | bold }}{{ end }} 10 | {{- with index .Spec.accessModes 0 }} with {{ . | bold }} mode{{ end }} 11 | {{- with .Status.reason }}{{ "reason" | bold }}: {{ . 
}}{{ end }} 12 | {{- with .Status.message }}{{ "message" | red | bold | nindent 2 }}: {{ . }}{{- end }}{{/* Exists usually when there is problem */}} 13 | {{- with .Spec.claimRef }} 14 | {{- "Created" | nindent 2 }} for {{ .kind | bold }}/{{ .name }} -n {{ .namespace }} 15 | {{- $pvc := $.KubeGetFirst .namespace .kind .name }} 16 | {{- if $pvc.Object }} 17 | {{- if ne $pvc.Metadata.uid .uid }} 18 | {{- "Dangling" | red | bold | nindent 4 }}: The PVC referenced in this PV is replaced by a new one. And a new PV is created for the replacement PVC. 19 | {{- "PVC" | nindent 6 }} uid referenced in this PV: {{ .uid }} 20 | {{- "Current PVC" | nindent 6 }} uid: {{ $pvc.Metadata.uid }} 21 | {{- end }} 22 | {{- else }} 23 | {{- "Replaced" | red | bold | nindent 4 }}: This PVC doesn't exist anymore. Its likely that this PV is dangling. 24 | {{- end }} 25 | {{- end }} 26 | {{- with .Spec.azureDisk }} 27 | {{- "Azure Disk" | bold | nindent 2 }} 28 | {{- with .kind }} of kind {{ . | bold }}{{ end }} 29 | {{- with .readOnly }}, in {{ "RO" | bold | yellow }} mode{{ end }} 30 | {{- with .cachingMode }}, with {{ . | bold }} host cache{{end}} 31 | {{- with .fsType }}, using {{ . | bold }} FS type{{end}} 32 | {{- "Disk URI" | nindent 4 }}: {{ .diskURI }} 33 | {{- end }} 34 | {{- with .Spec.azureFiles }} 35 | {{- "Azure Files" | bold | nindent 2 }} 36 | {{- with .shareName }}, share name is {{ . | bold }}{{end}} 37 | {{- with .readOnly }}, in {{ "RO" | bold | yellow }} mode{{ end }} 38 | {{- end }} 39 | {{- template "application_details" . }} 40 | {{- template "recent_updates" . }} 41 | {{- template "events" . }} 42 | {{- template "owners" . 
}} 43 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/PersistentVolumeClaim.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "PersistentVolumeClaim" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- template "application_details" . }} 7 | {{- template "conditions_summary" . }} 8 | {{- "PVC" | nindent 2 }} 9 | {{- with .Spec.volumeName }} uses {{ "PersistentVolume" | bold }}/{{ . }}{{ end }} 10 | {{- with .Spec.volumeMode }}, with {{ . | bold }} mode{{ end }} 11 | {{- with .Status.capacity.storage }}, asks for {{ . | bold }}{{ end }} 12 | {{- with index .Annotations "volume.beta.kubernetes.io/storage-provisioner" }}, provisioned by {{ . | bold }}{{ end }} 13 | {{- with index .Annotations "volume.kubernetes.io/selected-node" }}, attached on {{ "Node" | bold }}/{{ . }}{{ end }} 14 | {{- if not .Spec.volumeName }} 15 | {{- "Pending" | red | bold | nindent 2 }}: This PVC doesnt yet have a paired PV. 16 | {{- end }} 17 | {{- template "recent_updates" . }} 18 | {{- template "events" . }} 19 | {{- template "owners" . 
}} 20 | {{- with $volumeName := .Spec.volumeName }} 21 | {{- if $.Config.GetBool "include-volumes" }} 22 | {{- "Binds:" | nindent 2 }} 23 | {{- $pv := $.KubeGetFirst "" "PersistentVolume" $volumeName }} 24 | {{- $.Include "PersistentVolume" $pv | nindent 4 -}} 25 | {{- else }} 26 | {{- "Binds:" | nindent 2 }} PersistentVolume/{{ $volumeName }} 27 | {{- end }} 28 | {{- end }} 29 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/ReplicaSet.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "ReplicaSet" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- template "observed_generation_summary" . }} 7 | {{- template "application_details" . }} 8 | {{- /* Where there is no readyReplicas, STS doesn't have those fields at all, 9 | and apparently the numbers are parsed as float64, so used 0.0 rather than 0 */ -}} 10 | {{- $injectedStatus := .Status }} 11 | {{- if not (hasKey .Status "readyReplicas") }} 12 | {{- $injectedStatus := set $injectedStatus "readyReplicas" 0.0 }} 13 | {{- end }} 14 | {{- if not (hasKey .Status "availableReplicas") }} 15 | {{- $injectedStatus := set $injectedStatus "availableReplicas" 0.0 }} 16 | {{- end }} 17 | {{- $injectedManifest := set .Object "status" $injectedStatus }} 18 | {{- template "replicas_status" $injectedManifest }} 19 | {{- template "conditions_summary" . }} 20 | {{- if and .Spec.replicas (or (not .Status.replicas) (not .Status.readyReplicas)) }} 21 | {{- "Outage" | red | bold | nindent 2 }}: ReplicaSet has no Ready replicas.
22 | {{- end }} 23 | {{- if hasKey .Annotations "deployment.kubernetes.io/desired-replicas" }} 24 | {{- $deploymentDesiredReplicas := index .Annotations "deployment.kubernetes.io/desired-replicas" | float64 }} 25 | {{- if $deploymentDesiredReplicas }} 26 | {{- if .Spec.replicas }} 27 | {{- if ne $deploymentDesiredReplicas (.Spec.replicas | float64) }} 28 | {{- "Ongoing rollout" | yellow | bold | nindent 2 }}, check Owner Reference resources. 29 | {{- end }} 30 | {{- else }}{{/* means .Spec.replicas: 0 */}} 31 | {{- "Old" | red | bold | nindent 2 }}: This ReplicaSet is likely replaced by a new one, check Owner Reference resources. 32 | {{- end }} 33 | {{- else }}{{/* means deployment.kubernetes.io/desired-replicas: "0" */}} 34 | {{- template "suspended" . }} 35 | {{- end }} 36 | {{- end }} 37 | {{- template "recent_updates" . }} 38 | {{- template "events" . }} 39 | {{- template "owners" . }} 40 | {{- end }} -------------------------------------------------------------------------------- /pkg/plugin/templates/ResourceQuota.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "ResourceQuota" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . }} 6 | {{- $hard := .Status.hard }} 7 | {{- range $key, $value := .Status.used }} 8 | {{- $key | nindent 2 }}: {{ $value }}/{{ index $hard $key }} 9 | {{- end }} 10 | {{- template "application_details" . }} 11 | {{- template "recent_updates" . }} 12 | {{- template "events" . }} 13 | {{- template "owners" .
}} 14 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/Service.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "Service" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "finalizer_details_on_termination" . }} 5 | {{- if eq .Spec.clusterIP "None" }} {{ "Headless" | yellow | bold }}{{- end }} 6 | {{- if eq .Spec.type "LoadBalancer" }} 7 | {{- template "load_balancer_ingress" . }} 8 | {{- end }} 9 | {{- $endpoint := .KubeGetFirst .Namespace "Endpoints" .Name }} 10 | {{- if $endpoint.Object }} 11 | {{- if hasKey $endpoint.Annotations "endpoints.kubernetes.io/last-change-trigger-time" -}} 12 | , last endpoint change was {{ index $endpoint.Annotations "endpoints.kubernetes.io/last-change-trigger-time" | colorAgo }} ago 13 | {{- end}} 14 | {{- end}} 15 | {{- template "kstatus_summary" . }} 16 | {{- template "application_details" . }} 17 | {{- if $endpoint.Object }} 18 | {{- if $endpoint.Object.subsets }} 19 | {{- range $endpoint.Object.subsets }} 20 | {{- $ports := .ports }} 21 | {{- if .addresses }} 22 | {{- range .addresses }} 23 | Ready: {{ template "endpoint_subset_address" (dict "address" . "ports" $ports) }} 24 | {{- end }} 25 | {{- else }} 26 | {{ "Outage" | red | bold }}: This service doesn't match any Ready pods. 27 | {{- end }} 28 | {{- with .notReadyAddresses }} 29 | {{- range . }} 30 | {{ "NotReady" | red | bold }}: {{ template "endpoint_subset_address" (dict "address" . "ports" $ports) }} 31 | {{- end }} 32 | {{- end }} 33 | {{- end }} 34 | {{- else }} 35 | {{ "No matching pods" | red | bold }}: Service selector either doens't match any Pods or the Service's targetPort doesn't match the Pod's port. 36 | {{- end }} 37 | {{- else }} 38 | {{ "Missing Endpoint" | red | bold }}: Service has no matching endpoint. 
39 | {{- end }} 40 | {{- template "recent_updates" . }} 41 | {{- template "matching_ingresses" . }} 42 | {{- template "events" . }} 43 | {{- template "owners" . }} 44 | {{- end -}} 45 | 46 | {{- define "matching_ingresses" }} 47 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 48 | {{- if .Config.GetBool "include-matching-ingresses" }} 49 | {{- range $index, $ing := .KubeGetIngressesMatchingService .Namespace .Name }} 50 | {{- if eq $index 0 }} 51 | {{- "Ingresses matching this Service:" | nindent 2}} 52 | {{- end }} 53 | {{- $.Include "Ingress" $ing | nindent 4 }} 54 | {{- end }} 55 | {{- end }} 56 | {{- end }} 57 | 58 | {{- define "endpoint_subset_address" }} 59 | {{- /* Expects to get a map with these keys: 60 | * address: either Endpoints.subsets.addresses[N] or Endpoints.subsets.notReadyAddresses 61 | * ports: Endpoints.subsets.ports 62 | */ -}} 63 | {{- $ip := .address.ip | ip }} 64 | {{- $hasTargetRef := not (not .address.targetRef) }} 65 | {{- if .address.targetRef }} 66 | {{- .address.targetRef.kind | bold }}/{{ .address.targetRef.name }} 67 | {{- with .address.targetRef.namespace }} -n {{ . }}{{ end }} 68 | {{- with .address.nodeName }} on {{ "Node" | bold }}/{{ . }}{{ end }} 69 | {{- end }} 70 | {{- range $index, $port := .ports }} 71 | {{- if $hasTargetRef }}, {{ else }}{{if $index}}, {{end}}{{ end }} 72 | {{- $ip }}:{{ $port.port }}/{{ $port.protocol }}{{ with $port.name }} ({{ . }}){{ end }} 73 | {{- end }} 74 | {{- end -}} -------------------------------------------------------------------------------- /pkg/plugin/templates/StatefulSet.tmpl: -------------------------------------------------------------------------------- 1 | {{- define "StatefulSet" }} 2 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 3 | {{- template "status_summary_line" . }} 4 | {{- template "kstatus_summary" . }} 5 | {{- template "finalizer_details_on_termination" . 
}} 6 | {{- template "observed_generation_summary" . }} 7 | {{- template "application_details" . }} 8 | {{- /* When there is no readyReplicas, STS may not have related fields at all */ -}} 9 | {{- $status := .Status }} 10 | {{- $_ := set $status "readyReplicas" ($status.readyReplicas | default 0) }} 11 | {{- $_ := set $status "currentReplicas" ($status.replicas | default 0) }} 12 | {{- $_ := set .Object "status" $status }} 13 | {{- template "replicas_status" . }} 14 | {{- template "suspended" . }} 15 | {{- template "conditions_summary" . }} 16 | {{- $rolloutStatus := .RolloutStatus . }} 17 | {{- if not $status.readyReplicas }} 18 | {{- "Outage" | red | bold | nindent 2 }}: StatefulSet has no Ready replicas. 19 | {{- if and $status.currentRevision (eq ($status.observedGeneration | default 1) 1) }} 20 | {{- "Stuck Initial Rollout?" | yellow | bold | nindent 2 }} First rollout not yet progressed. 21 | {{- end }} 22 | {{- else if ne $status.currentReplicas $status.readyReplicas }} 23 | {{- if $rolloutStatus.done }} 24 | {{- "Not Ready Replicas" | yellow | bold | nindent 2 }}: {{ sub $status.currentReplicas $status.readyReplicas }} replicas are not Ready. 25 | {{- end }} 26 | {{- end }} 27 | {{- if not $rolloutStatus.done }} 28 | {{- "Ongoing rollout" | yellow | bold | redBoldIf $rolloutStatus.error | nindent 2 }} 29 | {{- with $rolloutStatus.message }}: {{ . | yellow }}{{ end }} 30 | {{- with $rolloutStatus.error }}: Error: {{ . | red }}{{ end }} 31 | {{- if eq ($status.updatedCount | default 0) 0 }} 32 | {{- "Stuck Rollout?" | yellow | bold | nindent 2 }}: Still replacing the first Pod, may indicate a stuck rollout. 33 | {{- end }} 34 | {{- if and (hasKey $status "currentRevision") (hasKey $status "updateRevision") }} 35 | {{- with .KubeGetUnifiedDiffString "ControllerRevision" .Namespace $status.currentRevision $status.updateRevision }} 36 | {{- "Active Rollout Diff:" | nindent 2 }} 37 | {{- . 
| markRed "^-.*" | markGreen "^\\+.*" | nindent 4 }} 38 | {{- end }} 39 | {{- end }} 40 | {{- else }} 41 | {{- if and .Spec.replicas (eq $status.readyReplicas 0) }} 42 | {{- "Stuck rollout" | red | bold | nindent 2 }}: No ready replicas, this StatefulSet won't likely go further. 43 | {{- end }} 44 | {{- end }} 45 | {{- template "recent_statefulset_rollouts" . }} 46 | {{- template "recent_updates" . }} 47 | {{- template "events" . }} 48 | {{- template "owners" . }} 49 | {{- end }} 50 | 51 | {{- define "recent_statefulset_rollouts" }} 52 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 53 | {{- $sectionHeader := false }} 54 | {{- $previousRevision := "" }} 55 | {{- range .KubeGetByLabelsMap .Namespace "controllerrevisions" .Labels }} 56 | {{- /*gotype: github.com/bergerx/kubectl-status/pkg/plugin.RenderableObject*/ -}} 57 | {{- if eq (index .Metadata.ownerReferences 0).name $.Name }} 58 | {{- if not $sectionHeader }} 59 | {{- "Rollouts:" | nindent 2 }} 60 | {{- template "rollout_diffs_flag_help" $ }} 61 | {{- $sectionHeader = true }} 62 | {{- end }} 63 | {{- "" | nindent 4 }} 64 | {{- with .Metadata.creationTimestamp }}{{ . | colorAgo }} ago{{ end }} used {{ .Kind | bold }}/{{ .Name }}. 65 | {{- if and $previousRevision ($.Config.GetBool "include-rollout-diffs") }} 66 | {{- with $.KubeGetUnifiedDiffString "ControllerRevision" $.Namespace $previousRevision.Name .Name }} 67 | {{- . | markRed "^-.*" | markGreen "^\\+.*" | nindent 6 }} 68 | {{- end }} 69 | {{- end }} 70 | {{- $previousRevision = . 
}} 71 | {{- end }} 72 | {{- end }} 73 | {{- end }} 74 | -------------------------------------------------------------------------------- /pkg/plugin/templates_common_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | "k8s.io/cli-runtime/pkg/genericiooptions" 8 | "k8s.io/client-go/rest/fake" 9 | cmdtesting "k8s.io/kubectl/pkg/cmd/testing" 10 | 11 | "github.com/bergerx/kubectl-status/pkg/input" 12 | ) 13 | 14 | func checkTemplate(t *testing.T, templateName string, obj map[string]interface{}, shouldContain string, useRenderable bool) { 15 | t.Helper() 16 | tmpl, _ := getTemplate() 17 | f := cmdtesting.NewTestFactory().WithNamespace("test") 18 | f.Client = &fake.RESTClient{} 19 | f.UnstructuredClient = f.Client 20 | t.Cleanup(func() { f.Cleanup() }) 21 | repo, _ := input.NewResourceRepo(f) 22 | e, _ := newRenderEngine(genericiooptions.NewTestIOStreamsDiscard()) 23 | e.Template = *tmpl 24 | r := newRenderableObject(obj, e, repo) 25 | var objToPassTemplate interface{} 26 | if useRenderable { 27 | objToPassTemplate = r 28 | } else { 29 | objToPassTemplate = obj 30 | } 31 | got, err := r.renderTemplate(templateName, objToPassTemplate) 32 | if err != nil { 33 | t.Errorf("renderTemplate() error = %v", err) 34 | return 35 | } 36 | if !strings.Contains(got, shouldContain) { 37 | t.Errorf("template 'suspended' got = %v, shouldContain = %v", got, shouldContain) 38 | return 39 | } 40 | } 41 | 42 | func TestObservedGenerationSummaryTemplate(t *testing.T) { 43 | tests := []struct { 44 | name string 45 | obj map[string]interface{} 46 | want string 47 | }{ 48 | { 49 | name: "neither generation or observed generation", 50 | obj: map[string]interface{}{}, 51 | want: "", 52 | }, { 53 | name: "has generation but no observed", 54 | obj: map[string]interface{}{ 55 | "metadata": map[string]interface{}{ 56 | "generation": 1, 57 | }, 58 | }, 59 | want: "", 60 | }, { 61 | name: "has 
matching generation and observed", 62 | obj: map[string]interface{}{ 63 | "metadata": map[string]interface{}{ 64 | "generation": 1, 65 | }, 66 | "status": map[string]interface{}{ 67 | "observedGeneration": 1, 68 | }, 69 | }, 70 | want: "", 71 | }, { 72 | name: "generation and observed are not matching", 73 | obj: map[string]interface{}{ 74 | "metadata": map[string]interface{}{ 75 | "generation": 2, 76 | }, 77 | "status": map[string]interface{}{ 78 | "observedGeneration": 1, 79 | }, 80 | }, 81 | want: "doesn't match", 82 | }, 83 | } 84 | for _, tt := range tests { 85 | t.Run(tt.name, func(t *testing.T) { 86 | checkTemplate(t, "observed_generation_summary", tt.obj, tt.want, true) 87 | }) 88 | } 89 | } 90 | 91 | func TestSuspendTemplate(t *testing.T) { 92 | tests := []struct { 93 | name string 94 | obj map[string]interface{} 95 | want string 96 | }{ 97 | { 98 | name: "replicas set to 0", 99 | obj: map[string]interface{}{ 100 | "spec": map[string]interface{}{ 101 | "replicas": 0, 102 | }, 103 | }, 104 | want: "Suspended", 105 | }, { 106 | name: "replicas set to 1", 107 | obj: map[string]interface{}{ 108 | "spec": map[string]interface{}{ 109 | "replicas": 1, 110 | }, 111 | }, 112 | want: "", 113 | }, { 114 | name: "no replicas field should do nothing", 115 | obj: map[string]interface{}{ 116 | "spec": map[string]interface{}{}, 117 | }, 118 | want: "", 119 | }, 120 | } 121 | for _, tt := range tests { 122 | t.Run(tt.name, func(t *testing.T) { 123 | checkTemplate(t, "suspended", tt.obj, tt.want, true) 124 | }) 125 | } 126 | } 127 | 128 | func TestOwnersTemplate(t *testing.T) { 129 | tests := []struct { 130 | name string 131 | obj map[string]interface{} 132 | want string 133 | }{ 134 | { 135 | name: "has no owners", 136 | obj: map[string]interface{}{}, 137 | want: "", 138 | }, { 139 | name: "has empty owner references field", 140 | obj: map[string]interface{}{ 141 | "metadata": map[string]interface{}{ 142 | "ownerReferences": []map[string]interface{}{ 143 | {}, 144 | }, 145 
| }, 146 | }, 147 | want: "", 148 | }, { 149 | name: "has an owner reference", 150 | obj: map[string]interface{}{ 151 | "metadata": map[string]interface{}{ 152 | "ownerReferences": []map[string]interface{}{ 153 | { 154 | "apiVersion": "apps/v1", 155 | "blockOwnerDeletion": true, 156 | "controller": true, 157 | "kind": "ReplicaSet", 158 | "name": "coredns-558bd4d5db", 159 | }, 160 | }, 161 | }, 162 | }, 163 | want: "", 164 | }, 165 | } 166 | for _, tt := range tests { 167 | t.Run(tt.name, func(t *testing.T) { 168 | checkTemplate(t, "owners", tt.obj, tt.want, true) 169 | }) 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /staticcheck.conf: -------------------------------------------------------------------------------- 1 | checks = ["all", "-ST1000"] 2 | -------------------------------------------------------------------------------- /tests/artifacts/README.md: -------------------------------------------------------------------------------- 1 | # Test artifacts 2 | 3 | This folder holds various example yaml files and their rendered outputs (without color). 4 | We use these to track/verify the impact of the changes we introduce to the templates. 5 | Every time we change a template, it's expected to update corresponding out files here. 6 | 7 | # Re-generate all the "*.out" files 8 | 9 | ```bash 10 | cd ../.. 11 | for yaml in ./tests/artifacts/*.yaml; do 12 | out=$(echo ${yaml} | sed 's/.yaml/.out/') 13 | echo "${yaml} --> ${out}" 14 | go run ./cmd --test-hack -f ${yaml} --local --shallow > ${out} 15 | done 16 | ``` 17 | 18 | # Adding a new case 19 | 20 | First generate the artifact file using a command like this: 21 | 22 | ```bash 23 | cmd="" # the command line parameters for generating the new manifest, e.g.: -n default node,service 24 | file="" # filename for the new artifact file to be stored, e.g. node-and-service 25 | 26 | cd ../.. 
27 | kubectl get -o yaml ${cmd} > tests/artifacts/${file}.yaml 28 | go run ./cmd --test-hack ${cmd} --shallow > tests/artifacts/${file}.out 29 | make test 30 | git add tests/artifacts/${file}.yaml tests/artifacts/${file}.out 31 | ``` 32 | -------------------------------------------------------------------------------- /tests/artifacts/cr-dbconn-mymysql-deleted.out: -------------------------------------------------------------------------------- 1 | 2 | DatabaseConnection/mymysql -n default, created 1m ago, gen:2 3 | Terminating: Resource scheduled for deletion 4 | Finalizers: dbconn-operator1, dbconn-operator2 5 | Observed generation(1) doesn't match generation(2) 6 | This usually means related controller has not yet reconciled this resource! 7 | Ready ConnectionEstablished, Database connection successfully established. for 1m 8 | -------------------------------------------------------------------------------- /tests/artifacts/cr-dbconn-mymysql-deleted.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: example.com/v1alpha1 2 | kind: DatabaseConnection 3 | metadata: 4 | creationTimestamp: "2024-07-25T19:58:20Z" 5 | deletionTimestamp: "2024-07-25T19:59:20Z" 6 | generation: 2 7 | name: mymysql 8 | namespace: default 9 | finalizers: 10 | - dbconn-operator1 11 | - dbconn-operator2 12 | resourceVersion: "1330976" 13 | uid: be814fad-43ba-4d86-bc5a-f4fb471af6ae 14 | spec: 15 | defaultSchema: myapp 16 | engine: mysql 17 | rootPassword: password123 18 | rootUser: admin 19 | status: 20 | observedGeneration: 1 21 | conditions: 22 | - lastTransitionTime: "2024-07-25T15:27:00Z" 23 | message: Database connection successfully established. 
24 | reason: ConnectionEstablished 25 | status: "True" 26 | type: Ready 27 | -------------------------------------------------------------------------------- /tests/artifacts/cr-dbconn-mymysql.out: -------------------------------------------------------------------------------- 1 | 2 | DatabaseConnection/mymysql -n default, created 1m ago 3 | Current: Resource is Ready 4 | Ready ConnectionEstablished, Database connection successfully established. for 1m 5 | -------------------------------------------------------------------------------- /tests/artifacts/cr-dbconn-mymysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: example.com/v1alpha1 2 | kind: DatabaseConnection 3 | metadata: 4 | creationTimestamp: "2024-07-25T19:58:20Z" 5 | name: mymysql 6 | namespace: default 7 | resourceVersion: "1330976" 8 | uid: be814fad-43ba-4d86-bc5a-f4fb471af6ae 9 | spec: 10 | defaultSchema: myapp 11 | engine: mysql 12 | rootPassword: password123 13 | rootUser: admin 14 | status: 15 | observedGeneration: 1 16 | conditions: 17 | - lastTransitionTime: "2024-07-25T15:27:00Z" 18 | message: Database connection successfully established. 
19 | reason: ConnectionEstablished 20 | status: "True" 21 | type: Ready 22 | -------------------------------------------------------------------------------- /tests/artifacts/crd-dbconn.out: -------------------------------------------------------------------------------- 1 | 2 | CustomResourceDefinition/databaseconnections.example.com, created 1m ago, gen:2 3 | Current: CRD is established 4 | NamesAccepted NoConflicts, no conflicts found for 1m 5 | Established InitialNamesAccepted, the initial names have been accepted for 1m 6 | -------------------------------------------------------------------------------- /tests/artifacts/crd-dbconn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | creationTimestamp: "2024-07-25T19:51:55Z" 5 | generation: 2 6 | name: databaseconnections.example.com 7 | resourceVersion: "1330873" 8 | uid: fdad7e62-bef1-44ba-9a45-fc2030028fc6 9 | spec: 10 | conversion: 11 | strategy: None 12 | group: example.com 13 | names: 14 | kind: DatabaseConnection 15 | listKind: DatabaseConnectionList 16 | plural: databaseconnections 17 | shortNames: 18 | - dbconn 19 | singular: databaseconnection 20 | scope: Namespaced 21 | versions: 22 | - name: v1alpha1 23 | served: true 24 | storage: true 25 | subresources: 26 | status: {} 27 | schema: 28 | openAPIV3Schema: 29 | properties: 30 | spec: 31 | properties: 32 | defaultSchema: 33 | type: string 34 | engine: 35 | enum: 36 | - mysql 37 | - postgresql 38 | type: string 39 | rootPassword: 40 | type: string 41 | rootUser: 42 | type: string 43 | type: object 44 | status: 45 | properties: 46 | conditions: 47 | items: 48 | properties: 49 | lastTransitionTime: 50 | description: Last time the condition transitioned from one status 51 | to another. 
52 | format: date-time 53 | type: string 54 | message: 55 | description: Human-readable message indicating details about 56 |   the last transition. 57 | type: string 58 | observedGeneration: 59 | description: Last generation that the condition was updated. 60 | type: integer 61 | reason: 62 | description: Unique, one-word, CamelCase reason for the condition's 63 | last transition. 64 | type: string 65 | status: 66 | description: Status   of the condition, one of True, False, 67 | Unknown. 68 | type: string 69 | type: 70 | description: Type of the condition. 71 | type: string 72 | type: object 73 | type: array 74 | observedGeneration: 75 | description: Most recent generation observed by the controller. 76 | type: integer 77 | type: object 78 | type: object 79 | status: 80 | acceptedNames: 81 | kind: DatabaseConnection 82 | listKind: DatabaseConnectionList 83 | plural: databaseconnections 84 | shortNames: 85 | - dbconn 86 | singular: databaseconnection 87 | conditions: 88 | - lastTransitionTime: "2024-07-25T19:51:55Z" 89 | message: no conflicts found 90 | reason: NoConflicts 91 | status: "True" 92 | type: NamesAccepted 93 | - lastTransitionTime: "2024-07-25T19:51:55Z" 94 | message: the initial names have been accepted 95 | reason: InitialNamesAccepted 96 | status: "True" 97 | type: Established 98 | storedVersions: 99 | - v1alpha1 100 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-active.out: -------------------------------------------------------------------------------- 1 | 2 | CronJob/hello -n default, created 1m ago, last ran at 2020-03-18T00:47:00Z (1m ago) 3 | Active: Job/hello-1584492420 is running. 
4 | Current: Resource is always ready 5 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-active.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:46:56Z" 5 | name: hello 6 | namespace: default 7 | resourceVersion: "305988" 8 | selfLink: /apis/batch/v1beta1/namespaces/default/cronjobs/hello 9 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 10 | spec: 11 | concurrencyPolicy: Allow 12 | failedJobsHistoryLimit: 1 13 | jobTemplate: 14 | metadata: 15 | creationTimestamp: null 16 | spec: 17 | template: 18 | metadata: 19 | creationTimestamp: null 20 | spec: 21 | containers: 22 | - args: 23 | - /bin/sh 24 | - -c 25 | - date; echo Hello from the Kubernetes cluster 26 | image: busybox 27 | imagePullPolicy: Always 28 | name: hello 29 | resources: {} 30 | terminationMessagePath: /dev/termination-log 31 | terminationMessagePolicy: File 32 | dnsPolicy: ClusterFirst 33 | restartPolicy: OnFailure 34 | schedulerName: default-scheduler 35 | securityContext: {} 36 | terminationGracePeriodSeconds: 30 37 | schedule: '*/1 * * * *' 38 | successfulJobsHistoryLimit: 3 39 | suspend: false 40 | status: 41 | active: 42 | - apiVersion: batch/v1 43 | kind: Job 44 | name: hello-1584492420 45 | namespace: default 46 | resourceVersion: "305985" 47 | uid: 70e69d69-0c54-4626-8b33-b81d140e5d3d 48 | lastScheduleTime: "2020-03-18T00:47:00Z" 49 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-new.out: -------------------------------------------------------------------------------- 1 | 2 | CronJob/hello -n default, created 1m ago 3 | Not yet scheduled 4 | Current: Resource is always ready 5 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-new.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:46:56Z" 5 | name: hello 6 | namespace: default 7 | resourceVersion: "305962" 8 | selfLink: /apis/batch/v1beta1/namespaces/default/cronjobs/hello 9 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 10 | spec: 11 | concurrencyPolicy: Allow 12 | failedJobsHistoryLimit: 1 13 | jobTemplate: 14 | metadata: 15 | creationTimestamp: null 16 | spec: 17 | template: 18 | metadata: 19 | creationTimestamp: null 20 | spec: 21 | containers: 22 | - args: 23 | - /bin/sh 24 | - -c 25 | - date; echo Hello from the Kubernetes cluster 26 | image: busybox 27 | imagePullPolicy: Always 28 | name: hello 29 | resources: {} 30 | terminationMessagePath: /dev/termination-log 31 | terminationMessagePolicy: File 32 | dnsPolicy: ClusterFirst 33 | restartPolicy: OnFailure 34 | schedulerName: default-scheduler 35 | securityContext: {} 36 | terminationGracePeriodSeconds: 30 37 | schedule: '*/1 * * * *' 38 | successfulJobsHistoryLimit: 3 39 | suspend: false 40 | status: {} 41 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-scheduled-and-active.out: -------------------------------------------------------------------------------- 1 | 2 | CronJob/hello -n default, created 1m ago, last ran at 2020-03-18T00:48:00Z (1m ago) 3 | Active: Job/hello-1584492480 is running. 
4 | Current: Resource is always ready 5 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-scheduled-and-active.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:46:56Z" 5 | name: hello 6 | namespace: default 7 | resourceVersion: "306136" 8 | selfLink: /apis/batch/v1beta1/namespaces/default/cronjobs/hello 9 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 10 | spec: 11 | concurrencyPolicy: Allow 12 | failedJobsHistoryLimit: 1 13 | jobTemplate: 14 | metadata: 15 | creationTimestamp: null 16 | spec: 17 | template: 18 | metadata: 19 | creationTimestamp: null 20 | spec: 21 | containers: 22 | - args: 23 | - /bin/sh 24 | - -c 25 | - date; echo Hello from the Kubernetes cluster 26 | image: busybox 27 | imagePullPolicy: Always 28 | name: hello 29 | resources: {} 30 | terminationMessagePath: /dev/termination-log 31 | terminationMessagePolicy: File 32 | dnsPolicy: ClusterFirst 33 | restartPolicy: OnFailure 34 | schedulerName: default-scheduler 35 | securityContext: {} 36 | terminationGracePeriodSeconds: 30 37 | schedule: '*/1 * * * *' 38 | successfulJobsHistoryLimit: 3 39 | suspend: false 40 | status: 41 | active: 42 | - apiVersion: batch/v1 43 | kind: Job 44 | name: hello-1584492480 45 | namespace: default 46 | resourceVersion: "306135" 47 | uid: c277f8b0-1497-40a8-9da1-b95dd3be81fc 48 | lastScheduleTime: "2020-03-18T00:48:00Z" 49 | -------------------------------------------------------------------------------- /tests/artifacts/cronjob-regular-scheduled.out: -------------------------------------------------------------------------------- 1 | 2 | CronJob/hello -n default, created 1m ago, last ran at 2020-03-18T00:47:00Z (1m ago) 3 | Current: Resource is always ready 4 | -------------------------------------------------------------------------------- 
/tests/artifacts/cronjob-regular-scheduled.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:46:56Z" 5 | name: hello 6 | namespace: default 7 | resourceVersion: "306023" 8 | selfLink: /apis/batch/v1beta1/namespaces/default/cronjobs/hello 9 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 10 | spec: 11 | concurrencyPolicy: Allow 12 | failedJobsHistoryLimit: 1 13 | jobTemplate: 14 | metadata: 15 | creationTimestamp: null 16 | spec: 17 | template: 18 | metadata: 19 | creationTimestamp: null 20 | spec: 21 | containers: 22 | - args: 23 | - /bin/sh 24 | - -c 25 | - date; echo Hello from the Kubernetes cluster 26 | image: busybox 27 | imagePullPolicy: Always 28 | name: hello 29 | resources: {} 30 | terminationMessagePath: /dev/termination-log 31 | terminationMessagePolicy: File 32 | dnsPolicy: ClusterFirst 33 | restartPolicy: OnFailure 34 | schedulerName: default-scheduler 35 | securityContext: {} 36 | terminationGracePeriodSeconds: 30 37 | schedule: '*/1 * * * *' 38 | successfulJobsHistoryLimit: 3 39 | suspend: false 40 | status: 41 | lastScheduleTime: "2020-03-18T00:47:00Z" 42 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-healthy.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:1 rev:1 3 | Current: Deployment is available. Replicas: 3 4 | desired:3, existing:3, ready:3, updated:3, available:3 5 | Available MinimumReplicasAvailable, Deployment has minimum availability. for 1m 6 | Progressing NewReplicaSetAvailable, ReplicaSet "httpbin-deployment-79f6dfbb9" has successfully progressed. 
for 1m 7 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-healthy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: "2020-03-18T01:24:09Z" 7 | generation: 1 8 | labels: 9 | run: httpbin-deployment 10 | name: httpbin-deployment 11 | namespace: test1 12 | resourceVersion: "311461" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 14 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 3 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: httpbin-deployment 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: httpbin-deployment 32 | spec: 33 | containers: 34 | - image: kennethreitz/httpbin 35 | imagePullPolicy: Always 36 | name: httpbin-deployment 37 | ports: 38 | - containerPort: 80 39 | protocol: TCP 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | dnsPolicy: ClusterFirst 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | terminationGracePeriodSeconds: 30 48 | status: 49 | availableReplicas: 3 50 | conditions: 51 | - lastTransitionTime: "2020-03-18T01:24:49Z" 52 | lastUpdateTime: "2020-03-18T01:24:49Z" 53 | message: Deployment has minimum availability. 54 | reason: MinimumReplicasAvailable 55 | status: "True" 56 | type: Available 57 | - lastTransitionTime: "2020-03-18T01:24:09Z" 58 | lastUpdateTime: "2020-03-18T01:24:49Z" 59 | message: ReplicaSet "httpbin-deployment-79f6dfbb9" has successfully progressed. 
60 | reason: NewReplicaSetAvailable 61 | status: "True" 62 | type: Progressing 63 | observedGeneration: 1 64 | readyReplicas: 3 65 | replicas: 3 66 | updatedReplicas: 3 67 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-initial-progressing.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:1 rev:1 3 | InProgress: Available: 1/3 4 | Reconciling: LessAvailable, Available: 1/3 5 | desired:3, existing:3, ready:1, updated:3, available:1, unavailable:2 6 | Available MinimumReplicasUnavailable, Deployment does not have minimum availability. for 1m 7 | Progressing ReplicaSetUpdated, ReplicaSet "httpbin-deployment-79f6dfbb9" is progressing. for 1m 8 | Ongoing Rollout: Waiting for deployment "httpbin-deployment" rollout to finish: 1 of 3 updated replicas are available... 9 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-initial-progressing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: "2020-03-18T01:24:09Z" 7 | generation: 1 8 | labels: 9 | run: httpbin-deployment 10 | name: httpbin-deployment 11 | namespace: test1 12 | resourceVersion: "311441" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 14 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 3 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: httpbin-deployment 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: httpbin-deployment 32 | spec: 33 | containers: 34 | - image: 
kennethreitz/httpbin 35 | imagePullPolicy: Always 36 | name: httpbin-deployment 37 | ports: 38 | - containerPort: 80 39 | protocol: TCP 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | dnsPolicy: ClusterFirst 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | terminationGracePeriodSeconds: 30 48 | status: 49 | availableReplicas: 1 50 | conditions: 51 | - lastTransitionTime: "2020-03-18T01:24:09Z" 52 | lastUpdateTime: "2020-03-18T01:24:09Z" 53 | message: Deployment does not have minimum availability. 54 | reason: MinimumReplicasUnavailable 55 | status: "False" 56 | type: Available 57 | - lastTransitionTime: "2020-03-18T01:24:09Z" 58 | lastUpdateTime: "2020-03-18T01:24:47Z" 59 | message: ReplicaSet "httpbin-deployment-79f6dfbb9" is progressing. 60 | reason: ReplicaSetUpdated 61 | status: "True" 62 | type: Progressing 63 | observedGeneration: 1 64 | readyReplicas: 1 65 | replicas: 3 66 | unavailableReplicas: 2 67 | updatedReplicas: 3 68 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-new.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:1 3 | InProgress: Replicas: 0/3 4 | Reconciling: LessReplicas, Replicas: 0/3 5 | desired:3, ready:0, updated:0, available:0 6 | Ongoing Rollout: Waiting for deployment spec update to be observed... 7 | Outage: Deployment has no Ready replicas. 
8 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-new.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | creationTimestamp: "2020-03-18T01:24:09Z" 5 | generation: 1 6 | labels: 7 | run: httpbin-deployment 8 | name: httpbin-deployment 9 | namespace: test1 10 | resourceVersion: "311323" 11 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 12 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 13 | spec: 14 | progressDeadlineSeconds: 600 15 | replicas: 3 16 | revisionHistoryLimit: 10 17 | selector: 18 | matchLabels: 19 | run: httpbin-deployment 20 | strategy: 21 | rollingUpdate: 22 | maxSurge: 25% 23 | maxUnavailable: 25% 24 | type: RollingUpdate 25 | template: 26 | metadata: 27 | creationTimestamp: null 28 | labels: 29 | run: httpbin-deployment 30 | spec: 31 | containers: 32 | - image: kennethreitz/httpbin 33 | imagePullPolicy: Always 34 | name: httpbin-deployment 35 | ports: 36 | - containerPort: 80 37 | protocol: TCP 38 | resources: {} 39 | terminationMessagePath: /dev/termination-log 40 | terminationMessagePolicy: File 41 | dnsPolicy: ClusterFirst 42 | restartPolicy: Always 43 | schedulerName: default-scheduler 44 | securityContext: {} 45 | terminationGracePeriodSeconds: 30 46 | status: {} 47 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-non-existing-image.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/missing-image -n test1, created 1m ago, gen:1 rev:1 3 | InProgress: Available: 0/1 4 | Reconciling: LessAvailable, Available: 0/1 5 | desired:1, existing:1, ready:0, updated:1, available:0, unavailable:1 6 | Available MinimumReplicasUnavailable, Deployment does not have minimum availability. 
for 1m 7 | Progressing ReplicaSetUpdated, ReplicaSet "missing-image-755c8c54f7" is progressing. for 1m 8 | Ongoing Rollout: Waiting for deployment "missing-image" rollout to finish: 0 of 1 updated replicas are available... 9 | Outage: Deployment has no Ready replicas. 10 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-non-existing-image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: "2020-03-18T13:07:42Z" 7 | generation: 1 8 | labels: 9 | run: missing-image 10 | name: missing-image 11 | namespace: test1 12 | resourceVersion: "347027" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/missing-image 14 | uid: 4d11ce88-1f23-400d-81c2-ed4f8ac10faa 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 1 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: missing-image 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: missing-image 32 | spec: 33 | containers: 34 | - image: this-image-doesnt-exist 35 | imagePullPolicy: Always 36 | name: missing-image 37 | resources: {} 38 | terminationMessagePath: /dev/termination-log 39 | terminationMessagePolicy: File 40 | dnsPolicy: ClusterFirst 41 | restartPolicy: Always 42 | schedulerName: default-scheduler 43 | securityContext: {} 44 | terminationGracePeriodSeconds: 30 45 | status: 46 | conditions: 47 | - lastTransitionTime: "2020-03-18T13:07:42Z" 48 | lastUpdateTime: "2020-03-18T13:07:42Z" 49 | message: Deployment does not have minimum availability. 
50 | reason: MinimumReplicasUnavailable 51 | status: "False" 52 | type: Available 53 | - lastTransitionTime: "2020-03-18T13:07:42Z" 54 | lastUpdateTime: "2020-03-18T13:07:42Z" 55 | message: ReplicaSet "missing-image-755c8c54f7" is progressing. 56 | reason: ReplicaSetUpdated 57 | status: "True" 58 | type: Progressing 59 | observedGeneration: 1 60 | replicas: 1 61 | unavailableReplicas: 1 62 | updatedReplicas: 1 63 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-ongoing-rollout.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:2 rev:2 3 | InProgress: Updated: 1/3 4 | Reconciling: LessUpdated, Updated: 1/3 5 | desired:3, existing:4, ready:3, updated:1, available:3, unavailable:1 6 | Available MinimumReplicasAvailable, Deployment has minimum availability. for 1m 7 | Progressing ReplicaSetUpdated, ReplicaSet "httpbin-deployment-d9b875c5b" is progressing. for 1m 8 | Ongoing Rollout: Waiting for deployment "httpbin-deployment" rollout to finish: 1 out of 3 new replicas have been updated... 
9 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-ongoing-rollout.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "2" 6 | creationTimestamp: "2020-03-18T01:24:09Z" 7 | generation: 2 8 | labels: 9 | run: httpbin-deployment 10 | name: httpbin-deployment 11 | namespace: test1 12 | resourceVersion: "313327" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 14 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 3 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: httpbin-deployment 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: httpbin-deployment 32 | trigger: rollout 33 | spec: 34 | containers: 35 | - image: kennethreitz/httpbin 36 | imagePullPolicy: Always 37 | name: httpbin-deployment 38 | ports: 39 | - containerPort: 80 40 | protocol: TCP 41 | resources: {} 42 | terminationMessagePath: /dev/termination-log 43 | terminationMessagePolicy: File 44 | dnsPolicy: ClusterFirst 45 | restartPolicy: Always 46 | schedulerName: default-scheduler 47 | securityContext: {} 48 | terminationGracePeriodSeconds: 30 49 | status: 50 | availableReplicas: 3 51 | conditions: 52 | - lastTransitionTime: "2020-03-18T01:24:49Z" 53 | lastUpdateTime: "2020-03-18T01:24:49Z" 54 | message: Deployment has minimum availability. 55 | reason: MinimumReplicasAvailable 56 | status: "True" 57 | type: Available 58 | - lastTransitionTime: "2020-03-18T01:24:09Z" 59 | lastUpdateTime: "2020-03-18T01:38:57Z" 60 | message: ReplicaSet "httpbin-deployment-d9b875c5b" is progressing. 
61 | reason: ReplicaSetUpdated 62 | status: "True" 63 | type: Progressing 64 | observedGeneration: 2 65 | readyReplicas: 3 66 | replicas: 4 67 | unavailableReplicas: 1 68 | updatedReplicas: 1 69 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-progressing.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:1 rev:1 3 | InProgress: Replicas: 0/3 4 | Reconciling: LessReplicas, Replicas: 0/3 5 | desired:3, ready:0, updated:0, available:0 6 | Progressing NewReplicaSetCreated, Created new replica set "httpbin-deployment-79f6dfbb9" for 1m 7 | Ongoing Rollout: Waiting for deployment spec update to be observed... 8 | Outage: Deployment has no Ready replicas. 9 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-progressing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: "2020-03-18T01:24:09Z" 7 | generation: 1 8 | labels: 9 | run: httpbin-deployment 10 | name: httpbin-deployment 11 | namespace: test1 12 | resourceVersion: "311325" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 14 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 3 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: httpbin-deployment 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: httpbin-deployment 32 | spec: 33 | containers: 34 | - image: kennethreitz/httpbin 35 | imagePullPolicy: Always 36 | name: httpbin-deployment 37 | ports: 38 | - containerPort: 80 39 | protocol: 
TCP 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | dnsPolicy: ClusterFirst 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | terminationGracePeriodSeconds: 30 48 | status: 49 | conditions: 50 | - lastTransitionTime: "2020-03-18T01:24:09Z" 51 | lastUpdateTime: "2020-03-18T01:24:09Z" 52 | message: Created new replica set "httpbin-deployment-79f6dfbb9" 53 | reason: NewReplicaSetCreated 54 | status: "True" 55 | type: Progressing 56 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-unavailable-replicas.out: -------------------------------------------------------------------------------- 1 | 2 | Deployment/httpbin-deployment -n test1, created 1m ago, gen:1 rev:1 3 | InProgress: Replicas: 0/3 4 | Reconciling: LessReplicas, Replicas: 0/3 5 | desired:3, ready:0, updated:0, available:0, unavailable:3 6 | Progressing NewReplicaSetCreated, Created new replica set "httpbin-deployment-79f6dfbb9" for 1m 7 | Available MinimumReplicasUnavailable, Deployment does not have minimum availability. for 1m 8 | Ongoing Rollout: Waiting for deployment "httpbin-deployment" rollout to finish: 0 out of 3 new replicas have been updated... 9 | Outage: Deployment has no Ready replicas. 
10 | -------------------------------------------------------------------------------- /tests/artifacts/deployment-unavailable-replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: "2020-03-18T01:24:09Z" 7 | generation: 1 8 | labels: 9 | run: httpbin-deployment 10 | name: httpbin-deployment 11 | namespace: test1 12 | resourceVersion: "311330" 13 | selfLink: /apis/apps/v1/namespaces/test1/deployments/httpbin-deployment 14 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 3 18 | revisionHistoryLimit: 10 19 | selector: 20 | matchLabels: 21 | run: httpbin-deployment 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | run: httpbin-deployment 32 | spec: 33 | containers: 34 | - image: kennethreitz/httpbin 35 | imagePullPolicy: Always 36 | name: httpbin-deployment 37 | ports: 38 | - containerPort: 80 39 | protocol: TCP 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | dnsPolicy: ClusterFirst 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | terminationGracePeriodSeconds: 30 48 | status: 49 | conditions: 50 | - lastTransitionTime: "2020-03-18T01:24:09Z" 51 | lastUpdateTime: "2020-03-18T01:24:09Z" 52 | message: Created new replica set "httpbin-deployment-79f6dfbb9" 53 | reason: NewReplicaSetCreated 54 | status: "True" 55 | type: Progressing 56 | - lastTransitionTime: "2020-03-18T01:24:09Z" 57 | lastUpdateTime: "2020-03-18T01:24:09Z" 58 | message: Deployment does not have minimum availability. 
59 | reason: MinimumReplicasUnavailable 60 | status: "False" 61 | type: Available 62 | observedGeneration: 1 63 | unavailableReplicas: 3 64 | -------------------------------------------------------------------------------- /tests/artifacts/ingress-regular.out: -------------------------------------------------------------------------------- 1 | 2 | Ingress/web -n test1, created 1m ago, gen:2 3 | Current: Resource is current 4 | Service port doesnt exist: Service/web:80 referenced in ingress, but Service doesn't have that port defined. 5 | -------------------------------------------------------------------------------- /tests/artifacts/ingress-regular.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | creationTimestamp: "2020-04-03T19:44:01Z" 5 | generation: 2 6 | name: web 7 | namespace: test1 8 | resourceVersion: "460204" 9 | selfLink: /apis/extensions/v1beta1/namespaces/test1/ingresses/web 10 | uid: 3d7ed81f-e2a6-46bc-84dd-8a2b99511daa 11 | spec: 12 | rules: 13 | - host: web.my.cool.domain.com 14 | http: 15 | paths: 16 | - backend: 17 | serviceName: web 18 | servicePort: 80 19 | path: / 20 | status: 21 | loadBalancer: {} 22 | -------------------------------------------------------------------------------- /tests/artifacts/ingress-with-problems.out: -------------------------------------------------------------------------------- 1 | 2 | Ingress/web -n test1, created 1m ago, gen:1 3 | Current: Resource is current 4 | Service port doesnt exist: Service/web:80 referenced in ingress, but Service doesn't have that port defined. 5 | Service port doesnt exist: Service/not-web:80 referenced in ingress, but Service doesn't have that port defined. 6 | Service port doesnt exist: Service/web:81 referenced in ingress, but Service doesn't have that port defined. 
7 | -------------------------------------------------------------------------------- /tests/artifacts/ingress-with-problems.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: | 6 | {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"web","namespace":"test1"},"spec":{"rules":[{"host":"web.my.cool.domain.com","http":{"paths":[{"backend":{"serviceName":"web","servicePort":80},"path":"/"},{"backend":{"serviceName":"not-web","servicePort":80},"path":"/missing-service"},{"backend":{"serviceName":"web","servicePort":81},"path":"/missing-service-port"}]}}]}} 7 | creationTimestamp: "2020-04-03T19:44:01Z" 8 | generation: 1 9 | name: web 10 | namespace: test1 11 | resourceVersion: "459482" 12 | selfLink: /apis/extensions/v1beta1/namespaces/test1/ingresses/web 13 | uid: 3d7ed81f-e2a6-46bc-84dd-8a2b99511daa 14 | spec: 15 | rules: 16 | - host: web.my.cool.domain.com 17 | http: 18 | paths: 19 | - backend: 20 | serviceName: web 21 | servicePort: 80 22 | path: /not-ready 23 | - backend: 24 | serviceName: not-web 25 | servicePort: 80 26 | path: /missing-service 27 | - backend: 28 | serviceName: web 29 | servicePort: 81 30 | path: /missing-service-port 31 | status: 32 | loadBalancer: {} 33 | backendIssues: 34 | - Backend: 35 | ServiceName: not-web 36 | ServicePort: 80 37 | IssueType: serviceWithNoReadyAddresses 38 | - Backend: 39 | ServiceName: not-web 40 | ServicePort: 80 41 | IssueType: serviceMissing 42 | - Backend: 43 | ServiceName: web 44 | ServicePort: 81 45 | IssueType: serviceWithPortMismatch 46 | -------------------------------------------------------------------------------- /tests/artifacts/job-active.out: -------------------------------------------------------------------------------- 1 | 2 | Job/hello-1584493380 -n default, created 1m ago by CronJob/hello, Active 
3 | Current: Job in progress. success:0, active: 1, failed: 0 4 | -------------------------------------------------------------------------------- /tests/artifacts/job-active.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | creationTimestamp: "2020-03-18T01:03:07Z" 5 | labels: 6 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 7 | job-name: hello-1584493380 8 | name: hello-1584493380 9 | namespace: default 10 | ownerReferences: 11 | - apiVersion: batch/v1beta1 12 | blockOwnerDeletion: true 13 | controller: true 14 | kind: CronJob 15 | name: hello 16 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 17 | resourceVersion: "308421" 18 | selfLink: /apis/batch/v1/namespaces/default/jobs/hello-1584493380 19 | uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 20 | spec: 21 | backoffLimit: 6 22 | completions: 1 23 | parallelism: 1 24 | selector: 25 | matchLabels: 26 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 32 | job-name: hello-1584493380 33 | spec: 34 | containers: 35 | - args: 36 | - /bin/sh 37 | - -c 38 | - date; echo Hello from the Kubernetes cluster 39 | image: busybox 40 | imagePullPolicy: Always 41 | name: hello 42 | resources: {} 43 | terminationMessagePath: /dev/termination-log 44 | terminationMessagePolicy: File 45 | dnsPolicy: ClusterFirst 46 | restartPolicy: OnFailure 47 | schedulerName: default-scheduler 48 | securityContext: {} 49 | terminationGracePeriodSeconds: 30 50 | status: 51 | active: 1 52 | startTime: "2020-03-18T01:03:07Z" 53 | -------------------------------------------------------------------------------- /tests/artifacts/job-complete.out: -------------------------------------------------------------------------------- 1 | 2 | Job/hello-1584493380 -n default, created 1m ago by CronJob/hello and completed in 1m, Succeeded 3 
| Current: Job Completed. succeeded: 1/1 4 | Complete for 1m 5 | -------------------------------------------------------------------------------- /tests/artifacts/job-complete.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | creationTimestamp: "2020-03-18T01:03:07Z" 5 | labels: 6 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 7 | job-name: hello-1584493380 8 | name: hello-1584493380 9 | namespace: default 10 | ownerReferences: 11 | - apiVersion: batch/v1beta1 12 | blockOwnerDeletion: true 13 | controller: true 14 | kind: CronJob 15 | name: hello 16 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 17 | resourceVersion: "308438" 18 | selfLink: /apis/batch/v1/namespaces/default/jobs/hello-1584493380 19 | uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 20 | spec: 21 | backoffLimit: 6 22 | completions: 1 23 | parallelism: 1 24 | selector: 25 | matchLabels: 26 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 32 | job-name: hello-1584493380 33 | spec: 34 | containers: 35 | - args: 36 | - /bin/sh 37 | - -c 38 | - date; echo Hello from the Kubernetes cluster 39 | image: busybox 40 | imagePullPolicy: Always 41 | name: hello 42 | resources: {} 43 | terminationMessagePath: /dev/termination-log 44 | terminationMessagePolicy: File 45 | dnsPolicy: ClusterFirst 46 | restartPolicy: OnFailure 47 | schedulerName: default-scheduler 48 | securityContext: {} 49 | terminationGracePeriodSeconds: 30 50 | status: 51 | completionTime: "2020-03-18T01:03:11Z" 52 | conditions: 53 | - lastProbeTime: "2020-03-18T01:03:11Z" 54 | lastTransitionTime: "2020-03-18T01:03:11Z" 55 | status: "True" 56 | type: Complete 57 | startTime: "2020-03-18T01:03:07Z" 58 | succeeded: 1 59 | -------------------------------------------------------------------------------- 
/tests/artifacts/job-new.out: -------------------------------------------------------------------------------- 1 | 2 | Job/hello-1584493380 -n default, created 1m ago by CronJob/hello 3 | InProgress: Job not started 4 | Reconciling: JobNotStarted, Job not started 5 | -------------------------------------------------------------------------------- /tests/artifacts/job-new.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | creationTimestamp: "2020-03-18T01:03:07Z" 5 | labels: 6 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 7 | job-name: hello-1584493380 8 | name: hello-1584493380 9 | namespace: default 10 | ownerReferences: 11 | - apiVersion: batch/v1beta1 12 | blockOwnerDeletion: true 13 | controller: true 14 | kind: CronJob 15 | name: hello 16 | uid: 8324766b-49a4-4bd6-9c9b-f1ffb5e62a0b 17 | resourceVersion: "308416" 18 | selfLink: /apis/batch/v1/namespaces/default/jobs/hello-1584493380 19 | uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 20 | spec: 21 | backoffLimit: 6 22 | completions: 1 23 | parallelism: 1 24 | selector: 25 | matchLabels: 26 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | controller-uid: 237c7625-d81e-472d-8f8e-ed1f8ff06ef0 32 | job-name: hello-1584493380 33 | spec: 34 | containers: 35 | - args: 36 | - /bin/sh 37 | - -c 38 | - date; echo Hello from the Kubernetes cluster 39 | image: busybox 40 | imagePullPolicy: Always 41 | name: hello 42 | resources: {} 43 | terminationMessagePath: /dev/termination-log 44 | terminationMessagePolicy: File 45 | dnsPolicy: ClusterFirst 46 | restartPolicy: OnFailure 47 | schedulerName: default-scheduler 48 | securityContext: {} 49 | terminationGracePeriodSeconds: 30 50 | status: {} 51 | -------------------------------------------------------------------------------- /tests/artifacts/multiple-2-pods-docs.out: 
-------------------------------------------------------------------------------- 1 | 2 | Pod/etcd-minikube -n kube-system, created 1m ago by Node/minikube, started after 0s Running Burstable 3 | Current: Pod is Ready 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Containers: 6 | etcd (registry.k8s.io/etcd:3.5.9-0) Running for 1m and Ready, restarted 9 times 7 | previously: Started 1m ago and Completed after 1m 8 | 9 | Pod/storage-provisioner -n kube-system, created 1m ago, started after 0s Running BestEffort 10 | Current: Pod is Ready 11 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 12 | Standalone POD. 13 | Containers: 14 | storage-provisioner (gcr.io/k8s-minikube/storage-provisioner:v5) Running for 1m and Ready, restarted 16 times 15 | previously: Started 1m ago and Error after 1m with exit code exit with 1 16 | -------------------------------------------------------------------------------- /tests/artifacts/multiple-2-pods-list.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/etcd-minikube -n kube-system, created 1m ago by Node/minikube, started after 0s Running Burstable 3 | Current: Pod is Ready 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Containers: 6 | etcd (registry.k8s.io/etcd:3.5.9-0) Running for 1m and Ready, restarted 9 times 7 | previously: Started 1m ago and Completed after 1m 8 | 9 | Pod/storage-provisioner -n kube-system, created 1m ago, started after 0s Running BestEffort 10 | Current: Pod is Ready 11 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 12 | Standalone POD. 
13 | Containers: 14 | storage-provisioner (gcr.io/k8s-minikube/storage-provisioner:v5) Running for 1m and Ready, restarted 16 times 15 | previously: Started 1m ago and Error after 1m with exit code exit with 1 16 | -------------------------------------------------------------------------------- /tests/artifacts/node-aks.out: -------------------------------------------------------------------------------- 1 | 2 | Node/aks-cpuworkers-41776494-vmss00006u, created 1m ago 3 | linux Ubuntu 16.04.6 LTS (amd64), kernel 4.15.0-1066-azure, kubelet v1.15.7, kube-proxy v1.15.7 4 | cloudprovider eastus0 Standard_D8s_v3, agentpool:cpuworkers, role:agent 5 | images 50 volumes inuse=1/16, attached=1 6 | Current: Resource is Ready 7 | MemoryPressure KubeletHasSufficientMemory, kubelet has sufficient memory available for 1m 8 | DiskPressure KubeletHasNoDiskPressure, kubelet has no disk pressure for 1m 9 | PIDPressure KubeletHasSufficientPID, kubelet has sufficient PID available for 1m 10 | Ready KubeletReady, kubelet is posting ready status. 
AppArmor enabled for 1m 11 | addresses: Hostname=aks-cpuworkers-41776494-vmss00006u InternalIP=10.250.4.65 12 | allocatable: pods:30, cpu:7.82, mem:29.1GB, ephemeral-storage:59.7GB 13 | capacity: pods:30, cpu:8, mem:33.7GB, ephemeral-storage:66.4GB 14 | -------------------------------------------------------------------------------- /tests/artifacts/node-and-service.out: -------------------------------------------------------------------------------- 1 | 2 | Node/minikube, created 1m ago 3 | linux Ubuntu 22.04.4 LTS (amd64), kernel 6.6.41-1-MANJARO, kubelet v1.30.0, kube-proxy v1.30.0 4 | images 8 5 | Current: Resource is Ready 6 | MemoryPressure KubeletHasSufficientMemory, kubelet has sufficient memory available for 1m 7 | DiskPressure KubeletHasNoDiskPressure, kubelet has no disk pressure for 1m 8 | PIDPressure KubeletHasSufficientPID, kubelet has sufficient PID available for 1m 9 | Ready KubeletReady, kubelet is posting ready status for 1m 10 | addresses: InternalIP=192.168.49.2 Hostname=minikube 11 | allocatable: pods:110, cpu:16, mem:33.3GB, ephemeral-storage:997.3GB 12 | capacity: pods:110, cpu:16, mem:33.3GB, ephemeral-storage:997.3GB 13 | 14 | Service/kubernetes -n default, created 1m ago 15 | Current: Service is ready 16 | Missing Endpoint: Service has no matching endpoint. 
17 | -------------------------------------------------------------------------------- /tests/artifacts/node-minikube-with-metrics.out: -------------------------------------------------------------------------------- 1 | 2 | Node/minikube, created 1m ago 3 | linux Buildroot 2019.02.9 (amd64), kernel 4.19.94, kubelet v1.17.3, kube-proxy v1.17.3 4 | images 13 5 | Current: Resource is Ready 6 | MemoryPressure KubeletHasSufficientMemory, kubelet has sufficient memory available for 1m 7 | DiskPressure KubeletHasNoDiskPressure, kubelet has no disk pressure for 1m 8 | PIDPressure KubeletHasSufficientPID, kubelet has sufficient PID available for 1m 9 | Ready KubeletReady, kubelet is posting ready status for 1m 10 | addresses: InternalIP=192.168.99.105 Hostname=minikube 11 | allocatable: pods:110, cpu:2, mem:1.9GB, ephemeral-storage:16.3GB 12 | capacity: pods:110, cpu:2, mem:2GB, ephemeral-storage:18.2GB 13 | -------------------------------------------------------------------------------- /tests/artifacts/node-minikube-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Node 3 | metadata: 4 | annotations: 5 | kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock 6 | node.alpha.kubernetes.io/ttl: "0" 7 | volumes.kubernetes.io/controller-managed-attach-detach: "true" 8 | creationTimestamp: "2020-03-23T14:51:02Z" 9 | labels: 10 | beta.kubernetes.io/arch: amd64 11 | beta.kubernetes.io/os: linux 12 | kubernetes.io/arch: amd64 13 | kubernetes.io/hostname: minikube 14 | kubernetes.io/os: linux 15 | node-role.kubernetes.io/master: "" 16 | name: minikube 17 | resourceVersion: "334412" 18 | selfLink: /api/v1/nodes/minikube 19 | uid: a0ef422a-3943-44d9-825b-9ecd1e2e7ca0 20 | spec: {} 21 | status: 22 | addresses: 23 | - address: 192.168.99.105 24 | type: InternalIP 25 | - address: minikube 26 | type: Hostname 27 | allocatable: 28 | cpu: "2" 29 | ephemeral-storage: "16390427417" 30 | hugepages-2Mi: 
"0" 31 | memory: 1884340Ki 32 | pods: "110" 33 | capacity: 34 | cpu: "2" 35 | ephemeral-storage: 17784752Ki 36 | hugepages-2Mi: "0" 37 | memory: 1986740Ki 38 | pods: "110" 39 | conditions: 40 | - lastHeartbeatTime: "2020-03-28T22:21:04Z" 41 | lastTransitionTime: "2020-03-23T14:50:57Z" 42 | message: kubelet has sufficient memory available 43 | reason: KubeletHasSufficientMemory 44 | status: "False" 45 | type: MemoryPressure 46 | - lastHeartbeatTime: "2020-03-28T22:21:04Z" 47 | lastTransitionTime: "2020-03-23T14:50:57Z" 48 | message: kubelet has no disk pressure 49 | reason: KubeletHasNoDiskPressure 50 | status: "False" 51 | type: DiskPressure 52 | - lastHeartbeatTime: "2020-03-28T22:21:04Z" 53 | lastTransitionTime: "2020-03-23T14:50:57Z" 54 | message: kubelet has sufficient PID available 55 | reason: KubeletHasSufficientPID 56 | status: "False" 57 | type: PIDPressure 58 | - lastHeartbeatTime: "2020-03-28T22:21:04Z" 59 | lastTransitionTime: "2020-03-25T02:08:25Z" 60 | message: kubelet is posting ready status 61 | reason: KubeletReady 62 | status: "True" 63 | type: Ready 64 | daemonEndpoints: 65 | kubeletEndpoint: 66 | Port: 10250 67 | images: 68 | - names: 69 | - k8s.gcr.io/etcd:3.4.3-0 70 | sizeBytes: 288426917 71 | - names: 72 | - progrium/stress@sha256:e34d56d60f5caae79333cee395aae93b74791d50e3841986420d23c2ee4697bf 73 | - progrium/stress:latest 74 | sizeBytes: 281783943 75 | - names: 76 | - k8s.gcr.io/kube-apiserver:v1.17.3 77 | sizeBytes: 170986003 78 | - names: 79 | - k8s.gcr.io/kube-controller-manager:v1.17.3 80 | sizeBytes: 160918035 81 | - names: 82 | - k8s.gcr.io/kube-proxy:v1.17.3 83 | sizeBytes: 115964919 84 | - names: 85 | - k8s.gcr.io/nginx-slim@sha256:8b4501fe0fe221df663c22e16539f399e89594552f400408303c42f3dd8d0e52 86 | - k8s.gcr.io/nginx-slim:0.8 87 | sizeBytes: 110487599 88 | - names: 89 | - k8s.gcr.io/kube-scheduler:v1.17.3 90 | sizeBytes: 94435859 91 | - names: 92 | - kubernetesui/dashboard:v2.0.0-beta8 93 | sizeBytes: 90835427 94 | - names: 95 | - 
gcr.io/k8s-minikube/storage-provisioner:v1.8.1 96 | sizeBytes: 80815640 97 | - names: 98 | - k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892 99 | - k8s.gcr.io/metrics-server-amd64:v0.2.1 100 | sizeBytes: 42541759 101 | - names: 102 | - k8s.gcr.io/coredns:1.6.5 103 | sizeBytes: 41578211 104 | - names: 105 | - kubernetesui/metrics-scraper:v1.0.2 106 | sizeBytes: 40101552 107 | - names: 108 | - k8s.gcr.io/pause:3.1 109 | sizeBytes: 742472 110 | nodeInfo: 111 | architecture: amd64 112 | bootID: f6b11f94-1ba2-4661-b9e8-502551f0478e 113 | containerRuntimeVersion: docker://19.3.6 114 | kernelVersion: 4.19.94 115 | kubeProxyVersion: v1.17.3 116 | kubeletVersion: v1.17.3 117 | machineID: 2b4443b20bb247bcb4982522dbcd9b6f 118 | operatingSystem: linux 119 | osImage: Buildroot 2019.02.9 120 | systemUUID: d5764b8f-42fd-40a0-b62e-5d19c76d4dae 121 | nodeMetrics: 122 | apiVersion: metrics.k8s.io/v1beta1 123 | kind: NodeMetrics 124 | metadata: 125 | creationTimestamp: "2020-03-28T22:25:34Z" 126 | name: minikube 127 | selfLink: /apis/metrics.k8s.io/v1beta1/nodes/minikube 128 | timestamp: "2020-03-28T22:25:00Z" 129 | usage: 130 | cpu: 133m 131 | memory: 1459144Ki 132 | window: 1m0s 133 | 134 | -------------------------------------------------------------------------------- /tests/artifacts/node-minikube.out: -------------------------------------------------------------------------------- 1 | 2 | Node/minikube, created 1m ago 3 | linux Buildroot 2019.02.9 (amd64), kernel 4.19.94, kubelet v1.17.3, kube-proxy v1.17.3 4 | images 13 5 | Current: Resource is Ready 6 | MemoryPressure KubeletHasSufficientMemory, kubelet has sufficient memory available for 1m 7 | DiskPressure KubeletHasNoDiskPressure, kubelet has no disk pressure for 1m 8 | PIDPressure KubeletHasSufficientPID, kubelet has sufficient PID available for 1m 9 | Ready KubeletReady, kubelet is posting ready status for 1m 10 | addresses: InternalIP=192.168.99.102 
Hostname=minikube 11 | allocatable: pods:110, cpu:2, mem:1.9GB, ephemeral-storage:16.3GB 12 | capacity: pods:110, cpu:2, mem:2GB, ephemeral-storage:18.2GB 13 | -------------------------------------------------------------------------------- /tests/artifacts/node-minikube.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Node 3 | metadata: 4 | annotations: 5 | kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock 6 | node.alpha.kubernetes.io/ttl: "0" 7 | volumes.kubernetes.io/controller-managed-attach-detach: "true" 8 | creationTimestamp: "2020-02-24T20:05:52Z" 9 | labels: 10 | beta.kubernetes.io/arch: amd64 11 | beta.kubernetes.io/os: linux 12 | kubernetes.io/arch: amd64 13 | kubernetes.io/hostname: minikube 14 | kubernetes.io/os: linux 15 | node-role.kubernetes.io/master: "" 16 | name: minikube 17 | resourceVersion: "381343" 18 | selfLink: /api/v1/nodes/minikube 19 | uid: b2665321-4843-4c32-8e45-4fdb7024c4d7 20 | spec: {} 21 | status: 22 | addresses: 23 | - address: 192.168.99.102 24 | type: InternalIP 25 | - address: minikube 26 | type: Hostname 27 | allocatable: 28 | cpu: "2" 29 | ephemeral-storage: "16390427417" 30 | hugepages-2Mi: "0" 31 | memory: 1884340Ki 32 | pods: "110" 33 | capacity: 34 | cpu: "2" 35 | ephemeral-storage: 17784752Ki 36 | hugepages-2Mi: "0" 37 | memory: 1986740Ki 38 | pods: "110" 39 | conditions: 40 | - lastHeartbeatTime: "2020-03-18T17:24:40Z" 41 | lastTransitionTime: "2020-02-24T20:05:47Z" 42 | message: kubelet has sufficient memory available 43 | reason: KubeletHasSufficientMemory 44 | status: "False" 45 | type: MemoryPressure 46 | - lastHeartbeatTime: "2020-03-18T17:24:40Z" 47 | lastTransitionTime: "2020-02-24T20:05:47Z" 48 | message: kubelet has no disk pressure 49 | reason: KubeletHasNoDiskPressure 50 | status: "False" 51 | type: DiskPressure 52 | - lastHeartbeatTime: "2020-03-18T17:24:40Z" 53 | lastTransitionTime: "2020-02-24T20:05:47Z" 54 | message: kubelet 
has sufficient PID available 55 | reason: KubeletHasSufficientPID 56 | status: "False" 57 | type: PIDPressure 58 | - lastHeartbeatTime: "2020-03-18T17:24:40Z" 59 | lastTransitionTime: "2020-02-24T20:06:06Z" 60 | message: kubelet is posting ready status 61 | reason: KubeletReady 62 | status: "True" 63 | type: Ready 64 | daemonEndpoints: 65 | kubeletEndpoint: 66 | Port: 10250 67 | images: 68 | - names: 69 | - kennethreitz/httpbin@sha256:599fe5e5073102dbb0ee3dbb65f049dab44fa9fc251f6835c9990f8fb196a72b 70 | - kennethreitz/httpbin:latest 71 | sizeBytes: 533675008 72 | - names: 73 | - k8s.gcr.io/etcd:3.4.3-0 74 | sizeBytes: 288426917 75 | - names: 76 | - k8s.gcr.io/kube-apiserver:v1.17.3 77 | sizeBytes: 170986003 78 | - names: 79 | - k8s.gcr.io/kube-controller-manager:v1.17.3 80 | sizeBytes: 160918035 81 | - names: 82 | - k8s.gcr.io/kube-proxy:v1.17.3 83 | sizeBytes: 115964919 84 | - names: 85 | - k8s.gcr.io/nginx-slim@sha256:8b4501fe0fe221df663c22e16539f399e89594552f400408303c42f3dd8d0e52 86 | - k8s.gcr.io/nginx-slim:0.8 87 | sizeBytes: 110487599 88 | - names: 89 | - k8s.gcr.io/kube-scheduler:v1.17.3 90 | sizeBytes: 94435859 91 | - names: 92 | - kubernetesui/dashboard:v2.0.0-beta8 93 | sizeBytes: 90835427 94 | - names: 95 | - gcr.io/k8s-minikube/storage-provisioner:v1.8.1 96 | sizeBytes: 80815640 97 | - names: 98 | - k8s.gcr.io/coredns:1.6.5 99 | sizeBytes: 41578211 100 | - names: 101 | - kubernetesui/metrics-scraper:v1.0.2 102 | sizeBytes: 40101552 103 | - names: 104 | - busybox@sha256:b26cd013274a657b86e706210ddd5cc1f82f50155791199d29b9e86e935ce135 105 | - busybox:latest 106 | sizeBytes: 1219590 107 | - names: 108 | - k8s.gcr.io/pause:3.1 109 | sizeBytes: 742472 110 | nodeInfo: 111 | architecture: amd64 112 | bootID: ec3cbcb4-67a8-406e-88f3-8baaa046f2bd 113 | containerRuntimeVersion: docker://19.3.6 114 | kernelVersion: 4.19.94 115 | kubeProxyVersion: v1.17.3 116 | kubeletVersion: v1.17.3 117 | machineID: bf994b721cbb401db323f6d994cbe3c7 118 | operatingSystem: linux 
119 | osImage: Buildroot 2019.02.9 120 | systemUUID: b1d82a2f-42a0-417d-a2b2-017d5c21e14c 121 | -------------------------------------------------------------------------------- /tests/artifacts/pod-deleted-due-to-missing-container.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/prometheus-operator-5c5784bc5f-4h65z -n prometheus, created 1m ago by ReplicaSet/prometheus-operator-5c5784bc5f Failed Evicted BestEffort, message: The node was low on resource: ephemeral-storage. Container kube-prometheus-stack was using 19212Ki, which exceeds its request of 0. 3 | Current: Pod has completed, but not successfully 4 | PodScheduled -> Initialized -> Not ContainersReady -> Not Ready 5 | Ready PodFailed for 1m 6 | ContainersReady PodFailed for 1m 7 | Containers: 8 | kube-prometheus-stack (quay.io/prometheus-operator/prometheus-operator:v0.58.0) Terminated as ContainerStatusUnknown with "The container could not be located when the pod was terminated" exit with 137 (SIGKILL), restarted 1 times 9 | previously: Terminated as ContainerStatusUnknown with "The container could not be located when the pod was deleted. 
The container used to be Running" exit with 137 (SIGKILL) 10 | -------------------------------------------------------------------------------- /tests/artifacts/pod-deleted-due-to-missing-container.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2023-06-01T18:00:19Z" 5 | generateName: prometheus-operator-5c5784bc5f- 6 | labels: 7 | app: kube-prometheus-stack-operator 8 | app.kubernetes.io/instance: prometheus 9 | app.kubernetes.io/managed-by: Helm 10 | app.kubernetes.io/part-of: kube-prometheus-stack 11 | app.kubernetes.io/version: 39.13.3 12 | chart: kube-prometheus-stack-39.13.3 13 | heritage: Helm 14 | kapp.k14s.io/app: "1678977570261484075" 15 | kapp.k14s.io/association: v1.f13edd101e6616b842558d095e5e2694 16 | pod-template-hash: 5c5784bc5f 17 | release: prometheus 18 | name: prometheus-operator-5c5784bc5f-4h65z 19 | namespace: prometheus 20 | ownerReferences: 21 | - apiVersion: apps/v1 22 | blockOwnerDeletion: true 23 | controller: true 24 | kind: ReplicaSet 25 | name: prometheus-operator-5c5784bc5f 26 | uid: 5c5b29b3-a779-41fc-a9b0-a49e3609701c 27 | resourceVersion: "305310117" 28 | uid: e4fd3bc9-cd2f-4f5b-b33b-fc52a78ecf92 29 | spec: 30 | containers: 31 | - args: 32 | - --kubelet-service=kube-system/prometheus-kubelet 33 | - --localhost=127.0.0.1 34 | - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.58.0 35 | - --config-reloader-cpu-request=200m 36 | - --config-reloader-cpu-limit=200m 37 | - --config-reloader-memory-request=50Mi 38 | - --config-reloader-memory-limit=50Mi 39 | - --thanos-default-base-image=quay.io/thanos/thanos:v0.27.0 40 | image: quay.io/prometheus-operator/prometheus-operator:v0.58.0 41 | imagePullPolicy: IfNotPresent 42 | name: kube-prometheus-stack 43 | ports: 44 | - containerPort: 8080 45 | name: http 46 | protocol: TCP 47 | resources: {} 48 | securityContext: 49 | allowPrivilegeEscalation: 
false 50 | readOnlyRootFilesystem: true 51 | terminationMessagePath: /dev/termination-log 52 | terminationMessagePolicy: File 53 | volumeMounts: 54 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 55 | name: kube-api-access-787tt 56 | readOnly: true 57 | dnsPolicy: ClusterFirst 58 | enableServiceLinks: true 59 | nodeName: tkc-workers-mlg7b-567667688c-69ncv 60 | preemptionPolicy: PreemptLowerPriority 61 | priority: 0 62 | restartPolicy: Always 63 | schedulerName: default-scheduler 64 | securityContext: 65 | fsGroup: 65534 66 | runAsGroup: 65534 67 | runAsNonRoot: true 68 | runAsUser: 65534 69 | serviceAccount: prometheus-op-sa 70 | serviceAccountName: prometheus-op-sa 71 | terminationGracePeriodSeconds: 30 72 | tolerations: 73 | - effect: NoExecute 74 | key: node.kubernetes.io/not-ready 75 | operator: Exists 76 | tolerationSeconds: 300 77 | - effect: NoExecute 78 | key: node.kubernetes.io/unreachable 79 | operator: Exists 80 | tolerationSeconds: 300 81 | volumes: 82 | - name: kube-api-access-787tt 83 | projected: 84 | defaultMode: 420 85 | sources: 86 | - serviceAccountToken: 87 | expirationSeconds: 3607 88 | path: token 89 | - configMap: 90 | items: 91 | - key: ca.crt 92 | path: ca.crt 93 | name: kube-root-ca.crt 94 | - downwardAPI: 95 | items: 96 | - fieldRef: 97 | apiVersion: v1 98 | fieldPath: metadata.namespace 99 | path: namespace 100 | status: 101 | conditions: 102 | - lastProbeTime: null 103 | lastTransitionTime: "2023-06-01T18:00:19Z" 104 | status: "True" 105 | type: Initialized 106 | - lastProbeTime: null 107 | lastTransitionTime: "2023-11-01T19:21:01Z" 108 | reason: PodFailed 109 | status: "False" 110 | type: Ready 111 | - lastProbeTime: null 112 | lastTransitionTime: "2023-11-01T19:21:01Z" 113 | reason: PodFailed 114 | status: "False" 115 | type: ContainersReady 116 | - lastProbeTime: null 117 | lastTransitionTime: "2023-06-01T18:00:19Z" 118 | status: "True" 119 | type: PodScheduled 120 | containerStatuses: 121 | - image: 
quay.io/prometheus-operator/prometheus-operator:v0.58.0 122 | imageID: "" 123 | lastState: 124 | terminated: 125 | exitCode: 137 126 | finishedAt: null 127 | message: The container could not be located when the pod was deleted. The 128 | container used to be Running 129 | reason: ContainerStatusUnknown 130 | startedAt: null 131 | name: kube-prometheus-stack 132 | ready: false 133 | restartCount: 1 134 | started: false 135 | state: 136 | terminated: 137 | exitCode: 137 138 | finishedAt: null 139 | message: The container could not be located when the pod was terminated 140 | reason: ContainerStatusUnknown 141 | startedAt: null 142 | hostIP: 100.72.96.202 143 | message: 'The node was low on resource: ephemeral-storage. Container kube-prometheus-stack 144 | was using 19212Ki, which exceeds its request of 0.' 145 | phase: Failed 146 | podIP: 192.168.15.5 147 | podIPs: 148 | - ip: 192.168.15.5 149 | qosClass: BestEffort 150 | reason: Evicted 151 | startTime: "2023-06-01T18:00:19Z" 152 | -------------------------------------------------------------------------------- /tests/artifacts/pod-job-completed.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/hello-1584492660-d2c6p -n default, created 1m ago by Job/hello-1584492660 Succeeded BestEffort 3 | Current: Pod has completed successfully 4 | PodScheduled -> Initialized -> Not ContainersReady -> Not Ready 5 | Ready PodCompleted for 1m 6 | ContainersReady PodCompleted for 1m 7 | Containers: 8 | hello (busybox:latest) Started 1m ago and Completed after 1m 9 | -------------------------------------------------------------------------------- /tests/artifacts/pod-job-completed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:51:06Z" 5 | generateName: hello-1584492660- 6 | labels: 7 | controller-uid: 2f9027bb-0df2-4bb3-af64-ce6c90b4e1fe 8 | job-name: 
hello-1584492660 9 | name: hello-1584492660-d2c6p 10 | namespace: default 11 | ownerReferences: 12 | - apiVersion: batch/v1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: Job 16 | name: hello-1584492660 17 | uid: 2f9027bb-0df2-4bb3-af64-ce6c90b4e1fe 18 | resourceVersion: "306610" 19 | selfLink: /api/v1/namespaces/default/pods/hello-1584492660-d2c6p 20 | uid: 0c88ccfc-2abd-4169-926f-bb3acdc86ed3 21 | spec: 22 | containers: 23 | - args: 24 | - /bin/sh 25 | - -c 26 | - date; echo Hello from the Kubernetes cluster 27 | image: busybox 28 | imagePullPolicy: Always 29 | name: hello 30 | resources: {} 31 | terminationMessagePath: /dev/termination-log 32 | terminationMessagePolicy: File 33 | volumeMounts: 34 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 35 | name: default-token-5bc8k 36 | readOnly: true 37 | dnsPolicy: ClusterFirst 38 | enableServiceLinks: true 39 | nodeName: minikube 40 | priority: 0 41 | restartPolicy: OnFailure 42 | schedulerName: default-scheduler 43 | securityContext: {} 44 | serviceAccount: default 45 | serviceAccountName: default 46 | terminationGracePeriodSeconds: 30 47 | tolerations: 48 | - effect: NoExecute 49 | key: node.kubernetes.io/not-ready 50 | operator: Exists 51 | tolerationSeconds: 300 52 | - effect: NoExecute 53 | key: node.kubernetes.io/unreachable 54 | operator: Exists 55 | tolerationSeconds: 300 56 | volumes: 57 | - name: default-token-5bc8k 58 | secret: 59 | defaultMode: 420 60 | secretName: default-token-5bc8k 61 | status: 62 | conditions: 63 | - lastProbeTime: null 64 | lastTransitionTime: "2020-03-18T00:51:06Z" 65 | reason: PodCompleted 66 | status: "True" 67 | type: Initialized 68 | - lastProbeTime: null 69 | lastTransitionTime: "2020-03-18T00:51:06Z" 70 | reason: PodCompleted 71 | status: "False" 72 | type: Ready 73 | - lastProbeTime: null 74 | lastTransitionTime: "2020-03-18T00:51:06Z" 75 | reason: PodCompleted 76 | status: "False" 77 | type: ContainersReady 78 | - lastProbeTime: null 79 | 
lastTransitionTime: "2020-03-18T00:51:06Z" 80 | status: "True" 81 | type: PodScheduled 82 | containerStatuses: 83 | - containerID: docker://6cdc11d57e5c95402ff9448a578ce53d26609c6968859e4a702f0094391220d6 84 | image: busybox:latest 85 | imageID: docker-pullable://busybox@sha256:b26cd013274a657b86e706210ddd5cc1f82f50155791199d29b9e86e935ce135 86 | lastState: {} 87 | name: hello 88 | ready: false 89 | restartCount: 0 90 | started: false 91 | state: 92 | terminated: 93 | containerID: docker://6cdc11d57e5c95402ff9448a578ce53d26609c6968859e4a702f0094391220d6 94 | exitCode: 0 95 | finishedAt: "2020-03-18T00:51:10Z" 96 | reason: Completed 97 | startedAt: "2020-03-18T00:51:10Z" 98 | hostIP: 192.168.99.102 99 | phase: Succeeded 100 | podIP: 172.17.0.4 101 | podIPs: 102 | - ip: 172.17.0.4 103 | qosClass: BestEffort 104 | startTime: "2020-03-18T00:51:06Z" 105 | -------------------------------------------------------------------------------- /tests/artifacts/pod-marked-for-deletion-completed.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/hello-1584492660-d2c6p -n default, created 1m ago by Job/hello-1584492660 Succeeded BestEffort 3 | Terminating: Resource scheduled for deletion 4 | PodScheduled -> Initialized -> Not ContainersReady -> Not Ready 5 | Ready PodCompleted for 1m 6 | ContainersReady PodCompleted for 1m 7 | Containers: 8 | hello (busybox:latest) Started 1m ago and Completed after 1m 9 | -------------------------------------------------------------------------------- /tests/artifacts/pod-marked-for-deletion-completed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:51:06Z" 5 | deletionGracePeriodSeconds: 0 6 | deletionTimestamp: "2020-03-18T00:54:17Z" 7 | generateName: hello-1584492660- 8 | labels: 9 | controller-uid: 2f9027bb-0df2-4bb3-af64-ce6c90b4e1fe 10 | job-name: hello-1584492660 11 | name: 
hello-1584492660-d2c6p 12 | namespace: default 13 | ownerReferences: 14 | - apiVersion: batch/v1 15 | blockOwnerDeletion: true 16 | controller: true 17 | kind: Job 18 | name: hello-1584492660 19 | uid: 2f9027bb-0df2-4bb3-af64-ce6c90b4e1fe 20 | resourceVersion: "307093" 21 | selfLink: /api/v1/namespaces/default/pods/hello-1584492660-d2c6p 22 | uid: 0c88ccfc-2abd-4169-926f-bb3acdc86ed3 23 | spec: 24 | containers: 25 | - args: 26 | - /bin/sh 27 | - -c 28 | - date; echo Hello from the Kubernetes cluster 29 | image: busybox 30 | imagePullPolicy: Always 31 | name: hello 32 | resources: {} 33 | terminationMessagePath: /dev/termination-log 34 | terminationMessagePolicy: File 35 | volumeMounts: 36 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 37 | name: default-token-5bc8k 38 | readOnly: true 39 | dnsPolicy: ClusterFirst 40 | enableServiceLinks: true 41 | nodeName: minikube 42 | priority: 0 43 | restartPolicy: OnFailure 44 | schedulerName: default-scheduler 45 | securityContext: {} 46 | serviceAccount: default 47 | serviceAccountName: default 48 | terminationGracePeriodSeconds: 30 49 | tolerations: 50 | - effect: NoExecute 51 | key: node.kubernetes.io/not-ready 52 | operator: Exists 53 | tolerationSeconds: 300 54 | - effect: NoExecute 55 | key: node.kubernetes.io/unreachable 56 | operator: Exists 57 | tolerationSeconds: 300 58 | volumes: 59 | - name: default-token-5bc8k 60 | secret: 61 | defaultMode: 420 62 | secretName: default-token-5bc8k 63 | status: 64 | conditions: 65 | - lastProbeTime: null 66 | lastTransitionTime: "2020-03-18T00:51:06Z" 67 | reason: PodCompleted 68 | status: "True" 69 | type: Initialized 70 | - lastProbeTime: null 71 | lastTransitionTime: "2020-03-18T00:51:06Z" 72 | reason: PodCompleted 73 | status: "False" 74 | type: Ready 75 | - lastProbeTime: null 76 | lastTransitionTime: "2020-03-18T00:51:06Z" 77 | reason: PodCompleted 78 | status: "False" 79 | type: ContainersReady 80 | - lastProbeTime: null 81 | lastTransitionTime: 
"2020-03-18T00:51:06Z" 82 | status: "True" 83 | type: PodScheduled 84 | containerStatuses: 85 | - containerID: docker://6cdc11d57e5c95402ff9448a578ce53d26609c6968859e4a702f0094391220d6 86 | image: busybox:latest 87 | imageID: docker-pullable://busybox@sha256:b26cd013274a657b86e706210ddd5cc1f82f50155791199d29b9e86e935ce135 88 | lastState: {} 89 | name: hello 90 | ready: false 91 | restartCount: 0 92 | started: false 93 | state: 94 | terminated: 95 | containerID: docker://6cdc11d57e5c95402ff9448a578ce53d26609c6968859e4a702f0094391220d6 96 | exitCode: 0 97 | finishedAt: "2020-03-18T00:51:10Z" 98 | reason: Completed 99 | startedAt: "2020-03-18T00:51:10Z" 100 | hostIP: 192.168.99.102 101 | phase: Succeeded 102 | podIP: 172.17.0.4 103 | podIPs: 104 | - ip: 172.17.0.4 105 | qosClass: BestEffort 106 | startTime: "2020-03-18T00:51:06Z" 107 | -------------------------------------------------------------------------------- /tests/artifacts/pod-marked-for-deletion.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/web-0 -n test1, created 1m ago by StatefulSet/web Running BestEffort 3 | Terminating: Resource scheduled for deletion 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Containers: 6 | nginx (k8s.gcr.io/nginx-slim:0.8) Running for 1m and Ready, restarted 2 times 7 | previously: Started 1m ago and Completed after 1m 8 | -------------------------------------------------------------------------------- /tests/artifacts/pod-marked-for-deletion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | newAnnotation: newAnnotationValue 6 | creationTimestamp: "2020-03-24T07:52:28Z" 7 | deletionGracePeriodSeconds: 10 8 | deletionTimestamp: "2020-03-25T09:09:56Z" 9 | generateName: web- 10 | labels: 11 | app: nginx 12 | controller-revision-hash: web-f96c5b685 13 | statefulset.kubernetes.io/pod-name: web-0 14 | name: 
web-0 15 | namespace: test1 16 | ownerReferences: 17 | - apiVersion: apps/v1 18 | blockOwnerDeletion: true 19 | controller: true 20 | kind: StatefulSet 21 | name: web 22 | uid: a8392004-fa2e-4081-93c7-19de06e8f96c 23 | resourceVersion: "219639" 24 | selfLink: /api/v1/namespaces/test1/pods/web-0 25 | uid: bd5e7c1e-1d0f-43d5-bead-7a1e27348cd4 26 | spec: 27 | containers: 28 | - image: k8s.gcr.io/nginx-slim:0.8 29 | imagePullPolicy: IfNotPresent 30 | name: nginx 31 | ports: 32 | - containerPort: 80 33 | name: web 34 | protocol: TCP 35 | resources: {} 36 | terminationMessagePath: /dev/termination-log 37 | terminationMessagePolicy: File 38 | volumeMounts: 39 | - mountPath: /usr/share/nginx/html 40 | name: www 41 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 42 | name: default-token-dfmm7 43 | readOnly: true 44 | dnsPolicy: ClusterFirst 45 | enableServiceLinks: true 46 | hostname: web-0 47 | nodeName: minikube 48 | priority: 0 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | serviceAccount: default 53 | serviceAccountName: default 54 | subdomain: nginx 55 | terminationGracePeriodSeconds: 10 56 | tolerations: 57 | - effect: NoExecute 58 | key: node.kubernetes.io/not-ready 59 | operator: Exists 60 | tolerationSeconds: 300 61 | - effect: NoExecute 62 | key: node.kubernetes.io/unreachable 63 | operator: Exists 64 | tolerationSeconds: 300 65 | volumes: 66 | - name: www 67 | persistentVolumeClaim: 68 | claimName: www-web-0 69 | - name: default-token-dfmm7 70 | secret: 71 | defaultMode: 420 72 | secretName: default-token-dfmm7 73 | status: 74 | conditions: 75 | - lastProbeTime: null 76 | lastTransitionTime: "2020-03-24T07:52:28Z" 77 | status: "True" 78 | type: Initialized 79 | - lastProbeTime: null 80 | lastTransitionTime: "2020-03-25T02:08:18Z" 81 | status: "True" 82 | type: Ready 83 | - lastProbeTime: null 84 | lastTransitionTime: "2020-03-25T02:08:18Z" 85 | status: "True" 86 | type: ContainersReady 87 | - 
lastProbeTime: null 88 | lastTransitionTime: "2020-03-24T07:52:28Z" 89 | status: "True" 90 | type: PodScheduled 91 | containerStatuses: 92 | - containerID: docker://21dd5b5b104075c7209c98fac0e0c0e507aaf18fe47aa8dba1b1fcfbbb45a14f 93 | image: k8s.gcr.io/nginx-slim:0.8 94 | imageID: docker-pullable://k8s.gcr.io/nginx-slim@sha256:8b4501fe0fe221df663c22e16539f399e89594552f400408303c42f3dd8d0e52 95 | lastState: 96 | terminated: 97 | containerID: docker://ade3b1f8576aa40b97917f8e7a569508b3b6e65b4a8de91ea9c398391ac019b2 98 | exitCode: 0 99 | finishedAt: "2020-03-25T02:07:56Z" 100 | reason: Completed 101 | startedAt: "2020-03-25T02:07:52Z" 102 | name: nginx 103 | ready: true 104 | restartCount: 2 105 | started: true 106 | state: 107 | running: 108 | startedAt: "2020-03-25T02:08:18Z" 109 | hostIP: 192.168.99.105 110 | phase: Running 111 | podIP: 172.17.0.4 112 | podIPs: 113 | - ip: 172.17.0.4 114 | qosClass: BestEffort 115 | startTime: "2020-03-24T07:52:28Z" 116 | -------------------------------------------------------------------------------- /tests/artifacts/pod-missing-pvc.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/web-0 -n test1, created 1m ago by StatefulSet/web Pending BestEffort 3 | Failed: Pod could not be scheduled 4 | Stalled: PodUnschedulable, Pod could not be scheduled 5 | Not PodScheduled -> Not Initialized -> Not ContainersReady -> Not Ready 6 | PodScheduled Unschedulable, error while running "VolumeBinding" filter plugin for pod "web-0": pod has unbound immediate PersistentVolumeClaims for 1m 7 | -------------------------------------------------------------------------------- /tests/artifacts/pod-missing-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:27:16Z" 5 | generateName: web- 6 | labels: 7 | app: nginx 8 | controller-revision-hash: web-6596ffb49b 9 | 
statefulset.kubernetes.io/pod-name: web-0 10 | name: web-0 11 | namespace: test1 12 | ownerReferences: 13 | - apiVersion: apps/v1 14 | blockOwnerDeletion: true 15 | controller: true 16 | kind: StatefulSet 17 | name: web 18 | uid: 5aa22e86-29df-4fc3-a75a-bc97f3cc619e 19 | resourceVersion: "349717" 20 | selfLink: /api/v1/namespaces/test1/pods/web-0 21 | uid: 87e30944-badb-466b-a6f9-310d35bd48d1 22 | spec: 23 | containers: 24 | - image: k8s.gcr.io/nginx-slim:0.8 25 | imagePullPolicy: IfNotPresent 26 | name: nginx 27 | ports: 28 | - containerPort: 80 29 | name: web 30 | protocol: TCP 31 | resources: {} 32 | terminationMessagePath: /dev/termination-log 33 | terminationMessagePolicy: File 34 | volumeMounts: 35 | - mountPath: /usr/share/nginx/html 36 | name: www 37 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 38 | name: default-token-xqj2x 39 | readOnly: true 40 | dnsPolicy: ClusterFirst 41 | enableServiceLinks: true 42 | hostname: web-0 43 | priority: 0 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | serviceAccount: default 48 | serviceAccountName: default 49 | subdomain: nginx 50 | terminationGracePeriodSeconds: 10 51 | tolerations: 52 | - effect: NoExecute 53 | key: node.kubernetes.io/not-ready 54 | operator: Exists 55 | tolerationSeconds: 300 56 | - effect: NoExecute 57 | key: node.kubernetes.io/unreachable 58 | operator: Exists 59 | tolerationSeconds: 300 60 | volumes: 61 | - name: www 62 | persistentVolumeClaim: 63 | claimName: www-web-0 64 | - name: default-token-xqj2x 65 | secret: 66 | defaultMode: 420 67 | secretName: default-token-xqj2x 68 | status: 69 | conditions: 70 | - lastProbeTime: null 71 | lastTransitionTime: "2020-03-18T13:27:16Z" 72 | message: 'error while running "VolumeBinding" filter plugin for pod "web-0": pod 73 | has unbound immediate PersistentVolumeClaims' 74 | reason: Unschedulable 75 | status: "False" 76 | type: PodScheduled 77 | phase: Pending 78 | qosClass: BestEffort 79 | 
-------------------------------------------------------------------------------- /tests/artifacts/pod-non-existing-image.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/missing-image-755c8c54f7-26v4c -n test1, created 1m ago by ReplicaSet/missing-image-755c8c54f7 Pending BestEffort 3 | InProgress: Pod is in the Pending phase 4 | Reconciling: PodPending, Pod is in the Pending phase 5 | PodScheduled -> Initialized -> Not ContainersReady -> Not Ready 6 | Ready ContainersNotReady, containers with unready status: [missing-image] for 1m 7 | ContainersReady ContainersNotReady, containers with unready status: [missing-image] for 1m 8 | Containers: 9 | missing-image (this-image-doesnt-exist) Waiting ImagePullBackOff: Back-off pulling image "this-image-doesnt-exist" 10 | -------------------------------------------------------------------------------- /tests/artifacts/pod-non-existing-image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:07:42Z" 5 | generateName: missing-image-755c8c54f7- 6 | labels: 7 | pod-template-hash: 755c8c54f7 8 | run: missing-image 9 | name: missing-image-755c8c54f7-26v4c 10 | namespace: test1 11 | ownerReferences: 12 | - apiVersion: apps/v1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: ReplicaSet 16 | name: missing-image-755c8c54f7 17 | uid: 0a872235-2667-46ad-9281-5be395c7e95f 18 | resourceVersion: "347866" 19 | selfLink: /api/v1/namespaces/test1/pods/missing-image-755c8c54f7-26v4c 20 | uid: fa2831ed-9234-415c-8da0-287e9eaa755b 21 | spec: 22 | containers: 23 | - image: this-image-doesnt-exist 24 | imagePullPolicy: Always 25 | name: missing-image 26 | resources: {} 27 | terminationMessagePath: /dev/termination-log 28 | terminationMessagePolicy: File 29 | volumeMounts: 30 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 31 | name: default-token-xqj2x 
32 | readOnly: true 33 | dnsPolicy: ClusterFirst 34 | enableServiceLinks: true 35 | nodeName: minikube 36 | priority: 0 37 | restartPolicy: Always 38 | schedulerName: default-scheduler 39 | securityContext: {} 40 | serviceAccount: default 41 | serviceAccountName: default 42 | terminationGracePeriodSeconds: 30 43 | tolerations: 44 | - effect: NoExecute 45 | key: node.kubernetes.io/not-ready 46 | operator: Exists 47 | tolerationSeconds: 300 48 | - effect: NoExecute 49 | key: node.kubernetes.io/unreachable 50 | operator: Exists 51 | tolerationSeconds: 300 52 | volumes: 53 | - name: default-token-xqj2x 54 | secret: 55 | defaultMode: 420 56 | secretName: default-token-xqj2x 57 | status: 58 | conditions: 59 | - lastProbeTime: null 60 | lastTransitionTime: "2020-03-18T13:07:42Z" 61 | status: "True" 62 | type: Initialized 63 | - lastProbeTime: null 64 | lastTransitionTime: "2020-03-18T13:07:42Z" 65 | message: 'containers with unready status: [missing-image]' 66 | reason: ContainersNotReady 67 | status: "False" 68 | type: Ready 69 | - lastProbeTime: null 70 | lastTransitionTime: "2020-03-18T13:07:42Z" 71 | message: 'containers with unready status: [missing-image]' 72 | reason: ContainersNotReady 73 | status: "False" 74 | type: ContainersReady 75 | - lastProbeTime: null 76 | lastTransitionTime: "2020-03-18T13:07:42Z" 77 | status: "True" 78 | type: PodScheduled 79 | containerStatuses: 80 | - image: this-image-doesnt-exist 81 | imageID: "" 82 | lastState: {} 83 | name: missing-image 84 | ready: false 85 | restartCount: 0 86 | started: false 87 | state: 88 | waiting: 89 | message: Back-off pulling image "this-image-doesnt-exist" 90 | reason: ImagePullBackOff 91 | hostIP: 192.168.99.102 92 | phase: Pending 93 | podIP: 172.17.0.4 94 | podIPs: 95 | - ip: 172.17.0.4 96 | qosClass: BestEffort 97 | startTime: "2020-03-18T13:07:42Z" 98 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending-scheduled.out: 
-------------------------------------------------------------------------------- 1 | 2 | Pod/hello-1584492840-kqf62 -n default, created 1m ago by Job/hello-1584492840 Pending BestEffort 3 | InProgress: Pod is in the Pending phase 4 | Reconciling: PodPending, Pod is in the Pending phase 5 | PodScheduled -> Not Initialized -> Not ContainersReady -> Not Ready 6 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending-scheduled.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:54:06Z" 5 | generateName: hello-1584492840- 6 | labels: 7 | controller-uid: caad4bc2-2124-4e32-9240-c9caf90643fa 8 | job-name: hello-1584492840 9 | name: hello-1584492840-kqf62 10 | namespace: default 11 | ownerReferences: 12 | - apiVersion: batch/v1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: Job 16 | name: hello-1584492840 17 | uid: caad4bc2-2124-4e32-9240-c9caf90643fa 18 | resourceVersion: "307055" 19 | selfLink: /api/v1/namespaces/default/pods/hello-1584492840-kqf62 20 | uid: 7bfad5cb-67f0-41c9-b480-0af4342450ff 21 | spec: 22 | containers: 23 | - args: 24 | - /bin/sh 25 | - -c 26 | - date; echo Hello from the Kubernetes cluster 27 | image: busybox 28 | imagePullPolicy: Always 29 | name: hello 30 | resources: {} 31 | terminationMessagePath: /dev/termination-log 32 | terminationMessagePolicy: File 33 | volumeMounts: 34 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 35 | name: default-token-5bc8k 36 | readOnly: true 37 | dnsPolicy: ClusterFirst 38 | enableServiceLinks: true 39 | nodeName: minikube 40 | priority: 0 41 | restartPolicy: OnFailure 42 | schedulerName: default-scheduler 43 | securityContext: {} 44 | serviceAccount: default 45 | serviceAccountName: default 46 | terminationGracePeriodSeconds: 30 47 | tolerations: 48 | - effect: NoExecute 49 | key: node.kubernetes.io/not-ready 50 | 
operator: Exists 51 | tolerationSeconds: 300 52 | - effect: NoExecute 53 | key: node.kubernetes.io/unreachable 54 | operator: Exists 55 | tolerationSeconds: 300 56 | volumes: 57 | - name: default-token-5bc8k 58 | secret: 59 | defaultMode: 420 60 | secretName: default-token-5bc8k 61 | status: 62 | conditions: 63 | - lastProbeTime: null 64 | lastTransitionTime: "2020-03-18T00:54:06Z" 65 | status: "True" 66 | type: PodScheduled 67 | phase: Pending 68 | qosClass: BestEffort 69 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending-waiting-container.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/hello-1584492840-kqf62 -n default, created 1m ago by Job/hello-1584492840 Pending BestEffort 3 | InProgress: Pod is in the Pending phase 4 | Reconciling: PodPending, Pod is in the Pending phase 5 | PodScheduled -> Initialized -> Not ContainersReady -> Not Ready 6 | Ready ContainersNotReady, containers with unready status: [hello] for 1m 7 | ContainersReady ContainersNotReady, containers with unready status: [hello] for 1m 8 | Containers: 9 | hello (busybox) Waiting ContainerCreating 10 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending-waiting-container.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:54:06Z" 5 | generateName: hello-1584492840- 6 | labels: 7 | controller-uid: caad4bc2-2124-4e32-9240-c9caf90643fa 8 | job-name: hello-1584492840 9 | name: hello-1584492840-kqf62 10 | namespace: default 11 | ownerReferences: 12 | - apiVersion: batch/v1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: Job 16 | name: hello-1584492840 17 | uid: caad4bc2-2124-4e32-9240-c9caf90643fa 18 | resourceVersion: "307059" 19 | selfLink: /api/v1/namespaces/default/pods/hello-1584492840-kqf62 20 | 
uid: 7bfad5cb-67f0-41c9-b480-0af4342450ff 21 | spec: 22 | containers: 23 | - args: 24 | - /bin/sh 25 | - -c 26 | - date; echo Hello from the Kubernetes cluster 27 | image: busybox 28 | imagePullPolicy: Always 29 | name: hello 30 | resources: {} 31 | terminationMessagePath: /dev/termination-log 32 | terminationMessagePolicy: File 33 | volumeMounts: 34 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 35 | name: default-token-5bc8k 36 | readOnly: true 37 | dnsPolicy: ClusterFirst 38 | enableServiceLinks: true 39 | nodeName: minikube 40 | priority: 0 41 | restartPolicy: OnFailure 42 | schedulerName: default-scheduler 43 | securityContext: {} 44 | serviceAccount: default 45 | serviceAccountName: default 46 | terminationGracePeriodSeconds: 30 47 | tolerations: 48 | - effect: NoExecute 49 | key: node.kubernetes.io/not-ready 50 | operator: Exists 51 | tolerationSeconds: 300 52 | - effect: NoExecute 53 | key: node.kubernetes.io/unreachable 54 | operator: Exists 55 | tolerationSeconds: 300 56 | volumes: 57 | - name: default-token-5bc8k 58 | secret: 59 | defaultMode: 420 60 | secretName: default-token-5bc8k 61 | status: 62 | conditions: 63 | - lastProbeTime: null 64 | lastTransitionTime: "2020-03-18T00:54:06Z" 65 | status: "True" 66 | type: Initialized 67 | - lastProbeTime: null 68 | lastTransitionTime: "2020-03-18T00:54:06Z" 69 | message: 'containers with unready status: [hello]' 70 | reason: ContainersNotReady 71 | status: "False" 72 | type: Ready 73 | - lastProbeTime: null 74 | lastTransitionTime: "2020-03-18T00:54:06Z" 75 | message: 'containers with unready status: [hello]' 76 | reason: ContainersNotReady 77 | status: "False" 78 | type: ContainersReady 79 | - lastProbeTime: null 80 | lastTransitionTime: "2020-03-18T00:54:06Z" 81 | status: "True" 82 | type: PodScheduled 83 | containerStatuses: 84 | - image: busybox 85 | imageID: "" 86 | lastState: {} 87 | name: hello 88 | ready: false 89 | restartCount: 0 90 | started: false 91 | state: 92 | waiting: 93 | 
reason: ContainerCreating 94 | hostIP: 192.168.99.102 95 | phase: Pending 96 | qosClass: BestEffort 97 | startTime: "2020-03-18T00:54:06Z" 98 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/hello-1584492840-kqf62 -n default, created 1m ago by Job/hello-1584492840 Pending BestEffort 3 | InProgress: Pod is in the Pending phase 4 | Reconciling: PodPending, Pod is in the Pending phase 5 | Not PodScheduled -> Not Initialized -> Not ContainersReady -> Not Ready 6 | -------------------------------------------------------------------------------- /tests/artifacts/pod-pending.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T00:54:06Z" 5 | generateName: hello-1584492840- 6 | labels: 7 | controller-uid: caad4bc2-2124-4e32-9240-c9caf90643fa 8 | job-name: hello-1584492840 9 | name: hello-1584492840-kqf62 10 | namespace: default 11 | ownerReferences: 12 | - apiVersion: batch/v1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: Job 16 | name: hello-1584492840 17 | uid: caad4bc2-2124-4e32-9240-c9caf90643fa 18 | resourceVersion: "307054" 19 | selfLink: /api/v1/namespaces/default/pods/hello-1584492840-kqf62 20 | uid: 7bfad5cb-67f0-41c9-b480-0af4342450ff 21 | spec: 22 | containers: 23 | - args: 24 | - /bin/sh 25 | - -c 26 | - date; echo Hello from the Kubernetes cluster 27 | image: busybox 28 | imagePullPolicy: Always 29 | name: hello 30 | resources: {} 31 | terminationMessagePath: /dev/termination-log 32 | terminationMessagePolicy: File 33 | volumeMounts: 34 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 35 | name: default-token-5bc8k 36 | readOnly: true 37 | dnsPolicy: ClusterFirst 38 | enableServiceLinks: true 39 | priority: 0 40 | restartPolicy: OnFailure 41 | schedulerName: 
default-scheduler 42 | securityContext: {} 43 | serviceAccount: default 44 | serviceAccountName: default 45 | terminationGracePeriodSeconds: 30 46 | tolerations: 47 | - effect: NoExecute 48 | key: node.kubernetes.io/not-ready 49 | operator: Exists 50 | tolerationSeconds: 300 51 | - effect: NoExecute 52 | key: node.kubernetes.io/unreachable 53 | operator: Exists 54 | tolerationSeconds: 300 55 | volumes: 56 | - name: default-token-5bc8k 57 | secret: 58 | defaultMode: 420 59 | secretName: default-token-5bc8k 60 | status: 61 | phase: Pending 62 | qosClass: BestEffort 63 | -------------------------------------------------------------------------------- /tests/artifacts/pod-standalone-ineractive.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/test-pod -n test1, created 1m ago Running BestEffort 3 | Current: Pod is Ready 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Standalone POD, interactive with attached TTY. 6 | Containers: 7 | test-pod (k8s.gcr.io/pause:3.1) Running for 1m and Ready 8 | -------------------------------------------------------------------------------- /tests/artifacts/pod-standalone-ineractive.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T02:01:52Z" 5 | labels: 6 | run: test-pod 7 | name: test-pod 8 | namespace: test1 9 | resourceVersion: "316517" 10 | selfLink: /api/v1/namespaces/test1/pods/test-pod 11 | uid: 6a9dc1eb-71da-43f4-85b1-822156f3a37e 12 | spec: 13 | containers: 14 | - image: k8s.gcr.io/pause:3.1 15 | imagePullPolicy: IfNotPresent 16 | name: test-pod 17 | resources: {} 18 | stdin: true 19 | stdinOnce: true 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | tty: true 23 | volumeMounts: 24 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 25 | name: default-token-mq57w 26 | readOnly: true 27 | 
dnsPolicy: ClusterFirst 28 | enableServiceLinks: true 29 | nodeName: minikube 30 | priority: 0 31 | restartPolicy: Never 32 | schedulerName: default-scheduler 33 | securityContext: {} 34 | serviceAccount: default 35 | serviceAccountName: default 36 | terminationGracePeriodSeconds: 30 37 | tolerations: 38 | - effect: NoExecute 39 | key: node.kubernetes.io/not-ready 40 | operator: Exists 41 | tolerationSeconds: 300 42 | - effect: NoExecute 43 | key: node.kubernetes.io/unreachable 44 | operator: Exists 45 | tolerationSeconds: 300 46 | volumes: 47 | - name: default-token-mq57w 48 | secret: 49 | defaultMode: 420 50 | secretName: default-token-mq57w 51 | status: 52 | conditions: 53 | - lastProbeTime: null 54 | lastTransitionTime: "2020-03-18T02:01:52Z" 55 | status: "True" 56 | type: Initialized 57 | - lastProbeTime: null 58 | lastTransitionTime: "2020-03-18T02:01:55Z" 59 | status: "True" 60 | type: Ready 61 | - lastProbeTime: null 62 | lastTransitionTime: "2020-03-18T02:01:55Z" 63 | status: "True" 64 | type: ContainersReady 65 | - lastProbeTime: null 66 | lastTransitionTime: "2020-03-18T02:01:52Z" 67 | status: "True" 68 | type: PodScheduled 69 | containerStatuses: 70 | - containerID: docker://b930dcacd93843e5144bd11fe6a225d30dd3c23f5a1e8c190ce4a22d0150a1f3 71 | image: k8s.gcr.io/pause:3.1 72 | imageID: docker://sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e 73 | lastState: {} 74 | name: test-pod 75 | ready: true 76 | restartCount: 0 77 | started: true 78 | state: 79 | running: 80 | startedAt: "2020-03-18T02:01:54Z" 81 | hostIP: 192.168.99.102 82 | phase: Running 83 | podIP: 172.17.0.4 84 | podIPs: 85 | - ip: 172.17.0.4 86 | qosClass: BestEffort 87 | startTime: "2020-03-18T02:01:52Z" 88 | -------------------------------------------------------------------------------- /tests/artifacts/pod-standalone.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/test-pod -n test1, created 1m ago Running BestEffort 
3 | Current: Pod is Ready 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Standalone POD. 6 | Containers: 7 | test-pod (k8s.gcr.io/pause:3.1) Running for 1m and Ready 8 | -------------------------------------------------------------------------------- /tests/artifacts/pod-standalone.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2020-03-18T02:05:46Z" 5 | labels: 6 | run: test-pod 7 | name: test-pod 8 | namespace: test1 9 | resourceVersion: "317040" 10 | selfLink: /api/v1/namespaces/test1/pods/test-pod 11 | uid: 23498768-c86b-4102-a660-139ed9044bc1 12 | spec: 13 | containers: 14 | - image: k8s.gcr.io/pause:3.1 15 | imagePullPolicy: IfNotPresent 16 | name: test-pod 17 | resources: {} 18 | terminationMessagePath: /dev/termination-log 19 | terminationMessagePolicy: File 20 | volumeMounts: 21 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 22 | name: default-token-mq57w 23 | readOnly: true 24 | dnsPolicy: ClusterFirst 25 | enableServiceLinks: true 26 | nodeName: minikube 27 | priority: 0 28 | restartPolicy: Never 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-mq57w 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-mq57w 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: "2020-03-18T02:05:46Z" 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: "2020-03-18T02:05:47Z" 56 | status: "True" 57 | type: Ready 58 | - lastProbeTime: null 59 | 
lastTransitionTime: "2020-03-18T02:05:47Z" 60 | status: "True" 61 | type: ContainersReady 62 | - lastProbeTime: null 63 | lastTransitionTime: "2020-03-18T02:05:46Z" 64 | status: "True" 65 | type: PodScheduled 66 | containerStatuses: 67 | - containerID: docker://51d582ef0ff5860e1ea82ab262c5546e457dbb88fc8bd6daadf685ee478f2c67 68 | image: k8s.gcr.io/pause:3.1 69 | imageID: docker://sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e 70 | lastState: {} 71 | name: test-pod 72 | ready: true 73 | restartCount: 0 74 | started: true 75 | state: 76 | running: 77 | startedAt: "2020-03-18T02:05:47Z" 78 | hostIP: 192.168.99.102 79 | phase: Running 80 | podIP: 172.17.0.4 81 | podIPs: 82 | - ip: 172.17.0.4 83 | qosClass: BestEffort 84 | startTime: "2020-03-18T02:05:46Z" 85 | -------------------------------------------------------------------------------- /tests/artifacts/pod-with-metrics-and-events.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/test-7d7bf58f7d-pvk2s -n test1, created 1m ago by ReplicaSet/test-7d7bf58f7d Running Burstable 3 | Current: Pod is Ready 4 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 5 | Containers: 6 | test (k8s.gcr.io/pause:3.1) Running for 1m and Ready 7 | -------------------------------------------------------------------------------- /tests/artifacts/rs-all-replicas-ready.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by Deployment/httpbin-deployment, gen:1 3 | Current: ReplicaSet is available. 
Replicas: 3 4 | -------------------------------------------------------------------------------- /tests/artifacts/rs-all-replicas-ready.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 1 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "311460" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 3 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | terminationGracePeriodSeconds: 30 53 | status: 54 | availableReplicas: 3 55 | fullyLabeledReplicas: 3 56 | observedGeneration: 1 57 | readyReplicas: 3 58 | replicas: 3 59 | -------------------------------------------------------------------------------- /tests/artifacts/rs-no-ready-replicas.out: 
-------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by Deployment/httpbin-deployment, gen:1 3 | InProgress: Available: 0/3 4 | Reconciling: LessAvailable, Available: 0/3 5 | Outage: ReplicaSet has no Ready replicas. 6 | -------------------------------------------------------------------------------- /tests/artifacts/rs-no-ready-replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 1 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "311344" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 3 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | 
terminationGracePeriodSeconds: 30 53 | status: 54 | fullyLabeledReplicas: 3 55 | observedGeneration: 1 56 | replicas: 3 57 | -------------------------------------------------------------------------------- /tests/artifacts/rs-non-existing-image.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/missing-image-755c8c54f7 -n test1, created 1m ago by Deployment/missing-image, gen:1 3 | InProgress: Available: 0/1 4 | Reconciling: LessAvailable, Available: 0/1 5 | Outage: ReplicaSet has no Ready replicas. 6 | -------------------------------------------------------------------------------- /tests/artifacts/rs-non-existing-image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "1" 6 | deployment.kubernetes.io/max-replicas: "2" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T13:07:42Z" 9 | generation: 1 10 | labels: 11 | pod-template-hash: 755c8c54f7 12 | run: missing-image 13 | name: missing-image-755c8c54f7 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: missing-image 21 | uid: 4d11ce88-1f23-400d-81c2-ed4f8ac10faa 22 | resourceVersion: "347026" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/missing-image-755c8c54f7 24 | uid: 0a872235-2667-46ad-9281-5be395c7e95f 25 | spec: 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 755c8c54f7 30 | run: missing-image 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 755c8c54f7 36 | run: missing-image 37 | spec: 38 | containers: 39 | - image: this-image-doesnt-exist 40 | imagePullPolicy: Always 41 | name: missing-image 42 | resources: {} 43 | terminationMessagePath: /dev/termination-log 44 | 
terminationMessagePolicy: File 45 | dnsPolicy: ClusterFirst 46 | restartPolicy: Always 47 | schedulerName: default-scheduler 48 | securityContext: {} 49 | terminationGracePeriodSeconds: 30 50 | status: 51 | fullyLabeledReplicas: 1 52 | observedGeneration: 1 53 | replicas: 1 54 | -------------------------------------------------------------------------------- /tests/artifacts/rs-not-all-replicas-ready.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by Deployment/httpbin-deployment, gen:1 3 | InProgress: Available: 1/3 4 | Reconciling: LessAvailable, Available: 1/3 5 | -------------------------------------------------------------------------------- /tests/artifacts/rs-not-all-replicas-ready.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 1 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "311439" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 3 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: 
kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | terminationGracePeriodSeconds: 30 53 | status: 54 | availableReplicas: 1 55 | fullyLabeledReplicas: 3 56 | observedGeneration: 1 57 | readyReplicas: 1 58 | replicas: 3 59 | -------------------------------------------------------------------------------- /tests/artifacts/rs-ongoing-rollout.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by Deployment/httpbin-deployment, gen:2 3 | Current: ReplicaSet is available. Replicas: 2 4 | Ongoing rollout, check Owner Reference resources. 5 | -------------------------------------------------------------------------------- /tests/artifacts/rs-ongoing-rollout.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 2 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "313358" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 2 27 | selector: 28 | 
matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | terminationGracePeriodSeconds: 30 53 | status: 54 | availableReplicas: 2 55 | fullyLabeledReplicas: 2 56 | observedGeneration: 2 57 | readyReplicas: 2 58 | replicas: 2 59 | -------------------------------------------------------------------------------- /tests/artifacts/rs-replicas-0.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by Deployment/httpbin-deployment, gen:1 3 | InProgress: Labelled: 0/3 4 | Reconciling: LessLabelled, Labelled: 0/3 5 | Outage: ReplicaSet has no Ready replicas. 
6 | -------------------------------------------------------------------------------- /tests/artifacts/rs-replicas-0.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 1 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "311324" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 3 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | terminationGracePeriodSeconds: 30 53 | status: 54 | replicas: 0 55 | -------------------------------------------------------------------------------- /tests/artifacts/rs-superseeded.out: -------------------------------------------------------------------------------- 1 | 2 | ReplicaSet/httpbin-deployment-79f6dfbb9 -n test1, created 1m ago by 
Deployment/httpbin-deployment, gen:4 3 | Current: ReplicaSet is available. Replicas: 0 4 | Old: This ReplicaSet is likely replaced by a new one, check Owner Reference resources. 5 | -------------------------------------------------------------------------------- /tests/artifacts/rs-superseeded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/desired-replicas: "3" 6 | deployment.kubernetes.io/max-replicas: "4" 7 | deployment.kubernetes.io/revision: "1" 8 | creationTimestamp: "2020-03-18T01:24:09Z" 9 | generation: 4 10 | labels: 11 | pod-template-hash: 79f6dfbb9 12 | run: httpbin-deployment 13 | name: httpbin-deployment-79f6dfbb9 14 | namespace: test1 15 | ownerReferences: 16 | - apiVersion: apps/v1 17 | blockOwnerDeletion: true 18 | controller: true 19 | kind: Deployment 20 | name: httpbin-deployment 21 | uid: 4feef9ce-7e6d-4a28-a973-7ea3c0a49d6f 22 | resourceVersion: "313424" 23 | selfLink: /apis/apps/v1/namespaces/test1/replicasets/httpbin-deployment-79f6dfbb9 24 | uid: c112a8f1-fea8-4d23-a6eb-4ece01ea3ec6 25 | spec: 26 | replicas: 0 27 | selector: 28 | matchLabels: 29 | pod-template-hash: 79f6dfbb9 30 | run: httpbin-deployment 31 | template: 32 | metadata: 33 | creationTimestamp: null 34 | labels: 35 | pod-template-hash: 79f6dfbb9 36 | run: httpbin-deployment 37 | spec: 38 | containers: 39 | - image: kennethreitz/httpbin 40 | imagePullPolicy: Always 41 | name: httpbin-deployment 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | resources: {} 46 | terminationMessagePath: /dev/termination-log 47 | terminationMessagePolicy: File 48 | dnsPolicy: ClusterFirst 49 | restartPolicy: Always 50 | schedulerName: default-scheduler 51 | securityContext: {} 52 | terminationGracePeriodSeconds: 30 53 | status: 54 | observedGeneration: 4 55 | replicas: 0 56 | 
-------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-missing-endpoint.out: -------------------------------------------------------------------------------- 1 | 2 | Service/httpbin-deployment -n test1, created 1m ago 3 | Current: Service is ready 4 | Missing Endpoint: Service has no matching endpoint. 5 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-missing-endpoint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: "2020-03-18T01:24:09Z" 5 | name: httpbin-deployment 6 | namespace: test1 7 | resourceVersion: "311329" 8 | selfLink: /api/v1/namespaces/test1/services/httpbin-deployment 9 | uid: 5edbf6d9-eb5b-4062-927a-151f604a07f6 10 | spec: 11 | clusterIP: 10.106.38.176 12 | ports: 13 | - port: 80 14 | protocol: TCP 15 | targetPort: 80 16 | selector: 17 | run: httpbin-deployment 18 | sessionAffinity: None 19 | type: ClusterIP 20 | status: 21 | loadBalancer: {} 22 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-multiport-with-endpoints.out: -------------------------------------------------------------------------------- 1 | 2 | Service/kube-dns -n kube-system, created 1m ago 3 | Current: Service is ready 4 | Missing Endpoint: Service has no matching endpoint. 
5 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-multiport-with-endpoints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/port: "9153" 6 | prometheus.io/scrape: "true" 7 | creationTimestamp: "2020-03-23T14:51:05Z" 8 | labels: 9 | k8s-app: kube-dns 10 | kubernetes.io/cluster-service: "true" 11 | kubernetes.io/name: KubeDNS 12 | name: kube-dns 13 | namespace: kube-system 14 | resourceVersion: "177" 15 | selfLink: /api/v1/namespaces/kube-system/services/kube-dns 16 | uid: 07ae1351-0df2-4ea5-b7d3-68fe83790681 17 | spec: 18 | clusterIP: 10.96.0.10 19 | ports: 20 | - name: dns 21 | port: 53 22 | protocol: UDP 23 | targetPort: 53 24 | - name: dns-tcp 25 | port: 53 26 | protocol: TCP 27 | targetPort: 53 28 | - name: metrics 29 | port: 9153 30 | protocol: TCP 31 | targetPort: 9153 32 | selector: 33 | k8s-app: kube-dns 34 | sessionAffinity: None 35 | type: ClusterIP 36 | status: 37 | loadBalancer: {} 38 | endpoint: 39 | apiVersion: v1 40 | kind: Endpoints 41 | metadata: 42 | annotations: 43 | endpoints.kubernetes.io/last-change-trigger-time: "2020-03-23T14:51:05Z" 44 | creationTimestamp: "2020-03-23T14:51:13Z" 45 | labels: 46 | k8s-app: kube-dns 47 | kubernetes.io/cluster-service: "true" 48 | kubernetes.io/name: KubeDNS 49 | name: kube-dns 50 | namespace: kube-system 51 | resourceVersion: "323847" 52 | selfLink: /api/v1/namespaces/kube-system/endpoints/kube-dns 53 | uid: f2dd279a-a79f-4097-99ef-e3c766a4221a 54 | subsets: 55 | - addresses: 56 | - ip: 172.17.0.5 57 | nodeName: minikube 58 | targetRef: 59 | kind: Pod 60 | name: coredns-6955765f44-8vjw8 61 | namespace: kube-system 62 | resourceVersion: "323806" 63 | uid: b5fba922-6dcb-4a8d-8b57-f7c01f027609 64 | - ip: 172.17.0.7 65 | nodeName: minikube 66 | targetRef: 67 | kind: Pod 68 | name: coredns-6955765f44-knpzb 69 | namespace: 
kube-system 70 | resourceVersion: "323809" 71 | uid: 0693c3e6-1fe4-4514-8013-53ddb1d8001b 72 | ports: 73 | - name: dns 74 | port: 53 75 | protocol: UDP 76 | - name: dns-tcp 77 | port: 53 78 | protocol: TCP 79 | - name: metrics 80 | port: 9153 81 | protocol: TCP 82 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-with-endpoint.out: -------------------------------------------------------------------------------- 1 | 2 | Service/test -n test1, created 1m ago 3 | Current: Service is ready 4 | Missing Endpoint: Service has no matching endpoint. 5 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-with-endpoint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: "2020-03-28T22:39:40Z" 5 | name: test 6 | namespace: test1 7 | resourceVersion: "336925" 8 | selfLink: /api/v1/namespaces/test1/services/test 9 | uid: 333cc578-e563-429e-b246-f8098a99aa67 10 | spec: 11 | clusterIP: 10.106.7.199 12 | ports: 13 | - port: 80 14 | protocol: TCP 15 | targetPort: 80 16 | selector: 17 | run: test 18 | sessionAffinity: None 19 | type: ClusterIP 20 | status: 21 | loadBalancer: {} 22 | endpoint: 23 | apiVersion: v1 24 | kind: Endpoints 25 | metadata: 26 | annotations: 27 | endpoints.kubernetes.io/last-change-trigger-time: "2020-03-28T22:39:42Z" 28 | creationTimestamp: "2020-03-28T22:39:40Z" 29 | name: test 30 | namespace: test1 31 | resourceVersion: "336942" 32 | selfLink: /api/v1/namespaces/test1/endpoints/test 33 | uid: 0e965c84-5d52-4486-94da-30981db48d24 34 | subsets: 35 | - addresses: 36 | - ip: 172.17.0.8 37 | nodeName: minikube 38 | targetRef: 39 | kind: Pod 40 | name: test-876b99cb7-n9zpp 41 | namespace: test1 42 | resourceVersion: "336941" 43 | uid: a889846c-1d60-4ecb-a9c3-dddce16d9a05 44 | ports: 45 | - port: 80 46 | protocol: TCP 47 | 
-------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-with-no-endpoint.out: -------------------------------------------------------------------------------- 1 | 2 | Service/test -n test1, created 1m ago 3 | Current: Service is ready 4 | Missing Endpoint: Service has no matching endpoint. 5 | -------------------------------------------------------------------------------- /tests/artifacts/service-clusterip-with-no-endpoint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: "2020-03-28T22:39:40Z" 5 | name: test 6 | namespace: test1 7 | resourceVersion: "336925" 8 | selfLink: /api/v1/namespaces/test1/services/test 9 | uid: 333cc578-e563-429e-b246-f8098a99aa67 10 | spec: 11 | clusterIP: 10.106.7.199 12 | ports: 13 | - port: 80 14 | protocol: TCP 15 | targetPort: 80 16 | selector: 17 | run: test 18 | sessionAffinity: None 19 | type: ClusterIP 20 | status: 21 | loadBalancer: {} 22 | endpoint: 23 | apiVersion: v1 24 | kind: Endpoints 25 | metadata: 26 | annotations: 27 | endpoints.kubernetes.io/last-change-trigger-time: "2020-03-28T22:39:42Z" 28 | creationTimestamp: "2020-03-28T22:39:40Z" 29 | name: test 30 | namespace: test1 31 | resourceVersion: "336942" 32 | selfLink: /api/v1/namespaces/test1/endpoints/test 33 | uid: 0e965c84-5d52-4486-94da-30981db48d24 34 | subsets: [] 35 | -------------------------------------------------------------------------------- /tests/artifacts/service-with-not-ready-addresses.out: -------------------------------------------------------------------------------- 1 | 2 | Service/web -n test1, created 1m ago 3 | Current: Service is ready 4 | Missing Endpoint: Service has no matching endpoint. 
5 | -------------------------------------------------------------------------------- /tests/artifacts/service-with-not-ready-addresses.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: "2020-03-29T12:26:48Z" 5 | labels: 6 | app: web 7 | name: web 8 | namespace: test1 9 | resourceVersion: "363639" 10 | selfLink: /api/v1/namespaces/test1/services/web 11 | uid: 0decf327-5a03-47fc-b72c-5daaac8a4db9 12 | spec: 13 | clusterIP: 10.98.108.181 14 | ports: 15 | - name: 80-80 16 | port: 80 17 | protocol: TCP 18 | targetPort: 80 19 | selector: 20 | app: nginx 21 | sessionAffinity: None 22 | type: ClusterIP 23 | status: 24 | loadBalancer: {} 25 | endpoint: 26 | apiVersion: v1 27 | kind: Endpoints 28 | metadata: 29 | annotations: 30 | endpoints.kubernetes.io/last-change-trigger-time: "2020-03-29T12:30:03Z" 31 | creationTimestamp: "2020-03-29T12:26:48Z" 32 | labels: 33 | app: web 34 | name: web 35 | namespace: test1 36 | resourceVersion: "364028" 37 | selfLink: /api/v1/namespaces/test1/endpoints/web 38 | uid: f9907ef2-9d23-4257-8e6a-0f961793174b 39 | subsets: 40 | - addresses: 41 | - ip: 172.17.0.2 42 | nodeName: minikube 43 | targetRef: 44 | kind: Pod 45 | name: web-0 46 | namespace: test1 47 | resourceVersion: "363287" 48 | uid: a216c1e9-b7c8-4c87-b03e-9057dd9f5a30 49 | - ip: 172.17.0.4 50 | nodeName: minikube 51 | targetRef: 52 | kind: Pod 53 | name: web-1 54 | namespace: test1 55 | resourceVersion: "363297" 56 | uid: 0f79e0c0-967c-4052-8002-85f9609072d1 57 | notReadyAddresses: 58 | - ip: 172.17.0.6 59 | nodeName: minikube 60 | targetRef: 61 | kind: Pod 62 | name: web-2 63 | namespace: test1 64 | resourceVersion: "364027" 65 | uid: 02e518ef-c7a0-4c91-81c9-de48da572405 66 | ports: 67 | - name: 80-80 68 | port: 80 69 | protocol: TCP 70 | -------------------------------------------------------------------------------- /tests/artifacts/sts-inital-rollout-done.out: 
-------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:1 3 | Current: Partition rollout complete. updated: 3 4 | desired:3, existing:3, ready:3, current:3, updated:3 5 | -------------------------------------------------------------------------------- /tests/artifacts/sts-inital-rollout-done.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:32:36Z" 5 | generation: 1 6 | name: web 7 | namespace: test1 8 | resourceVersion: "350661" 9 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 10 | uid: 7d86223a-b192-43cc-a52c-99d91a311d91 11 | spec: 12 | podManagementPolicy: OrderedReady 13 | replicas: 3 14 | revisionHistoryLimit: 10 15 | selector: 16 | matchLabels: 17 | app: nginx 18 | serviceName: nginx 19 | template: 20 | metadata: 21 | creationTimestamp: null 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - image: k8s.gcr.io/nginx-slim:0.8 27 | imagePullPolicy: IfNotPresent 28 | name: nginx 29 | ports: 30 | - containerPort: 80 31 | name: web 32 | protocol: TCP 33 | resources: {} 34 | terminationMessagePath: /dev/termination-log 35 | terminationMessagePolicy: File 36 | volumeMounts: 37 | - mountPath: /usr/share/nginx/html 38 | name: www 39 | dnsPolicy: ClusterFirst 40 | restartPolicy: Always 41 | schedulerName: default-scheduler 42 | securityContext: {} 43 | terminationGracePeriodSeconds: 10 44 | updateStrategy: 45 | rollingUpdate: 46 | partition: 0 47 | type: RollingUpdate 48 | volumeClaimTemplates: 49 | - apiVersion: v1 50 | kind: PersistentVolumeClaim 51 | metadata: 52 | creationTimestamp: null 53 | name: www 54 | spec: 55 | accessModes: 56 | - ReadWriteOnce 57 | resources: 58 | requests: 59 | storage: 1Gi 60 | storageClassName: standard 61 | volumeMode: Filesystem 62 | status: 63 | phase: Pending 64 | status: 65 | collisionCount: 0 66 
| currentReplicas: 3 67 | currentRevision: web-6596ffb49b 68 | observedGeneration: 1 69 | readyReplicas: 3 70 | replicas: 3 71 | updateRevision: web-6596ffb49b 72 | updatedReplicas: 3 73 | -------------------------------------------------------------------------------- /tests/artifacts/sts-new.out: -------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:1 3 | InProgress: Replicas: 0/3 4 | Reconciling: LessReplicas, Replicas: 0/3 5 | desired:3, ready:0, current:0 6 | Outage: StatefulSet has no Ready replicas. 7 | Ongoing rollout: Waiting for statefulset spec update to be observed... 8 | Stuck Rollout?: Still replacing the first Pod, may indicate a stuck rollout. 9 | -------------------------------------------------------------------------------- /tests/artifacts/sts-new.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:21:02Z" 5 | generation: 1 6 | name: web 7 | namespace: test1 8 | resourceVersion: "348839" 9 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 10 | uid: cdcaa1e0-4c22-47f3-b96a-7d5f5489634c 11 | spec: 12 | podManagementPolicy: OrderedReady 13 | replicas: 3 14 | revisionHistoryLimit: 10 15 | selector: 16 | matchLabels: 17 | app: nginx 18 | serviceName: nginx 19 | template: 20 | metadata: 21 | creationTimestamp: null 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - image: k8s.gcr.io/nginx-slim:0.8 27 | imagePullPolicy: IfNotPresent 28 | name: nginx 29 | ports: 30 | - containerPort: 80 31 | name: web 32 | protocol: TCP 33 | resources: {} 34 | terminationMessagePath: /dev/termination-log 35 | terminationMessagePolicy: File 36 | volumeMounts: 37 | - mountPath: /usr/share/nginx/html 38 | name: www 39 | dnsPolicy: ClusterFirst 40 | restartPolicy: Always 41 | schedulerName: default-scheduler 42 | securityContext: {} 43 | 
terminationGracePeriodSeconds: 10 44 | updateStrategy: 45 | rollingUpdate: 46 | partition: 0 47 | type: RollingUpdate 48 | volumeClaimTemplates: 49 | - apiVersion: v1 50 | kind: PersistentVolumeClaim 51 | metadata: 52 | creationTimestamp: null 53 | name: www 54 | spec: 55 | accessModes: 56 | - ReadWriteOnce 57 | resources: 58 | requests: 59 | storage: 1Gi 60 | storageClassName: my-storage-class 61 | volumeMode: Filesystem 62 | status: 63 | phase: Pending 64 | status: 65 | replicas: 0 66 | -------------------------------------------------------------------------------- /tests/artifacts/sts-ongoing-update-rollout-with-diff.out: -------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:2 3 | InProgress: Ready: 2/3 4 | Reconciling: LessReady, Ready: 2/3 5 | desired:3, existing:3, ready:2, current:3 6 | Ongoing rollout: Waiting for 1 pods to be ready... 7 | Stuck Rollout?: Still replacing the first Pod, may indicate a stuck rollout. 
8 | -------------------------------------------------------------------------------- /tests/artifacts/sts-ongoing-update-rollout-with-diff.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:32:36Z" 5 | generation: 2 6 | name: web 7 | namespace: test1 8 | resourceVersion: "351753" 9 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 10 | uid: 7d86223a-b192-43cc-a52c-99d91a311d91 11 | spec: 12 | podManagementPolicy: OrderedReady 13 | replicas: 3 14 | revisionHistoryLimit: 10 15 | selector: 16 | matchLabels: 17 | app: nginx 18 | serviceName: nginx 19 | template: 20 | metadata: 21 | creationTimestamp: null 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - env: 27 | - name: dummy 28 | value: var 29 | image: k8s.gcr.io/nginx-slim:0.8 30 | imagePullPolicy: IfNotPresent 31 | name: nginx 32 | ports: 33 | - containerPort: 80 34 | name: web 35 | protocol: TCP 36 | resources: {} 37 | terminationMessagePath: /dev/termination-log 38 | terminationMessagePolicy: File 39 | volumeMounts: 40 | - mountPath: /usr/share/nginx/html 41 | name: www 42 | dnsPolicy: ClusterFirst 43 | restartPolicy: Always 44 | schedulerName: default-scheduler 45 | securityContext: {} 46 | terminationGracePeriodSeconds: 10 47 | updateStrategy: 48 | rollingUpdate: 49 | partition: 0 50 | type: RollingUpdate 51 | volumeClaimTemplates: 52 | - apiVersion: v1 53 | kind: PersistentVolumeClaim 54 | metadata: 55 | creationTimestamp: null 56 | name: www 57 | spec: 58 | accessModes: 59 | - ReadWriteOnce 60 | resources: 61 | requests: 62 | storage: 1Gi 63 | storageClassName: standard 64 | volumeMode: Filesystem 65 | status: 66 | phase: Pending 67 | status: 68 | collisionCount: 0 69 | currentReplicas: 2 70 | currentRevision: web-6596ffb49b 71 | observedGeneration: 2 72 | readyReplicas: 2 73 | replicas: 3 74 | updateRevision: web-c5987f8d 75 | diff: | 76 | --- 
currentRevision ControllerRevision/web-f96c5b685 2020-03-23 23:12:01 +0000 GMT (4d ago) 77 | +++ updateRevision ControllerRevision/web-7445dd65b5 2020-03-28 22:33:42 +0000 GMT (11s ago) 78 | @@ -4,7 +4,7 @@ 79 | "$patch": "replace", 80 | "metadata": { 81 | "annotations": { 82 | - "newAnnotation": "newAnnotationValue" 83 | + "newAnnotation": "newAnnotationValue2" 84 | }, 85 | "creationTimestamp": null, 86 | "labels": { 87 | -------------------------------------------------------------------------------- /tests/artifacts/sts-ongoing-update-rollout.out: -------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:2 3 | InProgress: Ready: 2/3 4 | Reconciling: LessReady, Ready: 2/3 5 | desired:3, existing:3, ready:2, current:3 6 | Ongoing rollout: Waiting for 1 pods to be ready... 7 | Stuck Rollout?: Still replacing the first Pod, may indicate a stuck rollout. 8 | -------------------------------------------------------------------------------- /tests/artifacts/sts-ongoing-update-rollout.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:32:36Z" 5 | generation: 2 6 | name: web 7 | namespace: test1 8 | resourceVersion: "351753" 9 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 10 | uid: 7d86223a-b192-43cc-a52c-99d91a311d91 11 | spec: 12 | podManagementPolicy: OrderedReady 13 | replicas: 3 14 | revisionHistoryLimit: 10 15 | selector: 16 | matchLabels: 17 | app: nginx 18 | serviceName: nginx 19 | template: 20 | metadata: 21 | creationTimestamp: null 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - env: 27 | - name: dummy 28 | value: var 29 | image: k8s.gcr.io/nginx-slim:0.8 30 | imagePullPolicy: IfNotPresent 31 | name: nginx 32 | ports: 33 | - containerPort: 80 34 | name: web 35 | protocol: TCP 36 | resources: {} 37 | terminationMessagePath: 
/dev/termination-log 38 | terminationMessagePolicy: File 39 | volumeMounts: 40 | - mountPath: /usr/share/nginx/html 41 | name: www 42 | dnsPolicy: ClusterFirst 43 | restartPolicy: Always 44 | schedulerName: default-scheduler 45 | securityContext: {} 46 | terminationGracePeriodSeconds: 10 47 | updateStrategy: 48 | rollingUpdate: 49 | partition: 0 50 | type: RollingUpdate 51 | volumeClaimTemplates: 52 | - apiVersion: v1 53 | kind: PersistentVolumeClaim 54 | metadata: 55 | creationTimestamp: null 56 | name: www 57 | spec: 58 | accessModes: 59 | - ReadWriteOnce 60 | resources: 61 | requests: 62 | storage: 1Gi 63 | storageClassName: standard 64 | volumeMode: Filesystem 65 | status: 66 | phase: Pending 67 | status: 68 | collisionCount: 0 69 | currentReplicas: 2 70 | currentRevision: web-6596ffb49b 71 | observedGeneration: 2 72 | readyReplicas: 2 73 | replicas: 3 74 | updateRevision: web-c5987f8d 75 | -------------------------------------------------------------------------------- /tests/artifacts/sts-stuck-initial-rollout.out: -------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:1 3 | InProgress: Replicas: 1/3 4 | Reconciling: LessReplicas, Replicas: 1/3 5 | desired:3, existing:1, ready:0, current:1, updated:1 6 | Outage: StatefulSet has no Ready replicas. 7 | Stuck Initial Rollout? First rollout not yet progressed. 8 | Ongoing rollout: Waiting for 3 pods to be ready... 9 | Stuck Rollout?: Still replacing the first Pod, may indicate a stuck rollout. 
10 | -------------------------------------------------------------------------------- /tests/artifacts/sts-stuck-initial-rollout.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | creationTimestamp: "2020-03-18T13:21:02Z" 5 | generation: 1 6 | name: web 7 | namespace: test1 8 | resourceVersion: "348844" 9 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 10 | uid: cdcaa1e0-4c22-47f3-b96a-7d5f5489634c 11 | spec: 12 | podManagementPolicy: OrderedReady 13 | replicas: 3 14 | revisionHistoryLimit: 10 15 | selector: 16 | matchLabels: 17 | app: nginx 18 | serviceName: nginx 19 | template: 20 | metadata: 21 | creationTimestamp: null 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - image: k8s.gcr.io/nginx-slim:0.8 27 | imagePullPolicy: IfNotPresent 28 | name: nginx 29 | ports: 30 | - containerPort: 80 31 | name: web 32 | protocol: TCP 33 | resources: {} 34 | terminationMessagePath: /dev/termination-log 35 | terminationMessagePolicy: File 36 | volumeMounts: 37 | - mountPath: /usr/share/nginx/html 38 | name: www 39 | dnsPolicy: ClusterFirst 40 | restartPolicy: Always 41 | schedulerName: default-scheduler 42 | securityContext: {} 43 | terminationGracePeriodSeconds: 10 44 | updateStrategy: 45 | rollingUpdate: 46 | partition: 0 47 | type: RollingUpdate 48 | volumeClaimTemplates: 49 | - apiVersion: v1 50 | kind: PersistentVolumeClaim 51 | metadata: 52 | creationTimestamp: null 53 | name: www 54 | spec: 55 | accessModes: 56 | - ReadWriteOnce 57 | resources: 58 | requests: 59 | storage: 1Gi 60 | storageClassName: my-storage-class 61 | volumeMode: Filesystem 62 | status: 63 | phase: Pending 64 | status: 65 | collisionCount: 0 66 | currentReplicas: 1 67 | currentRevision: web-6596ffb49b 68 | observedGeneration: 1 69 | replicas: 1 70 | updateRevision: web-6596ffb49b 71 | updatedReplicas: 1 72 | 
-------------------------------------------------------------------------------- /tests/artifacts/sts-suspended.out: -------------------------------------------------------------------------------- 1 | 2 | StatefulSet/web -n test1, created 1m ago, gen:4 3 | Current: Partition rollout complete. updated: 0 4 | desired:0, ready:0, current:0 5 | Suspended: Scaled down to 0. 6 | Outage: StatefulSet has no Ready replicas. 7 | -------------------------------------------------------------------------------- /tests/artifacts/sts-suspended.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: | 6 | {"apiVersion":"apps/v1","kind":"StatefulSet","metadata":{"annotations":{},"name":"web","namespace":"test1"},"spec":{"replicas":3,"selector":{"matchLabels":{"app":"nginx"}},"serviceName":"nginx","template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx-slim:0.8","name":"nginx","ports":[{"containerPort":80,"name":"web"}],"volumeMounts":[{"mountPath":"/usr/share/nginx/html","name":"www"}]}],"terminationGracePeriodSeconds":10}},"volumeClaimTemplates":[{"metadata":{"name":"www"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}]}} 7 | creationTimestamp: "2020-03-23T23:08:14Z" 8 | generation: 4 9 | name: web 10 | namespace: test1 11 | resourceVersion: "360748" 12 | selfLink: /apis/apps/v1/namespaces/test1/statefulsets/web 13 | uid: a8392004-fa2e-4081-93c7-19de06e8f96c 14 | spec: 15 | podManagementPolicy: OrderedReady 16 | replicas: 0 17 | revisionHistoryLimit: 10 18 | selector: 19 | matchLabels: 20 | app: nginx 21 | serviceName: nginx 22 | template: 23 | metadata: 24 | annotations: 25 | newAnnotation: newAnnotationValue2 26 | creationTimestamp: null 27 | labels: 28 | app: nginx 29 | spec: 30 | containers: 31 | - image: k8s.gcr.io/nginx-slim:0.8 32 | 
imagePullPolicy: IfNotPresent 33 | name: nginx 34 | ports: 35 | - containerPort: 80 36 | name: web 37 | protocol: TCP 38 | resources: {} 39 | terminationMessagePath: /dev/termination-log 40 | terminationMessagePolicy: File 41 | volumeMounts: 42 | - mountPath: /usr/share/nginx/html 43 | name: www 44 | dnsPolicy: ClusterFirst 45 | restartPolicy: Always 46 | schedulerName: default-scheduler 47 | securityContext: {} 48 | terminationGracePeriodSeconds: 10 49 | updateStrategy: 50 | rollingUpdate: 51 | partition: 0 52 | type: RollingUpdate 53 | volumeClaimTemplates: 54 | - apiVersion: v1 55 | kind: PersistentVolumeClaim 56 | metadata: 57 | creationTimestamp: null 58 | name: www 59 | spec: 60 | accessModes: 61 | - ReadWriteOnce 62 | resources: 63 | requests: 64 | storage: 1Gi 65 | volumeMode: Filesystem 66 | status: 67 | phase: Pending 68 | status: 69 | collisionCount: 0 70 | currentRevision: web-7445dd65b5 71 | observedGeneration: 4 72 | replicas: 0 73 | updateRevision: web-7445dd65b5 74 | -------------------------------------------------------------------------------- /tests/e2e-artifacts/sts-with-ingress.pod.out: -------------------------------------------------------------------------------- 1 | 2 | Pod/sts-with-ingress-0 -n default, created 1m ago by StatefulSet/sts-with-ingress Running BestEffort 3 | Current: Pod is Ready 4 | Managed by sts-with-ingress application 5 | PodScheduled -> Initialized -> ContainersReady -> Ready for 1m 6 | Containers: 7 | sts-with-ingress (registry.k8s.io/pause:3.9) Running for 1m and Ready 8 | Known/recorded manage events: 9 | 1m ago Updated by kube-controller-manager (metadata, spec) 10 | 1m ago Updated by kubelet (status) 11 | Services matching this pod: 12 | Service/sts-with-ingress -n default, created 1m ago, last endpoint change was 1m ago 13 | Current: Service is ready 14 | Ready: Pod/sts-with-ingress-0 -n default on Node/minikube, 1.1.1.1:80/TCP 15 | Known/recorded manage events: 16 | 1m ago Updated by kubectl-client-side-apply 
(metadata, spec) 17 | Ingresses matching this Service: 18 | Ingress/sts-with-ingress -n default, created 1m ago, gen:1 19 | Current: Resource is current 20 | Service/sts-with-ingress:80 has 1 endpoints. 21 | Known/recorded manage events: 22 | 1m ago Updated by kubectl-client-side-apply (metadata, spec) 23 | -------------------------------------------------------------------------------- /tests/e2e-artifacts/sts-with-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: sts-with-ingress 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: sts-with-ingress 10 | template: 11 | metadata: 12 | labels: 13 | app: sts-with-ingress 14 | spec: 15 | containers: 16 | - name: sts-with-ingress 17 | image: registry.k8s.io/pause:3.9 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: sts-with-ingress 23 | spec: 24 | selector: 25 | app: sts-with-ingress 26 | ports: 27 | - protocol: TCP 28 | port: 80 29 | targetPort: 80 30 | --- 31 | apiVersion: networking.k8s.io/v1 32 | kind: Ingress 33 | metadata: 34 | name: sts-with-ingress 35 | spec: 36 | rules: 37 | - host: sts-with-ingress.com 38 | http: 39 | paths: 40 | - path: / 41 | pathType: Prefix 42 | backend: 43 | service: 44 | name: sts-with-ingress 45 | port: 46 | number: 80 47 | --------------------------------------------------------------------------------