├── .github
│   └── workflows
│       ├── lint.yaml
│       ├── release_please.yml
│       └── trdl_releaser.yml
├── .gitignore
├── .golangci.yaml
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── LICENSE
├── Makefile
├── README.md
├── Taskfile.dist.yaml
├── cmd
│   └── kubedog
│       └── main.go
├── contrib
│   └── resource_status_rules.yaml
├── doc
│   ├── cli-1.gif
│   ├── deployment_follow.gif
│   ├── deployment_rollout.gif
│   ├── kubedog-logo.svg
│   └── usage.md
├── go-build.sh
├── go.mod
├── go.sum
├── kubedog.go
├── pkg
│   ├── display
│   │   └── display.go
│   ├── kube
│   │   ├── kube.go
│   │   ├── kube_config_getter.go
│   │   └── vendor.go
│   ├── tracker
│   │   ├── canary
│   │   │   ├── feed.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   ├── controller
│   │   │   └── feed.go
│   │   ├── daemonset
│   │   │   ├── feed.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   ├── debug
│   │   │   └── debug.go
│   │   ├── deployment
│   │   │   ├── feed.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   ├── event
│   │   │   └── informer.go
│   │   ├── generic
│   │   │   ├── common.go
│   │   │   ├── contrib_resource_status_rules.go
│   │   │   ├── contrib_resource_status_rules.schema.json
│   │   │   ├── contrib_resource_status_rules.yaml
│   │   │   ├── event_status.go
│   │   │   ├── feed.go
│   │   │   ├── ready_condition.go
│   │   │   ├── resource_events_watcher.go
│   │   │   ├── resource_state_json_paths.go
│   │   │   ├── resource_state_watcher.go
│   │   │   ├── resource_status.go
│   │   │   └── tracker.go
│   │   ├── indicators
│   │   │   └── indicators.go
│   │   ├── job
│   │   │   ├── feed.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   ├── pod
│   │   │   ├── feed.go
│   │   │   ├── informer.go
│   │   │   ├── probes.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   ├── replicaset
│   │   │   └── informer.go
│   │   ├── resid
│   │   │   └── resource_id.go
│   │   ├── statefulset
│   │   │   ├── feed.go
│   │   │   ├── status.go
│   │   │   └── tracker.go
│   │   └── tracker.go
│   ├── trackers
│   │   ├── dyntracker
│   │   │   ├── dynamic_absence_tracker.go
│   │   │   ├── dynamic_presence_tracker.go
│   │   │   ├── dynamic_readiness_tracker.go
│   │   │   ├── logstore
│   │   │   │   ├── log_line.go
│   │   │   │   ├── log_store.go
│   │   │   │   └── resource_logs.go
│   │   │   ├── statestore
│   │   │   │   ├── absence_task_state.go
│   │   │   │   ├── attribute.go
│   │   │   │   ├── conditions.go
│   │   │   │   ├── error.go
│   │   │   │   ├── event.go
│   │   │   │   ├── presence_task_state.go
│   │   │   │   ├── readiness_task_state.go
│   │   │   │   ├── resource_state.go
│   │   │   │   ├── resource_status.go
│   │   │   │   ├── task_status.go
│   │   │   │   └── task_store.go
│   │   │   └── util
│   │   │       ├── concurrency.go
│   │   │       └── resource.go
│   │   ├── elimination
│   │   │   └── elimination.go
│   │   ├── follow
│   │   │   ├── daemonset.go
│   │   │   ├── deployment.go
│   │   │   ├── job.go
│   │   │   ├── pod.go
│   │   │   └── statefulset.go
│   │   └── rollout
│   │       ├── daemonset.go
│   │       ├── deployment.go
│   │       ├── job.go
│   │       ├── multitrack
│   │       │   ├── canary.go
│   │       │   ├── daemonset.go
│   │       │   ├── deployment.go
│   │       │   ├── generic.go
│   │       │   ├── generic
│   │       │   │   ├── context.go
│   │       │   │   ├── resource.go
│   │       │   │   ├── spec.go
│   │       │   │   └── state.go
│   │       │   ├── job.go
│   │       │   ├── multitrack.go
│   │       │   ├── multitrack_display.go
│   │       │   └── statefulset.go
│   │       ├── pod.go
│   │       └── statefulset.go
│   └── utils
│       ├── color.go
│       ├── controller_utils.go
│       ├── deployment_utils.go
│       ├── events.go
│       ├── file.go
│       ├── json.go
│       ├── pod_utils.go
│       ├── ref.go
│       ├── table.go
│       └── time.go
├── playground
│   ├── multitrack-1
│   │   └── main.go
│   └── table
│       └── main.go
├── scripts
│   └── ci
│       ├── build_release.sh
│       └── build_release_v2.sh
└── trdl.yaml
/.github/workflows/lint.yaml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | jobs:
10 | lint:
11 | name: Lint
12 | runs-on: ubuntu-latest
13 | env:
14 | GOFLAGS: -mod=readonly
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v3
18 |
19 | - name: Set up Go
20 | uses: actions/setup-go@v3
21 | with:
22 | go-version-file: go.mod
23 |
24 | - name: Install linter
25 | run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2
26 |
27 | - name: Lint
28 | run: make lint
29 |
--------------------------------------------------------------------------------
/.github/workflows/release_please.yml:
--------------------------------------------------------------------------------
1 | name: Do release PR or GitHub release
2 | on:
3 | push:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | release-please:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: werf/third-party-release-please-action@werf
12 | with:
13 | release-type: go
14 | token: ${{ secrets.RELEASE_PLEASE_TOKEN }}
15 | release-notes-header: "## Changelog"
16 | release-notes-footer: |
17 | ## Installation
18 | To install `kubedog` we recommend following [these instructions](https://github.com/werf/kubedog#install-kubedog-cli).
19 |
20 | Alternatively, you can download `kubedog` binaries from here:
21 | * [Linux amd64](https://tuf.kubedog.werf.io/targets/releases/{{> version }}/linux-amd64/bin/kubedog) ([PGP signature](https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/linux-amd64/bin/kubedog.sig))
22 | * [Linux arm64](https://tuf.kubedog.werf.io/targets/releases/{{> version }}/linux-arm64/bin/kubedog) ([PGP signature](https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/linux-arm64/bin/kubedog.sig))
23 | * [macOS amd64](https://tuf.kubedog.werf.io/targets/releases/{{> version }}/darwin-amd64/bin/kubedog) ([PGP signature](https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/darwin-amd64/bin/kubedog.sig))
24 | * [macOS arm64](https://tuf.kubedog.werf.io/targets/releases/{{> version }}/darwin-arm64/bin/kubedog) ([PGP signature](https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/darwin-arm64/bin/kubedog.sig))
25 | * [Windows amd64](https://tuf.kubedog.werf.io/targets/releases/{{> version }}/windows-amd64/bin/kubedog.exe) ([PGP signature](https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/windows-amd64/bin/kubedog.exe.sig))
26 |
27 | These binaries are signed with PGP and can be verified with the [kubedog PGP public key](https://werf.io/kubedog.asc). For example, the `kubedog` binary can be downloaded and verified with `gpg` on Linux using these commands:
28 | ```shell
29 | curl -sSLO "https://tuf.kubedog.werf.io/targets/releases/{{> version }}/linux-amd64/bin/kubedog" -O "https://tuf.kubedog.werf.io/targets/signatures/{{> version }}/linux-amd64/bin/kubedog.sig"
30 | curl -sSL https://werf.io/kubedog.asc | gpg --import
31 | gpg --verify kubedog.sig kubedog
32 | ```
33 |
--------------------------------------------------------------------------------
/.github/workflows/trdl_releaser.yml:
--------------------------------------------------------------------------------
1 | name: Trdl releaser
2 | on:
3 | create:
4 |
5 | jobs:
6 | release:
7 | if: ${{ startsWith(github.ref, 'refs/tags/v') }}
8 | name: Perform kubedog release using trdl server
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: Release
12 | uses: werf/trdl-vault-actions/release@main
13 | with:
14 | vault-addr: ${{ secrets.TRDL_VAULT_ADDR }}
15 | project-name: kubedog
16 | git-tag: ${{ github.event.ref }}
17 | vault-auth-method: approle
18 | vault-role-id: ${{ secrets.TRDL_VAULT_ROLE_ID }}
19 | vault-secret-id: ${{ secrets.TRDL_VAULT_SECRET_ID }}
20 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/go
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=go
3 |
4 | ### Go ###
5 | # Binaries for programs and plugins
6 | *.exe
7 | *.exe~
8 | *.dll
9 | *.so
10 | *.dylib
11 | .idea
12 |
13 | # Test binary, built with `go test -c`
14 | *.test
15 |
16 | # Output of the go coverage tool, specifically when used with LiteIDE
17 | *.out
18 |
19 | # Dependency directories (remove the comment below to include it)
20 | # vendor/
21 |
22 | ### Go Patch ###
23 | /vendor/
24 | /Godeps/
25 |
26 | # End of https://www.toptal.com/developers/gitignore/api/go
27 |
28 |
29 |
30 | release-build
31 |
--------------------------------------------------------------------------------
/.golangci.yaml:
--------------------------------------------------------------------------------
1 | run:
2 | timeout: 10m
3 | skip-dirs:
4 | - playground
5 | - doc
6 | - scripts
7 |
8 | linters-settings:
9 | gofumpt:
10 | extra-rules: true
11 | gci:
12 | sections:
13 | - standard
14 | - default
15 | - prefix(github.com/werf/)
16 | gocritic:
17 | disabled-checks:
18 | - ifElseChain
19 | errorlint:
20 | comparison: false
21 | asserts: false
22 |
23 | linters:
24 | disable-all: true
25 | enable:
26 | # Default linters.
27 | - deadcode
28 | - ineffassign
29 | - typecheck
30 | - varcheck
31 |
32 | # Extra linters.
33 | - asciicheck
34 | - bidichk
35 | - bodyclose
36 | - errname
37 | - errorlint
38 | - exportloopref
39 | - gci
40 | - gocritic
41 | - gofumpt
42 | - misspell
43 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, caste, color, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | werf@flant.com.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.1, available at
119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
120 |
121 | Community Impact Guidelines were inspired by
122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
123 |
124 | For answers to common questions about this code of conduct, see the FAQ at
125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available
126 | at [https://www.contributor-covenant.org/translations][translations].
127 |
128 | [homepage]: https://www.contributor-covenant.org
129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
130 | [Mozilla CoC]: https://github.com/mozilla/diversity
131 | [FAQ]: https://www.contributor-covenant.org/faq
132 | [translations]: https://www.contributor-covenant.org/translations
133 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | GOARCH = amd64
2 |
3 | UNAME = $(shell uname -s)
4 |
5 | ifndef OS
6 |     ifeq ($(UNAME), Linux)
7 |         OS = linux
8 |     else ifeq ($(UNAME), Darwin)
9 |         OS = darwin
10 |     endif
11 | endif
12 | 
13 | GOSRC = $(shell find . -type f -name '*.go')
14 | .DEFAULT_GOAL := all
15 | 
16 | .PHONY: fmt
17 | fmt:
18 | 	go mod tidy
19 | 	gci write -s Standard -s Default -s 'Prefix(github.com/werf)' pkg/ cmd/
20 | 	gofumpt -extra -w cmd/ pkg/
21 | 	GOOS=$(OS) GOARCH="$(GOARCH)" golangci-lint run --fix ./...
22 | 
23 | .PHONY: lint
24 | lint:
25 | 	GOOS=$(OS) GOARCH="$(GOARCH)" golangci-lint run ./...
26 | 
27 | .PHONY: build
28 | build:
29 | 	go build github.com/werf/kubedog/cmd/kubedog
30 | 
31 | .PHONY: install
32 | install:
33 | 	go install github.com/werf/kubedog/cmd/kubedog
34 | 
35 | all: fmt lint install
36 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # kubedog
6 |
7 | Kubedog is a library to watch and follow Kubernetes resources in CI/CD deploy pipelines.
8 |
9 | This library is used in the [werf CI/CD tool](https://github.com/werf/werf) to track resources during the deploy process.
10 |
11 | **NOTE:** Kubedog also includes a CLI; however, it provides only a *minimal* interface to the library functions. The CLI was created to exercise library features and for debugging purposes. Currently, we have no plans to improve the CLI further.
12 |
13 | ## Table of Contents
14 | - [Install kubedog CLI](#install-kubedog-cli)
15 | - [Linux/macOS](#linuxmacos)
16 | - [Windows](#windows-powershell)
17 | - [Alternative binary installation](#alternative-binary-installation)
18 | - [Usage](#usage)
19 | - [Community](#community)
20 | - [License](#license)
21 |
22 | ## Install `kubedog` CLI
23 |
24 | ### Linux/macOS
25 |
26 | [Install trdl](https://github.com/werf/trdl/releases/) to `~/bin/trdl`, which will manage `kubedog` installation and updates. Add `~/bin` to your $PATH.
27 |
28 | Add `kubedog` repo to `trdl`:
29 | ```shell
30 | trdl add kubedog https://tuf.kubedog.werf.io 1 2cc56abdc649a9699074097ba60206f1299e43b320d6170c40eab552dcb940d9e813a8abf5893ff391d71f0a84b39111ffa6403a3e038b81634a40d29674a531
31 | ```
32 |
33 | To use `kubedog` on a workstation, we recommend setting up `kubedog` _automatic activation_: the activation command should be executed for each new shell session. This is usually achieved by adding the activation command to `~/.bashrc` (for Bash), `~/.zshrc` (for Zsh), or to one of the other profile files, depending on your OS/shell/terminal. Refer to your shell/terminal manual for more information.
34 | 
35 | This is the `kubedog` activation command for the current shell session:
36 | ```shell
37 | source "$(trdl use kubedog 0 stable)"
38 | ```
39 | 
40 | To use `kubedog` in CI, prefer activating `kubedog` manually: execute the activation command at the beginning of your CI job, before calling the `kubedog` binary.
41 |
42 | ### Windows (PowerShell)
43 |
44 | The following instructions should be executed in PowerShell.
45 | 
46 | [Install trdl](https://github.com/werf/trdl/releases/) to `<disk>:\Users\<username>\bin\trdl`, which will manage `kubedog` installation and updates. Add `<disk>:\Users\<username>\bin\` to your $PATH environment variable.
47 | 
48 | Add `kubedog` repo to `trdl`:
49 | ```powershell
50 | trdl add kubedog https://tuf.kubedog.werf.io 1 2cc56abdc649a9699074097ba60206f1299e43b320d6170c40eab552dcb940d9e813a8abf5893ff391d71f0a84b39111ffa6403a3e038b81634a40d29674a531
51 | ```
52 | 
53 | To use `kubedog` on a workstation, we recommend setting up `kubedog` _automatic activation_: the activation command should be executed for each new PowerShell session. This is usually achieved by adding the activation command to your [$PROFILE file](https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_profiles).
54 | 
55 | This is the `kubedog` activation command for the current PowerShell session:
56 | ```powershell
57 | . $(trdl use kubedog 0 stable)
58 | ```
59 | 
60 | To use `kubedog` in CI, prefer activating `kubedog` manually: execute the activation command at the beginning of your CI job, before calling the `kubedog` binary.
61 |
62 | ### Alternative binary installation
63 |
64 | The recommended way to install `kubedog` is described above. Alternatively, although not recommended, you can download the `kubedog` binary straight from the [GitHub Releases page](https://github.com/werf/kubedog/releases/), optionally verifying the binary with its PGP signature.
65 |
66 | ## Usage
67 |
68 | * [CLI usage](doc/usage.md#cli-usage)
69 | * [Library usage: Multitracker](doc/usage.md#Multitracker)
70 |
71 | ## Community
72 |
73 | Please feel free to reach out to us via the [project's Discussions](https://github.com/werf/kubedog/discussions) and the [werf Telegram group](https://t.me/werf_io) (there's [another one in Russian](https://t.me/werf_ru) as well).
74 |
75 | You're also welcome to follow [@werf_io](https://twitter.com/werf_io) to stay informed about all important news, articles, etc.
76 |
77 | ## License
78 |
79 | Kubedog is an Open Source project licensed under the [Apache License](https://www.apache.org/licenses/LICENSE-2.0).
80 |
--------------------------------------------------------------------------------
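A minimal sketch of the library usage that the README's Usage section links to (not a repository file; it assumes the long-standing public API of `pkg/kube` and `pkg/trackers/rollout`: `kube.Init`, the global `kube.Kubernetes` client, and `rollout.TrackDeploymentTillReady`; the resource name and namespace are hypothetical):

```go
package main

import (
	"log"
	"time"

	"github.com/werf/kubedog/pkg/kube"
	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/trackers/rollout"
)

func main() {
	// Initialize the global Kubernetes client used by the trackers.
	if err := kube.Init(kube.InitOptions{}); err != nil {
		log.Fatal(err)
	}

	// Block until the Deployment becomes ready, printing progress along the
	// way, or fail when the timeout expires.
	err := rollout.TrackDeploymentTillReady("mydeploy", "mynamespace", kube.Kubernetes, tracker.Options{
		Timeout: 5 * time.Minute,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
--------------------------------------------------------------------------------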
/Taskfile.dist.yaml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | silent: true
4 |
5 | tasks:
6 | sign:
7 | desc: 'Sign last version tag + origin/main and push signatures. Important vars: "refs".'
8 | cmds:
9 | - git fetch --tags -f
10 | - git signatures pull {{.CLI_ARGS}}
11 | - |
12 | for ref in {{.refs | default "$(git tag --sort=v:refname | tail -n1) origin/main origin/trdl"}}; do
13 | echo Signing $ref...
14 | git signatures add {{.CLI_ARGS}} $ref
15 | git signatures show {{.CLI_ARGS}} $ref
16 | done
17 | - git signatures push {{.CLI_ARGS}}
18 |
19 |
--------------------------------------------------------------------------------
/contrib/resource_status_rules.yaml:
--------------------------------------------------------------------------------
1 | ../pkg/tracker/generic/contrib_resource_status_rules.yaml
--------------------------------------------------------------------------------
/doc/cli-1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/werf/kubedog/3d8084fab0ec3ffbd0854cde1df60ca69a5c8162/doc/cli-1.gif
--------------------------------------------------------------------------------
/doc/deployment_follow.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/werf/kubedog/3d8084fab0ec3ffbd0854cde1df60ca69a5c8162/doc/deployment_follow.gif
--------------------------------------------------------------------------------
/doc/deployment_rollout.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/werf/kubedog/3d8084fab0ec3ffbd0854cde1df60ca69a5c8162/doc/deployment_rollout.gif
--------------------------------------------------------------------------------
/doc/kubedog-logo.svg:
--------------------------------------------------------------------------------
1 |
4 |
--------------------------------------------------------------------------------
/go-build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | export GO111MODULE=on
4 |
5 | go install github.com/werf/kubedog/cmd/kubedog
6 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/werf/kubedog
2 |
3 | go 1.21
4 |
5 | toolchain go1.21.6
6 |
7 | require (
8 | github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
9 | github.com/chanced/caps v1.0.2
10 | github.com/dominikbraun/graph v0.23.0
11 | github.com/fluxcd/flagger v1.36.1
12 | github.com/google/uuid v1.6.0
13 | github.com/gookit/color v1.5.4
14 | github.com/samber/lo v1.39.0
15 | github.com/spf13/cobra v1.8.0
16 | github.com/werf/logboek v0.6.1
17 | github.com/xeipuuv/gojsonschema v1.2.0
18 | golang.org/x/crypto v0.21.0
19 | k8s.io/api v0.29.3
20 | k8s.io/apimachinery v0.29.3
21 | k8s.io/cli-runtime v0.29.3
22 | k8s.io/client-go v0.29.3
23 | k8s.io/klog v1.0.0
24 | k8s.io/klog/v2 v2.120.1
25 | )
26 |
27 | require (
28 | github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
29 | github.com/avelino/slugify v0.0.0-20180501145920-855f152bd774 // indirect
30 | github.com/creack/pty v1.1.21 // indirect
31 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
32 | github.com/emicklei/go-restful/v3 v3.11.2 // indirect
33 | github.com/evanphx/json-patch v5.8.0+incompatible // indirect
34 | github.com/go-errors/errors v1.5.1 // indirect
35 | github.com/go-logr/logr v1.4.1 // indirect
36 | github.com/go-openapi/jsonpointer v0.21.0 // indirect
37 | github.com/go-openapi/jsonreference v0.21.0 // indirect
38 | github.com/go-openapi/swag v0.23.0 // indirect
39 | github.com/gogo/protobuf v1.3.2 // indirect
40 | github.com/golang/protobuf v1.5.4 // indirect
41 | github.com/google/btree v1.1.2 // indirect
42 | github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
43 | github.com/google/go-cmp v0.6.0 // indirect
44 | github.com/google/gofuzz v1.2.0 // indirect
45 | github.com/google/pprof v0.0.0-20240327155427-868f304927ed // indirect
46 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
47 | github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
48 | github.com/imdario/mergo v0.3.16 // indirect
49 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
50 | github.com/josharian/intern v1.0.0 // indirect
51 | github.com/json-iterator/go v1.1.12 // indirect
52 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
53 | github.com/mailru/easyjson v0.7.7 // indirect
54 | github.com/moby/term v0.5.0 // indirect
55 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
56 | github.com/modern-go/reflect2 v1.0.2 // indirect
57 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
58 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
59 | github.com/onsi/ginkgo/v2 v2.17.1 // indirect
60 | github.com/onsi/gomega v1.32.0 // indirect
61 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
62 | github.com/pkg/errors v0.9.1 // indirect
63 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
64 | github.com/rogpeppe/go-internal v1.12.0 // indirect
65 | github.com/sergi/go-diff v1.3.1 // indirect
66 | github.com/spf13/pflag v1.0.5 // indirect
67 | github.com/stretchr/objx v0.5.2 // indirect
68 | github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
69 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
70 | github.com/xlab/treeprint v1.2.0 // indirect
71 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
72 | go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect
73 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect
74 | golang.org/x/net v0.22.0 // indirect
75 | golang.org/x/oauth2 v0.18.0 // indirect
76 | golang.org/x/sync v0.6.0 // indirect
77 | golang.org/x/sys v0.18.0 // indirect
78 | golang.org/x/term v0.18.0 // indirect
79 | golang.org/x/text v0.14.0 // indirect
80 | golang.org/x/time v0.5.0 // indirect
81 | google.golang.org/appengine v1.6.8 // indirect
82 | google.golang.org/protobuf v1.33.0 // indirect
83 | gopkg.in/evanphx/json-patch.v5 v5.8.0 // indirect
84 | gopkg.in/inf.v0 v0.9.1 // indirect
85 | gopkg.in/yaml.v2 v2.4.0 // indirect
86 | gopkg.in/yaml.v3 v3.0.1 // indirect
87 | k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 // indirect
88 | k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect
89 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
90 | sigs.k8s.io/kustomize/api v0.16.0 // indirect
91 | sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
92 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
93 | sigs.k8s.io/yaml v1.4.0 // indirect
94 | )
95 |
--------------------------------------------------------------------------------
/kubedog.go:
--------------------------------------------------------------------------------
1 | package kubedog
2 |
3 | var Version = "dev"
4 |
--------------------------------------------------------------------------------
/pkg/display/display.go:
--------------------------------------------------------------------------------
1 | package display
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "sync"
8 | )
9 |
10 | var (
11 | Out io.Writer = os.Stdout
12 | Err io.Writer = os.Stderr
13 |
14 | mutex = &sync.Mutex{}
15 | currentLogHeader = ""
16 | )
17 |
18 | func SetOut(out io.Writer) {
19 | Out = out
20 | }
21 |
22 | func SetErr(err io.Writer) {
23 | Err = err
24 | }
25 |
26 | type LogLine struct {
27 | Timestamp string
28 | Message string
29 | }
30 |
31 | func fWriteF(stream io.Writer, format string, args ...interface{}) (n int, err error) {
32 | mutex.Lock()
33 | defer mutex.Unlock()
34 | return fmt.Fprintf(stream, format, args...)
35 | }
36 |
37 | func OutF(format string, args ...interface{}) (n int, err error) {
38 | return fWriteF(Out, format, args...)
39 | }
40 |
41 | func ErrF(format string, args ...interface{}) (n int, err error) {
42 | return fWriteF(Err, format, args...)
43 | }
44 |
45 | func SetLogHeader(logHeader string) {
46 | mutex.Lock()
47 | defer mutex.Unlock()
48 |
49 | if currentLogHeader != logHeader {
50 | if currentLogHeader != "" {
51 | fmt.Fprintln(Out)
52 | }
53 | fmt.Fprintf(Out, ">> %s\n", logHeader)
54 | currentLogHeader = logHeader
55 | }
56 | }
57 |
58 | func OutputLogLines(header string, logLines []LogLine) {
59 | if inline() {
60 | for _, line := range logLines {
61 | fmt.Fprintf(Out, ">> %s: %s\n", header, line.Message)
62 | }
63 | } else {
64 | SetLogHeader(header)
65 | for _, line := range logLines {
66 | fmt.Fprintln(Out, line.Message)
67 | }
68 | }
69 | }
70 |
71 | func inline() bool {
72 | return os.Getenv("KUBEDOG_LOG_INLINE") == "1"
73 | }
74 |
--------------------------------------------------------------------------------
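A usage sketch for the display helpers above (not a repository file; the header and log lines are hypothetical):

```go
package main

import "github.com/werf/kubedog/pkg/display"

func main() {
	lines := []display.LogLine{
		{Timestamp: "2024-01-01T00:00:00Z", Message: "starting container"},
		{Timestamp: "2024-01-01T00:00:01Z", Message: "listening on :8080"},
	}

	// With KUBEDOG_LOG_INLINE unset this prints the ">> po/mypod" header once
	// and then the raw messages; with KUBEDOG_LOG_INLINE=1 every message is
	// prefixed with ">> po/mypod: " instead.
	display.OutputLogLines("po/mypod", lines)
}
```
--------------------------------------------------------------------------------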
/pkg/kube/kube_config_getter.go:
--------------------------------------------------------------------------------
1 | package kube
2 |
3 | import (
4 | "encoding/base64"
5 | "fmt"
6 | "path/filepath"
7 | "time"
8 |
9 | "k8s.io/apimachinery/pkg/api/meta"
10 | "k8s.io/cli-runtime/pkg/genericclioptions"
11 | "k8s.io/client-go/discovery"
12 | diskcached "k8s.io/client-go/discovery/cached/disk"
13 | "k8s.io/client-go/rest"
14 | "k8s.io/client-go/restmapper"
15 | "k8s.io/client-go/tools/clientcmd"
16 | )
17 |
18 | type KubeConfigGetterOptions struct {
19 | KubeConfigOptions
20 |
21 | Namespace string
22 | BearerToken string
23 | APIServer string
24 | CAFile string
25 | TLSServerName string
26 | SkipTLSVerify bool
27 | Impersonate string
28 | ImpersonateGroup []string
29 | QPSLimit int
30 | BurstLimit int
31 | }
32 |
33 | func NewKubeConfigGetter(opts KubeConfigGetterOptions) (genericclioptions.RESTClientGetter, error) {
34 | var configGetter genericclioptions.RESTClientGetter
35 |
36 | if opts.ConfigDataBase64 != "" {
37 | if getter, err := NewClientGetterFromConfigData(opts.Context, opts.ConfigDataBase64); err != nil {
38 | return nil, fmt.Errorf("unable to create kube client getter (context=%q, config-data-base64=%q): %w", opts.Context, opts.ConfigDataBase64, err)
39 | } else {
40 | configGetter = getter
41 | }
42 | } else {
43 | configFlags := genericclioptions.NewConfigFlags(true)
44 |
45 | if len(opts.ConfigPathMergeList) > 0 {
46 | if err := setConfigPathMergeListEnvironment(opts.ConfigPathMergeList); err != nil {
47 | return nil, err
48 | }
49 | }
50 |
51 | configFlags.Context = new(string)
52 | *configFlags.Context = opts.Context
53 |
54 | configFlags.KubeConfig = new(string)
55 | *configFlags.KubeConfig = opts.ConfigPath
56 |
57 | configFlags.Insecure = new(bool)
58 | *configFlags.Insecure = opts.SkipTLSVerify
59 |
60 | configFlags.WrapConfigFn = func(config *rest.Config) *rest.Config {
61 | if opts.QPSLimit > 0 {
62 | config.QPS = float32(opts.QPSLimit)
63 | }
64 |
65 | if opts.BurstLimit > 0 {
66 | config.Burst = opts.BurstLimit
67 | }
68 |
69 | return config
70 | }
71 |
72 | if opts.Namespace != "" {
73 | configFlags.Namespace = new(string)
74 | *configFlags.Namespace = opts.Namespace
75 | }
76 |
77 | if opts.BearerToken != "" {
78 | configFlags.BearerToken = new(string)
79 | *configFlags.BearerToken = opts.BearerToken
80 | }
81 |
82 | if opts.APIServer != "" {
83 | configFlags.APIServer = new(string)
84 | *configFlags.APIServer = opts.APIServer
85 | }
86 |
87 | if opts.CAFile != "" {
88 | configFlags.CAFile = new(string)
89 | *configFlags.CAFile = opts.CAFile
90 | }
91 |
92 | if opts.TLSServerName != "" {
93 | configFlags.TLSServerName = new(string)
94 | *configFlags.TLSServerName = opts.TLSServerName
95 | }
96 |
97 | if opts.Impersonate != "" {
98 | configFlags.Impersonate = new(string)
99 | *configFlags.Impersonate = opts.Impersonate
100 | }
101 |
102 | if opts.ImpersonateGroup != nil {
103 | configFlags.ImpersonateGroup = new([]string)
104 | *configFlags.ImpersonateGroup = append(*configFlags.ImpersonateGroup, opts.ImpersonateGroup...)
105 | }
106 |
107 | configGetter = configFlags
108 | }
109 |
110 | return configGetter, nil
111 | }
112 |
113 | type ClientGetterFromConfigData struct {
114 | Context string
115 | ConfigDataBase64 string
116 |
117 | ClientConfig clientcmd.ClientConfig
118 | }
119 |
120 | func NewClientGetterFromConfigData(context, configDataBase64 string) (*ClientGetterFromConfigData, error) {
121 | getter := &ClientGetterFromConfigData{Context: context, ConfigDataBase64: configDataBase64}
122 |
123 | if clientConfig, err := getter.getRawKubeConfigLoader(); err != nil {
124 | return nil, err
125 | } else {
126 | getter.ClientConfig = clientConfig
127 | }
128 |
129 | return getter, nil
130 | }
131 |
132 | func (getter *ClientGetterFromConfigData) ToRESTConfig() (*rest.Config, error) {
133 | return getter.ClientConfig.ClientConfig()
134 | }
135 |
136 | func (getter *ClientGetterFromConfigData) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
137 | config, err := getter.ClientConfig.ClientConfig()
138 | if err != nil {
139 | return nil, err
140 | }
141 |
142 | config.Burst = 100
143 |
144 | cacheDir := defaultCacheDir
145 | httpCacheDir := filepath.Join(cacheDir, "http")
146 | discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(cacheDir, "discovery"), config.Host)
147 |
148 | return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, 10*time.Minute)
149 | }
150 |
151 | func (getter *ClientGetterFromConfigData) ToRESTMapper() (meta.RESTMapper, error) {
152 | discoveryClient, err := getter.ToDiscoveryClient()
153 | if err != nil {
154 | return nil, err
155 | }
156 |
157 | mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
158 | expander := restmapper.NewShortcutExpander(mapper, discoveryClient, func(s string) {
159 | fmt.Print(s)
160 | })
161 | return expander, nil
162 | }
163 |
164 | func (getter *ClientGetterFromConfigData) ToRawKubeConfigLoader() clientcmd.ClientConfig {
165 | return getter.ClientConfig
166 | }
167 |
168 | func (getter *ClientGetterFromConfigData) getRawKubeConfigLoader() (clientcmd.ClientConfig, error) {
169 | if data, err := base64.StdEncoding.DecodeString(getter.ConfigDataBase64); err != nil {
170 | return nil, fmt.Errorf("unable to decode base64 config data: %w", err)
171 | } else {
172 | return GetClientConfig(getter.Context, "", data, nil)
173 | }
174 | }
175 |
--------------------------------------------------------------------------------
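A sketch of wiring `NewKubeConfigGetter` into a client set (not a repository file; the context name, kubeconfig path, and rate limits are hypothetical, and `KubeConfigOptions` is the embedded struct defined elsewhere in this package):

```go
package example

import (
	"fmt"

	"k8s.io/client-go/kubernetes"

	"github.com/werf/kubedog/pkg/kube"
)

func newClient() (kubernetes.Interface, error) {
	getter, err := kube.NewKubeConfigGetter(kube.KubeConfigGetterOptions{
		KubeConfigOptions: kube.KubeConfigOptions{
			Context:    "my-context",              // hypothetical kubeconfig context
			ConfigPath: "/home/user/.kube/config", // hypothetical path
		},
		Namespace:  "mynamespace",
		QPSLimit:   30,
		BurstLimit: 100,
	})
	if err != nil {
		return nil, fmt.Errorf("create kube config getter: %w", err)
	}

	restConfig, err := getter.ToRESTConfig()
	if err != nil {
		return nil, fmt.Errorf("build REST config: %w", err)
	}

	return kubernetes.NewForConfig(restConfig)
}
```
--------------------------------------------------------------------------------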
/pkg/kube/vendor.go:
--------------------------------------------------------------------------------
1 | package kube
2 |
3 | import (
4 | "path/filepath"
5 | "regexp"
6 | "strings"
7 |
8 | "k8s.io/client-go/util/homedir"
9 | )
10 |
11 | // This code is taken from k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go,
12 | // where it is not publicly exposed. We need it to create a CachedDiscoveryClient
13 | // for the base64 config that is compatible with the one the default config loader uses.
14 |
15 | var defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "cache")
16 |
17 | // overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive
18 | var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`)
19 |
20 | // computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name.
21 | func computeDiscoverCacheDir(parentDir, host string) string {
22 | // strip the optional scheme from host if it's there:
23 | schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
24 | // now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. Even if we do collide the problem is short lived
25 | safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
26 | return filepath.Join(parentDir, safeHost)
27 | }
28 |
--------------------------------------------------------------------------------
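An illustration of the cache-directory mapping above (not a repository file; it duplicates the `computeDiscoverCacheDir` logic, since that function is unexported, and the host and home directory are hypothetical):

```go
package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

// Same sanitization as computeDiscoverCacheDir in pkg/kube/vendor.go.
var unsafeChars = regexp.MustCompile(`[^(\w/\.)]`)

func main() {
	host := "https://api.example.com:6443"
	schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
	safeHost := unsafeChars.ReplaceAllString(schemelessHost, "_")

	// Prints: /home/user/.kube/cache/discovery/api.example.com_6443
	fmt.Println(filepath.Join("/home/user/.kube/cache/discovery", safeHost))
}
```
--------------------------------------------------------------------------------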
/pkg/tracker/canary/feed.go:
--------------------------------------------------------------------------------
1 | package canary
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "k8s.io/client-go/kubernetes"
8 | watchtools "k8s.io/client-go/tools/watch"
9 |
10 | "github.com/werf/kubedog/pkg/tracker"
11 | )
12 |
13 | type Feed interface {
14 | OnAdded(func() error)
15 | OnSucceeded(func() error)
16 | OnFailed(func(reason string) error)
17 | OnEventMsg(func(msg string) error)
18 | OnStatus(func(CanaryStatus) error)
19 |
20 | GetStatus() CanaryStatus
21 | Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error
22 | }
23 |
24 | func NewFeed() Feed {
25 | return &feed{}
26 | }
27 |
28 | type feed struct {
29 | OnAddedFunc func() error
30 | OnSucceededFunc func() error
31 | OnFailedFunc func(string) error
32 | OnEventMsgFunc func(string) error
33 | OnStatusFunc func(CanaryStatus) error
34 |
35 | statusMux sync.Mutex
36 | status CanaryStatus
37 | }
38 |
39 | func (f *feed) OnAdded(function func() error) {
40 | f.OnAddedFunc = function
41 | }
42 |
43 | func (f *feed) OnSucceeded(function func() error) {
44 | f.OnSucceededFunc = function
45 | }
46 |
47 | func (f *feed) OnFailed(function func(string) error) {
48 | f.OnFailedFunc = function
49 | }
50 |
51 | func (f *feed) OnEventMsg(function func(string) error) {
52 | f.OnEventMsgFunc = function
53 | }
54 |
55 | func (f *feed) OnStatus(function func(CanaryStatus) error) {
56 | f.OnStatusFunc = function
57 | }
58 |
59 | func (f *feed) Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
60 | errorChan := make(chan error)
61 | doneChan := make(chan struct{})
62 |
63 | parentContext := opts.ParentContext
64 | if parentContext == nil {
65 | parentContext = context.Background()
66 | }
67 | ctx, cancel := watchtools.ContextWithOptionalTimeout(parentContext, opts.Timeout)
68 | defer cancel()
69 |
70 | canary := NewTracker(name, namespace, kube, nil, opts)
71 |
72 | go func() {
73 | err := canary.Track(ctx)
74 | if err != nil {
75 | errorChan <- err
76 | } else {
77 | doneChan <- struct{}{}
78 | }
79 | }()
80 |
81 | for {
82 | select {
83 | case status := <-canary.Added:
84 | f.setStatus(status)
85 |
86 | if f.OnAddedFunc != nil {
87 | err := f.OnAddedFunc()
88 | if err == tracker.ErrStopTrack {
89 | return nil
90 | }
91 | if err != nil {
92 | return err
93 | }
94 | }
95 | case status := <-canary.Succeeded:
96 | f.setStatus(status)
97 |
98 | if f.OnSucceededFunc != nil {
99 | err := f.OnSucceededFunc()
100 | if err == tracker.ErrStopTrack {
101 | return nil
102 | }
103 | if err != nil {
104 | return err
105 | }
106 | }
107 |
108 | case status := <-canary.Failed:
109 | f.setStatus(status)
110 |
111 | if f.OnFailedFunc != nil {
112 | err := f.OnFailedFunc(status.FailedReason)
113 | if err == tracker.ErrStopTrack {
114 | return nil
115 | }
116 | if err != nil {
117 | return err
118 | }
119 | }
120 |
121 | case msg := <-canary.EventMsg:
122 | if f.OnEventMsgFunc != nil {
123 | err := f.OnEventMsgFunc(msg)
124 | if err == tracker.ErrStopTrack {
125 | return nil
126 | }
127 | if err != nil {
128 | return err
129 | }
130 | }
131 |
132 | case status := <-canary.Status:
133 | f.setStatus(status)
134 |
135 | if f.OnStatusFunc != nil {
136 | err := f.OnStatusFunc(status)
137 | if err == tracker.ErrStopTrack {
138 | return nil
139 | }
140 | if err != nil {
141 | return err
142 | }
143 | }
144 |
145 | case err := <-errorChan:
146 | return err
147 | case <-doneChan:
148 | return nil
149 | }
150 | }
151 | }
152 |
153 | func (f *feed) setStatus(status CanaryStatus) {
154 | f.statusMux.Lock()
155 | defer f.statusMux.Unlock()
156 |
157 | if status.StatusGeneration > f.status.StatusGeneration {
158 | f.status = status
159 | }
160 | }
161 |
162 | func (f *feed) GetStatus() CanaryStatus {
163 | f.statusMux.Lock()
164 | defer f.statusMux.Unlock()
165 | return f.status
166 | }
167 |
--------------------------------------------------------------------------------
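A usage sketch for the canary feed above (not a repository file; it assumes an already-initialized `kubernetes.Interface` client and relies only on the `Feed` interface defined in this file; the resource name and namespace are hypothetical):

```go
package example

import (
	"fmt"
	"log"

	"k8s.io/client-go/kubernetes"

	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/tracker/canary"
)

func trackCanary(kubeClient kubernetes.Interface) {
	feed := canary.NewFeed()

	feed.OnSucceeded(func() error {
		fmt.Println("canary analysis succeeded")
		return tracker.ErrStopTrack // stops Track with a nil error
	})
	feed.OnFailed(func(reason string) error {
		return fmt.Errorf("canary failed: %s", reason)
	})

	// Track blocks until the canary succeeds or fails, a callback returns an
	// error, or the optional tracker.Options timeout expires.
	if err := feed.Track("mycanary", "mynamespace", kubeClient, tracker.Options{}); err != nil {
		log.Fatal(err)
	}
}
```
--------------------------------------------------------------------------------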
/pkg/tracker/canary/status.go:
--------------------------------------------------------------------------------
1 | package canary
2 |
3 | import (
4 | "github.com/fluxcd/flagger/pkg/apis/flagger/v1beta1"
5 |
6 | "github.com/werf/kubedog/pkg/tracker/indicators"
7 | "github.com/werf/kubedog/pkg/utils"
8 | )
9 |
10 | type CanaryStatus struct {
11 | v1beta1.CanaryStatus
12 |
13 | StatusGeneration uint64
14 |
15 | StatusIndicator *indicators.StringEqualConditionIndicator
16 |
17 | Duration string
18 | Age string
19 |
20 | IsSucceeded bool
21 | IsFailed bool
22 | FailedReason string
23 | }
24 |
25 | func NewCanaryStatus(object *v1beta1.Canary, statusGeneration uint64, isTrackerFailed bool, trackerFailedReason string, canariesStatuses map[string]v1beta1.CanaryStatus) CanaryStatus {
26 | res := CanaryStatus{
27 | CanaryStatus: object.Status,
28 | StatusGeneration: statusGeneration,
29 | StatusIndicator: &indicators.StringEqualConditionIndicator{},
30 | Age: utils.TranslateTimestampSince(object.CreationTimestamp),
31 | }
32 |
33 | switch object.Status.Phase {
34 | case v1beta1.CanaryPhaseInitialized, v1beta1.CanaryPhaseSucceeded:
35 | res.IsSucceeded = true
36 | case v1beta1.CanaryPhaseFailed:
37 | if !res.IsFailed {
38 | errorMessage := "Failed - "
39 | for _, condition := range object.Status.Conditions {
40 | errorMessage += condition.Message
41 | }
42 | res.IsFailed = true
43 | res.FailedReason = errorMessage
44 | }
45 | default:
46 | res.StatusIndicator.Value = string(object.Status.Phase)
47 | }
48 |
49 | if !res.IsSucceeded && !res.IsFailed {
50 | res.IsFailed = isTrackerFailed
51 | res.FailedReason = trackerFailedReason
52 | }
53 |
54 | return res
55 | }
56 |
--------------------------------------------------------------------------------
/pkg/tracker/controller/feed.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import "github.com/werf/kubedog/pkg/tracker/replicaset"
4 |
5 | type ControllerFeed interface {
6 | OnAdded(func(ready bool) error)
7 | OnReady(func() error)
8 | OnFailed(func(reason string) error)
9 | OnEventMsg(func(msg string) error) // Pulling: pull alpine:3.6....
10 | OnAddedReplicaSet(func(replicaset.ReplicaSet) error)
11 | OnAddedPod(func(replicaset.ReplicaSetPod) error)
12 | OnPodLogChunk(func(*replicaset.ReplicaSetPodLogChunk) error)
13 | OnPodError(func(replicaset.ReplicaSetPodError) error)
14 | }
15 |
16 | type CommonControllerFeed struct {
17 | OnAddedFunc func(bool) error
18 | OnReadyFunc func() error
19 | OnFailedFunc func(reason string) error
20 | OnEventMsgFunc func(msg string) error
21 | OnAddedReplicaSetFunc func(replicaset.ReplicaSet) error
22 | OnAddedPodFunc func(replicaset.ReplicaSetPod) error
23 | OnPodLogChunkFunc func(*replicaset.ReplicaSetPodLogChunk) error
24 | OnPodErrorFunc func(replicaset.ReplicaSetPodError) error
25 | }
26 |
27 | func (f *CommonControllerFeed) OnAdded(function func(bool) error) {
28 | f.OnAddedFunc = function
29 | }
30 |
31 | func (f *CommonControllerFeed) OnReady(function func() error) {
32 | f.OnReadyFunc = function
33 | }
34 |
35 | func (f *CommonControllerFeed) OnFailed(function func(string) error) {
36 | f.OnFailedFunc = function
37 | }
38 |
39 | func (f *CommonControllerFeed) OnEventMsg(function func(string) error) {
40 | f.OnEventMsgFunc = function
41 | }
42 |
43 | func (f *CommonControllerFeed) OnAddedReplicaSet(function func(replicaset.ReplicaSet) error) {
44 | f.OnAddedReplicaSetFunc = function
45 | }
46 |
47 | func (f *CommonControllerFeed) OnAddedPod(function func(replicaset.ReplicaSetPod) error) {
48 | f.OnAddedPodFunc = function
49 | }
50 |
51 | func (f *CommonControllerFeed) OnPodLogChunk(function func(*replicaset.ReplicaSetPodLogChunk) error) {
52 | f.OnPodLogChunkFunc = function
53 | }
54 |
55 | func (f *CommonControllerFeed) OnPodError(function func(replicaset.ReplicaSetPodError) error) {
56 | f.OnPodErrorFunc = function
57 | }
58 |
--------------------------------------------------------------------------------
/pkg/tracker/daemonset/feed.go:
--------------------------------------------------------------------------------
1 | package daemonset
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 |
8 | "k8s.io/client-go/kubernetes"
9 | watchtools "k8s.io/client-go/tools/watch"
10 |
11 | "github.com/werf/kubedog/pkg/tracker"
12 | "github.com/werf/kubedog/pkg/tracker/controller"
13 | "github.com/werf/kubedog/pkg/tracker/debug"
14 | )
15 |
16 | type Feed interface {
17 | controller.ControllerFeed
18 |
19 | OnStatus(func(DaemonSetStatus) error)
20 |
21 | GetStatus() DaemonSetStatus
22 | Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error
23 | }
24 |
25 | func NewFeed() Feed {
26 | return &feed{}
27 | }
28 |
29 | type feed struct {
30 | controller.CommonControllerFeed
31 |
32 | OnStatusFunc func(DaemonSetStatus) error
33 |
34 | statusMux sync.Mutex
35 | status DaemonSetStatus
36 | }
37 |
38 | func (f *feed) OnStatus(function func(DaemonSetStatus) error) {
39 | f.OnStatusFunc = function
40 | }
41 |
42 | func (f *feed) Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
43 | errorChan := make(chan error)
44 | doneChan := make(chan bool)
45 |
46 | parentContext := opts.ParentContext
47 | if parentContext == nil {
48 | parentContext = context.Background()
49 | }
50 | ctx, cancel := watchtools.ContextWithOptionalTimeout(parentContext, opts.Timeout)
51 | defer cancel()
52 |
53 | daemonSetTracker := NewTracker(name, namespace, kube, opts)
54 |
55 | go func() {
56 | if debug.Debug() {
57 | fmt.Printf(" goroutine: start DaemonSet/%s tracker\n", name)
58 | }
59 | err := daemonSetTracker.Track(ctx)
60 | if err != nil {
61 | errorChan <- err
62 | } else {
63 | doneChan <- true
64 | }
65 | }()
66 |
67 | if debug.Debug() {
68 | fmt.Printf(" ds/%s: for-select DaemonSetTracker channels\n", name)
69 | }
70 |
71 | for {
72 | select {
73 | case status := <-daemonSetTracker.Added:
74 | f.setStatus(status)
75 |
76 | if f.OnAddedFunc != nil {
77 | err := f.OnAddedFunc(status.IsReady)
78 | if err == tracker.ErrStopTrack {
79 | return nil
80 | }
81 | if err != nil {
82 | return err
83 | }
84 |
85 | }
86 |
87 | case status := <-daemonSetTracker.Ready:
88 | f.setStatus(status)
89 |
90 | if f.OnReadyFunc != nil {
91 | err := f.OnReadyFunc()
92 | if err == tracker.ErrStopTrack {
93 | return nil
94 | }
95 | if err != nil {
96 | return err
97 | }
98 | }
99 |
100 | case status := <-daemonSetTracker.Failed:
101 | f.setStatus(status)
102 |
103 | if f.OnFailedFunc != nil {
104 | err := f.OnFailedFunc(status.FailedReason)
105 | if err == tracker.ErrStopTrack {
106 | return nil
107 | }
108 | if err != nil {
109 | return err
110 | }
111 | }
112 |
113 | case msg := <-daemonSetTracker.EventMsg:
114 | if f.OnEventMsgFunc != nil {
115 | err := f.OnEventMsgFunc(msg)
116 | if err == tracker.ErrStopTrack {
117 | return nil
118 | }
119 | if err != nil {
120 | return err
121 | }
122 | }
123 |
124 | case report := <-daemonSetTracker.AddedPod:
125 | f.setStatus(report.DaemonSetStatus)
126 |
127 | if f.OnAddedPodFunc != nil {
128 | err := f.OnAddedPodFunc(report.Pod)
129 | if err == tracker.ErrStopTrack {
130 | return nil
131 | }
132 | if err != nil {
133 | return err
134 | }
135 | }
136 |
137 | case chunk := <-daemonSetTracker.PodLogChunk:
138 | if debug.Debug() {
139 | fmt.Printf(" ds/%s po/%s log chunk\n", daemonSetTracker.ResourceName, chunk.PodName)
140 | for _, line := range chunk.LogLines {
141 | fmt.Printf("po/%s [%s] %s\n", chunk.PodName, line.Timestamp, line.Message)
142 | }
143 | }
144 |
145 | if f.OnPodLogChunkFunc != nil {
146 | err := f.OnPodLogChunkFunc(chunk)
147 | if err == tracker.ErrStopTrack {
148 | return nil
149 | }
150 | if err != nil {
151 | return err
152 | }
153 | }
154 |
155 | case report := <-daemonSetTracker.PodError:
156 | f.setStatus(report.DaemonSetStatus)
157 |
158 | if f.OnPodErrorFunc != nil {
159 | err := f.OnPodErrorFunc(report.PodError)
160 | if err == tracker.ErrStopTrack {
161 | return nil
162 | }
163 | if err != nil {
164 | return err
165 | }
166 | }
167 |
168 | case status := <-daemonSetTracker.Status:
169 | f.setStatus(status)
170 |
171 | if f.OnStatusFunc != nil {
172 | err := f.OnStatusFunc(status)
173 | if err == tracker.ErrStopTrack {
174 | return nil
175 | }
176 | if err != nil {
177 | return err
178 | }
179 | }
180 |
181 | case err := <-errorChan:
182 | return err
183 | case <-doneChan:
184 | return nil
185 | }
186 | }
187 | }
188 |
189 | func (f *feed) setStatus(status DaemonSetStatus) {
190 | f.statusMux.Lock()
191 | defer f.statusMux.Unlock()
192 | f.status = status
193 | }
194 |
195 | func (f *feed) GetStatus() DaemonSetStatus {
196 | f.statusMux.Lock()
197 | defer f.statusMux.Unlock()
198 | return f.status
199 | }
200 |
--------------------------------------------------------------------------------
/pkg/tracker/daemonset/status.go:
--------------------------------------------------------------------------------
1 | package daemonset
2 |
3 | import (
4 | "fmt"
5 |
6 | appsv1 "k8s.io/api/apps/v1"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/indicators"
9 | "github.com/werf/kubedog/pkg/tracker/pod"
10 | )
11 |
12 | type DaemonSetStatus struct {
13 | appsv1.DaemonSetStatus
14 |
15 | StatusGeneration uint64
16 |
17 | ReplicasIndicator *indicators.Int32EqualConditionIndicator
18 | UpToDateIndicator *indicators.Int32EqualConditionIndicator
19 | AvailableIndicator *indicators.Int32EqualConditionIndicator
20 |
21 | WaitingForMessages []string
22 |
23 | IsReady bool
24 | IsFailed bool
25 | FailedReason string
26 |
27 | Pods map[string]pod.PodStatus
28 | NewPodsNames []string
29 | }
30 |
31 | func NewDaemonSetStatus(object *appsv1.DaemonSet, statusGeneration uint64, isTrackerFailed bool, trackerFailedReason string, podsStatuses map[string]pod.PodStatus, newPodsNames []string) DaemonSetStatus {
32 | res := DaemonSetStatus{
33 | StatusGeneration: statusGeneration,
34 | DaemonSetStatus: object.Status,
35 | Pods: make(map[string]pod.PodStatus),
36 | NewPodsNames: newPodsNames,
37 | }
38 |
39 | processingPodsStatuses:
40 | for k, v := range podsStatuses {
41 | res.Pods[k] = v
42 |
43 | for _, newPodName := range newPodsNames {
44 | if newPodName == k {
45 | if v.StatusIndicator != nil {
46 | // New Pod should be Running
47 | v.StatusIndicator.TargetValue = "Running"
48 | }
49 | continue processingPodsStatuses
50 | }
51 | }
52 |
53 | if v.StatusIndicator != nil {
54 | // Old Pod should be gone
55 | v.StatusIndicator.TargetValue = ""
56 | }
57 | }
58 |
59 | res.IsReady = false
60 |
61 | // FIXME: tracker should track other update strategy types as well
62 | if object.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
63 | res.IsReady = true
64 | return res
65 | }
66 |
67 | if object.Status.ObservedGeneration >= object.Generation {
68 | res.ReplicasIndicator = &indicators.Int32EqualConditionIndicator{
69 | Value: object.Status.CurrentNumberScheduled + object.Status.NumberMisscheduled,
70 | TargetValue: object.Status.DesiredNumberScheduled,
71 | }
72 | res.UpToDateIndicator = &indicators.Int32EqualConditionIndicator{
73 | Value: object.Status.UpdatedNumberScheduled,
74 | TargetValue: object.Status.DesiredNumberScheduled,
75 | }
76 | res.AvailableIndicator = &indicators.Int32EqualConditionIndicator{
77 | Value: object.Status.NumberAvailable,
78 | TargetValue: object.Status.DesiredNumberScheduled,
79 | }
80 |
81 | res.IsReady = true
82 |
83 | if object.Status.UpdatedNumberScheduled != object.Status.DesiredNumberScheduled {
84 | res.IsReady = false
85 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("up-to-date %d->%d", object.Status.UpdatedNumberScheduled, object.Status.DesiredNumberScheduled))
86 | }
87 | if object.Status.NumberAvailable != object.Status.DesiredNumberScheduled {
88 | res.IsReady = false
89 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("available %d->%d", object.Status.NumberAvailable, object.Status.DesiredNumberScheduled))
90 | }
91 | } else {
92 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("observed generation %d should be >= %d", object.Status.ObservedGeneration, object.Generation))
93 | }
94 |
95 | if !res.IsReady && !res.IsFailed {
96 | res.IsFailed = isTrackerFailed
97 | res.FailedReason = trackerFailedReason
98 | }
99 |
100 | return res
101 | }
102 |
103 | // DaemonSetRolloutStatus returns a message describing daemon set status, and a bool value indicating if the status is considered done.
104 | func DaemonSetRolloutStatus(daemon *appsv1.DaemonSet) (string, bool, error) {
105 | if daemon.Spec.UpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {
106 | return "", true, fmt.Errorf("rollout status is only available for %s strategy type", appsv1.RollingUpdateDaemonSetStrategyType)
107 | }
108 | if daemon.Generation <= daemon.Status.ObservedGeneration {
109 | if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
110 | return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d out of %d new pods have been updated...\n", daemon.Name, daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
111 | }
112 | if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
113 | return fmt.Sprintf("Waiting for daemon set %q rollout to finish: %d of %d updated pods are available...\n", daemon.Name, daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil
114 | }
115 | return fmt.Sprintf("daemon set %q successfully rolled out\n", daemon.Name), true, nil
116 | }
117 | return "Waiting for daemon set spec update to be observed...\n", false, nil
118 | }
119 |
--------------------------------------------------------------------------------
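A polling sketch around `DaemonSetRolloutStatus`, mirroring `kubectl rollout status` (not a repository file; the real trackers in this package use a watch rather than polling, and the client is assumed to be initialized):

```go
package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/werf/kubedog/pkg/tracker/daemonset"
)

// WaitForDaemonSet polls until DaemonSetRolloutStatus reports done.
func WaitForDaemonSet(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	for {
		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}

		msg, done, err := daemonset.DaemonSetRolloutStatus(ds)
		if err != nil {
			return err
		}
		fmt.Print(msg) // status messages already end with a newline

		if done {
			return nil
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}
```
--------------------------------------------------------------------------------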
/pkg/tracker/debug/debug.go:
--------------------------------------------------------------------------------
1 | package debug
2 |
3 | import "os"
4 |
5 | func YesNo(v bool) string {
6 | if v {
7 | return "YES"
8 | }
9 | return " no"
10 | }
11 |
12 | func Debug() bool {
13 | return os.Getenv("KUBEDOG_TRACKER_DEBUG") == "1"
14 | }
15 |
--------------------------------------------------------------------------------
/pkg/tracker/deployment/feed.go:
--------------------------------------------------------------------------------
1 | package deployment
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 |
8 | "k8s.io/client-go/kubernetes"
9 | watchtools "k8s.io/client-go/tools/watch"
10 |
11 | "github.com/werf/kubedog/pkg/tracker"
12 | "github.com/werf/kubedog/pkg/tracker/controller"
13 | "github.com/werf/kubedog/pkg/tracker/debug"
14 | )
15 |
16 | type Feed interface {
17 | controller.ControllerFeed
18 |
19 | OnStatus(func(DeploymentStatus) error)
20 |
21 | GetStatus() DeploymentStatus
22 | Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error
23 | }
24 |
25 | func NewFeed() Feed {
26 | return &feed{}
27 | }
28 |
29 | type feed struct {
30 | controller.CommonControllerFeed
31 |
32 | OnStatusFunc func(DeploymentStatus) error
33 |
34 | statusMux sync.Mutex
35 | status DeploymentStatus
36 | }
37 |
38 | func (f *feed) OnStatus(function func(DeploymentStatus) error) {
39 | f.OnStatusFunc = function
40 | }
41 |
42 | func (f *feed) Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
43 | errorChan := make(chan error)
44 | doneChan := make(chan bool)
45 |
46 | parentContext := opts.ParentContext
47 | if parentContext == nil {
48 | parentContext = context.Background()
49 | }
50 | ctx, cancel := watchtools.ContextWithOptionalTimeout(parentContext, opts.Timeout)
51 | defer cancel()
52 |
53 | deploymentTracker := NewTracker(name, namespace, kube, opts)
54 |
55 | go func() {
56 | if debug.Debug() {
57 | fmt.Printf(" goroutine: start deploy/%s tracker\n", name)
58 | }
59 | err := deploymentTracker.Track(ctx)
60 | if err != nil {
61 | errorChan <- err
62 | } else {
63 | doneChan <- true
64 | }
65 | }()
66 |
67 | if debug.Debug() {
68 | fmt.Printf(" deploy/%s: for-select DeploymentTracker channels\n", name)
69 | }
70 |
71 | for {
72 | select {
73 | case status := <-deploymentTracker.Added:
74 | f.setStatus(status)
75 |
76 | if f.OnAddedFunc != nil {
77 | err := f.OnAddedFunc(status.IsReady)
78 | if err == tracker.ErrStopTrack {
79 | return nil
80 | }
81 | if err != nil {
82 | return err
83 | }
84 | }
85 |
86 | case status := <-deploymentTracker.Ready:
87 | f.setStatus(status)
88 |
89 | if f.OnReadyFunc != nil {
90 | err := f.OnReadyFunc()
91 | if err == tracker.ErrStopTrack {
92 | return nil
93 | }
94 | if err != nil {
95 | return err
96 | }
97 | }
98 |
99 | case status := <-deploymentTracker.Failed:
100 | f.setStatus(status)
101 |
102 | if f.OnFailedFunc != nil {
103 | err := f.OnFailedFunc(status.FailedReason)
104 | if err == tracker.ErrStopTrack {
105 | return nil
106 | }
107 | if err != nil {
108 | return err
109 | }
110 | }
111 |
112 | case msg := <-deploymentTracker.EventMsg:
113 | if f.OnEventMsgFunc != nil {
114 | err := f.OnEventMsgFunc(msg)
115 | if err == tracker.ErrStopTrack {
116 | return nil
117 | }
118 | if err != nil {
119 | return err
120 | }
121 | }
122 |
123 | case report := <-deploymentTracker.AddedReplicaSet:
124 | f.setStatus(report.DeploymentStatus)
125 |
126 | if f.OnAddedReplicaSetFunc != nil {
127 | err := f.OnAddedReplicaSetFunc(report.ReplicaSet)
128 | if err == tracker.ErrStopTrack {
129 | return nil
130 | }
131 | if err != nil {
132 | return err
133 | }
134 | }
135 |
136 | case report := <-deploymentTracker.AddedPod:
137 | f.setStatus(report.DeploymentStatus)
138 |
139 | if f.OnAddedPodFunc != nil {
140 | err := f.OnAddedPodFunc(report.ReplicaSetPod)
141 | if err == tracker.ErrStopTrack {
142 | return nil
143 | }
144 | if err != nil {
145 | return err
146 | }
147 | }
148 |
149 | case chunk := <-deploymentTracker.PodLogChunk:
150 | if debug.Debug() {
151 | fmt.Printf(" deploy/%s pod `%s` log chunk\n", deploymentTracker.ResourceName, chunk.PodName)
152 | for _, line := range chunk.LogLines {
153 | fmt.Printf("po/%s [%s] %s\n", chunk.PodName, line.Timestamp, line.Message)
154 | }
155 | }
156 |
157 | if f.OnPodLogChunkFunc != nil {
158 | err := f.OnPodLogChunkFunc(chunk)
159 | if err == tracker.ErrStopTrack {
160 | return nil
161 | }
162 | if err != nil {
163 | return err
164 | }
165 | }
166 |
167 | case report := <-deploymentTracker.PodError:
168 | f.setStatus(report.DeploymentStatus)
169 |
170 | if f.OnPodErrorFunc != nil {
171 | err := f.OnPodErrorFunc(report.ReplicaSetPodError)
172 | if err == tracker.ErrStopTrack {
173 | return nil
174 | }
175 | if err != nil {
176 | return err
177 | }
178 | }
179 |
180 | case status := <-deploymentTracker.Status:
181 | f.setStatus(status)
182 |
183 | if f.OnStatusFunc != nil {
184 | err := f.OnStatusFunc(status)
185 | if err == tracker.ErrStopTrack {
186 | return nil
187 | }
188 | if err != nil {
189 | return err
190 | }
191 | }
192 |
193 | case err := <-errorChan:
194 | return err
195 | case <-doneChan:
196 | return nil
197 | }
198 | }
199 | }
200 |
201 | func (f *feed) setStatus(status DeploymentStatus) {
202 | f.statusMux.Lock()
203 | defer f.statusMux.Unlock()
204 | f.status = status
205 | }
206 |
207 | func (f *feed) GetStatus() DeploymentStatus {
208 | f.statusMux.Lock()
209 | defer f.statusMux.Unlock()
210 | return f.status
211 | }
212 |
--------------------------------------------------------------------------------
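A minimal sketch (not part of the repository) of consuming the deployment feed API shown above. Client setup is assumed; the resource names and timeout are illustrative.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/kubernetes"

	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/tracker/deployment"
)

func trackDeployment(client kubernetes.Interface) error {
	feed := deployment.NewFeed()

	feed.OnReady(func() error {
		fmt.Println("deployment is ready")
		return tracker.ErrStopTrack // stop tracking gracefully
	})
	feed.OnFailed(func(reason string) error {
		return fmt.Errorf("deployment failed: %s", reason)
	})
	feed.OnStatus(func(status deployment.DeploymentStatus) error {
		fmt.Printf("ready: %v, waiting for: %v\n", status.IsReady, status.WaitingForMessages)
		return nil
	})

	return feed.Track("mydeploy", "mynamespace", client, tracker.Options{
		Timeout: 5 * time.Minute,
	})
}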
/pkg/tracker/deployment/status.go:
--------------------------------------------------------------------------------
1 | package deployment
2 |
3 | import (
4 | "fmt"
5 |
6 | appsv1 "k8s.io/api/apps/v1"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/indicators"
9 | "github.com/werf/kubedog/pkg/tracker/pod"
10 | "github.com/werf/kubedog/pkg/utils"
11 | )
12 |
13 | type DeploymentStatus struct {
14 | appsv1.DeploymentStatus
15 |
16 | StatusGeneration uint64
17 |
18 | ReplicasIndicator *indicators.Int32EqualConditionIndicator
19 | UpToDateIndicator *indicators.Int32EqualConditionIndicator
20 | AvailableIndicator *indicators.Int32EqualConditionIndicator
21 |
22 | WaitingForMessages []string
23 |
24 | IsReady bool
25 | IsFailed bool
26 | FailedReason string
27 |
28 | Pods map[string]pod.PodStatus
 29 | 	// NewPodsNames lists the Pods that belong to the new ReplicaSet of the Deployment,
 30 | 	// i.e. the actual, up-to-date Pods of the Deployment
31 | NewPodsNames []string
32 | }
33 |
34 | func NewDeploymentStatus(object *appsv1.Deployment, statusGeneration uint64, isTrackerFailed bool, trackerFailedReason string, podsStatuses map[string]pod.PodStatus, newPodsNames []string) DeploymentStatus {
35 | res := DeploymentStatus{
36 | StatusGeneration: statusGeneration,
37 | DeploymentStatus: object.Status,
38 | Pods: make(map[string]pod.PodStatus),
39 | NewPodsNames: newPodsNames,
40 | }
41 |
42 | processingPodsStatuses:
43 | for k, v := range podsStatuses {
44 | res.Pods[k] = v
45 |
46 | for _, newPodName := range newPodsNames {
47 | if newPodName == k {
48 | if v.StatusIndicator != nil {
49 | // New Pod should be Running
50 | v.StatusIndicator.TargetValue = "Running"
51 | }
52 | continue processingPodsStatuses
53 | }
54 | }
55 |
56 | if v.StatusIndicator != nil {
 57 | 			// Old Pod should be gone
58 | v.StatusIndicator.TargetValue = ""
59 | }
60 | }
61 |
62 | res.IsReady = false
63 |
64 | if object.Status.ObservedGeneration >= object.Generation {
65 | if object.Spec.Replicas == nil {
66 | return res
67 | }
68 |
69 | res.ReplicasIndicator = &indicators.Int32EqualConditionIndicator{
70 | Value: object.Status.Replicas,
71 | TargetValue: *object.Spec.Replicas,
72 | }
73 | res.UpToDateIndicator = &indicators.Int32EqualConditionIndicator{
74 | Value: object.Status.UpdatedReplicas,
75 | TargetValue: *object.Spec.Replicas,
76 | }
77 | res.AvailableIndicator = &indicators.Int32EqualConditionIndicator{
78 | Value: object.Status.AvailableReplicas,
79 | TargetValue: *object.Spec.Replicas,
80 | }
81 |
82 | res.IsReady = true
83 | if object.Status.UpdatedReplicas != *object.Spec.Replicas {
84 | res.IsReady = false
85 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("up-to-date %d->%d", object.Status.UpdatedReplicas, *object.Spec.Replicas))
86 | }
87 | if object.Status.Replicas != *object.Spec.Replicas {
88 | res.IsReady = false
89 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("replicas %d->%d", object.Status.Replicas, *object.Spec.Replicas))
90 | }
91 | if object.Status.AvailableReplicas != *object.Spec.Replicas {
92 | res.IsReady = false
93 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("available %d->%d", object.Status.AvailableReplicas, *object.Spec.Replicas))
94 | }
95 | } else {
96 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("observed generation %d should be >= %d", object.Status.ObservedGeneration, object.Generation))
97 | }
98 |
99 | if !res.IsReady && !res.IsFailed {
100 | res.IsFailed = isTrackerFailed
101 | res.FailedReason = trackerFailedReason
102 | }
103 |
104 | return res
105 | }
106 |
107 | // DeploymentRolloutStatus returns a message describing deployment status, and a bool value indicating if the status is considered done.
108 | func DeploymentRolloutStatus(deployment *appsv1.Deployment, revision int64) (string, bool, error) {
109 | if revision > 0 {
110 | deploymentRev, err := utils.Revision(deployment)
111 | if err != nil {
112 | return "", false, fmt.Errorf("cannot get the revision of deployment %q: %w", deployment.Name, err)
113 | }
114 | if revision != deploymentRev {
115 | return "", false, fmt.Errorf("desired revision (%d) is different from the running revision (%d)", revision, deploymentRev)
116 | }
117 | }
118 | if deployment.Generation <= deployment.Status.ObservedGeneration {
119 | cond := utils.GetDeploymentCondition(deployment.Status, appsv1.DeploymentProgressing)
120 | if cond != nil && cond.Reason == utils.TimedOutReason {
121 | return "", false, fmt.Errorf("deployment %q exceeded its progress deadline", deployment.Name)
122 | }
123 | if deployment.Spec.Replicas != nil && deployment.Status.UpdatedReplicas < *deployment.Spec.Replicas {
124 | return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Name, deployment.Status.UpdatedReplicas, *deployment.Spec.Replicas), false, nil
125 | }
126 | if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
127 | return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d old replicas are pending termination...\n", deployment.Name, deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil
128 | }
129 | if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
130 | return fmt.Sprintf("Waiting for deployment %q rollout to finish: %d of %d updated replicas are available...\n", deployment.Name, deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil
131 | }
132 | return fmt.Sprintf("deployment %q successfully rolled out\n", deployment.Name), true, nil
133 | }
134 | return "Waiting for deployment spec update to be observed...\n", false, nil
135 | }
136 |
--------------------------------------------------------------------------------
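A minimal sketch of a one-shot readiness check via DeploymentRolloutStatus, in the same package; deploy (*appsv1.Deployment) is assumed to have been fetched elsewhere, and passing revision 0 skips the revision comparison.

msg, done, err := DeploymentRolloutStatus(deploy, 0)
if err != nil {
	return err
}
fmt.Print(msg)
if done {
	fmt.Println("rollout complete")
}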
/pkg/tracker/generic/common.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 |
8 | apierrors "k8s.io/apimachinery/pkg/api/errors"
9 | "k8s.io/client-go/tools/cache"
10 |
11 | "github.com/werf/logboek"
12 | )
13 |
14 | func init() {
15 | initResourceStatusJSONPathsByPriority()
16 | }
17 |
18 | type UnrecoverableWatchError struct {
19 | ResName string
20 | Err error
21 | }
22 |
23 | func (e UnrecoverableWatchError) Error() string {
24 | return fmt.Sprintf("unrecoverable watch error for %q: %s", e.ResName, e.Err.Error())
25 | }
26 |
27 | func (e UnrecoverableWatchError) Unwrap() error {
28 | return e.Err
29 | }
30 |
31 | type SetWatchErrorHandlerOptions struct {
32 | 	FatalWatchErr *UnrecoverableWatchError // If an unrecoverable watch error occurs, it is saved here.
33 | }
34 |
35 | func SetWatchErrorHandler(cancelFn context.CancelFunc, resName string, setWatchErrorHandler func(handler cache.WatchErrorHandler) error, opts SetWatchErrorHandlerOptions) error {
36 | return setWatchErrorHandler(
37 | func(r *cache.Reflector, err error) {
38 | isExpiredError := func(err error) bool {
39 | // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
40 | // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
41 | // and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone
42 | // check when we fully drop support for Kubernetes 1.17 servers from reflectors.
43 | return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
44 | }
45 |
46 | switch {
47 | case isExpiredError(err):
48 | // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
49 | // has a semantic that it returns data at least as fresh as provided RV.
50 | // So first try to LIST with setting RV to resource version of last observed object.
51 | logboek.Context(context.Background()).Info().LogF("watch of %q closed with: %s\n", resName, err)
52 | case err == io.EOF:
53 | // watch closed normally
54 | case err == io.ErrUnexpectedEOF:
55 | logboek.Context(context.Background()).Info().LogF("watch of %q closed with unexpected EOF: %s\n", resName, err)
56 | default:
57 | logboek.Context(context.Background()).Warn().LogF("failed to watch %q: %s\n", resName, err)
58 | if opts.FatalWatchErr != nil {
59 | *opts.FatalWatchErr = UnrecoverableWatchError{ResName: resName, Err: err}
60 | }
61 | cancelFn()
62 | }
63 | },
64 | )
65 | }
66 |
--------------------------------------------------------------------------------
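A minimal sketch of how SetWatchErrorHandler is meant to be wired into an informer, in the same package (this mirrors the pattern used by the watchers below); informer construction is assumed and resName is illustrative.

func runInformer(ctx context.Context, resName string, informer cache.SharedIndexInformer) error {
	runCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// A fatal (unrecoverable) watch error cancels runCtx and is recorded here.
	fatalErr := &UnrecoverableWatchError{}
	if err := SetWatchErrorHandler(cancel, resName, informer.SetWatchErrorHandler,
		SetWatchErrorHandlerOptions{FatalWatchErr: fatalErr}); err != nil {
		return err
	}

	informer.Run(runCtx.Done())

	if fatalErr.Err != nil {
		return fatalErr // Unwrap() yields the underlying watch error
	}
	return nil
}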
/pkg/tracker/generic/contrib_resource_status_rules.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | _ "embed"
5 | "encoding/json"
6 | "fmt"
7 |
8 | "github.com/xeipuuv/gojsonschema"
9 | "k8s.io/apimachinery/pkg/runtime/schema"
10 | "k8s.io/apimachinery/pkg/util/yaml"
11 | )
12 |
13 | //go:embed contrib_resource_status_rules.schema.json
14 | var contribResourceStatusRulesSchema string
15 |
16 | //go:embed contrib_resource_status_rules.yaml
17 | var contribResourceStatusRules string
18 |
19 | type ContribResourceStatusRules struct {
20 | Rules []struct {
21 | ResourceGroup *string `yaml:"resourceGroup"`
22 | ResourceKind *string `yaml:"resourceKind"`
23 | JSONPath string `yaml:"jsonPath"`
24 | HumanJSONPath string `yaml:"humanJsonPath"`
25 | Conditions struct {
26 | Ready []string `yaml:"ready"`
27 | Progressing []string `yaml:"progressing"`
28 | Failed []string `yaml:"failed"`
29 | } `yaml:"conditions"`
30 | } `yaml:"rules"`
31 | }
32 |
33 | func buildContribResourceStatusRules() {
34 | rulesJsonByte, err := yaml.ToJSON([]byte(contribResourceStatusRules))
35 | if err != nil {
36 | panic(fmt.Sprintf("convert rules yaml file to json: %s", err))
37 | }
38 | rulesJson := string(rulesJsonByte)
39 |
40 | schemaLoader := gojsonschema.NewStringLoader(contribResourceStatusRulesSchema)
41 | documentLoader := gojsonschema.NewStringLoader(rulesJson)
42 |
43 | if result, err := gojsonschema.Validate(schemaLoader, documentLoader); err != nil {
44 | panic(fmt.Sprintf("validate rules file: %s", err))
45 | } else if !result.Valid() {
46 | msg := "Rules file is not valid:\n"
47 | for _, err := range result.Errors() {
48 | msg += fmt.Sprintf("- %s\n", err)
49 | }
50 | panic(msg)
51 | }
52 |
53 | rules := &ContribResourceStatusRules{}
54 | if err := json.Unmarshal(rulesJsonByte, rules); err != nil {
55 | panic(fmt.Sprintf("unmarshal rules file: %s", err))
56 | }
57 |
58 | for _, rule := range rules.Rules {
59 | var groupKind *schema.GroupKind
60 | if rule.ResourceGroup != nil && rule.ResourceKind != nil {
61 | groupKind = &schema.GroupKind{Group: *rule.ResourceGroup, Kind: *rule.ResourceKind}
62 | }
63 |
64 | ResourceStatusJSONPathConditions = append(ResourceStatusJSONPathConditions, &ResourceStatusJSONPathCondition{
65 | GroupKind: groupKind,
66 | JSONPath: rule.JSONPath,
67 | HumanPath: rule.HumanJSONPath,
68 | ReadyValues: casify(rule.Conditions.Ready...),
69 | PendingValues: casify(rule.Conditions.Progressing...),
70 | FailedValues: casify(rule.Conditions.Failed...),
71 | })
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/tracker/generic/contrib_resource_status_rules.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://json-schema.org/draft-07/schema",
3 | "$id": "http://werf.io/contrib_resource_status_rules.schema.json",
4 | "title": "Contrib resource status rules schema",
5 | "type": "object",
6 | "required": [
7 | "rules"
8 | ],
9 | "properties": {
10 | "rules": {
11 | "type": "array",
12 | "items": {
13 | "type": "object",
14 | "dependencies": {
15 | "resourceGroup": ["resourceKind"],
16 | "resourceKind": ["resourceGroup"]
17 | },
18 | "required": [
19 | "jsonPath",
20 | "humanJsonPath",
21 | "conditions"
22 | ],
23 | "properties": {
24 | "resourceGroup": {
25 | "type": "string",
26 | "minLength": 1
27 | },
28 | "resourceKind": {
29 | "type": "string",
30 | "minLength": 1
31 | },
32 | "jsonPath": {
33 | "type": "string",
34 | "minLength": 1
35 | },
36 | "humanJsonPath": {
37 | "type": "string",
38 | "minLength": 1
39 | },
40 | "conditions": {
41 | "type": "object",
42 | "required": [
43 | "ready",
44 | "progressing"
45 | ],
46 | "properties": {
47 | "ready": {
48 | "type": "array",
49 | "items": {
50 | "type": "string",
51 | "minLength": 1
52 | }
53 | },
54 | "progressing": {
55 | "type": "array",
56 | "items": {
57 | "type": "string",
58 | "minLength": 1
59 | }
60 | },
61 | "failed": {
62 | "type": "array",
63 | "items": {
64 | "type": "string",
65 | "minLength": 1
66 | }
67 | }
68 | }
69 | }
70 | }
71 | }
72 | }
73 | }
74 | }
--------------------------------------------------------------------------------
/pkg/tracker/generic/contrib_resource_status_rules.yaml:
--------------------------------------------------------------------------------
1 | # For many of the standard Kubernetes resources we have generic logic to detect their status.
2 | # But for some resources, especially Custom Resources, we need more specific logic to determine
3 | # their status. Custom rules to detect status of such resources are defined in this file.
4 |
5 | rules:
6 | # Match the resource by its API group (without version)
7 | - resourceGroup: "acid.zalan.do"
 8 |   # Match the resource by its kind (case-insensitive)
9 | resourceKind: "postgresql"
10 | # JSON Path to match the single field in the resource live manifest (get the manifest via
11 | # `kubectl get -o json`). This is the field that we will monitor to determine the resource
12 | # status.
13 | jsonPath: "$.status.PostgresClusterStatus"
14 | # JSON Path in more human-readable form, used only in deploy logs to inform the user of the
15 | # field that we are monitoring. Anything can be here, it does not need to be a valid JSON
16 | # path, but it should be informative.
17 | humanJsonPath: "status.PostgresClusterStatus"
18 |   # Fill out conditions as much as possible. The quality of tracking and error detection
19 |   # depends on this.
20 | conditions:
21 | # If the field that we got via JSON Path has value "Running", then we consider the resource
22 |     # to be ready. Status tracking for this resource stops immediately.
23 | ready:
24 | - "Running"
25 | # If the field that we got via JSON Path has value "Creating" or "Updating", then we
26 | # consider the resource neither ready nor failed yet. Status tracking for this resource
27 | # will continue.
28 | progressing:
29 | - "Creating"
30 | - "Updating"
31 | # If the field that we got via JSON Path has value "CreateFailed" or "UpdateFailed" or
32 | # "DeleteFailed", then we consider the resource to be failed. Status tracking for this
33 | # resource will stop if the error threshold (configurable by the user) is reached. The
34 | # deployment will fail (configurable by the user).
35 | failed:
36 | - "CreateFailed"
37 | - "UpdateFailed"
38 | - "DeleteFailed"
39 | - resourceGroup: "external-secrets.io"
40 | resourceKind: "ExternalSecret"
41 | jsonPath: '$.status.conditions[?(@.type=="Ready")].status'
42 | humanJsonPath: "status.conditions[type=Ready].status"
43 | conditions:
44 | ready:
45 | - "True"
46 | progressing:
47 | - "Unknown"
48 | failed:
49 | - "False"
50 |
--------------------------------------------------------------------------------
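A hypothetical additional rule following the same schema (illustrative only; the field values for cert-manager's Certificate are an assumption and should be verified against the actual CRD):

- resourceGroup: "cert-manager.io"
  resourceKind: "Certificate"
  jsonPath: '$.status.conditions[?(@.type=="Ready")].status'
  humanJsonPath: "status.conditions[type=Ready].status"
  conditions:
    ready:
      - "True"
    progressing:
      - "Unknown"
    failed:
      - "False"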
/pkg/tracker/generic/event_status.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import corev1 "k8s.io/api/core/v1"
4 |
5 | type EventStatus struct {
6 | event *corev1.Event
7 | isFailure bool
8 | failureReason string
9 | }
10 |
11 | func NewEventStatus(event *corev1.Event) *EventStatus {
12 | return &EventStatus{
13 | event: event,
14 | }
15 | }
16 |
17 | func (s *EventStatus) IsFailure() bool {
18 | return s.isFailure
19 | }
20 |
21 | func (s *EventStatus) FailureReason() string {
22 | return s.failureReason
23 | }
24 |
--------------------------------------------------------------------------------
/pkg/tracker/generic/feed.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "time"
8 |
9 | corev1 "k8s.io/api/core/v1"
10 | watchtools "k8s.io/client-go/tools/watch"
11 |
12 | "github.com/werf/kubedog/pkg/tracker"
13 | "github.com/werf/kubedog/pkg/tracker/debug"
14 | )
15 |
16 | type Feed struct {
17 | tracker *Tracker
18 |
19 | onAddedFunc func(status *ResourceStatus) error
20 | onReadyFunc func(status *ResourceStatus) error
21 | onFailedFunc func(status *ResourceStatus) error
22 | onStatusFunc func(status *ResourceStatus) error
23 | onEventMsgFunc func(event *corev1.Event) error
24 | }
25 |
26 | func NewFeed(tracker *Tracker) *Feed {
27 | return &Feed{
28 | tracker: tracker,
29 | }
30 | }
31 |
32 | func (f *Feed) OnAdded(function func(status *ResourceStatus) error) {
33 | f.onAddedFunc = function
34 | }
35 |
36 | func (f *Feed) OnReady(function func(status *ResourceStatus) error) {
37 | f.onReadyFunc = function
38 | }
39 |
40 | func (f *Feed) OnFailed(function func(status *ResourceStatus) error) {
41 | f.onFailedFunc = function
42 | }
43 |
44 | func (f *Feed) OnStatus(function func(status *ResourceStatus) error) {
45 | f.onStatusFunc = function
46 | }
47 |
48 | func (f *Feed) OnEventMsg(function func(event *corev1.Event) error) {
49 | f.onEventMsgFunc = function
50 | }
51 |
52 | func (f *Feed) Track(ctx context.Context, timeout, noActivityTimeout time.Duration) error {
53 | ctx, cancelFunc := watchtools.ContextWithOptionalTimeout(ctx, timeout)
54 | defer cancelFunc()
55 |
56 | addedCh := make(chan *ResourceStatus)
57 | succeededCh := make(chan *ResourceStatus)
58 | failedCh := make(chan *ResourceStatus)
59 | regularCh := make(chan *ResourceStatus, 100)
60 | eventCh := make(chan *corev1.Event)
61 | errCh := make(chan error, 10)
62 |
63 | go func() {
64 | if debug.Debug() {
65 | fmt.Printf(" goroutine: start %s tracker\n", f.tracker.ResourceID)
66 | }
67 |
68 | errCh <- f.tracker.Track(ctx, noActivityTimeout, addedCh, succeededCh, failedCh, regularCh, eventCh)
69 | }()
70 |
71 | for {
72 | select {
73 | case status := <-addedCh:
74 | if f.onAddedFunc != nil {
75 | if err := f.onAddedFunc(status); errors.Is(err, tracker.ErrStopTrack) {
76 | return nil
77 | } else if err != nil {
78 | return err
79 | }
80 | }
81 | case status := <-succeededCh:
82 | if f.onReadyFunc != nil {
83 | if err := f.onReadyFunc(status); errors.Is(err, tracker.ErrStopTrack) {
84 | return nil
85 | } else if err != nil {
86 | return err
87 | }
88 | }
89 | case status := <-failedCh:
90 | if f.onFailedFunc != nil {
91 | if err := f.onFailedFunc(status); errors.Is(err, tracker.ErrStopTrack) {
92 | return nil
93 | } else if err != nil {
94 | return err
95 | }
96 | }
97 | case status := <-regularCh:
98 | if f.onStatusFunc != nil {
99 | if err := f.onStatusFunc(status); errors.Is(err, tracker.ErrStopTrack) {
100 | return nil
101 | } else if err != nil {
102 | return err
103 | }
104 | }
105 | case event := <-eventCh:
106 | if f.onEventMsgFunc != nil {
107 | if err := f.onEventMsgFunc(event); errors.Is(err, tracker.ErrStopTrack) {
108 | return nil
109 | } else if err != nil {
110 | return err
111 | }
112 | }
113 | case err := <-errCh:
114 | return err
115 | }
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
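A minimal sketch of consuming generic tracker events through the Feed callbacks; construction of trk (*generic.Tracker) is assumed to happen elsewhere, and the timeouts are illustrative.

feed := generic.NewFeed(trk)

feed.OnReady(func(status *generic.ResourceStatus) error {
	fmt.Println("resource is ready")
	return tracker.ErrStopTrack
})
feed.OnFailed(func(status *generic.ResourceStatus) error {
	return fmt.Errorf("resource failed: %s", status.FailureReason())
})

err := feed.Track(context.Background(), 10*time.Minute, 4*time.Minute)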
/pkg/tracker/generic/ready_condition.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/samber/lo"
7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
8 |
9 | "github.com/werf/kubedog/pkg/tracker/indicators"
10 | "github.com/werf/kubedog/pkg/utils"
11 | )
12 |
13 | func NewResourceStatusIndicator(object *unstructured.Unstructured) (indicator *indicators.StringEqualConditionIndicator, humanJSONPath string, err error) {
14 | groupKind := object.GroupVersionKind().GroupKind()
15 |
16 | var matchedCondition *ResourceStatusJSONPathCondition
17 | for _, condition := range ResourceStatusJSONPathConditions {
18 | exactCondition := condition.GroupKind != nil
19 |
20 | if exactCondition {
21 | exactMatch := *condition.GroupKind == groupKind
22 | if !exactMatch {
23 | continue
24 | }
25 |
26 | currentValue, _, err := utils.JSONPath(condition.JSONPath, object.UnstructuredContent())
27 | if err != nil {
28 | return nil, "", fmt.Errorf("jsonpath error: %w", err)
29 | }
30 |
31 | matchedCondition = condition
32 | matchedCondition.CurrentValue = currentValue
33 | break
34 | } else {
35 | currentValue, found, err := utils.JSONPath(condition.JSONPath, object.UnstructuredContent())
36 | if err != nil {
37 | return nil, "", fmt.Errorf("jsonpath error: %w", err)
38 | } else if !found {
39 | continue
40 | }
41 |
42 | knownValues := lo.Union(condition.ReadyValues, condition.PendingValues, condition.FailedValues)
43 |
44 | if lo.Contains(knownValues, currentValue) {
45 | matchedCondition = condition
46 | matchedCondition.CurrentValue = currentValue
47 | break
48 | }
49 | }
50 | }
51 |
52 | if matchedCondition == nil {
53 | return nil, "", nil
54 | }
55 |
56 | indicator = &indicators.StringEqualConditionIndicator{
57 | Value: matchedCondition.CurrentValue,
58 | }
59 | indicator.SetReady(lo.Contains(matchedCondition.ReadyValues, matchedCondition.CurrentValue))
60 | indicator.SetFailed(lo.Contains(matchedCondition.FailedValues, matchedCondition.CurrentValue))
61 |
62 | return indicator, matchedCondition.HumanPath, nil
63 | }
64 |
--------------------------------------------------------------------------------
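A minimal sketch of evaluating the indicator for an unstructured object, in the same package; the manifest values are illustrative and mirror the acid.zalan.do/postgresql rule from the contrib rules file.

obj := &unstructured.Unstructured{Object: map[string]interface{}{
	"apiVersion": "acid.zalan.do/v1",
	"kind":       "postgresql",
	"status": map[string]interface{}{
		"PostgresClusterStatus": "Running",
	},
}}

indicator, humanPath, err := NewResourceStatusIndicator(obj)
if err != nil {
	return err
}
if indicator != nil {
	fmt.Printf("%s = %q (ready: %v)\n", humanPath, indicator.Value, indicator.IsReady())
}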
/pkg/tracker/generic/resource_events_watcher.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 |
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
11 | "k8s.io/apimachinery/pkg/runtime"
12 | "k8s.io/apimachinery/pkg/types"
13 | "k8s.io/apimachinery/pkg/watch"
14 | "k8s.io/client-go/kubernetes"
15 | "k8s.io/client-go/tools/cache"
16 |
17 | "github.com/werf/kubedog/pkg/tracker/debug"
18 | "github.com/werf/kubedog/pkg/tracker/resid"
19 | "github.com/werf/kubedog/pkg/utils"
20 | )
21 |
22 | type ResourceEventsWatcher struct {
23 | ResourceID *resid.ResourceID
24 |
25 | object *unstructured.Unstructured
26 |
27 | resourceInitialEventsUIDsMux sync.Mutex
28 | resourceInitialEventsUIDsList []types.UID
29 |
30 | client kubernetes.Interface
31 | }
32 |
33 | func NewResourceEventsWatcher(
34 | object *unstructured.Unstructured,
35 | resID *resid.ResourceID,
36 | client kubernetes.Interface,
37 | ) *ResourceEventsWatcher {
38 | return &ResourceEventsWatcher{
39 | ResourceID: resID,
40 | object: object,
41 | client: client,
42 | }
43 | }
44 |
45 | func (i *ResourceEventsWatcher) Run(ctx context.Context, eventsCh chan<- *corev1.Event) error {
46 | runCtx, runCancelFn := context.WithCancel(ctx)
47 | 	defer runCancelFn()
48 | i.generateResourceInitialEventsUIDs(runCtx)
49 |
50 | fieldsSet, eventsNs := utils.EventFieldSelectorFromUnstructured(i.object)
51 |
52 | tweakListOptsFn := func(options *metav1.ListOptions) {
53 | options.FieldSelector = fieldsSet.AsSelector().String()
54 | }
55 |
56 | informer := cache.NewSharedIndexInformer(
57 | &cache.ListWatch{
58 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
59 | tweakListOptsFn(&options)
60 | return i.client.CoreV1().Events(eventsNs).List(runCtx, options)
61 | },
62 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
63 | tweakListOptsFn(&options)
64 | return i.client.CoreV1().Events(eventsNs).Watch(runCtx, options)
65 | },
66 | },
67 | &corev1.Event{},
68 | 0,
69 | cache.Indexers{},
70 | )
71 |
72 | informer.AddEventHandler(
73 | cache.ResourceEventHandlerFuncs{
74 | AddFunc: func(obj interface{}) {
75 | if debug.Debug() {
76 | fmt.Printf(" add event: %#v\n", i.ResourceID)
77 | }
78 | i.handleEventStateChange(runCtx, obj.(*corev1.Event), eventsCh)
79 | },
80 | UpdateFunc: func(oldObj, newObj interface{}) {
81 | if debug.Debug() {
82 | fmt.Printf(" update event: %#v\n", i.ResourceID)
83 | }
84 | i.handleEventStateChange(runCtx, newObj.(*corev1.Event), eventsCh)
85 | },
86 | DeleteFunc: func(obj interface{}) {
87 | if debug.Debug() {
88 | fmt.Printf(" delete event: %#v\n", i.ResourceID)
89 | }
90 | },
91 | },
92 | )
93 |
94 | if err := SetWatchErrorHandler(runCancelFn, i.ResourceID.String(), informer.SetWatchErrorHandler, SetWatchErrorHandlerOptions{}); err != nil {
95 | return fmt.Errorf("error setting watch error handler: %w", err)
96 | }
97 |
98 | if debug.Debug() {
99 | fmt.Printf("> %s run event informer\n", i.ResourceID)
100 | }
101 |
102 | informer.Run(runCtx.Done())
103 |
104 | if debug.Debug() {
105 | fmt.Printf(" %s event informer DONE\n", i.ResourceID)
106 | }
107 |
108 | return nil
109 | }
110 |
111 | func (i *ResourceEventsWatcher) generateResourceInitialEventsUIDs(ctx context.Context) {
112 | eventsList, err := utils.ListEventsForUnstructured(ctx, i.client, i.object)
113 | if err != nil {
114 | if debug.Debug() {
115 | fmt.Printf("list event error: %v\n", err)
116 | }
117 | return
118 | }
119 |
120 | if debug.Debug() {
121 | utils.DescribeEvents(eventsList)
122 | }
123 |
124 | for _, event := range eventsList.Items {
125 | i.appendResourceInitialEventsUID(event.GetUID())
126 | }
127 | }
128 |
129 | func (i *ResourceEventsWatcher) handleEventStateChange(ctx context.Context, eventObj *corev1.Event, eventsCh chan<- *corev1.Event) {
130 | for _, uid := range i.resourceInitialEventsUIDs() {
131 | if uid != eventObj.GetUID() {
132 | continue
133 | }
134 |
135 | if debug.Debug() {
136 | fmt.Printf("IGNORE initial event: %s: %s\n", eventObj.Reason, eventObj.Message)
137 | }
138 |
139 | i.deleteResourceInitialEventsUID(uid)
140 |
141 | return
142 | }
143 |
144 | if debug.Debug() {
145 | fmt.Printf(" %s got normal event: %s: %s\n", i.ResourceID, eventObj.Reason, eventObj.Message)
146 | }
147 |
148 | eventsCh <- eventObj
149 | }
150 |
151 | func (i *ResourceEventsWatcher) resourceInitialEventsUIDs() []types.UID {
152 | i.resourceInitialEventsUIDsMux.Lock()
153 | defer i.resourceInitialEventsUIDsMux.Unlock()
154 |
155 | return i.resourceInitialEventsUIDsList
156 | }
157 |
158 | func (i *ResourceEventsWatcher) appendResourceInitialEventsUID(uid types.UID) {
159 | i.resourceInitialEventsUIDsMux.Lock()
160 | defer i.resourceInitialEventsUIDsMux.Unlock()
161 |
162 | i.resourceInitialEventsUIDsList = append(i.resourceInitialEventsUIDsList, uid)
163 | }
164 |
165 | func (i *ResourceEventsWatcher) deleteResourceInitialEventsUID(uid types.UID) {
166 | i.resourceInitialEventsUIDsMux.Lock()
167 | defer i.resourceInitialEventsUIDsMux.Unlock()
168 |
169 | var result []types.UID
170 | for _, u := range i.resourceInitialEventsUIDsList {
171 | if u == uid {
172 | continue
173 | }
174 |
175 | result = append(result, u)
176 | }
177 |
178 | i.resourceInitialEventsUIDsList = result
179 | }
180 |
--------------------------------------------------------------------------------
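A minimal sketch of watching events for a resource while pre-existing events are ignored (the watcher records their UIDs before the informer starts); object, resID, client and ctx are assumed to be set up elsewhere.

watcher := generic.NewResourceEventsWatcher(object, resID, client)

eventsCh := make(chan *corev1.Event)
go func() {
	for event := range eventsCh {
		fmt.Printf("%s: %s\n", event.Reason, event.Message)
	}
}()

if err := watcher.Run(ctx, eventsCh); err != nil {
	return err
}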
/pkg/tracker/generic/resource_state_watcher.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "k8s.io/apimachinery/pkg/api/meta"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
10 | "k8s.io/apimachinery/pkg/fields"
11 | "k8s.io/client-go/dynamic"
12 | "k8s.io/client-go/dynamic/dynamicinformer"
13 | "k8s.io/client-go/kubernetes"
14 | "k8s.io/client-go/tools/cache"
15 |
16 | "github.com/werf/kubedog/pkg/tracker/debug"
17 | "github.com/werf/kubedog/pkg/tracker/resid"
18 | )
19 |
20 | type ResourceStateWatcher struct {
21 | ResourceID *resid.ResourceID
22 |
23 | client kubernetes.Interface
24 | dynamicClient dynamic.Interface
25 | mapper meta.RESTMapper
26 | }
27 |
28 | func NewResourceStateWatcher(
29 | resID *resid.ResourceID,
30 | client kubernetes.Interface,
31 | dynClient dynamic.Interface,
32 | mapper meta.RESTMapper,
33 | ) *ResourceStateWatcher {
34 | return &ResourceStateWatcher{
35 | ResourceID: resID,
36 | client: client,
37 | dynamicClient: dynClient,
38 | mapper: mapper,
39 | }
40 | }
41 |
42 | func (w *ResourceStateWatcher) Run(ctx context.Context, resourceAddedCh, resourceModifiedCh, resourceDeletedCh chan<- *unstructured.Unstructured) error {
43 | runCtx, runCancelFn := context.WithCancel(ctx)
44 | defer runCancelFn()
45 |
46 | gvr, err := w.ResourceID.GroupVersionResource(w.mapper)
47 | if err != nil {
48 | return fmt.Errorf("error getting GroupVersionResource: %w", err)
49 | }
50 |
51 | informer := dynamicinformer.NewFilteredDynamicInformer(
52 | w.dynamicClient,
53 | *gvr,
54 | w.ResourceID.Namespace,
55 | 0,
56 | cache.Indexers{},
57 | func(options *metav1.ListOptions) {
58 | options.FieldSelector = fields.OneTermEqualSelector("metadata.name", w.ResourceID.Name).String()
59 | },
60 | )
61 |
62 | informer.Informer().AddEventHandler(
63 | cache.ResourceEventHandlerFuncs{
64 | AddFunc: func(obj interface{}) {
65 | if debug.Debug() {
66 | fmt.Printf(" add state event: %#v\n", w.ResourceID)
67 | }
68 | resourceAddedCh <- obj.(*unstructured.Unstructured)
69 | },
70 | UpdateFunc: func(oldObj, newObj interface{}) {
71 | if debug.Debug() {
72 | fmt.Printf(" update state event: %#v\n", w.ResourceID)
73 | }
74 | resourceModifiedCh <- newObj.(*unstructured.Unstructured)
75 | },
76 | DeleteFunc: func(obj interface{}) {
77 | if debug.Debug() {
78 | fmt.Printf(" delete state event: %#v\n", w.ResourceID)
79 | }
80 | resourceDeletedCh <- obj.(*unstructured.Unstructured)
81 | },
82 | },
83 | )
84 |
85 | fatalWatchErr := &UnrecoverableWatchError{}
86 | if err := SetWatchErrorHandler(runCancelFn, w.ResourceID.String(), informer.Informer().SetWatchErrorHandler, SetWatchErrorHandlerOptions{FatalWatchErr: fatalWatchErr}); err != nil {
87 | return fmt.Errorf("error setting watch error handler: %w", err)
88 | }
89 |
90 | if debug.Debug() {
91 | fmt.Printf(" %s resource watcher STARTED\n", w.ResourceID)
92 | }
93 |
94 | informer.Informer().Run(runCtx.Done())
95 |
96 | if debug.Debug() {
97 | fmt.Printf(" %s resource watcher DONE\n", w.ResourceID)
98 | }
99 |
100 | if fatalWatchErr.Err != nil {
101 | return fatalWatchErr
102 | } else {
103 | return nil
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
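A minimal sketch of running a ResourceStateWatcher and consuming its channels; resID, client, dynClient, mapper and ctx are assumed to be configured elsewhere.

watcher := generic.NewResourceStateWatcher(resID, client, dynClient, mapper)

addedCh := make(chan *unstructured.Unstructured)
modifiedCh := make(chan *unstructured.Unstructured)
deletedCh := make(chan *unstructured.Unstructured)

errCh := make(chan error, 1)
go func() {
	errCh <- watcher.Run(ctx, addedCh, modifiedCh, deletedCh)
}()

for {
	select {
	case obj := <-addedCh:
		fmt.Printf("added: %s\n", obj.GetName())
	case obj := <-modifiedCh:
		fmt.Printf("modified: %s\n", obj.GetName())
	case obj := <-deletedCh:
		fmt.Printf("deleted: %s\n", obj.GetName())
	case err := <-errCh:
		return err
	}
}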
/pkg/tracker/generic/resource_status.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/indicators"
9 | )
10 |
11 | type ResourceStatus struct {
12 | Indicator *indicators.StringEqualConditionIndicator
13 |
14 | isReady bool
15 | isDeleted bool
16 | isFailed bool
17 | failureReason string
18 | humanConditionPath string
19 | }
20 |
21 | func NewResourceStatus(object *unstructured.Unstructured) (*ResourceStatus, error) {
22 | resourceStatusIndicator, humanJSONPath, err := NewResourceStatusIndicator(object)
23 | if err != nil {
24 | return nil, fmt.Errorf("error getting resource status indicator: %w", err)
25 | }
26 |
27 | isFailed := resourceStatusIndicator != nil && resourceStatusIndicator.IsFailed()
28 |
29 | var failureReason string
30 | if isFailed {
31 | failureReason = "Resource status field value matched failed condition."
32 | }
33 |
34 | return &ResourceStatus{
35 | Indicator: resourceStatusIndicator,
36 | 		isReady:            resourceStatusIndicator == nil || resourceStatusIndicator.IsReady(),
37 | isFailed: isFailed,
38 | humanConditionPath: humanJSONPath,
39 | failureReason: failureReason,
40 | }, nil
41 | }
42 |
43 | func NewSucceededResourceStatus() *ResourceStatus {
44 | return &ResourceStatus{
45 | isReady: true,
46 | }
47 | }
48 |
49 | func NewFailedResourceStatus(failureReason string) *ResourceStatus {
50 | return &ResourceStatus{
51 | isFailed: true,
52 | failureReason: failureReason,
53 | }
54 | }
55 |
56 | func NewDeletedResourceStatus() *ResourceStatus {
57 | return &ResourceStatus{
58 | isDeleted: true,
59 | }
60 | }
61 |
62 | func (s *ResourceStatus) IsReady() bool {
63 | return s.isReady
64 | }
65 |
66 | func (s *ResourceStatus) IsFailed() bool {
67 | return s.isFailed
68 | }
69 |
70 | func (s *ResourceStatus) IsDeleted() bool {
71 | return s.isDeleted
72 | }
73 |
74 | func (s *ResourceStatus) FailureReason() string {
75 | return s.failureReason
76 | }
77 |
78 | func (s *ResourceStatus) HumanConditionPath() string {
79 | return s.humanConditionPath
80 | }
81 |
82 | func (s *ResourceStatus) DiffersFrom(newStatus *ResourceStatus) bool {
83 | if s.IsReady() != newStatus.IsReady() ||
84 | s.IsDeleted() != newStatus.IsDeleted() ||
85 | s.IsFailed() != newStatus.IsFailed() ||
86 | s.FailureReason() != newStatus.FailureReason() ||
87 | s.HumanConditionPath() != newStatus.HumanConditionPath() {
88 | return true
89 | }
90 |
91 | return false
92 | }
93 |
--------------------------------------------------------------------------------
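A minimal sketch of the intended use of DiffersFrom: emit a status update only when it differs from the last one sent. lastStatus and statusCh are illustrative names.

newStatus, err := generic.NewResourceStatus(obj)
if err != nil {
	return err
}
if lastStatus == nil || lastStatus.DiffersFrom(newStatus) {
	statusCh <- newStatus
	lastStatus = newStatus
}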
/pkg/tracker/job/feed.go:
--------------------------------------------------------------------------------
1 | package job
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 |
8 | "k8s.io/client-go/kubernetes"
9 | watchtools "k8s.io/client-go/tools/watch"
10 |
11 | "github.com/werf/kubedog/pkg/tracker"
12 | "github.com/werf/kubedog/pkg/tracker/debug"
13 | "github.com/werf/kubedog/pkg/tracker/pod"
14 | )
15 |
16 | type Feed interface {
17 | OnAdded(func() error)
18 | OnSucceeded(func() error)
19 | OnFailed(func(reason string) error)
20 | OnEventMsg(func(msg string) error)
21 | OnAddedPod(func(podName string) error)
22 | OnPodLogChunk(func(*pod.PodLogChunk) error)
23 | OnPodError(func(pod.PodError) error)
24 | OnStatus(func(JobStatus) error)
25 |
26 | GetStatus() JobStatus
27 | Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error
28 | }
29 |
30 | func NewFeed() Feed {
31 | return &feed{}
32 | }
33 |
34 | type feed struct {
35 | OnAddedFunc func() error
36 | OnSucceededFunc func() error
37 | OnFailedFunc func(string) error
38 | OnEventMsgFunc func(string) error
39 | OnAddedPodFunc func(string) error
40 | OnPodLogChunkFunc func(*pod.PodLogChunk) error
41 | OnPodErrorFunc func(pod.PodError) error
42 | OnStatusFunc func(JobStatus) error
43 |
44 | statusMux sync.Mutex
45 | status JobStatus
46 | }
47 |
48 | func (f *feed) OnAdded(function func() error) {
49 | f.OnAddedFunc = function
50 | }
51 |
52 | func (f *feed) OnSucceeded(function func() error) {
53 | f.OnSucceededFunc = function
54 | }
55 |
56 | func (f *feed) OnFailed(function func(string) error) {
57 | f.OnFailedFunc = function
58 | }
59 |
60 | func (f *feed) OnEventMsg(function func(string) error) {
61 | f.OnEventMsgFunc = function
62 | }
63 |
64 | func (f *feed) OnAddedPod(function func(string) error) {
65 | f.OnAddedPodFunc = function
66 | }
67 |
68 | func (f *feed) OnPodLogChunk(function func(*pod.PodLogChunk) error) {
69 | f.OnPodLogChunkFunc = function
70 | }
71 |
72 | func (f *feed) OnPodError(function func(pod.PodError) error) {
73 | f.OnPodErrorFunc = function
74 | }
75 |
76 | func (f *feed) OnStatus(function func(JobStatus) error) {
77 | f.OnStatusFunc = function
78 | }
79 |
80 | func (f *feed) Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
81 | errorChan := make(chan error)
82 | doneChan := make(chan struct{})
83 |
84 | parentContext := opts.ParentContext
85 | if parentContext == nil {
86 | parentContext = context.Background()
87 | }
88 | ctx, cancel := watchtools.ContextWithOptionalTimeout(parentContext, opts.Timeout)
89 | defer cancel()
90 |
91 | job := NewTracker(name, namespace, kube, opts)
92 |
93 | go func() {
94 | err := job.Track(ctx)
95 | if err != nil {
96 | errorChan <- err
97 | } else {
98 | doneChan <- struct{}{}
99 | }
100 | }()
101 |
102 | for {
103 | select {
104 | case status := <-job.Added:
105 | f.setStatus(status)
106 |
107 | if f.OnAddedFunc != nil {
108 | err := f.OnAddedFunc()
109 | if err == tracker.ErrStopTrack {
110 | return nil
111 | }
112 | if err != nil {
113 | return err
114 | }
115 | }
116 |
117 | case status := <-job.Succeeded:
118 | f.setStatus(status)
119 |
120 | if f.OnSucceededFunc != nil {
121 | err := f.OnSucceededFunc()
122 | if err == tracker.ErrStopTrack {
123 | return nil
124 | }
125 | if err != nil {
126 | return err
127 | }
128 | }
129 |
130 | case status := <-job.Failed:
131 | f.setStatus(status)
132 |
133 | if f.OnFailedFunc != nil {
134 | err := f.OnFailedFunc(status.FailedReason)
135 | if err == tracker.ErrStopTrack {
136 | return nil
137 | }
138 | if err != nil {
139 | return err
140 | }
141 | }
142 |
143 | case msg := <-job.EventMsg:
144 | if debug.Debug() {
145 | fmt.Printf("Job `%s` event msg: %s\n", job.ResourceName, msg)
146 | }
147 |
148 | if f.OnEventMsgFunc != nil {
149 | err := f.OnEventMsgFunc(msg)
150 | if err == tracker.ErrStopTrack {
151 | return nil
152 | }
153 | if err != nil {
154 | return err
155 | }
156 | }
157 |
158 | case report := <-job.AddedPod:
159 | f.setStatus(report.JobStatus)
160 |
161 | if f.OnAddedPodFunc != nil {
162 | err := f.OnAddedPodFunc(report.PodName)
163 | if err == tracker.ErrStopTrack {
164 | return nil
165 | }
166 | if err != nil {
167 | return err
168 | }
169 | }
170 |
171 | case chunk := <-job.PodLogChunk:
172 | if debug.Debug() {
173 | fmt.Printf("Job's `%s` pod `%s` log chunk\n", job.ResourceName, chunk.PodName)
174 | for _, line := range chunk.LogLines {
175 | fmt.Printf("[%s] %s\n", line.Timestamp, line.Message)
176 | }
177 | }
178 |
179 | if f.OnPodLogChunkFunc != nil {
180 | err := f.OnPodLogChunkFunc(chunk)
181 | if err == tracker.ErrStopTrack {
182 | return nil
183 | }
184 | if err != nil {
185 | return err
186 | }
187 | }
188 |
189 | case report := <-job.PodError:
190 | f.setStatus(report.JobStatus)
191 |
192 | if f.OnPodErrorFunc != nil {
193 | err := f.OnPodErrorFunc(report.PodError)
194 | if err == tracker.ErrStopTrack {
195 | return nil
196 | }
197 | if err != nil {
198 | return err
199 | }
200 | }
201 |
202 | case status := <-job.Status:
203 | f.setStatus(status)
204 |
205 | if f.OnStatusFunc != nil {
206 | err := f.OnStatusFunc(status)
207 | if err == tracker.ErrStopTrack {
208 | return nil
209 | }
210 | if err != nil {
211 | return err
212 | }
213 | }
214 |
215 | case err := <-errorChan:
216 | return err
217 | case <-doneChan:
218 | return nil
219 | }
220 | }
221 | }
222 |
223 | func (f *feed) setStatus(status JobStatus) {
224 | f.statusMux.Lock()
225 | defer f.statusMux.Unlock()
226 |
227 | if status.StatusGeneration > f.status.StatusGeneration {
228 | f.status = status
229 | }
230 | }
231 |
232 | func (f *feed) GetStatus() JobStatus {
233 | f.statusMux.Lock()
234 | defer f.statusMux.Unlock()
235 | return f.status
236 | }
237 |
--------------------------------------------------------------------------------
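A minimal sketch (not part of the repository) of tracking a Job and streaming its Pod logs through the feed above; client setup is assumed and imports mirror the deployment feed example.

feed := job.NewFeed()

feed.OnSucceeded(func() error {
	fmt.Println("job succeeded")
	return tracker.ErrStopTrack
})
feed.OnFailed(func(reason string) error {
	return fmt.Errorf("job failed: %s", reason)
})
feed.OnPodLogChunk(func(chunk *pod.PodLogChunk) error {
	for _, line := range chunk.LogLines {
		fmt.Printf("po/%s [%s] %s\n", chunk.PodName, line.Timestamp, line.Message)
	}
	return nil
})

err := feed.Track("myjob", "mynamespace", client, tracker.Options{})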
/pkg/tracker/job/status.go:
--------------------------------------------------------------------------------
1 | package job
2 |
3 | import (
4 | "fmt"
5 |
6 | batchv1 "k8s.io/api/batch/v1"
7 | corev1 "k8s.io/api/core/v1"
8 |
9 | "github.com/werf/kubedog/pkg/tracker/indicators"
10 | "github.com/werf/kubedog/pkg/tracker/pod"
11 | "github.com/werf/kubedog/pkg/utils"
12 | )
13 |
14 | type JobStatus struct {
15 | batchv1.JobStatus
16 |
17 | StatusGeneration uint64
18 |
19 | SucceededIndicator *indicators.Int32EqualConditionIndicator
20 | Age string
21 |
22 | WaitingForMessages []string
23 |
24 | IsSucceeded bool
25 | IsFailed bool
26 | FailedReason string
27 |
28 | Pods map[string]pod.PodStatus
29 | }
30 |
31 | func NewJobStatus(object *batchv1.Job, statusGeneration uint64, isTrackerFailed bool, trackerFailedReason string, podsStatuses map[string]pod.PodStatus, trackedPodsNames []string) JobStatus {
32 | res := JobStatus{
33 | JobStatus: object.Status,
34 | StatusGeneration: statusGeneration,
35 | Age: utils.TranslateTimestampSince(object.CreationTimestamp),
36 | Pods: make(map[string]pod.PodStatus),
37 | }
38 |
39 | for k, v := range podsStatuses {
40 | res.Pods[k] = v
41 | if v.StatusIndicator != nil {
42 | v.StatusIndicator.TargetValue = "Completed"
43 | }
44 | }
45 |
46 | doCheckJobConditions := true
47 | for _, trackedPodName := range trackedPodsNames {
48 | podStatus := podsStatuses[trackedPodName]
49 |
50 | if !podStatus.IsFailed && !podStatus.IsSucceeded {
 51 | 			doCheckJobConditions = false // unterminated pods exist
52 | }
53 | }
54 |
55 | if doCheckJobConditions {
56 | for _, c := range object.Status.Conditions {
57 | switch c.Type {
58 | case batchv1.JobComplete:
59 | if c.Status == corev1.ConditionTrue {
60 | res.IsSucceeded = true
61 | }
62 |
63 | case batchv1.JobFailed:
64 | if c.Status == corev1.ConditionTrue {
65 | if !res.IsFailed {
66 | res.IsFailed = true
67 | res.FailedReason = c.Reason
68 | }
69 | }
70 | }
71 | }
72 |
73 | if !res.IsSucceeded {
74 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("condition %s->%s", batchv1.JobComplete, corev1.ConditionTrue))
75 | }
76 | } else {
77 | res.WaitingForMessages = append(res.WaitingForMessages, "pods should be terminated")
78 | }
79 |
80 | res.SucceededIndicator = &indicators.Int32EqualConditionIndicator{}
81 | res.SucceededIndicator.Value = object.Status.Succeeded
82 |
83 | if object.Spec.Completions != nil {
84 | res.SucceededIndicator.TargetValue = *object.Spec.Completions
85 |
86 | if !res.SucceededIndicator.IsReady() {
87 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("succeeded %d->%d", res.SucceededIndicator.Value, res.SucceededIndicator.TargetValue))
88 | }
89 | } else {
90 | res.SucceededIndicator.TargetValue = 1
91 |
92 | if !res.IsSucceeded {
93 | parallelism := int32(0)
94 | if object.Spec.Parallelism != nil {
95 | parallelism = *object.Spec.Parallelism
96 | }
97 | if parallelism > 1 {
98 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("succeeded %d->%d of %d", res.SucceededIndicator.Value, res.SucceededIndicator.TargetValue, parallelism))
99 | } else {
100 | res.WaitingForMessages = append(res.WaitingForMessages, fmt.Sprintf("succeeded %d->%d", res.SucceededIndicator.Value, res.SucceededIndicator.TargetValue))
101 | }
102 | }
103 | }
104 |
105 | if !res.IsSucceeded && !res.IsFailed {
106 | res.IsFailed = isTrackerFailed
107 | res.FailedReason = trackerFailedReason
108 | }
109 |
110 | return res
111 | }
112 |
--------------------------------------------------------------------------------
/pkg/tracker/pod/informer.go:
--------------------------------------------------------------------------------
1 | package pod
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apimachinery/pkg/runtime"
10 | "k8s.io/apimachinery/pkg/watch"
11 | "k8s.io/client-go/tools/cache"
12 | watchtools "k8s.io/client-go/tools/watch"
13 |
14 | "github.com/werf/kubedog/pkg/tracker"
15 | "github.com/werf/kubedog/pkg/tracker/debug"
16 | "github.com/werf/kubedog/pkg/utils"
17 | )
18 |
 19 | // PodsInformer monitors Pod add events for use with controllers (Deployment, StatefulSet, DaemonSet)
20 | type PodsInformer struct {
21 | tracker.Tracker
22 | Controller utils.ControllerMetadata
23 | PodAdded chan *corev1.Pod
24 | Errors chan error
25 | }
26 |
27 | func NewPodsInformer(trk *tracker.Tracker, controller utils.ControllerMetadata) *PodsInformer {
28 | return &PodsInformer{
29 | Tracker: tracker.Tracker{
30 | Kube: trk.Kube,
31 | Namespace: trk.Namespace,
32 | FullResourceName: trk.FullResourceName,
33 | },
34 | Controller: controller,
35 | PodAdded: make(chan *corev1.Pod, 1),
36 | Errors: make(chan error),
37 | }
38 | }
39 |
40 | func (p *PodsInformer) WithChannels(added chan *corev1.Pod, errors chan error) *PodsInformer {
41 | p.PodAdded = added
42 | p.Errors = errors
43 | return p
44 | }
45 |
46 | func (p *PodsInformer) Run(ctx context.Context) {
47 | if debug.Debug() {
48 | fmt.Printf("> PodsInformer.Run\n")
49 | }
50 |
51 | client := p.Kube
52 |
53 | selector, err := metav1.LabelSelectorAsSelector(p.Controller.LabelSelector())
54 | if err != nil {
 55 | 		// TODO: surface this error to the caller (e.g. via p.Errors) instead of silently returning
56 | return
57 | }
58 |
59 | tweakListOptions := func(options metav1.ListOptions) metav1.ListOptions {
60 | options.LabelSelector = selector.String()
61 | return options
62 | }
63 | lw := &cache.ListWatch{
64 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
65 | return client.CoreV1().Pods(p.Namespace).List(ctx, tweakListOptions(options))
66 | },
67 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
68 | return client.CoreV1().Pods(p.Namespace).Watch(ctx, tweakListOptions(options))
69 | },
70 | }
71 |
72 | go func() {
73 | _, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(e watch.Event) (bool, error) {
74 | if debug.Debug() {
75 | fmt.Printf(" %s pod event: %#v\n", p.FullResourceName, e.Type)
76 | }
77 |
78 | var object *corev1.Pod
79 |
80 | if e.Type != watch.Error {
81 | var ok bool
82 | object, ok = e.Object.(*corev1.Pod)
83 | if !ok {
84 | return true, fmt.Errorf("corev1.Pod informer for %s got unexpected object %T", p.FullResourceName, e.Object)
85 | }
86 | }
87 |
88 | if e.Type == watch.Added {
89 | p.PodAdded <- object
90 | }
91 |
92 | return false, nil
93 | })
94 |
95 | if err := tracker.AdaptInformerError(err); err != nil {
96 | p.Errors <- fmt.Errorf("%s pods informer error: %w", p.FullResourceName, err)
97 | }
98 |
99 | if debug.Debug() {
100 | fmt.Printf(" %s pods informer DONE\n", p.FullResourceName)
101 | }
102 | }()
103 | }
104 |
--------------------------------------------------------------------------------
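A minimal sketch of running a PodsInformer and reacting to added Pods; Run returns immediately and the informer works in a background goroutine. trk (*tracker.Tracker) and controllerMeta (utils.ControllerMetadata) are assumed to come from the enclosing controller tracker.

informer := pod.NewPodsInformer(trk, controllerMeta)
informer.Run(ctx)

for {
	select {
	case p := <-informer.PodAdded:
		fmt.Printf("pod added: %s\n", p.Name)
	case err := <-informer.Errors:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}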
/pkg/tracker/pod/probes.go:
--------------------------------------------------------------------------------
1 | package pod
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "time"
7 |
8 | corev1 "k8s.io/api/core/v1"
9 |
10 | "github.com/werf/kubedog/pkg/tracker/debug"
11 | )
12 |
13 | type ReadinessProbe struct {
14 | corev1.Probe
15 |
16 | startedAtTime *time.Time
17 | ignoreFailuresDuration time.Duration
18 |
19 | startupProbe *corev1.Probe
20 | }
21 |
22 | func NewReadinessProbe(readinessProbe, startupProbe *corev1.Probe, isStartedNow *bool, ignoreFailuresDurationOverride *time.Duration) ReadinessProbe {
23 | probe := ReadinessProbe{
24 | Probe: *readinessProbe,
25 | startupProbe: startupProbe,
26 | }
27 | probe.SetupStartedAtTime(isStartedNow)
28 | probe.setIgnoreFailuresDuration(ignoreFailuresDurationOverride)
29 |
30 | return probe
31 | }
32 |
33 | func (p *ReadinessProbe) SetupStartedAtTime(isStartedNow *bool) {
34 | var startedAtTime *time.Time
35 | if isStartedNow != nil && *isStartedNow {
36 | now := time.Now()
37 | startedAtTime = &now
38 | }
39 | p.startedAtTime = startedAtTime
40 | }
41 |
42 | func (p *ReadinessProbe) IsFailureShouldBeIgnoredNow() bool {
43 | if p.FailureThreshold == 1 {
44 | return false
45 | }
46 |
47 | if p.startedAtTime == nil {
48 | return true
49 | }
50 |
51 | ignoreFailuresUntilTime := p.startedAtTime.Add(p.ignoreFailuresDuration)
52 | if debug.Debug() {
53 | fmt.Printf("startedAtTime time is %q and ignoreFailuresUntilTime is %q for probe: %+v\n",
54 | p.startedAtTime, ignoreFailuresUntilTime, p)
55 | }
56 |
57 | return time.Now().Before(ignoreFailuresUntilTime)
58 | }
59 |
60 | func (p *ReadinessProbe) setIgnoreFailuresDuration(ignoreFailuresDurationOverride *time.Duration) {
61 | if ignoreFailuresDurationOverride != nil {
62 | p.ignoreFailuresDuration = *ignoreFailuresDurationOverride
63 | } else {
64 | p.ignoreFailuresDuration = p.calculateIgnoreFailuresDuration()
65 | }
66 | }
67 |
68 | func (p *ReadinessProbe) calculateIgnoreFailuresDuration() time.Duration {
 69 | 	// Since we can only detect failed probes, not succeeded ones, we have to make
 70 | 	// some assumptions in this formula, e.g. we don't account for the possibility
 71 | 	// of a chain of succeeded probes being broken by failed ones and vice versa.
 72 | 	// This means the formula provides an approximate duration, which might be
 73 | 	// somewhat higher or lower than needed.
74 | ignoreFailuresDuration := time.Duration(float32(math.Max(float64(
75 | p.calculateRealInitialDelay()+
76 | // Wait for the first probe to finish.
77 | p.TimeoutSeconds+
78 | // Ignore until we need to perform only the last failure check.
79 | (p.FailureThreshold+p.SuccessThreshold-3)*p.PeriodSeconds+
80 | // Ignore for additional half of a period to account for possible delays in
81 | // events processing.
82 | p.PeriodSeconds/2+
83 | // Ignore for timeout of a ReadinessProbe to make sure the last possible ignored
84 | // probe completed.
85 | p.TimeoutSeconds,
86 | // And after all of this the first failed ReadinessProbe should fail the rollout.
87 | ), 0))) * time.Second
88 |
89 | if debug.Debug() {
90 | fmt.Printf("ignoreFailuresDuration calculated as %q for probe: %+v\n", ignoreFailuresDuration, p)
91 | }
92 |
93 | return ignoreFailuresDuration
94 | }
95 |
 96 | // The readiness probe's initialDelaySeconds counts from container creation, not
 97 | // from startup probe completion. Because of this we have to determine which
 98 | // takes longer, startup probe completion or the readiness probe's
 99 | // initialDelaySeconds, and make sure we wait from container creation for the
100 | // larger of the two durations.
101 | func (p *ReadinessProbe) calculateRealInitialDelay() int32 {
102 | var initialDelaySeconds int32
103 | if p.startupProbe != nil {
104 | // Maximum possible time between container creation and readiness probe starting.
105 | readinessProbeInitialDelay := p.PeriodSeconds + p.InitialDelaySeconds
106 |
107 | // Another maximum possible time between container creation and readiness probe
108 | // starting. Won't respect failed probe breaking chain of successful probes and
109 | // vice versa.
110 | startupProbeInitialDelay := p.startupProbe.PeriodSeconds + // Between container creation and first startup probe there is a delay in the range of 0-"periodSeconds".
111 | // Add initialDelaySeconds.
112 | p.startupProbe.InitialDelaySeconds +
113 | // Calculate maximum time it might take to iterate over all success/failureThresholds.
114 | (p.startupProbe.FailureThreshold+p.startupProbe.SuccessThreshold-1)*p.startupProbe.PeriodSeconds +
115 | // Make sure last possible startup probe finished.
116 | p.startupProbe.TimeoutSeconds +
117 | // Add 1 extra periodSeconds to account for delay between last startup probe
118 | // finished and first readiness probe started.
119 | p.PeriodSeconds
120 |
121 | if startupProbeInitialDelay > readinessProbeInitialDelay {
122 | 			// If the startup delay is longer than the readiness delay, we don't need any
123 | 			// extra wait and just wait for a single periodSeconds (a correction for the delay
124 | 			// between the startup probe finishing and the first readiness probe starting).
125 | initialDelaySeconds = p.PeriodSeconds
126 | } else {
127 | // Else wait for the time left for readiness probe to initialize.
128 | initialDelaySeconds = readinessProbeInitialDelay - startupProbeInitialDelay
129 | }
130 | } else {
131 | initialDelaySeconds = p.PeriodSeconds + p.InitialDelaySeconds
132 | }
133 | return initialDelaySeconds
134 | }
135 |
--------------------------------------------------------------------------------
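A worked example of the ignore-failures formula above, with illustrative numbers and no startup probe (all arithmetic is int32, so the half-period term truncates):

// readiness probe: initialDelaySeconds=10, periodSeconds=5, timeoutSeconds=1,
//                  failureThreshold=3, successThreshold=1, no startup probe
//
// calculateRealInitialDelay = periodSeconds + initialDelaySeconds = 5 + 10 = 15
//
// ignoreFailuresDuration = 15            // real initial delay
//                        + 1             // wait for the first probe to finish
//                        + (3+1-3) * 5   // remaining threshold periods = 5
//                        + 5/2           // half a period, integer math = 2
//                        + 1             // timeout of the last ignored probe
//                        = 24 seconds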
/pkg/tracker/pod/status.go:
--------------------------------------------------------------------------------
1 | package pod
2 |
3 | import (
4 | "fmt"
5 |
6 | corev1 "k8s.io/api/core/v1"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/indicators"
9 | "github.com/werf/kubedog/pkg/utils"
10 | )
11 |
12 | type PodStatus struct {
13 | corev1.PodStatus
14 |
15 | Name string
16 |
17 | StatusGeneration uint64
18 |
19 | StatusIndicator *indicators.StringEqualConditionIndicator
20 | Age string
21 | Restarts int32
22 | ReadyContainers int32
23 | TotalContainers int32
24 |
25 | IsReady bool
26 | IsFailed bool
27 | IsSucceeded bool
28 | FailedReason string
29 |
30 | ContainersErrors []ContainerError
31 | }
32 |
33 | func NewPodStatus(pod *corev1.Pod, statusGeneration uint64, trackedContainers []string, isTrackerFailed bool, trackerFailedReason string) PodStatus {
34 | res := PodStatus{
35 | PodStatus: pod.Status,
36 | TotalContainers: int32(len(pod.Spec.Containers)),
37 | Age: utils.TranslateTimestampSince(pod.CreationTimestamp),
38 | StatusIndicator: &indicators.StringEqualConditionIndicator{},
39 | StatusGeneration: statusGeneration,
40 | Name: pod.Name,
41 | }
42 |
43 | for _, cond := range pod.Status.Conditions {
44 | if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
45 | res.IsReady = true
46 | break
47 | }
48 | }
49 |
50 | var restarts, readyContainers int32
51 |
52 | reason := string(pod.Status.Phase)
53 | if pod.Status.Reason != "" {
54 | reason = pod.Status.Reason
55 | }
56 |
57 | initializing := false
58 | for i := range pod.Status.InitContainerStatuses {
59 | container := pod.Status.InitContainerStatuses[i]
60 | restarts += container.RestartCount
61 | switch {
62 | case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
63 | continue
64 | case container.State.Terminated != nil:
65 | // initialization is failed
66 | if len(container.State.Terminated.Reason) == 0 {
67 | if container.State.Terminated.Signal != 0 {
68 | reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
69 | } else {
70 | reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
71 | }
72 | } else {
73 | reason = "Init:" + container.State.Terminated.Reason
74 | }
75 | initializing = true
76 | case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
77 | reason = "Init:" + container.State.Waiting.Reason
78 | initializing = true
79 | default:
80 | reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
81 | initializing = true
82 | }
83 | break
84 | }
85 |
86 | if !initializing {
87 | restarts = 0
88 | hasRunning := false
89 | for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
90 | container := pod.Status.ContainerStatuses[i]
91 |
92 | restarts += container.RestartCount
93 | switch {
94 | case container.State.Waiting != nil && container.State.Waiting.Reason != "":
95 | reason = container.State.Waiting.Reason
96 | case container.State.Terminated != nil && container.State.Terminated.Reason != "":
97 | reason = container.State.Terminated.Reason
98 | case container.State.Terminated != nil && container.State.Terminated.Reason == "":
99 | if container.State.Terminated.Signal != 0 {
100 | reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
101 | } else {
102 | reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
103 | }
104 | case container.Ready && container.State.Running != nil:
105 | hasRunning = true
106 | readyContainers++
107 | }
108 | }
109 |
110 | // change pod status back to "Running" if at least one container is still reporting a "Running" status
111 | if reason == "Completed" && hasRunning {
112 | reason = "Running"
113 | }
114 | }
115 |
116 | if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
117 | reason = "Unknown"
118 | } else if pod.DeletionTimestamp != nil {
119 | reason = "Terminating"
120 | }
121 |
122 | res.StatusIndicator.Value = reason
123 | res.StatusIndicator.FailedValue = "Error"
124 | res.Restarts = restarts
125 | res.ReadyContainers = readyContainers
126 |
127 | if len(trackedContainers) == 0 {
128 | switch pod.Status.Phase {
129 | case corev1.PodSucceeded:
130 | res.IsSucceeded = true
131 | case corev1.PodFailed:
132 | res.IsFailed = true
133 | res.FailedReason = reason
134 | }
135 | }
136 |
137 | if !res.IsReady && !res.IsFailed && !res.IsSucceeded {
138 | res.IsFailed = isTrackerFailed
139 | res.FailedReason = trackerFailedReason
140 | }
141 |
142 | setContainersStatusesToPodStatus(&res, pod)
143 |
144 | return res
145 | }
146 |
147 | func setContainersStatusesToPodStatus(status *PodStatus, pod *corev1.Pod) {
148 | allContainerStatuses := make([]corev1.ContainerStatus, 0)
149 | allContainerStatuses = append(allContainerStatuses, pod.Status.InitContainerStatuses...)
150 | allContainerStatuses = append(allContainerStatuses, pod.Status.ContainerStatuses...)
151 |
152 | for _, cs := range allContainerStatuses {
153 | if cs.State.Waiting != nil {
154 | switch cs.State.Waiting.Reason {
155 | case "ImagePullBackOff", "ErrImagePull", "CrashLoopBackOff", "ErrImageNeverPull":
156 | if status.ContainersErrors == nil {
157 | status.ContainersErrors = []ContainerError{}
158 | }
159 |
160 | status.ContainersErrors = append(status.ContainersErrors, ContainerError{
161 | ContainerName: cs.Name,
162 | Message: fmt.Sprintf("%s: %s", cs.State.Waiting.Reason, cs.State.Waiting.Message),
163 | })
164 | }
165 | }
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
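
A minimal usage sketch for NewPodStatus above; the Pod object and the tracker bookkeeping arguments (statusGeneration, trackedContainers) are hypothetical stand-ins for values normally produced by the pod tracker:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/werf/kubedog/pkg/tracker/pod"
)

func main() {
	// A minimal Pod object; in real use this comes from a watch event.
	p := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "mypod"},
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReady, Status: corev1.ConditionTrue},
			},
		},
	}

	status := pod.NewPodStatus(p, 1, []string{"main"}, false, "")
	fmt.Println(status.IsReady) // true: the PodReady condition is set
}
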
/pkg/tracker/replicaset/informer.go:
--------------------------------------------------------------------------------
1 | package replicaset
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | appsv1 "k8s.io/api/apps/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apimachinery/pkg/runtime"
10 | "k8s.io/apimachinery/pkg/watch"
11 | "k8s.io/client-go/tools/cache"
12 | watchtools "k8s.io/client-go/tools/watch"
13 |
14 | "github.com/werf/kubedog/pkg/tracker"
15 | "github.com/werf/kubedog/pkg/tracker/debug"
16 | "github.com/werf/kubedog/pkg/tracker/pod"
17 | "github.com/werf/kubedog/pkg/utils"
18 | )
19 |
20 | type ReplicaSet struct {
21 | Name string
22 | IsNew bool
23 | }
24 |
25 | // TODO: add containers!
26 | type ReplicaSetPod struct {
27 | ReplicaSet ReplicaSet
28 | Name string
29 | }
30 |
31 | type ReplicaSetPodLogChunk struct {
32 | *pod.PodLogChunk
33 | ReplicaSet ReplicaSet
34 | }
35 |
36 | type ReplicaSetPodError struct {
37 | pod.PodError
38 | ReplicaSet ReplicaSet
39 | }
40 |
41 | // ReplicaSetInformer monitors ReplicaSet events for use with controllers (Deployment, StatefulSet, DaemonSet)
42 | type ReplicaSetInformer struct {
43 | tracker.Tracker
44 | Controller utils.ControllerMetadata
45 | ReplicaSetAdded chan *appsv1.ReplicaSet
46 | ReplicaSetModified chan *appsv1.ReplicaSet
47 | ReplicaSetDeleted chan *appsv1.ReplicaSet
48 | Errors chan error
49 | }
50 |
51 | func NewReplicaSetInformer(trk *tracker.Tracker, controller utils.ControllerMetadata) *ReplicaSetInformer {
52 | if debug.Debug() {
53 | fmt.Printf("> NewReplicaSetInformer\n")
54 | }
55 | return &ReplicaSetInformer{
56 | Tracker: tracker.Tracker{
57 | Kube: trk.Kube,
58 | Namespace: trk.Namespace,
59 | FullResourceName: trk.FullResourceName,
60 | },
61 | Controller: controller,
62 | ReplicaSetAdded: make(chan *appsv1.ReplicaSet, 1),
63 | ReplicaSetModified: make(chan *appsv1.ReplicaSet, 1),
64 | ReplicaSetDeleted: make(chan *appsv1.ReplicaSet, 1),
65 | Errors: make(chan error),
66 | }
67 | }
68 |
69 | func (r *ReplicaSetInformer) WithChannels(added chan *appsv1.ReplicaSet,
70 | modified chan *appsv1.ReplicaSet,
71 | deleted chan *appsv1.ReplicaSet,
72 | errors chan error,
73 | ) *ReplicaSetInformer {
74 | r.ReplicaSetAdded = added
75 | r.ReplicaSetModified = modified
76 | r.ReplicaSetDeleted = deleted
77 | r.Errors = errors
78 | return r
79 | }
80 |
81 | func (r *ReplicaSetInformer) Run(ctx context.Context) {
82 | client := r.Kube
83 |
84 | selector, err := metav1.LabelSelectorAsSelector(r.Controller.LabelSelector())
85 | if err != nil {
86 | // TODO: surface this error via the Errors channel instead of returning silently!
87 | return
88 | }
89 |
90 | tweakListOptions := func(options metav1.ListOptions) metav1.ListOptions {
91 | options.LabelSelector = selector.String()
92 | return options
93 | }
94 | lw := &cache.ListWatch{
95 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
96 | return client.AppsV1().ReplicaSets(r.Namespace).List(ctx, tweakListOptions(options))
97 | },
98 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
99 | return client.AppsV1().ReplicaSets(r.Namespace).Watch(ctx, tweakListOptions(options))
100 | },
101 | }
102 |
103 | go func() {
104 | _, err := watchtools.UntilWithSync(ctx, lw, &appsv1.ReplicaSet{}, nil, func(e watch.Event) (bool, error) {
105 | if debug.Debug() {
106 | fmt.Printf(" %s replica set event: %#v\n", r.FullResourceName, e.Type)
107 | }
108 |
109 | var object *appsv1.ReplicaSet
110 |
111 | if e.Type != watch.Error {
112 | var ok bool
113 | object, ok = e.Object.(*appsv1.ReplicaSet)
114 | if !ok {
115 | return true, fmt.Errorf("appsv1.ReplicaSet informer for %s got unexpected object %T", r.FullResourceName, e.Object)
116 | }
117 | }
118 |
119 | switch e.Type {
120 | case watch.Added:
121 | r.ReplicaSetAdded <- object
122 | case watch.Modified:
123 | r.ReplicaSetModified <- object
124 | case watch.Deleted:
125 | r.ReplicaSetDeleted <- object
126 | }
127 |
128 | return false, nil
129 | })
130 |
131 | if err := tracker.AdaptInformerError(err); err != nil {
132 | r.Errors <- err
133 | }
134 |
135 | if debug.Debug() {
136 | fmt.Printf(" %s replicaSets informer DONE\n", r.FullResourceName)
137 | }
138 | }()
139 | }
140 |
--------------------------------------------------------------------------------
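
A sketch of driving the informer above: Run starts its watch in a goroutine and returns immediately, so the caller drains the channels in a select loop. The trk and controllerMeta arguments are assumed to be prepared by the caller (controllerMeta is whatever utils.ControllerMetadata implementation the tracked controller provides):

package example

import (
	"context"
	"fmt"

	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/tracker/replicaset"
	"github.com/werf/kubedog/pkg/utils"
)

func consumeReplicaSetEvents(ctx context.Context, trk *tracker.Tracker, controllerMeta utils.ControllerMetadata) error {
	informer := replicaset.NewReplicaSetInformer(trk, controllerMeta)
	informer.Run(ctx) // non-blocking: spawns the watch goroutine

	for {
		select {
		case rs := <-informer.ReplicaSetAdded:
			fmt.Printf("rs/%s added\n", rs.Name)
		case rs := <-informer.ReplicaSetModified:
			fmt.Printf("rs/%s modified\n", rs.Name)
		case rs := <-informer.ReplicaSetDeleted:
			fmt.Printf("rs/%s deleted\n", rs.Name)
		case err := <-informer.Errors:
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
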
/pkg/tracker/resid/resource_id.go:
--------------------------------------------------------------------------------
1 | package resid
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "k8s.io/apimachinery/pkg/api/meta"
8 | "k8s.io/apimachinery/pkg/runtime/schema"
9 | )
10 |
11 | type ResourceID struct {
12 | Name string
13 | Namespace string
14 | GroupVersionKind schema.GroupVersionKind
15 | }
16 |
17 | type NewResourceIDOptions struct {
18 | Namespace string
19 | }
20 |
21 | func NewResourceID(name string, gvk schema.GroupVersionKind, options NewResourceIDOptions) *ResourceID {
22 | return &ResourceID{
23 | Name: name,
24 | GroupVersionKind: gvk,
25 | Namespace: options.Namespace,
26 | }
27 | }
28 |
29 | func (r *ResourceID) String() string {
30 | return r.GroupVersionKindNamespaceNameString()
31 | }
32 |
33 | func (r *ResourceID) GroupVersionKindString() string {
34 | var gvkElems []string
35 | if r.GroupVersionKind.Kind != "" {
36 | gvkElems = append(gvkElems, r.GroupVersionKind.Kind)
37 | }
38 |
39 | if r.GroupVersionKind.Version != "" {
40 | gvkElems = append(gvkElems, r.GroupVersionKind.Version)
41 | }
42 |
43 | if r.GroupVersionKind.Group != "" {
44 | gvkElems = append(gvkElems, r.GroupVersionKind.Group)
45 | }
46 |
47 | return strings.Join(gvkElems, ".")
48 | }
49 |
50 | func (r *ResourceID) GroupVersionKindNameString() string {
51 | return strings.Join([]string{r.GroupVersionKindString(), r.Name}, "/")
52 | }
53 |
54 | func (r *ResourceID) KindNameString() string {
55 | return strings.Join([]string{r.GroupVersionKind.Kind, r.Name}, "/")
56 | }
57 |
58 | func (r *ResourceID) GroupVersionKindNamespaceString() string {
59 | var resultElems []string
60 |
61 | if r.Namespace != "" {
62 | resultElems = append(resultElems, fmt.Sprint("ns:", r.Namespace))
63 | }
64 |
65 | gvk := r.GroupVersionKindString()
66 | if gvk != "" {
67 | resultElems = append(resultElems, gvk)
68 | }
69 |
70 | return strings.Join(resultElems, "/")
71 | }
72 |
73 | func (r *ResourceID) GroupVersionKindNamespaceNameString() string {
74 | return strings.Join([]string{r.GroupVersionKindNamespaceString(), r.Name}, "/")
75 | }
76 |
77 | func (r *ResourceID) GroupVersionResource(mapper meta.RESTMapper) (*schema.GroupVersionResource, error) {
78 | mapping, err := r.mapping(mapper)
79 | if err != nil {
80 | return nil, fmt.Errorf("error getting mapping: %w", err)
81 | }
82 |
83 | return &mapping.Resource, nil
84 | }
85 |
86 | func (r *ResourceID) Namespaced(mapper meta.RESTMapper) (bool, error) {
87 | mapping, err := r.mapping(mapper)
88 | if err != nil {
89 | return false, fmt.Errorf("error getting mapping: %w", err)
90 | }
91 |
92 | return mapping.Scope.Name() == meta.RESTScopeNameNamespace, nil
93 | }
94 |
95 | func (r *ResourceID) mapping(mapper meta.RESTMapper) (*meta.RESTMapping, error) {
96 | mapping, err := mapper.RESTMapping(r.GroupVersionKind.GroupKind(), r.GroupVersionKind.Version)
97 | if err != nil {
98 | return nil, fmt.Errorf("error mapping %q to api resource: %w", r.GroupVersionKindString(), err)
99 | }
100 |
101 | return mapping, nil
102 | }
103 |
--------------------------------------------------------------------------------
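
A quick illustration of the string forms produced by ResourceID above (all names hypothetical):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/werf/kubedog/pkg/tracker/resid"
)

func main() {
	id := resid.NewResourceID(
		"myapp",
		schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
		resid.NewResourceIDOptions{Namespace: "mynamespace"},
	)

	fmt.Println(id.GroupVersionKindString()) // Deployment.v1.apps
	fmt.Println(id.KindNameString())         // Deployment/myapp
	fmt.Println(id.String())                 // ns:mynamespace/Deployment.v1.apps/myapp
}
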
/pkg/tracker/statefulset/feed.go:
--------------------------------------------------------------------------------
1 | package statefulset
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 |
8 | "k8s.io/client-go/kubernetes"
9 | watchtools "k8s.io/client-go/tools/watch"
10 |
11 | "github.com/werf/kubedog/pkg/tracker"
12 | "github.com/werf/kubedog/pkg/tracker/controller"
13 | "github.com/werf/kubedog/pkg/tracker/debug"
14 | )
15 |
16 | type Feed interface {
17 | controller.ControllerFeed
18 |
19 | OnStatus(func(StatefulSetStatus) error)
20 |
21 | GetStatus() StatefulSetStatus
22 | Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error
23 | }
24 |
25 | func NewFeed() Feed {
26 | return &feed{}
27 | }
28 |
29 | type feed struct {
30 | controller.CommonControllerFeed
31 |
32 | OnStatusFunc func(StatefulSetStatus) error
33 |
34 | statusMux sync.Mutex
35 | status StatefulSetStatus
36 | }
37 |
38 | func (f *feed) OnStatus(function func(StatefulSetStatus) error) {
39 | f.OnStatusFunc = function
40 | }
41 |
42 | func (f *feed) Track(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
43 | errorChan := make(chan error)
44 | doneChan := make(chan bool)
45 |
46 | parentContext := opts.ParentContext
47 | if parentContext == nil {
48 | parentContext = context.Background()
49 | }
50 | ctx, cancel := watchtools.ContextWithOptionalTimeout(parentContext, opts.Timeout)
51 | defer cancel()
52 |
53 | stsTracker := NewTracker(name, namespace, kube, opts)
54 |
55 | go func() {
56 | if debug.Debug() {
57 | fmt.Printf(" goroutine: start statefulset/%s tracker\n", name)
58 | }
59 | err := stsTracker.Track(ctx)
60 | if err != nil {
61 | errorChan <- err
62 | } else {
63 | doneChan <- true
64 | }
65 | }()
66 |
67 | if debug.Debug() {
68 | fmt.Printf(" statefulset/%s: for-select stsTracker channels\n", name)
69 | }
70 |
71 | for {
72 | select {
73 | case status := <-stsTracker.Added:
74 | f.setStatus(status)
75 |
76 | if f.OnAddedFunc != nil {
77 | err := f.OnAddedFunc(status.IsReady)
78 | if err == tracker.ErrStopTrack {
79 | return nil
80 | }
81 | if err != nil {
82 | return err
83 | }
84 | }
85 |
86 | case status := <-stsTracker.Ready:
87 | f.setStatus(status)
88 |
89 | if f.OnReadyFunc != nil {
90 | err := f.OnReadyFunc()
91 | if err == tracker.ErrStopTrack {
92 | return nil
93 | }
94 | if err != nil {
95 | return err
96 | }
97 | }
98 |
99 | case status := <-stsTracker.Failed:
100 | f.setStatus(status)
101 |
102 | if f.OnFailedFunc != nil {
103 | err := f.OnFailedFunc(status.FailedReason)
104 | if err == tracker.ErrStopTrack {
105 | return nil
106 | }
107 | if err != nil {
108 | return err
109 | }
110 | }
111 |
112 | case msg := <-stsTracker.EventMsg:
113 | if f.OnEventMsgFunc != nil {
114 | err := f.OnEventMsgFunc(msg)
115 | if err == tracker.ErrStopTrack {
116 | return nil
117 | }
118 | if err != nil {
119 | return err
120 | }
121 | }
122 |
123 | case report := <-stsTracker.AddedPod:
124 | f.setStatus(report.StatefulSetStatus)
125 |
126 | if f.OnAddedPodFunc != nil {
127 | err := f.OnAddedPodFunc(report.ReplicaSetPod)
128 | if err == tracker.ErrStopTrack {
129 | return nil
130 | }
131 | if err != nil {
132 | return err
133 | }
134 | }
135 |
136 | case chunk := <-stsTracker.PodLogChunk:
137 | if debug.Debug() {
138 | fmt.Printf(" statefulset/%s pod `%s` log chunk\n", stsTracker.ResourceName, chunk.PodName)
139 | for _, line := range chunk.LogLines {
140 | fmt.Printf("po/%s [%s] %s\n", chunk.PodName, line.Timestamp, line.Message)
141 | }
142 | }
143 |
144 | if f.OnPodLogChunkFunc != nil {
145 | err := f.OnPodLogChunkFunc(chunk)
146 | if err == tracker.ErrStopTrack {
147 | return nil
148 | }
149 | if err != nil {
150 | return err
151 | }
152 | }
153 |
154 | case report := <-stsTracker.PodError:
155 | f.setStatus(report.StatefulSetStatus)
156 |
157 | if f.OnPodErrorFunc != nil {
158 | err := f.OnPodErrorFunc(report.ReplicaSetPodError)
159 | if err == tracker.ErrStopTrack {
160 | return nil
161 | }
162 | if err != nil {
163 | return err
164 | }
165 | }
166 |
167 | case status := <-stsTracker.Status:
168 | f.setStatus(status)
169 |
170 | if f.OnStatusFunc != nil {
171 | err := f.OnStatusFunc(status)
172 | if err == tracker.ErrStopTrack {
173 | return nil
174 | }
175 | if err != nil {
176 | return err
177 | }
178 | }
179 |
180 | case err := <-errorChan:
181 | return err
182 | case <-doneChan:
183 | return nil
184 | }
185 | }
186 | }
187 |
188 | func (f *feed) setStatus(status StatefulSetStatus) {
189 | f.statusMux.Lock()
190 | defer f.statusMux.Unlock()
191 | f.status = status
192 | }
193 |
194 | func (f *feed) GetStatus() StatefulSetStatus {
195 | f.statusMux.Lock()
196 | defer f.statusMux.Unlock()
197 | return f.status
198 | }
199 |
--------------------------------------------------------------------------------
/pkg/tracker/tracker.go:
--------------------------------------------------------------------------------
1 | package tracker
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "time"
8 |
9 | "k8s.io/apimachinery/pkg/util/wait"
10 | "k8s.io/client-go/kubernetes"
11 | )
12 |
13 | var ErrStopTrack = errors.New("stop tracking now")
14 |
15 | const (
16 | Initial TrackerState = ""
17 | ResourceAdded TrackerState = "ResourceAdded"
18 | ResourceSucceeded TrackerState = "ResourceSucceeded"
19 | ResourceReady TrackerState = "ResourceReady"
20 | ResourceFailed TrackerState = "ResourceFailed"
21 | ResourceDeleted TrackerState = "ResourceDeleted"
22 |
23 | FollowingContainerLogs TrackerState = "FollowingContainerLogs"
24 | ContainerTrackerDone TrackerState = "ContainerTrackerDone"
25 | )
26 |
27 | type TrackerState string
28 |
29 | type Tracker struct {
30 | Kube kubernetes.Interface
31 | Namespace string
32 | ResourceName string
33 | FullResourceName string // full resource name with resource kind (deploy/superapp)
34 | LogsFromTime time.Time
35 |
36 | StatusGeneration uint64
37 | }
38 |
39 | type Options struct {
40 | ParentContext context.Context
41 | Timeout time.Duration
42 | LogsFromTime time.Time
43 | IgnoreReadinessProbeFailsByContainerName map[string]time.Duration
44 | }
45 |
46 | type ResourceError struct {
47 | msg string
48 | }
49 |
50 | func (r *ResourceError) Error() string {
51 | return r.msg
52 | }
53 |
54 | func ResourceErrorf(format string, a ...interface{}) error {
55 | return &ResourceError{
56 | msg: fmt.Sprintf(format, a...),
57 | }
58 | }
59 |
60 | func AdaptInformerError(err error) error {
61 | if errors.Is(err, wait.ErrWaitTimeout) {
62 | return nil
63 | }
64 | return err
65 | }
66 |
--------------------------------------------------------------------------------
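
A sketch of filling in tracker.Options above (all values hypothetical); the resulting struct is passed to a feed's Track, as in the statefulset feed earlier:

package main

import (
	"context"
	"time"

	"github.com/werf/kubedog/pkg/tracker"
)

func main() {
	// A 5-minute deadline, logs from "now", and up to 30s of tolerated
	// readiness-probe failures for a container named "main".
	opts := tracker.Options{
		ParentContext: context.Background(),
		Timeout:       5 * time.Minute,
		LogsFromTime:  time.Now(),
		IgnoreReadinessProbeFailsByContainerName: map[string]time.Duration{
			"main": 30 * time.Second,
		},
	}
	_ = opts // pass to a feed's Track(name, namespace, kube, opts)
}
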
/pkg/trackers/dyntracker/dynamic_absence_tracker.go:
--------------------------------------------------------------------------------
1 | package dyntracker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "time"
8 |
9 | apierrors "k8s.io/apimachinery/pkg/api/errors"
10 | "k8s.io/apimachinery/pkg/api/meta"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/apimachinery/pkg/runtime/schema"
13 | "k8s.io/apimachinery/pkg/util/wait"
14 | "k8s.io/client-go/dynamic"
15 |
16 | "github.com/werf/kubedog/pkg/trackers/dyntracker/statestore"
17 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
18 | )
19 |
20 | type DynamicAbsenceTracker struct {
21 | taskState *util.Concurrent[*statestore.AbsenceTaskState]
22 | dynamicClient dynamic.Interface
23 | mapper meta.ResettableRESTMapper
24 |
25 | timeout time.Duration
26 | pollPeriod time.Duration
27 | }
28 |
29 | func NewDynamicAbsenceTracker(
30 | taskState *util.Concurrent[*statestore.AbsenceTaskState],
31 | dynamicClient dynamic.Interface,
32 | mapper meta.ResettableRESTMapper,
33 | opts DynamicAbsenceTrackerOptions,
34 | ) *DynamicAbsenceTracker {
35 | timeout := opts.Timeout
36 | var pollPeriod time.Duration
37 | if opts.PollPeriod != 0 {
38 | pollPeriod = opts.PollPeriod
39 | } else {
40 | pollPeriod = 1 * time.Second
41 | }
42 |
43 | return &DynamicAbsenceTracker{
44 | taskState: taskState,
45 | dynamicClient: dynamicClient,
46 | mapper: mapper,
47 | timeout: timeout,
48 | pollPeriod: pollPeriod,
49 | }
50 | }
51 |
52 | type DynamicAbsenceTrackerOptions struct {
53 | Timeout time.Duration
54 | PollPeriod time.Duration
55 | }
56 |
57 | func (t *DynamicAbsenceTracker) Track(ctx context.Context) error {
58 | var (
59 | name string
60 | namespace string
61 | groupVersionKind schema.GroupVersionKind
62 | )
63 | t.taskState.RTransaction(func(ts *statestore.AbsenceTaskState) {
64 | name = ts.Name()
65 | namespace = ts.Namespace()
66 | groupVersionKind = ts.GroupVersionKind()
67 | })
68 |
69 | namespaced, err := util.IsNamespaced(groupVersionKind, t.mapper)
70 | if err != nil {
71 | return fmt.Errorf("check if namespaced: %w", err)
72 | }
73 |
74 | gvr, err := util.GVRFromGVK(groupVersionKind, t.mapper)
75 | if err != nil {
76 | return fmt.Errorf("get GroupVersionResource: %w", err)
77 | }
78 |
79 | var resourceClient dynamic.ResourceInterface
80 | if namespaced {
81 | resourceClient = t.dynamicClient.Resource(gvr).Namespace(namespace)
82 | } else {
83 | resourceClient = t.dynamicClient.Resource(gvr)
84 | }
85 |
86 | resourceHumanID := util.ResourceHumanID(name, namespace, groupVersionKind, t.mapper)
87 |
88 | if err := wait.PollImmediate(t.pollPeriod, t.timeout, func() (bool, error) {
89 | if _, err := resourceClient.Get(ctx, name, metav1.GetOptions{}); err != nil {
90 | if apierrors.IsResourceExpired(err) || apierrors.IsGone(err) || err == io.EOF || err == io.ErrUnexpectedEOF {
91 | return false, nil
92 | }
93 |
94 | if apierrors.IsNotFound(err) {
95 | return true, nil
96 | }
97 |
98 | return false, fmt.Errorf("get resource %q: %w", resourceHumanID, err)
99 | }
100 |
101 | t.taskState.RWTransaction(func(ats *statestore.AbsenceTaskState) {
102 | ats.ResourceState().RWTransaction(func(rs *statestore.ResourceState) {
103 | rs.SetStatus(statestore.ResourceStatusCreated)
104 | })
105 | })
106 |
107 | return false, nil
108 | }); err != nil {
109 | return fmt.Errorf("poll resource %q: %w", resourceHumanID, err)
110 | }
111 |
112 | t.taskState.RWTransaction(func(ats *statestore.AbsenceTaskState) {
113 | ats.ResourceState().RWTransaction(func(rs *statestore.ResourceState) {
114 | rs.SetStatus(statestore.ResourceStatusDeleted)
115 | })
116 | })
117 |
118 | return nil
119 | }
120 |
--------------------------------------------------------------------------------
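
A minimal sketch of wiring the absence tracker above together with its statestore task state; constructing the dynamic client and REST mapper is environment-specific and assumed to be done by the caller:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	"github.com/werf/kubedog/pkg/trackers/dyntracker"
	"github.com/werf/kubedog/pkg/trackers/dyntracker/statestore"
	"github.com/werf/kubedog/pkg/trackers/dyntracker/util"
)

// waitForAbsence blocks until the (hypothetical) ConfigMap is gone,
// the timeout expires, or an unexpected API error occurs.
func waitForAbsence(ctx context.Context, client dynamic.Interface, mapper meta.ResettableRESTMapper) error {
	taskState := util.NewConcurrent(statestore.NewAbsenceTaskState(
		"myconfigmap", "mynamespace",
		schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"},
		statestore.AbsenceTaskStateOptions{},
	))

	absenceTracker := dyntracker.NewDynamicAbsenceTracker(taskState, client, mapper, dyntracker.DynamicAbsenceTrackerOptions{
		Timeout:    2 * time.Minute,
		PollPeriod: time.Second,
	})

	return absenceTracker.Track(ctx)
}
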
/pkg/trackers/dyntracker/dynamic_presence_tracker.go:
--------------------------------------------------------------------------------
1 | package dyntracker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "time"
8 |
9 | apierrors "k8s.io/apimachinery/pkg/api/errors"
10 | "k8s.io/apimachinery/pkg/api/meta"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/apimachinery/pkg/runtime/schema"
13 | "k8s.io/apimachinery/pkg/util/wait"
14 | "k8s.io/client-go/dynamic"
15 |
16 | "github.com/werf/kubedog/pkg/trackers/dyntracker/statestore"
17 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
18 | )
19 |
20 | type DynamicPresenceTracker struct {
21 | taskState *util.Concurrent[*statestore.PresenceTaskState]
22 | dynamicClient dynamic.Interface
23 | mapper meta.ResettableRESTMapper
24 |
25 | timeout time.Duration
26 | pollPeriod time.Duration
27 | }
28 |
29 | func NewDynamicPresenceTracker(
30 | taskState *util.Concurrent[*statestore.PresenceTaskState],
31 | dynamicClient dynamic.Interface,
32 | mapper meta.ResettableRESTMapper,
33 | opts DynamicPresenceTrackerOptions,
34 | ) *DynamicPresenceTracker {
35 | var timeout time.Duration
36 | if opts.Timeout != 0 {
37 | timeout = opts.Timeout
38 | } else {
39 | timeout = 5 * time.Minute
40 | }
41 |
42 | var pollPeriod time.Duration
43 | if opts.PollPeriod != 0 {
44 | pollPeriod = opts.PollPeriod
45 | } else {
46 | pollPeriod = 1 * time.Second
47 | }
48 |
49 | return &DynamicPresenceTracker{
50 | taskState: taskState,
51 | dynamicClient: dynamicClient,
52 | mapper: mapper,
53 | timeout: timeout,
54 | pollPeriod: pollPeriod,
55 | }
56 | }
57 |
58 | type DynamicPresenceTrackerOptions struct {
59 | Timeout time.Duration
60 | PollPeriod time.Duration
61 | }
62 |
63 | func (t *DynamicPresenceTracker) Track(ctx context.Context) error {
64 | var (
65 | name string
66 | namespace string
67 | groupVersionKind schema.GroupVersionKind
68 | )
69 | t.taskState.RTransaction(func(ts *statestore.PresenceTaskState) {
70 | name = ts.Name()
71 | namespace = ts.Namespace()
72 | groupVersionKind = ts.GroupVersionKind()
73 | })
74 |
75 | namespaced, err := util.IsNamespaced(groupVersionKind, t.mapper)
76 | if err != nil {
77 | return fmt.Errorf("check if namespaced: %w", err)
78 | }
79 |
80 | gvr, err := util.GVRFromGVK(groupVersionKind, t.mapper)
81 | if err != nil {
82 | return fmt.Errorf("get GroupVersionResource: %w", err)
83 | }
84 |
85 | var resourceClient dynamic.ResourceInterface
86 | if namespaced {
87 | resourceClient = t.dynamicClient.Resource(gvr).Namespace(namespace)
88 | } else {
89 | resourceClient = t.dynamicClient.Resource(gvr)
90 | }
91 |
92 | resourceHumanID := util.ResourceHumanID(name, namespace, groupVersionKind, t.mapper)
93 |
94 | if err := wait.PollImmediate(t.pollPeriod, t.timeout, func() (bool, error) {
95 | if _, err := resourceClient.Get(ctx, name, metav1.GetOptions{}); err != nil {
96 | if apierrors.IsResourceExpired(err) || apierrors.IsGone(err) || err == io.EOF || err == io.ErrUnexpectedEOF {
97 | return false, nil
98 | }
99 |
100 | if apierrors.IsNotFound(err) {
101 | t.taskState.RWTransaction(func(pts *statestore.PresenceTaskState) {
102 | pts.ResourceState().RWTransaction(func(rs *statestore.ResourceState) {
103 | rs.SetStatus(statestore.ResourceStatusDeleted)
104 | })
105 | })
106 |
107 | return false, nil
108 | }
109 |
110 | return false, fmt.Errorf("get resource %q: %w", resourceHumanID, err)
111 | }
112 |
113 | return true, nil
114 | }); err != nil {
115 | return fmt.Errorf("poll resource %q: %w", resourceHumanID, err)
116 | }
117 |
118 | t.taskState.RWTransaction(func(pts *statestore.PresenceTaskState) {
119 | pts.ResourceState().RWTransaction(func(rs *statestore.ResourceState) {
120 | rs.SetStatus(statestore.ResourceStatusCreated)
121 | })
122 | })
123 |
124 | return nil
125 | }
126 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/logstore/log_line.go:
--------------------------------------------------------------------------------
1 | package logstore
2 |
3 | import "time"
4 |
5 | type LogLine struct {
6 | Time time.Time
7 | Line string
8 | }
9 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/logstore/log_store.go:
--------------------------------------------------------------------------------
1 | package logstore
2 |
3 | import (
4 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
5 | )
6 |
7 | type LogStore struct {
8 | resourcesLogs []*util.Concurrent[*ResourceLogs]
9 | }
10 |
11 | func NewLogStore() *LogStore {
12 | return &LogStore{}
13 | }
14 |
15 | func (s *LogStore) AddResourceLogs(resourceLogs *util.Concurrent[*ResourceLogs]) {
16 | s.resourcesLogs = append(s.resourcesLogs, resourceLogs)
17 | }
18 |
19 | func (s *LogStore) ResourcesLogs() []*util.Concurrent[*ResourceLogs] {
20 | return append([]*util.Concurrent[*ResourceLogs]{}, s.resourcesLogs...)
21 | }
22 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/logstore/resource_logs.go:
--------------------------------------------------------------------------------
1 | package logstore
2 |
3 | import (
4 | "time"
5 |
6 | "k8s.io/apimachinery/pkg/runtime/schema"
7 | )
8 |
9 | type ResourceLogs struct {
10 | name string
11 | namespace string
12 | groupVersionKind schema.GroupVersionKind
13 | logs map[string][]*LogLine
14 | }
15 |
16 | func NewResourceLogs(name, namespace string, groupVersionKind schema.GroupVersionKind) *ResourceLogs {
17 | return &ResourceLogs{
18 | name: name,
19 | namespace: namespace,
20 | groupVersionKind: groupVersionKind,
21 | logs: make(map[string][]*LogLine),
22 | }
23 | }
24 |
25 | func (s *ResourceLogs) Name() string {
26 | return s.name
27 | }
28 |
29 | func (s *ResourceLogs) Namespace() string {
30 | return s.namespace
31 | }
32 |
33 | func (s *ResourceLogs) GroupVersionKind() schema.GroupVersionKind {
34 | return s.groupVersionKind
35 | }
36 |
37 | func (s *ResourceLogs) AddLogLine(line, source string, timestamp time.Time) {
38 | l := &LogLine{
39 | Time: timestamp,
40 | Line: line,
41 | }
42 |
43 | if _, ok := s.logs[source]; !ok {
44 | s.logs[source] = []*LogLine{}
45 | }
46 |
47 | s.logs[source] = append(s.logs[source], l)
48 | }
49 |
50 | func (s *ResourceLogs) LogLines() map[string][]*LogLine {
51 | result := make(map[string][]*LogLine)
52 |
53 | for source, logs := range s.logs {
54 | result[source] = append([]*LogLine{}, logs...)
55 | }
56 |
57 | return result
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/absence_task_state.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import (
4 | "github.com/google/uuid"
5 | "k8s.io/apimachinery/pkg/runtime/schema"
6 |
7 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
8 | )
9 |
10 | type AbsenceTaskState struct {
11 | name string
12 | namespace string
13 | groupVersionKind schema.GroupVersionKind
14 |
15 | absentConditions []AbsenceTaskConditionFn
16 | failureConditions []AbsenceTaskConditionFn
17 |
18 | status AbsenceTaskStatus
19 | uuid string
20 | resourceState *util.Concurrent[*ResourceState]
21 | }
22 |
23 | func NewAbsenceTaskState(name, namespace string, groupVersionKind schema.GroupVersionKind, opts AbsenceTaskStateOptions) *AbsenceTaskState {
24 | resourceState := util.NewConcurrent(NewResourceState(name, namespace, groupVersionKind))
25 |
26 | absentConditions := initAbsenceTaskStateAbsentConditions()
27 | failureConditions := []AbsenceTaskConditionFn{}
28 |
29 | uuid := uuid.NewString()
30 |
31 | return &AbsenceTaskState{
32 | name: name,
33 | namespace: namespace,
34 | groupVersionKind: groupVersionKind,
35 | absentConditions: absentConditions,
36 | failureConditions: failureConditions,
37 | uuid: uuid,
38 | resourceState: resourceState,
39 | }
40 | }
41 |
42 | type AbsenceTaskStateOptions struct{}
43 |
44 | func (s *AbsenceTaskState) Name() string {
45 | return s.name
46 | }
47 |
48 | func (s *AbsenceTaskState) Namespace() string {
49 | return s.namespace
50 | }
51 |
52 | func (s *AbsenceTaskState) GroupVersionKind() schema.GroupVersionKind {
53 | return s.groupVersionKind
54 | }
55 |
56 | func (s *AbsenceTaskState) ResourceState() *util.Concurrent[*ResourceState] {
57 | return s.resourceState
58 | }
59 |
60 | func (s *AbsenceTaskState) SetStatus(status AbsenceTaskStatus) {
61 | s.status = status
62 | }
63 |
64 | func (s *AbsenceTaskState) Status() AbsenceTaskStatus {
65 | if s.status != "" {
66 | return s.status
67 | }
68 |
69 | for _, failureCondition := range s.failureConditions {
70 | if failureCondition(s) {
71 | return AbsenceTaskStatusFailed
72 | }
73 | }
74 |
75 | for _, absentCondition := range s.absentConditions {
76 | if !absentCondition(s) {
77 | return AbsenceTaskStatusProgressing
78 | }
79 | }
80 |
81 | return AbsenceTaskStatusAbsent
82 | }
83 |
84 | func (s *AbsenceTaskState) UUID() string {
85 | return s.uuid
86 | }
87 |
88 | func initAbsenceTaskStateAbsentConditions() []AbsenceTaskConditionFn {
89 | var absentConditions []AbsenceTaskConditionFn
90 |
91 | absentConditions = append(absentConditions, func(taskState *AbsenceTaskState) bool {
92 | var absent bool
93 | taskState.resourceState.RTransaction(func(rs *ResourceState) {
94 | if rs.Status() == ResourceStatusDeleted {
95 | absent = true
96 | }
97 | })
98 |
99 | return absent
100 | })
101 |
102 | return absentConditions
103 | }
104 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/attribute.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | const (
4 | AttributeNameRequiredReplicas = "required replicas"
5 | AttributeNameStatus = "status"
6 | AttributeNameConditionTarget = "condition target"
7 | AttributeNameConditionCurrentValue = "condition current value"
8 | )
9 |
10 | type Attributer interface {
11 | Name() string
12 | }
13 |
14 | func NewAttribute[T int | string](name string, value T) *Attribute[T] {
15 | return &Attribute[T]{
16 | Value: value,
17 | name: name,
18 | }
19 | }
20 |
21 | type Attribute[T int | string] struct {
22 | Value T
23 |
24 | name string
25 | }
26 |
27 | func (a *Attribute[T]) Name() string {
28 | return a.name
29 | }
30 |
--------------------------------------------------------------------------------
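
A short sketch of the generic Attribute above; the type parameter is inferred from the value, which is constrained to int or string:

package main

import (
	"fmt"

	"github.com/werf/kubedog/pkg/trackers/dyntracker/statestore"
)

func main() {
	replicas := statestore.NewAttribute(statestore.AttributeNameRequiredReplicas, 3)
	status := statestore.NewAttribute(statestore.AttributeNameStatus, "Progressing")

	fmt.Println(replicas.Name(), replicas.Value) // required replicas 3
	fmt.Println(status.Name(), status.Value)     // status Progressing
}
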
/pkg/trackers/dyntracker/statestore/conditions.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | type ReadinessTaskConditionFn func(taskState *ReadinessTaskState) bool
4 |
5 | type PresenceTaskConditionFn func(taskState *PresenceTaskState) bool
6 |
7 | type AbsenceTaskConditionFn func(taskState *AbsenceTaskState) bool
8 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/error.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import "time"
4 |
5 | type Error struct {
6 | Time time.Time
7 | Err error
8 | }
9 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/event.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import "time"
4 |
5 | type Event struct {
6 | Time time.Time
7 | Message string
8 | }
9 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/presence_task_state.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import (
4 | "github.com/google/uuid"
5 | "k8s.io/apimachinery/pkg/runtime/schema"
6 |
7 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
8 | )
9 |
10 | type PresenceTaskState struct {
11 | name string
12 | namespace string
13 | groupVersionKind schema.GroupVersionKind
14 |
15 | presentConditions []PresenceTaskConditionFn
16 | failureConditions []PresenceTaskConditionFn
17 |
18 | status PresenceTaskStatus
19 | uuid string
20 | resourceState *util.Concurrent[*ResourceState]
21 | }
22 |
23 | func NewPresenceTaskState(name, namespace string, groupVersionKind schema.GroupVersionKind, opts PresenceTaskStateOptions) *PresenceTaskState {
24 | resourceState := util.NewConcurrent(NewResourceState(name, namespace, groupVersionKind))
25 |
26 | presentConditions := initPresenceTaskStatePresentConditions()
27 | failureConditions := []PresenceTaskConditionFn{}
28 |
29 | uuid := uuid.NewString()
30 |
31 | return &PresenceTaskState{
32 | name: name,
33 | namespace: namespace,
34 | groupVersionKind: groupVersionKind,
35 | presentConditions: presentConditions,
36 | failureConditions: failureConditions,
37 | uuid: uuid,
38 | resourceState: resourceState,
39 | }
40 | }
41 |
42 | type PresenceTaskStateOptions struct{}
43 |
44 | func (s *PresenceTaskState) Name() string {
45 | return s.name
46 | }
47 |
48 | func (s *PresenceTaskState) Namespace() string {
49 | return s.namespace
50 | }
51 |
52 | func (s *PresenceTaskState) GroupVersionKind() schema.GroupVersionKind {
53 | return s.groupVersionKind
54 | }
55 |
56 | func (s *PresenceTaskState) ResourceState() *util.Concurrent[*ResourceState] {
57 | return s.resourceState
58 | }
59 |
60 | func (s *PresenceTaskState) SetStatus(status PresenceTaskStatus) {
61 | s.status = status
62 | }
63 |
64 | func (s *PresenceTaskState) Status() PresenceTaskStatus {
65 | if s.status != "" {
66 | return s.status
67 | }
68 |
69 | for _, failureCondition := range s.failureConditions {
70 | if failureCondition(s) {
71 | return PresenceTaskStatusFailed
72 | }
73 | }
74 |
75 | for _, presentCondition := range s.presentConditions {
76 | if !presentCondition(s) {
77 | return PresenceTaskStatusProgressing
78 | }
79 | }
80 |
81 | return PresenceTaskStatusPresent
82 | }
83 |
84 | func (s *PresenceTaskState) UUID() string {
85 | return s.uuid
86 | }
87 |
88 | func initPresenceTaskStatePresentConditions() []PresenceTaskConditionFn {
89 | var presentConditions []PresenceTaskConditionFn
90 |
91 | presentConditions = append(presentConditions, func(taskState *PresenceTaskState) bool {
92 | var present bool
93 | taskState.resourceState.RTransaction(func(rs *ResourceState) {
94 | switch rs.Status() {
95 | case ResourceStatusCreated, ResourceStatusReady:
96 | present = true
97 | }
98 | })
99 |
100 | return present
101 | })
102 |
103 | return presentConditions
104 | }
105 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/resource_state.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import (
4 | "time"
5 |
6 | "k8s.io/apimachinery/pkg/runtime/schema"
7 |
8 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
9 | )
10 |
11 | type ResourceState struct {
12 | name string
13 | namespace string
14 | groupVersionKind schema.GroupVersionKind
15 |
16 | status ResourceStatus
17 | attributes []Attributer
18 | events []*Event
19 | errors map[string][]*Error
20 | }
21 |
22 | func NewResourceState(name, namespace string, groupVersionKind schema.GroupVersionKind) *ResourceState {
23 | return &ResourceState{
24 | name: name,
25 | namespace: namespace,
26 | groupVersionKind: groupVersionKind,
27 | status: ResourceStatusUnknown,
28 | errors: make(map[string][]*Error),
29 | }
30 | }
31 |
32 | func (s *ResourceState) Name() string {
33 | return s.name
34 | }
35 |
36 | func (s *ResourceState) Namespace() string {
37 | return s.namespace
38 | }
39 |
40 | func (s *ResourceState) GroupVersionKind() schema.GroupVersionKind {
41 | return s.groupVersionKind
42 | }
43 |
44 | func (s *ResourceState) SetStatus(status ResourceStatus) {
45 | s.status = status
46 | }
47 |
48 | func (s *ResourceState) Status() ResourceStatus {
49 | return s.status
50 | }
51 |
52 | func (s *ResourceState) AddError(err error, source string, timestamp time.Time) {
53 | e := &Error{
54 | Time: timestamp,
55 | Err: err,
56 | }
57 |
58 | if _, ok := s.errors[source]; !ok {
59 | s.errors[source] = []*Error{}
60 | }
61 |
62 | s.errors[source] = append(s.errors[source], e)
63 | }
64 |
65 | func (s *ResourceState) Errors() map[string][]*Error {
66 | result := make(map[string][]*Error)
67 |
68 | for source, errors := range s.errors {
69 | result[source] = append([]*Error{}, errors...)
70 | }
71 |
72 | return result
73 | }
74 |
75 | func (s *ResourceState) AddEvent(message string, timestamp time.Time) {
76 | e := &Event{
77 | Time: timestamp,
78 | Message: message,
79 | }
80 |
81 | s.events = append(s.events, e)
82 | }
83 |
84 | func (s *ResourceState) Events() []*Event {
85 | return append([]*Event{}, s.events...)
86 | }
87 |
88 | func (s *ResourceState) AddAttribute(attr Attributer) {
89 | s.attributes = append(s.attributes, attr)
90 | }
91 |
92 | func (s *ResourceState) Attributes() []Attributer {
93 | return append([]Attributer{}, s.attributes...)
94 | }
95 |
96 | func (s *ResourceState) ID() string {
97 | return util.ResourceID(s.name, s.namespace, s.groupVersionKind)
98 | }
99 |
--------------------------------------------------------------------------------
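
A usage sketch for ResourceState above (names hypothetical). Note that ResourceState itself is not synchronized; elsewhere in the package it is always wrapped in util.Concurrent:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/werf/kubedog/pkg/trackers/dyntracker/statestore"
)

func main() {
	rs := statestore.NewResourceState("myapp", "mynamespace",
		schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})

	fmt.Println(rs.Status()) // unknown (the initial status)

	rs.AddError(errors.New("image pull failed"), "container/main", time.Now())
	rs.SetStatus(statestore.ResourceStatusFailed)

	// Errors returns a copy keyed by source; the internal map stays private.
	for source, errs := range rs.Errors() {
		fmt.Println(source, len(errs))
	}
}
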
/pkg/trackers/dyntracker/statestore/resource_status.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | type ResourceStatus string
4 |
5 | const (
6 | ResourceStatusUnknown ResourceStatus = "unknown"
7 | ResourceStatusReady ResourceStatus = "ready"
8 | ResourceStatusCreated ResourceStatus = "created"
9 | ResourceStatusDeleted ResourceStatus = "deleted"
10 | ResourceStatusFailed ResourceStatus = "failed"
11 | )
12 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/task_status.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | type ReadinessTaskStatus string
4 |
5 | const (
6 | ReadinessTaskStatusProgressing ReadinessTaskStatus = "progressing"
7 | ReadinessTaskStatusReady ReadinessTaskStatus = "ready"
8 | ReadinessTaskStatusFailed ReadinessTaskStatus = "failed"
9 | )
10 |
11 | type PresenceTaskStatus string
12 |
13 | const (
14 | PresenceTaskStatusProgressing PresenceTaskStatus = "progressing"
15 | PresenceTaskStatusPresent PresenceTaskStatus = "present"
16 | PresenceTaskStatusFailed PresenceTaskStatus = "failed"
17 | )
18 |
19 | type AbsenceTaskStatus string
20 |
21 | const (
22 | AbsenceTaskStatusProgressing AbsenceTaskStatus = "progressing"
23 | AbsenceTaskStatusAbsent AbsenceTaskStatus = "absent"
24 | AbsenceTaskStatusFailed AbsenceTaskStatus = "failed"
25 | )
26 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/statestore/task_store.go:
--------------------------------------------------------------------------------
1 | package statestore
2 |
3 | import (
4 | "github.com/werf/kubedog/pkg/trackers/dyntracker/util"
5 | )
6 |
7 | type TaskStore struct {
8 | readinessTasks []*util.Concurrent[*ReadinessTaskState]
9 | presenceTasks []*util.Concurrent[*PresenceTaskState]
10 | absenceTasks []*util.Concurrent[*AbsenceTaskState]
11 | }
12 |
13 | func NewTaskStore() *TaskStore {
14 | return &TaskStore{}
15 | }
16 |
17 | func (s *TaskStore) AddReadinessTaskState(task *util.Concurrent[*ReadinessTaskState]) {
18 | s.readinessTasks = append(s.readinessTasks, task)
19 | }
20 |
21 | func (s *TaskStore) AddPresenceTaskState(task *util.Concurrent[*PresenceTaskState]) {
22 | s.presenceTasks = append(s.presenceTasks, task)
23 | }
24 |
25 | func (s *TaskStore) AddAbsenceTaskState(task *util.Concurrent[*AbsenceTaskState]) {
26 | s.absenceTasks = append(s.absenceTasks, task)
27 | }
28 |
29 | func (s *TaskStore) ReadinessTasksStates() []*util.Concurrent[*ReadinessTaskState] {
30 | return append([]*util.Concurrent[*ReadinessTaskState]{}, s.readinessTasks...)
31 | }
32 |
33 | func (s *TaskStore) PresenceTasksStates() []*util.Concurrent[*PresenceTaskState] {
34 | return append([]*util.Concurrent[*PresenceTaskState]{}, s.presenceTasks...)
35 | }
36 |
37 | func (s *TaskStore) AbsenceTasksStates() []*util.Concurrent[*AbsenceTaskState] {
38 | return append([]*util.Concurrent[*AbsenceTaskState]{}, s.absenceTasks...)
39 | }
40 |
--------------------------------------------------------------------------------
/pkg/trackers/dyntracker/util/concurrency.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import "sync"
4 |
5 | type Concurrent[T any] struct {
6 | lock *sync.RWMutex
7 | object T
8 | }
9 |
10 | func NewConcurrent[T any](obj T) *Concurrent[T] {
11 | return &Concurrent[T]{
12 | lock: &sync.RWMutex{},
13 | object: obj,
14 | }
15 | }
16 |
17 | func NewConcurrentWithLock[T any](obj T, lock *sync.RWMutex) *Concurrent[T] {
18 | return &Concurrent[T]{
19 | lock: lock,
20 | object: obj,
21 | }
22 | }
23 |
24 | func (c *Concurrent[T]) RWTransaction(f func(object T)) {
25 | c.lock.Lock()
26 | defer c.lock.Unlock()
27 |
28 | f(c.object)
29 | }
30 |
31 | func (c *Concurrent[T]) RTransaction(f func(object T)) {
32 | c.lock.RLock()
33 | defer c.lock.RUnlock()
34 |
35 | f(c.object)
36 | }
37 |
--------------------------------------------------------------------------------
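
A minimal sketch of the Concurrent wrapper above guarding a mutable value:

package main

import (
	"fmt"

	"github.com/werf/kubedog/pkg/trackers/dyntracker/util"
)

type counter struct{ n int }

func main() {
	c := util.NewConcurrent(&counter{})

	// RWTransaction holds the write lock for the duration of the callback.
	c.RWTransaction(func(cnt *counter) {
		cnt.n++
	})

	// RTransaction holds the read lock; multiple readers may run concurrently.
	c.RTransaction(func(cnt *counter) {
		fmt.Println(cnt.n) // 1
	})
}

NewConcurrentWithLock lets several wrappers share one RWMutex, so related objects can be guarded by a single lock.
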
/pkg/trackers/dyntracker/util/resource.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/apimachinery/pkg/api/meta"
7 | "k8s.io/apimachinery/pkg/runtime/schema"
8 | )
9 |
10 | func ResourceID(name, namespace string, groupVersionKind schema.GroupVersionKind) string {
11 | return fmt.Sprintf("%s:%s:%s:%s", namespace, groupVersionKind.Group, groupVersionKind.Kind, name)
12 | }
13 |
14 | func IsNamespaced(groupVersionKind schema.GroupVersionKind, mapper meta.ResettableRESTMapper) (namespaced bool, err error) {
15 | mapping, err := mapper.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version)
16 | if err != nil {
17 | return false, fmt.Errorf("get resource mapping for %q: %w", groupVersionKind.String(), err)
18 | }
19 |
20 | return mapping.Scope == meta.RESTScopeNamespace, nil
21 | }
22 |
23 | func GVRFromGVK(groupVersionKind schema.GroupVersionKind, mapper meta.ResettableRESTMapper) (schema.GroupVersionResource, error) {
24 | mapping, err := mapper.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version)
25 | if err != nil {
26 | return schema.GroupVersionResource{}, fmt.Errorf("get resource mapping for %q: %w", groupVersionKind.String(), err)
27 | }
28 |
29 | return mapping.Resource, nil
30 | }
31 |
32 | func ResourceHumanID(name, namespace string, groupVersionKind schema.GroupVersionKind, mapper meta.ResettableRESTMapper) string {
33 | namespaced := true
34 | if mapper != nil {
35 | if nsed, err := IsNamespaced(groupVersionKind, mapper); err == nil {
36 | namespaced = nsed
37 | }
38 | }
39 |
40 | if namespaced && namespace != "" {
41 | return fmt.Sprintf("%s/%s/%s", namespace, groupVersionKind.Kind, name)
42 | } else {
43 | return fmt.Sprintf("%s/%s", groupVersionKind.Kind, name)
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
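
Concrete outputs of the helpers above (names hypothetical; with a nil mapper, ResourceHumanID assumes the resource is namespaced):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/werf/kubedog/pkg/trackers/dyntracker/util"
)

func main() {
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}

	fmt.Println(util.ResourceHumanID("myapp", "mynamespace", gvk, nil))
	// mynamespace/Deployment/myapp

	fmt.Println(util.ResourceID("myapp", "mynamespace", gvk))
	// mynamespace:apps:Deployment:myapp
}
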
/pkg/trackers/follow/daemonset.go:
--------------------------------------------------------------------------------
1 | package follow
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/daemonset"
11 | "github.com/werf/kubedog/pkg/tracker/replicaset"
12 | )
13 |
14 | func TrackDaemonSet(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := daemonset.NewFeed()
16 |
17 | feed.OnAdded(func(isReady bool) error {
18 | if isReady {
19 | fmt.Fprintf(display.Out, "# ds/%s appears to be ready\n", name)
20 | } else {
21 | fmt.Fprintf(display.Out, "# ds/%s added\n", name)
22 | }
23 | return nil
24 | })
25 | feed.OnReady(func() error {
26 | fmt.Fprintf(display.Out, "# ds/%s became READY\n", name)
27 | return nil
28 | })
29 | feed.OnEventMsg(func(msg string) error {
30 | fmt.Fprintf(display.Out, "# ds/%s event: %s\n", name, msg)
31 | return nil
32 | })
33 | feed.OnFailed(func(reason string) error {
34 | fmt.Fprintf(display.Out, "# ds/%s FAIL: %s\n", name, reason)
35 | return nil
36 | })
37 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
38 | fmt.Fprintf(display.Out, "# ds/%s po/%s added\n", name, pod.Name)
39 | return nil
40 | })
41 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
42 | fmt.Fprintf(display.Out, "# ds/%s %s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
43 | return nil
44 | })
45 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
46 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
47 | display.OutputLogLines(header, chunk.LogLines)
48 | return nil
49 | })
50 |
51 | return feed.Track(name, namespace, kube, opts)
52 | }
53 |
--------------------------------------------------------------------------------
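
A sketch of calling the follow tracker above from client code; building the clientset is environment-specific (see pkg/kube) and assumed to be done by the caller:

package example

import (
	"time"

	"k8s.io/client-go/kubernetes"

	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/trackers/follow"
)

// FollowDaemonSet follows the (hypothetical) ds/mydaemonset until the feed
// stops or errors. The kubernetes.Interface is assumed to be initialized
// elsewhere (see pkg/kube).
func FollowDaemonSet(kube kubernetes.Interface) error {
	return follow.TrackDaemonSet("mydaemonset", "mynamespace", kube, tracker.Options{
		Timeout: 10 * time.Minute,
	})
}
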
/pkg/trackers/follow/deployment.go:
--------------------------------------------------------------------------------
1 | package follow
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/deployment"
11 | "github.com/werf/kubedog/pkg/tracker/replicaset"
12 | )
13 |
14 | func TrackDeployment(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := deployment.NewFeed()
16 |
17 | feed.OnAdded(func(isReady bool) error {
18 | if isReady {
19 | fmt.Fprintf(display.Out, "# deploy/%s appears to be ready\n", name)
20 | } else {
21 | fmt.Fprintf(display.Out, "# deploy/%s added\n", name)
22 | }
23 | return nil
24 | })
25 | feed.OnReady(func() error {
26 | fmt.Fprintf(display.Out, "# deploy/%s became READY\n", name)
27 | return nil
28 | })
29 | feed.OnFailed(func(reason string) error {
30 | fmt.Fprintf(display.Out, "# deploy/%s FAIL: %s\n", name, reason)
31 | return nil
32 | })
33 | feed.OnEventMsg(func(msg string) error {
34 | fmt.Fprintf(display.Out, "# deploy/%s event: %s\n", name, msg)
35 | return nil
36 | })
37 | feed.OnAddedReplicaSet(func(rs replicaset.ReplicaSet) error {
38 | if rs.IsNew {
39 | fmt.Fprintf(display.Out, "# deploy/%s new rs/%s added\n", name, rs.Name)
40 | } else {
41 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s added\n", name, rs.Name)
42 | }
43 |
44 | return nil
45 | })
46 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
47 | if pod.ReplicaSet.IsNew {
48 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s(new) po/%s added\n", name, pod.ReplicaSet.Name, pod.Name)
49 | } else {
50 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s po/%s added\n", name, pod.ReplicaSet.Name, pod.Name)
51 | }
52 | return nil
53 | })
54 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
55 | if podError.ReplicaSet.IsNew {
56 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s(new) po/%s %s error: %s\n", name, podError.ReplicaSet.Name, podError.PodName, podError.ContainerName, podError.Message)
57 | } else {
58 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s po/%s %s error: %s\n", name, podError.ReplicaSet.Name, podError.PodName, podError.ContainerName, podError.Message)
59 | }
60 | return nil
61 | })
62 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
63 | header := ""
64 | if chunk.ReplicaSet.IsNew {
65 | header = fmt.Sprintf("deploy/%s rs/%s(new) po/%s %s", name, chunk.ReplicaSet.Name, chunk.PodName, chunk.ContainerName)
66 | } else {
67 | header = fmt.Sprintf("deploy/%s rs/%s po/%s %s", name, chunk.ReplicaSet.Name, chunk.PodName, chunk.ContainerName)
68 | }
69 | display.OutputLogLines(header, chunk.LogLines)
70 | return nil
71 | })
72 |
73 | return feed.Track(name, namespace, kube, opts)
74 | }
75 |
--------------------------------------------------------------------------------
/pkg/trackers/follow/job.go:
--------------------------------------------------------------------------------
1 | package follow
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/job"
11 | "github.com/werf/kubedog/pkg/tracker/pod"
12 | )
13 |
14 | func TrackJob(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := job.NewFeed()
16 |
17 | feed.OnAdded(func() error {
18 | fmt.Fprintf(display.Out, "# job/%s added\n", name)
19 | return nil
20 | })
21 | feed.OnSucceeded(func() error {
22 | fmt.Fprintf(display.Out, "# job/%s succeeded\n", name)
23 | return nil
24 | })
25 | feed.OnFailed(func(reason string) error {
26 | fmt.Fprintf(display.Out, "# job/%s FAIL: %s\n", name, reason)
27 | return nil
28 | })
29 | feed.OnEventMsg(func(msg string) error {
30 | fmt.Fprintf(display.Out, "# job/%s event: %s\n", name, msg)
31 | return nil
32 | })
33 | feed.OnAddedPod(func(podName string) error {
34 | fmt.Fprintf(display.Out, "# job/%s po/%s added\n", name, podName)
35 | return nil
36 | })
37 | feed.OnPodError(func(podError pod.PodError) error {
38 | fmt.Fprintf(display.Out, "# job/%s po/%s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
39 | return nil
40 | })
41 | feed.OnPodLogChunk(func(chunk *pod.PodLogChunk) error {
42 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
43 | display.OutputLogLines(header, chunk.LogLines)
44 | return nil
45 | })
46 |
47 | return feed.Track(name, namespace, kube, opts)
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/trackers/follow/pod.go:
--------------------------------------------------------------------------------
1 | package follow
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/pod"
11 | )
12 |
13 | func TrackPod(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
14 | feed := pod.NewFeed()
15 |
16 | feed.OnAdded(func() error {
17 | fmt.Fprintf(display.Out, "# po/%s added\n", name)
18 | return nil
19 | })
20 | feed.OnSucceeded(func() error {
21 | fmt.Fprintf(display.Out, "# po/%s succeeded\n", name)
22 | return nil
23 | })
24 | feed.OnFailed(func(reason string) error {
25 | fmt.Fprintf(display.Out, "# po/%s failed: %s\n", name, reason)
26 | return nil
27 | })
28 | feed.OnReady(func() error {
29 | fmt.Fprintf(display.Out, "# po/%s became READY\n", name)
30 | return nil
31 | })
32 | feed.OnEventMsg(func(msg string) error {
33 | fmt.Fprintf(display.Out, "# po/%s event: %s\n", name, msg)
34 | return nil
35 | })
36 | feed.OnContainerError(func(containerError pod.ContainerError) error {
37 | fmt.Fprintf(display.Out, "# po/%s %s error: %s\n", name, containerError.ContainerName, containerError.Message)
38 | return nil
39 | })
40 | feed.OnContainerLogChunk(func(chunk *pod.ContainerLogChunk) error {
41 | header := fmt.Sprintf("po/%s %s", name, chunk.ContainerName)
42 | display.OutputLogLines(header, chunk.LogLines)
43 | return nil
44 | })
45 |
46 | return feed.Track(name, namespace, kube, opts)
47 | }
48 |
--------------------------------------------------------------------------------
/pkg/trackers/follow/statefulset.go:
--------------------------------------------------------------------------------
1 | package follow
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/replicaset"
11 | "github.com/werf/kubedog/pkg/tracker/statefulset"
12 | )
13 |
14 | func TrackStatefulSet(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := statefulset.NewFeed()
16 |
17 | feed.OnAdded(func(isReady bool) error {
18 | if isReady {
19 | fmt.Fprintf(display.Out, "# sts/%s appears to be ready\n", name)
20 | } else {
21 | fmt.Fprintf(display.Out, "# sts/%s added\n", name)
22 | }
23 | return nil
24 | })
25 | feed.OnReady(func() error {
26 | fmt.Fprintf(display.Out, "# sts/%s became READY\n", name)
27 | return nil
28 | })
29 | feed.OnFailed(func(reason string) error {
30 | fmt.Fprintf(display.Out, "# sts/%s FAIL: %s\n", name, reason)
31 | return nil
32 | })
33 | feed.OnEventMsg(func(msg string) error {
34 | fmt.Fprintf(display.Out, "# sts/%s event: %s\n", name, msg)
35 | return nil
36 | })
37 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
38 | fmt.Fprintf(display.Out, "# sts/%s po/%s added\n", name, pod.Name)
39 | return nil
40 | })
41 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
42 | fmt.Fprintf(display.Out, "# sts/%s %s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
43 | return nil
44 | })
45 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
46 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
47 | display.OutputLogLines(header, chunk.LogLines)
48 | return nil
49 | })
50 |
51 | return feed.Track(name, namespace, kube, opts)
52 | }
53 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/daemonset.go:
--------------------------------------------------------------------------------
1 | package rollout
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/daemonset"
11 | "github.com/werf/kubedog/pkg/tracker/replicaset"
12 | )
13 |
14 | // TrackDaemonSetTillReady implements the rollout track mode for a DaemonSet.
15 | //
16 | // It exits when the DaemonSet becomes ready, or on errors.
17 | func TrackDaemonSetTillReady(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
18 | feed := daemonset.NewFeed()
19 |
20 | feed.OnAdded(func(isReady bool) error {
21 | if isReady {
22 | fmt.Fprintf(display.Out, "# ds/%s appears to be ready. Exit\n", name)
23 | return tracker.ErrStopTrack
24 | }
25 | fmt.Fprintf(display.Out, "# ds/%s added\n", name)
26 | return nil
27 | })
28 | feed.OnReady(func() error {
29 | fmt.Fprintf(display.Out, "# ds/%s became READY\n", name)
30 | return tracker.ErrStopTrack
31 | })
32 | feed.OnFailed(func(reason string) error {
33 | fmt.Fprintf(display.Err, "# ds/%s FAIL: %s\n", name, reason)
34 | return tracker.ResourceErrorf("ds/%s failed: %s", name, reason)
35 | })
36 | feed.OnEventMsg(func(msg string) error {
37 | fmt.Fprintf(display.Out, "# ds/%s event: %s\n", name, msg)
38 | return nil
39 | })
40 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
41 | fmt.Fprintf(display.Out, "# ds/%s po/%s added\n", name, pod.Name)
42 | return nil
43 | })
44 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
45 | fmt.Fprintf(display.Err, "# ds/%s %s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
46 | return tracker.ResourceErrorf("ds/%s po/%s %s failed: %s", name, podError.PodName, podError.ContainerName, podError.Message)
47 | })
48 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
49 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
50 | display.OutputLogLines(header, chunk.LogLines)
51 | return nil
52 | })
53 |
54 | err := feed.Track(name, namespace, kube, opts)
55 | if err != nil {
56 | switch e := err.(type) {
57 | case *tracker.ResourceError:
58 | return e
59 | default:
60 | fmt.Fprintf(display.Err, "error tracking ds/%s in ns/%s: %s\n", name, namespace, err)
61 | }
62 | }
63 | return err
64 | }
65 |
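A minimal usage sketch for this rollout tracker (the resource and namespace names are placeholders; kube.Init and tracker.Options follow the pattern shown in playground/multitrack-1/main.go below):

package main

import (
	"github.com/werf/kubedog/pkg/kube"
	"github.com/werf/kubedog/pkg/tracker"
	"github.com/werf/kubedog/pkg/trackers/rollout"
)

func main() {
	if err := kube.Init(kube.InitOptions{}); err != nil {
		panic(err)
	}

	// Blocks until the DaemonSet becomes ready, then returns nil;
	// a tracked failure surfaces as *tracker.ResourceError.
	// "node-exporter"/"monitoring" are placeholder names.
	if err := rollout.TrackDaemonSetTillReady("node-exporter", "monitoring", kube.Kubernetes, tracker.Options{}); err != nil {
		panic(err)
	}
}

TrackDeploymentTillReady, TrackJobTillDone, TrackPodTillReady and TrackStatefulSetTillReady below share the same call shape and error handling.
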
--------------------------------------------------------------------------------
/pkg/trackers/rollout/deployment.go:
--------------------------------------------------------------------------------
1 | package rollout
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/deployment"
11 | "github.com/werf/kubedog/pkg/tracker/replicaset"
12 | )
13 |
14 | func TrackDeploymentTillReady(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := deployment.NewFeed()
16 |
17 | feed.OnAdded(func(isReady bool) error {
18 | if isReady {
19 | fmt.Fprintf(display.Out, "# deploy/%s appears to be ready\n", name)
20 | return tracker.ErrStopTrack
21 | }
22 | fmt.Fprintf(display.Out, "# deploy/%s added\n", name)
23 | return nil
24 | })
25 | feed.OnReady(func() error {
26 | fmt.Fprintf(display.Out, "# deploy/%s become READY\n", name)
27 | return tracker.ErrStopTrack
28 | })
29 | feed.OnFailed(func(reason string) error {
30 | fmt.Fprintf(display.Out, "# deploy/%s FAIL: %s\n", name, reason)
31 | return tracker.ResourceErrorf("failed: %s", reason)
32 | })
33 | feed.OnEventMsg(func(msg string) error {
34 | fmt.Fprintf(display.Out, "# deploy/%s event: %s\n", name, msg)
35 | return nil
36 | })
37 | feed.OnAddedReplicaSet(func(rs replicaset.ReplicaSet) error {
38 | if !rs.IsNew {
39 | return nil
40 | }
41 | fmt.Fprintf(display.Out, "# deploy/%s rs/%s added\n", name, rs.Name)
42 | return nil
43 | })
44 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
45 | if !pod.ReplicaSet.IsNew {
46 | return nil
47 | }
48 | fmt.Fprintf(display.Out, "# deploy/%s po/%s added\n", name, pod.Name)
49 | return nil
50 | })
51 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
52 | if !podError.ReplicaSet.IsNew {
53 | return nil
54 | }
55 | fmt.Fprintf(display.Out, "# deploy/%s po/%s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
56 | return tracker.ResourceErrorf("deploy/%s po/%s %s failed: %s", name, podError.PodName, podError.ContainerName, podError.Message)
57 | })
58 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
59 | if !chunk.ReplicaSet.IsNew {
60 | return nil
61 | }
62 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
63 | display.OutputLogLines(header, chunk.LogLines)
64 | return nil
65 | })
66 |
67 | err := feed.Track(name, namespace, kube, opts)
68 | if err != nil {
69 | switch e := err.(type) {
70 | case *tracker.ResourceError:
71 | return e
72 | default:
73 | fmt.Fprintf(display.Err, "error tracking deploy/%s in ns/%s: %s\n", name, namespace, err)
74 | }
75 | }
76 | return err
77 | }
78 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/job.go:
--------------------------------------------------------------------------------
1 | package rollout
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/job"
11 | "github.com/werf/kubedog/pkg/tracker/pod"
12 | )
13 |
14 | func TrackJobTillDone(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
15 | feed := job.NewFeed()
16 |
17 | feed.OnAdded(func() error {
18 | fmt.Fprintf(display.Out, "# job/%s added\n", name)
19 | return nil
20 | })
21 | feed.OnSucceeded(func() error {
22 | fmt.Fprintf(display.Out, "# job/%s succeeded\n", name)
23 | return tracker.ErrStopTrack
24 | })
25 | feed.OnFailed(func(reason string) error {
26 | fmt.Fprintf(display.Out, "# job/%s FAIL: %s\n", name, reason)
27 | return tracker.ResourceErrorf("failed: %s", reason)
28 | })
29 | feed.OnEventMsg(func(msg string) error {
30 | fmt.Fprintf(display.Out, "# job/%s event: %s\n", name, msg)
31 | return nil
32 | })
33 | feed.OnAddedPod(func(podName string) error {
34 | fmt.Fprintf(display.Out, "# job/%s po/%s added\n", name, podName)
35 | return nil
36 | })
37 | feed.OnPodLogChunk(func(chunk *pod.PodLogChunk) error {
38 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
39 | display.OutputLogLines(header, chunk.LogLines)
40 | return nil
41 | })
42 | feed.OnPodError(func(podError pod.PodError) error {
43 | fmt.Fprintf(display.Out, "# job/%s po/%s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
44 | return tracker.ResourceErrorf("job/%s po/%s %s failed: %s", name, podError.PodName, podError.ContainerName, podError.Message)
45 | })
46 |
47 | err := feed.Track(name, namespace, kube, opts)
48 | if err != nil {
49 | switch e := err.(type) {
50 | case *tracker.ResourceError:
51 | return e
52 | default:
53 | fmt.Fprintf(display.Err, "error tracking job/%s in ns/%s: %s\n", name, namespace, err)
54 | }
55 | }
56 | return err
57 | }
58 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/canary.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "k8s.io/client-go/kubernetes"
5 |
6 | "github.com/werf/kubedog/pkg/tracker/canary"
7 | )
8 |
9 | func (mt *multitracker) TrackCanary(kube kubernetes.Interface, spec MultitrackSpec, opts MultitrackOptions) error {
10 | feed := canary.NewFeed()
11 |
12 | feed.OnAdded(func() error {
13 | mt.mux.Lock()
14 | defer mt.mux.Unlock()
15 |
16 | mt.CanariesStatuses[spec.ResourceName] = feed.GetStatus()
17 |
18 | return mt.canaryAdded(spec, feed)
19 | })
20 | feed.OnSucceeded(func() error {
21 | mt.mux.Lock()
22 | defer mt.mux.Unlock()
23 |
24 | mt.CanariesStatuses[spec.ResourceName] = feed.GetStatus()
25 |
26 | return mt.canarySucceeded(spec, feed)
27 | })
28 | feed.OnFailed(func(reason string) error {
29 | mt.mux.Lock()
30 | defer mt.mux.Unlock()
31 |
32 | mt.CanariesStatuses[spec.ResourceName] = feed.GetStatus()
33 |
34 | return mt.canaryFailed(spec, feed, reason)
35 | })
36 | feed.OnEventMsg(func(msg string) error {
37 | mt.mux.Lock()
38 | defer mt.mux.Unlock()
39 |
40 | mt.CanariesStatuses[spec.ResourceName] = feed.GetStatus()
41 |
42 | return mt.canaryEventMsg(spec, feed, msg)
43 | })
44 |
45 | feed.OnStatus(func(status canary.CanaryStatus) error {
46 | mt.mux.Lock()
47 | defer mt.mux.Unlock()
48 |
49 | mt.CanariesStatuses[spec.ResourceName] = status
50 |
51 | return nil
52 | })
53 |
54 | return feed.Track(spec.ResourceName, spec.Namespace, kube, opts.Options)
55 | }
56 |
57 | func (mt *multitracker) canaryAdded(spec MultitrackSpec, feed canary.Feed) error {
58 | mt.displayResourceTrackerMessageF("canary", spec.ResourceName, spec.ShowServiceMessages, "added")
59 |
60 | return nil
61 | }
62 |
63 | func (mt *multitracker) canarySucceeded(spec MultitrackSpec, feed canary.Feed) error {
64 | mt.displayResourceTrackerMessageF("canary", spec.ResourceName, spec.ShowServiceMessages, "succeeded")
65 |
66 | return mt.handleResourceReadyCondition(mt.TrackingCanaries, spec)
67 | }
68 |
69 | func (mt *multitracker) canaryFailed(spec MultitrackSpec, feed canary.Feed, reason string) error {
70 | mt.displayResourceErrorF("canary", spec.ResourceName, "%s", reason)
71 |
72 | return mt.handleResourceFailure(mt.TrackingCanaries, "canary", spec, reason)
73 | }
74 |
75 | func (mt *multitracker) canaryEventMsg(spec MultitrackSpec, feed canary.Feed, msg string) error {
76 | mt.displayResourceEventF("canary", spec.ResourceName, spec.ShowServiceMessages, "%s", msg)
77 | return nil
78 | }
79 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/daemonset.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/daemonset"
9 | "github.com/werf/kubedog/pkg/tracker/replicaset"
10 | )
11 |
12 | func (mt *multitracker) TrackDaemonSet(kube kubernetes.Interface, spec MultitrackSpec, opts MultitrackOptions) error {
13 | feed := daemonset.NewFeed()
14 |
15 | feed.OnAdded(func(isReady bool) error {
16 | mt.mux.Lock()
17 | defer mt.mux.Unlock()
18 |
19 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
20 |
21 | return mt.daemonsetAdded(spec, feed, isReady)
22 | })
23 | feed.OnReady(func() error {
24 | mt.mux.Lock()
25 | defer mt.mux.Unlock()
26 |
27 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
28 |
29 | return mt.daemonsetReady(spec, feed)
30 | })
31 | feed.OnFailed(func(reason string) error {
32 | mt.mux.Lock()
33 | defer mt.mux.Unlock()
34 |
35 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
36 |
37 | return mt.daemonsetFailed(spec, feed, reason)
38 | })
39 | feed.OnEventMsg(func(msg string) error {
40 | mt.mux.Lock()
41 | defer mt.mux.Unlock()
42 |
43 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
44 |
45 | return mt.daemonsetEventMsg(spec, feed, msg)
46 | })
47 | feed.OnAddedReplicaSet(func(rs replicaset.ReplicaSet) error {
48 | mt.mux.Lock()
49 | defer mt.mux.Unlock()
50 |
51 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
52 |
53 | return mt.daemonsetAddedReplicaSet(spec, feed, rs)
54 | })
55 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
56 | mt.mux.Lock()
57 | defer mt.mux.Unlock()
58 |
59 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
60 |
61 | return mt.daemonsetAddedPod(spec, feed, pod)
62 | })
63 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
64 | mt.mux.Lock()
65 | defer mt.mux.Unlock()
66 |
67 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
68 |
69 | return mt.daemonsetPodError(spec, feed, podError)
70 | })
71 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
72 | mt.mux.Lock()
73 | defer mt.mux.Unlock()
74 |
75 | mt.DaemonSetsStatuses[spec.ResourceName] = feed.GetStatus()
76 |
77 | return mt.daemonsetPodLogChunk(spec, feed, chunk)
78 | })
79 | feed.OnStatus(func(status daemonset.DaemonSetStatus) error {
80 | mt.mux.Lock()
81 | defer mt.mux.Unlock()
82 |
83 | mt.DaemonSetsStatuses[spec.ResourceName] = status
84 |
85 | return nil
86 | })
87 |
88 | return feed.Track(spec.ResourceName, spec.Namespace, kube, opts.Options)
89 | }
90 |
91 | func (mt *multitracker) daemonsetAdded(spec MultitrackSpec, feed daemonset.Feed, isReady bool) error {
92 | if isReady {
93 | mt.displayResourceTrackerMessageF("ds", spec.ResourceName, spec.ShowServiceMessages, "appears to be READY")
94 |
95 | return mt.handleResourceReadyCondition(mt.TrackingDaemonSets, spec)
96 | }
97 |
98 | mt.displayResourceTrackerMessageF("ds", spec.ResourceName, spec.ShowServiceMessages, "added")
99 |
100 | return nil
101 | }
102 |
103 | func (mt *multitracker) daemonsetReady(spec MultitrackSpec, feed daemonset.Feed) error {
104 | mt.displayResourceTrackerMessageF("ds", spec.ResourceName, spec.ShowServiceMessages, "become READY")
105 |
106 | return mt.handleResourceReadyCondition(mt.TrackingDaemonSets, spec)
107 | }
108 |
109 | func (mt *multitracker) daemonsetFailed(spec MultitrackSpec, feed daemonset.Feed, reason string) error {
110 | mt.displayResourceErrorF("ds", spec.ResourceName, "%s", reason)
111 |
112 | return mt.handleResourceFailure(mt.TrackingDaemonSets, "ds", spec, reason)
113 | }
114 |
115 | func (mt *multitracker) daemonsetEventMsg(spec MultitrackSpec, feed daemonset.Feed, msg string) error {
116 | mt.displayResourceEventF("ds", spec.ResourceName, spec.ShowServiceMessages, "%s", msg)
117 | return nil
118 | }
119 |
120 | func (mt *multitracker) daemonsetAddedReplicaSet(spec MultitrackSpec, feed daemonset.Feed, rs replicaset.ReplicaSet) error {
121 | mt.displayResourceTrackerMessageF("ds", spec.ResourceName, spec.ShowServiceMessages, "rs/%s added", rs.Name)
122 | return nil
123 | }
124 |
125 | func (mt *multitracker) daemonsetAddedPod(spec MultitrackSpec, feed daemonset.Feed, pod replicaset.ReplicaSetPod) error {
126 | mt.displayResourceTrackerMessageF("ds", spec.ResourceName, spec.ShowServiceMessages, "po/%s added", pod.Name)
127 | return nil
128 | }
129 |
130 | func (mt *multitracker) daemonsetPodError(spec MultitrackSpec, feed daemonset.Feed, podError replicaset.ReplicaSetPodError) error {
131 | reason := fmt.Sprintf("po/%s container/%s: %s", podError.PodName, podError.ContainerName, podError.Message)
132 |
133 | mt.displayResourceErrorF("ds", spec.ResourceName, "%s", reason)
134 |
135 | return mt.handleResourceFailure(mt.TrackingDaemonSets, "ds", spec, reason)
136 | }
137 |
138 | func (mt *multitracker) daemonsetPodLogChunk(spec MultitrackSpec, feed daemonset.Feed, chunk *replicaset.ReplicaSetPodLogChunk) error {
139 | status := mt.DaemonSetsStatuses[spec.ResourceName]
140 | if podStatus, hasKey := status.Pods[chunk.PodName]; hasKey {
141 | if podStatus.IsReady {
142 | return nil
143 | }
144 | }
145 |
146 | mt.displayResourceLogChunk("ds", spec, podContainerLogChunkHeader(chunk.PodName, chunk.ContainerLogChunk), chunk.ContainerLogChunk)
147 | return nil
148 | }
149 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/deployment.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/deployment"
9 | "github.com/werf/kubedog/pkg/tracker/replicaset"
10 | )
11 |
12 | func (mt *multitracker) TrackDeployment(kube kubernetes.Interface, spec MultitrackSpec, opts MultitrackOptions) error {
13 | feed := deployment.NewFeed()
14 |
15 | feed.OnAdded(func(isReady bool) error {
16 | mt.mux.Lock()
17 | defer mt.mux.Unlock()
18 |
19 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
20 |
21 | return mt.deploymentAdded(spec, feed, isReady)
22 | })
23 | feed.OnReady(func() error {
24 | mt.mux.Lock()
25 | defer mt.mux.Unlock()
26 |
27 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
28 |
29 | return mt.deploymentReady(spec, feed)
30 | })
31 | feed.OnFailed(func(reason string) error {
32 | mt.mux.Lock()
33 | defer mt.mux.Unlock()
34 |
35 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
36 |
37 | return mt.deploymentFailed(spec, feed, reason)
38 | })
39 | feed.OnEventMsg(func(msg string) error {
40 | mt.mux.Lock()
41 | defer mt.mux.Unlock()
42 |
43 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
44 |
45 | return mt.deploymentEventMsg(spec, feed, msg)
46 | })
47 | feed.OnAddedReplicaSet(func(rs replicaset.ReplicaSet) error {
48 | mt.mux.Lock()
49 | defer mt.mux.Unlock()
50 |
51 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
52 |
53 | return mt.deploymentAddedReplicaSet(spec, feed, rs)
54 | })
55 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
56 | mt.mux.Lock()
57 | defer mt.mux.Unlock()
58 |
59 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
60 |
61 | return mt.deploymentAddedPod(spec, feed, pod)
62 | })
63 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
64 | mt.mux.Lock()
65 | defer mt.mux.Unlock()
66 |
67 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
68 |
69 | return mt.deploymentPodError(spec, feed, podError)
70 | })
71 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
72 | mt.mux.Lock()
73 | defer mt.mux.Unlock()
74 |
75 | mt.DeploymentsStatuses[spec.ResourceName] = feed.GetStatus()
76 |
77 | return mt.deploymentPodLogChunk(spec, feed, chunk)
78 | })
79 | feed.OnStatus(func(status deployment.DeploymentStatus) error {
80 | mt.mux.Lock()
81 | defer mt.mux.Unlock()
82 |
83 | mt.DeploymentsStatuses[spec.ResourceName] = status
84 |
85 | return nil
86 | })
87 |
88 | return feed.Track(spec.ResourceName, spec.Namespace, kube, opts.Options)
89 | }
90 |
91 | func (mt *multitracker) deploymentAdded(spec MultitrackSpec, feed deployment.Feed, isReady bool) error {
92 | if isReady {
93 | mt.displayResourceTrackerMessageF("deploy", spec.ResourceName, spec.ShowServiceMessages, "appears to be READY")
94 |
95 | return mt.handleResourceReadyCondition(mt.TrackingDeployments, spec)
96 | }
97 |
98 | mt.displayResourceTrackerMessageF("deploy", spec.ResourceName, spec.ShowServiceMessages, "added")
99 |
100 | return nil
101 | }
102 |
103 | func (mt *multitracker) deploymentReady(spec MultitrackSpec, feed deployment.Feed) error {
104 | mt.displayResourceTrackerMessageF("deploy", spec.ResourceName, spec.ShowServiceMessages, "become READY")
105 |
106 | return mt.handleResourceReadyCondition(mt.TrackingDeployments, spec)
107 | }
108 |
109 | func (mt *multitracker) deploymentFailed(spec MultitrackSpec, feed deployment.Feed, reason string) error {
110 | mt.displayResourceErrorF("deploy", spec.ResourceName, "%s", reason)
111 |
112 | return mt.handleResourceFailure(mt.TrackingDeployments, "deploy", spec, reason)
113 | }
114 |
115 | func (mt *multitracker) deploymentEventMsg(spec MultitrackSpec, feed deployment.Feed, msg string) error {
116 | mt.displayResourceEventF("deploy", spec.ResourceName, spec.ShowServiceMessages, "%s", msg)
117 | return nil
118 | }
119 |
120 | func (mt *multitracker) deploymentAddedReplicaSet(spec MultitrackSpec, feed deployment.Feed, rs replicaset.ReplicaSet) error {
121 | if !rs.IsNew {
122 | return nil
123 | }
124 |
125 | mt.displayResourceTrackerMessageF("deploy", spec.ResourceName, spec.ShowServiceMessages, "rs/%s added", rs.Name)
126 |
127 | return nil
128 | }
129 |
130 | func (mt *multitracker) deploymentAddedPod(spec MultitrackSpec, feed deployment.Feed, pod replicaset.ReplicaSetPod) error {
131 | if !pod.ReplicaSet.IsNew {
132 | return nil
133 | }
134 |
135 | mt.displayResourceTrackerMessageF("deploy", spec.ResourceName, spec.ShowServiceMessages, "po/%s added", pod.Name)
136 |
137 | return nil
138 | }
139 |
140 | func (mt *multitracker) deploymentPodError(spec MultitrackSpec, feed deployment.Feed, podError replicaset.ReplicaSetPodError) error {
141 | if !podError.ReplicaSet.IsNew {
142 | return nil
143 | }
144 |
145 | reason := fmt.Sprintf("po/%s container/%s: %s", podError.PodName, podError.ContainerName, podError.Message)
146 |
147 | mt.displayResourceErrorF("deploy", spec.ResourceName, "%s", reason)
148 |
149 | return mt.handleResourceFailure(mt.TrackingDeployments, "deploy", spec, reason)
150 | }
151 |
152 | func (mt *multitracker) deploymentPodLogChunk(spec MultitrackSpec, feed deployment.Feed, chunk *replicaset.ReplicaSetPodLogChunk) error {
153 | if !chunk.ReplicaSet.IsNew {
154 | return nil
155 | }
156 |
157 | status := mt.DeploymentsStatuses[spec.ResourceName]
158 | if podStatus, hasKey := status.Pods[chunk.PodName]; hasKey {
159 | if podStatus.IsReady {
160 | return nil
161 | }
162 | }
163 |
164 | mt.displayResourceLogChunk("deploy", spec, podContainerLogChunkHeader(chunk.PodName, chunk.ContainerLogChunk), chunk.ContainerLogChunk)
165 |
166 | return nil
167 | }
168 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/generic.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "time"
5 |
6 | corev1 "k8s.io/api/core/v1"
7 |
8 | gentrck "github.com/werf/kubedog/pkg/tracker/generic"
9 | "github.com/werf/kubedog/pkg/trackers/rollout/multitrack/generic"
10 | )
11 |
12 | func (mt *multitracker) TrackGeneric(resource *generic.Resource, timeout, noActivityTimeout time.Duration) error {
13 | resource.Feed.OnAdded(func(status *gentrck.ResourceStatus) error {
14 | return mt.genericAdded(resource, status)
15 | })
16 |
17 | resource.Feed.OnReady(func(status *gentrck.ResourceStatus) error {
18 | return mt.genericReady(resource, status)
19 | })
20 |
21 | resource.Feed.OnFailed(func(status *gentrck.ResourceStatus) error {
22 | return mt.genericFailed(resource, status)
23 | })
24 |
25 | resource.Feed.OnStatus(func(status *gentrck.ResourceStatus) error {
26 | resource.State.SetLastStatus(status)
27 | return nil
28 | })
29 |
30 | resource.Feed.OnEventMsg(func(event *corev1.Event) error {
31 | return mt.genericEventMsg(resource, event)
32 | })
33 |
34 | return resource.Feed.Track(resource.Context.Context(), timeout, noActivityTimeout)
35 | }
36 |
37 | func (mt *multitracker) genericAdded(resource *generic.Resource, status *gentrck.ResourceStatus) error {
38 | resource.State.SetLastStatus(status)
39 |
40 | mt.mux.Lock()
41 | defer mt.mux.Unlock()
42 |
43 | if status.IsReady() {
44 | mt.displayResourceTrackerMessageF(resource.Spec.GroupVersionKindNamespaceString(), resource.Spec.Name, resource.Spec.ShowServiceMessages, "appears to be READY")
45 | return mt.handleGenericResourceReadyCondition(resource)
46 | }
47 |
48 | mt.displayResourceTrackerMessageF(resource.Spec.GroupVersionKindNamespaceString(), resource.Spec.Name, resource.Spec.ShowServiceMessages, "added")
49 |
50 | return nil
51 | }
52 |
53 | func (mt *multitracker) genericReady(resource *generic.Resource, status *gentrck.ResourceStatus) error {
54 | resource.State.SetLastStatus(status)
55 |
56 | mt.mux.Lock()
57 | defer mt.mux.Unlock()
58 |
59 | mt.displayResourceTrackerMessageF(resource.Spec.GroupVersionKindNamespaceString(), resource.Spec.Name, resource.Spec.ShowServiceMessages, "became READY")
60 |
61 | return mt.handleGenericResourceReadyCondition(resource)
62 | }
63 |
64 | func (mt *multitracker) genericFailed(resource *generic.Resource, status *gentrck.ResourceStatus) error {
65 | resource.State.SetLastStatus(status)
66 |
67 | mt.mux.Lock()
68 | defer mt.mux.Unlock()
69 |
70 | mt.displayResourceErrorF(resource.Spec.GroupVersionKindNamespaceString(), resource.Spec.Name, "%s", status.FailureReason())
71 |
72 | return mt.handleGenericResourceFailure(resource, status.FailureReason())
73 | }
74 |
75 | func (mt *multitracker) genericEventMsg(resource *generic.Resource, event *corev1.Event) error {
76 | mt.mux.Lock()
77 | defer mt.mux.Unlock()
78 |
79 | mt.displayResourceEventF(resource.Spec.GroupVersionKindNamespaceString(), resource.Spec.Name, !resource.Spec.HideEvents, "%s: %s", event.Reason, event.Message)
80 |
81 | return nil
82 | }
83 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/generic/context.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import "context"
4 |
5 | type Context struct {
6 | context context.Context
7 | cancelFunc context.CancelFunc
8 | }
9 |
10 | func NewContext(ctx context.Context) *Context {
11 | if ctx == nil {
12 | ctx = context.Background()
13 | }
14 |
15 | newCtx, cancelFunc := context.WithCancel(ctx)
16 |
17 | return &Context{
18 | context: newCtx,
19 | cancelFunc: cancelFunc,
20 | }
21 | }
22 |
23 | func (c *Context) Context() context.Context {
24 | return c.context
25 | }
26 |
27 | func (c *Context) Cancel() {
28 | c.cancelFunc()
29 | }
30 |
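A minimal sketch of how this wrapper is typically used: derive a cancellable tracking context, hand its context.Context to a Track call, and cancel it from outside (the 30-second delay is an arbitrary stand-in for real shutdown logic):

package main

import (
	"context"
	"time"

	"github.com/werf/kubedog/pkg/trackers/rollout/multitrack/generic"
)

func main() {
	// NewContext also tolerates a nil parent, falling back to context.Background().
	trackCtx := generic.NewContext(context.Background())

	go func() {
		time.Sleep(30 * time.Second) // arbitrary deadline for the sketch
		trackCtx.Cancel()            // aborts work driven by trackCtx.Context()
	}()

	<-trackCtx.Context().Done() // unblocks once Cancel fires
}
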
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/generic/resource.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "context"
5 |
6 | "k8s.io/apimachinery/pkg/api/meta"
7 | "k8s.io/client-go/discovery"
8 | "k8s.io/client-go/dynamic"
9 | "k8s.io/client-go/kubernetes"
10 |
11 | "github.com/werf/kubedog/pkg/tracker/generic"
12 | )
13 |
14 | type Resource struct {
15 | Spec *Spec
16 | State *State
17 | Feed *generic.Feed
18 | Context *Context
19 | }
20 |
21 | func NewResource(
22 | ctx context.Context,
23 | spec *Spec,
24 | client kubernetes.Interface,
25 | dynClient dynamic.Interface,
26 | discClient discovery.CachedDiscoveryInterface,
27 | mapper meta.RESTMapper,
28 | ) *Resource {
29 | tracker := generic.NewTracker(spec.ResourceID, client, dynClient, discClient, mapper)
30 |
31 | return &Resource{
32 | Spec: spec,
33 | State: NewState(),
34 | Feed: generic.NewFeed(tracker),
35 | Context: NewContext(ctx),
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/generic/spec.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/werf/kubedog/pkg/tracker/resid"
8 | )
9 |
10 | type TrackTerminationMode string
11 |
12 | const (
13 | WaitUntilResourceReady TrackTerminationMode = "WaitUntilResourceReady"
14 | NonBlocking TrackTerminationMode = "NonBlocking"
15 | )
16 |
17 | type FailMode string
18 |
19 | const (
20 | IgnoreAndContinueDeployProcess FailMode = "IgnoreAndContinueDeployProcess"
21 | FailWholeDeployProcessImmediately FailMode = "FailWholeDeployProcessImmediately"
22 | HopeUntilEndOfDeployProcess FailMode = "HopeUntilEndOfDeployProcess"
23 | )
24 |
25 | type Spec struct {
26 | *resid.ResourceID
27 |
28 | Timeout time.Duration
29 | NoActivityTimeout *time.Duration
30 | TrackTerminationMode TrackTerminationMode
31 | FailMode FailMode
32 | AllowFailuresCount *int
33 | ShowServiceMessages bool
34 | HideEvents bool
35 | StatusProgressPeriod time.Duration
36 | }
37 |
38 | func (s *Spec) Init() error {
39 | if s.Name == "" {
40 | return fmt.Errorf("resource can't be nil")
41 | }
42 |
43 | if s.GroupVersionKind.Kind == "" {
44 | return fmt.Errorf("resource kind can't be empty")
45 | }
46 |
47 | if s.NoActivityTimeout == nil {
48 | s.NoActivityTimeout = new(time.Duration)
49 | *s.NoActivityTimeout = 4 * time.Minute
50 | }
51 |
52 | if s.TrackTerminationMode == "" {
53 | s.TrackTerminationMode = WaitUntilResourceReady
54 | }
55 |
56 | if s.FailMode == "" {
57 | s.FailMode = FailWholeDeployProcessImmediately
58 | }
59 |
60 | if s.AllowFailuresCount == nil {
61 | s.AllowFailuresCount = new(int)
62 | *s.AllowFailuresCount = 1
63 | }
64 |
65 | return nil
66 | }
67 |
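A sketch of the defaulting Init performs, assuming resid.ResourceID exposes Name and GroupVersionKind fields as the promoted accesses above imply (see pkg/tracker/resid/resource_id.go for the authoritative field set; "myapp" and the GVK are placeholders):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/werf/kubedog/pkg/tracker/resid"
	"github.com/werf/kubedog/pkg/trackers/rollout/multitrack/generic"
)

func main() {
	spec := &generic.Spec{
		// Assumed field names; check pkg/tracker/resid/resource_id.go.
		ResourceID: &resid.ResourceID{
			Name:             "myapp",
			GroupVersionKind: schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
		},
	}

	if err := spec.Init(); err != nil {
		panic(err)
	}

	// Init fills the zero-valued fields with defaults:
	fmt.Println(spec.TrackTerminationMode) // WaitUntilResourceReady
	fmt.Println(spec.FailMode)             // FailWholeDeployProcessImmediately
	fmt.Println(*spec.NoActivityTimeout)   // 4m0s
	fmt.Println(*spec.AllowFailuresCount)  // 1
}
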
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/generic/state.go:
--------------------------------------------------------------------------------
1 | package generic
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/werf/kubedog/pkg/tracker/generic"
7 | )
8 |
9 | type ResourceState string
10 |
11 | const (
12 | ResourceStateActive ResourceState = "ResourceStateActive"
13 | ResourceStateSucceeded ResourceState = "ResourceStateSucceeded"
14 | ResourceStateFailed ResourceState = "ResourceStateFailed"
15 | ResourceStateHoping ResourceState = "ResourceStateHoping"
16 | ResourceStateActiveAfterHoping ResourceState = "ResourceStateActiveAfterHoping"
17 | )
18 |
19 | type State struct {
20 | resourceState ResourceState
21 |
22 | lastStatus *generic.ResourceStatus
23 | lastPrintedStatus *generic.ResourceStatus
24 |
25 | failuresCount int
26 | failedReason string
27 |
28 | mux sync.Mutex
29 | }
30 |
31 | func NewState() *State {
32 | state := &State{}
33 | state.SetResourceState(ResourceStateActive)
34 |
35 | return state
36 | }
37 |
38 | func (s *State) ResourceState() ResourceState {
39 | s.mux.Lock()
40 | defer s.mux.Unlock()
41 |
42 | return s.resourceState
43 | }
44 |
45 | func (s *State) SetResourceState(status ResourceState) {
46 | s.mux.Lock()
47 | defer s.mux.Unlock()
48 |
49 | s.resourceState = status
50 | }
51 |
52 | func (s *State) LastStatus() *generic.ResourceStatus {
53 | s.mux.Lock()
54 | defer s.mux.Unlock()
55 |
56 | return s.lastStatus
57 | }
58 |
59 | func (s *State) SetLastStatus(status *generic.ResourceStatus) {
60 | s.mux.Lock()
61 | defer s.mux.Unlock()
62 |
63 | s.lastStatus = status
64 | }
65 |
66 | func (s *State) LastPrintedStatus() *generic.ResourceStatus {
67 | s.mux.Lock()
68 | defer s.mux.Unlock()
69 |
70 | return s.lastPrintedStatus
71 | }
72 |
73 | func (s *State) SetLastPrintedStatus(status *generic.ResourceStatus) {
74 | s.mux.Lock()
75 | defer s.mux.Unlock()
76 |
77 | s.lastPrintedStatus = status
78 | }
79 |
80 | func (s *State) FailuresCount() int {
81 | s.mux.Lock()
82 | defer s.mux.Unlock()
83 |
84 | return s.failuresCount
85 | }
86 |
87 | func (s *State) BumpFailuresCount() {
88 | s.mux.Lock()
89 | defer s.mux.Unlock()
90 |
91 | s.failuresCount++
92 | }
93 |
94 | func (s *State) FailedReason() string {
95 | s.mux.Lock()
96 | defer s.mux.Unlock()
97 |
98 | return s.failedReason
99 | }
100 |
101 | func (s *State) SetFailedReason(reason string) {
102 | s.mux.Lock()
103 | defer s.mux.Unlock()
104 |
105 | s.failedReason = reason
106 | }
107 |
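A minimal sketch of the failure bookkeeping this State supports, as the multitracker drives it (allowFailures is a stand-in for *spec.AllowFailuresCount, and the failure reason is an invented example):

package main

import "github.com/werf/kubedog/pkg/trackers/rollout/multitrack/generic"

func main() {
	state := generic.NewState() // starts in ResourceStateActive
	allowFailures := 1          // stand-in for *spec.AllowFailuresCount

	state.BumpFailuresCount()
	state.SetFailedReason("container backend: CrashLoopBackOff") // example reason

	if state.FailuresCount() > allowFailures {
		state.SetResourceState(generic.ResourceStateFailed)
	} else {
		state.SetResourceState(generic.ResourceStateHoping)
	}
}

All accessors lock the internal mutex, so callbacks running on different tracker goroutines can share one State safely.
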
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/job.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/job"
9 | "github.com/werf/kubedog/pkg/tracker/pod"
10 | )
11 |
12 | func (mt *multitracker) TrackJob(kube kubernetes.Interface, spec MultitrackSpec, opts MultitrackOptions) error {
13 | feed := job.NewFeed()
14 |
15 | feed.OnAdded(func() error {
16 | mt.mux.Lock()
17 | defer mt.mux.Unlock()
18 |
19 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
20 |
21 | return mt.jobAdded(spec, feed)
22 | })
23 | feed.OnSucceeded(func() error {
24 | mt.mux.Lock()
25 | defer mt.mux.Unlock()
26 |
27 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
28 |
29 | return mt.jobSucceeded(spec, feed)
30 | })
31 | feed.OnFailed(func(reason string) error {
32 | mt.mux.Lock()
33 | defer mt.mux.Unlock()
34 |
35 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
36 |
37 | return mt.jobFailed(spec, feed, reason)
38 | })
39 | feed.OnEventMsg(func(msg string) error {
40 | mt.mux.Lock()
41 | defer mt.mux.Unlock()
42 |
43 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
44 |
45 | return mt.jobEventMsg(spec, feed, msg)
46 | })
47 | feed.OnAddedPod(func(podName string) error {
48 | mt.mux.Lock()
49 | defer mt.mux.Unlock()
50 |
51 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
52 |
53 | return mt.jobAddedPod(spec, feed, podName)
54 | })
55 | feed.OnPodLogChunk(func(chunk *pod.PodLogChunk) error {
56 | mt.mux.Lock()
57 | defer mt.mux.Unlock()
58 |
59 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
60 |
61 | return mt.jobPodLogChunk(spec, feed, chunk)
62 | })
63 | feed.OnPodError(func(podError pod.PodError) error {
64 | mt.mux.Lock()
65 | defer mt.mux.Unlock()
66 |
67 | mt.JobsStatuses[spec.ResourceName] = feed.GetStatus()
68 |
69 | return mt.jobPodError(spec, feed, podError)
70 | })
71 | feed.OnStatus(func(status job.JobStatus) error {
72 | mt.mux.Lock()
73 | defer mt.mux.Unlock()
74 |
75 | mt.JobsStatuses[spec.ResourceName] = status
76 |
77 | return nil
78 | })
79 |
80 | return feed.Track(spec.ResourceName, spec.Namespace, kube, opts.Options)
81 | }
82 |
83 | func (mt *multitracker) jobAdded(spec MultitrackSpec, feed job.Feed) error {
84 | mt.displayResourceTrackerMessageF("job", spec.ResourceName, spec.ShowServiceMessages, "added")
85 |
86 | return nil
87 | }
88 |
89 | func (mt *multitracker) jobSucceeded(spec MultitrackSpec, feed job.Feed) error {
90 | mt.displayResourceTrackerMessageF("job", spec.ResourceName, spec.ShowServiceMessages, "succeeded")
91 |
92 | return mt.handleResourceReadyCondition(mt.TrackingJobs, spec)
93 | }
94 |
95 | func (mt *multitracker) jobFailed(spec MultitrackSpec, feed job.Feed, reason string) error {
96 | mt.displayResourceErrorF("job", spec.ResourceName, "%s", reason)
97 | return mt.handleResourceFailure(mt.TrackingJobs, "job", spec, reason)
98 | }
99 |
100 | func (mt *multitracker) jobEventMsg(spec MultitrackSpec, feed job.Feed, msg string) error {
101 | mt.displayResourceEventF("job", spec.ResourceName, spec.ShowServiceMessages, "%s", msg)
102 | return nil
103 | }
104 |
105 | func (mt *multitracker) jobAddedPod(spec MultitrackSpec, feed job.Feed, podName string) error {
106 | mt.displayResourceTrackerMessageF("job", spec.ResourceName, spec.ShowServiceMessages, "po/%s added", podName)
107 | return nil
108 | }
109 |
110 | func (mt *multitracker) jobPodLogChunk(spec MultitrackSpec, feed job.Feed, chunk *pod.PodLogChunk) error {
111 | mt.displayResourceLogChunk("job", spec, podContainerLogChunkHeader(chunk.PodName, chunk.ContainerLogChunk), chunk.ContainerLogChunk)
112 | return nil
113 | }
114 |
115 | func (mt *multitracker) jobPodError(spec MultitrackSpec, feed job.Feed, podError pod.PodError) error {
116 | reason := fmt.Sprintf("po/%s container/%s: %s", podError.PodName, podError.ContainerName, podError.Message)
117 |
118 | mt.displayResourceErrorF("job", spec.ResourceName, "%s", reason)
119 |
120 | return mt.handleResourceFailure(mt.TrackingJobs, "job", spec, reason)
121 | }
122 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/multitrack/statefulset.go:
--------------------------------------------------------------------------------
1 | package multitrack
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/tracker/replicaset"
9 | "github.com/werf/kubedog/pkg/tracker/statefulset"
10 | )
11 |
12 | func (mt *multitracker) TrackStatefulSet(kube kubernetes.Interface, spec MultitrackSpec, opts MultitrackOptions) error {
13 | feed := statefulset.NewFeed()
14 |
15 | feed.OnAdded(func(isReady bool) error {
16 | mt.mux.Lock()
17 | defer mt.mux.Unlock()
18 |
19 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
20 |
21 | return mt.statefulsetAdded(spec, feed, isReady)
22 | })
23 | feed.OnReady(func() error {
24 | mt.mux.Lock()
25 | defer mt.mux.Unlock()
26 |
27 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
28 |
29 | return mt.statefulsetReady(spec, feed)
30 | })
31 | feed.OnFailed(func(reason string) error {
32 | mt.mux.Lock()
33 | defer mt.mux.Unlock()
34 |
35 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
36 |
37 | return mt.statefulsetFailed(spec, feed, reason)
38 | })
39 | feed.OnEventMsg(func(msg string) error {
40 | mt.mux.Lock()
41 | defer mt.mux.Unlock()
42 |
43 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
44 |
45 | return mt.statefulsetEventMsg(spec, feed, msg)
46 | })
47 | feed.OnAddedReplicaSet(func(rs replicaset.ReplicaSet) error {
48 | mt.mux.Lock()
49 | defer mt.mux.Unlock()
50 |
51 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
52 |
53 | return mt.statefulsetAddedReplicaSet(spec, feed, rs)
54 | })
55 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
56 | mt.mux.Lock()
57 | defer mt.mux.Unlock()
58 |
59 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
60 |
61 | return mt.statefulsetAddedPod(spec, feed, pod)
62 | })
63 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
64 | mt.mux.Lock()
65 | defer mt.mux.Unlock()
66 |
67 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
68 |
69 | return mt.statefulsetPodError(spec, feed, podError)
70 | })
71 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
72 | mt.mux.Lock()
73 | defer mt.mux.Unlock()
74 |
75 | mt.StatefulSetsStatuses[spec.ResourceName] = feed.GetStatus()
76 |
77 | return mt.statefulsetPodLogChunk(spec, feed, chunk)
78 | })
79 | feed.OnStatus(func(status statefulset.StatefulSetStatus) error {
80 | mt.mux.Lock()
81 | defer mt.mux.Unlock()
82 |
83 | mt.StatefulSetsStatuses[spec.ResourceName] = status
84 |
85 | return nil
86 | })
87 |
88 | return feed.Track(spec.ResourceName, spec.Namespace, kube, opts.Options)
89 | }
90 |
91 | func (mt *multitracker) statefulsetAdded(spec MultitrackSpec, feed statefulset.Feed, isReady bool) error {
92 | if isReady {
93 | mt.displayResourceTrackerMessageF("sts", spec.ResourceName, spec.ShowServiceMessages, "appears to be READY")
94 |
95 | return mt.handleResourceReadyCondition(mt.TrackingStatefulSets, spec)
96 | }
97 |
98 | mt.displayResourceTrackerMessageF("sts", spec.ResourceName, spec.ShowServiceMessages, "added")
99 |
100 | return nil
101 | }
102 |
103 | func (mt *multitracker) statefulsetReady(spec MultitrackSpec, feed statefulset.Feed) error {
104 | mt.displayResourceTrackerMessageF("sts", spec.ResourceName, spec.ShowServiceMessages, "become READY")
105 |
106 | return mt.handleResourceReadyCondition(mt.TrackingStatefulSets, spec)
107 | }
108 |
109 | func (mt *multitracker) statefulsetFailed(spec MultitrackSpec, feed statefulset.Feed, reason string) error {
110 | mt.displayResourceErrorF("sts", spec.ResourceName, "%s", reason)
111 | return mt.handleResourceFailure(mt.TrackingStatefulSets, "sts", spec, reason)
112 | }
113 |
114 | func (mt *multitracker) statefulsetEventMsg(spec MultitrackSpec, feed statefulset.Feed, msg string) error {
115 | mt.displayResourceEventF("sts", spec.ResourceName, spec.ShowServiceMessages, "%s", msg)
116 | return nil
117 | }
118 |
119 | func (mt *multitracker) statefulsetAddedReplicaSet(spec MultitrackSpec, feed statefulset.Feed, rs replicaset.ReplicaSet) error {
120 | mt.displayResourceTrackerMessageF("sts", spec.ResourceName, spec.ShowServiceMessages, "rs/%s added", rs.Name)
121 | return nil
122 | }
123 |
124 | func (mt *multitracker) statefulsetAddedPod(spec MultitrackSpec, feed statefulset.Feed, pod replicaset.ReplicaSetPod) error {
125 | mt.displayResourceTrackerMessageF("sts", spec.ResourceName, spec.ShowServiceMessages, "po/%s added", pod.Name)
126 | return nil
127 | }
128 |
129 | func (mt *multitracker) statefulsetPodError(spec MultitrackSpec, feed statefulset.Feed, podError replicaset.ReplicaSetPodError) error {
130 | reason := fmt.Sprintf("po/%s container/%s: %s", podError.PodName, podError.ContainerName, podError.Message)
131 |
132 | mt.displayResourceErrorF("sts", spec.ResourceName, "%s", reason)
133 |
134 | return mt.handleResourceFailure(mt.TrackingStatefulSets, "sts", spec, reason)
135 | }
136 |
137 | func (mt *multitracker) statefulsetPodLogChunk(spec MultitrackSpec, feed statefulset.Feed, chunk *replicaset.ReplicaSetPodLogChunk) error {
138 | status := mt.StatefulSetsStatuses[spec.ResourceName]
139 | if podStatus, hasKey := status.Pods[chunk.PodName]; hasKey {
140 | if podStatus.IsReady {
141 | return nil
142 | }
143 | }
144 |
145 | mt.displayResourceLogChunk("sts", spec, podContainerLogChunkHeader(chunk.PodName, chunk.ContainerLogChunk), chunk.ContainerLogChunk)
146 | return nil
147 | }
148 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/pod.go:
--------------------------------------------------------------------------------
1 | package rollout
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/pod"
11 | )
12 |
13 | func TrackPodTillReady(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
14 | feed := pod.NewFeed()
15 |
16 | feed.OnAdded(func() error {
17 | fmt.Fprintf(display.Out, "# po/%s added\n", name)
18 | return nil
19 | })
20 | feed.OnSucceeded(func() error {
21 | fmt.Fprintf(display.Out, "# po/%s succeeded\n", name)
22 | return tracker.ErrStopTrack
23 | })
24 | feed.OnFailed(func(reason string) error {
25 | fmt.Fprintf(display.Out, "# po/%s failed: %s\n", name, reason)
26 | return tracker.ResourceErrorf("po/%s failed: %s", name, reason)
27 | })
28 | feed.OnReady(func() error {
29 | fmt.Fprintf(display.Out, "# po/%s become READY\n", name)
30 | return tracker.ErrStopTrack
31 | })
32 | feed.OnEventMsg(func(msg string) error {
33 | fmt.Fprintf(display.Out, "# po/%s event: %s\n", name, msg)
34 | return nil
35 | })
36 | feed.OnContainerError(func(containerError pod.ContainerError) error {
37 | fmt.Fprintf(display.Out, "# po/%s %s error: %s\n", name, containerError.ContainerName, containerError.Message)
38 | return tracker.ResourceErrorf("po/%s %s failed: %s", name, containerError.ContainerName, containerError.Message)
39 | })
40 | feed.OnContainerLogChunk(func(chunk *pod.ContainerLogChunk) error {
41 | header := fmt.Sprintf("po/%s %s", name, chunk.ContainerName)
42 | display.OutputLogLines(header, chunk.LogLines)
43 | return nil
44 | })
45 |
46 | err := feed.Track(name, namespace, kube, opts)
47 | if err != nil {
48 | switch e := err.(type) {
49 | case *tracker.ResourceError:
50 | return e
51 | default:
52 | fmt.Fprintf(display.Err, "error tracking po/%s in ns/%s: %s\n", name, namespace, err)
53 | }
54 | }
55 | return err
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/trackers/rollout/statefulset.go:
--------------------------------------------------------------------------------
1 | package rollout
2 |
3 | import (
4 | "fmt"
5 |
6 | "k8s.io/client-go/kubernetes"
7 |
8 | "github.com/werf/kubedog/pkg/display"
9 | "github.com/werf/kubedog/pkg/tracker"
10 | "github.com/werf/kubedog/pkg/tracker/replicaset"
11 | "github.com/werf/kubedog/pkg/tracker/statefulset"
12 | )
13 |
14 | // TrackStatefulSetTillReady implements rollout track mode for StatefulSet
15 | //
16 | // Exit on StatefulSet ready or on errors
17 | func TrackStatefulSetTillReady(name, namespace string, kube kubernetes.Interface, opts tracker.Options) error {
18 | feed := statefulset.NewFeed()
19 |
20 | feed.OnAdded(func(isReady bool) error {
21 | if isReady {
22 | fmt.Fprintf(display.Out, "# sts/%s appears to be ready\n", name)
23 | return tracker.ErrStopTrack
24 | }
25 |
26 | fmt.Fprintf(display.Out, "# sts/%s added\n", name)
27 | return nil
28 | })
29 | feed.OnReady(func() error {
30 | fmt.Fprintf(display.Out, "# sts/%s become READY\n", name)
31 | return tracker.ErrStopTrack
32 | })
33 | feed.OnFailed(func(reason string) error {
34 | fmt.Fprintf(display.Out, "# sts/%s FAIL: %s\n", name, reason)
35 | return tracker.ResourceErrorf("failed: %s", reason)
36 | })
37 | feed.OnEventMsg(func(msg string) error {
38 | fmt.Fprintf(display.Out, "# sts/%s event: %s\n", name, msg)
39 | return nil
40 | })
41 | feed.OnAddedPod(func(pod replicaset.ReplicaSetPod) error {
42 | fmt.Fprintf(display.Out, "# sts/%s po/%s added\n", name, pod.Name)
43 | return nil
44 | })
45 | feed.OnPodError(func(podError replicaset.ReplicaSetPodError) error {
46 | fmt.Fprintf(display.Out, "# sts/%s %s %s error: %s\n", name, podError.PodName, podError.ContainerName, podError.Message)
47 | return tracker.ResourceErrorf("sts/%s %s %s failed: %s", name, podError.PodName, podError.ContainerName, podError.Message)
48 | })
49 | feed.OnPodLogChunk(func(chunk *replicaset.ReplicaSetPodLogChunk) error {
50 | header := fmt.Sprintf("po/%s %s", chunk.PodName, chunk.ContainerName)
51 | display.OutputLogLines(header, chunk.LogLines)
52 | return nil
53 | })
54 |
55 | err := feed.Track(name, namespace, kube, opts)
56 | if err != nil {
57 | switch e := err.(type) {
58 | case *tracker.ResourceError:
59 | return e
60 | default:
61 | fmt.Fprintf(display.Err, "error tracking StatefulSet `%s` in namespace `%s`: %s\n", name, namespace, err)
62 | }
63 | }
64 | return err
65 | }
66 |
--------------------------------------------------------------------------------
/pkg/utils/color.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/gookit/color"
7 |
8 | "github.com/werf/logboek"
9 | )
10 |
11 | func BoldF(format string, a ...interface{}) string {
12 | return styleF(color.New(color.Bold), format, a...)
13 | }
14 |
15 | func BlueF(format string, a ...interface{}) string {
16 | return styleF(color.New(color.FgBlue), format, a...)
17 | }
18 |
19 | func YellowF(format string, a ...interface{}) string {
20 | return styleF(color.New(color.FgYellow), format, a...)
21 | }
22 |
23 | func GreenF(format string, a ...interface{}) string {
24 | return styleF(color.New(color.FgGreen), format, a...)
25 | }
26 |
27 | func RedF(format string, a ...interface{}) string {
28 | return styleF(color.New(color.FgRed), format, a...)
29 | }
30 |
31 | func styleF(style color.Style, format string, a ...interface{}) string {
32 | return logboek.Context(context.Background()).ColorizeF(style, format, a...)
33 | }
34 |
--------------------------------------------------------------------------------
/pkg/utils/controller_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 |
6 | appsv1 "k8s.io/api/apps/v1"
7 | batchv1 "k8s.io/api/batch/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | "k8s.io/apimachinery/pkg/api/meta"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | "k8s.io/apimachinery/pkg/types"
12 | )
13 |
14 | type ControllerMetadata interface {
15 | NewReplicaSetTemplate() corev1.PodTemplateSpec
16 | LabelSelector() *metav1.LabelSelector
17 | Namespace() string
18 | Name() string
19 | UID() types.UID
20 | }
21 |
22 | type ReplicaSetControllerWrapper struct {
23 | replicaSetTemplate corev1.PodTemplateSpec
24 | labelSelector *metav1.LabelSelector
25 | metadata metav1.Object
26 | }
27 |
28 | func (w *ReplicaSetControllerWrapper) NewReplicaSetTemplate() corev1.PodTemplateSpec {
29 | return w.replicaSetTemplate
30 | }
31 |
32 | func (w *ReplicaSetControllerWrapper) LabelSelector() *metav1.LabelSelector {
33 | return w.labelSelector
34 | }
35 |
36 | func (w *ReplicaSetControllerWrapper) Namespace() string {
37 | return w.metadata.GetNamespace()
38 | }
39 |
40 | func (w *ReplicaSetControllerWrapper) Name() string {
41 | return w.metadata.GetName()
42 | }
43 |
44 | func (w *ReplicaSetControllerWrapper) UID() types.UID {
45 | return w.metadata.GetUID()
46 | }
47 |
48 | func ControllerAccessor(controller interface{}) ControllerMetadata {
49 | w := &ReplicaSetControllerWrapper{}
50 | var err error
51 | w.metadata, err = meta.Accessor(controller)
52 | if err != nil {
53 | if debug() {
54 | fmt.Printf("ControllerAccessor for %T metadata error: %v", controller, err)
55 | }
56 | }
57 |
58 | switch c := controller.(type) {
59 | case *appsv1.Deployment:
60 | w.replicaSetTemplate = corev1.PodTemplateSpec{
61 | ObjectMeta: c.Spec.Template.ObjectMeta,
62 | Spec: c.Spec.Template.Spec,
63 | }
64 | w.labelSelector = c.Spec.Selector
65 | case *appsv1.StatefulSet:
66 | w.replicaSetTemplate = corev1.PodTemplateSpec{
67 | ObjectMeta: c.Spec.Template.ObjectMeta,
68 | Spec: c.Spec.Template.Spec,
69 | }
70 | w.labelSelector = c.Spec.Selector
71 | case *appsv1.DaemonSet:
72 | w.replicaSetTemplate = corev1.PodTemplateSpec{
73 | ObjectMeta: c.Spec.Template.ObjectMeta,
74 | Spec: c.Spec.Template.Spec,
75 | }
76 | w.labelSelector = c.Spec.Selector
77 | case *batchv1.Job:
78 | w.replicaSetTemplate = corev1.PodTemplateSpec{
79 | ObjectMeta: c.Spec.Template.ObjectMeta,
80 | Spec: c.Spec.Template.Spec,
81 | }
82 | w.labelSelector = c.Spec.Selector
83 | }
84 | return w
85 | }
86 |
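A minimal sketch of ControllerAccessor, which gives uniform metadata/selector/template access across the four supported controller kinds (the Deployment below is a placeholder built in-memory):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/werf/kubedog/pkg/utils"
)

func main() {
	deploy := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "myapp", Namespace: "production"},
	}

	meta := utils.ControllerAccessor(deploy)
	fmt.Println(meta.Name(), meta.Namespace()) // myapp production
	_ = meta.NewReplicaSetTemplate()           // pod template (zero-valued here)
	_ = meta.LabelSelector()                   // nil: no selector set above
}
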
--------------------------------------------------------------------------------
/pkg/utils/events.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sort"
7 | "strings"
8 |
9 | corev1 "k8s.io/api/core/v1"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
12 | "k8s.io/apimachinery/pkg/fields"
13 | "k8s.io/client-go/kubernetes"
14 | )
15 |
16 | // SortableEvents implements sort.Interface for []corev1.Event based on the LastTimestamp field
17 | type SortableEvents []corev1.Event
18 |
19 | func (list SortableEvents) Len() int {
20 | return len(list)
21 | }
22 |
23 | func (list SortableEvents) Swap(i, j int) {
24 | list[i], list[j] = list[j], list[i]
25 | }
26 |
27 | func (list SortableEvents) Less(i, j int) bool {
28 | return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time)
29 | }
30 |
31 | // FormatEventSource formats EventSource as a comma separated string excluding Host when empty
32 | func FormatEventSource(es corev1.EventSource) string {
33 | EventSourceString := []string{es.Component}
34 | if len(es.Host) > 0 {
35 | EventSourceString = append(EventSourceString, es.Host)
36 | }
37 | return strings.Join(EventSourceString, ", ")
38 | }
39 |
40 | func DescribeEvents(el *corev1.EventList) {
41 | if len(el.Items) == 0 {
42 | fmt.Printf("Events:\t\n")
43 | return
44 | }
45 | // w.Flush()
46 | sort.Sort(SortableEvents(el.Items))
47 | // w.Write(LEVEL_0, "Events:\n Type\tReason\tAge\tFrom\tMessage\n")
48 | fmt.Printf("Events:\n Type\tReason\tAge\tFrom\tAction\tMessage\n")
49 | // w.Write(LEVEL_1, "----\t------\t----\t----\t-------\n")
50 | fmt.Printf("----\t------\t----\t----\t-------\n")
51 | for _, e := range el.Items {
52 | var interval string
53 | if e.Count > 1 {
54 | interval = fmt.Sprintf("%s (x%d over %s)", TranslateTimestampSince(e.LastTimestamp), e.Count, TranslateTimestampSince(e.FirstTimestamp))
55 | } else {
56 | interval = TranslateTimestampSince(e.FirstTimestamp)
57 | }
58 | // w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
59 | fmt.Printf("%v\t%v\t%s\t%v\t%v\t%v\n",
60 | e.Type,
61 | e.Reason,
62 | interval,
63 | FormatEventSource(e.Source),
64 | e.Action,
65 | strings.TrimSpace(e.Message),
66 | )
67 | }
68 | }
69 |
70 | func EventFieldSelectorFromResource(obj interface{}) string {
71 | meta := ControllerAccessor(obj)
72 | field := fields.Set{}
73 | field["involvedObject.name"] = meta.Name()
74 | field["involvedObject.namespace"] = meta.Namespace()
75 | field["involvedObject.uid"] = string(meta.UID())
76 | return field.AsSelector().String()
77 | }
78 |
79 | func ListEventsForObject(ctx context.Context, client kubernetes.Interface, obj interface{}) (*corev1.EventList, error) {
80 | options := metav1.ListOptions{
81 | FieldSelector: EventFieldSelectorFromResource(obj),
82 | }
83 | evList, err := client.CoreV1().Events(ControllerAccessor(obj).Namespace()).List(ctx, options)
84 | if err != nil {
85 | return nil, err
86 | }
87 | return evList, nil
88 | }
89 |
90 | func EventFieldSelectorFromUnstructured(obj *unstructured.Unstructured) (field fields.Set, eventNs string) {
91 | field = fields.Set{}
92 | field["involvedObject.name"] = obj.GetName()
93 | field["involvedObject.apiVersion"] = obj.GetAPIVersion()
94 | field["involvedObject.kind"] = obj.GetKind()
95 |
96 | if ns := obj.GetNamespace(); ns != "" {
97 | field["involvedObject.namespace"] = ns
98 | eventNs = ns
99 | } else {
100 | eventNs = metav1.NamespaceDefault
101 | }
102 |
103 | return field, eventNs
104 | }
105 |
106 | func ListEventsForUnstructured(ctx context.Context, client kubernetes.Interface, obj *unstructured.Unstructured) (*corev1.EventList, error) {
107 | fieldsSet, eventsNs := EventFieldSelectorFromUnstructured(obj)
108 |
109 | eventsList, err := client.CoreV1().Events(eventsNs).List(ctx, metav1.ListOptions{
110 | FieldSelector: fieldsSet.AsSelector().String(),
111 | })
112 | if err != nil {
113 | return nil, fmt.Errorf("error getting events list: %w", err)
114 | }
115 |
116 | return eventsList, nil
117 | }
118 |
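A minimal sketch of listing events for an arbitrary (unstructured) object; the object identity is a placeholder, and only name/apiVersion/kind plus namespace (when set) end up in the field selector:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/werf/kubedog/pkg/kube"
	"github.com/werf/kubedog/pkg/utils"
)

func main() {
	if err := kube.Init(kube.InitOptions{}); err != nil {
		panic(err)
	}

	u := &unstructured.Unstructured{}
	u.SetName("myapp")
	u.SetNamespace("production")
	u.SetAPIVersion("apps/v1")
	u.SetKind("Deployment")

	events, err := utils.ListEventsForUnstructured(context.Background(), kube.Kubernetes, u)
	if err != nil {
		panic(err)
	}
	utils.DescribeEvents(events) // prints the tab-separated table shown above
}
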
--------------------------------------------------------------------------------
/pkg/utils/file.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "os"
5 | )
6 |
7 | // FileExists returns true if path exists
8 | func FileExists(path string) (bool, error) {
9 | _, err := os.Stat(path)
10 | if err != nil {
11 | if os.IsNotExist(err) {
12 | return false, nil
13 | }
14 | return false, err
15 | }
16 | return true, nil
17 | }
18 |
19 | func DirExists(path string) (bool, error) {
20 | fileInfo, err := os.Stat(path)
21 | if err != nil {
22 | if os.IsNotExist(err) {
23 | return false, nil
24 | }
25 | return false, err
26 | }
27 | return fileInfo.IsDir(), nil
28 | }
29 |
--------------------------------------------------------------------------------
/pkg/utils/json.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "strings"
7 |
8 | "k8s.io/client-go/util/jsonpath"
9 | )
10 |
11 | func JSONPath(tmpl string, input interface{}) (result string, found bool, err error) {
12 | jsonPath := jsonpath.New("")
13 |
14 | if err := jsonPath.Parse(fmt.Sprintf("{%s}", tmpl)); err != nil {
15 | return "", false, fmt.Errorf("error parsing jsonpath: %w", err)
16 | }
17 |
18 | resultBuf := &bytes.Buffer{}
19 | if err := jsonPath.Execute(resultBuf, input); err != nil {
20 | if debug() && !strings.HasSuffix(err.Error(), " is not found") {
21 | fmt.Printf("error executing jsonpath for tmpl %q and input %v: %s\n", tmpl, input, err)
22 | }
23 | return "", false, nil
24 | }
25 |
26 | if strings.TrimSpace(resultBuf.String()) == "" {
27 | return "", false, nil
28 | }
29 |
30 | return resultBuf.String(), true, nil
31 | }
32 |
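A minimal sketch of the JSONPath helper. Note that it wraps the template in "{...}" itself, so pass the bare expression; input is typically unstructured object content, mocked here with a plain map:

package main

import (
	"fmt"

	"github.com/werf/kubedog/pkg/utils"
)

func main() {
	obj := map[string]interface{}{
		"status": map[string]interface{}{"phase": "Running"},
	}

	result, found, err := utils.JSONPath(".status.phase", obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(result, found) // Running true

	_, found, _ = utils.JSONPath(".status.missing", obj)
	fmt.Println(found) // false: lookup misses are reported, not returned as errors
}
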
--------------------------------------------------------------------------------
/pkg/utils/pod_utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import corev1 "k8s.io/api/core/v1"
4 |
5 | func GetPodReplicaSetName(pod *corev1.Pod) string {
6 | for _, ref := range pod.OwnerReferences {
7 | if ref.Kind == "ReplicaSet" {
8 | return ref.Name
9 | }
10 | }
11 | return ""
12 | }
13 |
--------------------------------------------------------------------------------
/pkg/utils/ref.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "net/url"
7 | "strings"
8 |
9 | corev1 "k8s.io/api/core/v1"
10 | "k8s.io/apimachinery/pkg/api/meta"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/apimachinery/pkg/runtime"
13 | )
14 |
15 | // Errors that could be returned by GetReference.
16 | var (
17 | ErrNilObject = errors.New("can't reference a nil object")
18 | ErrNoSelfLink = errors.New("selfLink was empty, can't make reference")
19 | )
20 |
21 | var scheme = runtime.NewScheme()
22 |
23 | func GetReference(obj runtime.Object) (*corev1.ObjectReference, error) {
24 | if obj == nil {
25 | return nil, ErrNilObject
26 | }
27 | if ref, ok := obj.(*corev1.ObjectReference); ok {
28 | // Don't make a reference to a reference.
29 | return ref, nil
30 | }
31 |
32 | gvk := obj.GetObjectKind().GroupVersionKind()
33 |
34 | // if the object referenced is actually persisted, we can just get kind from meta
35 | // if we are building an object reference to something not yet persisted, we should fallback to scheme
36 | kind := gvk.Kind
37 | if len(kind) == 0 {
38 | // TODO: this is wrong
39 | gvks, _, err := scheme.ObjectKinds(obj)
40 | if err != nil {
41 | return nil, err
42 | }
43 | kind = gvks[0].Kind
44 | }
45 |
46 | // An object that implements only List has enough metadata to build a reference
47 | var listMeta metav1.Common
48 | objectMeta, err := meta.Accessor(obj)
49 | if err != nil {
50 | listMeta, err = meta.CommonAccessor(obj)
51 | if err != nil {
52 | return nil, err
53 | }
54 | } else {
55 | listMeta = objectMeta
56 | }
57 |
58 | // if the object referenced is actually persisted, we can also get version from meta
59 | version := gvk.GroupVersion().String()
60 | if len(version) == 0 {
61 | selfLink := listMeta.GetSelfLink()
62 | if len(selfLink) == 0 {
63 | return nil, ErrNoSelfLink
64 | }
65 | selfLinkURL, err := url.Parse(selfLink)
66 | if err != nil {
67 | return nil, err
68 | }
69 | // example paths: /<prefix>/<version>/*
70 | parts := strings.Split(selfLinkURL.Path, "/")
71 | if len(parts) < 3 {
72 | return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
73 | }
74 | version = parts[2]
75 | }
76 |
77 | // only has list metadata
78 | if objectMeta == nil {
79 | return &corev1.ObjectReference{
80 | Kind: kind,
81 | APIVersion: version,
82 | ResourceVersion: listMeta.GetResourceVersion(),
83 | }, nil
84 | }
85 |
86 | return &corev1.ObjectReference{
87 | Kind: kind,
88 | APIVersion: version,
89 | Name: objectMeta.GetName(),
90 | Namespace: objectMeta.GetNamespace(),
91 | UID: objectMeta.GetUID(),
92 | ResourceVersion: objectMeta.GetResourceVersion(),
93 | }, nil
94 | }
95 |
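A minimal sketch of GetReference. Since the package-level scheme here is created empty, the fallback kind lookup can't succeed, so the object must carry its own TypeMeta (the pod name/namespace are placeholders):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/werf/kubedog/pkg/utils"
)

func main() {
	pod := &corev1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "myapp-0", Namespace: "production"},
	}

	ref, err := utils.GetReference(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Kind, ref.APIVersion, ref.Namespace, ref.Name) // Pod v1 production myapp-0
}
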
--------------------------------------------------------------------------------
/pkg/utils/time.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "time"
5 |
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | "k8s.io/apimachinery/pkg/util/duration"
8 | )
9 |
10 | // TranslateTimestampSince renders the time elapsed since timestamp in the
11 | // approximate human-readable form used by kubectl (e.g. "17s", "5m", "2d"),
12 | // or an empty string if the timestamp is zero.
13 | func TranslateTimestampSince(timestamp metav1.Time) string {
14 | if timestamp.IsZero() {
15 | return ""
16 | }
17 |
18 | return duration.HumanDuration(time.Since(timestamp.Time))
19 | }
20 |
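21 | // Usage sketch (hypothetical; obj has standard Kubernetes object metadata):
22 | //
23 | //	age := TranslateTimestampSince(obj.CreationTimestamp) // e.g. "49m"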
--------------------------------------------------------------------------------
/playground/multitrack-1/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/werf/kubedog/pkg/kube"
5 | "github.com/werf/kubedog/pkg/trackers/rollout/multitrack"
6 | )
7 |
8 | func main() {
9 | err := kube.Init(kube.InitOptions{})
10 | if err != nil {
11 | panic(err.Error())
12 | }
13 |
14 | err = multitrack.Multitrack(kube.Kubernetes, multitrack.MultitrackSpecs{
15 | Deployments: []multitrack.MultitrackSpec{
16 | {ResourceName: "tiller-deploy", Namespace: "kube-system"},
17 | {ResourceName: "coredns", Namespace: "kube-system"},
18 | },
19 | Jobs: []multitrack.MultitrackSpec{
20 | {ResourceName: "myjob", Namespace: "myns"},
21 | },
22 | }, multitrack.MultitrackOptions{})
23 | if err != nil {
24 | panic(err.Error())
25 | }
26 |
27 | // err = rollout.TrackJobTillDone("helo", "", kube.Kubernetes, tracker.Options{})
28 | // if err != nil {
29 | // panic(err.Error())
30 | // }
31 | }
32 |
--------------------------------------------------------------------------------
/playground/table/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/gookit/color"
7 | "github.com/werf/logboek"
8 |
9 | "github.com/werf/kubedog/pkg/utils"
10 | )
11 |
12 | func main() {
13 | _ = logboek.Context(context.Background()).LogProcess("xxx").DoError(func() error {
14 | _ = logboek.Context(context.Background()).LogProcess("1").DoError(func() error {
15 | t := utils.NewTable(.7, .1, .1, .1)
16 | t.SetWidth(logboek.Context(context.Background()).Streams().ContentWidth() - 1)
17 | t.Header("NAME", "REPLICAS", "UP-TO-DATE", "AVAILABLE")
18 | t.Row("deploy/extended-monitoring", "1/1", 1, 1)
19 | // t.Row("deploy/extended-monitoring", "1/1", 1, 1, color.RedString("Error: See the server log for details. BUILD FAILED (total time: 1 second)"), color.RedString("Error: An individual language user's deviations from standard language norms in grammar, pronunciation and orthography are sometimes referred to as errors"))
20 | st := t.SubTable(.3, .15, .3, .15, .1)
21 | st.Header("NAME", "READY", "STATUS", "RESTARTS", "AGE")
22 | st.Rows([][]interface{}{
23 | {"654fc55df-5zs4m", "3/3", "Pulling", "0", "49m", color.Red.Sprint("pod/myapp-backend-cbdb856d7-bvplx Failed: Error: ImagePullBackOff"), color.Red.Sprint("pod/myapp-backend-cbdb856d7-b6ms8 Failed: Failed to pull image \"ubuntu:kaka\": rpc error: code Unknown desc = Error response from daemon: manifest for ubuntu:kaka not found")},
24 | {"654fc55df-hsm67", "3/3", color.Green.Sprint("Running") + " -> " + color.Red.Sprint("Terminating"), "0", "49m"},
25 | {"654fc55df-fffff", "3/3", "Ready", "0", "49m"},
26 | {"654fc55df-5zs4m", "3/3", "Pulling", "0", "49m", color.Red.Sprint("pod/myapp-backend-cbdb856d7-bvplx Failed: Error: ImagePullBackOff"), color.Red.Sprint("pod/myapp-backend-cbdb856d7-b6ms8 Failed: Failed to pull image \"ubuntu:kaka\": rpc error: code Unknown desc = Error response from daemon: manifest for ubuntu:kaka not found")},
27 | }...)
28 | st.Commit(color.Red.Sprint("pod/myapp-backend-cbdb856d7-b6ms8 Failed: Failed to pull image \"ubuntu:kaka\": rpc error: code Unknown desc = Error response from daemon: manifest for ubuntu:kaka not found"), color.Red.Sprint("pod/myapp-backend-cbdb856d7-b6ms8 Failed: Failed to pull image \"ubuntu:kaka\": rpc error: code Unknown desc = Error response from daemon: manifest for ubuntu:kaka not found"))
29 | t.Row("deploy/grafana", "1/1", 1, 1)
30 | t.Row("deploy/kube-state-metrics", "1/1", 1, 1)
31 | t.Row("deploy/madison-proxy-0450d21f50d1e3f3b3131a07bcbcfe85ec02dd9758b7ee12968ee6eaee7057fc", "1/1", 1, 1)
32 | t.Row("deploy/madison-proxy-2c5bdd9ba9f80394e478714dc299d007182bc49fed6c319d67b6645e4812b198", "1/1", 1, 1)
33 | t.Row("deploy/madison-proxy-9c6b5f859895442cb645c7f3d1ef647e1ed5388c159a9e5f7e1cf50163a878c1", "1/1", 1, "1 (-1)")
34 | t.Row("deploy/prometheus-metrics-adapter", "1/1", 1, "1 (-1)")
35 | t.Row("sts/mysql", "1/1", 1, "1 (-1)")
36 | t.Row("ds/node-exporter", "1/1", 1, "1 (-1)")
37 | t.Row("deploy/trickster", "1/1", 1, "1 (-1)")
38 | logboek.Context(context.Background()).Log(t.Render())
39 |
40 | return nil
41 | })
42 | return nil
43 | })
44 | }
45 |
--------------------------------------------------------------------------------
/scripts/ci/build_release.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | export RELEASE_BUILD_DIR=release-build
4 | export GO111MODULE=on
5 | export CGO_ENABLED=0
6 |
7 | go_build_v2() {
8 | VERSION=$1
9 |
10 | rm -rf $RELEASE_BUILD_DIR/$VERSION
11 | mkdir -p $RELEASE_BUILD_DIR/$VERSION
12 | chmod -R 0777 $RELEASE_BUILD_DIR/$VERSION
13 |
14 | for os in linux darwin windows ; do
15 | for arch in amd64 arm64 ; do
16 | if [ "$os" == "windows" ] && [ "$arch" == "arm64" ] ; then
17 | continue
18 | fi
19 |
20 | outputFile=$RELEASE_BUILD_DIR/$VERSION/$os-$arch/bin/kubedog
21 | if [ "$os" == "windows" ] ; then
22 | outputFile=$outputFile.exe
23 | fi
24 |
25 | echo "# Building kubedog $VERSION for $os $arch ..."
26 |
27 | GOOS=$os GOARCH=$arch \
28 | go build -ldflags="-s -w -X github.com/werf/kubedog.Version=$VERSION" \
29 | -o $outputFile github.com/werf/kubedog/cmd/kubedog
30 |
31 | echo "# Built $outputFile"
32 | done
33 | done
34 | }
35 |
36 | VERSION=$1
37 | if [ -z "$VERSION" ] ; then
38 | echo "Required version argument!" 1>&2
39 | echo 1>&2
40 | echo "Usage: $0 VERSION" 1>&2
41 | exit 1
42 | fi
43 |
44 | go_build_v2 "$VERSION" || { echo "Failed to build!" 1>&2 ; exit 1 ; }
45 |
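46 | # Example invocation (the version string is illustrative):
47 | #
48 | #   ./scripts/ci/build_release.sh v1.2.3
49 | #
50 | # Binaries are written to release-build/v1.2.3/<os>-<arch>/bin/kubedog
51 | # (with a .exe suffix for windows).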
--------------------------------------------------------------------------------
/scripts/ci/build_release_v2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh -e
2 |
3 | VERSION=$1
4 | if [ -z "$VERSION" ] ; then
5 | echo "Required version argument!" 1>&2
6 | echo 1>&2
7 | echo "Usage: $0 VERSION" 1>&2
8 | exit 1
9 | fi
10 |
11 | export CGO_ENABLED=0
12 |
13 | gox -osarch="linux/amd64 linux/arm64 darwin/amd64 darwin/arm64 windows/amd64" \
14 | -output="release-build/$VERSION/{{.OS}}-{{.Arch}}/bin/kubedog" \
15 | -ldflags="-s -w -X github.com/werf/kubedog.Version=$VERSION" \
16 | github.com/werf/kubedog/cmd/kubedog
17 |
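18 | # Example invocation (gox must be installed, as done in trdl.yaml; the
19 | # version string is illustrative):
20 | #
21 | #   ./scripts/ci/build_release_v2.sh v1.2.3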
--------------------------------------------------------------------------------
/trdl.yaml:
--------------------------------------------------------------------------------
1 | docker_image: golang:1.21-alpine@sha256:fd78f2fb1e49bcf343079bbbb851c936a18fc694df993cbddaa24ace0cc724c5
2 | commands:
3 | - go install github.com/mitchellh/gox@8c3b2b9e647dc52457d6ee7b5adcf97e2bafe131
4 | - ./scripts/ci/build_release_v2.sh {{ .Tag }} && cp -a release-build/{{ .Tag }}/* /result
5 |
--------------------------------------------------------------------------------