├── .gitattributes ├── .github ├── dependabot.yml └── workflows │ ├── compliance.yml │ ├── docker-image.yml │ └── go.yml ├── .gitignore ├── Containerfile ├── LICENSE ├── README.md ├── cmd └── icinga-kubernetes │ └── main.go ├── config.example.yml ├── doc ├── 01-About.md ├── 02-Installation.md ├── 02-Installation.md.d │ └── .gitignore ├── 03-Configuration.md └── res │ ├── icinga-kubernetes-dashboard.png │ ├── icinga-kubernetes-deployment.png │ ├── icinga-kubernetes-favorites-dashboard.png │ ├── icinga-kubernetes-installation.png │ ├── icinga-kubernetes-overview.png │ ├── icinga-kubernetes-replicaset.png │ └── icinga-kubernetes-statefulset.png ├── go.mod ├── go.sum ├── icinga-kubernetes.example.yml ├── internal ├── cache │ └── v1 │ │ └── multiplexers.go ├── channel_multiplexer.go ├── multiplex.go ├── notifications.go ├── prometheus.go └── version.go ├── pkg ├── cluster │ └── cluster.go ├── com │ └── basic_auth_transport.go ├── daemon │ └── config.go ├── database │ ├── cleanup.go │ ├── contracts.go │ ├── database.go │ ├── driver.go │ ├── features.go │ ├── pgsql_driver.go │ ├── quoter.go │ ├── relations.go │ ├── utils.go │ └── uuid.go ├── metrics │ ├── config.go │ └── metrics.go ├── notifications │ ├── client.go │ ├── config.go │ ├── contracts.go │ └── event.go ├── schema │ └── v1 │ │ ├── annotation.go │ │ ├── bitmask.go │ │ ├── cluster.go │ │ ├── config.go │ │ ├── config_map.go │ │ ├── container.go │ │ ├── contracts.go │ │ ├── cron_job.go │ │ ├── daemon_set.go │ │ ├── deployment.go │ │ ├── endpoint.go │ │ ├── event.go │ │ ├── favorite.go │ │ ├── icinga_state.go │ │ ├── ingress.go │ │ ├── instance.go │ │ ├── job.go │ │ ├── label.go │ │ ├── metric.go │ │ ├── metrics.go │ │ ├── namespace.go │ │ ├── node.go │ │ ├── persistent_volume.go │ │ ├── pod.go │ │ ├── pvc.go │ │ ├── replica_set.go │ │ ├── resource_annotation.go │ │ ├── resource_label.go │ │ ├── secret.go │ │ ├── selector.go │ │ ├── service.go │ │ ├── stateful_set.go │ │ └── utils.go └── sync │ └── v1 │ ├── 
controller.go │ ├── event_handler.go │ ├── features.go │ ├── sink.go │ └── sync.go └── schema └── mysql ├── embed.go └── schema.sql /.gitattributes: -------------------------------------------------------------------------------- 1 | # Include version information on `git archive' 2 | /internal/version.go export-subst 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" 8 | -------------------------------------------------------------------------------- /.github/workflows/compliance.yml: -------------------------------------------------------------------------------- 1 | name: Compliance 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: {} 7 | 8 | permissions: 9 | # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps?apiVersion=2022-11-28#repository-permissions-for-contents 10 | contents: read 11 | 12 | jobs: 13 | compliance: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: actions/setup-go@v5 18 | with: 19 | go-version: stable 20 | 21 | - name: Download modules to local cache 22 | run: go mod download 23 | 24 | - name: Install go-licenses 25 | run: go install github.com/google/go-licenses@latest 26 | 27 | - name: Check licenses against allow list 28 | run: | 29 | # Pass allowed licenses as SPDX Identifiers: https://spdx.org/licenses/ 30 | go-licenses check github.com/icinga/icinga-kubernetes/... 
\ 31 | --allowed_licenses AGPL-3.0,Apache-2.0,BSD-2-Clause,BSD-3-Clause,ISC,MIT,MPL-2.0 32 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | release: 11 | types: 12 | - published 13 | 14 | jobs: 15 | docker: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Docker meta 19 | id: meta 20 | uses: docker/metadata-action@v5 21 | with: 22 | images: | 23 | icinga/icinga-kubernetes 24 | tags: | 25 | type=edge 26 | type=semver,pattern={{version}} 27 | type=semver,pattern={{major}}.{{minor}} 28 | 29 | - name: Set up QEMU 30 | uses: docker/setup-qemu-action@v3 31 | 32 | - name: Set up Docker Buildx 33 | uses: docker/setup-buildx-action@v3 34 | 35 | - name: Login to Docker Hub 36 | if: github.event_name != 'pull_request' 37 | uses: docker/login-action@v3 38 | with: 39 | username: ${{ secrets.DOCKERHUB_USERNAME }} 40 | password: ${{ secrets.DOCKERHUB_TOKEN }} 41 | 42 | - name: Build and push 43 | uses: docker/build-push-action@v6 44 | with: 45 | file: Containerfile 46 | labels: ${{ steps.meta.outputs.labels }} 47 | platforms: linux/amd64,linux/arm64 48 | push: ${{ github.event_name != 'pull_request' }} 49 | tags: ${{ steps.meta.outputs.tags }} 50 | # Keep the .git to allow including the commit in the --version output, see also: 51 | # https://docs.docker.com/build/building/context/#keep-git-directory 52 | build-args: | 53 | BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 54 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: { } 7 | 8 | jobs: 9 | build-and-test: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: 
actions/checkout@v4 13 | 14 | - uses: actions/setup-go@v5 15 | with: 16 | go-version: stable 17 | 18 | - name: Build 19 | run: go build -gcflags="-m" ./... 20 | 21 | - name: Test 22 | run: go test -v -race ./... 23 | 24 | lint: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | 29 | - uses: actions/setup-go@v5 30 | with: 31 | go-version: stable 32 | 33 | - uses: golangci/golangci-lint-action@v8 34 | with: 35 | version: latest 36 | only-new-issues: true 37 | args: -E gosec --timeout=2m --verbose 38 | 39 | modtidy: 40 | runs-on: ubuntu-latest 41 | steps: 42 | - uses: actions/checkout@v4 43 | 44 | - uses: actions/setup-go@v5 45 | with: 46 | go-version: stable 47 | 48 | - name: Run go mod tidy 49 | run: | 50 | go mod tidy 51 | gitdiff="$(git diff -U0)" 52 | echo "$gitdiff" 53 | test -z "$gitdiff" 54 | 55 | vendor-diff: 56 | if: github.event_name == 'pull_request' 57 | runs-on: ubuntu-latest 58 | steps: 59 | - uses: actions/setup-go@v5 60 | with: 61 | go-version: stable 62 | 63 | - name: Checkout base commit 64 | uses: actions/checkout@v4 65 | with: 66 | path: a 67 | ref: ${{ github.base_ref }} 68 | 69 | - name: Download dependencies of base commit 70 | run: go mod vendor 71 | working-directory: a 72 | 73 | - name: Checkout PR 74 | uses: actions/checkout@v4 75 | with: 76 | path: b 77 | 78 | - name: Download dependencies of PR 79 | run: go mod vendor 80 | working-directory: b 81 | 82 | - name: Diff of dependencies 83 | run: diff -ur --color=always a/vendor b/vendor || true 84 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.git* 3 | -------------------------------------------------------------------------------- /Containerfile: -------------------------------------------------------------------------------- 1 | FROM golang AS build 2 | 3 | WORKDIR /build 4 | COPY go.mod go.sum ./ 5 | RUN go mod download 6 
| 7 | COPY . . 8 | RUN CGO_ENABLED=0 GOOS=linux go build -ldflags '-s -w' -o /icinga-kubernetes cmd/icinga-kubernetes/main.go 9 | 10 | FROM scratch 11 | 12 | COPY < 2 | 3 | # Installing Icinga for Kubernetes 4 | 5 | ![Icinga for Kubernetes](res/icinga-kubernetes-installation.png) 6 | 7 | ## Using Helm 8 | 9 | For deploying Icinga for Kubernetes and its dependencies within a Kubernetes cluster, 10 | the recommended approach is to use our 11 | [Helm charts](https://github.com/Icinga/helm-charts/tree/main/charts/icinga-stack) to 12 | deploy a ready-to-use Icinga stack. 13 | 14 | ## Alternative Installation Methods 15 | 16 | Though any of the Icinga for Kubernetes components can run either inside or outside Kubernetes clusters, 17 | including the database, common setup approaches include the following: 18 | 19 | * All components run inside a Kubernetes cluster. 20 | * All components run outside a Kubernetes cluster. 21 | * Only the Icinga for Kubernetes daemon runs inside a Kubernetes cluster, 22 | requiring configuration for an external service to connect to the database outside the cluster. 23 | 24 | ### Setting up the Database 25 | 26 | A MySQL (≥8.0) or MariaDB (≥10.5) database is required to run Icinga for Kubernetes. 27 | Please follow the steps, which guide you through setting up the database and user, and importing the schema. 28 | 29 | #### Setting up a MySQL or MariaDB Database 30 | 31 | Set up a MySQL database for Icinga for Kubernetes: 32 | 33 | ``` 34 | CREATE DATABASE kubernetes; 35 | CREATE USER 'kubernetes'@'localhost' IDENTIFIED BY 'CHANGEME'; 36 | GRANT ALL ON kubernetes.* TO 'kubernetes'@'localhost'; 37 | ``` 38 | 39 | Icinga for Kubernetes automatically imports the schema on first start and also applies schema migrations if required. 40 | 41 | ### Running Within Kubernetes 42 | 43 | Instead of using Helm charts, you can deploy Icinga for Kubernetes using the 44 | [sample configuration](../icinga-kubernetes.example.yml). 
45 | First, create a local copy and adjust the database credentials as needed, 46 | and modify the connection configuration if necessary. 47 | The sample configuration provides an overview of general settings, 48 | and all available settings are detailed under [Configuration](03-Configuration.md). 49 | 50 | ### Running Out-of-Cluster 51 | 52 | #### Installing via Package 53 | 54 | To install Icinga for Kubernetes outside of a Kubernetes cluster, 55 | it is recommended to use prebuilt packages available for all supported platforms from 56 | our official release [repository](https://packages.icinga.com). 57 | Follow the steps provided for your target operating system to set up the repository and 58 | install Icinga for Kubernetes via the `icinga-kubernetes` package. 59 | 60 | ##### Configuring Icinga for Kubernetes 61 | 62 | Icinga for Kubernetes installs its configuration file to `/etc/icinga-kubernetes/config.yml`, 63 | pre-populating most of the settings for a local setup. Before running Icinga for Kubernetes, 64 | adjust the database credentials and, if necessary, the connection configuration. 65 | The configuration file explains general settings. 66 | All available settings can be found under [Configuration](03-Configuration.md#configuration-via-yaml-file). 67 | 68 | The `icinga-kubernetes` package automatically installs the required systemd unit files to run Icinga for Kubernetes. 69 | The service instances are configured via environment files in `/etc/icinga-kubernetes`. 70 | More about the configuration via environment files can be found under 71 | [Configuration](03-Configuration.md#managing-instances-with-environment-files). 72 | 73 | To connect to a Kubernetes cluster, a locally accessible 74 | [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file is needed. 75 | 76 | If you're only planning to monitor a single Kubernetes cluster, you can simply edit 77 | `/etc/icinga-kubernetes/default.env`. 
78 | This file serves as the configuration for your Icinga for Kubernetes default instance. It contains all the necessary 79 | parameters to connect to your Kubernetes cluster, such as the `KUBECONFIG` variable pointing to your kubeconfig file. 80 | More about the `default.env` file can be found under [Configuration](03-Configuration.md#default-environment). 81 | 82 | ##### Configuring multiple Instances of Icinga for Kubernetes for Multi-Cluster Support 83 | 84 | If you're planning to monitor multiple Kubernetes clusters, you can add additional environment files. 85 | 86 | **Add a new Instance**: 87 | 88 | 1. Create a new environment file in `/etc/icinga-kubernetes`. The file name will be the instance name for the 89 | systemd service. For example `test-cluster.env` will start the service instance `icinga-kubernetes@test-cluster`. 90 | 2. Set the `KUBECONFIG` environment variable to configure how Icinga for Kubernetes can connect to the cluster. 91 | 3. Set the `ICINGA_FOR_KUBERNETES_CLUSTER_NAME` environment variable to configure the cluster name. If the environment 92 | variable is not set the cluster name will be the environment file's name. 93 | 4. You can add additional environment variables to override the `config.yml` 94 | ([Available environment variables](03-Configuration.md#configuration-via-environment-variables)). 95 | 5. Reload the systemd daemon with `systemctl daemon-reload` to recognize the new cluster configs. 96 | 97 | An example `test-cluster.env` file could look like the following: 98 | 99 | ```bash 100 | KUBECONFIG=$HOME/.kube/config 101 | ICINGA_FOR_KUBERNETES_CLUSTER_NAME="Test Cluster" 102 | ICINGA_FOR_KUBERNETES_PROMETHEUS_URL=http://localhost:9090 103 | ``` 104 | 105 | **Remove Instance**: 106 | 107 | 1. If running, stop the service instance manually. For `test-cluster` it would be 108 | `systemctl stop icinga-kubernetes@test-cluster`. 109 | 2. Remove the corresponding environment file from `/etc/icinga-kubernetes`. 110 | 3. 
Reload the systemd daemon with `systemctl daemon-reload` to make sure the daemon forgets the file. 111 | 112 | !!! Warning 113 | 114 | If you stop the service without removing the environment file, the instance will restart when the service is 115 | restarted. 116 | 117 | If you remove the environment file without stopping the instance, the instance will try to restart and 118 | fail when the service is restarted. 119 | 120 | You can also explicitly define which environment files should be used to start service instances. For this, 121 | you can adjust the `/etc/default/icinga-kubernetes` file. 122 | More about this can be found under [Configuration](03-Configuration.md#service-configuration). 123 | 124 | ##### Running Icinga for Kubernetes 125 | 126 | After configuring, please run the following command to enable and start all configured Icinga for Kubernetes 127 | service instances: 128 | 129 | ```bash 130 | systemctl enable --now icinga-kubernetes 131 | ``` 132 | 133 | ##### Stopping Icinga for Kubernetes 134 | 135 | The following command will stop all running Icinga for Kubernetes service instances: 136 | 137 | ```bash 138 | systemctl stop icinga-kubernetes 139 | ``` 140 | 141 | #### Using a Container 142 | 143 | Before running Icinga for Kubernetes, create a local `config.yml` 144 | using [the sample configuration](../config.example.yml) 145 | and adjust the database credentials and, if necessary, the connection configuration. 146 | The configuration file explains general settings. 147 | All available settings can be found under [Configuration](03-Configuration.md). 148 | 149 | With locally accessible 150 | [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 151 | and `config.yml` files, 152 | run the `icinga/icinga-kubernetes` image using a container runtime of your choice, e.g. 
Docker: 153 | 154 | ```bash 155 | export KUBECONFIG=$HOME/.kube/config 156 | export ICINGA_KUBERNETES_CONFIG=./config.yml 157 | docker run --rm -v $ICINGA_KUBERNETES_CONFIG:/config.yml -v $KUBECONFIG:/.kube/config icinga/icinga-kubernetes 158 | ``` 159 | 160 | #### From Source 161 | 162 | ##### Using `go install` 163 | 164 | You can build and install `icinga-kubernetes` as follows: 165 | 166 | ```bash 167 | go install github.com/icinga/icinga-kubernetes@latest 168 | ``` 169 | 170 | This should place the `icinga-kubernetes` binary in your configured `$GOBIN` path which defaults to `$GOPATH/bin` or 171 | `$HOME/go/bin` if the `GOPATH` environment variable is not set. 172 | 173 | ##### Build from Source 174 | 175 | Download or clone the source and run the following command from the source's root directory. 176 | 177 | ```bash 178 | go build -o icinga-kubernetes cmd/icinga-kubernetes/main.go 179 | ``` 180 | 181 | ##### Configuring Icinga for Kubernetes 182 | 183 | Before running Icinga for Kubernetes, create a local `config.yml` 184 | using [the sample configuration](../config.example.yml) 185 | and adjust the database credentials and, if necessary, the connection configuration. 186 | The configuration file explains general settings. 187 | All available settings can be found under [Configuration](03-Configuration.md). 188 | 189 | ##### Running Icinga for Kubernetes 190 | 191 | With locally accessible 192 | [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) 193 | and `config.yml` files, `icinga-kubernetes` can be executed by running: 194 | 195 | ```bash 196 | icinga-kubernetes -config /path/to/config.yml 197 | ``` 198 | 199 | ### Kubernetes Access Control Requirements 200 | 201 | Icinga for Kubernetes requires the following read-only permissions on all resources within a Kubernetes cluster: 202 | 203 | * **get**: Allows to retrieve details of resources. 204 | * **list**: Allows to list all instances of resources. 
205 | * **watch**: Allows to watch for changes to resources. 206 | 207 | You can grant these permissions by creating a `ClusterRole` with the necessary rules and 208 | binding it to an appropriate service account or user. 209 | Below is an example `ClusterRole` configuration: 210 | 211 | ``` 212 | apiVersion: rbac.authorization.k8s.io/v1 213 | kind: ClusterRole 214 | metadata: 215 | name: icinga-for-kubernetes 216 | rules: 217 | - apiGroups: [ "*" ] 218 | resources: [ "*" ] 219 | verbs: [ "get", "list", "watch" ] 220 | ``` 221 | 222 | A complete example of the Kubernetes RBAC configuration is included in the 223 | [sample configuration](../icinga-kubernetes.example.yml). As a result, 224 | you don't need to manually configure access when deploying Icinga for Kubernetes using the sample configuration or our 225 | [Helm charts](https://github.com/Icinga/helm-charts/tree/main/charts/icinga-stack). 226 | 227 | **When running Icinga for Kubernetes outside of a Kubernetes cluster, 228 | it is required to connect as a user with the necessary permissions.** 229 | 230 | ### Installing Icinga for Kubernetes Web 231 | 232 | With Icinga for Kubernetes and the database fully set up, you have completed the instructions here and can proceed to 233 | [installing Icinga for Kubernetes Web](https://icinga.com/docs/icinga-kubernetes-web/latest/doc/02-Installation/) 234 | which connects to the database to display and work with the monitoring data. 
235 | 236 | -------------------------------------------------------------------------------- /doc/02-Installation.md.d/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-dashboard.png -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-deployment.png -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-favorites-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-favorites-dashboard.png -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-installation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-installation.png -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-overview.png 
-------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-replicaset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-replicaset.png -------------------------------------------------------------------------------- /doc/res/icinga-kubernetes-statefulset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Icinga/icinga-kubernetes/de88147599ae6e86c1a8746cdf3dd88f32c126f5/doc/res/icinga-kubernetes-statefulset.png -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/icinga/icinga-kubernetes 2 | 3 | go 1.23.0 4 | 5 | require ( 6 | github.com/go-co-op/gocron v1.37.0 7 | github.com/go-logr/logr v1.4.2 8 | github.com/go-sql-driver/mysql v1.9.2 9 | github.com/google/uuid v1.6.0 10 | github.com/icinga/icinga-go-library v0.6.4-0.20250519095646-5a1c5090f238 11 | github.com/jmoiron/sqlx v1.4.0 12 | github.com/lib/pq v1.10.9 13 | github.com/okzk/sdnotify v0.0.0-20240725214427-1c1fdd37c5ac 14 | github.com/pkg/errors v0.9.1 15 | github.com/prometheus/client_golang v1.20.4 16 | github.com/prometheus/common v0.59.1 17 | github.com/spf13/pflag v1.0.5 18 | go.uber.org/zap v1.27.0 19 | golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 20 | golang.org/x/sync v0.14.0 21 | k8s.io/api v0.31.1 22 | k8s.io/apimachinery v0.31.1 23 | k8s.io/client-go v0.31.1 24 | k8s.io/klog/v2 v2.130.1 25 | k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 26 | ) 27 | 28 | require ( 29 | filippo.io/edwards25519 v1.1.0 // indirect 30 | github.com/caarlos0/env/v11 v11.3.1 // indirect 31 | github.com/creasty/defaults v1.8.0 // indirect 32 | github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect 33 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect 34 | github.com/fatih/color v1.18.0 // indirect 35 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 36 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 37 | github.com/go-openapi/jsonreference v0.21.0 // indirect 38 | github.com/go-openapi/swag v0.23.0 // indirect 39 | github.com/goccy/go-yaml v1.13.0 // indirect 40 | github.com/gogo/protobuf v1.3.2 // indirect 41 | github.com/golang/protobuf v1.5.4 // indirect 42 | github.com/google/gnostic-models v0.6.8 // indirect 43 | github.com/google/go-cmp v0.6.0 // indirect 44 | github.com/google/gofuzz v1.2.0 // indirect 45 | github.com/imdario/mergo v0.3.16 // indirect 46 | github.com/jessevdk/go-flags v1.6.1 // indirect 47 | github.com/josharian/intern v1.0.0 // indirect 48 | github.com/json-iterator/go v1.1.12 // indirect 49 | github.com/mailru/easyjson v0.7.7 // indirect 50 | github.com/mattn/go-colorable v0.1.13 // indirect 51 | github.com/mattn/go-isatty v0.0.20 // indirect 52 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 53 | github.com/modern-go/reflect2 v1.0.2 // indirect 54 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 55 | github.com/prometheus/client_model v0.6.1 // indirect 56 | github.com/robfig/cron/v3 v3.0.1 // indirect 57 | github.com/ssgreg/journald v1.0.0 // indirect 58 | github.com/x448/float16 v0.8.4 // indirect 59 | go.uber.org/atomic v1.11.0 // indirect 60 | go.uber.org/multierr v1.11.0 // indirect 61 | golang.org/x/net v0.30.0 // indirect 62 | golang.org/x/oauth2 v0.23.0 // indirect 63 | golang.org/x/sys v0.26.0 // indirect 64 | golang.org/x/term v0.25.0 // indirect 65 | golang.org/x/text v0.19.0 // indirect 66 | golang.org/x/time v0.6.0 // indirect 67 | google.golang.org/protobuf v1.34.2 // indirect 68 | gopkg.in/inf.v0 v0.9.1 // indirect 69 | gopkg.in/yaml.v2 v2.4.0 // indirect 70 | gopkg.in/yaml.v3 v3.0.1 // indirect 
71 | k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 // indirect 72 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 73 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 74 | sigs.k8s.io/yaml v1.4.0 // indirect 75 | ) 76 | -------------------------------------------------------------------------------- /icinga-kubernetes.example.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: icinga 5 | 6 | --- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | name: icinga-for-kubernetes 11 | namespace: icinga 12 | 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: ClusterRole 16 | metadata: 17 | name: icinga-for-kubernetes 18 | rules: 19 | - apiGroups: [ "*" ] 20 | resources: [ "*" ] 21 | verbs: [ "get", "list", "watch" ] 22 | 23 | --- 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | kind: ClusterRoleBinding 26 | metadata: 27 | name: icinga-for-kubernetes 28 | roleRef: 29 | apiGroup: "rbac.authorization.k8s.io" 30 | kind: ClusterRole 31 | name: icinga-for-kubernetes 32 | subjects: 33 | - kind: ServiceAccount 34 | name: icinga-for-kubernetes 35 | namespace: icinga 36 | # - kind: User 37 | # name: icinga-for-kubernetes 38 | 39 | --- 40 | apiVersion: v1 41 | kind: ConfigMap 42 | metadata: 43 | name: icinga-for-kubernetes 44 | namespace: icinga 45 | data: 46 | config.yml: |- 47 | # This is the configuration file for Icinga for Kubernetes. 48 | 49 | # Connection configuration for the database to which Icinga for Kubernetes synchronizes data. 50 | # This is also the database used in Icinga for Kubernetes Web to view and work with the data. 51 | database: 52 | # Database type. Only 'mysql' is supported yet which is the default. 53 | # type: mysql 54 | 55 | # Database host or absolute Unix socket path. 56 | host: mysql 57 | 58 | # Database port. By default, the MySQL port. 59 | # port: 60 | 61 | # Database name. 
62 | database: kubernetes 63 | 64 | # Database user. 65 | user: kubernetes 66 | 67 | # Database password. 68 | password: CHANGEME 69 | 70 | # Configuration for Prometheus metrics API. 71 | prometheus: 72 | # Prometheus server URL. 73 | # url: http://localhost:9090 74 | 75 | --- 76 | apiVersion: v1 77 | kind: Pod 78 | metadata: 79 | name: icinga-for-kubernetes 80 | namespace: icinga 81 | spec: 82 | serviceAccountName: icinga-for-kubernetes 83 | containers: 84 | - name: icinga-for-kubernetes 85 | image: icinga/icinga-kubernetes 86 | volumeMounts: 87 | - name: config-volume 88 | mountPath: /config.yml 89 | subPath: config.yml 90 | volumes: 91 | - name: config-volume 92 | configMap: 93 | name: icinga-for-kubernetes 94 | -------------------------------------------------------------------------------- /internal/cache/v1/multiplexers.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "context" 5 | "github.com/icinga/icinga-kubernetes/internal" 6 | "golang.org/x/sync/errgroup" 7 | ) 8 | 9 | type EventsMultiplexer interface { 10 | UpsertEvents() internal.ChannelMultiplexer[any] 11 | DeleteEvents() internal.ChannelMultiplexer[any] 12 | Run(context.Context) error 13 | } 14 | 15 | type EventsMultiplexers interface { 16 | DaemonSets() EventsMultiplexer 17 | Deployments() EventsMultiplexer 18 | Nodes() EventsMultiplexer 19 | Pods() EventsMultiplexer 20 | ReplicaSets() EventsMultiplexer 21 | Services() EventsMultiplexer 22 | StatefulSets() EventsMultiplexer 23 | Run(context.Context) error 24 | } 25 | 26 | func Multiplexers() EventsMultiplexers { 27 | return m 28 | } 29 | 30 | type events struct { 31 | upsertEvents internal.ChannelMultiplexer[any] 32 | deleteEvents internal.ChannelMultiplexer[any] 33 | } 34 | 35 | func (e events) UpsertEvents() internal.ChannelMultiplexer[any] { 36 | return e.upsertEvents 37 | } 38 | 39 | func (e events) DeleteEvents() internal.ChannelMultiplexer[any] { 40 | return 
e.deleteEvents 41 | } 42 | 43 | func (e events) Run(ctx context.Context) error { 44 | g, ctx := errgroup.WithContext(ctx) 45 | 46 | g.Go(func() error { 47 | return e.upsertEvents.Run(ctx) 48 | }) 49 | 50 | g.Go(func() error { 51 | return e.deleteEvents.Run(ctx) 52 | }) 53 | 54 | return g.Wait() 55 | } 56 | 57 | type multiplexers struct { 58 | daemonSets events 59 | deployments events 60 | nodes events 61 | pods events 62 | replicaSets events 63 | services events 64 | statefulSets events 65 | } 66 | 67 | func (m multiplexers) DaemonSets() EventsMultiplexer { 68 | return m.daemonSets 69 | } 70 | 71 | func (m multiplexers) Deployments() EventsMultiplexer { 72 | return m.deployments 73 | } 74 | 75 | func (m multiplexers) Nodes() EventsMultiplexer { 76 | return m.nodes 77 | } 78 | 79 | func (m multiplexers) Pods() EventsMultiplexer { 80 | return m.pods 81 | } 82 | 83 | func (m multiplexers) ReplicaSets() EventsMultiplexer { 84 | return m.replicaSets 85 | } 86 | 87 | func (m multiplexers) Services() EventsMultiplexer { 88 | return m.services 89 | } 90 | 91 | func (m multiplexers) StatefulSets() EventsMultiplexer { 92 | return m.statefulSets 93 | } 94 | 95 | func (m multiplexers) Run(ctx context.Context) error { 96 | g, ctx := errgroup.WithContext(ctx) 97 | 98 | g.Go(func() error { 99 | return m.daemonSets.Run(ctx) 100 | }) 101 | 102 | g.Go(func() error { 103 | return m.deployments.Run(ctx) 104 | }) 105 | 106 | g.Go(func() error { 107 | return m.nodes.Run(ctx) 108 | }) 109 | 110 | g.Go(func() error { 111 | return m.pods.Run(ctx) 112 | }) 113 | 114 | g.Go(func() error { 115 | return m.replicaSets.Run(ctx) 116 | }) 117 | 118 | g.Go(func() error { 119 | return m.services.Run(ctx) 120 | }) 121 | 122 | g.Go(func() error { 123 | return m.statefulSets.Run(ctx) 124 | }) 125 | 126 | return g.Wait() 127 | } 128 | 129 | var m multiplexers 130 | 131 | func init() { 132 | m = multiplexers{ 133 | daemonSets: events{ 134 | upsertEvents: internal.NewChannelMux[any](), 135 | deleteEvents: 
internal.NewChannelMux[any](), 136 | }, 137 | deployments: events{ 138 | upsertEvents: internal.NewChannelMux[any](), 139 | deleteEvents: internal.NewChannelMux[any](), 140 | }, 141 | nodes: events{ 142 | upsertEvents: internal.NewChannelMux[any](), 143 | deleteEvents: internal.NewChannelMux[any](), 144 | }, 145 | pods: events{ 146 | upsertEvents: internal.NewChannelMux[any](), 147 | deleteEvents: internal.NewChannelMux[any](), 148 | }, 149 | replicaSets: events{ 150 | upsertEvents: internal.NewChannelMux[any](), 151 | deleteEvents: internal.NewChannelMux[any](), 152 | }, 153 | services: events{ 154 | upsertEvents: internal.NewChannelMux[any](), 155 | deleteEvents: internal.NewChannelMux[any](), 156 | }, 157 | statefulSets: events{ 158 | upsertEvents: internal.NewChannelMux[any](), 159 | deleteEvents: internal.NewChannelMux[any](), 160 | }, 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /internal/channel_multiplexer.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "golang.org/x/sync/errgroup" 6 | "sync/atomic" 7 | ) 8 | 9 | // ChannelMultiplexer is a multiplexer for channels of variable types. 10 | // It fans out all input channels to all output channels. 11 | type ChannelMultiplexer[T any] interface { 12 | // In adds the given input channel reading. 13 | In() chan<- T 14 | 15 | AddIn(chan T) 16 | 17 | // Out returns a new output channel that receives from all input channels. 18 | Out() <-chan T 19 | 20 | // AddOut registers the given output channel to receive from all input channels. 21 | AddOut(chan T) 22 | 23 | // Run starts multiplexing of all input channels to all output channels. 24 | // Once run is called, cannot be modified and will panic. 25 | Run(context.Context) error 26 | } 27 | 28 | // NewChannelMux returns a new ChannelMultiplexer initialized with at least one input channel. 
29 | func NewChannelMux[T any](inChannels ...chan T) ChannelMultiplexer[T] { 30 | return &channelMultiplexer[T]{ 31 | inAdded: inChannels, 32 | } 33 | } 34 | 35 | type channelMultiplexer[T any] struct { 36 | in []chan T 37 | inAdded []chan T 38 | out []chan T 39 | outAdded []chan T 40 | started atomic.Bool 41 | } 42 | 43 | func (mux *channelMultiplexer[T]) In() chan<- T { 44 | if mux.started.Load() { 45 | panic("channelMultiplexer already started") 46 | } 47 | 48 | channel := make(chan T) 49 | 50 | mux.in = append(mux.in, channel) 51 | 52 | return channel 53 | } 54 | 55 | func (mux *channelMultiplexer[T]) AddIn(channel chan T) { 56 | if mux.started.Load() { 57 | panic("channelMultiplexer already started") 58 | } 59 | 60 | mux.inAdded = append(mux.inAdded, channel) 61 | } 62 | 63 | func (mux *channelMultiplexer[T]) Out() <-chan T { 64 | if mux.started.Load() { 65 | panic("channelMultiplexer already started") 66 | } 67 | 68 | channel := make(chan T) 69 | mux.out = append(mux.out, channel) 70 | 71 | return channel 72 | } 73 | 74 | func (mux *channelMultiplexer[T]) AddOut(channel chan T) { 75 | if mux.started.Load() { 76 | panic("channelMultiplexer already started") 77 | } 78 | 79 | mux.outAdded = append(mux.outAdded, channel) 80 | } 81 | 82 | func (mux *channelMultiplexer[T]) Run(ctx context.Context) error { 83 | if mux.started.Swap(true) { 84 | panic("channelMultiplexer already started") 85 | } 86 | 87 | defer func() { 88 | for _, channelToClose := range mux.in { 89 | close(channelToClose) 90 | } 91 | 92 | for _, channelToClose := range mux.out { 93 | close(channelToClose) 94 | } 95 | }() 96 | 97 | if len(mux.in)+len(mux.inAdded) == 0 { 98 | if len(mux.out)+len(mux.outAdded) > 0 { 99 | panic("foobar") 100 | } 101 | 102 | return nil 103 | } 104 | 105 | g, ctx := errgroup.WithContext(ctx) 106 | 107 | sink := make(chan T) 108 | defer close(sink) 109 | 110 | for _, ch := range mux.in { 111 | ch := ch 112 | 113 | g.Go(func() error { 114 | for { 115 | select { 116 | case 
spread, more := <-ch: 117 | if !more { 118 | return nil 119 | } 120 | select { 121 | case sink <- spread: 122 | case <-ctx.Done(): 123 | return ctx.Err() 124 | } 125 | 126 | case <-ctx.Done(): 127 | return ctx.Err() 128 | } 129 | } 130 | }) 131 | } 132 | 133 | outs := append(mux.outAdded, mux.out...) 134 | g.Go(func() error { 135 | for { 136 | select { 137 | case spread, more := <-sink: 138 | if !more { 139 | return nil 140 | } 141 | 142 | for _, ch := range outs { 143 | select { 144 | case ch <- spread: 145 | case <-ctx.Done(): 146 | return ctx.Err() 147 | } 148 | } 149 | case <-ctx.Done(): 150 | return ctx.Err() 151 | } 152 | } 153 | }) 154 | 155 | return g.Wait() 156 | } 157 | -------------------------------------------------------------------------------- /internal/multiplex.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "golang.org/x/sync/errgroup" 6 | "k8s.io/apimachinery/pkg/util/runtime" 7 | ) 8 | 9 | type Multiplex interface { 10 | In() chan interface{} 11 | Out() chan interface{} 12 | Do(context.Context) error 13 | } 14 | 15 | func NewMultiplex() Multiplex { 16 | return &multiplex{ 17 | started: false, 18 | in: make([]chan interface{}, 0, 1), 19 | out: make([]chan interface{}, 0, 2), 20 | } 21 | } 22 | 23 | type multiplex struct { 24 | started bool 25 | in []chan interface{} 26 | out []chan interface{} 27 | } 28 | 29 | func (m *multiplex) In() chan interface{} { 30 | if m.started { 31 | panic("already started") 32 | } 33 | 34 | ch := make(chan interface{}) 35 | m.in = append(m.in, ch) 36 | 37 | return ch 38 | } 39 | 40 | func (m *multiplex) Out() chan interface{} { 41 | if m.started { 42 | panic("already started") 43 | } 44 | 45 | ch := make(chan interface{}) 46 | m.out = append(m.out, ch) 47 | 48 | return ch 49 | } 50 | 51 | func (m *multiplex) Do(ctx context.Context) error { 52 | m.started = true 53 | 54 | g, ctx := errgroup.WithContext(ctx) 55 | 56 | sink := 
make(chan interface{}) 57 | defer close(sink) 58 | 59 | g.Go(func() error { 60 | defer runtime.HandleCrash() 61 | 62 | for { 63 | for _, in := range m.in { 64 | select { 65 | case item, more := <-in: 66 | if !more { 67 | return nil 68 | } 69 | 70 | select { 71 | case sink <- item: 72 | case <-ctx.Done(): 73 | return ctx.Err() 74 | } 75 | case <-ctx.Done(): 76 | return ctx.Err() 77 | } 78 | } 79 | } 80 | }) 81 | 82 | g.Go(func() error { 83 | defer runtime.HandleCrash() 84 | 85 | for { 86 | select { 87 | case item, more := <-sink: 88 | if !more { 89 | return nil 90 | } 91 | 92 | for _, out := range m.out { 93 | select { 94 | case out <- item: 95 | case <-ctx.Done(): 96 | return ctx.Err() 97 | } 98 | } 99 | case <-ctx.Done(): 100 | return ctx.Err() 101 | } 102 | } 103 | }) 104 | 105 | return g.Wait() 106 | } 107 | -------------------------------------------------------------------------------- /internal/notifications.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/icinga/icinga-go-library/database" 7 | "github.com/icinga/icinga-go-library/types" 8 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 9 | schemav1 "github.com/icinga/icinga-kubernetes/pkg/schema/v1" 10 | "github.com/jmoiron/sqlx" 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | func SyncNotificationsConfig(ctx context.Context, db *database.DB, config *notifications.Config, clusterUuid types.UUID) error { 15 | _true := types.Bool{Bool: true, Valid: true} 16 | 17 | if config.Url != "" { 18 | toDb := []schemav1.Config{ 19 | {ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyNotificationsUrl, Value: config.Url, Locked: _true}, 20 | {ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyNotificationsUsername, Value: config.Username, Locked: _true}, 21 | {ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyNotificationsPassword, Value: config.Password, Locked: _true}, 22 | } 23 | 24 | err := 
db.ExecTx(ctx, func(ctx context.Context, tx *sqlx.Tx) error { 25 | if kwebUrl := config.KubernetesWebUrl; kwebUrl != "" { 26 | toDb = append(toDb, schemav1.Config{ 27 | ClusterUuid: clusterUuid, 28 | Key: schemav1.ConfigKeyNotificationsKubernetesWebUrl, 29 | Value: kwebUrl, 30 | Locked: _true, 31 | }) 32 | } else { 33 | if err := tx.SelectContext(ctx, &config.KubernetesWebUrl, fmt.Sprintf( 34 | `SELECT "%s" FROM "%s"`, 35 | schemav1.ConfigKeyNotificationsKubernetesWebUrl, 36 | database.TableName(schemav1.Config{})), 37 | ); err != nil { 38 | return errors.Wrap(err, "cannot select Icinga Notifications config") 39 | } 40 | } 41 | 42 | if _, err := tx.ExecContext( 43 | ctx, 44 | fmt.Sprintf( 45 | `DELETE FROM "%s" WHERE "cluster_uuid" = ? AND "key" LIKE ? AND "locked" = ?`, 46 | database.TableName(&schemav1.Config{}), 47 | ), 48 | clusterUuid, 49 | `notifications.%`, 50 | _true, 51 | ); err != nil { 52 | return errors.Wrap(err, "cannot delete Icinga Notifications config") 53 | } 54 | 55 | stmt, _ := db.BuildUpsertStmt(schemav1.Config{}) 56 | if _, err := tx.NamedExecContext(ctx, stmt, toDb); err != nil { 57 | return errors.Wrap(err, "cannot upsert Icinga Notifications config") 58 | } 59 | 60 | return nil 61 | }) 62 | if err != nil { 63 | return errors.Wrap(err, "transaction failed") 64 | } 65 | } else { 66 | err := db.ExecTx(ctx, func(ctx context.Context, tx *sqlx.Tx) error { 67 | if _, err := tx.ExecContext( 68 | ctx, 69 | fmt.Sprintf( 70 | `DELETE FROM "%s" WHERE "cluster_uuid" = ? AND "key" LIKE ? 
AND "locked" = ?`, 71 | database.TableName(&schemav1.Config{}), 72 | ), 73 | clusterUuid, 74 | `notifications.%`, 75 | _true, 76 | ); err != nil { 77 | return errors.Wrap(err, "cannot delete Icinga Notifications config") 78 | } 79 | 80 | rows, err := tx.QueryxContext(ctx, db.BuildSelectStmt(&schemav1.Config{}, &schemav1.Config{})) 81 | if err != nil { 82 | return errors.Wrap(err, "cannot fetch Icinga Notifications config from DB") 83 | } 84 | 85 | for rows.Next() { 86 | var r schemav1.Config 87 | if err := rows.StructScan(&r); err != nil { 88 | return errors.Wrap(err, "cannot fetch Icinga Notifications config from DB") 89 | } 90 | 91 | switch r.Key { 92 | case schemav1.ConfigKeyNotificationsUrl: 93 | config.Url = r.Value 94 | case schemav1.ConfigKeyNotificationsUsername: 95 | config.Username = r.Value 96 | case schemav1.ConfigKeyNotificationsPassword: 97 | config.Password = r.Value 98 | case schemav1.ConfigKeyNotificationsKubernetesWebUrl: 99 | config.KubernetesWebUrl = r.Value 100 | } 101 | } 102 | 103 | return nil 104 | }) 105 | if err != nil { 106 | return errors.Wrap(err, "cannot retrieve Icinga Notifications config") 107 | } 108 | } 109 | 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /internal/prometheus.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/icinga/icinga-go-library/database" 7 | "github.com/icinga/icinga-go-library/types" 8 | "github.com/icinga/icinga-kubernetes/pkg/metrics" 9 | schemav1 "github.com/icinga/icinga-kubernetes/pkg/schema/v1" 10 | "github.com/jmoiron/sqlx" 11 | "github.com/pkg/errors" 12 | v1 "k8s.io/api/core/v1" 13 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/rest" 16 | "strings" 17 | ) 18 | 19 | func SyncPrometheusConfig(ctx context.Context, db *database.DB, config *metrics.PrometheusConfig, 
clusterUuid types.UUID) error { 20 | _true := types.Bool{Bool: true, Valid: true} 21 | 22 | if config.Url != "" { 23 | toDb := []schemav1.Config{ 24 | {ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyPrometheusUrl, Value: config.Url, Locked: _true}, 25 | } 26 | 27 | if config.Insecure != "" { 28 | toDb = append( 29 | toDb, 30 | schemav1.Config{ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyPrometheusInsecure, Value: config.Insecure, Locked: _true}, 31 | ) 32 | } 33 | 34 | if config.Username != "" { 35 | toDb = append( 36 | toDb, 37 | schemav1.Config{ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyPrometheusUsername, Value: config.Username, Locked: _true}, 38 | schemav1.Config{ClusterUuid: clusterUuid, Key: schemav1.ConfigKeyPrometheusPassword, Value: config.Password, Locked: _true}, 39 | ) 40 | } 41 | 42 | err := db.ExecTx(ctx, func(ctx context.Context, tx *sqlx.Tx) error { 43 | if _, err := tx.ExecContext( 44 | ctx, 45 | fmt.Sprintf( 46 | `DELETE FROM "%s" WHERE "cluster_uuid" = ? AND "key" LIKE ? AND "locked" = ?`, 47 | database.TableName(&schemav1.Config{}), 48 | ), 49 | clusterUuid, 50 | `prometheus.%`, 51 | _true, 52 | ); err != nil { 53 | return errors.Wrap(err, "cannot delete Prometheus config") 54 | } 55 | 56 | stmt, _ := db.BuildUpsertStmt(schemav1.Config{}) 57 | if _, err := tx.NamedExecContext(ctx, stmt, toDb); err != nil { 58 | return errors.Wrap(err, "cannot upsert Prometheus config") 59 | } 60 | 61 | return nil 62 | }) 63 | if err != nil { 64 | return errors.Wrap(err, "transaction failed") 65 | } 66 | } else { 67 | err := db.ExecTx(ctx, func(ctx context.Context, tx *sqlx.Tx) error { 68 | if _, err := tx.ExecContext( 69 | ctx, 70 | fmt.Sprintf( 71 | `DELETE FROM "%s" WHERE "cluster_uuid" = ? AND "key" LIKE ? 
AND "locked" = ?`, 72 | database.TableName(&schemav1.Config{}), 73 | ), 74 | clusterUuid, 75 | `prometheus.%`, 76 | _true, 77 | ); err != nil { 78 | return errors.Wrap(err, "cannot delete Prometheus config") 79 | } 80 | 81 | rows, err := tx.QueryxContext(ctx, db.BuildSelectStmt(&schemav1.Config{}, &schemav1.Config{})) 82 | if err != nil { 83 | return errors.Wrap(err, "cannot fetch Prometheus config from DB") 84 | } 85 | 86 | for rows.Next() { 87 | var r schemav1.Config 88 | if err := rows.StructScan(&r); err != nil { 89 | return errors.Wrap(err, "cannot fetch Prometheus config from DB") 90 | } 91 | 92 | switch r.Key { 93 | case schemav1.ConfigKeyPrometheusUrl: 94 | config.Url = r.Value 95 | case schemav1.ConfigKeyPrometheusInsecure: 96 | config.Insecure = r.Value 97 | case schemav1.ConfigKeyPrometheusUsername: 98 | config.Username = r.Value 99 | case schemav1.ConfigKeyPrometheusPassword: 100 | config.Password = r.Value 101 | } 102 | } 103 | 104 | return nil 105 | }) 106 | if err != nil { 107 | return errors.Wrap(err, "cannot retrieve Prometheus config") 108 | } 109 | } 110 | 111 | return nil 112 | } 113 | 114 | // AutoDetectPrometheus tries to auto-detect the Prometheus service in the monitoring namespace and 115 | // if found sets the URL in the supplied Prometheus configuration. The first service with the label 116 | // "app.kubernetes.io/name=prometheus" is used. Until now the ServiceTypes ClusterIP and NodePort are supported. 
117 | func AutoDetectPrometheus(ctx context.Context, clientset *kubernetes.Clientset, config *metrics.PrometheusConfig) error { 118 | services, err := clientset.CoreV1().Services("monitoring").List(ctx, kmetav1.ListOptions{ 119 | LabelSelector: "app.kubernetes.io/name=prometheus", 120 | }) 121 | if err != nil { 122 | return errors.Wrap(err, "cannot list Prometheus services") 123 | } 124 | 125 | if len(services.Items) == 0 { 126 | return errors.New("no Prometheus service found") 127 | } 128 | 129 | var ip string 130 | var port int32 131 | 132 | // Check if we are running in a Kubernetes cluster. If so, use the 133 | // service's ClusterIP. Otherwise, use the API Server's IP and NodePort. 134 | if _, err = rest.InClusterConfig(); err == nil { 135 | for _, service := range services.Items { 136 | if service.Spec.Type == v1.ServiceTypeClusterIP { 137 | ip = service.Spec.ClusterIP 138 | port = service.Spec.Ports[0].Port 139 | 140 | break 141 | } 142 | } 143 | } else if errors.Is(err, rest.ErrNotInCluster) { 144 | for _, service := range services.Items { 145 | if service.Spec.Type == v1.ServiceTypeNodePort { 146 | ip = strings.Split(clientset.RESTClient().Get().URL().Host, ":")[0] 147 | port = service.Spec.Ports[0].NodePort 148 | 149 | break 150 | } 151 | } 152 | } 153 | 154 | if ip == "" { 155 | return errors.New("no Prometheus found") 156 | } 157 | 158 | config.Url = fmt.Sprintf("http://%s:%d", ip, port) 159 | 160 | return nil 161 | } 162 | -------------------------------------------------------------------------------- /internal/version.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/version" 5 | ) 6 | 7 | // Version contains version and Git commit information. 8 | // 9 | // The placeholders are replaced on `git archive` using the `export-subst` attribute. 
10 | var Version = version.Version("0.3.0", "v0.3.0-4-gde881475", "de88147599ae6e86c1a8746cdf3dd88f32c126f5") 11 | -------------------------------------------------------------------------------- /pkg/cluster/cluster.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "github.com/icinga/icinga-go-library/types" 6 | ) 7 | 8 | // Private type to prevent collisions with other context keys 9 | type contextKey string 10 | 11 | // clusterUuidContextKey is the key for Cluster values in contexts. 12 | var clusterUuidContextKey = contextKey("cluster_uuid") 13 | 14 | // NewClusterUuidContext creates a new context that carries the provided cluster UUID. 15 | // The new context is derived from the given parent context and associates the cluster UUID 16 | // with a predefined key (clusterContextKey). 17 | func NewClusterUuidContext(parent context.Context, clusterUuid types.UUID) context.Context { 18 | return context.WithValue(parent, clusterUuidContextKey, clusterUuid) 19 | } 20 | 21 | // ClusterUuidFromContext returns the uuid value of the cluster stored in ctx, if any: 22 | // 23 | // clusterUuid, ok := ClusterUuidFromContext(ctx) 24 | // if !ok { 25 | // // Error handling. 26 | // } 27 | func ClusterUuidFromContext(ctx context.Context) types.UUID { 28 | clusterUuid, ok := ctx.Value(clusterUuidContextKey).(types.UUID) 29 | if !ok { 30 | panic("cluster not found in context") 31 | } 32 | 33 | return clusterUuid 34 | } 35 | -------------------------------------------------------------------------------- /pkg/com/basic_auth_transport.go: -------------------------------------------------------------------------------- 1 | package com 2 | 3 | import ( 4 | "crypto/tls" 5 | "net/http" 6 | ) 7 | 8 | // BasicAuthTransport is a http.RoundTripper that authenticates all requests using HTTP Basic Authentication. 
9 | type BasicAuthTransport struct { 10 | http.RoundTripper 11 | Username string 12 | Password string 13 | Insecure bool 14 | } 15 | 16 | // RoundTrip executes a single HTTP transaction with the basic auth credentials. 17 | func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) { 18 | if t.Username != "" { 19 | req.SetBasicAuth(t.Username, t.Password) 20 | } 21 | 22 | rt := t.RoundTripper 23 | if rt == nil { 24 | rt = http.DefaultTransport 25 | } 26 | 27 | if t.Insecure { 28 | if transport, ok := rt.(*http.Transport); ok { 29 | transportCopy := transport.Clone() 30 | // #nosec G402 -- TLS certificate verification is intentionally configurable via YAML config. 31 | transportCopy.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} 32 | rt = transportCopy 33 | } 34 | } 35 | 36 | return rt.RoundTrip(req) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/daemon/config.go: -------------------------------------------------------------------------------- 1 | package daemon 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/database" 5 | "github.com/icinga/icinga-go-library/logging" 6 | "github.com/icinga/icinga-kubernetes/pkg/metrics" 7 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 8 | ) 9 | 10 | // DefaultConfigPath specifies the default location of Icinga for Kubernetes's config.yml 11 | // if not set via command line flag. 12 | const DefaultConfigPath = "./config.yml" 13 | 14 | // Config defines Icinga Kubernetes config. 15 | type Config struct { 16 | Database database.Config `yaml:"database" envPrefix:"DATABASE_"` 17 | Logging logging.Config `yaml:"logging" envPrefix:"LOGGING_"` 18 | Notifications notifications.Config `yaml:"notifications" envPrefix:"NOTIFICATIONS_"` 19 | Prometheus metrics.PrometheusConfig `yaml:"prometheus" envPrefix:"PROMETHEUS_"` 20 | } 21 | 22 | // Validate checks constraints in the supplied configuration and returns an error if they are violated. 
23 | func (c *Config) Validate() error { 24 | if err := c.Database.Validate(); err != nil { 25 | return err 26 | } 27 | 28 | if err := c.Logging.Validate(); err != nil { 29 | return err 30 | } 31 | 32 | if err := c.Prometheus.Validate(); err != nil { 33 | return err 34 | } 35 | 36 | return c.Notifications.Validate() 37 | } 38 | 39 | // ConfigFlagGlue provides a glue struct for the CLI config flag. 40 | // 41 | // ConfigFlagGlue implements the [github.com/icinga/icinga-go-library/config.Flags] interface. 42 | type ConfigFlagGlue struct { 43 | // Config is the path to the config file 44 | Config string 45 | } 46 | 47 | // GetConfigPath retrieves the path to the configuration file. 48 | // It returns the path specified via the command line, or DefaultConfigPath if none is provided. 49 | func (f ConfigFlagGlue) GetConfigPath() string { 50 | if f.Config == "" { 51 | return DefaultConfigPath 52 | } 53 | 54 | return f.Config 55 | } 56 | 57 | // IsExplicitConfigPath indicates whether the configuration file path was explicitly set. 58 | func (f ConfigFlagGlue) IsExplicitConfigPath() bool { 59 | return f.Config != "" 60 | } 61 | -------------------------------------------------------------------------------- /pkg/database/cleanup.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/icinga/icinga-go-library/backoff" 7 | "github.com/icinga/icinga-go-library/com" 8 | "github.com/icinga/icinga-go-library/periodic" 9 | "github.com/icinga/icinga-go-library/retry" 10 | "github.com/icinga/icinga-go-library/types" 11 | "time" 12 | ) 13 | 14 | // CleanupStmt defines information needed to compose cleanup statements. 15 | type CleanupStmt struct { 16 | Table string 17 | PK string 18 | Column string 19 | } 20 | 21 | // Build assembles the cleanup statement for the specified database driver with the given limit. 
22 | func (stmt *CleanupStmt) Build(driverName string, limit uint64) string { 23 | switch driverName { 24 | case MySQL, "mysql": 25 | return fmt.Sprintf(`DELETE FROM %[1]s WHERE %[2]s < :time LIMIT %[3]d`, stmt.Table, stmt.Column, limit) 26 | case PostgreSQL, "postgres": 27 | return fmt.Sprintf(`WITH rows AS (SELECT %[1]s FROM %[2]s WHERE %[3]s < :time LIMIT %[4]d) 28 | DELETE FROM %[2]s WHERE %[1]s IN (SELECT %[1]s FROM rows)`, stmt.PK, stmt.Table, stmt.Column, limit) 29 | default: 30 | panic(fmt.Sprintf("invalid database type %s", driverName)) 31 | } 32 | } 33 | 34 | // CleanupOlderThan deletes all rows with the specified statement that are older than the given time. 35 | // Deletes a maximum of as many rows per round as defined in count. Actually deleted rows will be passed to onSuccess. 36 | // Returns the total number of rows deleted. 37 | func (db *Database) CleanupOlderThan( 38 | ctx context.Context, stmt CleanupStmt, 39 | count uint64, olderThan time.Time, onSuccess ...OnSuccess[struct{}], 40 | ) (uint64, error) { 41 | var counter com.Counter 42 | 43 | q := db.Rebind(stmt.Build(db.DriverName(), count)) 44 | 45 | defer db.periodicLog(ctx, q, &counter).Stop() 46 | 47 | for { 48 | var rowsDeleted int64 49 | 50 | err := retry.WithBackoff( 51 | ctx, 52 | func(ctx context.Context) error { 53 | rs, err := db.NamedExecContext(ctx, q, cleanupWhere{ 54 | Time: types.UnixMilli(olderThan), 55 | }) 56 | if err != nil { 57 | return CantPerformQuery(err, q) 58 | } 59 | 60 | rowsDeleted, err = rs.RowsAffected() 61 | 62 | return err 63 | }, 64 | retry.Retryable, 65 | backoff.NewExponentialWithJitter(1*time.Millisecond, 1*time.Second), 66 | retry.Settings{ 67 | Timeout: retry.DefaultTimeout, 68 | OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) { 69 | if lastErr == nil || err.Error() != lastErr.Error() { 70 | db.log.Info("Cannot execute query. 
Retrying", "error", err) 71 | } 72 | }, 73 | OnSuccess: func(elapsed time.Duration, attempt uint64, lastErr error) { 74 | if attempt > 1 { 75 | db.log.Info("Query retried successfully after error", 76 | "after", elapsed, "attempt", attempt, "recovered_error", lastErr) 77 | } 78 | }, 79 | }, 80 | ) 81 | if err != nil { 82 | return 0, err 83 | } 84 | 85 | counter.Add(uint64(rowsDeleted)) 86 | 87 | for _, onSuccess := range onSuccess { 88 | if err := onSuccess(ctx, make([]struct{}, rowsDeleted)); err != nil { 89 | return 0, err 90 | } 91 | } 92 | 93 | if rowsDeleted < int64(count) { 94 | break 95 | } 96 | } 97 | 98 | return counter.Total(), nil 99 | } 100 | 101 | type cleanupWhere struct { 102 | Time types.UnixMilli 103 | } 104 | 105 | func (db *Database) PeriodicCleanup(ctx context.Context, stmt CleanupStmt) error { 106 | errs := make(chan error, 1) 107 | defer close(errs) 108 | 109 | periodic.Start(ctx, time.Hour, func(tick periodic.Tick) { 110 | olderThan := tick.Time.AddDate(0, 0, -1) 111 | 112 | _, err := db.CleanupOlderThan( 113 | ctx, stmt, 5000, olderThan, 114 | ) 115 | 116 | if err != nil { 117 | select { 118 | case errs <- err: 119 | case <-ctx.Done(): 120 | } 121 | 122 | return 123 | } 124 | }, periodic.Immediate()).Stop() 125 | 126 | select { 127 | case err := <-errs: 128 | return err 129 | case <-ctx.Done(): 130 | return ctx.Err() 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /pkg/database/contracts.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | // TableNamer implements the TableName method, 4 | // which returns the table of the object. 5 | type TableNamer interface { 6 | TableName() string // TableName tells the table. 
7 | } 8 | 9 | type Upserter interface { 10 | Upsert() interface{} 11 | } 12 | -------------------------------------------------------------------------------- /pkg/database/driver.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "fmt" 8 | "github.com/go-logr/logr" 9 | "github.com/go-sql-driver/mysql" 10 | "github.com/icinga/icinga-go-library/backoff" 11 | "github.com/icinga/icinga-go-library/retry" 12 | "github.com/jmoiron/sqlx" 13 | "github.com/pkg/errors" 14 | "time" 15 | ) 16 | 17 | const MySQL = "icinga-mysql" 18 | const PostgreSQL = "icinga-pgsql" 19 | 20 | var timeout = time.Minute * 5 21 | 22 | // RetryConnector wraps driver.Connector with retry logic. 23 | type RetryConnector struct { 24 | driver.Connector 25 | driver Driver 26 | } 27 | 28 | // Connect implements part of the driver.Connector interface. 29 | func (c RetryConnector) Connect(ctx context.Context) (driver.Conn, error) { 30 | var conn driver.Conn 31 | err := errors.Wrap(retry.WithBackoff( 32 | ctx, 33 | func(ctx context.Context) (err error) { 34 | conn, err = c.Connector.Connect(ctx) 35 | return 36 | }, 37 | shouldRetry, 38 | backoff.NewExponentialWithJitter(time.Millisecond*128, time.Minute*1), 39 | retry.Settings{ 40 | Timeout: timeout, 41 | OnRetryableError: func(_ time.Duration, _ uint64, err, lastErr error) { 42 | if lastErr == nil || err.Error() != lastErr.Error() { 43 | c.driver.Logger.Info("Cannot connect to database. 
Retrying", "error", err) 44 | } 45 | }, 46 | OnSuccess: func(elapsed time.Duration, attempt uint64, _ error) { 47 | if attempt > 1 { 48 | c.driver.Logger.Info("Reconnected to database") 49 | // c.driver.Logger.Info(1, "Reconnected to database", 50 | // zap.Duration("after", elapsed), zap.Uint64("attempts", attempt+1)) 51 | } 52 | }, 53 | }, 54 | ), "cannot connect to database") 55 | return conn, err 56 | } 57 | 58 | // Driver implements part of the driver.Connector interface. 59 | func (c RetryConnector) Driver() driver.Driver { 60 | return c.driver 61 | } 62 | 63 | // Driver wraps a driver.Driver that also must implement driver.DriverContext with logging capabilities and provides our RetryConnector. 64 | type Driver struct { 65 | ctxDriver 66 | Logger logr.Logger 67 | } 68 | 69 | // OpenConnector implements the DriverContext interface. 70 | func (d Driver) OpenConnector(name string) (driver.Connector, error) { 71 | c, err := d.ctxDriver.OpenConnector(name) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | return &RetryConnector{ 77 | driver: d, 78 | Connector: c, 79 | }, nil 80 | } 81 | 82 | // RegisterDrivers makes our database Driver(s) available under the name "icinga-*sql". 83 | func RegisterDrivers(logger logr.Logger) { 84 | sql.Register(MySQL, &Driver{ctxDriver: &mysql.MySQLDriver{}, Logger: logger}) 85 | sql.Register(PostgreSQL, &Driver{ctxDriver: &PgSQLDriver{}, Logger: logger}) 86 | _ = mysql.SetLogger(mysqlLogger(func(v ...interface{}) { fmt.Println(v...) })) 87 | sqlx.BindDriver(PostgreSQL, sqlx.DOLLAR) 88 | } 89 | 90 | // ctxDriver helps ensure that we only support drivers that implement driver.Driver and driver.DriverContext. 91 | type ctxDriver interface { 92 | driver.Driver 93 | driver.DriverContext 94 | } 95 | 96 | // mysqlLogger is an adapter that allows ordinary functions to be used as a logger for mysql.SetLogger. 97 | type mysqlLogger func(v ...interface{}) 98 | 99 | // Print implements the mysql.Logger interface. 
100 | func (log mysqlLogger) Print(v ...interface{}) { 101 | log(v) 102 | } 103 | 104 | func shouldRetry(err error) bool { 105 | if errors.Is(err, driver.ErrBadConn) { 106 | return true 107 | } 108 | 109 | return retry.Retryable(err) 110 | } 111 | -------------------------------------------------------------------------------- /pkg/database/features.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/database" 5 | ) 6 | 7 | type Feature func(*Features) 8 | 9 | type Features struct { 10 | blocking bool 11 | cascading bool 12 | onSuccess database.OnSuccess[any] 13 | } 14 | 15 | func NewFeatures(features ...Feature) *Features { 16 | f := &Features{} 17 | for _, feature := range features { 18 | feature(f) 19 | } 20 | 21 | return f 22 | } 23 | 24 | func WithBlocking() Feature { 25 | return func(f *Features) { 26 | f.blocking = true 27 | } 28 | } 29 | 30 | func WithCascading() Feature { 31 | return func(f *Features) { 32 | f.cascading = true 33 | } 34 | } 35 | 36 | func WithOnSuccess(fn database.OnSuccess[any]) Feature { 37 | return func(f *Features) { 38 | f.onSuccess = fn 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/database/pgsql_driver.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "database/sql/driver" 5 | "github.com/lib/pq" 6 | ) 7 | 8 | // PgSQLDriver extends pq.Driver with driver.DriverContext compliance. 9 | type PgSQLDriver struct { 10 | pq.Driver 11 | } 12 | 13 | // Assert interface compliance. 14 | var ( 15 | _ driver.Driver = &PgSQLDriver{} 16 | _ driver.DriverContext = &PgSQLDriver{} 17 | ) 18 | 19 | // OpenConnector implements the driver.DriverContext interface. 
// OpenConnector implements driver.DriverContext using the lib/pq connector.
func (PgSQLDriver) OpenConnector(name string) (driver.Connector, error) {
	return pq.NewConnector(name)
}
--------------------------------------------------------------------------------
/pkg/database/quoter.go:
--------------------------------------------------------------------------------
package database

import (
	"fmt"
	"github.com/jmoiron/sqlx"
	"strings"
)

// Quoter quotes SQL identifiers with the quote character of the database driver in use.
type Quoter struct {
	quoteCharacter string
}

// NewQuoter returns a Quoter matching the driver of db:
// backticks for MySQL, double quotes for PostgreSQL.
// NOTE(review): an unrecognized driver yields an empty quote character, i.e. unquoted identifiers.
func NewQuoter(db *sqlx.DB) *Quoter {
	var qc string

	switch db.DriverName() {
	case MySQL:
		qc = "`"
	case PostgreSQL:
		qc = `"`
	}

	return &Quoter{quoteCharacter: qc}
}

// QuoteIdentifier wraps a single identifier in the driver's quote character.
func (q *Quoter) QuoteIdentifier(identifier string) string {
	return q.quoteCharacter + identifier + q.quoteCharacter
}

// QuoteColumns quotes each of the given columns and joins them with ", ".
// The explicit argument index %[1]s reuses the quote character for the trailing quote.
func (q *Quoter) QuoteColumns(columns []string) string {
	return fmt.Sprintf("%[1]s%s%[1]s", q.quoteCharacter, strings.Join(columns, q.quoteCharacter+", "+q.quoteCharacter))
}
--------------------------------------------------------------------------------
/pkg/database/relations.go:
--------------------------------------------------------------------------------
package database

import (
	"context"
)

// Relation describes a set of dependent rows belonging to a parent entity.
type Relation interface {
	ForeignKey() string
	SetForeignKey(fk string)
	CascadeDelete() bool
	WithoutCascadeDelete()
	StreamInto(context.Context, chan interface{}) error
	TableName() string
}

// HasRelations is implemented by entities that own related rows.
type HasRelations interface {
	Relations() []Relation
}

// RelationOption configures a Relation.
type RelationOption func(r Relation)

// WithForeignKey sets the foreign key column name of the relation.
func WithForeignKey(fk string) RelationOption {
	return func(r Relation) {
		r.SetForeignKey(fk)
	}
}

// WithoutCascadeDelete disables cascade deletion for the relation.
func WithoutCascadeDelete() RelationOption {
	return func(r Relation) {
		r.WithoutCascadeDelete()
	}
}

// relation is the common base of hasMany and hasOne.
type relation[T comparable] struct {
	foreignKey           string
	withoutCascadeDelete bool
}

func (r *relation[T]) ForeignKey() string {
	return r.foreignKey
}

func (r *relation[T]) SetForeignKey(fk string) {
	r.foreignKey = fk
}

// CascadeDelete defaults to true unless WithoutCascadeDelete was applied.
func (r *relation[T]) CascadeDelete() bool {
	return !r.withoutCascadeDelete
}

func (r *relation[T]) WithoutCascadeDelete() {
	r.withoutCascadeDelete = true
}

// TableName derives the table name from the zero value of T.
func (r *relation[T]) TableName() string {
	return TableName(*new(T))
}

// hasMany is a Relation over a slice of entities.
type hasMany[T comparable] struct {
	relation[T]
	entities []T
}

// HasMany returns a Relation that streams all given entities.
func HasMany[T comparable](entities []T, options ...RelationOption) Relation {
	r := &hasMany[T]{entities: entities}

	for _, o := range options {
		o(r)
	}

	return r
}

// StreamInto sends each entity to ch, honoring context cancellation.
func (r *hasMany[T]) StreamInto(ctx context.Context, ch chan interface{}) error {
	for _, entity := range r.entities {
		select {
		case ch <- entity:
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	return nil
}

// hasOne is a Relation over a single optional entity.
type hasOne[T comparable] struct {
	relation[T]
	entity T
}

// HasOne returns a Relation that streams the given entity unless it is the zero value.
func HasOne[T comparable](entity T, options ...RelationOption) Relation {
	r := &hasOne[T]{entity: entity}

	for _, o := range options {
		o(r)
	}

	return r
}

// StreamInto sends the entity to ch; the zero value means "no related entity".
func (r *hasOne[T]) StreamInto(ctx context.Context, ch chan interface{}) error {
	if r.entity != Zero[T]() {
		select {
		case ch <- r.entity:
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	return nil
}

// Zero returns the zero value of T.
func Zero[T any]() T {
	return *new(T)
}
--------------------------------------------------------------------------------
/pkg/database/utils.go:
--------------------------------------------------------------------------------
package database

import (
	sqlDriver "database/sql/driver"
	"fmt"
	"github.com/go-sql-driver/mysql"
	"github.com/icinga/icinga-go-library/strcase"
	"github.com/icinga/icinga-go-library/types"
	"github.com/lib/pq"
	"github.com/pkg/errors"
	"net"
	"strings"
)

// CantPerformQuery wraps the given error with the specified query that cannot be executed.
func CantPerformQuery(err error, q string) error {
	return errors.Wrapf(err, "cannot perform %q", q)
}

// IsUnixAddr reports whether host looks like a UNIX socket path (starts with "/").
func IsUnixAddr(host string) bool {
	return strings.HasPrefix(host, "/")
}

// JoinHostPort is like its equivalent in net., but handles UNIX sockets as well.
func JoinHostPort(host string, port int) string {
	if IsUnixAddr(host) {
		return host
	}

	return net.JoinHostPort(host, fmt.Sprint(port))
}

// IsRetryable checks whether the given error is retryable.
// It recognizes driver-level bad connections plus specific MySQL error
// numbers and PostgreSQL SQLSTATE codes that indicate transient conditions.
func IsRetryable(err error) bool {
	if errors.Is(err, sqlDriver.ErrBadConn) {
		return true
	}

	if errors.Is(err, mysql.ErrInvalidConn) {
		return true
	}

	var e *mysql.MySQLError
	if errors.As(err, &e) {
		switch e.Number {
		case 1053, 1205, 1213, 2006:
			// 1053: Server shutdown in progress
			// 1205: Lock wait timeout
			// 1213: Deadlock found when trying to get lock
			// 2006: MySQL server has gone away
			return true
		default:
			return false
		}
	}

	var pe *pq.Error
	if errors.As(err, &pe) {
		switch pe.Code {
		case "08000", // connection_exception
			"08006", // connection_failure
			"08001", // sqlclient_unable_to_establish_sqlconnection
			"08004", // sqlserver_rejected_establishment_of_sqlconnection
			"40001", // serialization_failure
			"40P01", // deadlock_detected
			"54000", // program_limit_exceeded
			"55006", // object_in_use
			"55P03", // lock_not_available
			"57P01", // admin_shutdown
			"57P02", // crash_shutdown
			"57P03", // cannot_connect_now
			"58000", // system_error
			"58030", // io_error
			"XX000": // internal_error
			return true
		default:
			if strings.HasPrefix(string(pe.Code), "53") {
				// Class 53 - Insufficient Resources
				return true
			}
		}
	}

	return false
}

// TableName returns the table of t: a TableNamer's own name, a string as-is,
// or the snake_cased type name otherwise.
func TableName(t interface{}) string {
	if tn, ok := t.(TableNamer); ok {
		return tn.TableName()
	}

	if s, ok := t.(string); ok {
		return s
	}

	return strcase.Snake(types.Name(t))
}
--------------------------------------------------------------------------------
/pkg/database/uuid.go:
--------------------------------------------------------------------------------
package database

import (
	"database/sql/driver"
	"encoding"
	"github.com/google/uuid"
)

// UUID is like uuid.UUID, but marshals itself binarily (not like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) in SQL context.
type UUID struct {
	uuid.UUID
}

// Value implements driver.Valuer.
func (uuid UUID) Value() (driver.Value, error) {
	return uuid.UUID[:], nil
}

// Assert interface compliance.
var (
	_ encoding.TextUnmarshaler = (*UUID)(nil)
	_ driver.Valuer            = UUID{}
	_ driver.Valuer            = (*UUID)(nil)
)
--------------------------------------------------------------------------------
/pkg/metrics/config.go:
--------------------------------------------------------------------------------
package metrics

import (
	"github.com/pkg/errors"
)

// PrometheusConfig defines Prometheus configuration.
type PrometheusConfig struct {
	Url      string `yaml:"url" env:"URL"`
	Insecure string `yaml:"insecure" env:"INSECURE"`
	Username string `yaml:"username" env:"USERNAME"`
	Password string `yaml:"password" env:"PASSWORD"`
}

// Validate checks constraints in the supplied Prometheus configuration and returns an error if they are violated.
16 | func (c *PrometheusConfig) Validate() error { 17 | if c.Url != "" { 18 | if (c.Username == "") != (c.Password == "") { 19 | return errors.New("both username and password must be provided") 20 | } 21 | 22 | if c.Insecure != "" && c.Insecure != "true" && c.Insecure != "false" { 23 | return errors.New("'insecure' has to be 'true', 'false' or empty") 24 | } 25 | } 26 | 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /pkg/notifications/client.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "github.com/icinga/icinga-kubernetes/pkg/com" 8 | "github.com/pkg/errors" 9 | "io" 10 | "k8s.io/klog/v2" 11 | "net/http" 12 | "net/url" 13 | ) 14 | 15 | type Client struct { 16 | client http.Client 17 | userAgent string 18 | processEventUrl string 19 | webUrl *url.URL 20 | } 21 | 22 | func NewClient(name string, config Config) (*Client, error) { 23 | baseUrl, err := url.Parse(config.Url) 24 | if err != nil { 25 | return nil, errors.Wrap(err, "unable to parse url") 26 | } 27 | 28 | webUrl, err := url.Parse(config.KubernetesWebUrl) 29 | if err != nil { 30 | return nil, errors.Wrap(err, "unable to parse web url") 31 | } 32 | 33 | return &Client{ 34 | client: http.Client{ 35 | Transport: &com.BasicAuthTransport{ 36 | RoundTripper: http.DefaultTransport, 37 | Username: config.Username, 38 | Password: config.Password, 39 | }, 40 | }, 41 | userAgent: name, 42 | processEventUrl: baseUrl.ResolveReference(&url.URL{Path: "/process-event"}).String(), 43 | webUrl: webUrl, 44 | }, nil 45 | } 46 | 47 | func (c *Client) ProcessEvent(ctx context.Context, event Marshaler) error { 48 | e, _ := event.MarshalEvent() 49 | e.URL = c.webUrl.ResolveReference(e.URL) 50 | 51 | body, err := json.Marshal(e) 52 | if err != nil { 53 | return errors.Wrapf(err, "cannot marshal notifications event data of type: %T", e) 54 | } 
55 | 56 | req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.processEventUrl, bytes.NewReader(body)) 57 | if err != nil { 58 | return errors.Wrap(err, "cannot create new notifications http request") 59 | } 60 | 61 | req.Header.Add("Content-Type", "application/json") 62 | 63 | res, err := c.client.Do(req) 64 | if err != nil { 65 | return errors.Wrap(err, "cannot send notifications event") 66 | } 67 | 68 | defer func() { 69 | _ = res.Body.Close() 70 | }() 71 | 72 | if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusNotAcceptable { 73 | _, msg := io.ReadAll(res.Body) 74 | return errors.Errorf("received unexpected http status code from Icinga Notifications: %d: %s", res.StatusCode, msg) 75 | } 76 | 77 | return nil 78 | } 79 | 80 | // Stream consumes the items from the given `entities` chan and triggers a notifications event for each of them. 81 | func (c *Client) Stream(ctx context.Context, entities <-chan any) error { 82 | for { 83 | select { 84 | case entity, more := <-entities: 85 | if !more { 86 | return nil 87 | } 88 | 89 | if err := c.ProcessEvent(ctx, entity.(Marshaler)); err != nil { 90 | klog.Error(err) 91 | } 92 | case <-ctx.Done(): 93 | return ctx.Err() 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /pkg/notifications/config.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "net/url" 6 | "regexp" 7 | ) 8 | 9 | type Config struct { 10 | // If URL is the empty string, notifications are disabled. 
11 | Url string `yaml:"url" env:"URL"` 12 | Username string `yaml:"username" env:"USERNAME"` 13 | Password string `yaml:"password" env:"PASSWORD"` 14 | KubernetesWebUrl string `yaml:"kubernetes_web_url" env:"KUBERNETES_WEB_URL" default:"http://localhost/icingaweb2/kubernetes"` 15 | } 16 | 17 | // Validate checks constraints in the supplied configuration and returns an error if they are violated. 18 | func (c *Config) Validate() error { 19 | if c.Url != "" || c.Username != "" || c.Password != "" { 20 | if c.Url == "" || c.Username == "" || c.Password == "" { 21 | return errors.New("if one of 'url', 'username', or 'password' is set, all must be set") 22 | } 23 | 24 | usernameValid, err := regexp.MatchString(`^source-\d+$`, c.Username) 25 | if err != nil { 26 | return errors.WithStack(err) 27 | } 28 | if !usernameValid { 29 | return errors.New("'username' must be of the form 'source-'") 30 | } 31 | 32 | if _, err := url.Parse(c.Url); err != nil { 33 | return errors.Wrap(err, "'url' invalid") 34 | } 35 | } 36 | 37 | if _, err := url.Parse(c.KubernetesWebUrl); err != nil { 38 | return errors.Wrap(err, "'kubernetes_web_url' invalid") 39 | } 40 | 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /pkg/notifications/contracts.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | // Marshaler is the interface implemented by types that 4 | // can marshal themselves into valid notification events. 
5 | type Marshaler interface { 6 | MarshalEvent() (Event, error) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/notifications/event.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "encoding/json" 5 | "net/url" 6 | ) 7 | 8 | type Event struct { 9 | Name string 10 | Severity string 11 | Message string 12 | URL *url.URL 13 | Tags map[string]string 14 | ExtraTags map[string]string 15 | } 16 | 17 | func (e Event) MarshalJSON() ([]byte, error) { 18 | return json.Marshal(struct { 19 | Name string `json:"name"` 20 | Severity string `json:"severity"` 21 | Message string `json:"message"` 22 | URL string `json:"json"` 23 | Tags map[string]string `json:"tags"` 24 | ExtraTags map[string]string `json:"extra_tags"` 25 | }{ 26 | Name: e.Name, 27 | Severity: e.Severity, 28 | Message: e.Message, 29 | URL: e.URL.String(), 30 | Tags: e.Tags, 31 | ExtraTags: e.ExtraTags, 32 | }) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/schema/v1/annotation.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/icinga/icinga-go-library/types" 4 | 5 | type Annotation struct { 6 | Uuid types.UUID 7 | Name string 8 | Value string 9 | } 10 | -------------------------------------------------------------------------------- /pkg/schema/v1/bitmask.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "database/sql/driver" 6 | "golang.org/x/exp/constraints" 7 | "strconv" 8 | ) 9 | 10 | type Bitmask[T constraints.Integer] struct { 11 | bitmask T 12 | } 13 | 14 | func (b Bitmask[T]) Bits() T { return b.bitmask } 15 | func (b Bitmask[T]) Has(flag T) bool { return b.bitmask&flag != 0 } 16 | func (b *Bitmask[T]) Set(flag T) { b.bitmask |= flag } 17 | func (b *Bitmask[T]) 
Clear(flag T) { b.bitmask &= ^flag } 18 | func (b *Bitmask[T]) Toggle(flag T) { b.bitmask ^= flag } 19 | 20 | // Scan implements the sql.Scanner interface. 21 | func (b *Bitmask[T]) Scan(src interface{}) error { 22 | i, err := strconv.ParseInt(string(src.([]byte)), 10, 64) 23 | if err != nil { 24 | return err 25 | } 26 | 27 | b.bitmask = T(i) 28 | 29 | return nil 30 | } 31 | 32 | // Value implements the driver.Valuer interface. 33 | func (b Bitmask[T]) Value() (driver.Value, error) { 34 | if b.bitmask == 0 { 35 | return nil, nil 36 | } 37 | 38 | return int64(b.bitmask), nil 39 | } 40 | 41 | // Assert interface compliance. 42 | var ( 43 | _ sql.Scanner = (*Bitmask[byte])(nil) 44 | _ driver.Valuer = (*Bitmask[byte])(nil) 45 | ) 46 | -------------------------------------------------------------------------------- /pkg/schema/v1/cluster.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | ) 7 | 8 | type Cluster struct { 9 | Uuid types.UUID 10 | Name sql.NullString 11 | } 12 | -------------------------------------------------------------------------------- /pkg/schema/v1/config.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/icinga/icinga-go-library/types" 4 | 5 | // Config represents a single key => value pair database config entry. 6 | type Config struct { 7 | ClusterUuid types.UUID 8 | Key ConfigKey 9 | Value string 10 | Locked types.Bool 11 | } 12 | 13 | // ConfigKey represents the database config.Key enums. 
type ConfigKey string

// Known config keys, mirroring the notifications and Prometheus configuration fields.
const (
	ConfigKeyNotificationsUsername         ConfigKey = "notifications.username"
	ConfigKeyNotificationsPassword         ConfigKey = "notifications.password"
	ConfigKeyNotificationsUrl              ConfigKey = "notifications.url"
	ConfigKeyNotificationsKubernetesWebUrl ConfigKey = "notifications.kubernetes_web_url"
	ConfigKeyPrometheusUrl                 ConfigKey = "prometheus.url"
	ConfigKeyPrometheusInsecure            ConfigKey = "prometheus.insecure"
	ConfigKeyPrometheusUsername            ConfigKey = "prometheus.username"
	ConfigKeyPrometheusPassword            ConfigKey = "prometheus.password"
)
--------------------------------------------------------------------------------
/pkg/schema/v1/config_map.go:
--------------------------------------------------------------------------------
package v1

import (
	"github.com/icinga/icinga-go-library/types"
	"github.com/icinga/icinga-kubernetes/pkg/database"
	kcorev1 "k8s.io/api/core/v1"
	kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"strings"
)

// ConfigMap is the database representation of a Kubernetes ConfigMap.
// The `db:"-"` slices hold related rows persisted via Relations().
type ConfigMap struct {
	Meta
	Immutable            types.Bool
	Labels               []Label               `db:"-"`
	ConfigMapLabels      []ConfigMapLabel      `db:"-"`
	ResourceLabels       []ResourceLabel       `db:"-"`
	Annotations          []Annotation          `db:"-"`
	ConfigMapAnnotations []ConfigMapAnnotation `db:"-"`
	ResourceAnnotations  []ResourceAnnotation  `db:"-"`
}

// ConfigMapLabel links a ConfigMap to a Label.
type ConfigMapLabel struct {
	ConfigMapUuid types.UUID
	LabelUuid     types.UUID
}

// ConfigMapAnnotation links a ConfigMap to an Annotation.
type ConfigMapAnnotation struct {
	ConfigMapUuid  types.UUID
	AnnotationUuid types.UUID
}

// NewConfigMap returns an empty ConfigMap ready for Obtain.
func NewConfigMap() Resource {
	return &ConfigMap{}
}

// Obtain copies the relevant fields from the given Kubernetes ConfigMap and
// derives label/annotation rows with UUIDs namespaced by the resource UUID.
func (c *ConfigMap) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) {
	c.ObtainMeta(k8s, clusterUuid)

	configMap := k8s.(*kcorev1.ConfigMap)

	// Immutable is a *bool in the API; nil is treated as false.
	var immutable bool
	if configMap.Immutable != nil {
		immutable = *configMap.Immutable
	}
	c.Immutable = types.Bool{
		Bool:  immutable,
		Valid: true,
	}

	for labelName, labelValue := range configMap.Labels {
		labelUuid := NewUUID(c.Uuid, strings.ToLower(labelName+":"+labelValue))
		c.Labels = append(c.Labels, Label{
			Uuid:  labelUuid,
			Name:  labelName,
			Value: labelValue,
		})
		c.ConfigMapLabels = append(c.ConfigMapLabels, ConfigMapLabel{
			ConfigMapUuid: c.Uuid,
			LabelUuid:     labelUuid,
		})
		c.ResourceLabels = append(c.ResourceLabels, ResourceLabel{
			ResourceUuid: c.Uuid,
			LabelUuid:    labelUuid,
		})
	}

	for annotationName, annotationValue := range configMap.Annotations {
		annotationUuid := NewUUID(c.Uuid, strings.ToLower(annotationName+":"+annotationValue))
		c.Annotations = append(c.Annotations, Annotation{
			Uuid:  annotationUuid,
			Name:  annotationName,
			Value: annotationValue,
		})
		c.ConfigMapAnnotations = append(c.ConfigMapAnnotations, ConfigMapAnnotation{
			ConfigMapUuid:  c.Uuid,
			AnnotationUuid: annotationUuid,
		})
		c.ResourceAnnotations = append(c.ResourceAnnotations, ResourceAnnotation{
			ResourceUuid:   c.Uuid,
			AnnotationUuid: annotationUuid,
		})
	}
}

// Relations declares the dependent rows of a ConfigMap. Shared Label/Annotation
// rows are excluded from cascade deletion since other resources reference them.
func (c *ConfigMap) Relations() []database.Relation {
	fk := database.WithForeignKey("config_map_uuid")

	return []database.Relation{
		database.HasMany(c.ResourceLabels, database.WithForeignKey("resource_uuid")),
		database.HasMany(c.Labels, database.WithoutCascadeDelete()),
		database.HasMany(c.ConfigMapLabels, fk),
		database.HasMany(c.ResourceAnnotations, database.WithForeignKey("resource_uuid")),
		database.HasMany(c.Annotations, database.WithoutCascadeDelete()),
		database.HasMany(c.ConfigMapAnnotations, fk),
	}
}
--------------------------------------------------------------------------------
/pkg/schema/v1/contracts.go:
--------------------------------------------------------------------------------
package v1

import (
	"database/sql"
	"fmt"
	"github.com/google/uuid"
	"github.com/icinga/icinga-go-library/types"
	kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ktypes "k8s.io/apimachinery/pkg/types"
	"reflect"
)

// NameSpaceKubernetes is the UUIDv5 namespace for deriving UUIDs from Kubernetes UIDs.
var NameSpaceKubernetes = uuid.MustParse("3f249403-2bb0-428f-8e91-504d1fd7ddb6")

// Resource is a database representation of a Kubernetes object.
type Resource interface {
	kmetav1.Object
	Obtain(k8s kmetav1.Object, clusterUuid types.UUID)
}

// Meta holds the common identifying fields of all synced resources.
type Meta struct {
	Uuid            types.UUID
	ClusterUuid     types.UUID
	Uid             ktypes.UID
	Namespace       string
	Name            string
	ResourceVersion string
	Created         types.UnixMilli
}

// ObtainMeta copies the common metadata from the given Kubernetes object.
func (m *Meta) ObtainMeta(k8s kmetav1.Object, clusterUuid types.UUID) {
	m.Uuid = EnsureUUID(k8s.GetUID())
	m.ClusterUuid = clusterUuid
	m.Uid = k8s.GetUID()
	m.Namespace = k8s.GetNamespace()
	m.Name = k8s.GetName()
	m.ResourceVersion = k8s.GetResourceVersion()
	m.Created = types.UnixMilli(k8s.GetCreationTimestamp().Time)
}

// Meta implements kmetav1.Object, but only the getters backed by its own fields
// are functional; every other accessor panics because it is never expected to be
// called on a database-side representation.
func (m *Meta) GetNamespace() string                            { return m.Namespace }
func (m *Meta) SetNamespace(string)                             { panic("Not expected to be called") }
func (m *Meta) GetName() string                                 { return m.Name }
func (m *Meta) SetName(string)                                  { panic("Not expected to be called") }
func (m *Meta) GetGenerateName() string                         { panic("Not expected to be called") }
func (m *Meta) SetGenerateName(string)                          { panic("Not expected to be called") }
func (m *Meta) GetUID() ktypes.UID                              { return m.Uid }
func (m *Meta) SetUID(ktypes.UID)                               { panic("Not expected to be called") }
func (m *Meta) GetResourceVersion() string                      { return m.ResourceVersion }
func (m *Meta) SetResourceVersion(string)                       { panic("Not expected to be called") }
func (m *Meta) GetGeneration() int64                            { panic("Not expected to be called") }
func (m *Meta) SetGeneration(int64)                             { panic("Not expected to be called") }
func (m *Meta) GetSelfLink() string                             { panic("Not expected to be called") }
func (m *Meta) SetSelfLink(string)                              { panic("Not expected to be called") }
func (m *Meta) GetCreationTimestamp() kmetav1.Time              { return kmetav1.NewTime(m.Created.Time()) }
func (m *Meta) SetCreationTimestamp(kmetav1.Time)               { panic("Not expected to be called") }
func (m *Meta) GetDeletionTimestamp() *kmetav1.Time             { panic("Not expected to be called") }
func (m *Meta) SetDeletionTimestamp(*kmetav1.Time)              { panic("Not expected to be called") }
func (m *Meta) GetDeletionGracePeriodSeconds() *int64           { panic("Not expected to be called") }
func (m *Meta) SetDeletionGracePeriodSeconds(*int64)            { panic("Not expected to be called") }
func (m *Meta) GetLabels() map[string]string                    { panic("Not expected to be called") }
func (m *Meta) SetLabels(map[string]string)                     { panic("Not expected to be called") }
func (m *Meta) GetAnnotations() map[string]string               { panic("Not expected to be called") }
func (m *Meta) SetAnnotations(_ map[string]string)              { panic("Not expected to be called") }
func (m *Meta) GetFinalizers() []string                         { panic("Not expected to be called") }
func (m *Meta) SetFinalizers([]string)                          { panic("Not expected to be called") }
func (m *Meta) GetOwnerReferences() []kmetav1.OwnerReference    { panic("Not expected to be called") }
func (m *Meta) SetOwnerReferences([]kmetav1.OwnerReference)     { panic("Not expected to be called") }
func (m *Meta) GetManagedFields() []kmetav1.ManagedFieldsEntry  { panic("Not expected to be called") }
func (m *Meta) SetManagedFields([]kmetav1.ManagedFieldsEntry)   { panic("Not expected to be called") }

// EnsureUUID returns uid parsed as a UUID if possible and otherwise a UUIDv5
// derived from uid within NameSpaceKubernetes.
func EnsureUUID(uid ktypes.UID) types.UUID {
	if id, err := uuid.Parse(string(uid)); err == nil {
		return types.UUID{UUID: id}
	}

	return types.UUID{UUID: uuid.NewSHA1(NameSpaceKubernetes, []byte(uid))}
}

// NewUUID derives a deterministic UUIDv5 from data within the given namespace.
func NewUUID(space types.UUID, data string) types.UUID {
	return types.UUID{UUID: uuid.NewSHA1(space.UUID, []byte(data))}
}

// NewNullableString converts s into sql.NullString: empty strings and nil
// pointers become NULL; errors use their message; other string-convertible
// pointer types are converted via reflection. Unsupported types panic.
func NewNullableString(s any) sql.NullString {
	if v, ok := s.(string); ok {
		return sql.NullString{Valid: v != "", String: v}
	}

	if v, ok := s.(*string); ok {
		if v != nil {
			return sql.NullString{Valid: true, String: *v}
		}

		return sql.NullString{Valid: false, String: ""}
	}

	if v, ok := s.(error); ok {
		return sql.NullString{Valid: true, String: v.Error()}
	}

	if s == nil {
		return sql.NullString{Valid: false, String: ""}
	}

	// Fallback for pointers to named string types (e.g. *kbatchv1.ConcurrencyPolicy).
	v := reflect.ValueOf(s)
	if v.Kind() == reflect.Pointer {
		if v.IsNil() {
			return sql.NullString{Valid: false, String: ""}
		}

		stringType := reflect.TypeOf("")
		if v.Elem().CanConvert(stringType) {
			v := v.Elem().Convert(stringType).Interface().(string)

			return sql.NullString{Valid: v != "", String: v}
		}
	}

	panic(fmt.Sprintf("invalid type %T", s))
}

// Assert interface compliance.
var (
	_ kmetav1.Object = (*Meta)(nil)
)
--------------------------------------------------------------------------------
/pkg/schema/v1/cron_job.go:
--------------------------------------------------------------------------------
package v1

import (
	"database/sql"
	"fmt"
	"github.com/icinga/icinga-go-library/types"
	"github.com/icinga/icinga-kubernetes/pkg/database"
	kbatchv1 "k8s.io/api/batch/v1"
	kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kruntime "k8s.io/apimachinery/pkg/runtime"
	kserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	"strings"
	"time"
)

// CronJob is the database representation of a Kubernetes CronJob,
// including its derived Icinga state and label/annotation relations.
type CronJob struct {
	Meta
	Schedule                   string
	Timezone                   sql.NullString
	StartingDeadlineSeconds    sql.NullInt64
	ConcurrencyPolicy          string
	Suspend                    types.Bool
	SuccessfulJobsHistoryLimit int32
	FailedJobsHistoryLimit     int32
	Active                     int32
	LastScheduleTime           types.UnixMilli
	LastSuccessfulTime         types.UnixMilli
	Yaml                       string
	IcingaState                IcingaState
	IcingaStateReason          string
	Labels                     []Label              `db:"-"`
	CronJobLabels              []CronJobLabel       `db:"-"`
	ResourceLabels             []ResourceLabel      `db:"-"`
	Annotations                []Annotation         `db:"-"`
	CronJobAnnotations         []CronJobAnnotation  `db:"-"`
	ResourceAnnotations        []ResourceAnnotation `db:"-"`
	Favorites                  []Favorite           `db:"-"`
}

// CronJobLabel links a CronJob to a Label.
type CronJobLabel struct {
	CronJobUuid types.UUID
	LabelUuid   types.UUID
}

// CronJobAnnotation links a CronJob to an Annotation.
type CronJobAnnotation struct {
	CronJobUuid    types.UUID
	AnnotationUuid types.UUID
}

// NewCronJob returns an empty CronJob ready for Obtain.
func NewCronJob() Resource {
	return &CronJob{}
}

// Obtain copies the relevant spec and status fields from the given Kubernetes
// CronJob, derives the Icinga state, collects label/annotation rows, and
// renders the object as YAML.
func (c *CronJob) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) {
	c.ObtainMeta(k8s, clusterUuid)

	cronJob := k8s.(*kbatchv1.CronJob)

	c.Schedule = cronJob.Spec.Schedule
	c.Timezone = NewNullableString(cronJob.Spec.TimeZone)
	if cronJob.Spec.StartingDeadlineSeconds != nil {
		c.StartingDeadlineSeconds.Int64 = *cronJob.Spec.StartingDeadlineSeconds
		c.StartingDeadlineSeconds.Valid = true
	}
	c.ConcurrencyPolicy = string(cronJob.Spec.ConcurrencyPolicy)
	// It is safe to use the pointer directly here,
	// as Kubernetes sets it to false by default.
	c.Suspend.Bool = *cronJob.Spec.Suspend
	c.Suspend.Valid = true
	// It is safe to use the pointer directly here,
	// as Kubernetes sets it to 3 if not configured.
	c.SuccessfulJobsHistoryLimit = *cronJob.Spec.SuccessfulJobsHistoryLimit
	// It is safe to use the pointer directly here,
	// as Kubernetes sets it to 1 if not configured.
	c.FailedJobsHistoryLimit = *cronJob.Spec.FailedJobsHistoryLimit

	c.Active = int32(len(cronJob.Status.Active))
	if cronJob.Status.LastScheduleTime != nil {
		c.LastScheduleTime = types.UnixMilli(cronJob.Status.LastScheduleTime.Time)
	}
	if cronJob.Status.LastSuccessfulTime != nil {
		c.LastSuccessfulTime = types.UnixMilli(cronJob.Status.LastSuccessfulTime.Time)
	}

	c.IcingaState, c.IcingaStateReason = c.getIcingaState()

	for labelName, labelValue := range cronJob.Labels {
		labelUuid := NewUUID(c.Uuid, strings.ToLower(labelName+":"+labelValue))
		c.Labels = append(c.Labels, Label{
			Uuid:  labelUuid,
			Name:  labelName,
			Value: labelValue,
		})
		c.CronJobLabels = append(c.CronJobLabels, CronJobLabel{
			CronJobUuid: c.Uuid,
			LabelUuid:   labelUuid,
		})
		c.ResourceLabels = append(c.ResourceLabels, ResourceLabel{
			ResourceUuid: c.Uuid,
			LabelUuid:    labelUuid,
		})
	}

	for annotationName, annotationValue := range cronJob.Annotations {
		annotationUuid := NewUUID(c.Uuid, strings.ToLower(annotationName+":"+annotationValue))
		c.Annotations = append(c.Annotations, Annotation{
			Uuid:  annotationUuid,
			Name:  annotationName,
			Value: annotationValue,
		})
		c.CronJobAnnotations = append(c.CronJobAnnotations, CronJobAnnotation{
			CronJobUuid:    c.Uuid,
			AnnotationUuid: annotationUuid,
		})
		c.ResourceAnnotations = append(c.ResourceAnnotations, ResourceAnnotation{
			ResourceUuid:   c.Uuid,
			AnnotationUuid: annotationUuid,
		})
	}

	// Serialize the object as YAML for display purposes.
	scheme := kruntime.NewScheme()
	_ = kbatchv1.AddToScheme(scheme)
	codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kbatchv1.SchemeGroupVersion)
	output, _ := kruntime.Encode(codec, cronJob)
	c.Yaml = string(output)
}

// getIcingaState derives the Icinga state and a human-readable reason from the
// schedule/success timestamps and the starting deadline. Check order matters:
// never-scheduled and never-successful take precedence over deadline and
// suspension checks.
func (c *CronJob) getIcingaState() (IcingaState, string) {
	now := time.Now()

	if c.LastScheduleTime.Time().IsZero() {
		return Warning, fmt.Sprintf("CronJob %s has never been scheduled.", c.Name)
	}

	if c.LastSuccessfulTime.Time().IsZero() {
		return Critical, fmt.Sprintf("CronJob %s has never completed successfully.", c.Name)
	}

	if c.StartingDeadlineSeconds.Valid {
		deadlineDuration := time.Duration(c.StartingDeadlineSeconds.Int64) * time.Second
		deadline := c.LastScheduleTime.Time().Add(deadlineDuration)

		if now.After(deadline) {
			return Critical, fmt.Sprintf("CronJob %s missed its starting deadline. Last scheduled at %v, deadline was %v.",
				c.Name, c.LastScheduleTime.Time().Format(time.RFC3339), deadline.Format(time.RFC3339))
		}
	}

	if c.LastScheduleTime.Time().After(c.LastSuccessfulTime.Time()) {
		return Warning, fmt.Sprintf("CronJob %s has recent schedules without success. Last successful run: %v, last scheduled: %v.",
			c.Name, c.LastSuccessfulTime.Time().Format(time.RFC3339), c.LastScheduleTime.Time().Format(time.RFC3339))
	}

	if c.Suspend.Valid && c.Suspend.Bool {
		return Warning, fmt.Sprintf("CronJob %s is currently suspended.", c.Name)
	}

	return Ok, fmt.Sprintf("CronJob %s is operating normally. Last successful run: %v.",
		c.Name, c.LastSuccessfulTime.Time().Format(time.RFC3339))
}

// Relations declares the dependent rows of a CronJob. Shared Label/Annotation
// rows are excluded from cascade deletion since other resources reference them.
func (c *CronJob) Relations() []database.Relation {
	fk := database.WithForeignKey("cron_job_uuid")

	return []database.Relation{
		database.HasMany(c.ResourceLabels, database.WithForeignKey("resource_uuid")),
		database.HasMany(c.Labels, database.WithoutCascadeDelete()),
		database.HasMany(c.CronJobLabels, fk),
		database.HasMany(c.ResourceAnnotations, database.WithForeignKey("resource_uuid")),
		database.HasMany(c.Annotations, database.WithoutCascadeDelete()),
		database.HasMany(c.CronJobAnnotations, fk),
		database.HasMany(c.Favorites, database.WithForeignKey("resource_uuid")),
	}
}
--------------------------------------------------------------------------------
/pkg/schema/v1/daemon_set.go:
--------------------------------------------------------------------------------
package v1

import (
	"fmt"
	"github.com/icinga/icinga-go-library/strcase"
	"github.com/icinga/icinga-go-library/types"
	"github.com/icinga/icinga-kubernetes/pkg/database"
	"github.com/icinga/icinga-kubernetes/pkg/notifications"
	kappsv1 "k8s.io/api/apps/v1"
	kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kruntime "k8s.io/apimachinery/pkg/runtime"
	kserializer "k8s.io/apimachinery/pkg/runtime/serializer"
	kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	ktypes "k8s.io/apimachinery/pkg/types"
	"net/url"
	"strings"
)

// DaemonSet is the database representation of a Kubernetes DaemonSet,
// including its derived Icinga state and condition/owner/label/annotation relations.
type DaemonSet struct {
	Meta
	UpdateStrategy         string
	MinReadySeconds        int32
	DesiredNumberScheduled int32
	CurrentNumberScheduled int32
	NumberMisscheduled     int32
	NumberReady            int32
	// NOTE(review): this field mirrors Status.UpdatedNumberScheduled but is
	// named UpdateNumberScheduled — confirm against the database schema before renaming.
	UpdateNumberScheduled int32
	NumberAvailable       int32
	NumberUnavailable     int32
	Yaml                  string
	IcingaState           IcingaState
	IcingaStateReason     string
	Conditions            []DaemonSetCondition  `db:"-"`
	Owners                []DaemonSetOwner      `db:"-"`
	Labels                []Label               `db:"-"`
	DaemonSetLabels       []DaemonSetLabel      `db:"-"`
	ResourceLabels        []ResourceLabel       `db:"-"`
	Annotations           []Annotation          `db:"-"`
	DaemonSetAnnotations  []DaemonSetAnnotation `db:"-"`
	ResourceAnnotations   []ResourceAnnotation  `db:"-"`
	Favorites             []Favorite            `db:"-"`
}

// DaemonSetCondition is one status condition of a DaemonSet.
type DaemonSetCondition struct {
	DaemonSetUuid  types.UUID
	Type           string
	Status         string
	LastTransition types.UnixMilli
	Reason         string
	Message        string
}

// DaemonSetOwner is one owner reference of a DaemonSet.
type DaemonSetOwner struct {
	DaemonSetUuid      types.UUID
	OwnerUuid          types.UUID
	Kind               string
	Name               string
	Uid                ktypes.UID
	Controller         types.Bool
	BlockOwnerDeletion types.Bool
}

// DaemonSetLabel links a DaemonSet to a Label.
type DaemonSetLabel struct {
	DaemonSetUuid types.UUID
	LabelUuid     types.UUID
}

// DaemonSetAnnotation links a DaemonSet to an Annotation.
type DaemonSetAnnotation struct {
	DaemonSetUuid  types.UUID
	AnnotationUuid types.UUID
}

// NewDaemonSet returns an empty DaemonSet ready for Obtain.
func NewDaemonSet() Resource {
	return &DaemonSet{}
}

// Obtain copies the relevant spec and status fields from the given Kubernetes
// DaemonSet, derives the Icinga state, collects condition/owner/label/annotation
// rows, and renders the object as YAML.
func (d *DaemonSet) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) {
	d.ObtainMeta(k8s, clusterUuid)

	daemonSet := k8s.(*kappsv1.DaemonSet)

	d.UpdateStrategy = string(daemonSet.Spec.UpdateStrategy.Type)
	d.MinReadySeconds = daemonSet.Spec.MinReadySeconds
	d.DesiredNumberScheduled = daemonSet.Status.DesiredNumberScheduled
	d.CurrentNumberScheduled = daemonSet.Status.CurrentNumberScheduled
	d.NumberMisscheduled = daemonSet.Status.NumberMisscheduled
	d.NumberReady = daemonSet.Status.NumberReady
	d.UpdateNumberScheduled = daemonSet.Status.UpdatedNumberScheduled
	d.NumberAvailable = daemonSet.Status.NumberAvailable
	d.NumberUnavailable = daemonSet.Status.NumberUnavailable
	d.IcingaState, d.IcingaStateReason = d.getIcingaState()

	for _, condition := range daemonSet.Status.Conditions {
		d.Conditions = append(d.Conditions, DaemonSetCondition{
			DaemonSetUuid:  d.Uuid,
			Type:           string(condition.Type),
			Status:         string(condition.Status),
			LastTransition: types.UnixMilli(condition.LastTransitionTime.Time),
			Reason:         condition.Reason,
			Message:        condition.Message,
		})
	}

	for _, ownerReference := range daemonSet.OwnerReferences {
		// Both flags are *bool in the API; nil is treated as false.
		var blockOwnerDeletion, controller bool
		if ownerReference.BlockOwnerDeletion != nil {
			blockOwnerDeletion = *ownerReference.BlockOwnerDeletion
		}
		if ownerReference.Controller != nil {
			controller = *ownerReference.Controller
		}
		d.Owners = append(d.Owners, DaemonSetOwner{
			DaemonSetUuid: d.Uuid,
			OwnerUuid:     EnsureUUID(ownerReference.UID),
			Kind:          strcase.Snake(ownerReference.Kind),
			Name:          ownerReference.Name,
			Uid:           ownerReference.UID,
			BlockOwnerDeletion: types.Bool{
				Bool:  blockOwnerDeletion,
				Valid: true,
			},
			Controller: types.Bool{
				Bool:  controller,
				Valid: true,
			},
		})
	}

	for labelName, labelValue := range daemonSet.Labels {
		labelUuid := NewUUID(d.Uuid, strings.ToLower(labelName+":"+labelValue))
		d.Labels = append(d.Labels, Label{
			Uuid:  labelUuid,
			Name:  labelName,
			Value: labelValue,
		})
		d.DaemonSetLabels = append(d.DaemonSetLabels, DaemonSetLabel{
			DaemonSetUuid: d.Uuid,
			LabelUuid:     labelUuid,
		})
		d.ResourceLabels = append(d.ResourceLabels, ResourceLabel{
			ResourceUuid: d.Uuid,
			LabelUuid:    labelUuid,
		})
	}

	for annotationName, annotationValue := range daemonSet.Annotations {
		annotationUuid := NewUUID(d.Uuid, strings.ToLower(annotationName+":"+annotationValue))
		d.Annotations = append(d.Annotations, Annotation{
			Uuid:  annotationUuid,
			Name:  annotationName,
			Value: annotationValue,
		})
		d.DaemonSetAnnotations = append(d.DaemonSetAnnotations, DaemonSetAnnotation{
			DaemonSetUuid:  d.Uuid,
			AnnotationUuid: annotationUuid,
		})
		d.ResourceAnnotations = append(d.ResourceAnnotations, ResourceAnnotation{
			ResourceUuid:   d.Uuid,
			AnnotationUuid: annotationUuid,
		})
	}

	// Serialize the object as YAML for display purposes.
	scheme := kruntime.NewScheme()
	_ = kappsv1.AddToScheme(scheme)
	codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kappsv1.SchemeGroupVersion)
	output, _ := kruntime.Encode(codec, daemonSet)
	d.Yaml = string(output)
}

// MarshalEvent implements notifications.Marshaler for DaemonSet state changes.
func (d *DaemonSet) MarshalEvent() (notifications.Event, error) {
	return notifications.Event{
		Name:     d.Namespace + "/" + d.Name,
		Severity: d.IcingaState.ToSeverity(),
		Message:  d.IcingaStateReason,
		URL:      &url.URL{Path: "/daemonset", RawQuery: fmt.Sprintf("id=%s", d.Uuid)},
		Tags: map[string]string{
			"uuid":      d.Uuid.String(),
			"name":      d.Name,
			"namespace": d.Namespace,
			"resource":  "daemon_set",
		},
	}, nil
}

// getIcingaState derives the Icinga state from the scheduled/available pod
// counts. (Definition continues beyond this chunk.)
func (d *DaemonSet) getIcingaState() (IcingaState, string) {
	if d.DesiredNumberScheduled < 1 {
		reason := fmt.Sprintf("DaemonSet %s/%s has an invalid desired node count: %d.", d.Namespace, d.Name, d.DesiredNumberScheduled)

		return Unknown, reason
	}

	switch {
	case d.NumberAvailable == 0:
		reason := fmt.Sprintf("DaemonSet %s/%s does not have a single pod available which should run on %d desired nodes.", d.Namespace, d.Name, d.DesiredNumberScheduled)

		return Critical, reason
	case d.NumberAvailable < d.DesiredNumberScheduled:
198 | reason := fmt.Sprintf("DaemonSet %s/%s pods are only available on %d out of %d desired nodes.", d.Namespace, d.Name, d.NumberAvailable, d.DesiredNumberScheduled) 199 | 200 | return Warning, reason 201 | default: 202 | reason := fmt.Sprintf("DaemonSet %s/%s has pods available on all %d desired nodes.", d.Namespace, d.Name, d.DesiredNumberScheduled) 203 | 204 | return Ok, reason 205 | } 206 | } 207 | 208 | func (d *DaemonSet) Relations() []database.Relation { 209 | fk := database.WithForeignKey("daemon_set_uuid") 210 | 211 | return []database.Relation{ 212 | database.HasMany(d.Conditions, fk), 213 | database.HasMany(d.Owners, fk), 214 | database.HasMany(d.ResourceLabels, database.WithForeignKey("resource_uuid")), 215 | database.HasMany(d.Labels, database.WithoutCascadeDelete()), 216 | database.HasMany(d.DaemonSetLabels, fk), 217 | database.HasMany(d.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 218 | database.HasMany(d.Annotations, database.WithoutCascadeDelete()), 219 | database.HasMany(d.DaemonSetAnnotations, fk), 220 | database.HasMany(d.Favorites, database.WithForeignKey("resource_uuid")), 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /pkg/schema/v1/deployment.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "fmt" 5 | "github.com/icinga/icinga-go-library/strcase" 6 | "github.com/icinga/icinga-go-library/types" 7 | "github.com/icinga/icinga-kubernetes/pkg/database" 8 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 9 | kappsv1 "k8s.io/api/apps/v1" 10 | kcorev1 "k8s.io/api/core/v1" 11 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | kruntime "k8s.io/apimachinery/pkg/runtime" 13 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 14 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 15 | ktypes "k8s.io/apimachinery/pkg/types" 16 | "net/url" 17 | "strings" 18 | ) 19 | 20 | type Deployment 
struct { 21 | Meta 22 | Strategy string 23 | MinReadySeconds int32 24 | ProgressDeadlineSeconds int32 25 | Paused types.Bool 26 | DesiredReplicas int32 27 | ActualReplicas int32 28 | UpdatedReplicas int32 29 | ReadyReplicas int32 30 | AvailableReplicas int32 31 | UnavailableReplicas int32 32 | Yaml string 33 | IcingaState IcingaState 34 | IcingaStateReason string 35 | Conditions []DeploymentCondition `db:"-"` 36 | Owners []DeploymentOwner `db:"-"` 37 | Labels []Label `db:"-"` 38 | DeploymentLabels []DeploymentLabel `db:"-"` 39 | ResourceLabels []ResourceLabel `db:"-"` 40 | Annotations []Annotation `db:"-"` 41 | DeploymentAnnotations []DeploymentAnnotation `db:"-"` 42 | ResourceAnnotations []ResourceAnnotation `db:"-"` 43 | Favorites []Favorite `db:"-"` 44 | } 45 | 46 | type DeploymentCondition struct { 47 | DeploymentUuid types.UUID 48 | Type string 49 | Status string 50 | LastUpdate types.UnixMilli 51 | LastTransition types.UnixMilli 52 | Reason string 53 | Message string 54 | } 55 | 56 | type DeploymentOwner struct { 57 | DeploymentUuid types.UUID 58 | OwnerUuid types.UUID 59 | Kind string 60 | Name string 61 | Uid ktypes.UID 62 | Controller types.Bool 63 | BlockOwnerDeletion types.Bool 64 | } 65 | 66 | type DeploymentLabel struct { 67 | DeploymentUuid types.UUID 68 | LabelUuid types.UUID 69 | } 70 | 71 | type DeploymentAnnotation struct { 72 | DeploymentUuid types.UUID 73 | AnnotationUuid types.UUID 74 | } 75 | 76 | func NewDeployment() Resource { 77 | return &Deployment{} 78 | } 79 | 80 | func (d *Deployment) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 81 | d.ObtainMeta(k8s, clusterUuid) 82 | 83 | deployment := k8s.(*kappsv1.Deployment) 84 | 85 | d.Strategy = string(deployment.Spec.Strategy.Type) 86 | d.MinReadySeconds = deployment.Spec.MinReadySeconds 87 | // It is safe to use the pointer directly here, 88 | // as Kubernetes sets it to 600s if no deadline is configured. 
89 | d.ProgressDeadlineSeconds = *deployment.Spec.ProgressDeadlineSeconds 90 | d.Paused = types.Bool{ 91 | Bool: deployment.Spec.Paused, 92 | Valid: true, 93 | } 94 | // It is safe to use the pointer directly here, 95 | // as Kubernetes sets it to 1 if no replicas are configured. 96 | d.DesiredReplicas = *deployment.Spec.Replicas 97 | d.ActualReplicas = deployment.Status.Replicas 98 | d.UpdatedReplicas = deployment.Status.UpdatedReplicas 99 | d.AvailableReplicas = deployment.Status.AvailableReplicas 100 | d.ReadyReplicas = deployment.Status.ReadyReplicas 101 | d.UnavailableReplicas = deployment.Status.UnavailableReplicas 102 | d.IcingaState, d.IcingaStateReason = d.getIcingaState() 103 | 104 | for _, condition := range deployment.Status.Conditions { 105 | d.Conditions = append(d.Conditions, DeploymentCondition{ 106 | DeploymentUuid: d.Uuid, 107 | Type: string(condition.Type), 108 | Status: string(condition.Status), 109 | LastUpdate: types.UnixMilli(condition.LastUpdateTime.Time), 110 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 111 | Reason: condition.Reason, 112 | Message: condition.Message, 113 | }) 114 | } 115 | 116 | for _, ownerReference := range deployment.OwnerReferences { 117 | var blockOwnerDeletion, controller bool 118 | if ownerReference.BlockOwnerDeletion != nil { 119 | blockOwnerDeletion = *ownerReference.BlockOwnerDeletion 120 | } 121 | if ownerReference.Controller != nil { 122 | controller = *ownerReference.Controller 123 | } 124 | d.Owners = append(d.Owners, DeploymentOwner{ 125 | DeploymentUuid: d.Uuid, 126 | OwnerUuid: EnsureUUID(ownerReference.UID), 127 | Kind: strcase.Snake(ownerReference.Kind), 128 | Name: ownerReference.Name, 129 | Uid: ownerReference.UID, 130 | BlockOwnerDeletion: types.Bool{ 131 | Bool: blockOwnerDeletion, 132 | Valid: true, 133 | }, 134 | Controller: types.Bool{ 135 | Bool: controller, 136 | Valid: true, 137 | }, 138 | }) 139 | } 140 | 141 | for labelName, labelValue := range deployment.Labels { 142 
| labelUuid := NewUUID(d.Uuid, strings.ToLower(labelName+":"+labelValue)) 143 | d.Labels = append(d.Labels, Label{ 144 | Uuid: labelUuid, 145 | Name: labelName, 146 | Value: labelValue, 147 | }) 148 | d.DeploymentLabels = append(d.DeploymentLabels, DeploymentLabel{ 149 | DeploymentUuid: d.Uuid, 150 | LabelUuid: labelUuid, 151 | }) 152 | d.ResourceLabels = append(d.ResourceLabels, ResourceLabel{ 153 | ResourceUuid: d.Uuid, 154 | LabelUuid: labelUuid, 155 | }) 156 | } 157 | 158 | for annotationName, annotationValue := range deployment.Annotations { 159 | annotationUuid := NewUUID(d.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 160 | d.Annotations = append(d.Annotations, Annotation{ 161 | Uuid: annotationUuid, 162 | Name: annotationName, 163 | Value: annotationValue, 164 | }) 165 | d.DeploymentAnnotations = append(d.DeploymentAnnotations, DeploymentAnnotation{ 166 | DeploymentUuid: d.Uuid, 167 | AnnotationUuid: annotationUuid, 168 | }) 169 | d.ResourceAnnotations = append(d.ResourceAnnotations, ResourceAnnotation{ 170 | ResourceUuid: d.Uuid, 171 | AnnotationUuid: annotationUuid, 172 | }) 173 | } 174 | 175 | scheme := kruntime.NewScheme() 176 | _ = kappsv1.AddToScheme(scheme) 177 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kappsv1.SchemeGroupVersion) 178 | output, _ := kruntime.Encode(codec, deployment) 179 | d.Yaml = string(output) 180 | } 181 | 182 | func (d *Deployment) MarshalEvent() (notifications.Event, error) { 183 | return notifications.Event{ 184 | Name: d.Namespace + "/" + d.Name, 185 | Severity: d.IcingaState.ToSeverity(), 186 | Message: d.IcingaStateReason, 187 | URL: &url.URL{Path: "/deployment", RawQuery: fmt.Sprintf("id=%s", d.Uuid)}, 188 | Tags: map[string]string{ 189 | "uuid": d.Uuid.String(), 190 | "name": d.Name, 191 | "namespace": d.Namespace, 192 | "resource": "deployment", 193 | }, 194 | }, nil 195 | } 196 | 197 | func (d *Deployment) 
getIcingaState() (IcingaState, string) { 198 | for _, condition := range d.Conditions { 199 | if condition.Type == string(kappsv1.DeploymentAvailable) && condition.Status != string(kcorev1.ConditionTrue) { 200 | reason := fmt.Sprintf("Deployment %s/%s is not available: %s.", d.Namespace, d.Name, condition.Message) 201 | 202 | return Critical, reason 203 | } 204 | if condition.Type == string(kappsv1.ReplicaSetReplicaFailure) && condition.Status != string(kcorev1.ConditionTrue) { 205 | reason := fmt.Sprintf("Deployment %s/%s has replica failure: %s.", d.Namespace, d.Name, condition.Message) 206 | 207 | return Critical, reason 208 | } 209 | } 210 | 211 | switch { 212 | case d.UnavailableReplicas > 0: 213 | reason := fmt.Sprintf("Deployment %s/%s has %d unavailable replicas.", d.Namespace, d.Name, d.UnavailableReplicas) 214 | 215 | return Critical, reason 216 | case d.AvailableReplicas < d.DesiredReplicas: 217 | reason := fmt.Sprintf("Deployment %s/%s only has %d out of %d desired replicas available.", d.Namespace, d.Name, d.AvailableReplicas, d.DesiredReplicas) 218 | 219 | return Warning, reason 220 | default: 221 | reason := fmt.Sprintf("Deployment %s/%s has all %d desired replicas available.", d.Namespace, d.Name, d.DesiredReplicas) 222 | 223 | return Ok, reason 224 | } 225 | } 226 | 227 | func (d *Deployment) Relations() []database.Relation { 228 | fk := database.WithForeignKey("deployment_uuid") 229 | 230 | return []database.Relation{ 231 | database.HasMany(d.Conditions, fk), 232 | database.HasMany(d.Owners, fk), 233 | database.HasMany(d.ResourceLabels, database.WithForeignKey("resource_uuid")), 234 | database.HasMany(d.Labels, database.WithoutCascadeDelete()), 235 | database.HasMany(d.DeploymentLabels, fk), 236 | database.HasMany(d.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 237 | database.HasMany(d.Annotations, database.WithoutCascadeDelete()), 238 | database.HasMany(d.DeploymentAnnotations, fk), 239 | database.HasMany(d.Favorites, 
database.WithForeignKey("resource_uuid")), 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /pkg/schema/v1/endpoint.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | "github.com/icinga/icinga-kubernetes/pkg/database" 7 | v1 "k8s.io/api/core/v1" 8 | kdiscoveryv1 "k8s.io/api/discovery/v1" 9 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ktypes "k8s.io/apimachinery/pkg/types" 11 | "strings" 12 | ) 13 | 14 | type EndpointSlice struct { 15 | Meta 16 | AddressType string 17 | Endpoints []Endpoint `db:"-"` 18 | Labels []Label `db:"-"` 19 | EndpointLabels []EndpointSliceLabel `db:"-"` 20 | EndpointTargetRefs []EndpointTargetRef `db:"-"` 21 | } 22 | 23 | type EndpointSliceLabel struct { 24 | EndpointSliceUuid types.UUID 25 | LabelUuid types.UUID 26 | } 27 | 28 | type Endpoint struct { 29 | Uuid types.UUID 30 | EndpointSliceUuid types.UUID 31 | HostName string 32 | NodeName string 33 | Ready types.Bool 34 | Serving types.Bool 35 | Terminating types.Bool 36 | Address string 37 | PortName string 38 | Protocol string 39 | Port int32 40 | AppProtocol string 41 | } 42 | 43 | type EndpointTargetRef struct { 44 | EndpointSliceUuid types.UUID 45 | Kind sql.NullString 46 | Namespace string 47 | Name string 48 | Uid ktypes.UID 49 | ApiVersion string 50 | ResourceVersion string 51 | } 52 | 53 | func NewEndpointSlice() Resource { 54 | return &EndpointSlice{} 55 | } 56 | 57 | func (e *EndpointSlice) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 58 | e.ObtainMeta(k8s, clusterUuid) 59 | 60 | endpointSlice := k8s.(*kdiscoveryv1.EndpointSlice) 61 | 62 | e.AddressType = string(endpointSlice.AddressType) 63 | 64 | for labelName, labelValue := range endpointSlice.Labels { 65 | labelUuid := NewUUID(e.Uuid, strings.ToLower(labelName+":"+labelValue)) 66 | e.Labels = append(e.Labels, Label{ 67 | 
Uuid: labelUuid, 68 | Name: labelName, 69 | Value: labelValue, 70 | }) 71 | e.EndpointLabels = append(e.EndpointLabels, EndpointSliceLabel{ 72 | EndpointSliceUuid: e.Uuid, 73 | LabelUuid: labelUuid, 74 | }) 75 | } 76 | 77 | for _, endpoint := range endpointSlice.Endpoints { 78 | var hostName, nodeName string 79 | if endpoint.Hostname != nil { 80 | hostName = *endpoint.Hostname 81 | } 82 | if endpoint.NodeName != nil { 83 | nodeName = *endpoint.NodeName 84 | } 85 | var ready, serving, terminating types.Bool 86 | if endpoint.Conditions.Ready != nil { 87 | ready.Bool = *endpoint.Conditions.Ready 88 | ready.Valid = true 89 | } 90 | if endpoint.Conditions.Serving != nil { 91 | serving.Bool = *endpoint.Conditions.Serving 92 | serving.Valid = true 93 | } 94 | if endpoint.Conditions.Terminating != nil { 95 | terminating.Bool = *endpoint.Conditions.Terminating 96 | terminating.Valid = true 97 | } 98 | for _, endpointPort := range endpointSlice.Ports { 99 | var name, protocol, appProtocol string 100 | var port int32 101 | if endpointPort.Name != nil { 102 | name = *endpointPort.Name 103 | } 104 | if endpointPort.Protocol != nil { 105 | protocol = string(*endpointPort.Protocol) 106 | } 107 | if endpointPort.Port != nil { 108 | port = *endpointPort.Port 109 | } 110 | if endpointPort.AppProtocol != nil { 111 | appProtocol = *endpointPort.AppProtocol 112 | } 113 | for _, address := range endpoint.Addresses { 114 | endpointUuid := NewUUID(e.Uuid, name+address+string(port)) 115 | e.Endpoints = append(e.Endpoints, Endpoint{ 116 | Uuid: endpointUuid, 117 | EndpointSliceUuid: e.Uuid, 118 | HostName: hostName, 119 | NodeName: nodeName, 120 | Ready: ready, 121 | Serving: serving, 122 | Terminating: terminating, 123 | PortName: name, 124 | Protocol: protocol, 125 | Port: port, 126 | AppProtocol: appProtocol, 127 | Address: address, 128 | }) 129 | } 130 | } 131 | var targetRef v1.ObjectReference 132 | if endpoint.TargetRef != nil { 133 | targetRef = *endpoint.TargetRef 134 | } 135 | var 
kind sql.NullString 136 | if targetRef.Kind != "" { 137 | kind.String = targetRef.Kind 138 | kind.Valid = true 139 | } 140 | e.EndpointTargetRefs = append(e.EndpointTargetRefs, EndpointTargetRef{ 141 | EndpointSliceUuid: e.Uuid, 142 | Kind: kind, 143 | Namespace: targetRef.Namespace, 144 | Name: targetRef.Name, 145 | Uid: targetRef.UID, 146 | ApiVersion: targetRef.APIVersion, 147 | ResourceVersion: targetRef.ResourceVersion, 148 | }) 149 | } 150 | } 151 | 152 | func (e *EndpointSlice) Relations() []database.Relation { 153 | fk := database.WithForeignKey("endpoint_slice_uuid") 154 | 155 | return []database.Relation{ 156 | database.HasMany(e.Endpoints, fk), 157 | database.HasMany(e.Labels, database.WithoutCascadeDelete()), 158 | database.HasMany(e.EndpointLabels, fk), 159 | database.HasMany(e.EndpointTargetRefs, fk), 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /pkg/schema/v1/event.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | keventsv1 "k8s.io/api/events/v1" 7 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | kruntime "k8s.io/apimachinery/pkg/runtime" 9 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 10 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 11 | ) 12 | 13 | type Event struct { 14 | Meta 15 | ReferenceUuid types.UUID 16 | ReportingController sql.NullString 17 | ReportingInstance sql.NullString 18 | Action sql.NullString 19 | Reason string 20 | Note string 21 | Type string 22 | ReferenceKind string 23 | ReferenceNamespace sql.NullString 24 | ReferenceName string 25 | FirstSeen types.UnixMilli 26 | LastSeen types.UnixMilli 27 | Count int32 28 | Yaml string 29 | } 30 | 31 | func NewEvent() Resource { 32 | return &Event{} 33 | } 34 | 35 | func (e *Event) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 36 | e.ObtainMeta(k8s, clusterUuid) 
37 | 38 | event := k8s.(*keventsv1.Event) 39 | 40 | e.ReferenceUuid = EnsureUUID(event.Regarding.UID) 41 | e.ReportingController = NewNullableString(event.ReportingController) 42 | e.ReportingInstance = NewNullableString(event.ReportingInstance) 43 | e.Action = NewNullableString(event.Action) 44 | e.Reason = event.Reason 45 | e.Note = event.Note 46 | e.Type = event.Type 47 | e.ReferenceKind = event.Regarding.Kind 48 | e.ReferenceNamespace = NewNullableString(event.Regarding.Namespace) 49 | e.ReferenceName = event.Regarding.Name 50 | 51 | if !event.EventTime.Time.IsZero() { 52 | e.FirstSeen = types.UnixMilli(event.EventTime.Time) 53 | } else if !event.DeprecatedFirstTimestamp.Time.IsZero() { 54 | e.FirstSeen = types.UnixMilli(event.DeprecatedFirstTimestamp.Time) 55 | } else { 56 | e.FirstSeen = types.UnixMilli(k8s.GetCreationTimestamp().Time) 57 | } 58 | 59 | var count int32 60 | var lastSeen types.UnixMilli 61 | 62 | if event.Series != nil { 63 | if !event.Series.LastObservedTime.IsZero() { 64 | lastSeen = types.UnixMilli(event.Series.LastObservedTime.Time) 65 | } 66 | 67 | count = event.Series.Count 68 | } 69 | 70 | if lastSeen.Time().IsZero() { 71 | if !event.DeprecatedLastTimestamp.IsZero() { 72 | lastSeen = types.UnixMilli(event.DeprecatedLastTimestamp.Time) 73 | } else { 74 | lastSeen = e.FirstSeen 75 | } 76 | } 77 | 78 | count = max(count, event.DeprecatedCount, 1) 79 | 80 | e.LastSeen = lastSeen 81 | e.Count = count 82 | 83 | scheme := kruntime.NewScheme() 84 | _ = keventsv1.AddToScheme(scheme) 85 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), keventsv1.SchemeGroupVersion) 86 | output, _ := kruntime.Encode(codec, event) 87 | e.Yaml = string(output) 88 | } 89 | -------------------------------------------------------------------------------- /pkg/schema/v1/favorite.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | 
import "github.com/icinga/icinga-go-library/types" 4 | 5 | type Favorite struct { 6 | ResourceUuid types.UUID 7 | Kind string 8 | Username string 9 | Priority int 10 | } 11 | -------------------------------------------------------------------------------- /pkg/schema/v1/icinga_state.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql/driver" 5 | "fmt" 6 | ) 7 | 8 | type IcingaState uint8 9 | 10 | const ( 11 | Ok IcingaState = iota 12 | Pending 13 | Unknown 14 | Warning 15 | Critical 16 | ) 17 | 18 | func (s IcingaState) String() string { 19 | switch s { 20 | case Ok: 21 | return "ok" 22 | case Warning: 23 | return "warning" 24 | case Critical: 25 | return "critical" 26 | case Unknown: 27 | return "unknown" 28 | case Pending: 29 | return "pending" 30 | default: 31 | panic(fmt.Sprintf("invalid Icinga state %d", s)) 32 | } 33 | } 34 | 35 | // Value implements the driver.Valuer interface. 36 | func (s IcingaState) Value() (driver.Value, error) { 37 | return s.String(), nil 38 | } 39 | 40 | func (s IcingaState) ToSeverity() string { 41 | switch s { 42 | case Ok: 43 | return "ok" 44 | case Pending: 45 | return "info" 46 | case Unknown: 47 | return "err" 48 | case Warning: 49 | return "warning" 50 | case Critical: 51 | return "crit" 52 | default: 53 | panic(fmt.Sprintf("invalid Icinga state %d", s)) 54 | } 55 | } 56 | 57 | // Assert interface compliance. 
58 | var ( 59 | _ fmt.Stringer = (*IcingaState)(nil) 60 | _ driver.Valuer = (*IcingaState)(nil) 61 | ) 62 | -------------------------------------------------------------------------------- /pkg/schema/v1/ingress.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | "github.com/icinga/icinga-kubernetes/pkg/database" 7 | networkingv1 "k8s.io/api/networking/v1" 8 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | kruntime "k8s.io/apimachinery/pkg/runtime" 10 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 11 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 12 | "strings" 13 | ) 14 | 15 | type Ingress struct { 16 | Meta 17 | Yaml string 18 | IngressTls []IngressTls `db:"-"` 19 | IngressBackendService []IngressBackendService `db:"-"` 20 | IngressBackendResource []IngressBackendResource `db:"-"` 21 | IngressRule []IngressRule `db:"-"` 22 | Labels []Label `db:"-"` 23 | IngressLabels []IngressLabel `db:"-"` 24 | ResourceLabels []ResourceLabel `db:"-"` 25 | Annotations []Annotation `db:"-"` 26 | IngressAnnotations []IngressAnnotation `db:"-"` 27 | ResourceAnnotations []ResourceAnnotation `db:"-"` 28 | Favorites []Favorite `db:"-"` 29 | } 30 | 31 | type IngressTls struct { 32 | IngressUuid types.UUID 33 | TlsHost string 34 | TlsSecret string 35 | } 36 | 37 | type IngressBackendService struct { 38 | ServiceUuid types.UUID 39 | IngressUuid types.UUID 40 | IngressRuleUuid types.UUID 41 | ServiceName string 42 | ServicePortName string 43 | ServicePortNumber int32 44 | } 45 | 46 | type IngressBackendResource struct { 47 | ResourceUuid types.UUID 48 | IngressUuid types.UUID 49 | IngressRuleUuid types.UUID 50 | ApiGroup sql.NullString 51 | Kind string 52 | Name string 53 | } 54 | 55 | type IngressRule struct { 56 | Uuid types.UUID 57 | BackendUuid types.UUID 58 | IngressUuid types.UUID 59 | Host sql.NullString 60 | Path 
sql.NullString 61 | PathType string 62 | } 63 | 64 | type IngressLabel struct { 65 | IngressUuid types.UUID 66 | LabelUuid types.UUID 67 | } 68 | 69 | type IngressAnnotation struct { 70 | IngressUuid types.UUID 71 | AnnotationUuid types.UUID 72 | } 73 | 74 | func NewIngress() Resource { 75 | return &Ingress{} 76 | } 77 | 78 | func (i *Ingress) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 79 | i.ObtainMeta(k8s, clusterUuid) 80 | 81 | ingress := k8s.(*networkingv1.Ingress) 82 | 83 | for _, tls := range ingress.Spec.TLS { 84 | for _, host := range tls.Hosts { 85 | i.IngressTls = append(i.IngressTls, IngressTls{ 86 | IngressUuid: i.Uuid, 87 | TlsHost: host, 88 | TlsSecret: tls.SecretName, 89 | }) 90 | } 91 | } 92 | 93 | if ingress.Spec.DefaultBackend != nil { 94 | if ingress.Spec.DefaultBackend.Service != nil { 95 | serviceUuid := NewUUID(i.Uuid, ingress.Spec.DefaultBackend.Service.Name+ingress.Spec.DefaultBackend.Service.Port.Name) 96 | i.IngressBackendService = append(i.IngressBackendService, IngressBackendService{ 97 | ServiceUuid: serviceUuid, 98 | IngressUuid: i.Uuid, 99 | ServiceName: ingress.Spec.DefaultBackend.Service.Name, 100 | ServicePortName: ingress.Spec.DefaultBackend.Service.Port.Name, 101 | ServicePortNumber: ingress.Spec.DefaultBackend.Service.Port.Number, 102 | }) 103 | } 104 | if ingress.Spec.DefaultBackend.Resource != nil { 105 | resourceUuid := NewUUID(i.Uuid, ingress.Spec.DefaultBackend.Resource.Kind+ingress.Spec.DefaultBackend.Resource.Name) 106 | var apiGroup sql.NullString 107 | if ingress.Spec.DefaultBackend.Resource.APIGroup != nil { 108 | apiGroup.String = *ingress.Spec.DefaultBackend.Resource.APIGroup 109 | apiGroup.Valid = true 110 | i.IngressBackendResource = append(i.IngressBackendResource, IngressBackendResource{ 111 | ResourceUuid: resourceUuid, 112 | IngressUuid: i.Uuid, 113 | ApiGroup: apiGroup, 114 | Kind: ingress.Spec.DefaultBackend.Resource.Kind, 115 | Name: ingress.Spec.DefaultBackend.Resource.Name, 116 | }) 117 | } 118 | 
} 119 | } 120 | 121 | for _, rules := range ingress.Spec.Rules { 122 | if rules.IngressRuleValue.HTTP == nil { 123 | continue 124 | } 125 | 126 | for _, ruleValue := range rules.IngressRuleValue.HTTP.Paths { 127 | // It is safe to use the pointer directly here. 128 | pathType := string(*ruleValue.PathType) 129 | if ruleValue.Backend.Service != nil { 130 | ingressRuleUuid := NewUUID(i.Uuid, rules.Host+ruleValue.Path+ruleValue.Backend.Service.Name) 131 | serviceUuid := NewUUID(ingressRuleUuid, ruleValue.Backend.Service.Name) 132 | i.IngressBackendService = append(i.IngressBackendService, IngressBackendService{ 133 | ServiceUuid: serviceUuid, 134 | IngressUuid: i.Uuid, 135 | IngressRuleUuid: ingressRuleUuid, 136 | ServiceName: ruleValue.Backend.Service.Name, 137 | ServicePortName: ruleValue.Backend.Service.Port.Name, 138 | ServicePortNumber: ruleValue.Backend.Service.Port.Number, 139 | }) 140 | i.IngressRule = append(i.IngressRule, IngressRule{ 141 | Uuid: ingressRuleUuid, 142 | BackendUuid: serviceUuid, 143 | IngressUuid: i.Uuid, 144 | Host: NewNullableString(rules.Host), 145 | Path: NewNullableString(ruleValue.Path), 146 | PathType: pathType, 147 | }) 148 | } else if ruleValue.Backend.Resource != nil { 149 | ingressRuleUuid := NewUUID(i.Uuid, rules.Host+ruleValue.Path+ruleValue.Backend.Resource.Name) 150 | resourceUuid := NewUUID(ingressRuleUuid, ruleValue.Backend.Resource.Name) 151 | var apiGroup sql.NullString 152 | if ruleValue.Backend.Resource.APIGroup != nil { 153 | apiGroup.String = *ruleValue.Backend.Resource.APIGroup 154 | apiGroup.Valid = true 155 | } 156 | i.IngressBackendResource = append(i.IngressBackendResource, IngressBackendResource{ 157 | ResourceUuid: resourceUuid, 158 | IngressUuid: i.Uuid, 159 | IngressRuleUuid: ingressRuleUuid, 160 | ApiGroup: apiGroup, 161 | Kind: ruleValue.Backend.Resource.Kind, 162 | Name: ruleValue.Backend.Resource.Name, 163 | }) 164 | i.IngressRule = append(i.IngressRule, IngressRule{ 165 | Uuid: ingressRuleUuid, 166 | 
IngressUuid: i.Uuid, 167 | BackendUuid: resourceUuid, 168 | Host: NewNullableString(rules.Host), 169 | Path: NewNullableString(ruleValue.Path), 170 | PathType: pathType, 171 | }) 172 | } 173 | } 174 | 175 | } 176 | 177 | for labelName, labelValue := range ingress.Labels { 178 | labelUuid := NewUUID(i.Uuid, strings.ToLower(labelName+":"+labelValue)) 179 | i.Labels = append(i.Labels, Label{ 180 | Uuid: labelUuid, 181 | Name: labelName, 182 | Value: labelValue, 183 | }) 184 | i.IngressLabels = append(i.IngressLabels, IngressLabel{ 185 | IngressUuid: i.Uuid, 186 | LabelUuid: labelUuid, 187 | }) 188 | i.ResourceLabels = append(i.ResourceLabels, ResourceLabel{ 189 | ResourceUuid: i.Uuid, 190 | LabelUuid: labelUuid, 191 | }) 192 | } 193 | 194 | for annotationName, annotationValue := range ingress.Annotations { 195 | annotationUuid := NewUUID(i.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 196 | i.Annotations = append(i.Annotations, Annotation{ 197 | Uuid: annotationUuid, 198 | Name: annotationName, 199 | Value: annotationValue, 200 | }) 201 | i.IngressAnnotations = append(i.IngressAnnotations, IngressAnnotation{ 202 | IngressUuid: i.Uuid, 203 | AnnotationUuid: annotationUuid, 204 | }) 205 | i.ResourceAnnotations = append(i.ResourceAnnotations, ResourceAnnotation{ 206 | ResourceUuid: i.Uuid, 207 | AnnotationUuid: annotationUuid, 208 | }) 209 | } 210 | 211 | scheme := kruntime.NewScheme() 212 | _ = networkingv1.AddToScheme(scheme) 213 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), networkingv1.SchemeGroupVersion) 214 | output, _ := kruntime.Encode(codec, ingress) 215 | i.Yaml = string(output) 216 | } 217 | 218 | func (i *Ingress) Relations() []database.Relation { 219 | fk := database.WithForeignKey("ingress_uuid") 220 | 221 | return []database.Relation{ 222 | database.HasMany(i.IngressTls, fk), 223 | database.HasMany(i.IngressBackendService, fk), 224 | 
database.HasMany(i.IngressBackendResource, fk), 225 | database.HasMany(i.IngressRule, fk), 226 | database.HasMany(i.ResourceLabels, database.WithForeignKey("resource_uuid")), 227 | database.HasMany(i.Labels, database.WithoutCascadeDelete()), 228 | database.HasMany(i.IngressLabels, fk), 229 | database.HasMany(i.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 230 | database.HasMany(i.Annotations, database.WithoutCascadeDelete()), 231 | database.HasMany(i.IngressAnnotations, fk), 232 | database.HasMany(i.Favorites, database.WithForeignKey("resource_uuid")), 233 | } 234 | } 235 | -------------------------------------------------------------------------------- /pkg/schema/v1/instance.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | ) 7 | 8 | type Instance struct { 9 | Uuid types.Binary 10 | ClusterUuid types.UUID 11 | Version string 12 | KubernetesVersion sql.NullString 13 | KubernetesHeartbeat types.UnixMilli 14 | KubernetesApiReachable types.Bool 15 | Message sql.NullString 16 | Heartbeat types.UnixMilli 17 | } 18 | 19 | func (Instance) TableName() string { 20 | return "kubernetes_instance" 21 | } 22 | -------------------------------------------------------------------------------- /pkg/schema/v1/job.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "github.com/icinga/icinga-go-library/strcase" 7 | "github.com/icinga/icinga-go-library/types" 8 | "github.com/icinga/icinga-kubernetes/pkg/database" 9 | kbatchv1 "k8s.io/api/batch/v1" 10 | kcorev1 "k8s.io/api/core/v1" 11 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | kruntime "k8s.io/apimachinery/pkg/runtime" 13 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 14 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 15 | ktypes 
"k8s.io/apimachinery/pkg/types"
	"strings"
)

// Job mirrors a Kubernetes batch/v1 Job for database sync. Optional spec
// fields are stored as sql.Null*/types.Bool so absent values become NULL.
type Job struct {
	Meta
	Parallelism             sql.NullInt32
	Completions             sql.NullInt32
	ActiveDeadlineSeconds   sql.NullInt64
	BackoffLimit            sql.NullInt32
	TtlSecondsAfterFinished sql.NullInt32
	CompletionMode          sql.NullString
	Suspend                 types.Bool
	StartTime               types.UnixMilli
	CompletionTime          types.UnixMilli
	Active                  int32
	Succeeded               int32
	Failed                  int32
	Yaml                    string
	IcingaState             IcingaState
	IcingaStateReason       string
	// Relation slices are populated by Obtain and persisted separately;
	// `db:"-"` keeps them out of this table's own columns.
	Conditions          []JobCondition       `db:"-"`
	Labels              []Label              `db:"-"`
	JobLabels           []JobLabel           `db:"-"`
	ResourceLabels      []ResourceLabel      `db:"-"`
	Annotations         []Annotation         `db:"-"`
	JobAnnotations      []JobAnnotation      `db:"-"`
	ResourceAnnotations []ResourceAnnotation `db:"-"`
	Owners              []JobOwner           `db:"-"`
	Favorites           []Favorite           `db:"-"`
}

// JobCondition is one status condition of a Job, keyed by the owning Job's
// UUID.
type JobCondition struct {
	JobUuid        types.UUID
	Type           string
	Status         string
	LastProbe      types.UnixMilli
	LastTransition types.UnixMilli
	Reason         string
	Message        string
}

// JobLabel links a Job row to a Label row.
type JobLabel struct {
	JobUuid   types.UUID
	LabelUuid types.UUID
}

// JobAnnotation links a Job row to an Annotation row.
type JobAnnotation struct {
	JobUuid        types.UUID
	AnnotationUuid types.UUID
}

// JobOwner is one owner reference of a Job (e.g. its CronJob).
type JobOwner struct {
	JobUuid            types.UUID
	OwnerUuid          types.UUID
	Kind               string
	Name               string
	Uid                ktypes.UID
	Controller         types.Bool
	BlockOwnerDeletion types.Bool
}

// NewJob returns a fresh Job for use as a sync Resource factory.
func NewJob() Resource {
	return &Job{}
}

// Obtain copies everything relevant from the given Kubernetes Job into this
// schema type: meta data, nullable spec fields, status counters, the derived
// Icinga state and the relation rows.
func (j *Job) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) {
	j.ObtainMeta(k8s, clusterUuid)

	job := k8s.(*kbatchv1.Job)

	// Wrap each optional spec pointer into a nullable value; absent pointers
	// stay invalid and are stored as NULL.
	var parallelism sql.NullInt32
	if job.Spec.Parallelism != nil {
		parallelism.Int32 = *job.Spec.Parallelism
		parallelism.Valid = true
	}
	var completions sql.NullInt32
	if job.Spec.Completions != nil {
		completions.Int32 = *job.Spec.Completions
		completions.Valid = true
	}
	var activeDeadlineSeconds sql.NullInt64
	if job.Spec.ActiveDeadlineSeconds != nil {
		activeDeadlineSeconds.Int64 = *job.Spec.ActiveDeadlineSeconds
		activeDeadlineSeconds.Valid = true
	}
	var backoffLimit sql.NullInt32
	if job.Spec.BackoffLimit != nil {
		backoffLimit.Int32 = *job.Spec.BackoffLimit
		backoffLimit.Valid = true
	}
	var ttlSecondsAfterFinished sql.NullInt32
	if job.Spec.TTLSecondsAfterFinished != nil {
		ttlSecondsAfterFinished.Int32 = *job.Spec.TTLSecondsAfterFinished
		ttlSecondsAfterFinished.Valid = true
	}
	var suspend types.Bool
	if job.Spec.Suspend != nil {
		suspend.Bool = *job.Spec.Suspend
		suspend.Valid = true
	}
	var completionMode sql.NullString
	if job.Spec.CompletionMode != nil {
		completionMode.String = string(*job.Spec.CompletionMode)
		completionMode.Valid = true
	}
	// Zero-valued times map to the zero UnixMilli below when unset.
	var startTime kmetav1.Time
	if job.Status.StartTime != nil {
		startTime = *job.Status.StartTime
	}
	var completionTime kmetav1.Time
	if job.Status.CompletionTime != nil {
		completionTime = *job.Status.CompletionTime
	}

	j.Parallelism = parallelism
	j.Completions = completions
	j.ActiveDeadlineSeconds = activeDeadlineSeconds
	j.BackoffLimit = backoffLimit
	j.TtlSecondsAfterFinished = ttlSecondsAfterFinished
	j.Suspend = suspend
	j.CompletionMode = completionMode
	j.StartTime = types.UnixMilli(startTime.Time)
	j.CompletionTime = types.UnixMilli(completionTime.Time)
	j.Active = job.Status.Active
	j.Succeeded = job.Status.Succeeded
	j.Failed = job.Status.Failed
	j.IcingaState, j.IcingaStateReason = j.getIcingaState(job)

	for _, condition := range job.Status.Conditions {
		j.Conditions = append(j.Conditions, JobCondition{
			JobUuid: j.Uuid,
			Type:    string(condition.Type),
			Status:
string(condition.Status), 149 | LastProbe: types.UnixMilli(condition.LastProbeTime.Time), 150 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 151 | Reason: condition.Reason, 152 | Message: condition.Message, 153 | }) 154 | } 155 | 156 | for labelName, labelValue := range job.Labels { 157 | labelUuid := NewUUID(j.Uuid, strings.ToLower(labelName+":"+labelValue)) 158 | j.Labels = append(j.Labels, Label{ 159 | Uuid: labelUuid, 160 | Name: labelName, 161 | Value: labelValue, 162 | }) 163 | j.JobLabels = append(j.JobLabels, JobLabel{ 164 | JobUuid: j.Uuid, 165 | LabelUuid: labelUuid, 166 | }) 167 | j.ResourceLabels = append(j.ResourceLabels, ResourceLabel{ 168 | ResourceUuid: j.Uuid, 169 | LabelUuid: labelUuid, 170 | }) 171 | } 172 | 173 | for annotationName, annotationValue := range job.Annotations { 174 | annotationUuid := NewUUID(j.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 175 | j.Annotations = append(j.Annotations, Annotation{ 176 | Uuid: annotationUuid, 177 | Name: annotationName, 178 | Value: annotationValue, 179 | }) 180 | j.JobAnnotations = append(j.JobAnnotations, JobAnnotation{ 181 | JobUuid: j.Uuid, 182 | AnnotationUuid: annotationUuid, 183 | }) 184 | j.ResourceAnnotations = append(j.ResourceAnnotations, ResourceAnnotation{ 185 | ResourceUuid: j.Uuid, 186 | AnnotationUuid: annotationUuid, 187 | }) 188 | } 189 | 190 | for _, ownerReference := range job.OwnerReferences { 191 | var blockOwnerDeletion, controller bool 192 | if ownerReference.BlockOwnerDeletion != nil { 193 | blockOwnerDeletion = *ownerReference.BlockOwnerDeletion 194 | } 195 | if ownerReference.Controller != nil { 196 | controller = *ownerReference.Controller 197 | } 198 | j.Owners = append(j.Owners, JobOwner{ 199 | JobUuid: j.Uuid, 200 | OwnerUuid: EnsureUUID(ownerReference.UID), 201 | Kind: strcase.Snake(ownerReference.Kind), 202 | Name: ownerReference.Name, 203 | Uid: ownerReference.UID, 204 | BlockOwnerDeletion: types.Bool{ 205 | Bool: blockOwnerDeletion, 
206 | Valid: true, 207 | }, 208 | Controller: types.Bool{ 209 | Bool: controller, 210 | Valid: true, 211 | }, 212 | }) 213 | } 214 | 215 | scheme := kruntime.NewScheme() 216 | _ = kbatchv1.AddToScheme(scheme) 217 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kbatchv1.SchemeGroupVersion) 218 | output, _ := kruntime.Encode(codec, job) 219 | j.Yaml = string(output) 220 | } 221 | 222 | func (j *Job) getIcingaState(job *kbatchv1.Job) (IcingaState, string) { 223 | for _, condition := range job.Status.Conditions { 224 | if condition.Status != kcorev1.ConditionTrue { 225 | continue 226 | } 227 | 228 | switch condition.Type { 229 | case kbatchv1.JobSuccessCriteriaMet: 230 | return Ok, fmt.Sprintf( 231 | "Job %s/%s met its sucess criteria.", 232 | j.Namespace, j.Name) 233 | case kbatchv1.JobComplete: 234 | reason := fmt.Sprintf( 235 | "Job %s/%s has completed its execution successfully with", 236 | j.Namespace, j.Name) 237 | 238 | if j.Completions.Valid { 239 | reason += fmt.Sprintf(" %d necessary pod completions.", j.Completions.Int32) 240 | } else { 241 | reason += " any pod completion." 242 | } 243 | 244 | return Ok, reason 245 | case kbatchv1.JobFailed: 246 | return Critical, fmt.Sprintf( 247 | "Job %s/%s has failed its execution. %s: %s.", 248 | j.Namespace, j.Name, condition.Reason, condition.Message) 249 | case kbatchv1.JobFailureTarget: 250 | return Warning, fmt.Sprintf( 251 | "Job %s/%s is about to fail its execution. 
%s: %s.", 252 | j.Namespace, j.Name, condition.Reason, condition.Message) 253 | case kbatchv1.JobSuspended: 254 | return Ok, fmt.Sprintf( 255 | "Job %s/%s is suspended.", 256 | j.Namespace, j.Name) 257 | } 258 | } 259 | 260 | var completions string 261 | if j.Completions.Valid { 262 | completions = fmt.Sprintf("%d pod completions", j.Completions.Int32) 263 | } else { 264 | completions = "any pod completion" 265 | } 266 | 267 | reason := fmt.Sprintf( 268 | "Job %s/%s is running since %s with currently %d active, %d completed and %d failed pods. "+ 269 | "Successful termination requires %s. The back-off limit is %d.", 270 | j.Namespace, j.Name, job.Status.StartTime, j.Active, j.Succeeded, j.Failed, completions, *job.Spec.BackoffLimit) 271 | 272 | if job.Spec.ActiveDeadlineSeconds != nil { 273 | reason += fmt.Sprintf(" Deadline for completion is %d.", job.Spec.ActiveDeadlineSeconds) 274 | } 275 | 276 | return Pending, reason 277 | } 278 | 279 | func (j *Job) Relations() []database.Relation { 280 | fk := database.WithForeignKey("job_uuid") 281 | 282 | return []database.Relation{ 283 | database.HasMany(j.Conditions, fk), 284 | database.HasMany(j.ResourceLabels, database.WithForeignKey("resource_uuid")), 285 | database.HasMany(j.Labels, database.WithoutCascadeDelete()), 286 | database.HasMany(j.JobLabels, fk), 287 | database.HasMany(j.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 288 | database.HasMany(j.Annotations, database.WithoutCascadeDelete()), 289 | database.HasMany(j.JobAnnotations, fk), 290 | database.HasMany(j.Owners, fk), 291 | database.HasMany(j.Favorites, database.WithForeignKey("resource_uuid")), 292 | } 293 | } 294 | -------------------------------------------------------------------------------- /pkg/schema/v1/label.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/types" 5 | ) 6 | 7 | type Label struct { 8 | Uuid types.UUID 9 | 
Name string 10 | Value string 11 | } 12 | -------------------------------------------------------------------------------- /pkg/schema/v1/metric.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/database" 5 | "github.com/icinga/icinga-go-library/types" 6 | "strconv" 7 | ) 8 | 9 | type PrometheusClusterMetric struct { 10 | ClusterUuid types.UUID 11 | Timestamp int64 12 | Category string 13 | Name string 14 | Value float64 15 | } 16 | 17 | func (m *PrometheusClusterMetric) ID() database.ID { 18 | return compoundId{id: m.ClusterUuid.String() + strconv.FormatInt(m.Timestamp, 10) + m.Category + m.Name} 19 | } 20 | 21 | func (m *PrometheusClusterMetric) SetID(id database.ID) { 22 | panic("Not expected to be called") 23 | } 24 | 25 | func (m *PrometheusClusterMetric) Fingerprint() database.Fingerprinter { 26 | return m 27 | } 28 | 29 | type PrometheusNodeMetric struct { 30 | NodeUuid types.UUID 31 | Timestamp int64 32 | Category string 33 | Name string 34 | Value float64 35 | } 36 | 37 | func (m *PrometheusNodeMetric) ID() database.ID { 38 | return compoundId{id: m.NodeUuid.String() + strconv.FormatInt(m.Timestamp, 10) + m.Category + m.Name} 39 | } 40 | 41 | func (m *PrometheusNodeMetric) SetID(id database.ID) { 42 | panic("Not expected to be called") 43 | } 44 | 45 | func (m *PrometheusNodeMetric) Fingerprint() database.Fingerprinter { 46 | return m 47 | } 48 | 49 | type PrometheusPodMetric struct { 50 | PodUuid types.UUID 51 | Timestamp int64 52 | Category string 53 | Name string 54 | Value float64 55 | } 56 | 57 | func (m *PrometheusPodMetric) ID() database.ID { 58 | return compoundId{id: m.PodUuid.String() + strconv.FormatInt(m.Timestamp, 10) + m.Category + m.Name} 59 | } 60 | 61 | func (m *PrometheusPodMetric) SetID(id database.ID) { 62 | panic("Not expected to be called") 63 | } 64 | 65 | func (m *PrometheusPodMetric) Fingerprint() database.Fingerprinter { 66 
| return m 67 | } 68 | 69 | type PrometheusContainerMetric struct { 70 | ContainerUuid types.UUID 71 | Timestamp int64 72 | Category string 73 | Name string 74 | Value float64 75 | } 76 | 77 | func (m *PrometheusContainerMetric) ID() database.ID { 78 | return compoundId{id: m.ContainerUuid.String() + strconv.FormatInt(m.Timestamp, 10) + m.Category + m.Name} 79 | } 80 | 81 | func (m *PrometheusContainerMetric) SetID(id database.ID) { 82 | panic("Not expected to be called") 83 | } 84 | 85 | func (m *PrometheusContainerMetric) Fingerprint() database.Fingerprinter { 86 | return m 87 | } 88 | 89 | type compoundId struct { 90 | id string 91 | } 92 | 93 | func (i compoundId) String() string { 94 | return i.id 95 | } 96 | -------------------------------------------------------------------------------- /pkg/schema/v1/metrics.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/types" 5 | "time" 6 | ) 7 | 8 | type PodMetrics struct { 9 | Namespace string `db:"namespace"` 10 | PodName string `db:"pod_name"` 11 | ContainerName string `db:"container_name"` 12 | Timestamp types.UnixMilli `db:"timestamp"` 13 | Duration time.Duration `db:"duration"` 14 | CPUUsage float64 `db:"cpu_usage"` 15 | MemoryUsage float64 `db:"memory_usage"` 16 | StorageUsage float64 `db:"storage_usage"` 17 | EphemeralStorageUsage float64 `db:"ephemeral_storage_usage"` 18 | } 19 | -------------------------------------------------------------------------------- /pkg/schema/v1/namespace.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/types" 5 | "github.com/icinga/icinga-kubernetes/pkg/database" 6 | kcorev1 "k8s.io/api/core/v1" 7 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | kruntime "k8s.io/apimachinery/pkg/runtime" 9 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 10 | kjson 
"k8s.io/apimachinery/pkg/runtime/serializer/json" 11 | "strings" 12 | ) 13 | 14 | type Namespace struct { 15 | Meta 16 | Phase string 17 | Yaml string 18 | Conditions []NamespaceCondition `db:"-"` 19 | Labels []Label `db:"-"` 20 | NamespaceLabels []NamespaceLabel `db:"-"` 21 | ResourceLabels []ResourceLabel `db:"-"` 22 | Annotations []Annotation `db:"-"` 23 | NamespaceAnnotations []NamespaceAnnotation `db:"-"` 24 | ResourceAnnotations []ResourceAnnotation `db:"-"` 25 | Favorites []Favorite `db:"-"` 26 | } 27 | 28 | type NamespaceCondition struct { 29 | NamespaceUuid types.UUID 30 | Type string 31 | Status string 32 | LastTransition types.UnixMilli 33 | Reason string 34 | Message string 35 | } 36 | 37 | type NamespaceLabel struct { 38 | NamespaceUuid types.UUID 39 | LabelUuid types.UUID 40 | } 41 | 42 | type NamespaceAnnotation struct { 43 | NamespaceUuid types.UUID 44 | AnnotationUuid types.UUID 45 | } 46 | 47 | func NewNamespace() Resource { 48 | return &Namespace{} 49 | } 50 | 51 | func (n *Namespace) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 52 | n.ObtainMeta(k8s, clusterUuid) 53 | 54 | namespace := k8s.(*kcorev1.Namespace) 55 | 56 | n.Phase = string(namespace.Status.Phase) 57 | 58 | for _, condition := range namespace.Status.Conditions { 59 | n.Conditions = append(n.Conditions, NamespaceCondition{ 60 | NamespaceUuid: n.Uuid, 61 | Type: string(condition.Type), 62 | Status: string(condition.Status), 63 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 64 | Reason: condition.Reason, 65 | Message: condition.Message, 66 | }) 67 | } 68 | 69 | for labelName, labelValue := range namespace.Labels { 70 | labelUuid := NewUUID(n.Uuid, strings.ToLower(labelName+":"+labelValue)) 71 | n.Labels = append(n.Labels, Label{ 72 | Uuid: labelUuid, 73 | Name: labelName, 74 | Value: labelValue, 75 | }) 76 | n.NamespaceLabels = append(n.NamespaceLabels, NamespaceLabel{ 77 | NamespaceUuid: n.Uuid, 78 | LabelUuid: labelUuid, 79 | }) 80 | n.ResourceLabels = 
append(n.ResourceLabels, ResourceLabel{ 81 | ResourceUuid: n.Uuid, 82 | LabelUuid: labelUuid, 83 | }) 84 | } 85 | 86 | for annotationName, annotationValue := range namespace.Annotations { 87 | annotationUuid := NewUUID(n.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 88 | n.Annotations = append(n.Annotations, Annotation{ 89 | Uuid: annotationUuid, 90 | Name: annotationName, 91 | Value: annotationValue, 92 | }) 93 | n.NamespaceAnnotations = append(n.NamespaceAnnotations, NamespaceAnnotation{ 94 | NamespaceUuid: n.Uuid, 95 | AnnotationUuid: annotationUuid, 96 | }) 97 | n.ResourceAnnotations = append(n.ResourceAnnotations, ResourceAnnotation{ 98 | ResourceUuid: n.Uuid, 99 | AnnotationUuid: annotationUuid, 100 | }) 101 | } 102 | 103 | scheme := kruntime.NewScheme() 104 | _ = kcorev1.AddToScheme(scheme) 105 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kcorev1.SchemeGroupVersion) 106 | output, _ := kruntime.Encode(codec, namespace) 107 | n.Yaml = string(output) 108 | } 109 | 110 | func (n *Namespace) Relations() []database.Relation { 111 | fk := database.WithForeignKey("namespace_uuid") 112 | 113 | return []database.Relation{ 114 | database.HasMany(n.Conditions, fk), 115 | database.HasMany(n.ResourceLabels, database.WithForeignKey("resource_uuid")), 116 | database.HasMany(n.Labels, database.WithoutCascadeDelete()), 117 | database.HasMany(n.NamespaceLabels, fk), 118 | database.HasMany(n.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 119 | database.HasMany(n.Annotations, database.WithoutCascadeDelete()), 120 | database.HasMany(n.NamespaceAnnotations, fk), 121 | database.HasMany(n.Favorites, database.WithForeignKey("resource_uuid")), 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /pkg/schema/v1/node.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | 
import ( 4 | "fmt" 5 | "github.com/icinga/icinga-go-library/types" 6 | "github.com/icinga/icinga-kubernetes/pkg/database" 7 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 8 | "github.com/pkg/errors" 9 | kcorev1 "k8s.io/api/core/v1" 10 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | kruntime "k8s.io/apimachinery/pkg/runtime" 12 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 13 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 14 | knet "k8s.io/utils/net" 15 | "net" 16 | "net/url" 17 | "strings" 18 | ) 19 | 20 | type Node struct { 21 | Meta 22 | PodCIDR string 23 | NumIps int64 24 | Unschedulable types.Bool 25 | Ready types.Bool 26 | CpuCapacity int64 27 | CpuAllocatable int64 28 | MemoryCapacity int64 29 | MemoryAllocatable int64 30 | PodCapacity int64 31 | Yaml string 32 | Roles string 33 | MachineId string 34 | SystemUUID string 35 | BootId string 36 | KernelVersion string 37 | OsImage string 38 | OperatingSystem string 39 | Architecture string 40 | ContainerRuntimeVersion string 41 | KubeletVersion string 42 | KubeProxyVersion string 43 | IcingaState IcingaState 44 | IcingaStateReason string 45 | Conditions []NodeCondition `db:"-"` 46 | Volumes []NodeVolume `db:"-"` 47 | Labels []Label `db:"-"` 48 | NodeLabels []NodeLabel `db:"-"` 49 | ResourceLabels []ResourceLabel `db:"-"` 50 | Annotations []Annotation `db:"-"` 51 | NodeAnnotations []NodeAnnotation `db:"-"` 52 | ResourceAnnotations []ResourceAnnotation `db:"-"` 53 | Favorites []Favorite `db:"-"` 54 | } 55 | 56 | type NodeCondition struct { 57 | NodeUuid types.UUID 58 | Type string 59 | Status string 60 | LastHeartbeat types.UnixMilli 61 | LastTransition types.UnixMilli 62 | Reason string 63 | Message string 64 | } 65 | 66 | type NodeVolume struct { 67 | NodeUuid types.UUID 68 | Name kcorev1.UniqueVolumeName 69 | DevicePath string 70 | Mounted types.Bool 71 | } 72 | 73 | type NodeLabel struct { 74 | NodeUuid types.UUID 75 | LabelUuid types.UUID 76 | } 77 | 78 | type 
NodeAnnotation struct { 79 | NodeUuid types.UUID 80 | AnnotationUuid types.UUID 81 | } 82 | 83 | func NewNode() Resource { 84 | return &Node{} 85 | } 86 | 87 | func (n *Node) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 88 | n.ObtainMeta(k8s, clusterUuid) 89 | 90 | node := k8s.(*kcorev1.Node) 91 | 92 | n.PodCIDR = node.Spec.PodCIDR 93 | if n.PodCIDR != "" { 94 | _, cidr, err := net.ParseCIDR(n.PodCIDR) 95 | if err != nil { 96 | panic(errors.Wrapf(err, "failed to parse CIDR %s", n.PodCIDR)) 97 | } 98 | n.NumIps = knet.RangeSize(cidr) - 2 99 | } 100 | n.Unschedulable = types.Bool{ 101 | Bool: node.Spec.Unschedulable, 102 | Valid: true, 103 | } 104 | n.Ready = types.Bool{ 105 | Bool: getNodeConditionStatus(node, kcorev1.NodeReady), 106 | Valid: true, 107 | } 108 | n.CpuCapacity = node.Status.Capacity.Cpu().MilliValue() 109 | n.CpuAllocatable = node.Status.Allocatable.Cpu().MilliValue() 110 | n.MemoryCapacity = node.Status.Capacity.Memory().MilliValue() 111 | n.MemoryAllocatable = node.Status.Allocatable.Memory().MilliValue() 112 | n.PodCapacity = node.Status.Allocatable.Pods().Value() 113 | n.MachineId = node.Status.NodeInfo.MachineID 114 | n.SystemUUID = node.Status.NodeInfo.SystemUUID 115 | n.BootId = node.Status.NodeInfo.BootID 116 | n.KernelVersion = node.Status.NodeInfo.KernelVersion 117 | n.OsImage = node.Status.NodeInfo.OSImage 118 | n.OperatingSystem = node.Status.NodeInfo.OperatingSystem 119 | n.Architecture = node.Status.NodeInfo.Architecture 120 | n.ContainerRuntimeVersion = node.Status.NodeInfo.ContainerRuntimeVersion 121 | n.KubeletVersion = node.Status.NodeInfo.KubeletVersion 122 | n.KubeProxyVersion = node.Status.NodeInfo.KubeProxyVersion 123 | 124 | var roles []string 125 | for labelName := range node.Labels { 126 | if strings.Contains(labelName, "node-role") { 127 | role := strings.SplitAfter(labelName, "/")[1] 128 | roles = append(roles, role) 129 | } 130 | } 131 | n.Roles = strings.Join(roles, ", ") 132 | 133 | n.IcingaState, 
n.IcingaStateReason = n.getIcingaState(node) 134 | 135 | for _, condition := range node.Status.Conditions { 136 | n.Conditions = append(n.Conditions, NodeCondition{ 137 | NodeUuid: n.Uuid, 138 | Type: string(condition.Type), 139 | Status: string(condition.Status), 140 | LastHeartbeat: types.UnixMilli(condition.LastHeartbeatTime.Time), 141 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 142 | Reason: condition.Reason, 143 | Message: condition.Message, 144 | }) 145 | } 146 | 147 | volumesMounted := make(map[kcorev1.UniqueVolumeName]interface{}, len(node.Status.VolumesInUse)) 148 | for _, name := range node.Status.VolumesInUse { 149 | volumesMounted[name] = struct{}{} 150 | } 151 | 152 | for _, volume := range node.Status.VolumesAttached { 153 | _, mounted := volumesMounted[volume.Name] 154 | n.Volumes = append(n.Volumes, NodeVolume{ 155 | NodeUuid: n.Uuid, 156 | Name: volume.Name, 157 | DevicePath: volume.DevicePath, 158 | Mounted: types.Bool{ 159 | Bool: mounted, 160 | Valid: true, 161 | }, 162 | }) 163 | } 164 | 165 | for labelName, labelValue := range node.Labels { 166 | labelUuid := NewUUID(n.Uuid, strings.ToLower(labelName+":"+labelValue)) 167 | n.Labels = append(n.Labels, Label{ 168 | Uuid: labelUuid, 169 | Name: labelName, 170 | Value: labelValue, 171 | }) 172 | n.NodeLabels = append(n.NodeLabels, NodeLabel{ 173 | NodeUuid: n.Uuid, 174 | LabelUuid: labelUuid, 175 | }) 176 | n.ResourceLabels = append(n.ResourceLabels, ResourceLabel{ 177 | ResourceUuid: n.Uuid, 178 | LabelUuid: labelUuid, 179 | }) 180 | } 181 | 182 | scheme := kruntime.NewScheme() 183 | _ = kcorev1.AddToScheme(scheme) 184 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kcorev1.SchemeGroupVersion) 185 | output, _ := kruntime.Encode(codec, node) 186 | n.Yaml = string(output) 187 | 188 | for annotationName, annotationValue := range node.Annotations { 189 | annotationUuid := NewUUID(n.Uuid, 
strings.ToLower(annotationName+":"+annotationValue)) 190 | n.Annotations = append(n.Annotations, Annotation{ 191 | Uuid: annotationUuid, 192 | Name: annotationName, 193 | Value: annotationValue, 194 | }) 195 | n.NodeAnnotations = append(n.NodeAnnotations, NodeAnnotation{ 196 | NodeUuid: n.Uuid, 197 | AnnotationUuid: annotationUuid, 198 | }) 199 | n.ResourceAnnotations = append(n.ResourceAnnotations, ResourceAnnotation{ 200 | ResourceUuid: n.Uuid, 201 | AnnotationUuid: annotationUuid, 202 | }) 203 | } 204 | } 205 | 206 | func (n *Node) MarshalEvent() (notifications.Event, error) { 207 | return notifications.Event{ 208 | Name: n.Namespace + "/" + n.Name, 209 | Severity: n.IcingaState.ToSeverity(), 210 | Message: n.IcingaStateReason, 211 | URL: &url.URL{Path: "/node", RawQuery: fmt.Sprintf("id=%s", n.Uuid)}, 212 | Tags: map[string]string{ 213 | "uuid": n.Uuid.String(), 214 | "name": n.Name, 215 | "namespace": n.Namespace, 216 | "resource": "node", 217 | }, 218 | }, nil 219 | } 220 | 221 | func (n *Node) getIcingaState(node *kcorev1.Node) (IcingaState, string) { 222 | // if node.Status.Phase == kcorev1.NodePending { 223 | // return Pending, fmt.Sprintf("Node %s is pending.", node.Name) 224 | // } 225 | // 226 | // if node.Status.Phase == kcorev1.NodeTerminated { 227 | // return Ok, fmt.Sprintf("Node %s is terminated.", node.Name) 228 | // } 229 | 230 | var state IcingaState 231 | var reason []string 232 | 233 | for _, condition := range node.Status.Conditions { 234 | switch condition.Type { 235 | case kcorev1.NodeDiskPressure: 236 | if condition.Status == kcorev1.ConditionTrue { 237 | state = Critical 238 | reason = append(reason, fmt.Sprintf("Node %s is running out of disk space", node.Name)) 239 | } 240 | case kcorev1.NodeMemoryPressure: 241 | if condition.Status == kcorev1.ConditionTrue { 242 | state = Critical 243 | reason = append(reason, fmt.Sprintf("Node %s is running out of available memory", node.Name)) 244 | } 245 | case kcorev1.NodePIDPressure: 246 | if 
condition.Status == kcorev1.ConditionTrue { 247 | state = Critical 248 | reason = append(reason, fmt.Sprintf("Node %s is running out of process IDs", node.Name)) 249 | } 250 | case kcorev1.NodeNetworkUnavailable: 251 | if condition.Status == kcorev1.ConditionTrue { 252 | state = Critical 253 | reason = append(reason, fmt.Sprintf("Node %s network is not correctly configured", node.Name)) 254 | } 255 | case kcorev1.NodeReady: 256 | switch condition.Status { 257 | case kcorev1.ConditionFalse: 258 | state = Critical 259 | reason = append(reason, fmt.Sprintf("Node %s has 'NodeReady' condition 'false'", node.Name)) 260 | case kcorev1.ConditionUnknown: 261 | state = Unknown 262 | reason = append(reason, fmt.Sprintf("Node %s has 'NodeReady' condition 'unknown'", node.Name)) 263 | } 264 | } 265 | } 266 | 267 | if state != Ok { 268 | return state, strings.Join(reason, ". ") + "." 269 | } 270 | 271 | return Ok, fmt.Sprintf("Node %s is ok.", node.Name) 272 | } 273 | 274 | func (n *Node) Relations() []database.Relation { 275 | fk := database.WithForeignKey("node_uuid") 276 | 277 | return []database.Relation{ 278 | database.HasMany(n.Conditions, fk), 279 | database.HasMany(n.Volumes, fk), 280 | database.HasMany(n.ResourceLabels, database.WithForeignKey("resource_uuid")), 281 | database.HasMany(n.Labels, database.WithoutCascadeDelete()), 282 | database.HasMany(n.NodeLabels, fk), 283 | database.HasMany(n.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 284 | database.HasMany(n.Annotations, database.WithoutCascadeDelete()), 285 | database.HasMany(n.NodeAnnotations, fk), 286 | database.HasMany(n.Favorites, database.WithForeignKey("resource_uuid")), 287 | } 288 | } 289 | 290 | func getNodeConditionStatus(node *kcorev1.Node, conditionType kcorev1.NodeConditionType) bool { 291 | for _, condition := range node.Status.Conditions { 292 | if condition.Type == conditionType && condition.Status == kcorev1.ConditionTrue { 293 | return true 294 | } 295 | } 296 | 297 | return 
false 298 | } 299 | -------------------------------------------------------------------------------- /pkg/schema/v1/persistent_volume.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/types" 6 | "github.com/icinga/icinga-kubernetes/pkg/database" 7 | kcorev1 "k8s.io/api/core/v1" 8 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | kruntime "k8s.io/apimachinery/pkg/runtime" 10 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 11 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 12 | ktypes "k8s.io/apimachinery/pkg/types" 13 | "strings" 14 | ) 15 | 16 | type PersistentVolume struct { 17 | Meta 18 | AccessModes Bitmask[kpersistentVolumeAccessModesSize] 19 | Capacity int64 20 | ReclaimPolicy string 21 | StorageClass sql.NullString 22 | VolumeMode string 23 | VolumeSourceType string 24 | VolumeSource string 25 | Phase string 26 | Reason sql.NullString 27 | Message sql.NullString 28 | Yaml string 29 | Claim *PersistentVolumeClaimRef `db:"-"` 30 | Labels []Label `db:"-"` 31 | PersistentVolumeLabels []PersistentVolumeLabel `db:"-"` 32 | ResourceLabels []ResourceLabel `db:"-"` 33 | Annotations []Annotation `db:"-"` 34 | PersistentVolumeAnnotations []PersistentVolumeAnnotation `db:"-"` 35 | ResourceAnnotations []PersistentVolumeAnnotation `db:"-"` 36 | Favorites []Favorite `db:"-"` 37 | } 38 | 39 | type PersistentVolumeClaimRef struct { 40 | PersistentVolumeUuid types.UUID 41 | Kind string 42 | Name string 43 | Uid ktypes.UID 44 | } 45 | 46 | type PersistentVolumeLabel struct { 47 | PersistentVolumeUuid types.UUID 48 | LabelUuid types.UUID 49 | } 50 | 51 | type PersistentVolumeAnnotation struct { 52 | PersistentVolumeUuid types.UUID 53 | AnnotationUuid types.UUID 54 | } 55 | 56 | func NewPersistentVolume() Resource { 57 | return &PersistentVolume{} 58 | } 59 | 60 | func (p *PersistentVolume) Obtain(k8s kmetav1.Object, clusterUuid 
types.UUID) { 61 | p.ObtainMeta(k8s, clusterUuid) 62 | 63 | persistentVolume := k8s.(*kcorev1.PersistentVolume) 64 | 65 | p.AccessModes = persistentVolumeAccessModes.Bitmask(persistentVolume.Spec.AccessModes...) 66 | p.Capacity = persistentVolume.Spec.Capacity.Storage().MilliValue() 67 | p.ReclaimPolicy = string(persistentVolume.Spec.PersistentVolumeReclaimPolicy) 68 | p.StorageClass = NewNullableString(persistentVolume.Spec.StorageClassName) 69 | p.Phase = string(persistentVolume.Status.Phase) 70 | p.Reason = NewNullableString(persistentVolume.Status.Reason) 71 | p.Message = NewNullableString(persistentVolume.Status.Message) 72 | var volumeMode string 73 | if persistentVolume.Spec.VolumeMode != nil { 74 | volumeMode = string(*persistentVolume.Spec.VolumeMode) 75 | } else { 76 | volumeMode = string(kcorev1.PersistentVolumeFilesystem) 77 | } 78 | p.VolumeMode = volumeMode 79 | 80 | var err error 81 | p.VolumeSourceType, p.VolumeSource, err = MarshalFirstNonNilStructFieldToJSON(persistentVolume.Spec.PersistentVolumeSource) 82 | if err != nil { 83 | panic(err) 84 | } 85 | 86 | if persistentVolume.Spec.ClaimRef != nil { 87 | p.Claim = &PersistentVolumeClaimRef{ 88 | PersistentVolumeUuid: p.Uuid, 89 | Kind: persistentVolume.Spec.ClaimRef.Kind, 90 | Name: persistentVolume.Spec.ClaimRef.Name, 91 | Uid: persistentVolume.Spec.ClaimRef.UID, 92 | } 93 | } 94 | 95 | for labelName, labelValue := range persistentVolume.Labels { 96 | labelUuid := NewUUID(p.Uuid, strings.ToLower(labelName+":"+labelValue)) 97 | p.Labels = append(p.Labels, Label{ 98 | Uuid: labelUuid, 99 | Name: labelName, 100 | Value: labelValue, 101 | }) 102 | p.PersistentVolumeLabels = append(p.PersistentVolumeLabels, PersistentVolumeLabel{ 103 | PersistentVolumeUuid: p.Uuid, 104 | LabelUuid: labelUuid, 105 | }) 106 | p.ResourceLabels = append(p.ResourceLabels, ResourceLabel{ 107 | ResourceUuid: p.Uuid, 108 | LabelUuid: labelUuid, 109 | }) 110 | } 111 | 112 | for annotationName, annotationValue := range 
persistentVolume.Annotations { 113 | annotationUuid := NewUUID(p.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 114 | p.Annotations = append(p.Annotations, Annotation{ 115 | Uuid: annotationUuid, 116 | Name: annotationName, 117 | Value: annotationValue, 118 | }) 119 | p.PersistentVolumeAnnotations = append(p.PersistentVolumeAnnotations, PersistentVolumeAnnotation{ 120 | PersistentVolumeUuid: p.Uuid, 121 | AnnotationUuid: annotationUuid, 122 | }) 123 | p.ResourceAnnotations = append(p.ResourceAnnotations, PersistentVolumeAnnotation{ 124 | PersistentVolumeUuid: p.Uuid, 125 | AnnotationUuid: annotationUuid, 126 | }) 127 | } 128 | 129 | scheme := kruntime.NewScheme() 130 | _ = kcorev1.AddToScheme(scheme) 131 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kcorev1.SchemeGroupVersion) 132 | output, _ := kruntime.Encode(codec, persistentVolume) 133 | p.Yaml = string(output) 134 | } 135 | 136 | func (p *PersistentVolume) Relations() []database.Relation { 137 | if p.Claim == nil { 138 | return []database.Relation{} 139 | } 140 | 141 | fk := database.WithForeignKey("persistent_volume_uuid") 142 | 143 | return []database.Relation{ 144 | database.HasOne(p.Claim, fk), 145 | database.HasMany(p.ResourceLabels, database.WithForeignKey("resource_uuid")), 146 | database.HasMany(p.Labels, database.WithoutCascadeDelete()), 147 | database.HasMany(p.PersistentVolumeLabels, fk), 148 | database.HasMany(p.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 149 | database.HasMany(p.Annotations, database.WithoutCascadeDelete()), 150 | database.HasMany(p.PersistentVolumeAnnotations, fk), 151 | database.HasMany(p.Favorites, database.WithForeignKey("resource_uuid")), 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /pkg/schema/v1/pvc.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 
3 | import ( 4 | "database/sql" 5 | "github.com/icinga/icinga-go-library/strcase" 6 | "github.com/icinga/icinga-go-library/types" 7 | "github.com/icinga/icinga-kubernetes/pkg/database" 8 | kcorev1 "k8s.io/api/core/v1" 9 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | kruntime "k8s.io/apimachinery/pkg/runtime" 11 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 12 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 13 | "strings" 14 | ) 15 | 16 | type kpersistentVolumeAccessModesSize byte 17 | 18 | type kpersistentVolumeAccessModes map[kcorev1.PersistentVolumeAccessMode]kpersistentVolumeAccessModesSize 19 | 20 | func (modes kpersistentVolumeAccessModes) Bitmask(mode ...kcorev1.PersistentVolumeAccessMode) Bitmask[kpersistentVolumeAccessModesSize] { 21 | b := Bitmask[kpersistentVolumeAccessModesSize]{} 22 | 23 | for _, m := range mode { 24 | b.Set(modes[m]) 25 | } 26 | 27 | return b 28 | } 29 | 30 | var persistentVolumeAccessModes = kpersistentVolumeAccessModes{ 31 | kcorev1.ReadWriteOnce: 1 << 0, 32 | kcorev1.ReadOnlyMany: 1 << 1, 33 | kcorev1.ReadWriteMany: 1 << 2, 34 | kcorev1.ReadWriteOncePod: 1 << 3, 35 | } 36 | 37 | type Pvc struct { 38 | Meta 39 | DesiredAccessModes Bitmask[kpersistentVolumeAccessModesSize] 40 | ActualAccessModes Bitmask[kpersistentVolumeAccessModesSize] 41 | MinimumCapacity sql.NullInt64 42 | ActualCapacity sql.NullInt64 43 | Phase string 44 | VolumeName sql.NullString 45 | VolumeMode string 46 | StorageClass sql.NullString 47 | Yaml string 48 | Conditions []PvcCondition `db:"-"` 49 | Labels []Label `db:"-"` 50 | PvcLabels []PvcLabel `db:"-"` 51 | ResourceLabels []ResourceLabel `db:"-"` 52 | Annotations []Annotation `db:"-"` 53 | PvcAnnotations []PvcAnnotation `db:"-"` 54 | ResourceAnnotations []ResourceAnnotation `db:"-"` 55 | Favorites []Favorite `db:"-"` 56 | } 57 | 58 | type PvcCondition struct { 59 | PvcUuid types.UUID 60 | Type string 61 | Status string 62 | LastProbe types.UnixMilli 63 | LastTransition 
types.UnixMilli 64 | Reason string 65 | Message string 66 | } 67 | 68 | type PvcLabel struct { 69 | PvcUuid types.UUID 70 | LabelUuid types.UUID 71 | } 72 | 73 | type PvcAnnotation struct { 74 | PvcUuid types.UUID 75 | AnnotationUuid types.UUID 76 | } 77 | 78 | func NewPvc() Resource { 79 | return &Pvc{} 80 | } 81 | 82 | func (p *Pvc) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 83 | p.ObtainMeta(k8s, clusterUuid) 84 | 85 | pvc := k8s.(*kcorev1.PersistentVolumeClaim) 86 | 87 | p.DesiredAccessModes = persistentVolumeAccessModes.Bitmask(pvc.Spec.AccessModes...) 88 | p.ActualAccessModes = persistentVolumeAccessModes.Bitmask(pvc.Status.AccessModes...) 89 | if storageRequest, ok := pvc.Spec.Resources.Requests[kcorev1.ResourceStorage]; ok { 90 | p.MinimumCapacity = sql.NullInt64{ 91 | Int64: storageRequest.MilliValue(), 92 | Valid: true, 93 | } 94 | } 95 | if actualStorage, ok := pvc.Status.Capacity[kcorev1.ResourceStorage]; ok { 96 | p.ActualCapacity = sql.NullInt64{ 97 | Int64: actualStorage.MilliValue(), 98 | Valid: true, 99 | } 100 | } 101 | p.Phase = string(pvc.Status.Phase) 102 | p.VolumeName = NewNullableString(pvc.Spec.VolumeName) 103 | var volumeMode string 104 | if pvc.Spec.VolumeMode != nil { 105 | volumeMode = string(*pvc.Spec.VolumeMode) 106 | } else { 107 | volumeMode = string(kcorev1.PersistentVolumeFilesystem) 108 | } 109 | p.VolumeMode = volumeMode 110 | p.StorageClass = NewNullableString(pvc.Spec.StorageClassName) 111 | 112 | for _, condition := range pvc.Status.Conditions { 113 | p.Conditions = append(p.Conditions, PvcCondition{ 114 | PvcUuid: p.Uuid, 115 | Type: strcase.Snake(string(condition.Type)), 116 | Status: string(condition.Status), 117 | LastProbe: types.UnixMilli(condition.LastProbeTime.Time), 118 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 119 | Reason: condition.Reason, 120 | Message: condition.Message, 121 | }) 122 | } 123 | 124 | for labelName, labelValue := range pvc.Labels { 125 | labelUuid := 
NewUUID(p.Uuid, strings.ToLower(labelName+":"+labelValue)) 126 | p.Labels = append(p.Labels, Label{ 127 | Uuid: labelUuid, 128 | Name: labelName, 129 | Value: labelValue, 130 | }) 131 | p.PvcLabels = append(p.PvcLabels, PvcLabel{ 132 | PvcUuid: p.Uuid, 133 | LabelUuid: labelUuid, 134 | }) 135 | p.ResourceLabels = append(p.ResourceLabels, ResourceLabel{ 136 | ResourceUuid: p.Uuid, 137 | LabelUuid: labelUuid, 138 | }) 139 | } 140 | 141 | for annotationName, annotationValue := range pvc.Annotations { 142 | annotationUuid := NewUUID(p.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 143 | p.Annotations = append(p.Annotations, Annotation{ 144 | Uuid: annotationUuid, 145 | Name: annotationName, 146 | Value: annotationValue, 147 | }) 148 | p.PvcAnnotations = append(p.PvcAnnotations, PvcAnnotation{ 149 | PvcUuid: p.Uuid, 150 | AnnotationUuid: annotationUuid, 151 | }) 152 | p.PvcAnnotations = append(p.PvcAnnotations, PvcAnnotation{ 153 | PvcUuid: p.Uuid, 154 | AnnotationUuid: annotationUuid, 155 | }) 156 | } 157 | 158 | scheme := kruntime.NewScheme() 159 | _ = kcorev1.AddToScheme(scheme) 160 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kcorev1.SchemeGroupVersion) 161 | output, _ := kruntime.Encode(codec, pvc) 162 | p.Yaml = string(output) 163 | } 164 | 165 | func (p *Pvc) Relations() []database.Relation { 166 | fk := database.WithForeignKey("pvc_uuid") 167 | 168 | return []database.Relation{ 169 | database.HasMany(p.Conditions, fk), 170 | database.HasMany(p.ResourceLabels, database.WithForeignKey("resource_uuid")), 171 | database.HasMany(p.Labels, database.WithoutCascadeDelete()), 172 | database.HasMany(p.PvcLabels, fk), 173 | database.HasMany(p.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 174 | database.HasMany(p.Annotations, database.WithoutCascadeDelete()), 175 | database.HasMany(p.PvcAnnotations, fk), 176 | database.HasMany(p.Favorites, 
database.WithForeignKey("resource_uuid")), 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /pkg/schema/v1/replica_set.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "fmt" 5 | "github.com/icinga/icinga-go-library/strcase" 6 | "github.com/icinga/icinga-go-library/types" 7 | "github.com/icinga/icinga-kubernetes/pkg/database" 8 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 9 | kappsv1 "k8s.io/api/apps/v1" 10 | kcorev1 "k8s.io/api/core/v1" 11 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | kruntime "k8s.io/apimachinery/pkg/runtime" 13 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 14 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 15 | ktypes "k8s.io/apimachinery/pkg/types" 16 | "net/url" 17 | "strings" 18 | ) 19 | 20 | type ReplicaSet struct { 21 | Meta 22 | DesiredReplicas int32 23 | MinReadySeconds int32 24 | ActualReplicas int32 25 | FullyLabeledReplicas int32 26 | ReadyReplicas int32 27 | AvailableReplicas int32 28 | Yaml string 29 | IcingaState IcingaState 30 | IcingaStateReason string 31 | Conditions []ReplicaSetCondition `db:"-"` 32 | Owners []ReplicaSetOwner `db:"-"` 33 | Labels []Label `db:"-"` 34 | ReplicaSetLabels []ReplicaSetLabel `db:"-"` 35 | ResourceLabels []ResourceLabel `db:"-"` 36 | Annotations []Annotation `db:"-"` 37 | ReplicaSetAnnotations []ReplicaSetAnnotation `db:"-"` 38 | ResourceAnnotations []ResourceAnnotation `db:"-"` 39 | Favorites []Favorite `db:"-"` 40 | } 41 | 42 | type ReplicaSetCondition struct { 43 | ReplicaSetUuid types.UUID 44 | Type string 45 | Status string 46 | LastTransition types.UnixMilli 47 | Reason string 48 | Message string 49 | } 50 | 51 | type ReplicaSetOwner struct { 52 | ReplicaSetUuid types.UUID 53 | OwnerUuid types.UUID 54 | Kind string 55 | Name string 56 | Uid ktypes.UID 57 | Controller types.Bool 58 | BlockOwnerDeletion types.Bool 59 | } 60 | 61 | 
type ReplicaSetLabel struct { 62 | ReplicaSetUuid types.UUID 63 | LabelUuid types.UUID 64 | } 65 | 66 | type ReplicaSetAnnotation struct { 67 | ReplicaSetUuid types.UUID 68 | AnnotationUuid types.UUID 69 | } 70 | 71 | func NewReplicaSet() Resource { 72 | return &ReplicaSet{} 73 | } 74 | 75 | func (r *ReplicaSet) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 76 | r.ObtainMeta(k8s, clusterUuid) 77 | 78 | replicaSet := k8s.(*kappsv1.ReplicaSet) 79 | 80 | var desiredReplicas int32 81 | if replicaSet.Spec.Replicas != nil { 82 | desiredReplicas = *replicaSet.Spec.Replicas 83 | } 84 | r.DesiredReplicas = desiredReplicas 85 | r.MinReadySeconds = replicaSet.Spec.MinReadySeconds 86 | r.ActualReplicas = replicaSet.Status.Replicas 87 | r.FullyLabeledReplicas = replicaSet.Status.FullyLabeledReplicas 88 | r.ReadyReplicas = replicaSet.Status.ReadyReplicas 89 | r.AvailableReplicas = replicaSet.Status.AvailableReplicas 90 | r.IcingaState, r.IcingaStateReason = r.getIcingaState() 91 | 92 | for _, condition := range replicaSet.Status.Conditions { 93 | r.Conditions = append(r.Conditions, ReplicaSetCondition{ 94 | ReplicaSetUuid: r.Uuid, 95 | Type: string(condition.Type), 96 | Status: string(condition.Status), 97 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 98 | Reason: condition.Reason, 99 | Message: condition.Message, 100 | }) 101 | } 102 | 103 | for _, ownerReference := range replicaSet.OwnerReferences { 104 | var blockOwnerDeletion, controller bool 105 | if ownerReference.BlockOwnerDeletion != nil { 106 | blockOwnerDeletion = *ownerReference.BlockOwnerDeletion 107 | } 108 | if ownerReference.Controller != nil { 109 | controller = *ownerReference.Controller 110 | } 111 | r.Owners = append(r.Owners, ReplicaSetOwner{ 112 | ReplicaSetUuid: r.Uuid, 113 | OwnerUuid: EnsureUUID(ownerReference.UID), 114 | Kind: strcase.Snake(ownerReference.Kind), 115 | Name: ownerReference.Name, 116 | Uid: ownerReference.UID, 117 | BlockOwnerDeletion: types.Bool{ 118 | Bool: 
blockOwnerDeletion, 119 | Valid: true, 120 | }, 121 | Controller: types.Bool{ 122 | Bool: controller, 123 | Valid: true, 124 | }, 125 | }) 126 | } 127 | 128 | for labelName, labelValue := range replicaSet.Labels { 129 | labelUuid := NewUUID(r.Uuid, strings.ToLower(labelName+":"+labelValue)) 130 | r.Labels = append(r.Labels, Label{ 131 | Uuid: labelUuid, 132 | Name: labelName, 133 | Value: labelValue, 134 | }) 135 | r.ReplicaSetLabels = append(r.ReplicaSetLabels, ReplicaSetLabel{ 136 | ReplicaSetUuid: r.Uuid, 137 | LabelUuid: labelUuid, 138 | }) 139 | r.ResourceLabels = append(r.ResourceLabels, ResourceLabel{ 140 | ResourceUuid: r.Uuid, 141 | LabelUuid: labelUuid, 142 | }) 143 | } 144 | 145 | for annotationName, annotationValue := range replicaSet.Annotations { 146 | annotationUuid := NewUUID(r.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 147 | r.Annotations = append(r.Annotations, Annotation{ 148 | Uuid: annotationUuid, 149 | Name: annotationName, 150 | Value: annotationValue, 151 | }) 152 | r.ReplicaSetAnnotations = append(r.ReplicaSetAnnotations, ReplicaSetAnnotation{ 153 | ReplicaSetUuid: r.Uuid, 154 | AnnotationUuid: annotationUuid, 155 | }) 156 | r.ResourceAnnotations = append(r.ResourceAnnotations, ResourceAnnotation{ 157 | ResourceUuid: r.Uuid, 158 | AnnotationUuid: annotationUuid, 159 | }) 160 | } 161 | 162 | scheme := kruntime.NewScheme() 163 | _ = kappsv1.AddToScheme(scheme) 164 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kappsv1.SchemeGroupVersion) 165 | output, _ := kruntime.Encode(codec, replicaSet) 166 | r.Yaml = string(output) 167 | } 168 | 169 | func (r *ReplicaSet) MarshalEvent() (notifications.Event, error) { 170 | return notifications.Event{ 171 | Name: r.Namespace + "/" + r.Name, 172 | Severity: r.IcingaState.ToSeverity(), 173 | Message: r.IcingaStateReason, 174 | URL: &url.URL{Path: "/replicaset", RawQuery: fmt.Sprintf("id=%s", r.Uuid)}, 175 | 
Tags: map[string]string{ 176 | "uuid": r.Uuid.String(), 177 | "name": r.Name, 178 | "namespace": r.Namespace, 179 | "resource": "replica_set", 180 | }, 181 | }, nil 182 | } 183 | 184 | func (r *ReplicaSet) getIcingaState() (IcingaState, string) { 185 | for _, condition := range r.Conditions { 186 | if condition.Type == string(kappsv1.ReplicaSetReplicaFailure) && condition.Status == string(kcorev1.ConditionTrue) { 187 | reason := fmt.Sprintf("ReplicaSet %s/%s has a failure condition: %s.", r.Namespace, r.Name, condition.Message) 188 | 189 | return Critical, reason 190 | } 191 | } 192 | 193 | switch { 194 | case r.AvailableReplicas < 1 && r.DesiredReplicas > 0: 195 | reason := fmt.Sprintf("ReplicaSet %s/%s has no replica available from %d desired.", r.Namespace, r.Name, r.DesiredReplicas) 196 | 197 | return Critical, reason 198 | case r.AvailableReplicas < r.DesiredReplicas: 199 | reason := fmt.Sprintf("ReplicaSet %s/%s only has %d out of %d desired replicas available.", r.Namespace, r.Name, r.AvailableReplicas, r.DesiredReplicas) 200 | 201 | return Warning, reason 202 | default: 203 | reason := fmt.Sprintf("ReplicaSet %s/%s has all %d desired replicas available.", r.Namespace, r.Name, r.DesiredReplicas) 204 | 205 | return Ok, reason 206 | } 207 | } 208 | 209 | func (r *ReplicaSet) Relations() []database.Relation { 210 | fk := database.WithForeignKey("replica_set_uuid") 211 | 212 | return []database.Relation{ 213 | database.HasMany(r.Conditions, fk), 214 | database.HasMany(r.Owners, fk), 215 | database.HasMany(r.ResourceLabels, database.WithForeignKey("resource_uuid")), 216 | database.HasMany(r.Labels, database.WithoutCascadeDelete()), 217 | database.HasMany(r.ReplicaSetLabels, fk), 218 | database.HasMany(r.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 219 | database.HasMany(r.Annotations, database.WithoutCascadeDelete()), 220 | database.HasMany(r.ReplicaSetAnnotations, fk), 221 | database.HasMany(r.Favorites, 
database.WithForeignKey("resource_uuid")), 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /pkg/schema/v1/resource_annotation.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/icinga/icinga-go-library/types" 4 | 5 | type ResourceAnnotation struct { 6 | ResourceUuid types.UUID 7 | AnnotationUuid types.UUID 8 | } 9 | -------------------------------------------------------------------------------- /pkg/schema/v1/resource_label.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/icinga/icinga-go-library/types" 4 | 5 | type ResourceLabel struct { 6 | ResourceUuid types.UUID 7 | LabelUuid types.UUID 8 | } 9 | -------------------------------------------------------------------------------- /pkg/schema/v1/secret.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/types" 5 | "github.com/icinga/icinga-kubernetes/pkg/database" 6 | kcorev1 "k8s.io/api/core/v1" 7 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "strings" 9 | ) 10 | 11 | type Secret struct { 12 | Meta 13 | Type string 14 | Immutable types.Bool 15 | Labels []Label `db:"-"` 16 | SecretLabels []SecretLabel `db:"-"` 17 | ResourceLabels []ResourceLabel `db:"-"` 18 | Annotations []Annotation `db:"-"` 19 | SecretAnnotations []SecretAnnotation `db:"-"` 20 | ResourceAnnotations []ResourceAnnotation `db:"-"` 21 | } 22 | 23 | type SecretLabel struct { 24 | SecretUuid types.UUID 25 | LabelUuid types.UUID 26 | } 27 | 28 | type SecretAnnotation struct { 29 | SecretUuid types.UUID 30 | AnnotationUuid types.UUID 31 | } 32 | 33 | func NewSecret() Resource { 34 | return &Secret{} 35 | } 36 | 37 | func (s *Secret) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 38 | s.ObtainMeta(k8s, clusterUuid) 39 | 40 | secret 
:= k8s.(*kcorev1.Secret) 41 | 42 | s.Type = string(secret.Type) 43 | 44 | var immutable bool 45 | if secret.Immutable != nil { 46 | immutable = *secret.Immutable 47 | } 48 | s.Immutable = types.Bool{ 49 | Bool: immutable, 50 | Valid: true, 51 | } 52 | 53 | for labelName, labelValue := range secret.Labels { 54 | labelUuid := NewUUID(s.Uuid, strings.ToLower(labelName+":"+labelValue)) 55 | s.Labels = append(s.Labels, Label{ 56 | Uuid: labelUuid, 57 | Name: labelName, 58 | Value: labelValue, 59 | }) 60 | s.SecretLabels = append(s.SecretLabels, SecretLabel{ 61 | SecretUuid: s.Uuid, 62 | LabelUuid: labelUuid, 63 | }) 64 | s.ResourceLabels = append(s.ResourceLabels, ResourceLabel{ 65 | ResourceUuid: s.Uuid, 66 | LabelUuid: labelUuid, 67 | }) 68 | } 69 | 70 | for annotationName, annotationValue := range secret.Annotations { 71 | annotationUuid := NewUUID(s.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 72 | s.Annotations = append(s.Annotations, Annotation{ 73 | Uuid: annotationUuid, 74 | Name: annotationName, 75 | Value: annotationValue, 76 | }) 77 | s.SecretAnnotations = append(s.SecretAnnotations, SecretAnnotation{ 78 | SecretUuid: s.Uuid, 79 | AnnotationUuid: annotationUuid, 80 | }) 81 | s.ResourceAnnotations = append(s.ResourceAnnotations, ResourceAnnotation{ 82 | ResourceUuid: s.Uuid, 83 | AnnotationUuid: annotationUuid, 84 | }) 85 | } 86 | } 87 | 88 | func (s *Secret) Relations() []database.Relation { 89 | fk := database.WithForeignKey("secret_uuid") 90 | 91 | return []database.Relation{ 92 | database.HasMany(s.ResourceLabels, database.WithForeignKey("resource_uuid")), 93 | database.HasMany(s.Labels, database.WithoutCascadeDelete()), 94 | database.HasMany(s.SecretLabels, fk), 95 | database.HasMany(s.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 96 | database.HasMany(s.Annotations, database.WithoutCascadeDelete()), 97 | database.HasMany(s.SecretAnnotations, fk), 98 | } 99 | } 100 | 
// Selector represents a single key/value entry of a Kubernetes label
// selector, identified by a UUID derived from its owning resource.
type Selector struct {
	Uuid  types.UUID
	Name  string
	Value string
}
46 | ServiceAnnotations []ServiceAnnotation `db:"-"` 47 | ResourceAnnotations []ResourceAnnotation `db:"-"` 48 | ServicePods []ServicePod `db:"-"` 49 | Favorites []Favorite `db:"-"` 50 | factory *ServiceFactory 51 | } 52 | 53 | type ServiceSelector struct { 54 | ServiceUuid types.UUID 55 | SelectorUuid types.UUID 56 | } 57 | 58 | type ServicePort struct { 59 | ServiceUuid types.UUID 60 | Name string 61 | Protocol string 62 | AppProtocol string 63 | Port int32 64 | TargetPort string 65 | NodePort int32 66 | } 67 | 68 | type ServiceCondition struct { 69 | ServiceUuid types.UUID 70 | Type string 71 | Status string 72 | ObservedGeneration int64 73 | LastTransition types.UnixMilli 74 | Reason string 75 | Message string 76 | } 77 | 78 | type ServiceLabel struct { 79 | ServiceUuid types.UUID 80 | LabelUuid types.UUID 81 | } 82 | 83 | type ServiceAnnotation struct { 84 | ServiceUuid types.UUID 85 | AnnotationUuid types.UUID 86 | } 87 | 88 | type ServicePod struct { 89 | ServiceUuid types.UUID 90 | PodUuid types.UUID 91 | } 92 | 93 | func NewServiceFactory(clientset *kubernetes.Clientset) *ServiceFactory { 94 | return &ServiceFactory{ 95 | clientset: clientset, 96 | } 97 | } 98 | 99 | func (f *ServiceFactory) NewService() Resource { 100 | return &Service{factory: f} 101 | } 102 | 103 | func (s *Service) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 104 | s.ObtainMeta(k8s, clusterUuid) 105 | 106 | service := k8s.(*kcorev1.Service) 107 | 108 | for _, condition := range service.Status.Conditions { 109 | s.Conditions = append(s.Conditions, ServiceCondition{ 110 | ServiceUuid: s.Uuid, 111 | Type: condition.Type, 112 | Status: strcase.Snake(string(condition.Status)), 113 | ObservedGeneration: condition.ObservedGeneration, 114 | LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 115 | Reason: condition.Reason, 116 | Message: condition.Message, 117 | }) 118 | } 119 | 120 | for labelName, labelValue := range service.Labels { 121 | labelUuid := NewUUID(s.Uuid, 
strings.ToLower(labelName+":"+labelValue)) 122 | s.Labels = append(s.Labels, Label{ 123 | Uuid: labelUuid, 124 | Name: labelName, 125 | Value: labelValue, 126 | }) 127 | s.ServiceLabels = append(s.ServiceLabels, ServiceLabel{ 128 | ServiceUuid: s.Uuid, 129 | LabelUuid: labelUuid, 130 | }) 131 | } 132 | 133 | for annotationName, annotationValue := range service.Annotations { 134 | annotationUuid := NewUUID(s.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 135 | s.Annotations = append(s.Annotations, Annotation{ 136 | Uuid: annotationUuid, 137 | Name: annotationName, 138 | Value: annotationValue, 139 | }) 140 | s.ServiceAnnotations = append(s.ServiceAnnotations, ServiceAnnotation{ 141 | ServiceUuid: s.Uuid, 142 | AnnotationUuid: annotationUuid, 143 | }) 144 | s.ResourceAnnotations = append(s.ResourceAnnotations, ResourceAnnotation{ 145 | ResourceUuid: s.Uuid, 146 | AnnotationUuid: annotationUuid, 147 | }) 148 | s.ResourceLabels = append(s.ResourceLabels, ResourceLabel{ 149 | ResourceUuid: s.Uuid, 150 | LabelUuid: annotationUuid, 151 | }) 152 | } 153 | 154 | for _, port := range service.Spec.Ports { 155 | var appProtocol string 156 | if port.AppProtocol != nil { 157 | appProtocol = *port.AppProtocol 158 | } 159 | s.Ports = append(s.Ports, ServicePort{ 160 | ServiceUuid: s.Uuid, 161 | Name: port.Name, 162 | Protocol: string(port.Protocol), 163 | AppProtocol: appProtocol, 164 | Port: port.Port, 165 | TargetPort: port.TargetPort.String(), 166 | NodePort: port.NodePort, 167 | }) 168 | } 169 | 170 | for selectorName, selectorValue := range service.Spec.Selector { 171 | selectorUuid := NewUUID(s.Uuid, strings.ToLower(selectorName+":"+selectorValue)) 172 | s.Selectors = append(s.Selectors, Selector{ 173 | Uuid: selectorUuid, 174 | Name: selectorName, 175 | Value: selectorValue, 176 | }) 177 | s.ServiceSelectors = append(s.ServiceSelectors, ServiceSelector{ 178 | ServiceUuid: s.Uuid, 179 | SelectorUuid: selectorUuid, 180 | }) 181 | } 182 | 183 | s.ClusterIP = 
service.Spec.ClusterIP 184 | s.ClusterIPs = strings.Join(service.Spec.ClusterIPs, ", ") 185 | s.Type = string(service.Spec.Type) 186 | s.ExternalIPs = NewNullableString(strings.Join(service.Spec.ExternalIPs, ", ")) 187 | s.SessionAffinity = string(service.Spec.SessionAffinity) 188 | // TODO(el): Support LoadBalancerSourceRanges? 189 | s.ExternalName = NewNullableString(service.Spec.ExternalName) 190 | s.ExternalTrafficPolicy = NewNullableString(string(service.Spec.ExternalTrafficPolicy)) 191 | s.HealthCheckNodePort = sql.NullInt32{ 192 | Int32: service.Spec.HealthCheckNodePort, 193 | Valid: service.Spec.HealthCheckNodePort != 0, 194 | } 195 | s.PublishNotReadyAddresses = types.Bool{ 196 | Bool: service.Spec.PublishNotReadyAddresses, 197 | Valid: true, 198 | } 199 | // TODO(el): Support SessionAffinityConfig? 200 | var ipv4 bool 201 | var ipv6 bool 202 | for _, ipFamily := range service.Spec.IPFamilies { 203 | s.IpFamilies.Valid = true 204 | 205 | if ipFamily == kcorev1.IPv4Protocol { 206 | ipv4 = true 207 | } else if ipFamily == kcorev1.IPv6Protocol { 208 | ipv6 = true 209 | } 210 | } 211 | switch { 212 | case ipv4 && ipv6: 213 | s.IpFamilies.String = "DualStack" 214 | case ipv4: 215 | s.IpFamilies.String = "IPv4" 216 | case ipv6: 217 | s.IpFamilies.String = "IPv6" 218 | default: 219 | s.IpFamilies.String = "Unknown" 220 | } 221 | s.IpFamilyPolicy = NewNullableString(service.Spec.IPFamilyPolicy) 222 | allocateLoadBalancerNodePorts := true 223 | if service.Spec.AllocateLoadBalancerNodePorts != nil { 224 | allocateLoadBalancerNodePorts = *service.Spec.AllocateLoadBalancerNodePorts 225 | } 226 | s.AllocateLoadBalancerNodePorts = types.Bool{ 227 | Bool: allocateLoadBalancerNodePorts, 228 | Valid: true, 229 | } 230 | s.LoadBalancerClass = NewNullableString(service.Spec.LoadBalancerClass) 231 | var internalTrafficPolicy string 232 | if service.Spec.InternalTrafficPolicy == nil { 233 | internalTrafficPolicy = string(kcorev1.ServiceInternalTrafficPolicyCluster) 234 | } 
else { 235 | internalTrafficPolicy = string(*service.Spec.InternalTrafficPolicy) 236 | } 237 | s.InternalTrafficPolicy = internalTrafficPolicy 238 | scheme := kruntime.NewScheme() 239 | _ = kcorev1.AddToScheme(scheme) 240 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kcorev1.SchemeGroupVersion) 241 | output, _ := kruntime.Encode(codec, service) 242 | s.Yaml = string(output) 243 | } 244 | 245 | func (s *Service) Relations() []database.Relation { 246 | fk := database.WithForeignKey("service_uuid") 247 | 248 | return []database.Relation{ 249 | database.HasMany(s.Conditions, fk), 250 | database.HasMany(s.Ports, fk), 251 | database.HasMany(s.Selectors, database.WithoutCascadeDelete()), 252 | database.HasMany(s.ServiceSelectors, fk), 253 | database.HasMany(s.ResourceLabels, database.WithForeignKey("resource_uuid")), 254 | database.HasMany(s.Labels, database.WithoutCascadeDelete()), 255 | database.HasMany(s.ServiceLabels, fk), 256 | database.HasMany(s.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 257 | database.HasMany(s.Annotations, database.WithoutCascadeDelete()), 258 | database.HasMany(s.ServiceAnnotations, fk), 259 | database.HasMany(s.ResourceAnnotations, fk), 260 | database.HasMany(s.ServicePods, fk), 261 | database.HasMany(s.Favorites, database.WithForeignKey("resource_uuid")), 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /pkg/schema/v1/stateful_set.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "fmt" 5 | "github.com/icinga/icinga-go-library/strcase" 6 | "github.com/icinga/icinga-go-library/types" 7 | "github.com/icinga/icinga-kubernetes/pkg/database" 8 | "github.com/icinga/icinga-kubernetes/pkg/notifications" 9 | kappsv1 "k8s.io/api/apps/v1" 10 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | kruntime 
"k8s.io/apimachinery/pkg/runtime" 12 | kserializer "k8s.io/apimachinery/pkg/runtime/serializer" 13 | kjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 14 | ktypes "k8s.io/apimachinery/pkg/types" 15 | "net/url" 16 | "strings" 17 | ) 18 | 19 | type StatefulSet struct { 20 | Meta 21 | DesiredReplicas int32 22 | ServiceName string 23 | PodManagementPolicy string 24 | UpdateStrategy string 25 | MinReadySeconds int32 26 | PersistentVolumeClaimRetentionPolicyWhenDeleted string 27 | PersistentVolumeClaimRetentionPolicyWhenScaled string 28 | Ordinals int32 29 | ActualReplicas int32 30 | ReadyReplicas int32 31 | CurrentReplicas int32 32 | UpdatedReplicas int32 33 | AvailableReplicas int32 34 | Yaml string 35 | IcingaState IcingaState 36 | IcingaStateReason string 37 | Conditions []StatefulSetCondition `db:"-"` 38 | Owners []StatefulSetOwner `db:"-"` 39 | Labels []Label `db:"-"` 40 | StatefulSetLabels []StatefulSetLabel `db:"-"` 41 | ResourceLabels []ResourceLabel `db:"-"` 42 | Annotations []Annotation `db:"-"` 43 | StatefulSetAnnotations []StatefulSetAnnotation `db:"-"` 44 | ResourceAnnotations []ResourceAnnotation `db:"-"` 45 | Favorites []Favorite `db:"-"` 46 | } 47 | 48 | type StatefulSetCondition struct { 49 | StatefulSetUuid types.UUID 50 | Type string 51 | Status string 52 | LastTransition types.UnixMilli 53 | Reason string 54 | Message string 55 | } 56 | 57 | type StatefulSetOwner struct { 58 | StatefulSetUuid types.UUID 59 | OwnerUuid types.UUID 60 | Kind string 61 | Name string 62 | Uid ktypes.UID 63 | Controller types.Bool 64 | BlockOwnerDeletion types.Bool 65 | } 66 | 67 | type StatefulSetLabel struct { 68 | StatefulSetUuid types.UUID 69 | LabelUuid types.UUID 70 | } 71 | 72 | type StatefulSetAnnotation struct { 73 | StatefulSetUuid types.UUID 74 | AnnotationUuid types.UUID 75 | } 76 | 77 | func NewStatefulSet() Resource { 78 | return &StatefulSet{} 79 | } 80 | 81 | func (s *StatefulSet) Obtain(k8s kmetav1.Object, clusterUuid types.UUID) { 82 | 
s.ObtainMeta(k8s, clusterUuid) 83 | 84 | statefulSet := k8s.(*kappsv1.StatefulSet) 85 | 86 | var replicas, ordinals int32 87 | if statefulSet.Spec.Replicas != nil { 88 | replicas = *statefulSet.Spec.Replicas 89 | } 90 | if statefulSet.Spec.Ordinals != nil { 91 | ordinals = statefulSet.Spec.Ordinals.Start 92 | } 93 | var pvcRetentionPolicyDeleted, pvcRetentionPolicyScaled kappsv1.PersistentVolumeClaimRetentionPolicyType 94 | if statefulSet.Spec.PersistentVolumeClaimRetentionPolicy != nil { 95 | pvcRetentionPolicyDeleted = statefulSet.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted 96 | pvcRetentionPolicyScaled = statefulSet.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled 97 | } else { 98 | pvcRetentionPolicyDeleted, pvcRetentionPolicyScaled = kappsv1.RetainPersistentVolumeClaimRetentionPolicyType, kappsv1.RetainPersistentVolumeClaimRetentionPolicyType 99 | } 100 | 101 | s.DesiredReplicas = replicas 102 | s.ServiceName = statefulSet.Spec.ServiceName 103 | s.PodManagementPolicy = string(statefulSet.Spec.PodManagementPolicy) 104 | s.UpdateStrategy = string(statefulSet.Spec.UpdateStrategy.Type) 105 | s.MinReadySeconds = statefulSet.Spec.MinReadySeconds 106 | s.PersistentVolumeClaimRetentionPolicyWhenDeleted = string(pvcRetentionPolicyDeleted) 107 | s.PersistentVolumeClaimRetentionPolicyWhenScaled = string(pvcRetentionPolicyScaled) 108 | s.Ordinals = ordinals 109 | s.ActualReplicas = statefulSet.Status.Replicas 110 | 111 | s.ReadyReplicas = statefulSet.Status.ReadyReplicas 112 | s.CurrentReplicas = statefulSet.Status.CurrentReplicas 113 | s.UpdatedReplicas = statefulSet.Status.UpdatedReplicas 114 | s.AvailableReplicas = statefulSet.Status.AvailableReplicas 115 | s.IcingaState, s.IcingaStateReason = s.getIcingaState() 116 | 117 | for _, condition := range statefulSet.Status.Conditions { 118 | s.Conditions = append(s.Conditions, StatefulSetCondition{ 119 | StatefulSetUuid: s.Uuid, 120 | Type: string(condition.Type), 121 | Status: string(condition.Status), 122 | 
LastTransition: types.UnixMilli(condition.LastTransitionTime.Time), 123 | Reason: condition.Reason, 124 | Message: condition.Message, 125 | }) 126 | } 127 | 128 | for _, ownerReference := range statefulSet.OwnerReferences { 129 | var blockOwnerDeletion, controller bool 130 | if ownerReference.BlockOwnerDeletion != nil { 131 | blockOwnerDeletion = *ownerReference.BlockOwnerDeletion 132 | } 133 | if ownerReference.Controller != nil { 134 | controller = *ownerReference.Controller 135 | } 136 | s.Owners = append(s.Owners, StatefulSetOwner{ 137 | StatefulSetUuid: s.Uuid, 138 | OwnerUuid: EnsureUUID(ownerReference.UID), 139 | Kind: strcase.Snake(ownerReference.Kind), 140 | Name: ownerReference.Name, 141 | Uid: ownerReference.UID, 142 | BlockOwnerDeletion: types.Bool{ 143 | Bool: blockOwnerDeletion, 144 | Valid: true, 145 | }, 146 | Controller: types.Bool{ 147 | Bool: controller, 148 | Valid: true, 149 | }, 150 | }) 151 | } 152 | 153 | for labelName, labelValue := range statefulSet.Labels { 154 | labelUuid := NewUUID(s.Uuid, strings.ToLower(labelName+":"+labelValue)) 155 | s.Labels = append(s.Labels, Label{ 156 | Uuid: labelUuid, 157 | Name: labelName, 158 | Value: labelValue, 159 | }) 160 | s.StatefulSetLabels = append(s.StatefulSetLabels, StatefulSetLabel{ 161 | StatefulSetUuid: s.Uuid, 162 | LabelUuid: labelUuid, 163 | }) 164 | s.ResourceLabels = append(s.ResourceLabels, ResourceLabel{ 165 | ResourceUuid: s.Uuid, 166 | LabelUuid: labelUuid, 167 | }) 168 | } 169 | 170 | for annotationName, annotationValue := range statefulSet.Annotations { 171 | annotationUuid := NewUUID(s.Uuid, strings.ToLower(annotationName+":"+annotationValue)) 172 | s.Annotations = append(s.Annotations, Annotation{ 173 | Uuid: annotationUuid, 174 | Name: annotationName, 175 | Value: annotationValue, 176 | }) 177 | s.StatefulSetAnnotations = append(s.StatefulSetAnnotations, StatefulSetAnnotation{ 178 | StatefulSetUuid: s.Uuid, 179 | AnnotationUuid: annotationUuid, 180 | }) 181 | s.ResourceAnnotations 
= append(s.ResourceAnnotations, ResourceAnnotation{ 182 | ResourceUuid: s.Uuid, 183 | AnnotationUuid: annotationUuid, 184 | }) 185 | } 186 | 187 | scheme := kruntime.NewScheme() 188 | _ = kappsv1.AddToScheme(scheme) 189 | codec := kserializer.NewCodecFactory(scheme).EncoderForVersion(kjson.NewYAMLSerializer(kjson.DefaultMetaFactory, scheme, scheme), kappsv1.SchemeGroupVersion) 190 | output, _ := kruntime.Encode(codec, statefulSet) 191 | s.Yaml = string(output) 192 | } 193 | 194 | func (s *StatefulSet) MarshalEvent() (notifications.Event, error) { 195 | return notifications.Event{ 196 | Name: s.Namespace + "/" + s.Name, 197 | Severity: s.IcingaState.ToSeverity(), 198 | Message: s.IcingaStateReason, 199 | URL: &url.URL{Path: "/statefulset", RawQuery: fmt.Sprintf("id=%s", s.Uuid)}, 200 | Tags: map[string]string{ 201 | "uuid": s.Uuid.String(), 202 | "name": s.Name, 203 | "namespace": s.Namespace, 204 | "resource": "stateful_set", 205 | }, 206 | }, nil 207 | } 208 | 209 | func (s *StatefulSet) getIcingaState() (IcingaState, string) { 210 | switch { 211 | case s.AvailableReplicas == 0: 212 | reason := fmt.Sprintf("StatefulSet %s/%s has no replica available from %d desired.", s.Namespace, s.Name, s.DesiredReplicas) 213 | 214 | return Critical, reason 215 | case s.AvailableReplicas < s.DesiredReplicas: 216 | reason := fmt.Sprintf("StatefulSet %s/%s only has %d out of %d desired replicas available.", s.Namespace, s.Name, s.AvailableReplicas, s.DesiredReplicas) 217 | 218 | return Warning, reason 219 | default: 220 | reason := fmt.Sprintf("StatefulSet %s/%s has all %d desired replicas available.", s.Namespace, s.Name, s.DesiredReplicas) 221 | 222 | return Ok, reason 223 | } 224 | } 225 | 226 | func (s *StatefulSet) Relations() []database.Relation { 227 | fk := database.WithForeignKey("stateful_set_uuid") 228 | 229 | return []database.Relation{ 230 | database.HasMany(s.Conditions, fk), 231 | database.HasMany(s.Owners, fk), 232 | database.HasMany(s.ResourceLabels, 
database.WithForeignKey("resource_uuid")), 233 | database.HasMany(s.Labels, database.WithoutCascadeDelete()), 234 | database.HasMany(s.StatefulSetLabels, fk), 235 | database.HasMany(s.ResourceAnnotations, database.WithForeignKey("resource_uuid")), 236 | database.HasMany(s.Annotations, database.WithoutCascadeDelete()), 237 | database.HasMany(s.StatefulSetAnnotations, fk), 238 | database.HasMany(s.Favorites, database.WithForeignKey("resource_uuid")), 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /pkg/schema/v1/utils.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/icinga/icinga-go-library/types" 5 | "golang.org/x/exp/constraints" 6 | "reflect" 7 | ) 8 | 9 | func MarshalFirstNonNilStructFieldToJSON(i any) (string, string, error) { 10 | v := reflect.ValueOf(i) 11 | for _, field := range reflect.VisibleFields(v.Type()) { 12 | if v.FieldByIndex(field.Index).IsNil() { 13 | continue 14 | } 15 | jsn, err := types.MarshalJSON(v.FieldByIndex(field.Index).Interface()) 16 | if err != nil { 17 | return "", "", err 18 | } 19 | 20 | return field.Name, string(jsn), nil 21 | } 22 | 23 | return "", "", nil 24 | } 25 | 26 | func MaxInt[T constraints.Integer](x, y T) T { 27 | if x > y { 28 | return x 29 | } 30 | 31 | return y 32 | } 33 | -------------------------------------------------------------------------------- /pkg/sync/v1/controller.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/go-logr/logr" 7 | "github.com/pkg/errors" 8 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/util/runtime" 10 | "k8s.io/client-go/tools/cache" 11 | "k8s.io/client-go/util/workqueue" 12 | ) 13 | 14 | type Controller struct { 15 | informer cache.SharedIndexInformer 16 | log logr.Logger 17 | queue 
workqueue.TypedRateLimitingInterface[EventHandlerItem] 18 | } 19 | 20 | func NewController( 21 | informer cache.SharedIndexInformer, 22 | log logr.Logger, 23 | ) *Controller { 24 | 25 | return &Controller{ 26 | informer: informer, 27 | log: log, 28 | queue: workqueue.NewTypedRateLimitingQueue[EventHandlerItem]( 29 | workqueue.DefaultTypedControllerRateLimiter[EventHandlerItem](), 30 | ), 31 | } 32 | } 33 | 34 | func (c *Controller) Announce(obj interface{}) error { 35 | return c.informer.GetStore().Add(obj) 36 | } 37 | 38 | func (c *Controller) Stream(ctx context.Context, sink *Sink) error { 39 | _, err := c.informer.AddEventHandler(NewEventHandler(c.queue, c.log.WithName("events"))) 40 | if err != nil { 41 | return err 42 | } 43 | 44 | go func() { 45 | defer runtime.HandleCrash() 46 | 47 | <-ctx.Done() 48 | c.queue.ShutDown() 49 | }() 50 | 51 | go c.informer.Run(ctx.Done()) 52 | 53 | if !cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced) { 54 | return errors.New("timed out waiting for caches to sync") 55 | } 56 | 57 | return c.stream(ctx, sink) 58 | } 59 | 60 | func (c *Controller) stream(ctx context.Context, sink *Sink) error { 61 | var eventHandlerItem EventHandlerItem 62 | var key string 63 | var shutdown bool 64 | for { 65 | c.queue.Done(eventHandlerItem) 66 | 67 | eventHandlerItem, shutdown = c.queue.Get() 68 | if shutdown { 69 | return ctx.Err() 70 | } 71 | 72 | key = eventHandlerItem.KKey 73 | 74 | item, exists, err := c.informer.GetStore().GetByKey(key) 75 | if err != nil { 76 | if c.queue.NumRequeues(eventHandlerItem) < 5 { 77 | c.log.Error(errors.WithStack(err), fmt.Sprintf("Fetching key %s failed. 
Retrying", key)) 78 | 79 | c.queue.AddRateLimited(eventHandlerItem) 80 | } else { 81 | c.queue.Forget(eventHandlerItem) 82 | 83 | if err := sink.Error(ctx, errors.Wrapf(err, "fetching key %s failed", key)); err != nil { 84 | return err 85 | } 86 | } 87 | 88 | continue 89 | } 90 | 91 | c.queue.Forget(eventHandlerItem) 92 | 93 | if !exists || eventHandlerItem.Type == EventDelete { 94 | if err := sink.Delete(ctx, eventHandlerItem.Id); err != nil { 95 | return err 96 | } 97 | } else { 98 | obj := item.(kmetav1.Object) 99 | err := sink.Upsert(ctx, &Item{ 100 | Key: key, 101 | Item: &obj, 102 | }) 103 | if err != nil { 104 | return err 105 | } 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /pkg/sync/v1/event_handler.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "fmt" 5 | "github.com/go-logr/logr" 6 | "github.com/icinga/icinga-go-library/types" 7 | schemav1 "github.com/icinga/icinga-kubernetes/pkg/schema/v1" 8 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/tools/cache" 10 | "k8s.io/client-go/util/workqueue" 11 | ) 12 | 13 | type EventHandler struct { 14 | queue workqueue.TypedInterface[EventHandlerItem] 15 | log logr.Logger 16 | } 17 | 18 | type EventHandlerItem struct { 19 | Type EventType 20 | Id types.UUID 21 | KKey string 22 | } 23 | 24 | type EventType string 25 | 26 | const EventAdd EventType = "ADDED" 27 | const EventUpdate EventType = "UPDATED" 28 | const EventDelete EventType = "DELETED" 29 | 30 | func NewEventHandler(queue workqueue.TypedInterface[EventHandlerItem], log logr.Logger) cache.ResourceEventHandler { 31 | return &EventHandler{queue: queue, log: log} 32 | } 33 | 34 | func (e *EventHandler) OnAdd(obj interface{}, _ bool) { 35 | e.enqueue(EventAdd, obj, cache.MetaNamespaceKeyFunc) 36 | } 37 | 38 | func (e *EventHandler) OnUpdate(_, newObj interface{}) { 39 | e.enqueue(EventUpdate, newObj, 
cache.MetaNamespaceKeyFunc) 40 | } 41 | 42 | func (e *EventHandler) OnDelete(obj interface{}) { 43 | e.enqueue(EventDelete, obj, cache.DeletionHandlingMetaNamespaceKeyFunc) 44 | } 45 | 46 | func (e *EventHandler) enqueue(_type EventType, obj interface{}, keyFunc cache.KeyFunc) { 47 | key, err := keyFunc(obj) 48 | if err != nil { 49 | e.log.Error(err, "cannot make key") 50 | 51 | return 52 | } 53 | 54 | var id types.UUID 55 | switch v := obj.(type) { 56 | case kmetav1.Object: 57 | id = schemav1.EnsureUUID(v.GetUID()) 58 | case cache.DeletedFinalStateUnknown: 59 | id = schemav1.EnsureUUID(v.Obj.(kmetav1.Object).GetUID()) 60 | default: 61 | panic(fmt.Sprintf("unknown object type %#v", v)) 62 | } 63 | 64 | e.queue.Add(EventHandlerItem{ 65 | Type: _type, 66 | Id: id, 67 | KKey: key, 68 | }) 69 | } 70 | -------------------------------------------------------------------------------- /pkg/sync/v1/features.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/icinga/icinga-go-library/database" 4 | 5 | type Feature func(*Features) 6 | 7 | type Features struct { 8 | noDelete bool 9 | noWarmup bool 10 | onDelete database.OnSuccess[any] 11 | onUpsert database.OnSuccess[any] 12 | } 13 | 14 | func NewFeatures(features ...Feature) *Features { 15 | f := &Features{} 16 | for _, feature := range features { 17 | feature(f) 18 | } 19 | 20 | return f 21 | } 22 | 23 | func (f *Features) NoDelete() bool { 24 | return f.noDelete 25 | } 26 | 27 | func (f *Features) NoWarmup() bool { 28 | return f.noWarmup 29 | } 30 | 31 | func (f *Features) OnDelete() database.OnSuccess[any] { 32 | return f.onDelete 33 | } 34 | 35 | func (f *Features) OnUpsert() database.OnSuccess[any] { 36 | return f.onUpsert 37 | } 38 | 39 | func WithNoDelete() Feature { 40 | return func(f *Features) { 41 | f.noDelete = true 42 | } 43 | } 44 | 45 | func WithNoWarumup() Feature { 46 | return func(f *Features) { 47 | f.noWarmup = true 48 | } 49 | } 50 
| 51 | func WithOnDelete(fn database.OnSuccess[any]) Feature { 52 | return func(f *Features) { 53 | f.onDelete = fn 54 | } 55 | } 56 | 57 | func WithOnUpsert(fn database.OnSuccess[any]) Feature { 58 | return func(f *Features) { 59 | f.onUpsert = fn 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /pkg/sync/v1/sink.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "context" 5 | kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "time" 7 | ) 8 | 9 | type Item struct { 10 | Key string 11 | Item *kmetav1.Object 12 | } 13 | 14 | type Sink struct { 15 | error chan error 16 | delete chan interface{} 17 | deleteFunc func(interface{}) interface{} 18 | upsert chan interface{} 19 | upsertFunc func(*Item) interface{} 20 | } 21 | 22 | func NewSink(upsertFunc func(*Item) interface{}, deleteFunc func(interface{}) interface{}) *Sink { 23 | return &Sink{ 24 | error: make(chan error), 25 | delete: make(chan interface{}), 26 | deleteFunc: deleteFunc, 27 | upsert: make(chan interface{}), 28 | upsertFunc: upsertFunc, 29 | } 30 | } 31 | 32 | func (s *Sink) Delete(ctx context.Context, key interface{}) error { 33 | select { 34 | case s.delete <- s.deleteFunc(key): 35 | return nil 36 | case <-ctx.Done(): 37 | return ctx.Err() 38 | } 39 | } 40 | 41 | func (s *Sink) DeleteCh() <-chan interface{} { 42 | return s.delete 43 | } 44 | 45 | func (s *Sink) Error(ctx context.Context, err error) error { 46 | select { 47 | case s.error <- err: 48 | return nil 49 | case <-ctx.Done(): 50 | return ctx.Err() 51 | } 52 | } 53 | 54 | func (s *Sink) ErrorCh() <-chan error { 55 | return s.error 56 | } 57 | 58 | func (s *Sink) Upsert(ctx context.Context, item *Item) error { 59 | if item.Item != nil { 60 | deletionTimestamp := (*item.Item).GetDeletionTimestamp() 61 | if !deletionTimestamp.IsZero() && deletionTimestamp.Time.Compare(time.Now().Add(30*time.Second)) <= 0 { 62 | // Don't process 
UPSERTs if the resource is about to be deleted in the next 30 seconds to 63 | // prevent races between simultaneous UPSERT and DELETE statements for the same resource, 64 | // where an UPSERT statement can occur after a DELETE statement has already been executed. 65 | return ctx.Err() 66 | } 67 | } 68 | 69 | select { 70 | case s.upsert <- s.upsertFunc(item): 71 | return nil 72 | case <-ctx.Done(): 73 | return ctx.Err() 74 | } 75 | } 76 | 77 | func (s *Sink) UpsertCh() <-chan interface{} { 78 | return s.upsert 79 | } 80 | -------------------------------------------------------------------------------- /pkg/sync/v1/sync.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "context" 5 | "github.com/go-logr/logr" 6 | "github.com/icinga/icinga-go-library/com" 7 | "github.com/icinga/icinga-kubernetes/pkg/cluster" 8 | "github.com/icinga/icinga-kubernetes/pkg/database" 9 | schemav1 "github.com/icinga/icinga-kubernetes/pkg/schema/v1" 10 | "golang.org/x/sync/errgroup" 11 | "k8s.io/apimachinery/pkg/util/runtime" 12 | "k8s.io/client-go/tools/cache" 13 | ) 14 | 15 | type Sync struct { 16 | db *database.Database 17 | informer cache.SharedIndexInformer 18 | log logr.Logger 19 | factory func() schemav1.Resource 20 | } 21 | 22 | func NewSync( 23 | db *database.Database, 24 | informer cache.SharedIndexInformer, 25 | log logr.Logger, 26 | factory func() schemav1.Resource, 27 | ) *Sync { 28 | return &Sync{ 29 | db: db, 30 | informer: informer, 31 | log: log, 32 | factory: factory, 33 | } 34 | } 35 | 36 | func (s *Sync) Run(ctx context.Context, features ...Feature) error { 37 | controller := NewController(s.informer, s.log.WithName("controller")) 38 | 39 | with := NewFeatures(features...) 40 | 41 | if !with.NoWarmup() { 42 | if err := s.warmup(ctx, controller); err != nil { 43 | return err 44 | } 45 | } 46 | 47 | return s.sync(ctx, controller, features...) 
48 | } 49 | 50 | func (s *Sync) warmup(ctx context.Context, c *Controller) error { 51 | g, ctx := errgroup.WithContext(ctx) 52 | 53 | meta := &schemav1.Meta{ClusterUuid: cluster.ClusterUuidFromContext(ctx)} 54 | query := s.db.BuildSelectStmt(s.factory(), meta) + ` WHERE cluster_uuid=:cluster_uuid` 55 | 56 | entities, errs := s.db.YieldAll(ctx, func() (interface{}, error) { 57 | return s.factory(), nil 58 | }, query, meta) 59 | 60 | // Let errors from YieldAll() cancel the group. 61 | com.ErrgroupReceive(g, errs) 62 | 63 | g.Go(func() error { 64 | defer runtime.HandleCrash() 65 | 66 | for { 67 | select { 68 | case e, more := <-entities: 69 | if !more { 70 | return nil 71 | } 72 | 73 | if err := c.Announce(e); err != nil { 74 | return err 75 | } 76 | case <-ctx.Done(): 77 | return ctx.Err() 78 | } 79 | } 80 | }) 81 | 82 | return g.Wait() 83 | } 84 | 85 | func (s *Sync) sync(ctx context.Context, c *Controller, features ...Feature) error { 86 | sink := NewSink(func(i *Item) interface{} { 87 | entity := s.factory() 88 | entity.Obtain(*i.Item, cluster.ClusterUuidFromContext(ctx)) 89 | 90 | return entity 91 | }, func(k interface{}) interface{} { 92 | return k 93 | }) 94 | 95 | with := NewFeatures(features...) 
96 | 97 | g, ctx := errgroup.WithContext(ctx) 98 | g.Go(func() error { 99 | defer runtime.HandleCrash() 100 | 101 | return c.Stream(ctx, sink) 102 | }) 103 | g.Go(func() error { 104 | defer runtime.HandleCrash() 105 | 106 | return s.db.UpsertStreamed( 107 | ctx, sink.UpsertCh(), 108 | database.WithCascading(), database.WithOnSuccess(with.OnUpsert())) 109 | }) 110 | g.Go(func() error { 111 | defer runtime.HandleCrash() 112 | 113 | if with.NoDelete() { 114 | for { 115 | select { 116 | case _, more := <-sink.DeleteCh(): 117 | if !more { 118 | return nil 119 | } 120 | case <-ctx.Done(): 121 | return ctx.Err() 122 | } 123 | 124 | } 125 | } else { 126 | return s.db.DeleteStreamed( 127 | ctx, s.factory(), sink.DeleteCh(), 128 | database.WithBlocking(), database.WithCascading(), database.WithOnSuccess(with.OnDelete())) 129 | } 130 | }) 131 | g.Go(func() error { 132 | defer runtime.HandleCrash() 133 | 134 | for { 135 | select { 136 | case err, more := <-sink.ErrorCh(): 137 | if !more { 138 | return nil 139 | } 140 | 141 | s.log.Error(err, "sync error") 142 | case <-ctx.Done(): 143 | return ctx.Err() 144 | } 145 | 146 | } 147 | }) 148 | 149 | return g.Wait() 150 | } 151 | -------------------------------------------------------------------------------- /schema/mysql/embed.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import _ "embed" 4 | 5 | // Schema is a copy of schema.sql. It resides here 6 | // and not in ../../cmd/icinga-kubernetes/main.go due to go:embed restrictions. 7 | // 8 | //go:embed schema.sql 9 | var Schema string 10 | --------------------------------------------------------------------------------