├── .gitignore ├── LICENSE ├── README.md ├── docker-desktop-settings.png ├── gitops ├── .gitignore ├── README.md ├── flux-bootstrap-job.yaml ├── kind-multi-config-001.yaml ├── kind-multi-config-002.yaml ├── kind-simple-config.yaml ├── multicluster │ ├── clusters │ │ ├── cluster-001 │ │ │ ├── gotk-sync.yaml │ │ │ ├── kustomization.yaml │ │ │ └── podinfo │ │ │ │ ├── deployment.yaml │ │ │ │ ├── hpa.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── service.yaml │ │ └── cluster-002 │ │ │ ├── gotk-sync.yaml │ │ │ └── kustomization.yaml │ └── infrastructure │ │ ├── gotk-components.yaml │ │ ├── ingress-controller.yaml │ │ └── kustomization.yaml ├── resilience │ ├── clusters │ │ ├── cluster-001 │ │ │ ├── gotk-sync.yaml │ │ │ ├── infrastructure.yaml │ │ │ ├── kustomization.yaml │ │ │ └── podinfo │ │ │ │ ├── deployment.yaml │ │ │ │ ├── hpa.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── service.yaml │ │ └── cluster-002 │ │ │ ├── gotk-sync.yaml │ │ │ ├── infrastructure.yaml │ │ │ └── kustomization.yaml │ └── infrastructure │ │ ├── gotk-components.yaml │ │ ├── ingress-controller.yaml │ │ └── kustomization.yaml ├── setup-multi.sh ├── setup-resilience.sh ├── setup-simple.sh ├── simple-git-server │ ├── Dockerfile │ ├── README.md │ ├── git-shell-commands │ │ └── no-interactive-login │ ├── sshd_config │ └── start.sh ├── simple │ ├── README.md │ ├── applications │ │ ├── README.md │ │ ├── deployment.yaml │ │ ├── hpa.yaml │ │ ├── ingress.yaml │ │ ├── kustomization.yaml │ │ └── service.yaml │ └── infrastructure │ │ ├── README.md │ │ ├── ingress-controller.yaml │ │ └── kustomization.yaml ├── start-git.sh ├── stop-git.sh ├── teardown-multi.sh ├── teardown-resilience.sh └── teardown-simple.sh ├── jsonnet ├── .gitignore ├── README.md ├── grafonnet │ ├── .envrc │ ├── dashboard.jsonnet │ ├── g.libsonnet │ ├── jsonnetfile.json │ └── jsonnetfile.lock.json ├── kubernetes-mixin │ ├── .envrc │ ├── jsonnetfile.json │ ├── jsonnetfile.lock.json │ ├── kubernetes-mixin.jsonnet │ ├── render.sh │ └── setup.sh └── playground │ ├── index.html │ ├── js │ ├── CodeMirror │ │ ├── LICENSE │ │ ├── README.md │ │ ├── addon │ │ │ └── edit │ │ │ │ └── matchbrackets.js │ │ ├── lib │ │ │ ├── codemirror.css │ │ │ └── codemirror.js │ │ └── mode │ │ │ └── yaml │ │ │ └── yaml.js │ ├── codemirror-mode-jsonnet.js │ ├── js-yaml.min.js │ ├── libjsonnet.wasm │ └── wasm_exec.js │ └── run.sh ├── kubedoom ├── README.md ├── create_cluster.sh ├── delete_cluster.sh ├── demons.sh ├── demons │ ├── deployment.yaml │ ├── kustomization.yaml │ └── namespace.yaml ├── deploy │ ├── deployment.yaml │ ├── kustomization.yaml │ ├── namespace.yaml │ └── rbac.yaml ├── doom.png ├── kind-config.yaml └── kubedoom.sh ├── observability-helm ├── README.md ├── create_cluster.sh ├── delete_cluster.sh ├── install.sh └── kind-config.yaml ├── observability ├── README.md ├── alertmanager.png ├── config.yaml ├── create_cluster.sh ├── delete_cluster.sh ├── grafana.png ├── install │ ├── alertmanager-alertmanager.yaml │ ├── alertmanager-networkPolicy.yaml │ ├── alertmanager-podDisruptionBudget.yaml │ ├── alertmanager-prometheusRule.yaml │ ├── alertmanager-secret.yaml │ ├── alertmanager-service.yaml │ ├── alertmanager-serviceAccount.yaml │ ├── alertmanager-serviceMonitor.yaml │ ├── blackboxExporter-clusterRole.yaml │ ├── blackboxExporter-clusterRoleBinding.yaml │ ├── blackboxExporter-configuration.yaml │ ├── blackboxExporter-deployment.yaml │ ├── blackboxExporter-networkPolicy.yaml │ ├── blackboxExporter-service.yaml │ ├── 
blackboxExporter-serviceAccount.yaml │ ├── blackboxExporter-serviceMonitor.yaml │ ├── grafana-config.yaml │ ├── grafana-dashboardDatasources.yaml │ ├── grafana-dashboardDefinitions.yaml │ ├── grafana-dashboardSources.yaml │ ├── grafana-deployment.yaml │ ├── grafana-networkPolicy.yaml │ ├── grafana-prometheusRule.yaml │ ├── grafana-service.yaml │ ├── grafana-serviceAccount.yaml │ ├── grafana-serviceMonitor.yaml │ ├── kubePrometheus-prometheusRule.yaml │ ├── kubeStateMetrics-clusterRole.yaml │ ├── kubeStateMetrics-clusterRoleBinding.yaml │ ├── kubeStateMetrics-deployment.yaml │ ├── kubeStateMetrics-networkPolicy.yaml │ ├── kubeStateMetrics-prometheusRule.yaml │ ├── kubeStateMetrics-service.yaml │ ├── kubeStateMetrics-serviceAccount.yaml │ ├── kubeStateMetrics-serviceMonitor.yaml │ ├── kubernetesControlPlane-prometheusRule.yaml │ ├── kubernetesControlPlane-serviceMonitorApiserver.yaml │ ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml │ ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml │ ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml │ ├── kubernetesControlPlane-serviceMonitorKubelet.yaml │ ├── nodeExporter-clusterRole.yaml │ ├── nodeExporter-clusterRoleBinding.yaml │ ├── nodeExporter-daemonset.yaml │ ├── nodeExporter-networkPolicy.yaml │ ├── nodeExporter-prometheusRule.yaml │ ├── nodeExporter-service.yaml │ ├── nodeExporter-serviceAccount.yaml │ ├── nodeExporter-serviceMonitor.yaml │ ├── prometheus-clusterRole.yaml │ ├── prometheus-clusterRoleBinding.yaml │ ├── prometheus-networkPolicy.yaml │ ├── prometheus-podDisruptionBudget.yaml │ ├── prometheus-prometheus.yaml │ ├── prometheus-prometheusRule.yaml │ ├── prometheus-roleBindingConfig.yaml │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ ├── prometheus-roleConfig.yaml │ ├── prometheus-roleSpecificNamespaces.yaml │ ├── prometheus-service.yaml │ ├── prometheus-serviceAccount.yaml │ ├── prometheus-serviceMonitor.yaml │ ├── prometheusAdapter-apiService.yaml │ ├── prometheusAdapter-clusterRole.yaml │ ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml │ ├── prometheusAdapter-clusterRoleBinding.yaml │ ├── prometheusAdapter-clusterRoleBindingDelegator.yaml │ ├── prometheusAdapter-clusterRoleServerResources.yaml │ ├── prometheusAdapter-configMap.yaml │ ├── prometheusAdapter-deployment.yaml │ ├── prometheusAdapter-networkPolicy.yaml │ ├── prometheusAdapter-podDisruptionBudget.yaml │ ├── prometheusAdapter-roleBindingAuthReader.yaml │ ├── prometheusAdapter-service.yaml │ ├── prometheusAdapter-serviceAccount.yaml │ ├── prometheusAdapter-serviceMonitor.yaml │ ├── prometheusOperator-clusterRole.yaml │ ├── prometheusOperator-clusterRoleBinding.yaml │ ├── prometheusOperator-deployment.yaml │ ├── prometheusOperator-networkPolicy.yaml │ ├── prometheusOperator-prometheusRule.yaml │ ├── prometheusOperator-service.yaml │ ├── prometheusOperator-serviceAccount.yaml │ └── prometheusOperator-serviceMonitor.yaml ├── namespace.yaml ├── prometheus.png └── setup │ ├── 0alertmanagerConfigCustomResourceDefinition.yaml │ ├── 0alertmanagerCustomResourceDefinition.yaml │ ├── 0podmonitorCustomResourceDefinition.yaml │ ├── 0probeCustomResourceDefinition.yaml │ ├── 0prometheusCustomResourceDefinition.yaml │ ├── 0prometheusruleCustomResourceDefinition.yaml │ ├── 0servicemonitorCustomResourceDefinition.yaml │ └── 0thanosrulerCustomResourceDefinition.yaml ├── renovate.json ├── simple-kubernetes-with-ingress ├── README.md ├── all.sh ├── config.yaml ├── create_cluster.sh ├── delete_cluster.sh ├── deployments.yaml ├── 
ingress-controller.yaml ├── ingress.yaml ├── multiple-domains │ └── ingress.yaml └── services.yaml ├── simple-kubernetes-with-priority ├── README.md ├── all.sh ├── config.yaml ├── create_cluster.sh ├── delete_cluster.sh ├── deployment-high-prio-nolimit.yaml ├── deployment-high-prio-pdb.yaml ├── deployment-high-prio.yaml ├── deployment-low-prio.yaml ├── deployment-medium-prio-pbd.yaml ├── deployment-medium-prio.yaml ├── deployment-no-prio.yaml ├── ingress-controller.yaml ├── ingress.yaml ├── multiple-domains │ └── ingress.yaml ├── poddisruptionbudgets.yaml ├── priority-classes.yaml └── services.yaml └── simple-kubernetes ├── README.md ├── create_cluster.sh ├── delete_cluster.sh ├── deployment.yaml ├── img ├── components-of-kubernetes.svg ├── nodes.svg └── pods.svg ├── multi-node-cluster ├── cluster-config.yaml ├── create_cluster.sh └── delete_cluster.sh ├── namespace.yaml ├── service.yaml └── test.log /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Netic A/S (netic.dk) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docker-desktop-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/docker-desktop-settings.png -------------------------------------------------------------------------------- /gitops/.gitignore: -------------------------------------------------------------------------------- 1 | hack -------------------------------------------------------------------------------- /gitops/README.md: -------------------------------------------------------------------------------- 1 | # Showcases on gitops 2 | 3 | This directory contains different examples of how to utilize [gitops](https://www.weave.works/technologies/gitops/) 4 | to configure and deploy on Kubernetes. 5 | 6 | ## Prerequisites 7 | 8 | Since gitops is naturally dependent on git, a simple git server must be running. The directory includes a small 9 | git server container image in `simple-git-server`. This can be started by running the `start-git.sh` script.
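For example, from the `gitops` directory (assuming Docker is available and host port 2222 is free):

```sh
./start-git.sh                              # build the image and run the git server on port 2222
docker ps --filter name=simple-git-server   # verify that the container is up
```

The server can be stopped again with `stop-git.sh`.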
Once 10 | the git server is running the remaining gitops setups can be tested out. 11 | 12 | ## Simple gitops Setup 13 | 14 | The simplest gitops example runs a single cluster against a single repository. Running the script `setup-simple.sh` 15 | will create the following setup: 16 | 17 | - Create a git repository for cluster reconciliation in `hack/simple` seeded with files from `simple` - the git repo is set up with 18 | the git server as "remote" so it is possible to do git add, commit and push to update the cluster 19 | - Create a kind cluster based on the `kind-simple-config.yaml` configuration 20 | - Bootstrap the cluster with [flux](https://fluxcd.io/) 21 | - Set up reconciliation against the local git server 22 | 23 | The cluster can be torn down by running `teardown-simple.sh`. 24 | 25 | ## Multiple Cluster Setup 26 | 27 | The next example shows how to run multiple clusters reconciling against the same git repository. Running the script 28 | `setup-multi.sh` will do the following: 29 | 30 | - Create a git repository for cluster reconciliation in `hack/multicluster` seeded with files from `multicluster` 31 | - Create two kind clusters based on `kind-multi-config-001.yaml` and `kind-multi-config-002.yaml` 32 | - Bootstrap both clusters with [flux](https://fluxcd.io/) 33 | - Set up reconciliation for both clusters but with different paths (`clusters/cluster-001` and `clusters/cluster-002`) 34 | 35 | The setup can be torn down by running `teardown-multi.sh`. 36 | 37 | ## Multiple Clusters and Multiple git Repositories 38 | 39 | The last example shows how to run multiple clusters reconciling against a common infrastructure git repository as well 40 | as a git repository containing cluster-specific configurations. The repository for infrastructure contains multiple 41 | branches to enable control over the promotion path. To start up the example run the script `setup-resilience.sh` - this 42 | will set up the following: 43 | 44 | - Create two git repositories `hack/clusters` and `hack/infrastructure` as well as setting up branches to constitute the promotion 45 | path for the `hack/infrastructure` repository. 46 | - Create two kind clusters based on `kind-multi-config-001.yaml` and `kind-multi-config-002.yaml` 47 | - Bootstrap both clusters with [flux](https://fluxcd.io/) 48 | - Set up reconciliation for both clusters with a starting point in `clusters/cluster-001` and `clusters/cluster-002` 49 | -------------------------------------------------------------------------------- /gitops/flux-bootstrap-job.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: flux-bootstrap 6 | namespace: default 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRoleBinding 10 | metadata: 11 | name: flux-bootstrap 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: cluster-admin 16 | subjects: 17 | - kind: ServiceAccount 18 | name: flux-bootstrap 19 | namespace: default 20 | --- 21 | apiVersion: batch/v1 22 | kind: Job 23 | metadata: 24 | name: flux-bootstrap 25 | spec: 26 | template: 27 | spec: 28 | restartPolicy: Never 29 | containers: 30 | - name: bootstrap 31 | args: 32 | - bootstrap 33 | - git 34 | - --url=ssh://git@host.docker.internal:2222/git-server/repos/simple.git 35 | - --private-key-file=/git-server/keys/ssh-privatekey 36 | - --path=.
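# The URL above targets the local git server container via host.docker.internal;
# the key passed to --private-key-file is mounted from the sync-key-secret
# volume defined further down in this manifest.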
37 | - --silent 38 | image: ghcr.io/fluxcd/flux-cli:v2.0.0-rc.2 39 | imagePullPolicy: IfNotPresent 40 | volumeMounts: 41 | - name: sync-key-secret 42 | readOnly: true 43 | mountPath: "/git-server/keys" 44 | serviceAccountName: flux-bootstrap 45 | volumes: 46 | - name: sync-key-secret 47 | secret: 48 | secretName: sync-key-secret 49 | -------------------------------------------------------------------------------- /gitops/kind-multi-config-001.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | name: gitops-multi-001 4 | nodes: 5 | - role: control-plane 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 8081 15 | protocol: TCP 16 | - role: worker 17 | -------------------------------------------------------------------------------- /gitops/kind-multi-config-002.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | name: gitops-multi-002 4 | nodes: 5 | - role: control-plane 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 8082 15 | protocol: TCP 16 | - role: worker 17 | -------------------------------------------------------------------------------- /gitops/kind-simple-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | name: gitops-simple 4 | nodes: 5 | - role: control-plane 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 80 15 | protocol: TCP 16 | - containerPort: 443 17 | hostPort: 443 18 | protocol: TCP 19 | - role: worker 20 | - role: worker -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 
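# Below, a GitRepository defines the source flux polls (every 30s here) and a
# Kustomization applies the ./clusters/cluster-001 path from it.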
2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 30s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/multicluster.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./clusters/cluster-001 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-sync.yaml 5 | - podinfo 6 | - ../../infrastructure 7 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/podinfo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: podinfo 5 | spec: 6 | minReadySeconds: 3 7 | revisionHistoryLimit: 5 8 | progressDeadlineSeconds: 60 9 | strategy: 10 | rollingUpdate: 11 | maxUnavailable: 0 12 | type: RollingUpdate 13 | selector: 14 | matchLabels: 15 | app: podinfo 16 | template: 17 | metadata: 18 | annotations: 19 | prometheus.io/scrape: "true" 20 | prometheus.io/port: "9797" 21 | labels: 22 | app: podinfo 23 | spec: 24 | containers: 25 | - name: podinfod 26 | image: ghcr.io/stefanprodan/podinfo:6.3.6 27 | imagePullPolicy: IfNotPresent 28 | ports: 29 | - name: http 30 | containerPort: 9898 31 | protocol: TCP 32 | - name: http-metrics 33 | containerPort: 9797 34 | protocol: TCP 35 | - name: grpc 36 | containerPort: 9999 37 | protocol: TCP 38 | command: 39 | - ./podinfo 40 | - --port=9898 41 | - --port-metrics=9797 42 | - --grpc-port=9999 43 | - --grpc-service-name=podinfo 44 | - --level=info 45 | - --random-delay=false 46 | - --random-error=false 47 | env: 48 | - name: PODINFO_UI_COLOR 49 | value: "#34577c" 50 | livenessProbe: 51 | exec: 52 | command: 53 | - podcli 54 | - check 55 | - http 56 | - localhost:9898/healthz 57 | initialDelaySeconds: 5 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | exec: 61 | command: 62 | - podcli 63 | - check 64 | - http 65 | - localhost:9898/readyz 66 | initialDelaySeconds: 5 67 | timeoutSeconds: 5 68 | resources: 69 | limits: 70 | cpu: 2000m 71 | memory: 512Mi 72 | requests: 73 | cpu: 100m 74 | memory: 64Mi 75 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/podinfo/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: podinfo 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: podinfo 10 | minReplicas: 2 11 | maxReplicas: 4 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | # scale up if usage is above 19 | # 99% of the requested CPU (100m) 20 | averageUtilization: 99 21 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/podinfo/ingress.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: podinfo 5 | spec: 6 | rules: 7 | - host: podinfo-127-0-0-1.nip.io 8 | http: 9 | paths: 10 | - pathType: Prefix 11 | path: / 12 | backend: 13 | service: 14 | name: podinfo 15 | port: 16 | name: http 17 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/podinfo/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: default 4 | resources: 5 | - deployment.yaml 6 | - hpa.yaml 7 | - ingress.yaml 8 | - service.yaml 9 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-001/podinfo/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: podinfo 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: podinfo 9 | ports: 10 | - name: http 11 | port: 9898 12 | protocol: TCP 13 | targetPort: http 14 | - port: 9999 15 | targetPort: grpc 16 | protocol: TCP 17 | name: grpc 18 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-002/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/multicluster.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./clusters/cluster-002 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /gitops/multicluster/clusters/cluster-002/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-sync.yaml 5 | - ../../infrastructure 6 | -------------------------------------------------------------------------------- /gitops/multicluster/infrastructure/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-components.yaml 5 | - ingress-controller.yaml 6 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 
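# In the resilience example this repository (clusters.git) holds only the
# per-cluster configuration; shared components come in through a second
# GitRepository defined in infrastructure.yaml.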
2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/clusters.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./cluster-001 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: infrastructure 7 | namespace: flux-system 8 | spec: 9 | interval: 30s 10 | ref: 11 | branch: innovators 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/infrastructure.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: infrastructure 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./ 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: infrastructure 28 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-sync.yaml 5 | - infrastructure.yaml 6 | - podinfo 7 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/podinfo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: podinfo 5 | spec: 6 | minReadySeconds: 3 7 | revisionHistoryLimit: 5 8 | progressDeadlineSeconds: 60 9 | strategy: 10 | rollingUpdate: 11 | maxUnavailable: 0 12 | type: RollingUpdate 13 | selector: 14 | matchLabels: 15 | app: podinfo 16 | template: 17 | metadata: 18 | annotations: 19 | prometheus.io/scrape: "true" 20 | prometheus.io/port: "9797" 21 | labels: 22 | app: podinfo 23 | spec: 24 | containers: 25 | - name: podinfod 26 | image: ghcr.io/stefanprodan/podinfo:6.3.6 27 | imagePullPolicy: IfNotPresent 28 | ports: 29 | - name: http 30 | containerPort: 9898 31 | protocol: TCP 32 | - name: http-metrics 33 | containerPort: 9797 34 | protocol: TCP 35 | - name: grpc 36 | containerPort: 9999 37 | protocol: TCP 38 | command: 39 | - ./podinfo 40 | - --port=9898 41 | - --port-metrics=9797 42 | - --grpc-port=9999 43 | - --grpc-service-name=podinfo 44 | - --level=info 45 | - --random-delay=false 46 | - --random-error=false 47 | env: 48 | - name: PODINFO_UI_COLOR 49 | value: "#34577c" 50 | livenessProbe: 51 | exec: 52 | command: 53 | - podcli 54 | - check 55 | - http 56 | - localhost:9898/healthz 57 | initialDelaySeconds: 5 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | exec: 61 | command: 62 | - podcli 63 | - check 64 | - http 65 | - localhost:9898/readyz 66 | initialDelaySeconds: 5 67 | timeoutSeconds: 5 68 | resources: 69 | limits: 70 | cpu: 2000m 71 | memory: 512Mi 72 | 
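# The CPU request below is the baseline for the HPA utilization target in hpa.yaml.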
requests: 73 | cpu: 100m 74 | memory: 64Mi 75 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/podinfo/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: podinfo 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: podinfo 10 | minReplicas: 2 11 | maxReplicas: 4 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | # scale up if usage is above 19 | # 99% of the requested CPU (100m) 20 | averageUtilization: 99 21 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/podinfo/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: podinfo 5 | spec: 6 | rules: 7 | - host: podinfo-127-0-0-1.nip.io 8 | http: 9 | paths: 10 | - pathType: Prefix 11 | path: / 12 | backend: 13 | service: 14 | name: podinfo 15 | port: 16 | name: http 17 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/podinfo/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: default 4 | resources: 5 | - deployment.yaml 6 | - hpa.yaml 7 | - ingress.yaml 8 | - service.yaml 9 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-001/podinfo/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: podinfo 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: podinfo 9 | ports: 10 | - name: http 11 | port: 9898 12 | protocol: TCP 13 | targetPort: http 14 | - port: 9999 15 | targetPort: grpc 16 | protocol: TCP 17 | name: grpc 18 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-002/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/clusters.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./cluster-002 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-002/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 
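# cluster-002 follows the early-adopters branch of the shared infrastructure
# repository, one ring behind cluster-001 (which follows innovators) on the
# promotion path created by setup-resilience.sh.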
2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: infrastructure 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: early-adopters 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@host.docker.internal:2222/git-server/repos/infrastructure.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: infrastructure 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./ 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: infrastructure 28 | -------------------------------------------------------------------------------- /gitops/resilience/clusters/cluster-002/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-sync.yaml 5 | - infrastructure.yaml 6 | -------------------------------------------------------------------------------- /gitops/resilience/infrastructure/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-components.yaml 5 | - ingress-controller.yaml 6 | -------------------------------------------------------------------------------- /gitops/setup-multi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 4 | 5 | echo -n "Kind cluster 001: " 6 | kind get clusters | grep gitops-multi-001 || kind create cluster --name gitops-multi-001 --config=kind-multi-config-001.yaml 7 | 8 | echo -n "Kind cluster 002: " 9 | kind get clusters | grep gitops-multi-002 || kind create cluster --name gitops-multi-002 --config=kind-multi-config-002.yaml 10 | 11 | # Create gitops repo 12 | cp -r $path/multicluster $path/hack/ 13 | pushd $path/hack/multicluster 14 | 15 | if [ ! -d .git ]; then 16 | git init 17 | fi 18 | 19 | git add . 20 | git commit -m "feat: Initial commit" 21 | 22 | # Create repository in server - if necessary 23 | if [ !
-d $path/hack/git-server/repos/multicluster.git ]; then 24 | git clone --bare $path/hack/multicluster $path/hack/git-server/repos/multicluster.git 25 | git remote add origin $path/hack/git-server/repos/multicluster.git 26 | git pull --set-upstream origin main 27 | else 28 | git push 29 | fi 30 | 31 | popd 32 | 33 | known_hosts="$(ssh-keyscan -p 2222 localhost 2>/dev/null | sed "s/localhost/host.docker.internal/g")" 34 | 35 | # Bootstrap flux on cluster 001 36 | if kubectl --context kind-gitops-multi-001 get -n flux-system deployments 2>&1 | grep -q "No resources found"; then 37 | flux install --context kind-gitops-multi-001 --namespace=flux-system 38 | kubectl --context kind-gitops-multi-001 create secret generic -n flux-system flux-system --from-file=identity=$path/hack/git-server/keys/sync-key --from-file=identity.pub=$path/hack/git-server/keys/sync-key.pub --from-literal=known_hosts="$known_hosts" 39 | kubectl --context kind-gitops-multi-001 apply -k $path/hack/multicluster/clusters/cluster-001 40 | fi 41 | 42 | # Bootstrap flux on cluster 002 43 | if kubectl --context kind-gitops-multi-002 get -n flux-system deployments 2>&1 | grep -q "No resources found"; then 44 | flux install --context kind-gitops-multi-002 --namespace=flux-system 45 | kubectl --context kind-gitops-multi-002 create secret generic -n flux-system flux-system --from-file=identity=$path/hack/git-server/keys/sync-key --from-file=identity.pub=$path/hack/git-server/keys/sync-key.pub --from-literal=known_hosts="$known_hosts" 46 | kubectl --context kind-gitops-multi-002 apply -k $path/hack/multicluster/clusters/cluster-002 47 | fi 48 | -------------------------------------------------------------------------------- /gitops/setup-resilience.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 6 | 7 | # 8 | # Setup clusters 9 | # 10 | echo -n "Kind cluster 001: " 11 | kind get clusters | grep gitops-multi-001 || kind create cluster --name gitops-multi-001 --config=kind-multi-config-001.yaml 12 | 13 | echo -n "Kind cluster 002: " 14 | kind get clusters | grep gitops-multi-002 || kind create cluster --name gitops-multi-002 --config=kind-multi-config-002.yaml 15 | 16 | # 17 | # Setup gitops repositories 18 | # 19 | cp -r $path/resilience/clusters $path/hack 20 | pushd $path/hack/clusters 21 | 22 | if [ ! -d .git ]; then 23 | git init 24 | fi 25 | 26 | git add . 27 | git commit -m "feat: Initial commit" 28 | 29 | if [ ! -d $path/hack/git-server/repos/clusters.git ]; then 30 | git clone --bare $path/hack/clusters $path/hack/git-server/repos/clusters.git 31 | git remote add origin $path/hack/git-server/repos/clusters.git 32 | git pull --set-upstream origin main 33 | else 34 | git push 35 | fi 36 | popd 37 | 38 | cp -r $path/resilience/infrastructure $path/hack 39 | pushd $path/hack/infrastructure 40 | 41 | if [ ! -d .git ]; then 42 | git init -b innovators 43 | fi 44 | 45 | git add . 46 | git commit -m "feat: Initial commit" 47 | 48 | if [ ! 
-d $path/hack/git-server/repos/infrastructure.git ]; then 49 | git clone --bare $path/hack/infrastructure $path/hack/git-server/repos/infrastructure.git 50 | git remote add origin $path/hack/git-server/repos/infrastructure.git 51 | git pull --set-upstream origin innovators 52 | else 53 | git push 54 | fi 55 | 56 | for zone in early-adopters early-majority late-majority laggards; do 57 | git branch $zone 58 | git push -u origin $zone:$zone 59 | done 60 | 61 | popd 62 | 63 | # 64 | # Bootstrap clusters 65 | # 66 | known_hosts="$(ssh-keyscan -p 2222 localhost 2>/dev/null | sed "s/localhost/host.docker.internal/g")" 67 | 68 | # Bootstrap flux on cluster 001 69 | if kubectl --context kind-gitops-multi-001 get -n flux-system deployments 2>&1 | grep -q "No resources found"; then 70 | flux install --context kind-gitops-multi-001 --namespace=flux-system 71 | kubectl --context kind-gitops-multi-001 create secret generic -n flux-system flux-system --from-file=identity=$path/hack/git-server/keys/sync-key --from-file=identity.pub=$path/hack/git-server/keys/sync-key.pub --from-literal=known_hosts="$known_hosts" 72 | kubectl --context kind-gitops-multi-001 apply -k $path/hack/clusters/cluster-001 73 | fi 74 | 75 | # Bootstrap flux on cluster 002 76 | if kubectl --context kind-gitops-multi-002 get -n flux-system deployments 2>&1 | grep -q "No resources found"; then 77 | flux install --context kind-gitops-multi-002 --namespace=flux-system 78 | kubectl --context kind-gitops-multi-002 create secret generic -n flux-system flux-system --from-file=identity=$path/hack/git-server/keys/sync-key --from-file=identity.pub=$path/hack/git-server/keys/sync-key.pub --from-literal=known_hosts="$known_hosts" 79 | kubectl --context kind-gitops-multi-002 apply -k $path/hack/clusters/cluster-002 80 | fi 81 | -------------------------------------------------------------------------------- /gitops/setup-simple.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 4 | 5 | # Create kind cluster - if not exists 6 | echo -n "Kind cluster: " 7 | kind get clusters | grep simple-gitops || kind create cluster --name simple-gitops --config=kind-simple-config.yaml 8 | 9 | # Create gitops repo 10 | cp -r $path/simple $path/hack/ 11 | 12 | pushd $path/hack/simple 13 | 14 | if [ ! -d .git ]; then 15 | git init 16 | fi 17 | 18 | git add . 19 | git commit -m "feat: Initial commit" 20 | 21 | # Create repository in server - if necessary 22 | if [ !
-d $path/hack/git-server/repos/simple.git ]; then 23 | git clone --bare $path/hack/simple $path/hack/git-server/repos/simple.git 24 | git remote add origin $path/hack/git-server/repos/simple.git 25 | else 26 | git push 27 | fi 28 | 29 | popd 30 | 31 | # Bootstrap flux 32 | if kubectl get -n flux-system deployments 2>&1 | grep -q "No resources found"; then 33 | kubectl create secret generic sync-key-secret --from-file=ssh-privatekey=$path/hack/git-server/keys/sync-key --from-file=ssh-publickey=$path/hack/git-server/keys/sync-key.pub 34 | kubectl apply -f $path/flux-bootstrap-job.yaml 35 | kubectl wait --for=condition=complete job/flux-bootstrap --timeout=120s 36 | kubectl delete -f $path/flux-bootstrap-job.yaml 37 | kubectl delete secret sync-key-secret 38 | fi 39 | 40 | # Sync changes from Flux back to git "workspace" 41 | pushd $path/hack/simple 42 | git pull --set-upstream origin main 43 | popd 44 | -------------------------------------------------------------------------------- /gitops/simple-git-server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3@sha256:8a1f59ffb675680d47db6337b49d22281a139e9d709335b492be023728e11715 2 | 3 | RUN apk add --no-cache \ 4 | openssh \ 5 | git 6 | 7 | RUN ssh-keygen -A 8 | 9 | WORKDIR /git-server/ 10 | 11 | RUN mkdir /git-server/keys \ 12 | && adduser -D -s /usr/bin/git-shell git \ 13 | && echo git:12345 | chpasswd \ 14 | && mkdir /home/git/.ssh 15 | 16 | # This is a login shell for SSH accounts to provide restricted Git access. 17 | # It permits execution only of server-side Git commands implementing the 18 | # pull/push functionality, plus custom commands present in a subdirectory 19 | # named git-shell-commands in the user’s home directory. 20 | # More info: https://git-scm.com/docs/git-shell 21 | COPY git-shell-commands /home/git/git-shell-commands 22 | 23 | # sshd_config is edited to enable key-based authentication and disable password authentication 24 | COPY sshd_config /etc/ssh/sshd_config 25 | COPY start.sh /start.sh 26 | 27 | EXPOSE 22 28 | 29 | CMD ["sh", "/start.sh"] 30 | -------------------------------------------------------------------------------- /gitops/simple-git-server/README.md: -------------------------------------------------------------------------------- 1 | # Simple Git Server 2 | 3 | Builds a container image to run a very simple SSH-based git server for demo purposes. -------------------------------------------------------------------------------- /gitops/simple-git-server/git-shell-commands/no-interactive-login: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | printf '%s\n' "Welcome to git-server-docker!" 3 | printf '%s\n' "You've successfully authenticated, but I do not" 4 | printf '%s\n' "provide interactive shell access." 5 | exit 128 6 | -------------------------------------------------------------------------------- /gitops/simple-git-server/sshd_config: -------------------------------------------------------------------------------- 1 | # $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $ 2 | 3 | # This is the sshd server system-wide configuration file. See 4 | # sshd_config(5) for more information. 5 | 6 | # This sshd was compiled with PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 7 | 8 | # The strategy used for options in the default sshd_config shipped with 9 | # OpenSSH is to specify options with their default value where 10 | # possible, but leave them commented.
Uncommented options override the 11 | # default value. 12 | 13 | #Port 22 14 | #AddressFamily any 15 | #ListenAddress 0.0.0.0 16 | #ListenAddress :: 17 | 18 | #HostKey /etc/ssh/ssh_host_rsa_key 19 | #HostKey /etc/ssh/ssh_host_ecdsa_key 20 | #HostKey /etc/ssh/ssh_host_ed25519_key 21 | 22 | # Ciphers and keying 23 | #RekeyLimit default none 24 | 25 | # Logging 26 | #SyslogFacility AUTH 27 | #LogLevel INFO 28 | 29 | # Authentication: 30 | 31 | #LoginGraceTime 2m 32 | #PermitRootLogin prohibit-password 33 | #StrictModes yes 34 | #MaxAuthTries 6 35 | #MaxSessions 10 36 | 37 | PubkeyAuthentication yes 38 | 39 | # The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2 40 | # but this is overridden so installations will only check .ssh/authorized_keys 41 | AuthorizedKeysFile .ssh/authorized_keys 42 | 43 | #AuthorizedPrincipalsFile none 44 | 45 | #AuthorizedKeysCommand none 46 | #AuthorizedKeysCommandUser nobody 47 | 48 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts 49 | #HostbasedAuthentication no 50 | # Change to yes if you don't trust ~/.ssh/known_hosts for 51 | # HostbasedAuthentication 52 | #IgnoreUserKnownHosts no 53 | # Don't read the user's ~/.rhosts and ~/.shosts files 54 | #IgnoreRhosts yes 55 | 56 | # To disable tunneled clear text passwords, change to no here! 57 | PasswordAuthentication no 58 | #PermitEmptyPasswords no 59 | 60 | # Change to no to disable s/key passwords 61 | #KbdInteractiveAuthentication yes 62 | 63 | # Kerberos options 64 | #KerberosAuthentication no 65 | #KerberosOrLocalPasswd yes 66 | #KerberosTicketCleanup yes 67 | #KerberosGetAFSToken no 68 | 69 | # GSSAPI options 70 | #GSSAPIAuthentication no 71 | #GSSAPICleanupCredentials yes 72 | 73 | # Set this to 'yes' to enable PAM authentication, account processing, 74 | # and session processing. If this is enabled, PAM authentication will 75 | # be allowed through the KbdInteractiveAuthentication and 76 | # PasswordAuthentication. Depending on your PAM configuration, 77 | # PAM authentication via KbdInteractiveAuthentication may bypass 78 | # the setting of "PermitRootLogin prohibit-password". 79 | # If you just want the PAM account and session checks to run without 80 | # PAM authentication, then enable this but set PasswordAuthentication 81 | # and KbdInteractiveAuthentication to 'no'. 82 | #UsePAM no 83 | 84 | #AllowAgentForwarding yes 85 | # Feel free to re-enable these if your use case requires them. 
86 | AllowTcpForwarding no 87 | GatewayPorts no 88 | X11Forwarding no 89 | #X11DisplayOffset 10 90 | #X11UseLocalhost yes 91 | #PermitTTY yes 92 | #PrintMotd yes 93 | #PrintLastLog yes 94 | #TCPKeepAlive yes 95 | #PermitUserEnvironment no 96 | #Compression delayed 97 | #ClientAliveInterval 0 98 | #ClientAliveCountMax 3 99 | #UseDNS no 100 | #PidFile /run/sshd.pid 101 | #MaxStartups 10:30:100 102 | #PermitTunnel no 103 | #ChrootDirectory none 104 | #VersionAddendum none 105 | 106 | # no default banner path 107 | #Banner none 108 | 109 | # override default of no subsystems 110 | Subsystem sftp internal-sftp 111 | 112 | # Example of overriding settings on a per-user basis 113 | #Match User anoncvs 114 | # X11Forwarding no 115 | # AllowTcpForwarding no 116 | # PermitTTY no 117 | # ForceCommand cvs server 118 | -------------------------------------------------------------------------------- /gitops/simple-git-server/start.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # If there are any public keys in the keys folder, 4 | # copy their content into the authorized_keys file 5 | if [ "$(ls -A /git-server/keys/)" ]; then 6 | cd /home/git 7 | cat /git-server/keys/*.pub > .ssh/authorized_keys 8 | chown -R git:git .ssh 9 | chmod 700 .ssh 10 | chmod -R 600 .ssh/* 11 | fi 12 | 13 | # Check permissions and fix the SGID bit in the repos folder 14 | if [ "$(ls -A /git-server/repos/)" ]; then 15 | cd /git-server/repos 16 | chown -R git:git . 17 | chmod -R ug+rwX . 18 | find . -type d -exec chmod g+s '{}' + 19 | fi 20 | 21 | /usr/sbin/sshd -D 22 | -------------------------------------------------------------------------------- /gitops/simple/README.md: -------------------------------------------------------------------------------- 1 | # Simple GitOps Layout 2 | 3 | A simple approach to a gitops directory structure: one repository for one cluster. 4 | 5 | ```sh 6 | flux bootstrap git --url=https://example.com/repository.git --password= --path=. 7 | ``` 8 | -------------------------------------------------------------------------------- /gitops/simple/applications/README.md: -------------------------------------------------------------------------------- 1 | # Application Deployments 2 | 3 | Set up to deploy the [podinfo](https://github.com/stefanprodan/podinfo) application for demonstration.
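Once flux has reconciled this kustomization, the result can be checked from the host - a sketch, assuming the simple kind cluster (which maps host port 80) and the `default` namespace set in `kustomization.yaml`:

```sh
# Pods managed by the podinfo deployment (scaled 2-4 by the HPA)
kubectl get pods -n default -l app=podinfo

# The ingress host resolves to 127.0.0.1 via nip.io
curl http://podinfo-127-0-0-1.nip.io/
```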
4 | -------------------------------------------------------------------------------- /gitops/simple/applications/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: podinfo 5 | spec: 6 | minReadySeconds: 3 7 | revisionHistoryLimit: 5 8 | progressDeadlineSeconds: 60 9 | strategy: 10 | rollingUpdate: 11 | maxUnavailable: 0 12 | type: RollingUpdate 13 | selector: 14 | matchLabels: 15 | app: podinfo 16 | template: 17 | metadata: 18 | annotations: 19 | prometheus.io/scrape: "true" 20 | prometheus.io/port: "9797" 21 | labels: 22 | app: podinfo 23 | spec: 24 | containers: 25 | - name: podinfod 26 | image: ghcr.io/stefanprodan/podinfo:6.3.6 27 | imagePullPolicy: IfNotPresent 28 | ports: 29 | - name: http 30 | containerPort: 9898 31 | protocol: TCP 32 | - name: http-metrics 33 | containerPort: 9797 34 | protocol: TCP 35 | - name: grpc 36 | containerPort: 9999 37 | protocol: TCP 38 | command: 39 | - ./podinfo 40 | - --port=9898 41 | - --port-metrics=9797 42 | - --grpc-port=9999 43 | - --grpc-service-name=podinfo 44 | - --level=info 45 | - --random-delay=false 46 | - --random-error=false 47 | env: 48 | - name: PODINFO_UI_COLOR 49 | value: "#34577c" 50 | livenessProbe: 51 | exec: 52 | command: 53 | - podcli 54 | - check 55 | - http 56 | - localhost:9898/healthz 57 | initialDelaySeconds: 5 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | exec: 61 | command: 62 | - podcli 63 | - check 64 | - http 65 | - localhost:9898/readyz 66 | initialDelaySeconds: 5 67 | timeoutSeconds: 5 68 | resources: 69 | limits: 70 | cpu: 2000m 71 | memory: 512Mi 72 | requests: 73 | cpu: 100m 74 | memory: 64Mi 75 | -------------------------------------------------------------------------------- /gitops/simple/applications/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: podinfo 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: podinfo 10 | minReplicas: 2 11 | maxReplicas: 4 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | # scale up if usage is above 19 | # 99% of the requested CPU (100m) 20 | averageUtilization: 99 21 | -------------------------------------------------------------------------------- /gitops/simple/applications/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: podinfo 5 | spec: 6 | rules: 7 | - host: podinfo-127-0-0-1.nip.io 8 | http: 9 | paths: 10 | - pathType: Prefix 11 | path: / 12 | backend: 13 | service: 14 | name: podinfo 15 | port: 16 | name: http 17 | -------------------------------------------------------------------------------- /gitops/simple/applications/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: default 4 | resources: 5 | - deployment.yaml 6 | - hpa.yaml 7 | - ingress.yaml 8 | - service.yaml 9 | -------------------------------------------------------------------------------- /gitops/simple/applications/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: podinfo 5 | spec: 6 | type: ClusterIP 7 | 
selector: 8 | app: podinfo 9 | ports: 10 | - name: http 11 | port: 9898 12 | protocol: TCP 13 | targetPort: http 14 | - port: 9999 15 | targetPort: grpc 16 | protocol: TCP 17 | name: grpc 18 | -------------------------------------------------------------------------------- /gitops/simple/infrastructure/README.md: -------------------------------------------------------------------------------- 1 | # Infrastructure Components 2 | 3 | Demonstrates the deployment of infrastructure components. 4 | -------------------------------------------------------------------------------- /gitops/simple/infrastructure/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ingress-controller.yaml 5 | -------------------------------------------------------------------------------- /gitops/start-git.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euxo pipefail 4 | 5 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 6 | 7 | pushd $path/simple-git-server 8 | docker build -t simple-git-server . 9 | popd 10 | 11 | mkdir -p $path/hack/git-server/keys 12 | if [ ! -f "$path/hack/git-server/keys/sync-key" ]; then 13 | ssh-keygen -t ed25519 -C "support@netic.dk" -f $path/hack/git-server/keys/sync-key -N "" 14 | fi 15 | 16 | mkdir -p $path/hack/git-server/repos 17 | 18 | docker run --name=simple-git-server --rm -d -p 2222:22 -v $path/hack/git-server:/git-server simple-git-server 19 | -------------------------------------------------------------------------------- /gitops/stop-git.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker stop simple-git-server 4 | -------------------------------------------------------------------------------- /gitops/teardown-multi.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 4 | 5 | kind delete cluster --name gitops-multi-001 6 | kind delete cluster --name gitops-multi-002 7 | 8 | rm -rf $path/hack/multicluster 9 | rm -rf $path/hack/git-server/repos/multicluster.git 10 | -------------------------------------------------------------------------------- /gitops/teardown-resilience.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 4 | 5 | kind delete cluster --name gitops-multi-001 6 | kind delete cluster --name gitops-multi-002 7 | 8 | rm -rf $path/hack/clusters 9 | rm -rf $path/hack/infrastructure 10 | rm -rf $path/hack/git-server/repos/clusters.git 11 | rm -rf $path/hack/git-server/repos/infrastructure.git 12 | -------------------------------------------------------------------------------- /gitops/teardown-simple.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | path=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) 4 | 5 | kind delete cluster --name simple-gitops 6 | rm -rf $path/hack/simple 7 | rm -rf $path/hack/git-server/repos/simple.git 8 | -------------------------------------------------------------------------------- /jsonnet/.gitignore: -------------------------------------------------------------------------------- 1 | vendor/ 2 |
-------------------------------------------------------------------------------- /jsonnet/README.md: -------------------------------------------------------------------------------- 1 | # Jsonnet Examples 2 | 3 | [Jsonnet](https://jsonnet.org/) is a configuration language originating from Google. Jsonnet can be used to generate a number of different configuration formats but is particularly well suited for [JSON](https://www.json.org/). 4 | 5 | People at Grafana have defined the notion of a [Monitoring Mixin](https://github.com/monitoring-mixins/docs) which is a reusable component with Grafana dashboards and Prometheus recording and alerting rules. Mixins are written in Jsonnet. 6 | 7 | These examples show the usage of Jsonnet: 8 | 9 | - [playground](./playground/) for live evaluation of Jsonnet scripts in a browser. 10 | - [kubernetes-mixin](./kubernetes-mixin/) an example of using the [Prometheus Monitoring Mixin for Kubernetes](https://github.com/kubernetes-monitoring/kubernetes-mixin) to render Grafana dashboards 11 | - [grafonnet](./grafonnet/) an example of using the Grafana [Grafonnet](https://github.com/grafana/grafonnet) library to build Grafana dashboards 12 | 13 | _Note_: You may want to explore the [observability-helm](../observability-helm/) example to get a kind cluster running with Grafana and Prometheus. -------------------------------------------------------------------------------- /jsonnet/grafonnet/.envrc: -------------------------------------------------------------------------------- 1 | export PATH=$PATH:`go env GOPATH`/bin 2 | export JSONNET_PATH=vendor 3 | -------------------------------------------------------------------------------- /jsonnet/grafonnet/dashboard.jsonnet: -------------------------------------------------------------------------------- 1 | local g = import 'g.libsonnet'; 2 | 3 | g.dashboard.new('CNCF Aalborg') 4 | + g.dashboard.withUid('cncf-aalborg-demo') 5 | + g.dashboard.withDescription('Dashboard for CNCF Aalborg demo') 6 | + g.dashboard.graphTooltip.withSharedCrosshair() 7 | + g.dashboard.withPanels([ 8 | g.panel.timeSeries.new('Requests / sec') 9 | + g.panel.timeSeries.queryOptions.withTargets([ 10 | g.query.prometheus.new( 11 | 'PBFA97CFB590B2093', 12 | 'sum by (operation) (rate(etcd_request_duration_seconds_count[$__rate_interval]))', 13 | ) 14 | + g.query.prometheus.withLegendFormat('{{ operation }}'), 15 | ]) 16 | + g.panel.timeSeries.standardOptions.withUnit('reqps') 17 | + g.panel.timeSeries.gridPos.withW(24) 18 | + g.panel.timeSeries.gridPos.withH(8), 19 | ]) 20 | -------------------------------------------------------------------------------- /jsonnet/grafonnet/g.libsonnet: -------------------------------------------------------------------------------- 1 | import 'github.com/grafana/grafonnet/gen/grafonnet-latest/main.libsonnet' 2 | -------------------------------------------------------------------------------- /jsonnet/grafonnet/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet.git", 8 | "subdir": "gen/grafonnet-latest" 9 | } 10 | }, 11 | "version": "main" 12 | } 13 | ], 14 | "legacyImports": true 15 | } 16 | -------------------------------------------------------------------------------- /jsonnet/grafonnet/jsonnetfile.lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 |
"dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet.git", 8 | "subdir": "gen/grafonnet-latest" 9 | } 10 | }, 11 | "version": "fe65a22df6d3a897729fff47cff599805a2c5710", 12 | "sum": "mDHkB1avJU5IqvwCsXPmPt8pSiGjupLVqx3zCgFKzbw=" 13 | }, 14 | { 15 | "source": { 16 | "git": { 17 | "remote": "https://github.com/grafana/grafonnet.git", 18 | "subdir": "gen/grafonnet-v10.2.0" 19 | } 20 | }, 21 | "version": "fe65a22df6d3a897729fff47cff599805a2c5710", 22 | "sum": "9xwtVj7WmdsFWoQ5CTcvDBKEgIPnAF9DsD6dnKl3M/U=" 23 | }, 24 | { 25 | "source": { 26 | "git": { 27 | "remote": "https://github.com/jsonnet-libs/docsonnet.git", 28 | "subdir": "doc-util" 29 | } 30 | }, 31 | "version": "6ac6c69685b8c29c54515448eaca583da2d88150", 32 | "sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U=" 33 | }, 34 | { 35 | "source": { 36 | "git": { 37 | "remote": "https://github.com/jsonnet-libs/xtd.git", 38 | "subdir": "" 39 | } 40 | }, 41 | "version": "80bdea46b69cfbd5a6b57789ad856d3cb525e956", 42 | "sum": "eUSd6nmI07Zl4pYuDnhasQ7Ua37HfHcBVItfvroVUGU=" 43 | } 44 | ], 45 | "legacyImports": false 46 | } 47 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/.envrc: -------------------------------------------------------------------------------- 1 | export PATH=$PATH:`go env GOPATH`/bin 2 | export JSONNET_PATH=vendor 3 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git", 8 | "subdir": "" 9 | } 10 | }, 11 | "version": "bf3acbdd48da8d246f35d198bfe79dd7ec59824d" 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/jsonnetfile.lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/grafana/grafonnet-lib.git", 8 | "subdir": "grafonnet" 9 | } 10 | }, 11 | "version": "a1d61cce1da59c71409b99b5c7568511fec661ea", 12 | "sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc=" 13 | }, 14 | { 15 | "source": { 16 | "git": { 17 | "remote": "https://github.com/grafana/jsonnet-libs.git", 18 | "subdir": "grafana-builder" 19 | } 20 | }, 21 | "version": "cc9a6a141b3c7a2ed9c46d9c48a98f764ae704bd", 22 | "sum": "B49EzIY2WZsFxNMJcgRxE/gcZ9ltnS8pkOOV6Q5qioc=" 23 | }, 24 | { 25 | "source": { 26 | "git": { 27 | "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git", 28 | "subdir": "" 29 | } 30 | }, 31 | "version": "bf3acbdd48da8d246f35d198bfe79dd7ec59824d", 32 | "sum": "bT3Zz4l1K/MrN6K0C/iZSjCIsbmK5Qcc0JexalwSU9g=" 33 | } 34 | ], 35 | "legacyImports": false 36 | } 37 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/kubernetes-mixin.jsonnet: -------------------------------------------------------------------------------- 1 | local mixin = (import 'github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet') + ({ 2 | _config+:: { 3 | grafanaK8s: { 4 | dashboardNamePrefix: 'Netic OaaS / ', 5 | dashboardTags: ['netic-oaas','kubernetes-mixin'], 6 | linkPrefix: '', 7 | refresh: '10s', 8 | minimumTimeInterval: '1m', 9 | grafanaTimezone: 
'browser', 10 | } 11 | }, 12 | }); 13 | 14 | mixin.grafanaDashboards 15 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/render.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | jsonnet kubernetes-mixin.jsonnet 4 | -------------------------------------------------------------------------------- /jsonnet/kubernetes-mixin/setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest 4 | go install github.com/google/go-jsonnet/cmd/jsonnet@latest 5 | -------------------------------------------------------------------------------- /jsonnet/playground/index.html: --------------------------------------------------------------------------------
[The markup of index.html was stripped during extraction; only its visible text survives. The page is titled "Playground" and contains a "Jsonnet" editor pane and a "JSON" output pane, wired up via the CodeMirror editor and the libjsonnet WASM build under js/.]

--------------------------------------------------------------------------------
/jsonnet/playground/js/CodeMirror/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (C) 2017 by Marijn Haverbeke  and others
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 | 


--------------------------------------------------------------------------------
/jsonnet/playground/js/CodeMirror/README.md:
--------------------------------------------------------------------------------
 1 | # CodeMirror
 2 | [![Build Status](https://travis-ci.org/codemirror/CodeMirror.svg)](https://travis-ci.org/codemirror/CodeMirror)
 3 | [![NPM version](https://img.shields.io/npm/v/codemirror.svg)](https://www.npmjs.org/package/codemirror)
 4 | [![Join the chat at https://gitter.im/codemirror/CodeMirror](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/codemirror/CodeMirror)  
 5 | [Funding status: ![maintainer happiness](https://marijnhaverbeke.nl/fund/status_s.png?again)](https://marijnhaverbeke.nl/fund/)
 6 | 
 7 | CodeMirror is a versatile text editor implemented in JavaScript for
 8 | the browser. It is specialized for editing code, and comes with over
 9 | 100 language modes and various addons that implement more advanced
10 | editing functionality. Every language comes with fully-featured code
11 | and syntax highlighting to help with reading and editing complex code.
12 | 
13 | A rich programming API and a CSS theming system are available for
14 | customizing CodeMirror to fit your application, and extending it with
15 | new functionality.
16 | 
17 | You can find more information (and the
18 | [manual](http://codemirror.net/doc/manual.html)) on the [project
19 | page](http://codemirror.net). For questions and discussion, use the
20 | [discussion forum](https://discuss.codemirror.net/).
21 | 
22 | See
23 | [CONTRIBUTING.md](https://github.com/codemirror/CodeMirror/blob/master/CONTRIBUTING.md)
24 | for contributing guidelines.
25 | 
26 | The CodeMirror community aims to be welcoming to everybody. We use the
27 | [Contributor Covenant
28 | (1.1)](http://contributor-covenant.org/version/1/1/0/) as our code of
29 | conduct.
30 | 
31 | ### Quickstart
32 | 
33 | To build the project, make sure you have Node.js installed (at least version 6)
34 | and then `npm install`. To run, just open `index.html` in your
35 | browser (you don't need to run a webserver). Run the tests with `npm test`.
36 | 


--------------------------------------------------------------------------------
/jsonnet/playground/js/CodeMirror/mode/yaml/yaml.js:
--------------------------------------------------------------------------------
  1 | // CodeMirror, copyright (c) by Marijn Haverbeke and others
  2 | // Distributed under an MIT license: http://codemirror.net/LICENSE
  3 | 
  4 | (function(mod) {
  5 |   if (typeof exports == "object" && typeof module == "object") // CommonJS
  6 |     mod(require("../../lib/codemirror"));
  7 |   else if (typeof define == "function" && define.amd) // AMD
  8 |     define(["../../lib/codemirror"], mod);
  9 |   else // Plain browser env
 10 |     mod(CodeMirror);
 11 | })(function(CodeMirror) {
 12 | "use strict";
 13 | 
 14 | CodeMirror.defineMode("yaml", function() {
 15 | 
 16 |   var cons = ['true', 'false', 'on', 'off', 'yes', 'no'];
 17 |   var keywordRegex = new RegExp("\\b(("+cons.join(")|(")+"))$", 'i');
 18 | 
 19 |   return {
 20 |     token: function(stream, state) {
 21 |       var ch = stream.peek();
 22 |       var esc = state.escaped;
 23 |       state.escaped = false;
 24 |       /* comments */
 25 |       if (ch == "#" && (stream.pos == 0 || /\s/.test(stream.string.charAt(stream.pos - 1)))) {
 26 |         stream.skipToEnd();
 27 |         return "comment";
 28 |       }
 29 | 
 30 |       if (stream.match(/^('([^']|\\.)*'?|"([^"]|\\.)*"?)/))
 31 |         return "string";
 32 | 
 33 |       if (state.literal && stream.indentation() > state.keyCol) {
 34 |         stream.skipToEnd(); return "string";
 35 |       } else if (state.literal) { state.literal = false; }
 36 |       if (stream.sol()) {
 37 |         state.keyCol = 0;
 38 |         state.pair = false;
 39 |         state.pairStart = false;
 40 |         /* document start */
 41 |         if(stream.match(/---/)) { return "def"; }
 42 |         /* document end */
 43 |         if (stream.match(/\.\.\./)) { return "def"; }
 44 |         /* array list item */
 45 |         if (stream.match(/\s*-\s+/)) { return 'meta'; }
 46 |       }
 47 |       /* inline pairs/lists */
 48 |       if (stream.match(/^(\{|\}|\[|\])/)) {
 49 |         if (ch == '{')
 50 |           state.inlinePairs++;
 51 |         else if (ch == '}')
 52 |           state.inlinePairs--;
 53 |         else if (ch == '[')
 54 |           state.inlineList++;
 55 |         else
 56 |           state.inlineList--;
 57 |         return 'meta';
 58 |       }
 59 | 
 60 |       /* list separator */
 61 |       if (state.inlineList > 0 && !esc && ch == ',') {
 62 |         stream.next();
 63 |         return 'meta';
 64 |       }
 65 |       /* pairs separator */
 66 |       if (state.inlinePairs > 0 && !esc && ch == ',') {
 67 |         state.keyCol = 0;
 68 |         state.pair = false;
 69 |         state.pairStart = false;
 70 |         stream.next();
 71 |         return 'meta';
 72 |       }
 73 | 
 74 |       /* start of value of a pair */
 75 |       if (state.pairStart) {
 76 |         /* block literals */
 77 |         if (stream.match(/^\s*(\||\>)\s*/)) { state.literal = true; return 'meta'; };
 78 |         /* references */
 79 |         if (stream.match(/^\s*(\&|\*)[a-z0-9\._-]+\b/i)) { return 'variable-2'; }
 80 |         /* numbers */
 81 |         if (state.inlinePairs == 0 && stream.match(/^\s*-?[0-9\.\,]+\s?$/)) { return 'number'; }
 82 |         if (state.inlinePairs > 0 && stream.match(/^\s*-?[0-9\.\,]+\s?(?=(,|}))/)) { return 'number'; }
 83 |         /* keywords */
 84 |         if (stream.match(keywordRegex)) { return 'keyword'; }
 85 |       }
 86 | 
 87 |       /* pairs (associative arrays) -> key */
 88 |       if (!state.pair && stream.match(/^\s*(?:[,\[\]{}&*!|>'"%@`][^\s'":]|[^,\[\]{}#&*!|>'"%@`])[^#]*?(?=\s*:($|\s))/)) {
 89 |         state.pair = true;
 90 |         state.keyCol = stream.indentation();
 91 |         return "atom";
 92 |       }
 93 |       if (state.pair && stream.match(/^:\s*/)) { state.pairStart = true; return 'meta'; }
 94 | 
 95 |       /* nothing found, continue */
 96 |       state.pairStart = false;
 97 |       state.escaped = (ch == '\\');
 98 |       stream.next();
 99 |       return null;
100 |     },
101 |     startState: function() {
102 |       return {
103 |         pair: false,
104 |         pairStart: false,
105 |         keyCol: 0,
106 |         inlinePairs: 0,
107 |         inlineList: 0,
108 |         literal: false,
109 |         escaped: false
110 |       };
111 |     },
112 |     lineComment: "#"
113 |   };
114 | });
115 | 
116 | CodeMirror.defineMIME("text/x-yaml", "yaml");
117 | CodeMirror.defineMIME("text/yaml", "yaml");
118 | 
119 | });
120 | 


--------------------------------------------------------------------------------
/jsonnet/playground/js/libjsonnet.wasm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/jsonnet/playground/js/libjsonnet.wasm


--------------------------------------------------------------------------------
/jsonnet/playground/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | python3 -m http.server
4 | 


--------------------------------------------------------------------------------
/kubedoom/README.md:
--------------------------------------------------------------------------------
 1 | # kubedoom
 2 | 
 3 | [kubedoom](https://github.com/storax/kubedoom) is a fun way to show the
 4 | self-healing capabilities of Kubernetes.
 5 | 
 6 | ## Install
 7 | Please ensure that you are in the `kubedoom` folder, then start up the cluster and install kubedoom following these steps.
 8 | 
 9 | 1. Create a local [kind](https://kind.sigs.k8s.io/) cluster - on *nix run `./create_cluster.sh` (the equivalent command is shown below)
10 | 2. Deploy kubedoom
11 |    ```console
12 |    kubectl apply -k ./deploy
13 |    ```
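
On other platforms, or if you prefer to type the command yourself, step 1 is equivalent to running the following (this is exactly what `create_cluster.sh` does):

```console
kind create cluster --name kubedoom --config=kind-config.yaml
```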
14 | 
15 | kubedoom should be running. To see what was created run `kubectl get all -n kubedoom`, which
16 | will show the resources inside the `kubedoom` namespace.
17 | 
18 | ## Install "Demons"
19 | 
20 | The Kubernetes pods will show up as demons, which can be killed inside the game. The following
21 | command will start up some Kubernetes pods.
22 | 
23 | ```console
24 | kubectl apply -k ./demons
25 | ```
26 | 
27 | To see what was created run `kubectl get all -n demons`. Six pods should be running or starting up.
28 | It is possible to reduce or increase the number of demons by scaling the deployment.
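
For example, to scale one of the demon deployments (defined in `demons/deployment.yaml`) from 2 to 4 replicas:

```console
kubectl scale deployment demon-one -n demons --replicas=4
```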
29 | 
30 | ## Play
31 | 
32 | A vnc client is needed to connect to the game and get started.
33 | 
34 | ### macOS
35 | macOS has built-in support for connecting to VNC servers.
36 | 
37 | 1. Open "Finder"
38 | 2. Select "Go" -> "Connect to server"
39 | 3. Type `vnc://localhost:5900`
40 | 
41 | Password is `idbehold`
42 | 
43 | ### Linux/Windows
44 | Linux and Windows require a separate VNC client to connect to the game.
45 | 
46 | 1. Install a vnc client, e.g., https://www.realvnc.com/en/connect/download/viewer/
47 | 2. Connect to `localhost:5900`
48 | 
49 | Password is `idbehold`
50 | 
51 | 
52 | ### Find some Demons
53 | 
54 | Now go find some pods.
55 | 
56 | ![Doom](doom.png)
57 | 
58 | Typing `idspispopd` will make it possible to go through the walls - the demons should
59 | be to the right when starting the game. `idkfa` will supply extra weapons and more.
60 | 
61 | You can watch the `demons` namespace while playing to see the pods being terminated and recreated.
62 | 
63 | ```console
64 | kubectl get pod -n demons --watch
65 | ```
66 | 
67 | ## Takeaways
68 |  - it is possible to run Kubernetes on your local machine
69 |  - you would be able to deploy this on a lot of other infrastructure, including public cloud
70 |  - whatever can be containerised can run inside a Kubernetes cluster; however, a lot needs
71 |    to be added before having something that is ok for production use, which is a completely
72 |    different set of workshops
73 |  - we see that Kubernetes aims at keeping the number of pods at the defined level - this
74 |    is called the reconciliation loop - or "self healing" (see the example below)
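
A minimal way to see the reconciliation loop without the game is to delete one of the demon pods by hand and watch Kubernetes replace it (the pod name suffix is generated, so it is picked dynamically here):

```console
kubectl -n demons delete $(kubectl -n demons get pods -o name | head -n 1)
kubectl get pod -n demons --watch
```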
75 | 
76 | [Read more](https://opensource.com/article/21/6/kube-doom)
77 | 
78 | Play away....
79 | 
80 | ## Clean up
81 | ```console
82 | ./delete_cluster.sh
83 | ```
84 | 


--------------------------------------------------------------------------------
/kubedoom/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name kubedoom --config=kind-config.yaml
4 | 


--------------------------------------------------------------------------------
/kubedoom/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name kubedoom
4 | 


--------------------------------------------------------------------------------
/kubedoom/demons.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | kubectl apply -k ./demons
3 | 


--------------------------------------------------------------------------------
/kubedoom/demons/deployment.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   name: demon-one
 6 |   namespace: demons
 7 | spec:
 8 |   replicas: 2
 9 |   selector:
10 |     matchLabels:
11 |       app: deployments-invader-one-app
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: deployments-invader-one-app
16 |     spec:
17 |       containers:
18 |         - name: alpine
19 |           image: mirror.gcr.io/library/alpine
20 |           command: ["sh", "-c", "tail -f /dev/null"]
21 | ---
22 | apiVersion: apps/v1
23 | kind: Deployment
24 | metadata:
25 |   name: demon-two
26 |   namespace: demons
27 | spec:
28 |   replicas: 2
29 |   selector:
30 |     matchLabels:
31 |       app: deployments-invader-one-app
32 |   template:
33 |     metadata:
34 |       labels:
35 |         app: deployments-invader-one-app
36 |     spec:
37 |       containers:
38 |         - name: alpine
39 |           image: mirror.gcr.io/library/alpine
40 |           command: ["sh", "-c", "tail -f /dev/null"]
41 | ---
42 | apiVersion: apps/v1
43 | kind: Deployment
44 | metadata:
45 |   name: demon-three
46 |   namespace: demons
47 | spec:
48 |   replicas: 2
49 |   selector:
50 |     matchLabels:
51 |       app: deployments-invader-one-app
52 |   template:
53 |     metadata:
54 |       labels:
55 |         app: deployments-invader-one-app
56 |     spec:
57 |       containers:
58 |         - name: alpine
59 |           image: mirror.gcr.io/library/alpine
60 |           command: ["sh", "-c", "tail -f /dev/null"]
61 | 


--------------------------------------------------------------------------------
/kubedoom/demons/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 |   - namespace.yaml
5 |   - deployment.yaml
6 | 


--------------------------------------------------------------------------------
/kubedoom/demons/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: demons
5 | 


--------------------------------------------------------------------------------
/kubedoom/deploy/deployment.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: Deployment
 3 | metadata:
 4 |   labels:
 5 |     app: kubedoom
 6 |   name: kubedoom
 7 |   namespace: kubedoom
 8 | spec:
 9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       app: kubedoom
13 |   template:
14 |     metadata:
15 |       labels:
16 |         app: kubedoom
17 |     spec:
18 |       hostNetwork: true
19 |       serviceAccountName: kubedoom
20 |       containers:
21 |         - image: ghcr.io/storax/kubedoom:latest
22 |           env:
23 |             - name: NAMESPACE
24 |               value: demons
25 |           name: kubedoom
26 |           ports:
27 |             - containerPort: 5900
28 |               name: vnc
29 | 


--------------------------------------------------------------------------------
/kubedoom/deploy/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 |   - namespace.yaml
5 |   - deployment.yaml
6 |   - rbac.yaml
7 | 


--------------------------------------------------------------------------------
/kubedoom/deploy/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: kubedoom
5 | 


--------------------------------------------------------------------------------
/kubedoom/deploy/rbac.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: v1
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   name: kubedoom
 6 |   namespace: kubedoom
 7 | ---
 8 | apiVersion: rbac.authorization.k8s.io/v1
 9 | kind: ClusterRoleBinding
10 | metadata:
11 |   name: kubedoom
12 | roleRef:
13 |   apiGroup: rbac.authorization.k8s.io
14 |   kind: ClusterRole
15 |   name: cluster-admin
16 | subjects:
17 |   - kind: ServiceAccount
18 |     name: kubedoom
19 |     namespace: kubedoom
20 | 


--------------------------------------------------------------------------------
/kubedoom/doom.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/kubedoom/doom.png


--------------------------------------------------------------------------------
/kubedoom/kind-config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: kubedoom
 4 | nodes:
 5 | - role: control-plane
 6 | - role: worker
 7 |   extraPortMappings:
 8 |   - containerPort: 5900
 9 |     hostPort: 5900
10 |     listenAddress: "127.0.0.1"
11 | 


--------------------------------------------------------------------------------
/kubedoom/kubedoom.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | kubectl apply -k ./deploy
3 | 


--------------------------------------------------------------------------------
/observability-helm/README.md:
--------------------------------------------------------------------------------
 1 | # Kubernetes Observability Stack
 2 | 
 3 | To gain insights into the health of workloads in Kubernetes it is normal to install observability
 4 | tooling. Most observability stacks on Kubernetes are based on the work of the
 5 | [Prometheus Monitoring Mixin for Kubernetes](https://github.com/kubernetes-monitoring/kubernetes-mixin).
 6 | 
 7 | Netic provides an open source distribution based on the standard Kubernetes Monitoring Mixin named [oaas-observability](https://github.com/neticdk/k8s-oaas-observability).
 8 | 
 9 | _Note_: this example requires the installation of the Helm package manager for Kubernetes.
10 | 
11 | ## Install
12 | 
13 | Create cluster (`create_cluster.sh`):
14 | 
15 | ```console
16 | kind create cluster --name observability-helm --config=kind-config.yaml
17 | ```
18 | 
19 | Install the monitoring stack (`install.sh`):
20 | 
21 | ```console
22 | helm repo add netic-oaas https://neticdk.github.io/k8s-oaas-observability
23 | helm upgrade -i oaas-observability netic-oaas/oaas-observability \
24 |   --set opentelemetry-operator.enabled=false \
25 |   --set vector-agent.enabled=false \
26 |   --set grafana.adminPassword=workshop
27 | ```
28 | 
29 | If you see a warning concerning an annotation, that is ok.
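
To watch the stack come up - assuming the chart was installed into the current (default) namespace as above:

```console
kubectl get pods --watch
```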
30 | 
31 | 
32 | ## Access to Dashboards
33 | 
34 | It is now possible to access dashboards showing the data from the cluster through Grafana by
35 | port-forwarding to the Grafana service.
36 | 
37 | ```console
38 | kubectl port-forward svc/oaas-observability-grafana 3000:80
39 | ```
40 | 
41 | Go to http://localhost:3000. The login is `admin` and the password is `workshop`.
42 | 
43 | 
44 | ## What did we learn
45 | We learned that using Helm can be an easy way to deploy a whole stack of components into a cluster with a single command.
46 | 
47 | 
48 | ## Clean up
49 | ```console
50 | ./delete_cluster.sh
51 | ```
52 | 


--------------------------------------------------------------------------------
/observability-helm/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name observability-helm --config=kind-config.yaml
4 | 


--------------------------------------------------------------------------------
/observability-helm/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name observability-helm
4 | 


--------------------------------------------------------------------------------
/observability-helm/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | helm repo add netic-oaas https://neticdk.github.io/k8s-oaas-observability
4 | helm upgrade -i oaas-observability netic-oaas/oaas-observability --set opentelemetry-operator.enabled=false --set vector-agent.enabled=false --set grafana.adminPassword=workshop
5 | 


--------------------------------------------------------------------------------
/observability-helm/kind-config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: observability-helm
 4 | nodes:
 5 | - role: control-plane
 6 |   kubeadmConfigPatches:
 7 |   - |
 8 |     kind: InitConfiguration
 9 |     nodeRegistration:
10 |       kubeletExtraArgs:
11 |         node-labels: "ingress-ready=true"
12 |   extraPortMappings:
13 |   - containerPort: 80
14 |     hostPort: 80
15 |     protocol: TCP
16 |   - containerPort: 443
17 |     hostPort: 443
18 |     protocol: TCP
19 | - role: worker
20 | - role: worker
21 | 


--------------------------------------------------------------------------------
/observability/README.md:
--------------------------------------------------------------------------------
  1 | # Install observability stack on Kubernetes
  2 | 
  3 | In order to get started with this part of the workshop, please navigate to the `observability` folder.
  4 | 
  5 | Create the cluster for this part of the workshop:
  6 | 
  7 | ```console
  8 | ./create_cluster.sh
  9 | ```
 10 | 
 11 | Install the namespace
 12 | ```console
 13 | kubectl create -f ./namespace.yaml
 14 | ```
 15 | Get the installed namespaces
 16 | ```console
 17 | kubectl get namespaces
 18 | ```
 19 | List the contents of the previously installed namespace declaration
 20 | ```console
 21 | cat namespace.yaml
 22 | ```
 23 | 
 24 | ## install operator
 25 | 
 26 | You may install everything in a folder using a declarative approach; here we install everything from the `setup` folder in one command:
 27 | ```console
 28 | kubectl create -f setup
 29 | ```
 30 | This installs a set of Custom Resource Definitions, or CRDs for short. If you want to see them, you can list the installed custom resources by:
 31 | ```console
 32 | kubectl get customresourcedefinition.apiextensions.k8s.io
 33 | kubectl get customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com -o yaml
 34 | ```
 35 | 
 36 | And you may describe one of the custom resources by:
 37 | ```console
 38 | kubectl describe customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com
 39 | ```
 40 | 
 41 | ## install remainder
 42 | Now we have the CRDs installed and we are going to get the remaining components installed. These are e.g. Prometheus (a metrics component), Alertmanager (an alerting component) and Grafana (a visualization tool).
 43 | We use the same principle as above, installing from a folder called `install`:
 44 | ```console
 45 | kubectl create -f install
 46 | ```
 47 | Watch as the pods are created and get ready by listing the pods with the `-w` flag, which streams updates as the pods become ready.
 48 | ## check running pods
 49 | ```console
 50 | kubectl get pods -n monitoring -w
 51 | ```
 52 | 
 53 | Once all pods are running:
 54 | ## check services
 55 | ```console
 56 | kubectl get svc -n monitoring
 57 | ```
 58 | Note that there is a Grafana service, a Prometheus service and an Alertmanager service.
 59 | 
 60 | ## access grafana 
 61 | ```console
 62 | kubectl --namespace monitoring port-forward svc/grafana 3000
 63 | ```
 64 | 
 65 | http://localhost:3000
 66 | Username: admin
 67 | Password: admin (which you have to change on first login; please change it to admin2)
 68 | 
 69 | 
 70 | ## access prometheus
 71 | ```console
 72 | kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
 73 | ```
 74 | 
 75 | http://localhost:9090
 76 | 
 77 | ## access alertmanager
 78 | ```console
 79 | kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
 80 | ```
 81 | http://localhost:9093
 82 | 
 83 | ## why no data
 84 | When you look at the various dashboards you see that there is no data in them.
 85 | This is due to a set of network policies that are installed by default into the cluster.
 86 | These are useful objects and have a very significant impact on security in real clusters;
 87 | this is, however, just a workshop and thus we remove them instead of working with them.
 88 | If you are on Windows, you may not experience the same results.
 89 | 
 90 | ### check network policies
 91 | ```console
 92 | kubectl get networkpolicies -n monitoring
 93 | ```
 94 | 
 95 | ### get rid of them now
 96 | ```console
 97 | kubectl -n monitoring delete networkpolicies.networking.k8s.io --all
 98 | ```
 99 | 
100 | Now there is data in the dashboards.
101 | 
102 | ![grafana](grafana.png)
103 | 
104 | ![prometheus](prometheus.png)
105 | 
106 | ![alertmanager](alertmanager.png)
107 | 
108 | [Read More](https://computingforgeeks.com/setup-prometheus-and-grafana-on-kubernetes/)
109 | 
110 | ## Clean up
111 | ```console
112 | ./delete_cluster.sh
113 | ```
114 | 


--------------------------------------------------------------------------------
/observability/alertmanager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/observability/alertmanager.png


--------------------------------------------------------------------------------
/observability/config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: observability
 4 | nodes:
 5 | - role: control-plane
 6 |   kubeadmConfigPatches:
 7 |   - |
 8 |     kind: InitConfiguration
 9 |     nodeRegistration:
10 |       kubeletExtraArgs:
11 |         node-labels: "ingress-ready=true"
12 |   extraPortMappings:
13 |   - containerPort: 80
14 |     hostPort: 80
15 |     protocol: TCP
16 |   - containerPort: 443
17 |     hostPort: 443
18 |     protocol: TCP
19 | - role: worker
20 | - role: worker
21 | 


--------------------------------------------------------------------------------
/observability/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name observability --config=config.yaml
4 | 


--------------------------------------------------------------------------------
/observability/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name observability
4 | 


--------------------------------------------------------------------------------
/observability/grafana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/observability/grafana.png


--------------------------------------------------------------------------------
/observability/install/alertmanager-alertmanager.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: Alertmanager
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: main
11 |   namespace: monitoring
12 | spec:
13 |   image: quay.io/prometheus/alertmanager:v0.25.0
14 |   nodeSelector:
15 |     kubernetes.io/os: linux
16 |   podMetadata:
17 |     labels:
18 |       app.kubernetes.io/component: alert-router
19 |       app.kubernetes.io/instance: main
20 |       app.kubernetes.io/name: alertmanager
21 |       app.kubernetes.io/part-of: kube-prometheus
22 |       app.kubernetes.io/version: 0.25.0
23 |   replicas: 3
24 |   resources:
25 |     limits:
26 |       cpu: 100m
27 |       memory: 100Mi
28 |     requests:
29 |       cpu: 4m
30 |       memory: 100Mi
31 |   securityContext:
32 |     fsGroup: 2000
33 |     runAsNonRoot: true
34 |     runAsUser: 1000
35 |   serviceAccountName: alertmanager-main
36 |   version: 0.25.0
37 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | spec:
13 |   egress:
14 |   - {}
15 |   ingress:
16 |   - from:
17 |     - podSelector:
18 |         matchLabels:
19 |           app.kubernetes.io/name: prometheus
20 |     ports:
21 |     - port: 9093
22 |       protocol: TCP
23 |     - port: 8080
24 |       protocol: TCP
25 |   - from:
26 |     - podSelector:
27 |         matchLabels:
28 |           app.kubernetes.io/name: alertmanager
29 |     ports:
30 |     - port: 9094
31 |       protocol: TCP
32 |     - port: 9094
33 |       protocol: UDP
34 |   podSelector:
35 |     matchLabels:
36 |       app.kubernetes.io/component: alert-router
37 |       app.kubernetes.io/instance: main
38 |       app.kubernetes.io/name: alertmanager
39 |       app.kubernetes.io/part-of: kube-prometheus
40 |   policyTypes:
41 |   - Egress
42 |   - Ingress
43 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: policy/v1
 2 | kind: PodDisruptionBudget
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | spec:
13 |   maxUnavailable: 1
14 |   selector:
15 |     matchLabels:
16 |       app.kubernetes.io/component: alert-router
17 |       app.kubernetes.io/instance: main
18 |       app.kubernetes.io/name: alertmanager
19 |       app.kubernetes.io/part-of: kube-prometheus
20 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-secret.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Secret
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | stringData:
13 |   alertmanager.yaml: |-
14 |     "global":
15 |       "resolve_timeout": "5m"
16 |     "inhibit_rules":
17 |     - "equal":
18 |       - "namespace"
19 |       - "alertname"
20 |       "source_matchers":
21 |       - "severity = critical"
22 |       "target_matchers":
23 |       - "severity =~ warning|info"
24 |     - "equal":
25 |       - "namespace"
26 |       - "alertname"
27 |       "source_matchers":
28 |       - "severity = warning"
29 |       "target_matchers":
30 |       - "severity = info"
31 |     - "equal":
32 |       - "namespace"
33 |       "source_matchers":
34 |       - "alertname = InfoInhibitor"
35 |       "target_matchers":
36 |       - "severity = info"
37 |     "receivers":
38 |     - "name": "Default"
39 |     - "name": "Watchdog"
40 |     - "name": "Critical"
41 |     - "name": "null"
42 |     "route":
43 |       "group_by":
44 |       - "namespace"
45 |       "group_interval": "5m"
46 |       "group_wait": "30s"
47 |       "receiver": "Default"
48 |       "repeat_interval": "12h"
49 |       "routes":
50 |       - "matchers":
51 |         - "alertname = Watchdog"
52 |         "receiver": "Watchdog"
53 |       - "matchers":
54 |         - "alertname = InfoInhibitor"
55 |         "receiver": "null"
56 |       - "matchers":
57 |         - "severity = critical"
58 |         "receiver": "Critical"
59 | type: Opaque
60 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | spec:
13 |   ports:
14 |   - name: web
15 |     port: 9093
16 |     targetPort: web
17 |   - name: reloader-web
18 |     port: 8080
19 |     targetPort: reloader-web
20 |   selector:
21 |     app.kubernetes.io/component: alert-router
22 |     app.kubernetes.io/instance: main
23 |     app.kubernetes.io/name: alertmanager
24 |     app.kubernetes.io/part-of: kube-prometheus
25 |   sessionAffinity: ClientIP
26 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: alert-router
 7 |     app.kubernetes.io/instance: main
 8 |     app.kubernetes.io/name: alertmanager
 9 |     app.kubernetes.io/part-of: kube-prometheus
10 |     app.kubernetes.io/version: 0.25.0
11 |   name: alertmanager-main
12 |   namespace: monitoring
13 | 


--------------------------------------------------------------------------------
/observability/install/alertmanager-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: alert-router
 6 |     app.kubernetes.io/instance: main
 7 |     app.kubernetes.io/name: alertmanager
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.25.0
10 |   name: alertmanager-main
11 |   namespace: monitoring
12 | spec:
13 |   endpoints:
14 |   - interval: 30s
15 |     port: web
16 |   - interval: 30s
17 |     port: reloader-web
18 |   selector:
19 |     matchLabels:
20 |       app.kubernetes.io/component: alert-router
21 |       app.kubernetes.io/instance: main
22 |       app.kubernetes.io/name: alertmanager
23 |       app.kubernetes.io/part-of: kube-prometheus
24 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   name: blackbox-exporter
 5 | rules:
 6 | - apiGroups:
 7 |   - authentication.k8s.io
 8 |   resources:
 9 |   - tokenreviews
10 |   verbs:
11 |   - create
12 | - apiGroups:
13 |   - authorization.k8s.io
14 |   resources:
15 |   - subjectaccessreviews
16 |   verbs:
17 |   - create
18 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: blackbox-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.23.0
 9 |   name: blackbox-exporter
10 |   namespace: monitoring
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: blackbox-exporter
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: blackbox-exporter
18 |   namespace: monitoring
19 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-configuration.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | data:
 3 |   config.yml: |-
 4 |     "modules":
 5 |       "http_2xx":
 6 |         "http":
 7 |           "preferred_ip_protocol": "ip4"
 8 |         "prober": "http"
 9 |       "http_post_2xx":
10 |         "http":
11 |           "method": "POST"
12 |           "preferred_ip_protocol": "ip4"
13 |         "prober": "http"
14 |       "irc_banner":
15 |         "prober": "tcp"
16 |         "tcp":
17 |           "preferred_ip_protocol": "ip4"
18 |           "query_response":
19 |           - "send": "NICK prober"
20 |           - "send": "USER prober prober prober :prober"
21 |           - "expect": "PING :([^ ]+)"
22 |             "send": "PONG ${1}"
23 |           - "expect": "^:[^ ]+ 001"
24 |       "pop3s_banner":
25 |         "prober": "tcp"
26 |         "tcp":
27 |           "preferred_ip_protocol": "ip4"
28 |           "query_response":
29 |           - "expect": "^+OK"
30 |           "tls": true
31 |           "tls_config":
32 |             "insecure_skip_verify": false
33 |       "ssh_banner":
34 |         "prober": "tcp"
35 |         "tcp":
36 |           "preferred_ip_protocol": "ip4"
37 |           "query_response":
38 |           - "expect": "^SSH-2.0-"
39 |       "tcp_connect":
40 |         "prober": "tcp"
41 |         "tcp":
42 |           "preferred_ip_protocol": "ip4"
43 | kind: ConfigMap
44 | metadata:
45 |   labels:
46 |     app.kubernetes.io/component: exporter
47 |     app.kubernetes.io/name: blackbox-exporter
48 |     app.kubernetes.io/part-of: kube-prometheus
49 |     app.kubernetes.io/version: 0.23.0
50 |   name: blackbox-exporter-configuration
51 |   namespace: monitoring
52 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-deployment.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: apps/v1
  2 | kind: Deployment
  3 | metadata:
  4 |   labels:
  5 |     app.kubernetes.io/component: exporter
  6 |     app.kubernetes.io/name: blackbox-exporter
  7 |     app.kubernetes.io/part-of: kube-prometheus
  8 |     app.kubernetes.io/version: 0.23.0
  9 |   name: blackbox-exporter
 10 |   namespace: monitoring
 11 | spec:
 12 |   replicas: 1
 13 |   selector:
 14 |     matchLabels:
 15 |       app.kubernetes.io/component: exporter
 16 |       app.kubernetes.io/name: blackbox-exporter
 17 |       app.kubernetes.io/part-of: kube-prometheus
 18 |   template:
 19 |     metadata:
 20 |       annotations:
 21 |         kubectl.kubernetes.io/default-container: blackbox-exporter
 22 |       labels:
 23 |         app.kubernetes.io/component: exporter
 24 |         app.kubernetes.io/name: blackbox-exporter
 25 |         app.kubernetes.io/part-of: kube-prometheus
 26 |         app.kubernetes.io/version: 0.23.0
 27 |     spec:
 28 |       automountServiceAccountToken: true
 29 |       containers:
 30 |       - args:
 31 |         - --config.file=/etc/blackbox_exporter/config.yml
 32 |         - --web.listen-address=:19115
 33 |         image: quay.io/prometheus/blackbox-exporter:v0.23.0
 34 |         name: blackbox-exporter
 35 |         ports:
 36 |         - containerPort: 19115
 37 |           name: http
 38 |         resources:
 39 |           limits:
 40 |             cpu: 20m
 41 |             memory: 40Mi
 42 |           requests:
 43 |             cpu: 10m
 44 |             memory: 20Mi
 45 |         securityContext:
 46 |           allowPrivilegeEscalation: false
 47 |           capabilities:
 48 |             drop:
 49 |             - ALL
 50 |           readOnlyRootFilesystem: true
 51 |           runAsNonRoot: true
 52 |           runAsUser: 65534
 53 |         volumeMounts:
 54 |         - mountPath: /etc/blackbox_exporter/
 55 |           name: config
 56 |           readOnly: true
 57 |       - args:
 58 |         - --webhook-url=http://localhost:19115/-/reload
 59 |         - --volume-dir=/etc/blackbox_exporter/
 60 |         image: jimmidyson/configmap-reload:v0.5.0
 61 |         name: module-configmap-reloader
 62 |         resources:
 63 |           limits:
 64 |             cpu: 20m
 65 |             memory: 40Mi
 66 |           requests:
 67 |             cpu: 10m
 68 |             memory: 20Mi
 69 |         securityContext:
 70 |           allowPrivilegeEscalation: false
 71 |           capabilities:
 72 |             drop:
 73 |             - ALL
 74 |           readOnlyRootFilesystem: true
 75 |           runAsNonRoot: true
 76 |           runAsUser: 65534
 77 |         terminationMessagePath: /dev/termination-log
 78 |         terminationMessagePolicy: FallbackToLogsOnError
 79 |         volumeMounts:
 80 |         - mountPath: /etc/blackbox_exporter/
 81 |           name: config
 82 |           readOnly: true
 83 |       - args:
 84 |         - --logtostderr
 85 |         - --secure-listen-address=:9115
 86 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 87 |         - --upstream=http://127.0.0.1:19115/
 88 |         image: quay.io/brancz/kube-rbac-proxy:v0.14.0
 89 |         name: kube-rbac-proxy
 90 |         ports:
 91 |         - containerPort: 9115
 92 |           name: https
 93 |         resources:
 94 |           limits:
 95 |             cpu: 20m
 96 |             memory: 40Mi
 97 |           requests:
 98 |             cpu: 10m
 99 |             memory: 20Mi
100 |         securityContext:
101 |           allowPrivilegeEscalation: false
102 |           capabilities:
103 |             drop:
104 |             - ALL
105 |           readOnlyRootFilesystem: true
106 |           runAsGroup: 65532
107 |           runAsNonRoot: true
108 |           runAsUser: 65532
109 |       nodeSelector:
110 |         kubernetes.io/os: linux
111 |       serviceAccountName: blackbox-exporter
112 |       volumes:
113 |       - configMap:
114 |           name: blackbox-exporter-configuration
115 |         name: config
116 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: blackbox-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.23.0
 9 |   name: blackbox-exporter
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - from:
16 |     - podSelector:
17 |         matchLabels:
18 |           app.kubernetes.io/name: prometheus
19 |     ports:
20 |     - port: 9115
21 |       protocol: TCP
22 |     - port: 19115
23 |       protocol: TCP
24 |   podSelector:
25 |     matchLabels:
26 |       app.kubernetes.io/component: exporter
27 |       app.kubernetes.io/name: blackbox-exporter
28 |       app.kubernetes.io/part-of: kube-prometheus
29 |   policyTypes:
30 |   - Egress
31 |   - Ingress
32 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: blackbox-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.23.0
 9 |   name: blackbox-exporter
10 |   namespace: monitoring
11 | spec:
12 |   ports:
13 |   - name: https
14 |     port: 9115
15 |     targetPort: https
16 |   - name: probe
17 |     port: 19115
18 |     targetPort: http
19 |   selector:
20 |     app.kubernetes.io/component: exporter
21 |     app.kubernetes.io/name: blackbox-exporter
22 |     app.kubernetes.io/part-of: kube-prometheus
23 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: exporter
 7 |     app.kubernetes.io/name: blackbox-exporter
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.23.0
10 |   name: blackbox-exporter
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/blackboxExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: blackbox-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.23.0
 9 |   name: blackbox-exporter
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 |     interval: 30s
15 |     path: /metrics
16 |     port: https
17 |     scheme: https
18 |     tlsConfig:
19 |       insecureSkipVerify: true
20 |   selector:
21 |     matchLabels:
22 |       app.kubernetes.io/component: exporter
23 |       app.kubernetes.io/name: blackbox-exporter
24 |       app.kubernetes.io/part-of: kube-prometheus
25 | 


--------------------------------------------------------------------------------
/observability/install/grafana-config.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Secret
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |   name: grafana-config
10 |   namespace: monitoring
11 | stringData:
12 |   grafana.ini: |
13 |     [date_formats]
14 |     default_timezone = UTC
15 | type: Opaque
16 | 


--------------------------------------------------------------------------------
/observability/install/grafana-dashboardDatasources.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Secret
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |   name: grafana-datasources
10 |   namespace: monitoring
11 | stringData:
12 |   datasources.yaml: |-
13 |     {
14 |         "apiVersion": 1,
15 |         "datasources": [
16 |             {
17 |                 "access": "proxy",
18 |                 "editable": false,
19 |                 "name": "prometheus",
20 |                 "orgId": 1,
21 |                 "type": "prometheus",
22 |                 "url": "http://prometheus-k8s.monitoring.svc:9090",
23 |                 "version": 1
24 |             }
25 |         ]
26 |     }
27 | type: Opaque
28 | 


--------------------------------------------------------------------------------
/observability/install/grafana-dashboardSources.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | data:
 3 |   dashboards.yaml: |-
 4 |     {
 5 |         "apiVersion": 1,
 6 |         "providers": [
 7 |             {
 8 |                 "folder": "Default",
 9 |                 "folderUid": "",
10 |                 "name": "0",
11 |                 "options": {
12 |                     "path": "/grafana-dashboard-definitions/0"
13 |                 },
14 |                 "orgId": 1,
15 |                 "type": "file"
16 |             }
17 |         ]
18 |     }
19 | kind: ConfigMap
20 | metadata:
21 |   labels:
22 |     app.kubernetes.io/component: grafana
23 |     app.kubernetes.io/name: grafana
24 |     app.kubernetes.io/part-of: kube-prometheus
25 |     app.kubernetes.io/version: 9.3.6
26 |   name: grafana-dashboards
27 |   namespace: monitoring
28 | 


--------------------------------------------------------------------------------
/observability/install/grafana-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |   name: grafana
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - from:
16 |     - podSelector:
17 |         matchLabels:
18 |           app.kubernetes.io/name: prometheus
19 |     ports:
20 |     - port: 3000
21 |       protocol: TCP
22 |   podSelector:
23 |     matchLabels:
24 |       app.kubernetes.io/component: grafana
25 |       app.kubernetes.io/name: grafana
26 |       app.kubernetes.io/part-of: kube-prometheus
27 |   policyTypes:
28 |   - Egress
29 |   - Ingress
30 | 


--------------------------------------------------------------------------------
/observability/install/grafana-prometheusRule.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: PrometheusRule
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |     prometheus: k8s
10 |     role: alert-rules
11 |   name: grafana-rules
12 |   namespace: monitoring
13 | spec:
14 |   groups:
15 |   - name: GrafanaAlerts
16 |     rules:
17 |     - alert: GrafanaRequestsFailing
18 |       annotations:
19 |         message: '{{ $labels.namespace }}/{{ $labels.job }}/{{ $labels.handler }}
20 |           is experiencing {{ $value | humanize }}% errors'
21 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/grafana/grafanarequestsfailing
22 |       expr: |
23 |         100 * namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."}
24 |         / ignoring (status_code)
25 |         sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query"})
26 |         > 50
27 |       for: 5m
28 |       labels:
29 |         severity: warning
30 |   - name: grafana_rules
31 |     rules:
32 |     - expr: |
33 |         sum by (namespace, job, handler, status_code) (rate(grafana_http_request_duration_seconds_count[5m]))
34 |       record: namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m
35 | 


--------------------------------------------------------------------------------
/observability/install/grafana-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |   name: grafana
10 |   namespace: monitoring
11 | spec:
12 |   ports:
13 |   - name: http
14 |     port: 3000
15 |     targetPort: http
16 |   selector:
17 |     app.kubernetes.io/component: grafana
18 |     app.kubernetes.io/name: grafana
19 |     app.kubernetes.io/part-of: kube-prometheus
20 | 


--------------------------------------------------------------------------------
/observability/install/grafana-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: grafana
 7 |     app.kubernetes.io/name: grafana
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 9.3.6
10 |   name: grafana
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/grafana-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: grafana
 6 |     app.kubernetes.io/name: grafana
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 9.3.6
 9 |   name: grafana
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - interval: 15s
14 |     port: http
15 |   selector:
16 |     matchLabels:
17 |       app.kubernetes.io/name: grafana
18 | 


--------------------------------------------------------------------------------
/observability/install/kubePrometheus-prometheusRule.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: PrometheusRule
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-prometheus
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     prometheus: k8s
 9 |     role: alert-rules
10 |   name: kube-prometheus-rules
11 |   namespace: monitoring
12 | spec:
13 |   groups:
14 |   - name: general.rules
15 |     rules:
16 |     - alert: TargetDown
17 |       annotations:
18 |         description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service
19 |           }} targets in {{ $labels.namespace }} namespace are down.'
20 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/targetdown
21 |         summary: One or more targets are unreachable.
22 |       expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job,
23 |         namespace, service)) > 10
24 |       for: 10m
25 |       labels:
26 |         severity: warning
27 |     - alert: Watchdog
28 |       annotations:
29 |         description: |
30 |           This is an alert meant to ensure that the entire alerting pipeline is functional.
31 |           This alert is always firing; therefore it should always be firing in Alertmanager

32 |           and always fire against a receiver. There are integrations with various notification
33 |           mechanisms that send a notification when this alert is not firing. For example the
34 |           "DeadMansSnitch" integration in PagerDuty.
35 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/watchdog
36 |         summary: An alert that should always be firing to certify that Alertmanager
37 |           is working properly.
38 |       expr: vector(1)
39 |       labels:
40 |         severity: none
41 |     - alert: InfoInhibitor
42 |       annotations:
43 |         description: |
44 |           This is an alert that is used to inhibit info alerts.
45 |           By themselves, the info-level alerts are sometimes very noisy, but they are relevant when combined with
46 |           other alerts.
47 |           This alert fires whenever there's a severity="info" alert, and stops firing when another alert with a
48 |           severity of 'warning' or 'critical' starts firing on the same namespace.
49 |           This alert should be routed to a null receiver and configured to inhibit alerts with severity="info".
50 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/infoinhibitor
51 |         summary: Info-level alert inhibition.
52 |       expr: ALERTS{severity = "info"} == 1 unless on(namespace) ALERTS{alertname !=
53 |         "InfoInhibitor", severity =~ "warning|critical", alertstate="firing"} == 1
54 |       labels:
55 |         severity: none
56 |   - name: node-network
57 |     rules:
58 |     - alert: NodeNetworkInterfaceFlapping
59 |       annotations:
60 |         description: Network interface "{{ $labels.device }}" is changing its up
61 |           status frequently on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}
62 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/nodenetworkinterfaceflapping
63 |         summary: Network interface is often changing its status
64 |       expr: |
65 |         changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2
66 |       for: 2m
67 |       labels:
68 |         severity: warning
69 |   - name: kube-prometheus-node-recording.rules
70 |     rules:
71 |     - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m]))
72 |         BY (instance)
73 |       record: instance:node_cpu:rate:sum
74 |     - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance)
75 |       record: instance:node_network_receive_bytes:rate:sum
76 |     - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance)
77 |       record: instance:node_network_transmit_bytes:rate:sum
78 |     - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m]))
79 |         WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total)
80 |         BY (instance, cpu)) BY (instance)
81 |       record: instance:node_cpu:ratio
82 |     - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m]))
83 |       record: cluster:node_cpu:sum_rate5m
84 |     - expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance,
85 |         cpu))
86 |       record: cluster:node_cpu:ratio
87 |   - name: kube-prometheus-general.rules
88 |     rules:
89 |     - expr: count without(instance, pod, node) (up == 1)
90 |       record: count:up1
91 |     - expr: count without(instance, pod, node) (up == 0)
92 |       record: count:up0
93 | 
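Note: Watchdog and InfoInhibitor only pay off once Alertmanager is wired up for them: Watchdog must be routed to an external heartbeat, and InfoInhibitor must feed an inhibit rule and go to a null receiver, exactly as the descriptions above say. A minimal, hypothetical Alertmanager config sketch (all receiver names are placeholders; the heartbeat integration itself is omitted):

    route:
      receiver: default                  # hypothetical catch-all receiver
      routes:
      - matchers: ['alertname="Watchdog"']
        receiver: deadmans-switch        # hypothetical heartbeat receiver (e.g. a PagerDuty DeadMansSnitch webhook)
        repeat_interval: 1m              # keep re-sending; silence then means the pipeline is broken
      - matchers: ['alertname="InfoInhibitor"']
        receiver: 'null'                 # never notify on the inhibitor itself
    inhibit_rules:
    - source_matchers: ['alertname="InfoInhibitor"']
      target_matchers: ['severity="info"']
      equal: ['namespace']
    receivers:
    - name: default
    - name: deadmans-switch
    - name: 'null'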


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-clusterRole.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: rbac.authorization.k8s.io/v1
  2 | kind: ClusterRole
  3 | metadata:
  4 |   labels:
  5 |     app.kubernetes.io/component: exporter
  6 |     app.kubernetes.io/name: kube-state-metrics
  7 |     app.kubernetes.io/part-of: kube-prometheus
  8 |     app.kubernetes.io/version: 2.8.0
  9 |   name: kube-state-metrics
 10 | rules:
 11 | - apiGroups:
 12 |   - ""
 13 |   resources:
 14 |   - configmaps
 15 |   - secrets
 16 |   - nodes
 17 |   - pods
 18 |   - services
 19 |   - serviceaccounts
 20 |   - resourcequotas
 21 |   - replicationcontrollers
 22 |   - limitranges
 23 |   - persistentvolumeclaims
 24 |   - persistentvolumes
 25 |   - namespaces
 26 |   - endpoints
 27 |   verbs:
 28 |   - list
 29 |   - watch
 30 | - apiGroups:
 31 |   - apps
 32 |   resources:
 33 |   - statefulsets
 34 |   - daemonsets
 35 |   - deployments
 36 |   - replicasets
 37 |   verbs:
 38 |   - list
 39 |   - watch
 40 | - apiGroups:
 41 |   - batch
 42 |   resources:
 43 |   - cronjobs
 44 |   - jobs
 45 |   verbs:
 46 |   - list
 47 |   - watch
 48 | - apiGroups:
 49 |   - autoscaling
 50 |   resources:
 51 |   - horizontalpodautoscalers
 52 |   verbs:
 53 |   - list
 54 |   - watch
 55 | - apiGroups:
 56 |   - authentication.k8s.io
 57 |   resources:
 58 |   - tokenreviews
 59 |   verbs:
 60 |   - create
 61 | - apiGroups:
 62 |   - authorization.k8s.io
 63 |   resources:
 64 |   - subjectaccessreviews
 65 |   verbs:
 66 |   - create
 67 | - apiGroups:
 68 |   - policy
 69 |   resources:
 70 |   - poddisruptionbudgets
 71 |   verbs:
 72 |   - list
 73 |   - watch
 74 | - apiGroups:
 75 |   - certificates.k8s.io
 76 |   resources:
 77 |   - certificatesigningrequests
 78 |   verbs:
 79 |   - list
 80 |   - watch
 81 | - apiGroups:
 82 |   - discovery.k8s.io
 83 |   resources:
 84 |   - endpointslices
 85 |   verbs:
 86 |   - list
 87 |   - watch
 88 | - apiGroups:
 89 |   - storage.k8s.io
 90 |   resources:
 91 |   - storageclasses
 92 |   - volumeattachments
 93 |   verbs:
 94 |   - list
 95 |   - watch
 96 | - apiGroups:
 97 |   - admissionregistration.k8s.io
 98 |   resources:
 99 |   - mutatingwebhookconfigurations
100 |   - validatingwebhookconfigurations
101 |   verbs:
102 |   - list
103 |   - watch
104 | - apiGroups:
105 |   - networking.k8s.io
106 |   resources:
107 |   - networkpolicies
108 |   - ingressclasses
109 |   - ingresses
110 |   verbs:
111 |   - list
112 |   - watch
113 | - apiGroups:
114 |   - coordination.k8s.io
115 |   resources:
116 |   - leases
117 |   verbs:
118 |   - list
119 |   - watch
120 | - apiGroups:
121 |   - rbac.authorization.k8s.io
122 |   resources:
123 |   - clusterrolebindings
124 |   - clusterroles
125 |   - rolebindings
126 |   - roles
127 |   verbs:
128 |   - list
129 |   - watch
130 | 


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-state-metrics
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 2.8.0
 9 |   name: kube-state-metrics
10 | roleRef:
11 |   apiGroup: rbac.authorization.k8s.io
12 |   kind: ClusterRole
13 |   name: kube-state-metrics
14 | subjects:
15 | - kind: ServiceAccount
16 |   name: kube-state-metrics
17 |   namespace: monitoring
18 | 


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-deployment.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: apps/v1
  2 | kind: Deployment
  3 | metadata:
  4 |   labels:
  5 |     app.kubernetes.io/component: exporter
  6 |     app.kubernetes.io/name: kube-state-metrics
  7 |     app.kubernetes.io/part-of: kube-prometheus
  8 |     app.kubernetes.io/version: 2.8.0
  9 |   name: kube-state-metrics
 10 |   namespace: monitoring
 11 | spec:
 12 |   replicas: 1
 13 |   selector:
 14 |     matchLabels:
 15 |       app.kubernetes.io/component: exporter
 16 |       app.kubernetes.io/name: kube-state-metrics
 17 |       app.kubernetes.io/part-of: kube-prometheus
 18 |   template:
 19 |     metadata:
 20 |       annotations:
 21 |         kubectl.kubernetes.io/default-container: kube-state-metrics
 22 |       labels:
 23 |         app.kubernetes.io/component: exporter
 24 |         app.kubernetes.io/name: kube-state-metrics
 25 |         app.kubernetes.io/part-of: kube-prometheus
 26 |         app.kubernetes.io/version: 2.8.0
 27 |     spec:
 28 |       automountServiceAccountToken: true
 29 |       containers:
 30 |       - args:
 31 |         - --host=127.0.0.1
 32 |         - --port=8081
 33 |         - --telemetry-host=127.0.0.1
 34 |         - --telemetry-port=8082
 35 |         image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.8.0
 36 |         name: kube-state-metrics
 37 |         resources:
 38 |           limits:
 39 |             cpu: 100m
 40 |             memory: 250Mi
 41 |           requests:
 42 |             cpu: 10m
 43 |             memory: 190Mi
 44 |         securityContext:
 45 |           allowPrivilegeEscalation: false
 46 |           capabilities:
 47 |             drop:
 48 |             - ALL
 49 |           readOnlyRootFilesystem: true
 50 |           runAsUser: 65534
 51 |       - args:
 52 |         - --logtostderr
 53 |         - --secure-listen-address=:8443
 54 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 55 |         - --upstream=http://127.0.0.1:8081/
 56 |         image: quay.io/brancz/kube-rbac-proxy:v0.14.0
 57 |         name: kube-rbac-proxy-main
 58 |         ports:
 59 |         - containerPort: 8443
 60 |           name: https-main
 61 |         resources:
 62 |           limits:
 63 |             cpu: 40m
 64 |             memory: 40Mi
 65 |           requests:
 66 |             cpu: 20m
 67 |             memory: 20Mi
 68 |         securityContext:
 69 |           allowPrivilegeEscalation: false
 70 |           capabilities:
 71 |             drop:
 72 |             - ALL
 73 |           readOnlyRootFilesystem: true
 74 |           runAsGroup: 65532
 75 |           runAsNonRoot: true
 76 |           runAsUser: 65532
 77 |       - args:
 78 |         - --logtostderr
 79 |         - --secure-listen-address=:9443
 80 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 81 |         - --upstream=http://127.0.0.1:8082/
 82 |         image: quay.io/brancz/kube-rbac-proxy:v0.14.0
 83 |         name: kube-rbac-proxy-self
 84 |         ports:
 85 |         - containerPort: 9443
 86 |           name: https-self
 87 |         resources:
 88 |           limits:
 89 |             cpu: 20m
 90 |             memory: 40Mi
 91 |           requests:
 92 |             cpu: 10m
 93 |             memory: 20Mi
 94 |         securityContext:
 95 |           allowPrivilegeEscalation: false
 96 |           capabilities:
 97 |             drop:
 98 |             - ALL
 99 |           readOnlyRootFilesystem: true
100 |           runAsGroup: 65532
101 |           runAsNonRoot: true
102 |           runAsUser: 65532
103 |       nodeSelector:
104 |         kubernetes.io/os: linux
105 |       serviceAccountName: kube-state-metrics
106 | 
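Note: kube-state-metrics itself serves plaintext metrics on loopback only (--host=127.0.0.1), so the two kube-rbac-proxy sidecars are the only reachable endpoints (8443 and 9443). Each scrape is authenticated with a TokenReview and authorized with a SubjectAccessReview, which is why the ClusterRole above grants create on tokenreviews and subjectaccessreviews. The pattern, distilled from the manifest above:

    containers:
    - name: exporter                      # metrics reachable only inside the pod
      args: [--host=127.0.0.1, --port=8081]
    - name: kube-rbac-proxy               # the TLS endpoint Prometheus actually scrapes
      args:
      - --secure-listen-address=:8443
      - --upstream=http://127.0.0.1:8081/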


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-state-metrics
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 2.8.0
 9 |   name: kube-state-metrics
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - from:
16 |     - podSelector:
17 |         matchLabels:
18 |           app.kubernetes.io/name: prometheus
19 |     ports:
20 |     - port: 8443
21 |       protocol: TCP
22 |     - port: 9443
23 |       protocol: TCP
24 |   podSelector:
25 |     matchLabels:
26 |       app.kubernetes.io/component: exporter
27 |       app.kubernetes.io/name: kube-state-metrics
28 |       app.kubernetes.io/part-of: kube-prometheus
29 |   policyTypes:
30 |   - Egress
31 |   - Ingress
32 | 


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-prometheusRule.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: PrometheusRule
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-state-metrics
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 2.8.0
 9 |     prometheus: k8s
10 |     role: alert-rules
11 |   name: kube-state-metrics-rules
12 |   namespace: monitoring
13 | spec:
14 |   groups:
15 |   - name: kube-state-metrics
16 |     rules:
17 |     - alert: KubeStateMetricsListErrors
18 |       annotations:
19 |         description: kube-state-metrics is experiencing errors at an elevated rate
20 |           in list operations, which is likely preventing it from exposing metrics
21 |           about Kubernetes objects correctly, or at all.
22 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricslisterrors
23 |         summary: kube-state-metrics is experiencing errors in list operations.
24 |       expr: |
25 |         (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m]))
26 |           /
27 |         sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])))
28 |         > 0.01
29 |       for: 15m
30 |       labels:
31 |         severity: critical
32 |     - alert: KubeStateMetricsWatchErrors
33 |       annotations:
34 |         description: kube-state-metrics is experiencing errors at an elevated rate
35 |           in watch operations, which is likely preventing it from exposing metrics
36 |           about Kubernetes objects correctly, or at all.
37 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricswatcherrors
38 |         summary: kube-state-metrics is experiencing errors in watch operations.
39 |       expr: |
40 |         (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m]))
41 |           /
42 |         sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])))
43 |         > 0.01
44 |       for: 15m
45 |       labels:
46 |         severity: critical
47 |     - alert: KubeStateMetricsShardingMismatch
48 |       annotations:
49 |         description: kube-state-metrics pods are running with different --total-shards
50 |           configurations; some Kubernetes objects may be exposed multiple times or
51 |           not exposed at all.
52 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardingmismatch
53 |         summary: kube-state-metrics sharding is misconfigured.
54 |       expr: |
55 |         stdvar (kube_state_metrics_total_shards{job="kube-state-metrics"}) != 0
56 |       for: 15m
57 |       labels:
58 |         severity: critical
59 |     - alert: KubeStateMetricsShardsMissing
60 |       annotations:
61 |         description: kube-state-metrics shards are missing; some Kubernetes objects
62 |           are not being exposed.
63 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardsmissing
64 |         summary: kube-state-metrics shards are missing.
65 |       expr: |
66 |         2^max(kube_state_metrics_total_shards{job="kube-state-metrics"}) - 1
67 |           -
68 |         sum( 2 ^ max by (shard_ordinal) (kube_state_metrics_shard_ordinal{job="kube-state-metrics"}) )
69 |         != 0
70 |       for: 15m
71 |       labels:
72 |         severity: critical
73 | 
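Note: KubeStateMetricsShardsMissing encodes shard presence as a bitmask. With N total shards, 2^N - 1 must equal the sum of 2^ordinal over the observed shards: for N = 3, 2^3 - 1 = 7, and ordinals 0, 1, 2 contribute 2^0 + 2^1 + 2^2 = 7. If shard 1 is missing the sum is only 5 and the expression fires; the max by (shard_ordinal) also collapses duplicate pods on the same ordinal, so a doubled shard cannot double its bit.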


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-state-metrics
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 2.8.0
 9 |   name: kube-state-metrics
10 |   namespace: monitoring
11 | spec:
12 |   clusterIP: None
13 |   ports:
14 |   - name: https-main
15 |     port: 8443
16 |     targetPort: https-main
17 |   - name: https-self
18 |     port: 9443
19 |     targetPort: https-self
20 |   selector:
21 |     app.kubernetes.io/component: exporter
22 |     app.kubernetes.io/name: kube-state-metrics
23 |     app.kubernetes.io/part-of: kube-prometheus
24 | 


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: exporter
 7 |     app.kubernetes.io/name: kube-state-metrics
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.8.0
10 |   name: kube-state-metrics
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/kubeStateMetrics-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: kube-state-metrics
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 2.8.0
 9 |   name: kube-state-metrics
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 |     honorLabels: true
15 |     interval: 30s
16 |     metricRelabelings:
17 |     - action: drop
18 |       regex: kube_endpoint_address_not_ready|kube_endpoint_address_available
19 |       sourceLabels:
20 |       - __name__
21 |     port: https-main
22 |     relabelings:
23 |     - action: labeldrop
24 |       regex: (pod|service|endpoint|namespace)
25 |     scheme: https
26 |     scrapeTimeout: 30s
27 |     tlsConfig:
28 |       insecureSkipVerify: true
29 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
30 |     interval: 30s
31 |     port: https-self
32 |     scheme: https
33 |     tlsConfig:
34 |       insecureSkipVerify: true
35 |   jobLabel: app.kubernetes.io/name
36 |   selector:
37 |     matchLabels:
38 |       app.kubernetes.io/component: exporter
39 |       app.kubernetes.io/name: kube-state-metrics
40 |       app.kubernetes.io/part-of: kube-prometheus
41 | 
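Note: the labeldrop relabeling removes the pod/service/endpoint/namespace labels Prometheus would otherwise attach for the kube-state-metrics target itself, and honorLabels: true keeps the exporter's own labels when they conflict with target labels. Together they ensure a series such as kube_pod_info{namespace="default",pod="my-app-123"} (an illustrative example) describes the watched object, not the exporter pod that produced it.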


--------------------------------------------------------------------------------
/observability/install/kubernetesControlPlane-serviceMonitorCoreDNS.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/name: coredns
 6 |     app.kubernetes.io/part-of: kube-prometheus
 7 |   name: coredns
 8 |   namespace: monitoring
 9 | spec:
10 |   endpoints:
11 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 |     interval: 15s
13 |     metricRelabelings:
14 |     - action: drop
15 |       regex: coredns_cache_misses_total
16 |       sourceLabels:
17 |       - __name__
18 |     port: metrics
19 |   jobLabel: app.kubernetes.io/name
20 |   namespaceSelector:
21 |     matchNames:
22 |     - kube-system
23 |   selector:
24 |     matchLabels:
25 |       k8s-app: kube-dns
26 | 


--------------------------------------------------------------------------------
/observability/install/kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/name: kube-controller-manager
 6 |     app.kubernetes.io/part-of: kube-prometheus
 7 |   name: kube-controller-manager
 8 |   namespace: monitoring
 9 | spec:
10 |   endpoints:
11 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 |     interval: 30s
13 |     metricRelabelings:
14 |     - action: drop
15 |       regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds)
16 |       sourceLabels:
17 |       - __name__
18 |     - action: drop
19 |       regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds)
20 |       sourceLabels:
21 |       - __name__
22 |     - action: drop
23 |       regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)
24 |       sourceLabels:
25 |       - __name__
26 |     - action: drop
27 |       regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout)
28 |       sourceLabels:
29 |       - __name__
30 |     - action: drop
31 |       regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total)
32 |       sourceLabels:
33 |       - __name__
34 |     - action: drop
35 |       regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|object_counts|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary)
36 |       sourceLabels:
37 |       - __name__
38 |     - action: drop
39 |       regex: transformation_(transformation_latencies_microseconds|failures_total)
40 |       sourceLabels:
41 |       - __name__
42 |     - action: drop
43 |       regex: (admission_quota_controller_adds|admission_quota_controller_depth|admission_quota_controller_longest_running_processor_microseconds|admission_quota_controller_queue_latency|admission_quota_controller_unfinished_work_seconds|admission_quota_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|APIServiceOpenAPIAggregationControllerQueue1_depth|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_retries|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_adds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|APIServiceRegistrationController_queue_latency|APIServiceRegistrationController_retries|APIServiceRegistrationController_unfinished_work_seconds|APIServiceRegistrationController_work_duration|autoregister_adds|autoregister_depth|autoregister_longest_running_processor_microseconds|autoregister_queue_latency|autoregister_retries|autoregister_unfinished_work_seconds|autoregister_work_duration|AvailableConditionController_adds|AvailableConditionController_depth|AvailableConditionController_longest_running_processor_microseconds|AvailableConditionController_queue_latency|AvailableConditionController_retries|AvailableConditionController_unfinished_work_seconds|AvailableConditionController_work_duration|crd_autoregistration_controller_adds|crd_autoregistration_controller_depth|crd_autoregistration_controller_longest_running_processor_microseconds|crd_autoregistration_controller_queue_latency|crd_autoregistration_controller_retries|crd_autoregistration_controller_unfinished_work_seconds|crd_autoregistration_controller_work_duration|crdEstablishing_adds|crdEstablishing_depth|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_queue_latency|crdEstablishing_retries|crdEstablishing_unfinished_work_seconds|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_finalizer_longest_running_processor_microseconds|crd_finalizer_queue_latency|crd_finalizer_retries|crd_finalizer_unfinished_work_seconds|crd_finalizer_work_duration|crd_naming_condition_controller_adds|crd_naming_condition_controller_depth|crd_naming_condition_controller_longest_running_processor_microseconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|crd_naming_condition_controller_unfinished_work_seconds|crd_naming_condition_controller_work_duration|crd_openapi_controller_adds|crd_openapi_controller_depth|crd_openapi_controller_longest_running_processor_microseconds|crd_openapi_controller_queue_latency|crd_openapi_controller_retries|crd_openapi_controller_unfinished_work_seconds|crd_openapi_controller_work_duration|DiscoveryController_adds|DiscoveryController_depth|DiscoveryController_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_retries|DiscoveryController_unfinished_work_seconds|DiscoveryController_work_duration|kubeproxy_sync_proxy_rules_latency_microseconds|non_structural_schema_condition_controller_adds|non_structural_schema_condition_controller_depth|non_structural_schema_condition_controller_longest_running_processor_microseconds|non_structural_schema_condition_controller_queue_latency|non_structural_schema_condition_controller_retries|non_structural_schema_condition_controller_unfinished_work_seconds|non_structural_schema_condition_controller_work_duration|rest_client_request_latency_seconds|storage_operation_errors_total|storage_operation_status_count)
44 |       sourceLabels:
45 |       - __name__
46 |     - action: drop
47 |       regex: etcd_(debugging|disk|request|server).*
48 |       sourceLabels:
49 |       - __name__
50 |     port: https-metrics
51 |     scheme: https
52 |     tlsConfig:
53 |       insecureSkipVerify: true
54 |   jobLabel: app.kubernetes.io/name
55 |   namespaceSelector:
56 |     matchNames:
57 |     - kube-system
58 |   selector:
59 |     matchLabels:
60 |       app.kubernetes.io/name: kube-controller-manager
61 | 
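Note: these metricRelabelings run after each scrape but before ingestion, so the dropped series (largely metrics deprecated in their *_microseconds form) never consume storage. Also note the selector: it expects a Service in kube-system labeled app.kubernetes.io/name: kube-controller-manager, which kubeadm-based clusters (including kind) do not create by default, so this monitor may show no targets until such a Service is added.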


--------------------------------------------------------------------------------
/observability/install/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/name: kube-scheduler
 6 |     app.kubernetes.io/part-of: kube-prometheus
 7 |   name: kube-scheduler
 8 |   namespace: monitoring
 9 | spec:
10 |   endpoints:
11 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 |     interval: 30s
13 |     port: https-metrics
14 |     scheme: https
15 |     tlsConfig:
16 |       insecureSkipVerify: true
17 |   jobLabel: app.kubernetes.io/name
18 |   namespaceSelector:
19 |     matchNames:
20 |     - kube-system
21 |   selector:
22 |     matchLabels:
23 |       app.kubernetes.io/name: kube-scheduler
24 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: node-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 1.5.0
 9 |   name: node-exporter
10 |   namespace: monitoring
11 | rules:
12 | - apiGroups:
13 |   - authentication.k8s.io
14 |   resources:
15 |   - tokenreviews
16 |   verbs:
17 |   - create
18 | - apiGroups:
19 |   - authorization.k8s.io
20 |   resources:
21 |   - subjectaccessreviews
22 |   verbs:
23 |   - create
24 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: node-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 1.5.0
 9 |   name: node-exporter
10 |   namespace: monitoring
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: node-exporter
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: node-exporter
18 |   namespace: monitoring
19 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-daemonset.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: apps/v1
  2 | kind: DaemonSet
  3 | metadata:
  4 |   labels:
  5 |     app.kubernetes.io/component: exporter
  6 |     app.kubernetes.io/name: node-exporter
  7 |     app.kubernetes.io/part-of: kube-prometheus
  8 |     app.kubernetes.io/version: 1.5.0
  9 |   name: node-exporter
 10 |   namespace: monitoring
 11 | spec:
 12 |   selector:
 13 |     matchLabels:
 14 |       app.kubernetes.io/component: exporter
 15 |       app.kubernetes.io/name: node-exporter
 16 |       app.kubernetes.io/part-of: kube-prometheus
 17 |   template:
 18 |     metadata:
 19 |       annotations:
 20 |         kubectl.kubernetes.io/default-container: node-exporter
 21 |       labels:
 22 |         app.kubernetes.io/component: exporter
 23 |         app.kubernetes.io/name: node-exporter
 24 |         app.kubernetes.io/part-of: kube-prometheus
 25 |         app.kubernetes.io/version: 1.5.0
 26 |     spec:
 27 |       automountServiceAccountToken: true
 28 |       containers:
 29 |       - args:
 30 |         - --web.listen-address=127.0.0.1:9100
 31 |         - --path.sysfs=/host/sys
 32 |         - --path.rootfs=/host/root
 33 |         - --path.udev.data=/host/root/run/udev/data
 34 |         - --no-collector.wifi
 35 |         - --no-collector.hwmon
 36 |         - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
 37 |         - --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$
 38 |         - --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$
 39 |         image: quay.io/prometheus/node-exporter:v1.5.0
 40 |         name: node-exporter
 41 |         resources:
 42 |           limits:
 43 |             cpu: 250m
 44 |             memory: 180Mi
 45 |           requests:
 46 |             cpu: 102m
 47 |             memory: 180Mi
 48 |         securityContext:
 49 |           allowPrivilegeEscalation: false
 50 |           capabilities:
 51 |             add:
 52 |             - SYS_TIME
 53 |             drop:
 54 |             - ALL
 55 |           readOnlyRootFilesystem: true
 56 |         volumeMounts:
 57 |         - mountPath: /host/sys
 58 |           mountPropagation: HostToContainer
 59 |           name: sys
 60 |           readOnly: true
 61 |         - mountPath: /host/root
 62 |           mountPropagation: HostToContainer
 63 |           name: root
 64 |           readOnly: true
 65 |       - args:
 66 |         - --logtostderr
 67 |         - --secure-listen-address=[$(IP)]:9100
 68 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
 69 |         - --upstream=http://127.0.0.1:9100/
 70 |         env:
 71 |         - name: IP
 72 |           valueFrom:
 73 |             fieldRef:
 74 |               fieldPath: status.podIP
 75 |         image: quay.io/brancz/kube-rbac-proxy:v0.14.0
 76 |         name: kube-rbac-proxy
 77 |         ports:
 78 |         - containerPort: 9100
 79 |           hostPort: 9100
 80 |           name: https
 81 |         resources:
 82 |           limits:
 83 |             cpu: 20m
 84 |             memory: 40Mi
 85 |           requests:
 86 |             cpu: 10m
 87 |             memory: 20Mi
 88 |         securityContext:
 89 |           allowPrivilegeEscalation: false
 90 |           capabilities:
 91 |             drop:
 92 |             - ALL
 93 |           readOnlyRootFilesystem: true
 94 |           runAsGroup: 65532
 95 |           runAsNonRoot: true
 96 |           runAsUser: 65532
 97 |       hostNetwork: true
 98 |       hostPID: true
 99 |       nodeSelector:
100 |         kubernetes.io/os: linux
101 |       priorityClassName: system-cluster-critical
102 |       securityContext:
103 |         runAsNonRoot: true
104 |         runAsUser: 65534
105 |       serviceAccountName: node-exporter
106 |       tolerations:
107 |       - operator: Exists
108 |       volumes:
109 |       - hostPath:
110 |           path: /sys
111 |         name: sys
112 |       - hostPath:
113 |           path: /
114 |         name: root
115 |   updateStrategy:
116 |     rollingUpdate:
117 |       maxUnavailable: 10%
118 |     type: RollingUpdate
119 | 
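Note: the DaemonSet runs with hostNetwork and hostPID so node-exporter can observe node-level state. node-exporter itself binds only 127.0.0.1:9100, while the kube-rbac-proxy sidecar binds the same port on the pod IP, obtained through the status.podIP downward-API field; under hostNetwork that is the node IP, so the only endpoint reachable from outside the node is the TLS-authenticated proxy.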


--------------------------------------------------------------------------------
/observability/install/nodeExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: node-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 1.5.0
 9 |   name: node-exporter
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - from:
16 |     - podSelector:
17 |         matchLabels:
18 |           app.kubernetes.io/name: prometheus
19 |     ports:
20 |     - port: 9100
21 |       protocol: TCP
22 |   podSelector:
23 |     matchLabels:
24 |       app.kubernetes.io/component: exporter
25 |       app.kubernetes.io/name: node-exporter
26 |       app.kubernetes.io/part-of: kube-prometheus
27 |   policyTypes:
28 |   - Egress
29 |   - Ingress
30 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: node-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 1.5.0
 9 |   name: node-exporter
10 |   namespace: monitoring
11 | spec:
12 |   clusterIP: None
13 |   ports:
14 |   - name: https
15 |     port: 9100
16 |     targetPort: https
17 |   selector:
18 |     app.kubernetes.io/component: exporter
19 |     app.kubernetes.io/name: node-exporter
20 |     app.kubernetes.io/part-of: kube-prometheus
21 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: exporter
 7 |     app.kubernetes.io/name: node-exporter
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 1.5.0
10 |   name: node-exporter
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/nodeExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: exporter
 6 |     app.kubernetes.io/name: node-exporter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 1.5.0
 9 |   name: node-exporter
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 |     interval: 15s
15 |     port: https
16 |     relabelings:
17 |     - action: replace
18 |       regex: (.*)
19 |       replacement: $1
20 |       sourceLabels:
21 |       - __meta_kubernetes_pod_node_name
22 |       targetLabel: instance
23 |     scheme: https
24 |     tlsConfig:
25 |       insecureSkipVerify: true
26 |   jobLabel: app.kubernetes.io/name
27 |   selector:
28 |     matchLabels:
29 |       app.kubernetes.io/component: exporter
30 |       app.kubernetes.io/name: node-exporter
31 |       app.kubernetes.io/part-of: kube-prometheus
32 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-clusterRole.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 | rules:
12 | - apiGroups:
13 |   - ""
14 |   resources:
15 |   - nodes/metrics
16 |   verbs:
17 |   - get
18 | - nonResourceURLs:
19 |   - /metrics
20 |   verbs:
21 |   - get
22 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: prometheus-k8s
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: prometheus-k8s
18 |   namespace: monitoring
19 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 |   namespace: monitoring
12 | spec:
13 |   egress:
14 |   - {}
15 |   ingress:
16 |   - from:
17 |     - podSelector:
18 |         matchLabels:
19 |           app.kubernetes.io/name: prometheus
20 |     ports:
21 |     - port: 9090
22 |       protocol: TCP
23 |     - port: 8080
24 |       protocol: TCP
25 |   - from:
26 |     - podSelector:
27 |         matchLabels:
28 |           app.kubernetes.io/name: grafana
29 |     ports:
30 |     - port: 9090
31 |       protocol: TCP
32 |   podSelector:
33 |     matchLabels:
34 |       app.kubernetes.io/component: prometheus
35 |       app.kubernetes.io/instance: k8s
36 |       app.kubernetes.io/name: prometheus
37 |       app.kubernetes.io/part-of: kube-prometheus
38 |   policyTypes:
39 |   - Egress
40 |   - Ingress
41 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: policy/v1
 2 | kind: PodDisruptionBudget
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 |   namespace: monitoring
12 | spec:
13 |   minAvailable: 1
14 |   selector:
15 |     matchLabels:
16 |       app.kubernetes.io/component: prometheus
17 |       app.kubernetes.io/instance: k8s
18 |       app.kubernetes.io/name: prometheus
19 |       app.kubernetes.io/part-of: kube-prometheus
20 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-prometheus.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: Prometheus
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: k8s
11 |   namespace: monitoring
12 | spec:
13 |   alerting:
14 |     alertmanagers:
15 |     - apiVersion: v2
16 |       name: alertmanager-main
17 |       namespace: monitoring
18 |       port: web
19 |   enableFeatures: []
20 |   externalLabels: {}
21 |   image: quay.io/prometheus/prometheus:v2.42.0
22 |   nodeSelector:
23 |     kubernetes.io/os: linux
24 |   podMetadata:
25 |     labels:
26 |       app.kubernetes.io/component: prometheus
27 |       app.kubernetes.io/instance: k8s
28 |       app.kubernetes.io/name: prometheus
29 |       app.kubernetes.io/part-of: kube-prometheus
30 |       app.kubernetes.io/version: 2.42.0
31 |   podMonitorNamespaceSelector: {}
32 |   podMonitorSelector: {}
33 |   probeNamespaceSelector: {}
34 |   probeSelector: {}
35 |   replicas: 2
36 |   resources:
37 |     requests:
38 |       memory: 400Mi
39 |   ruleNamespaceSelector: {}
40 |   ruleSelector: {}
41 |   securityContext:
42 |     fsGroup: 2000
43 |     runAsNonRoot: true
44 |     runAsUser: 1000
45 |   serviceAccountName: prometheus-k8s
46 |   serviceMonitorNamespaceSelector: {}
47 |   serviceMonitorSelector: {}
48 |   version: 2.42.0
49 | 
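Note: the empty ({}) selectors mean "select all" (a null selector would select nothing), so every ServiceMonitor, PodMonitor, Probe, and PrometheusRule in any namespace the RBAC below permits is loaded; replicas: 2 pairs with the PodDisruptionBudget's minAvailable: 1 above. To narrow discovery, a hypothetical label-scoped selector would look like:

    serviceMonitorSelector:
      matchLabels:
        team: platform        # hypothetical label; only matching ServiceMonitors are loaded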


--------------------------------------------------------------------------------
/observability/install/prometheus-roleBindingConfig.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: RoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s-config
11 |   namespace: monitoring
12 | roleRef:
13 |   apiGroup: rbac.authorization.k8s.io
14 |   kind: Role
15 |   name: prometheus-k8s-config
16 | subjects:
17 | - kind: ServiceAccount
18 |   name: prometheus-k8s
19 |   namespace: monitoring
20 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-roleBindingSpecificNamespaces.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | items:
 3 | - apiVersion: rbac.authorization.k8s.io/v1
 4 |   kind: RoleBinding
 5 |   metadata:
 6 |     labels:
 7 |       app.kubernetes.io/component: prometheus
 8 |       app.kubernetes.io/instance: k8s
 9 |       app.kubernetes.io/name: prometheus
10 |       app.kubernetes.io/part-of: kube-prometheus
11 |       app.kubernetes.io/version: 2.42.0
12 |     name: prometheus-k8s
13 |     namespace: default
14 |   roleRef:
15 |     apiGroup: rbac.authorization.k8s.io
16 |     kind: Role
17 |     name: prometheus-k8s
18 |   subjects:
19 |   - kind: ServiceAccount
20 |     name: prometheus-k8s
21 |     namespace: monitoring
22 | - apiVersion: rbac.authorization.k8s.io/v1
23 |   kind: RoleBinding
24 |   metadata:
25 |     labels:
26 |       app.kubernetes.io/component: prometheus
27 |       app.kubernetes.io/instance: k8s
28 |       app.kubernetes.io/name: prometheus
29 |       app.kubernetes.io/part-of: kube-prometheus
30 |       app.kubernetes.io/version: 2.42.0
31 |     name: prometheus-k8s
32 |     namespace: kube-system
33 |   roleRef:
34 |     apiGroup: rbac.authorization.k8s.io
35 |     kind: Role
36 |     name: prometheus-k8s
37 |   subjects:
38 |   - kind: ServiceAccount
39 |     name: prometheus-k8s
40 |     namespace: monitoring
41 | - apiVersion: rbac.authorization.k8s.io/v1
42 |   kind: RoleBinding
43 |   metadata:
44 |     labels:
45 |       app.kubernetes.io/component: prometheus
46 |       app.kubernetes.io/instance: k8s
47 |       app.kubernetes.io/name: prometheus
48 |       app.kubernetes.io/part-of: kube-prometheus
49 |       app.kubernetes.io/version: 2.42.0
50 |     name: prometheus-k8s
51 |     namespace: monitoring
52 |   roleRef:
53 |     apiGroup: rbac.authorization.k8s.io
54 |     kind: Role
55 |     name: prometheus-k8s
56 |   subjects:
57 |   - kind: ServiceAccount
58 |     name: prometheus-k8s
59 |     namespace: monitoring
60 | kind: RoleBindingList
61 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-roleConfig.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: Role
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s-config
11 |   namespace: monitoring
12 | rules:
13 | - apiGroups:
14 |   - ""
15 |   resources:
16 |   - configmaps
17 |   verbs:
18 |   - get
19 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-roleSpecificNamespaces.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: rbac.authorization.k8s.io/v1
  2 | items:
  3 | - apiVersion: rbac.authorization.k8s.io/v1
  4 |   kind: Role
  5 |   metadata:
  6 |     labels:
  7 |       app.kubernetes.io/component: prometheus
  8 |       app.kubernetes.io/instance: k8s
  9 |       app.kubernetes.io/name: prometheus
 10 |       app.kubernetes.io/part-of: kube-prometheus
 11 |       app.kubernetes.io/version: 2.42.0
 12 |     name: prometheus-k8s
 13 |     namespace: default
 14 |   rules:
 15 |   - apiGroups:
 16 |     - ""
 17 |     resources:
 18 |     - services
 19 |     - endpoints
 20 |     - pods
 21 |     verbs:
 22 |     - get
 23 |     - list
 24 |     - watch
 25 |   - apiGroups:
 26 |     - extensions
 27 |     resources:
 28 |     - ingresses
 29 |     verbs:
 30 |     - get
 31 |     - list
 32 |     - watch
 33 |   - apiGroups:
 34 |     - networking.k8s.io
 35 |     resources:
 36 |     - ingresses
 37 |     verbs:
 38 |     - get
 39 |     - list
 40 |     - watch
 41 | - apiVersion: rbac.authorization.k8s.io/v1
 42 |   kind: Role
 43 |   metadata:
 44 |     labels:
 45 |       app.kubernetes.io/component: prometheus
 46 |       app.kubernetes.io/instance: k8s
 47 |       app.kubernetes.io/name: prometheus
 48 |       app.kubernetes.io/part-of: kube-prometheus
 49 |       app.kubernetes.io/version: 2.42.0
 50 |     name: prometheus-k8s
 51 |     namespace: kube-system
 52 |   rules:
 53 |   - apiGroups:
 54 |     - ""
 55 |     resources:
 56 |     - services
 57 |     - endpoints
 58 |     - pods
 59 |     verbs:
 60 |     - get
 61 |     - list
 62 |     - watch
 63 |   - apiGroups:
 64 |     - extensions
 65 |     resources:
 66 |     - ingresses
 67 |     verbs:
 68 |     - get
 69 |     - list
 70 |     - watch
 71 |   - apiGroups:
 72 |     - networking.k8s.io
 73 |     resources:
 74 |     - ingresses
 75 |     verbs:
 76 |     - get
 77 |     - list
 78 |     - watch
 79 | - apiVersion: rbac.authorization.k8s.io/v1
 80 |   kind: Role
 81 |   metadata:
 82 |     labels:
 83 |       app.kubernetes.io/component: prometheus
 84 |       app.kubernetes.io/instance: k8s
 85 |       app.kubernetes.io/name: prometheus
 86 |       app.kubernetes.io/part-of: kube-prometheus
 87 |       app.kubernetes.io/version: 2.42.0
 88 |     name: prometheus-k8s
 89 |     namespace: monitoring
 90 |   rules:
 91 |   - apiGroups:
 92 |     - ""
 93 |     resources:
 94 |     - services
 95 |     - endpoints
 96 |     - pods
 97 |     verbs:
 98 |     - get
 99 |     - list
100 |     - watch
101 |   - apiGroups:
102 |     - extensions
103 |     resources:
104 |     - ingresses
105 |     verbs:
106 |     - get
107 |     - list
108 |     - watch
109 |   - apiGroups:
110 |     - networking.k8s.io
111 |     resources:
112 |     - ingresses
113 |     verbs:
114 |     - get
115 |     - list
116 |     - watch
117 | kind: RoleList
118 | 
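Note: this RoleList, together with the RoleBindingList above, is what actually scopes discovery: Prometheus may list services, endpoints, and pods only in default, kube-system, and monitoring. To let it discover targets in another namespace, add a matching pair there; a sketch for a hypothetical namespace my-app:

    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: prometheus-k8s
      namespace: my-app            # hypothetical namespace
    rules:
    - apiGroups: [""]
      resources: [services, endpoints, pods]
      verbs: [get, list, watch]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: prometheus-k8s
      namespace: my-app            # hypothetical namespace
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: prometheus-k8s
    subjects:
    - kind: ServiceAccount
      name: prometheus-k8s
      namespace: monitoring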


--------------------------------------------------------------------------------
/observability/install/prometheus-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 |   namespace: monitoring
12 | spec:
13 |   ports:
14 |   - name: web
15 |     port: 9090
16 |     targetPort: web
17 |   - name: reloader-web
18 |     port: 8080
19 |     targetPort: reloader-web
20 |   selector:
21 |     app.kubernetes.io/component: prometheus
22 |     app.kubernetes.io/instance: k8s
23 |     app.kubernetes.io/name: prometheus
24 |     app.kubernetes.io/part-of: kube-prometheus
25 |   sessionAffinity: ClientIP
26 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: true
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: prometheus
 7 |     app.kubernetes.io/instance: k8s
 8 |     app.kubernetes.io/name: prometheus
 9 |     app.kubernetes.io/part-of: kube-prometheus
10 |     app.kubernetes.io/version: 2.42.0
11 |   name: prometheus-k8s
12 |   namespace: monitoring
13 | 


--------------------------------------------------------------------------------
/observability/install/prometheus-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: prometheus
 6 |     app.kubernetes.io/instance: k8s
 7 |     app.kubernetes.io/name: prometheus
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 2.42.0
10 |   name: prometheus-k8s
11 |   namespace: monitoring
12 | spec:
13 |   endpoints:
14 |   - interval: 30s
15 |     port: web
16 |   - interval: 30s
17 |     port: reloader-web
18 |   selector:
19 |     matchLabels:
20 |       app.kubernetes.io/component: prometheus
21 |       app.kubernetes.io/instance: k8s
22 |       app.kubernetes.io/name: prometheus
23 |       app.kubernetes.io/part-of: kube-prometheus
24 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-apiService.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: apiregistration.k8s.io/v1
 2 | kind: APIService
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: v1beta1.metrics.k8s.io
10 | spec:
11 |   group: metrics.k8s.io
12 |   groupPriorityMinimum: 100
13 |   insecureSkipTLSVerify: true
14 |   service:
15 |     name: prometheus-adapter
16 |     namespace: monitoring
17 |   version: v1beta1
18 |   versionPriority: 100
19 | 
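Note: this APIService hands the metrics.k8s.io group to prometheus-adapter, which is what backs kubectl top and resource-metric HPAs in this setup (there is no separate metrics-server). A minimal, hypothetical HPA that depends on it (workload name is a placeholder):

    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    metadata:
      name: my-app                 # hypothetical workload
    spec:
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: my-app
      minReplicas: 1
      maxReplicas: 5
      metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: Utilization
            averageUtilization: 80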


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-clusterRole.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | rules:
12 | - apiGroups:
13 |   - ""
14 |   resources:
15 |   - nodes
16 |   - namespaces
17 |   - pods
18 |   - services
19 |   verbs:
20 |   - get
21 |   - list
22 |   - watch
23 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |     rbac.authorization.k8s.io/aggregate-to-admin: "true"
10 |     rbac.authorization.k8s.io/aggregate-to-edit: "true"
11 |     rbac.authorization.k8s.io/aggregate-to-view: "true"
12 |   name: system:aggregated-metrics-reader
13 |   namespace: monitoring
14 | rules:
15 | - apiGroups:
16 |   - metrics.k8s.io
17 |   resources:
18 |   - pods
19 |   - nodes
20 |   verbs:
21 |   - get
22 |   - list
23 |   - watch
24 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: prometheus-adapter
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: prometheus-adapter
18 |   namespace: monitoring
19 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-clusterRoleBindingDelegator.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: resource-metrics:system:auth-delegator
10 |   namespace: monitoring
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: system:auth-delegator
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: prometheus-adapter
18 |   namespace: monitoring
19 | 
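Note: binding the adapter to system:auth-delegator lets it delegate authentication and authorization of incoming metrics requests to the main API server (TokenReview/SubjectAccessReview), the standard pattern for aggregated API servers. The authorization side can be probed with impersonation (example; assumes kubeadm-style per-controller service accounts, which kind sets up):

    kubectl auth can-i list pods.metrics.k8s.io \
      --as=system:serviceaccount:kube-system:horizontal-pod-autoscaler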


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-clusterRoleServerResources.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: resource-metrics-server-resources
10 |   namespace: monitoring
11 | rules:
12 | - apiGroups:
13 |   - metrics.k8s.io
14 |   resources:
15 |   - '*'
16 |   verbs:
17 |   - '*'
18 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-configMap.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | data:
 3 |   config.yaml: |-
 4 |     "resourceRules":
 5 |       "cpu":
 6 |         "containerLabel": "container"
 7 |         "containerQuery": |
 8 |           sum by (<<.GroupBy>>) (
 9 |             irate (
10 |                 container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""}[120s]
11 |             )
12 |           )
13 |         "nodeQuery": |
14 |           sum by (<<.GroupBy>>) (
15 |             1 - irate(
16 |               node_cpu_seconds_total{mode="idle"}[60s]
17 |             )
18 |             * on(namespace, pod) group_left(node) (
19 |               node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}
20 |             )
21 |           )
22 |           or sum by (<<.GroupBy>>) (
23 |             1 - irate(
24 |               windows_cpu_time_total{mode="idle", job="windows-exporter",<<.LabelMatchers>>}[4m]
25 |             )
26 |           )
27 |         "resources":
28 |           "overrides":
29 |             "namespace":
30 |               "resource": "namespace"
31 |             "node":
32 |               "resource": "node"
33 |             "pod":
34 |               "resource": "pod"
35 |       "memory":
36 |         "containerLabel": "container"
37 |         "containerQuery": |
38 |           sum by (<<.GroupBy>>) (
39 |             container_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""}
40 |           )
41 |         "nodeQuery": |
42 |           sum by (<<.GroupBy>>) (
43 |             node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>}
44 |             -
45 |             node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}
46 |           )
47 |           or sum by (<<.GroupBy>>) (
48 |             windows_cs_physical_memory_bytes{job="windows-exporter",<<.LabelMatchers>>}
49 |             -
50 |             windows_memory_available_bytes{job="windows-exporter",<<.LabelMatchers>>}
51 |           )
52 |         "resources":
53 |           "overrides":
54 |             "instance":
55 |               "resource": "node"
56 |             "namespace":
57 |               "resource": "namespace"
58 |             "pod":
59 |               "resource": "pod"
60 |       "window": "5m"
61 | kind: ConfigMap
62 | metadata:
63 |   labels:
64 |     app.kubernetes.io/component: metrics-adapter
65 |     app.kubernetes.io/name: prometheus-adapter
66 |     app.kubernetes.io/part-of: kube-prometheus
67 |     app.kubernetes.io/version: 0.10.0
68 |   name: adapter-config
69 |   namespace: monitoring
70 | 
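Note: the <<.LabelMatchers>> and <<.GroupBy>> placeholders are filled in by the adapter per request. As a rough illustration (not literal adapter output), a pod CPU query for a hypothetical pod my-pod in namespace default expands the containerQuery to roughly:

    sum by (pod) (
      irate(container_cpu_usage_seconds_total{namespace="default",pod="my-pod",container!="",pod!=""}[120s])
    )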


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-deployment.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: Deployment
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | spec:
12 |   replicas: 2
13 |   selector:
14 |     matchLabels:
15 |       app.kubernetes.io/component: metrics-adapter
16 |       app.kubernetes.io/name: prometheus-adapter
17 |       app.kubernetes.io/part-of: kube-prometheus
18 |   strategy:
19 |     rollingUpdate:
20 |       maxSurge: 1
21 |       maxUnavailable: 1
22 |   template:
23 |     metadata:
24 |       labels:
25 |         app.kubernetes.io/component: metrics-adapter
26 |         app.kubernetes.io/name: prometheus-adapter
27 |         app.kubernetes.io/part-of: kube-prometheus
28 |         app.kubernetes.io/version: 0.10.0
29 |     spec:
30 |       automountServiceAccountToken: true
31 |       containers:
32 |       - args:
33 |         - --cert-dir=/var/run/serving-cert
34 |         - --config=/etc/adapter/config.yaml
35 |         - --logtostderr=true
36 |         - --metrics-relist-interval=1m
37 |         - --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/
38 |         - --secure-port=6443
39 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
40 |         image: registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.10.0
41 |         livenessProbe:
42 |           failureThreshold: 5
43 |           httpGet:
44 |             path: /livez
45 |             port: https
46 |             scheme: HTTPS
47 |           initialDelaySeconds: 30
48 |           periodSeconds: 5
49 |         name: prometheus-adapter
50 |         ports:
51 |         - containerPort: 6443
52 |           name: https
53 |         readinessProbe:
54 |           failureThreshold: 5
55 |           httpGet:
56 |             path: /readyz
57 |             port: https
58 |             scheme: HTTPS
59 |           initialDelaySeconds: 30
60 |           periodSeconds: 5
61 |         resources:
62 |           limits:
63 |             cpu: 250m
64 |             memory: 180Mi
65 |           requests:
66 |             cpu: 102m
67 |             memory: 180Mi
68 |         securityContext:
69 |           allowPrivilegeEscalation: false
70 |           capabilities:
71 |             drop:
72 |             - ALL
73 |           readOnlyRootFilesystem: true
74 |         volumeMounts:
75 |         - mountPath: /tmp
76 |           name: tmpfs
77 |           readOnly: false
78 |         - mountPath: /var/run/serving-cert
79 |           name: volume-serving-cert
80 |           readOnly: false
81 |         - mountPath: /etc/adapter
82 |           name: config
83 |           readOnly: false
84 |       nodeSelector:
85 |         kubernetes.io/os: linux
86 |       serviceAccountName: prometheus-adapter
87 |       volumes:
88 |       - emptyDir: {}
89 |         name: tmpfs
90 |       - emptyDir: {}
91 |         name: volume-serving-cert
92 |       - configMap:
93 |           name: adapter-config
94 |         name: config
95 | 
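Note: --cert-dir points at an emptyDir, so the adapter generates a self-signed serving certificate on startup; that is why the APIService above sets insecureSkipTLSVerify: true. After rollout the aggregated API should report as available (example check):

    kubectl get apiservice v1beta1.metrics.k8s.io \
      -o jsonpath='{.status.conditions[?(@.type=="Available")].status}'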


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - {}
16 |   podSelector:
17 |     matchLabels:
18 |       app.kubernetes.io/component: metrics-adapter
19 |       app.kubernetes.io/name: prometheus-adapter
20 |       app.kubernetes.io/part-of: kube-prometheus
21 |   policyTypes:
22 |   - Egress
23 |   - Ingress
24 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: policy/v1
 2 | kind: PodDisruptionBudget
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | spec:
12 |   minAvailable: 1
13 |   selector:
14 |     matchLabels:
15 |       app.kubernetes.io/component: metrics-adapter
16 |       app.kubernetes.io/name: prometheus-adapter
17 |       app.kubernetes.io/part-of: kube-prometheus
18 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-roleBindingAuthReader.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: RoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: resource-metrics-auth-reader
10 |   namespace: kube-system
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: Role
14 |   name: extension-apiserver-authentication-reader
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: prometheus-adapter
18 |   namespace: monitoring
19 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | spec:
12 |   ports:
13 |   - name: https
14 |     port: 443
15 |     targetPort: 6443
16 |   selector:
17 |     app.kubernetes.io/component: metrics-adapter
18 |     app.kubernetes.io/name: prometheus-adapter
19 |     app.kubernetes.io/part-of: kube-prometheus
20 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: metrics-adapter
 7 |     app.kubernetes.io/name: prometheus-adapter
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.10.0
10 |   name: prometheus-adapter
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/prometheusAdapter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: metrics-adapter
 6 |     app.kubernetes.io/name: prometheus-adapter
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.10.0
 9 |   name: prometheus-adapter
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 |     interval: 30s
15 |     metricRelabelings:
16 |     - action: drop
17 |       regex: (apiserver_client_certificate_.*|apiserver_envelope_.*|apiserver_flowcontrol_.*|apiserver_storage_.*|apiserver_webhooks_.*|workqueue_.*)
18 |       sourceLabels:
19 |       - __name__
20 |     port: https
21 |     scheme: https
22 |     tlsConfig:
23 |       insecureSkipVerify: true
24 |   selector:
25 |     matchLabels:
26 |       app.kubernetes.io/component: metrics-adapter
27 |       app.kubernetes.io/name: prometheus-adapter
28 |       app.kubernetes.io/part-of: kube-prometheus
29 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-clusterRole.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRole
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 | rules:
11 | - apiGroups:
12 |   - monitoring.coreos.com
13 |   resources:
14 |   - alertmanagers
15 |   - alertmanagers/finalizers
16 |   - alertmanagers/status
17 |   - alertmanagerconfigs
18 |   - prometheuses
19 |   - prometheuses/finalizers
20 |   - prometheuses/status
21 |   - thanosrulers
22 |   - thanosrulers/finalizers
23 |   - servicemonitors
24 |   - podmonitors
25 |   - probes
26 |   - prometheusrules
27 |   verbs:
28 |   - '*'
29 | - apiGroups:
30 |   - apps
31 |   resources:
32 |   - statefulsets
33 |   verbs:
34 |   - '*'
35 | - apiGroups:
36 |   - ""
37 |   resources:
38 |   - configmaps
39 |   - secrets
40 |   verbs:
41 |   - '*'
42 | - apiGroups:
43 |   - ""
44 |   resources:
45 |   - pods
46 |   verbs:
47 |   - list
48 |   - delete
49 | - apiGroups:
50 |   - ""
51 |   resources:
52 |   - services
53 |   - services/finalizers
54 |   - endpoints
55 |   verbs:
56 |   - get
57 |   - create
58 |   - update
59 |   - delete
60 | - apiGroups:
61 |   - ""
62 |   resources:
63 |   - nodes
64 |   verbs:
65 |   - list
66 |   - watch
67 | - apiGroups:
68 |   - ""
69 |   resources:
70 |   - namespaces
71 |   verbs:
72 |   - get
73 |   - list
74 |   - watch
75 | - apiGroups:
76 |   - networking.k8s.io
77 |   resources:
78 |   - ingresses
79 |   verbs:
80 |   - get
81 |   - list
82 |   - watch
83 | - apiGroups:
84 |   - authentication.k8s.io
85 |   resources:
86 |   - tokenreviews
87 |   verbs:
88 |   - create
89 | - apiGroups:
90 |   - authorization.k8s.io
91 |   resources:
92 |   - subjectaccessreviews
93 |   verbs:
94 |   - create
95 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: rbac.authorization.k8s.io/v1
 2 | kind: ClusterRoleBinding
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 | roleRef:
11 |   apiGroup: rbac.authorization.k8s.io
12 |   kind: ClusterRole
13 |   name: prometheus-operator
14 | subjects:
15 | - kind: ServiceAccount
16 |   name: prometheus-operator
17 |   namespace: monitoring
18 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-deployment.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: Deployment
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 |   namespace: monitoring
11 | spec:
12 |   replicas: 1
13 |   selector:
14 |     matchLabels:
15 |       app.kubernetes.io/component: controller
16 |       app.kubernetes.io/name: prometheus-operator
17 |       app.kubernetes.io/part-of: kube-prometheus
18 |   template:
19 |     metadata:
20 |       annotations:
21 |         kubectl.kubernetes.io/default-container: prometheus-operator
22 |       labels:
23 |         app.kubernetes.io/component: controller
24 |         app.kubernetes.io/name: prometheus-operator
25 |         app.kubernetes.io/part-of: kube-prometheus
26 |         app.kubernetes.io/version: 0.63.0
27 |     spec:
28 |       automountServiceAccountToken: true
29 |       containers:
30 |       - args:
31 |         - --kubelet-service=kube-system/kubelet
32 |         - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.63.0
33 |         image: quay.io/prometheus-operator/prometheus-operator:v0.63.0
34 |         name: prometheus-operator
35 |         ports:
36 |         - containerPort: 8080
37 |           name: http
38 |         resources:
39 |           limits:
40 |             cpu: 200m
41 |             memory: 200Mi
42 |           requests:
43 |             cpu: 100m
44 |             memory: 100Mi
45 |         securityContext:
46 |           allowPrivilegeEscalation: false
47 |           capabilities:
48 |             drop:
49 |             - ALL
50 |           readOnlyRootFilesystem: true
51 |       - args:
52 |         - --logtostderr
53 |         - --secure-listen-address=:8443
54 |         - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
55 |         - --upstream=http://127.0.0.1:8080/
56 |         image: quay.io/brancz/kube-rbac-proxy:v0.14.0
57 |         name: kube-rbac-proxy
58 |         ports:
59 |         - containerPort: 8443
60 |           name: https
61 |         resources:
62 |           limits:
63 |             cpu: 20m
64 |             memory: 40Mi
65 |           requests:
66 |             cpu: 10m
67 |             memory: 20Mi
68 |         securityContext:
69 |           allowPrivilegeEscalation: false
70 |           capabilities:
71 |             drop:
72 |             - ALL
73 |           readOnlyRootFilesystem: true
74 |           runAsGroup: 65532
75 |           runAsNonRoot: true
76 |           runAsUser: 65532
77 |       nodeSelector:
78 |         kubernetes.io/os: linux
79 |       securityContext:
80 |         runAsNonRoot: true
81 |         runAsUser: 65534
82 |       serviceAccountName: prometheus-operator
83 | 
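Note: the kube-rbac-proxy sidecar terminates TLS on 8443 and authorizes each request before proxying to the operator's plain-HTTP port 8080, which is why the prometheus-operator ServiceMonitor further down scrapes https with a bearer token. A manual probe might look like this (sketch; assumes kubectl v1.24+ for `kubectl create token` and that the prometheus-k8s ServiceAccount may GET /metrics):

    kubectl -n monitoring port-forward deploy/prometheus-operator 8443 &
    curl -sk -H "Authorization: Bearer $(kubectl -n monitoring create token prometheus-k8s)" \
      https://localhost:8443/metrics | head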


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-networkPolicy.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: NetworkPolicy
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 |   namespace: monitoring
11 | spec:
12 |   egress:
13 |   - {}
14 |   ingress:
15 |   - from:
16 |     - podSelector:
17 |         matchLabels:
18 |           app.kubernetes.io/name: prometheus
19 |     ports:
20 |     - port: 8443
21 |       protocol: TCP
22 |   podSelector:
23 |     matchLabels:
24 |       app.kubernetes.io/component: controller
25 |       app.kubernetes.io/name: prometheus-operator
26 |       app.kubernetes.io/part-of: kube-prometheus
27 |   policyTypes:
28 |   - Egress
29 |   - Ingress
30 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-prometheusRule.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: monitoring.coreos.com/v1
  2 | kind: PrometheusRule
  3 | metadata:
  4 |   labels:
  5 |     app.kubernetes.io/component: controller
  6 |     app.kubernetes.io/name: prometheus-operator
  7 |     app.kubernetes.io/part-of: kube-prometheus
  8 |     app.kubernetes.io/version: 0.63.0
  9 |     prometheus: k8s
 10 |     role: alert-rules
 11 |   name: prometheus-operator-rules
 12 |   namespace: monitoring
 13 | spec:
 14 |   groups:
 15 |   - name: prometheus-operator
 16 |     rules:
 17 |     - alert: PrometheusOperatorListErrors
 18 |       annotations:
 19 |         description: Errors while performing List operations in controller {{$labels.controller}}
 20 |           in {{$labels.namespace}} namespace.
 21 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorlisterrors
 22 |         summary: Errors while performing list operations in controller.
 23 |       expr: |
 24 |         (sum by (controller,namespace) (rate(prometheus_operator_list_operations_failed_total{job="prometheus-operator",namespace="monitoring"}[10m])) / sum by (controller,namespace) (rate(prometheus_operator_list_operations_total{job="prometheus-operator",namespace="monitoring"}[10m]))) > 0.4
 25 |       for: 15m
 26 |       labels:
 27 |         severity: warning
 28 |     - alert: PrometheusOperatorWatchErrors
 29 |       annotations:
 30 |         description: Errors while performing watch operations in controller {{$labels.controller}}
 31 |           in {{$labels.namespace}} namespace.
 32 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorwatcherrors
 33 |         summary: Errors while performing watch operations in controller.
 34 |       expr: |
 35 |         (sum by (controller,namespace) (rate(prometheus_operator_watch_operations_failed_total{job="prometheus-operator",namespace="monitoring"}[5m])) / sum by (controller,namespace) (rate(prometheus_operator_watch_operations_total{job="prometheus-operator",namespace="monitoring"}[5m]))) > 0.4
 36 |       for: 15m
 37 |       labels:
 38 |         severity: warning
 39 |     - alert: PrometheusOperatorSyncFailed
 40 |       annotations:
 41 |         description: Controller {{ $labels.controller }} in {{ $labels.namespace }}
 42 |           namespace fails to reconcile {{ $value }} objects.
 43 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorsyncfailed
 44 |         summary: Last controller reconciliation failed
 45 |       expr: |
 46 |         min_over_time(prometheus_operator_syncs{status="failed",job="prometheus-operator",namespace="monitoring"}[5m]) > 0
 47 |       for: 10m
 48 |       labels:
 49 |         severity: warning
 50 |     - alert: PrometheusOperatorReconcileErrors
 51 |       annotations:
 52 |         description: '{{ $value | humanizePercentage }} of reconciling operations
 53 |           failed for {{ $labels.controller }} controller in {{ $labels.namespace }}
 54 |           namespace.'
 55 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorreconcileerrors
 56 |         summary: Errors while reconciling controller.
 57 |       expr: |
 58 |         (sum by (controller,namespace) (rate(prometheus_operator_reconcile_errors_total{job="prometheus-operator",namespace="monitoring"}[5m]))) / (sum by (controller,namespace) (rate(prometheus_operator_reconcile_operations_total{job="prometheus-operator",namespace="monitoring"}[5m]))) > 0.1
 59 |       for: 10m
 60 |       labels:
 61 |         severity: warning
 62 |     - alert: PrometheusOperatorNodeLookupErrors
 63 |       annotations:
 64 |         description: Errors while reconciling Prometheus in {{ $labels.namespace }}
 65 |           Namespace.
 66 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatornodelookuperrors
 67 |         summary: Errors while reconciling Prometheus.
 68 |       expr: |
 69 |         rate(prometheus_operator_node_address_lookup_errors_total{job="prometheus-operator",namespace="monitoring"}[5m]) > 0.1
 70 |       for: 10m
 71 |       labels:
 72 |         severity: warning
 73 |     - alert: PrometheusOperatorNotReady
 74 |       annotations:
 75 |         description: Prometheus operator in {{ $labels.namespace }} namespace isn't
 76 |           ready to reconcile {{ $labels.controller }} resources.
 77 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatornotready
 78 |         summary: Prometheus operator not ready
 79 |       expr: |
 80 |         min by (controller,namespace) (max_over_time(prometheus_operator_ready{job="prometheus-operator",namespace="monitoring"}[5m]) == 0)
 81 |       for: 5m
 82 |       labels:
 83 |         severity: warning
 84 |     - alert: PrometheusOperatorRejectedResources
 85 |       annotations:
 86 |         description: Prometheus operator in {{ $labels.namespace }} namespace rejected
 87 |           {{ printf "%0.0f" $value }} {{ $labels.controller }}/{{ $labels.resource
 88 |           }} resources.
 89 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/prometheusoperatorrejectedresources
 90 |         summary: Resources rejected by Prometheus operator
 91 |       expr: |
 92 |         min_over_time(prometheus_operator_managed_resources{state="rejected",job="prometheus-operator",namespace="monitoring"}[5m]) > 0
 93 |       for: 5m
 94 |       labels:
 95 |         severity: warning
 96 |   - name: config-reloaders
 97 |     rules:
 98 |     - alert: ConfigReloaderSidecarErrors
 99 |       annotations:
100 |         description: |-
101 |           Errors encountered while the {{$labels.pod}} config-reloader sidecar attempts to sync config in {{$labels.namespace}} namespace.
102 |           As a result, configuration for service running in {{$labels.pod}} may be stale and cannot be updated anymore.
103 |         runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/configreloadersidecarerrors
104 |         summary: config-reloader sidecar has not had a successful reload for 10m
105 |       expr: |
106 |         max_over_time(reloader_last_reload_successful{namespace=~".+"}[5m]) == 0
107 |       for: 10m
108 |       labels:
109 |         severity: warning
110 | 
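Note: every expr above is plain PromQL; PrometheusOperatorListErrors, for instance, fires when more than 40% of list operations fail over 10 minutes, sustained for 15 minutes. Since spec.groups has the same shape promtool expects, the rules can be linted outside the cluster (sketch; assumes mikefarah yq v4 and promtool on PATH):

    kubectl -n monitoring get prometheusrule prometheus-operator-rules -o yaml \
      | yq '.spec' > /tmp/rules.yaml
    promtool check rules /tmp/rules.yaml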


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 |   namespace: monitoring
11 | spec:
12 |   clusterIP: None
13 |   ports:
14 |   - name: https
15 |     port: 8443
16 |     targetPort: https
17 |   selector:
18 |     app.kubernetes.io/component: controller
19 |     app.kubernetes.io/name: prometheus-operator
20 |     app.kubernetes.io/part-of: kube-prometheus
21 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-serviceAccount.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | automountServiceAccountToken: false
 3 | kind: ServiceAccount
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/component: controller
 7 |     app.kubernetes.io/name: prometheus-operator
 8 |     app.kubernetes.io/part-of: kube-prometheus
 9 |     app.kubernetes.io/version: 0.63.0
10 |   name: prometheus-operator
11 |   namespace: monitoring
12 | 


--------------------------------------------------------------------------------
/observability/install/prometheusOperator-serviceMonitor.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: monitoring.coreos.com/v1
 2 | kind: ServiceMonitor
 3 | metadata:
 4 |   labels:
 5 |     app.kubernetes.io/component: controller
 6 |     app.kubernetes.io/name: prometheus-operator
 7 |     app.kubernetes.io/part-of: kube-prometheus
 8 |     app.kubernetes.io/version: 0.63.0
 9 |   name: prometheus-operator
10 |   namespace: monitoring
11 | spec:
12 |   endpoints:
13 |   - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 |     honorLabels: true
15 |     port: https
16 |     scheme: https
17 |     tlsConfig:
18 |       insecureSkipVerify: true
19 |   selector:
20 |     matchLabels:
21 |       app.kubernetes.io/component: controller
22 |       app.kubernetes.io/name: prometheus-operator
23 |       app.kubernetes.io/part-of: kube-prometheus
24 |       app.kubernetes.io/version: 0.63.0
25 | 


--------------------------------------------------------------------------------
/observability/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: monitoring
5 | 


--------------------------------------------------------------------------------
/observability/prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neticdk/k8s-workshop/00fe5c7bb1039398da16059e1f4cdf3611ad4e0a/observability/prometheus.png


--------------------------------------------------------------------------------
/observability/setup/0prometheusruleCustomResourceDefinition.yaml:
--------------------------------------------------------------------------------
  1 | apiVersion: apiextensions.k8s.io/v1
  2 | kind: CustomResourceDefinition
  3 | metadata:
  4 |   annotations:
  5 |     controller-gen.kubebuilder.io/version: v0.11.1
  6 |   creationTimestamp: null
  7 |   name: prometheusrules.monitoring.coreos.com
  8 | spec:
  9 |   group: monitoring.coreos.com
 10 |   names:
 11 |     categories:
 12 |     - prometheus-operator
 13 |     kind: PrometheusRule
 14 |     listKind: PrometheusRuleList
 15 |     plural: prometheusrules
 16 |     shortNames:
 17 |     - promrule
 18 |     singular: prometheusrule
 19 |   scope: Namespaced
 20 |   versions:
 21 |   - name: v1
 22 |     schema:
 23 |       openAPIV3Schema:
 24 |         description: PrometheusRule defines recording and alerting rules for a Prometheus
 25 |           instance
 26 |         properties:
 27 |           apiVersion:
 28 |             description: 'APIVersion defines the versioned schema of this representation
 29 |               of an object. Servers should convert recognized schemas to the latest
 30 |               internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
 31 |             type: string
 32 |           kind:
 33 |             description: 'Kind is a string value representing the REST resource this
 34 |               object represents. Servers may infer this from the endpoint the client
 35 |               submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
 36 |             type: string
 37 |           metadata:
 38 |             type: object
 39 |           spec:
 40 |             description: Specification of desired alerting rule definitions for Prometheus.
 41 |             properties:
 42 |               groups:
 43 |                 description: Content of Prometheus rule file
 44 |                 items:
 45 |                   description: RuleGroup is a list of sequentially evaluated recording
 46 |                     and alerting rules.
 47 |                   properties:
 48 |                     interval:
 49 |                       description: Interval determines how often rules in the group
 50 |                         are evaluated.
 51 |                       pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
 52 |                       type: string
 53 |                     name:
 54 |                       description: Name of the rule group.
 55 |                       minLength: 1
 56 |                       type: string
 57 |                     partial_response_strategy:
 58 |                       description: 'PartialResponseStrategy is only used by ThanosRuler
 59 |                         and will be ignored by Prometheus instances. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response'
 60 |                       pattern: ^(?i)(abort|warn)?$
 61 |                       type: string
 62 |                     rules:
 63 |                       description: List of alerting and recording rules.
 64 |                       items:
 65 |                         description: 'Rule describes an alerting or recording rule
 66 |                           See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
 67 |                           or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules)
 68 |                           rule'
 69 |                         properties:
 70 |                           alert:
 71 |                             description: Name of the alert. Must be a valid label
 72 |                               value. Only one of `record` and `alert` must be set.
 73 |                             type: string
 74 |                           annotations:
 75 |                             additionalProperties:
 76 |                               type: string
 77 |                             description: Annotations to add to each alert. Only valid
 78 |                               for alerting rules.
 79 |                             type: object
 80 |                           expr:
 81 |                             anyOf:
 82 |                             - type: integer
 83 |                             - type: string
 84 |                             description: PromQL expression to evaluate.
 85 |                             x-kubernetes-int-or-string: true
 86 |                           for:
 87 |                             description: Alerts are considered firing once they have
 88 |                               been returned for this long.
 89 |                             pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$
 90 |                             type: string
 91 |                           labels:
 92 |                             additionalProperties:
 93 |                               type: string
 94 |                             description: Labels to add or overwrite.
 95 |                             type: object
 96 |                           record:
 97 |                             description: Name of the time series to output to. Must
 98 |                               be a valid metric name. Only one of `record` and `alert`
 99 |                               must be set.
100 |                             type: string
101 |                         required:
102 |                         - expr
103 |                         type: object
104 |                       type: array
105 |                   required:
106 |                   - name
107 |                   - rules
108 |                   type: object
109 |                 type: array
110 |                 x-kubernetes-list-map-keys:
111 |                 - name
112 |                 x-kubernetes-list-type: map
113 |             type: object
114 |         required:
115 |         - spec
116 |         type: object
117 |     served: true
118 |     storage: true
119 | 
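Note: the schema requires spec, each group needs name and rules, each rule needs expr, and only one of record/alert should be set per rule. A minimal instance that validates against this CRD could look like (illustrative example, not a file in this repo):

    apiVersion: monitoring.coreos.com/v1
    kind: PrometheusRule
    metadata:
      name: example-rules        # illustrative name
      namespace: monitoring
      labels:
        prometheus: k8s
        role: alert-rules
    spec:
      groups:
      - name: example
        rules:
        - record: job:up:sum     # recording rule, so `record` rather than `alert`
          expr: sum by (job) (up)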


--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 |   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 |   "extends": [
4 |     "local>neticdk/renovate-config"
5 |   ]
6 | }
7 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/all.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | echo "deploying ingress controller"
 4 | kubectl create -f ./ingress-controller.yaml
 5 | kubectl wait pod -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/component=controller --for condition=Ready --timeout=45s
 6 | 
 7 | echo "deploying services"
 8 | kubectl create -f ./services.yaml
 9 | 
10 | echo "deploying applications"
11 | kubectl create -f ./deployments.yaml
12 | kubectl create -f ./ingress.yaml
13 | 
14 | echo "all set"
15 | kubectl get all -A
16 | 
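Note: `kubectl wait` can exit with "no matching resources found" when it runs before the controller pod object has been created, and the 45s budget must also cover image pulls. A more forgiving variant waits on the Deployment instead (sketch; assumes the standard deployment name from the upstream kind ingress-nginx manifest):

    kubectl -n ingress-nginx rollout status deployment/ingress-nginx-controller --timeout=90s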


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: ingress
 4 | nodes:
 5 | - role: control-plane
 6 |   kubeadmConfigPatches:
 7 |   - |
 8 |     kind: InitConfiguration
 9 |     nodeRegistration:
10 |       kubeletExtraArgs:
11 |         node-labels: "ingress-ready=true"
12 |   extraPortMappings:
13 |   - containerPort: 80
14 |     hostPort: 80
15 |     protocol: TCP
16 |   - containerPort: 443
17 |     hostPort: 443
18 |     protocol: TCP
19 | - role: worker
20 | - role: worker
21 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name ingress --config=config.yaml
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name ingress
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/deployments.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-foo
 7 |     app.kubernetes.io/instance: hello-foo
 8 |   name: hello-foo-app
 9 | spec:
10 |   replicas: 1
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-foo
14 |       app.kubernetes.io/instance: hello-foo
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-foo
19 |         app.kubernetes.io/instance: hello-foo
20 |     spec:
21 |       containers:
22 |         - name: hello-foo-app
23 |           command:
24 |             - /agnhost
25 |             - netexec
26 |             - --http-port=8080
27 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
28 |           ports:
29 |             - name: http
30 |               containerPort: 8080
31 |               protocol: TCP
32 | ---
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 |   labels:
37 |     app.kubernetes.io/name: hello-bar
38 |     app.kubernetes.io/instance: hello-bar
39 |   name: hello-bar-app
40 | spec:
41 |   replicas: 1
42 |   selector:
43 |     matchLabels:
44 |       app.kubernetes.io/name: hello-bar
45 |       app.kubernetes.io/instance: hello-bar
46 |   template:
47 |     metadata:
48 |       labels:
49 |         app.kubernetes.io/name: hello-bar
50 |         app.kubernetes.io/instance: hello-bar
51 |     spec:
52 |       containers:
53 |         - command:
54 |             - /agnhost
55 |             - netexec
56 |             - --http-port=8080
57 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
58 |           name: hello-bar-app
59 |           ports:
60 |             - name: http
61 |               containerPort: 8080
62 |               protocol: TCP
63 | ---
64 | apiVersion: apps/v1
65 | kind: Deployment
66 | metadata:
67 |   labels:
68 |     app.kubernetes.io/name: hello-baz
69 |     app.kubernetes.io/instance: hello-baz
70 |   name: hello-baz-app
71 | spec:
72 |   replicas: 4
73 |   selector:
74 |     matchLabels:
75 |       app.kubernetes.io/name: hello-baz
76 |       app.kubernetes.io/instance: hello-baz
77 |   template:
78 |     metadata:
79 |       labels:
80 |         app.kubernetes.io/name: hello-baz
81 |         app.kubernetes.io/instance: hello-baz
82 |     spec:
83 |       containers:
84 |         - command:
85 |             - /agnhost
86 |             - netexec
87 |             - --http-port=8080
88 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
89 |           name: hello-baz-app
90 |           ports:
91 |             - name: http
92 |               containerPort: 8080
93 |               protocol: TCP
94 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/ingress.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: Ingress
 3 | metadata:
 4 |   name: hello-ingress
 5 |   annotations:
 6 |     nginx.ingress.kubernetes.io/rewrite-target: /$2
 7 | spec:
 8 |   rules:
 9 |     - http:
10 |         paths:
11 |           - pathType: Prefix
12 |             path: /hello-foo(/|$)(.*)
13 |             backend:
14 |               service:
15 |                 name: hello-foo-service
16 |                 port:
17 |                   name: http
18 |           - pathType: Prefix
19 |             path: /hello-bar(/|$)(.*)
20 |             backend:
21 |               service:
22 |                 name: hello-bar-service
23 |                 port:
24 |                   name: http
25 |           - pathType: Prefix
26 |             path: /hello-baz(/|$)(.*)
27 |             backend:
28 |               service:
29 |                 name: hello-baz-service
30 |                 port:
31 |                   name: http
32 | 
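Note: with rewrite-target: /$2, the second capture group of each path becomes the upstream path, so a request for /hello-foo/hostname reaches hello-foo-service as /hostname, which agnhost netexec answers with the pod name. Given the host port mapping in config.yaml this can be exercised directly (example):

    curl localhost/hello-foo/hostname
    curl localhost/hello-bar/hostname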


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/multiple-domains/ingress.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: Ingress
 3 | metadata:
 4 |   name: hello-ingress-multiple
 5 | spec:
 6 |   rules:
 7 |     - host: foo-127-0-0-1.nip.io
 8 |       http:
 9 |         paths:
10 |           - pathType: Prefix
11 |             path: /
12 |             backend:
13 |               service:
14 |                 name: hello-foo-service
15 |                 port:
16 |                   name: http
17 |     - host: bar-127-0-0-1.nip.io
18 |       http:
19 |         paths:
20 |           - pathType: Prefix
21 |             path: /
22 |             backend:
23 |               service:
24 |                 name: hello-bar-service
25 |                 port:
26 |                   name: http
27 |     - host: baz-127-0-0-1.nip.io
28 |       http:
29 |         paths:
30 |           - pathType: Prefix
31 |             path: /
32 |             backend:
33 |               service:
34 |                 name: hello-baz-service
35 |                 port:
36 |                   name: http
37 | 
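Note: this variant routes on the Host header instead of the path; nip.io wildcard DNS resolves foo-127-0-0-1.nip.io (and the bar/baz variants) to 127.0.0.1, so no /etc/hosts entries are needed (example):

    curl http://foo-127-0-0-1.nip.io/hostname
    curl -H "Host: bar-127-0-0-1.nip.io" http://localhost/hostname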


--------------------------------------------------------------------------------
/simple-kubernetes-with-ingress/services.yaml:
--------------------------------------------------------------------------------
 1 | kind: Service
 2 | apiVersion: v1
 3 | metadata:
 4 |   name: hello-foo-service
 5 | spec:
 6 |   selector:
 7 |     app.kubernetes.io/name: hello-foo
 8 |     app.kubernetes.io/instance: hello-foo
 9 |   ports:
10 |     - name: http
11 |       port: 8080
12 |       targetPort: http
13 | ---
14 | kind: Service
15 | apiVersion: v1
16 | metadata:
17 |   name: hello-bar-service
18 | spec:
19 |   selector:
20 |     app.kubernetes.io/name: hello-bar
21 |     app.kubernetes.io/instance: hello-bar
22 |   ports:
23 |     - name: http
24 |       port: 8080
25 |       targetPort: http
26 | ---
27 | kind: Service
28 | apiVersion: v1
29 | metadata:
30 |   name: hello-baz-service
31 | spec:
32 |   selector:
33 |     app.kubernetes.io/name: hello-baz
34 |     app.kubernetes.io/instance: hello-baz
35 |   ports:
36 |     - name: http
37 |       port: 8080
38 |       targetPort: http
39 | 
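Note: each Service uses targetPort: http, which resolves to the container port named http (8080) in the matching Deployment, so the port number only has to change in one place. Whether a selector actually matches pods can be verified via the endpoints (example):

    kubectl get endpoints hello-foo-service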


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/all.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | echo "deploying ingress controller"
 4 | kubectl create -f ./ingress-controller.yaml
 5 | kubectl wait pod -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/component=controller --for condition=Ready --timeout=45s
 6 | 
 7 | kubectl create -f ./priority-classes.yaml
 8 | kubectl create -f ./deployment-no-prio.yaml
 9 | kubectl create -f ./deployment-medium-prio.yaml
10 | kubectl create -f ./deployment-low-prio.yaml
11 | kubectl create -f ./deployment-high-prio.yaml
12 | 
13 | kubectl create -f ./poddisruptionbudgets.yaml
14 | kubectl create -f ./deployment-high-prio-pdb.yaml
15 | kubectl create -f ./deployment-medium-prio-pbd.yaml
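Note: once the cumulative CPU requests exceed what the two workers can allocate, the scheduler preempts lower-priority pods to place higher-priority ones, and whatever cannot be placed stays Pending. One way to watch this play out (example):

    kubectl get pods --field-selector=status.phase=Pending
    kubectl get events --sort-by=.lastTimestamp | grep -i preempt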


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: ingress
 4 | nodes:
 5 | - role: control-plane
 6 |   kubeadmConfigPatches:
 7 |   - |
 8 |     kind: InitConfiguration
 9 |     nodeRegistration:
10 |       kubeletExtraArgs:
11 |         node-labels: "ingress-ready=true"
12 |   extraPortMappings:
13 |   - containerPort: 80
14 |     hostPort: 80
15 |     protocol: TCP
16 |   - containerPort: 443
17 |     hostPort: 443
18 |     protocol: TCP
19 | - role: worker
20 | - role: worker
21 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name priority --config=config.yaml
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name priority
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-high-prio-nolimit.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-foo-nl
 7 |     app.kubernetes.io/instance: hello-foo-nl
 8 |   name: hello-foo-app-nl
 9 | spec:
10 |   replicas: 2
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-foo-nl
14 |       app.kubernetes.io/instance: hello-foo-nl
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-foo-nl
19 |         app.kubernetes.io/instance: hello-foo-nl
20 |     spec:
21 |       priorityClassName: higest-priority
22 |       containers:
23 |         - name: hello-foo-app-nl
24 |           command:
25 |             - /agnhost
26 |             - netexec
27 |             - --http-port=8080
28 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               memory: 25Mi
36 |             requests: 
37 |               cpu: 2800m
38 |               memory: 25Mi
39 |               
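Note: scheduling is driven by requests, not limits; each replica here asks for 2800m CPU (with no CPU limit set), which presumably exceeds a kind worker's remaining allocatable CPU, so these pods either trigger preemption or stay Pending. Node headroom can be inspected with (example):

    kubectl describe nodes | grep -A 7 "Allocated resources"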


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-high-prio-pdb.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-foo-pdb
 7 |     app.kubernetes.io/instance: hello-foo-pdb
 8 |   name: hello-foo-pdb-app
 9 | spec:
10 |   replicas: 4
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-foo-pdb
14 |       app.kubernetes.io/instance: hello-foo-pdb
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-foo-pdb
19 |         app.kubernetes.io/instance: hello-foo-pdb
20 |     spec:
21 |       priorityClassName: higest-priority
22 |       containers:
23 |         - name: hello-foo-pdb-app
24 |           command:
25 |             - /agnhost
26 |             - netexec
27 |             - --http-port=8080
28 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               cpu: 90m
36 |               memory: 25Mi
37 |             requests: 
38 |               cpu: 80m
39 |               memory: 25Mi
40 |               


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-high-prio.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-foo
 7 |     app.kubernetes.io/instance: hello-foo
 8 |   name: hello-foo-app
 9 | spec:
10 |   replicas: 2
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-foo
14 |       app.kubernetes.io/instance: hello-foo
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-foo
19 |         app.kubernetes.io/instance: hello-foo
20 |     spec:
21 |       priorityClassName: higest-priority
22 |       containers:
23 |         - name: hello-foo-app
24 |           command:
25 |             - /agnhost
26 |             - netexec
27 |             - --http-port=8080
28 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               cpu: 90m
36 |               memory: 25Mi
37 |             requests: 
38 |               cpu: 80m
39 |               memory: 25Mi
40 |               


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-low-prio.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-baz
 7 |     app.kubernetes.io/instance: hello-baz
 8 |   name: hello-baz-app
 9 | spec:
10 |   replicas: 8
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-baz
14 |       app.kubernetes.io/instance: hello-baz
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-baz
19 |         app.kubernetes.io/instance: hello-baz
20 |     spec:
21 |       priorityClassName: low-priority
22 |       containers:
23 |         - command:
24 |             - /agnhost
25 |             - netexec
26 |             - --http-port=8080
27 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
28 |           name: hello-baz-app
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               cpu: 505m
36 |               memory: 25Mi
37 |             requests: 
38 |               cpu: 185m
39 |               memory: 25Mi
40 | 
41 | 
42 | 


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-medium-prio-pbd.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-bar-pdb
 7 |     app.kubernetes.io/instance: hello-bar-pdb
 8 |   name: hello-bar-pdb-app
 9 | spec:
10 |   replicas: 8
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-bar-pdb
14 |       app.kubernetes.io/instance: hello-bar-pdb
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-bar-pdb
19 |         app.kubernetes.io/instance: hello-bar-pdb
20 |     spec:
21 |       priorityClassName: medium-priority
22 |       containers:
23 |         - command:
24 |             - /agnhost
25 |             - netexec
26 |             - --http-port=8080
27 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
28 |           name: hello-bar-pdb-app
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               cpu: 125m
36 |               memory: 25Mi
37 |             requests: 
38 |               cpu: 125m
39 |               memory: 25Mi
40 |               


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-medium-prio.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-bar
 7 |     app.kubernetes.io/instance: hello-bar
 8 |   name: hello-bar-app
 9 | spec:
10 |   replicas: 4
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-bar
14 |       app.kubernetes.io/instance: hello-bar
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-bar
19 |         app.kubernetes.io/instance: hello-bar
20 |     spec:
21 |       priorityClassName: medium-priority
22 |       containers:
23 |         - command:
24 |             - /agnhost
25 |             - netexec
26 |             - --http-port=8080
27 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
28 |           name: hello-bar-app
29 |           ports:
30 |             - name: http
31 |               containerPort: 8080
32 |               protocol: TCP
33 |           resources: 
34 |             limits: 
35 |               cpu: 125m
36 |               memory: 25Mi
37 |             requests: 
38 |               cpu: 125m
39 |               memory: 25Mi
40 |               
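41 | # Note: same workload as hello-bar-pdb, but no PodDisruptionBudget
42 | # selects these pods, so nothing limits how many can be evicted at once.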


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/deployment-no-prio.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: apps/v1
 3 | kind: Deployment
 4 | metadata:
 5 |   labels:
 6 |     app.kubernetes.io/name: hello-foobar
 7 |     app.kubernetes.io/instance: hello-foobar
 8 |   name: hello-foobar-app
 9 | spec:
10 |   replicas: 16
11 |   selector:
12 |     matchLabels:
13 |       app.kubernetes.io/name: hello-foobar
14 |       app.kubernetes.io/instance: hello-foobar
15 |   template:
16 |     metadata:
17 |       labels:
18 |         app.kubernetes.io/name: hello-foobar
19 |         app.kubernetes.io/instance: hello-foobar
20 |     spec:
21 |       containers:
22 |         - command:
23 |             - /agnhost
24 |             - netexec
25 |             - --http-port=8080
26 |           image: registry.k8s.io/e2e-test-images/agnhost:2.39
27 |           name: hello-foobar-app
28 |           ports:
29 |             - name: http
30 |               containerPort: 8080
31 |               protocol: TCP
32 |           resources: 
33 |             limits: 
34 |               cpu: 505m
35 |               memory: 25Mi
36 |             requests: 
37 |               cpu: 185m
38 |               memory: 25Mi
39 | 
40 | 
41 | 
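42 | # Note: no priorityClassName is set, so these pods fall back to the
43 | # globalDefault class (low-priority, value 60000) from
44 | # priority-classes.yaml.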


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/ingress.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: Ingress
 3 | metadata:
 4 |   name: hello-ingress
 5 |   annotations:
 6 |     nginx.ingress.kubernetes.io/rewrite-target: /$2
 7 | spec:
 8 |   rules:
 9 |     - http:
10 |         paths:
11 |           - pathType: Prefix
12 |             path: /hello-foo(/|$)(.*)
13 |             backend:
14 |               service:
15 |                 name: hello-foo-service
16 |                 port:
17 |                   name: http
18 |           - pathType: Prefix
19 |             path: /hello-bar(/|$)(.*)
20 |             backend:
21 |               service:
22 |                 name: hello-bar-service
23 |                 port:
24 |                   name: http
25 |           - pathType: Prefix
26 |             path: /hello-baz(/|$)(.*)
27 |             backend:
28 |               service:
29 |                 name: hello-baz-service
30 |                 port:
31 |                   name: http
32 |           - pathType: Prefix
33 |             path: /hello-foobar(/|$)(.*)
34 |             backend:
35 |               service:
36 |                 name: hello-foobar-service
37 |                 port:
38 |                   name: http
39 | 
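40 | # Note: with rewrite-target: /$2 the second capture group becomes the
41 | # upstream path, e.g. /hello-foo/hostname is forwarded to
42 | # hello-foo-service as /hostname.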


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/multiple-domains/ingress.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: networking.k8s.io/v1
 2 | kind: Ingress
 3 | metadata:
 4 |   name: hello-ingress-multiple
 5 | spec:
 6 |   rules:
 7 |     - host: foo-127-0-0-1.nip.io
 8 |       http:
 9 |         paths:
10 |           - pathType: Prefix
11 |             path: /
12 |             backend:
13 |               service:
14 |                 name: hello-foo-service
15 |                 port:
16 |                   name: http
17 |     - host: foopdb-127-0-0-1.nip.io
18 |       http:
19 |         paths:
20 |           - pathType: Prefix
21 |             path: /
22 |             backend:
23 |               service:
24 |                 name: hello-foo-pdb-service
25 |                 port:
26 |                   name: http
27 |     - host: bar-127-0-0-1.nip.io
28 |       http:
29 |         paths:
30 |           - pathType: Prefix
31 |             path: /
32 |             backend:
33 |               service:
34 |                 name: hello-bar-service
35 |                 port:
36 |                   name: http
37 |     - host: barpdb-127-0-0-1.nip.io
38 |       http:
39 |         paths:
40 |           - pathType: Prefix
41 |             path: /
42 |             backend:
43 |               service:
44 |                 name: hello-bar-pdb-service
45 |                 port:
46 |                   name: http
47 |     - host: baz-127-0-0-1.nip.io
48 |       http:
49 |         paths:
50 |           - pathType: Prefix
51 |             path: /
52 |             backend:
53 |               service:
54 |                 name: hello-baz-service
55 |                 port:
56 |                   name: http
57 |     - host: foobar-127-0-0-1.nip.io
58 |       http:
59 |         paths:
60 |           - pathType: Prefix
61 |             path: /
62 |             backend:
63 |               service:
64 |                 name: hello-foobar-service
65 |                 port:
66 |                   name: http
67 | 
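68 | # Note: nip.io is wildcard DNS, so every *-127-0-0-1.nip.io host above
69 | # resolves to 127.0.0.1; each app gets its own hostname with no
70 | # /etc/hosts edits.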


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/poddisruptionbudgets.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: policy/v1
 3 | kind: PodDisruptionBudget
 4 | metadata:
 5 |   name: high-prio-pdb
 6 | spec:
 7 |   minAvailable: 1
 8 |   selector:
 9 |     matchLabels:
10 |       app.kubernetes.io/name: hello-foo-pdb
11 | ---
12 | apiVersion: policy/v1
13 | kind: PodDisruptionBudget
14 | metadata:
15 |   name: medium-prio-pdb
16 | spec:
17 |   minAvailable: 3
18 |   selector:
19 |     matchLabels:
20 |       app.kubernetes.io/name: hello-bar-pdb
21 | ---      
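22 | # Note: minAvailable bounds voluntary disruptions (e.g. kubectl drain):
23 | # at least 1 hello-foo-pdb pod and 3 hello-bar-pdb pods must stay up.
24 | # During scheduler preemption, PDBs are respected only best-effort.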


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/priority-classes.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: scheduling.k8s.io/v1
 2 | kind: PriorityClass
 3 | metadata:
 4 |   name: higest-priority
 5 | value: 90000
 6 | preemptionPolicy: "PreemptLowerPriority"
 7 | globalDefault: false
 8 | description: "This is the highest Priority Class value for this example"
 9 | ---
10 | apiVersion: scheduling.k8s.io/v1
11 | kind: PriorityClass
12 | metadata:
13 |   name: medium-priority
14 | value: 80000
15 | preemptionPolicy: "PreemptLowerPriority"
16 | globalDefault: false
17 | description: "This is the medium Priority Class value for this example"
18 | ---
19 | apiVersion: scheduling.k8s.io/v1
20 | kind: PriorityClass
21 | metadata:
22 |   name: low-priority
23 | value: 60000
24 | preemptionPolicy: "PreemptLowerPriority"
25 | globalDefault: true
26 | description: "This is the default and lowest Priority Class value for this example"
27 | ---
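28 | # Note: higher value wins; with PreemptLowerPriority the scheduler may
29 | # evict pods of a lower-valued class to place a pending higher-priority
30 | # pod. globalDefault: true on low-priority assigns that class to every
31 | # pod that does not set priorityClassName.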


--------------------------------------------------------------------------------
/simple-kubernetes-with-priority/services.yaml:
--------------------------------------------------------------------------------
 1 | kind: Service
 2 | apiVersion: v1
 3 | metadata:
 4 |   name: hello-foo-service
 5 | spec:
 6 |   selector:
 7 |     app.kubernetes.io/name: hello-foo
 8 |     app.kubernetes.io/instance: hello-foo
 9 |   ports:
10 |     - name: http
11 |       port: 8080
12 |       targetPort: http
13 | ---
14 | kind: Service
15 | apiVersion: v1
16 | metadata:
17 |   name: hello-foo-pdb-service
18 | spec:
19 |   selector:
20 |     app.kubernetes.io/name: hello-foo-pdb
21 |     app.kubernetes.io/instance: hello-foo-pdb
22 |   ports:
23 |     - name: http
24 |       port: 8080
25 |       targetPort: http
26 | ---
27 | kind: Service
28 | apiVersion: v1
29 | metadata:
30 |   name: hello-bar-service
31 | spec:
32 |   selector:
33 |     app.kubernetes.io/name: hello-bar
34 |     app.kubernetes.io/instance: hello-bar
35 |   ports:
36 |     - name: http
37 |       port: 8080
38 |       targetPort: http
39 | ---
40 | kind: Service
41 | apiVersion: v1
42 | metadata:
43 |   name: hello-bar-pdb-service
44 | spec:
45 |   selector:
46 |     app.kubernetes.io/name: hello-bar-pdb
47 |     app.kubernetes.io/instance: hello-bar-pdb
48 |   ports:
49 |     - name: http
50 |       port: 8080
51 |       targetPort: http
52 | ---
53 | kind: Service
54 | apiVersion: v1
55 | metadata:
56 |   name: hello-baz-service
57 | spec:
58 |   selector:
59 |     app.kubernetes.io/name: hello-baz
60 |     app.kubernetes.io/instance: hello-baz
61 |   ports:
62 |     - name: http
63 |       port: 8080
64 |       targetPort: http
65 | ---
66 | kind: Service
67 | apiVersion: v1
68 | metadata:
69 |   name: hello-foobar-service
70 | spec:
71 |   selector:
72 |     app.kubernetes.io/name: hello-foobar
73 |     app.kubernetes.io/instance: hello-foobar
74 |   ports:
75 |     - name: http
76 |       port: 8080
77 |       targetPort: http
78 | 
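79 | # Note: targetPort: http resolves to the containerPort named "http"
80 | # (8080) in each deployment's pod template.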


--------------------------------------------------------------------------------
/simple-kubernetes/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name kind
4 | 


--------------------------------------------------------------------------------
/simple-kubernetes/deployment.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: Deployment
 3 | metadata:
 4 |   labels:
 5 |     app: hello-app
 6 |   name: hello-app
 7 |   namespace: hello-workshop
 8 | spec:
 9 |   replicas: 1
10 |   selector:
11 |     matchLabels:
12 |       app: hello-app
13 |   template:
14 |     metadata:
15 |       labels:
16 |         app: hello-app
17 |     spec:
18 |       containers:
19 |       - name: agnhost
20 |         command:
21 |         - /agnhost
22 |         - netexec
23 |         - --http-port=8080
24 |         image: registry.k8s.io/e2e-test-images/agnhost:2.39
25 | 
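26 | # Note: agnhost netexec serves a small HTTP test server on --http-port;
27 | # its /hostname endpoint echoes the pod name, handy for smoke tests.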


--------------------------------------------------------------------------------
/simple-kubernetes/multi-node-cluster/cluster-config.yaml:
--------------------------------------------------------------------------------
 1 | kind: Cluster
 2 | apiVersion: kind.x-k8s.io/v1alpha4
 3 | name: simple-multi-node
 4 | nodes:
 5 | - role: control-plane
 6 |   kubeadmConfigPatches:
 7 |   - |
 8 |     kind: InitConfiguration
 9 |     nodeRegistration:
10 |       kubeletExtraArgs:
11 |         node-labels: "ingress-ready=true"
12 |   extraPortMappings:
13 |   - containerPort: 80
14 |     hostPort: 80
15 |     protocol: TCP
16 |   - containerPort: 443
17 |     hostPort: 443
18 |     protocol: TCP
19 | - role: worker
20 | - role: worker
21 | 
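22 | # Note: the ingress-ready=true label plus the 80/443 port mappings are
23 | # what kind's ingress-nginx setup expects, making an ingress controller
24 | # on the control-plane node reachable via localhost.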


--------------------------------------------------------------------------------
/simple-kubernetes/multi-node-cluster/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind create cluster --name simple-multi-node --config=cluster-config.yaml


--------------------------------------------------------------------------------
/simple-kubernetes/multi-node-cluster/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | kind delete cluster --name simple-multi-node


--------------------------------------------------------------------------------
/simple-kubernetes/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: hello-workshop
5 |   


--------------------------------------------------------------------------------
/simple-kubernetes/service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   labels:
 5 |     app: hello-app
 6 |   name: hello-app
 7 |   namespace: hello-workshop
 8 | spec:
 9 |   ports:
10 |   - port: 8080
11 |     protocol: TCP
12 |     targetPort: 8080
13 |   selector:
14 |     app: hello-app
15 |   type: ClusterIP
16 | 


--------------------------------------------------------------------------------