├── .gitignore
├── kustom-charts
│   ├── ingress-nginx
│   │   ├── .gitignore
│   │   └── kustomization.yaml
│   ├── kube-prometheus-stack
│   │   ├── .gitignore
│   │   ├── overlays
│   │   │   ├── grafana
│   │   │   │   ├── delete-grafana-secret.yaml
│   │   │   │   └── node.yaml
│   │   │   └── blackbox
│   │   │       └── node.yaml
│   │   ├── base
│   │   │   └── grafana
│   │   │       └── secrets
│   │   │           ├── example.ldap.toml
│   │   │           └── grafana-secret.yaml
│   │   ├── README.md
│   │   ├── kustomization.yaml
│   │   └── values.yaml
│   └── README.md
├── kustom
│   ├── argo-cd
│   │   ├── base
│   │   │   ├── argocd-namespace.yaml
│   │   │   ├── argocd-ingress.yaml
│   │   │   ├── argocd-cm.yaml
│   │   │   └── argocd-repo-key.yaml
│   │   ├── repos
│   │   │   └── kustomization.yaml
│   │   ├── overlays
│   │   │   ├── argo-cd-server-deployment.yaml
│   │   │   ├── argocd-cm-configmap.yaml
│   │   │   └── argocd-ssh-known-hosts-cm.yaml
│   │   ├── kustomization.yaml
│   │   └── README.md
│   ├── sealed-secrets
│   │   ├── namespace.yaml
│   │   ├── kustomization.yaml
│   │   ├── README.md
│   │   └── controller.yaml
│   └── README.md
├── apps
│   ├── kustomization.yaml
│   ├── templates
│   │   ├── root.yaml
│   │   ├── argocd.yaml
│   │   ├── sealed-secrets.yaml
│   │   ├── ingress-nginx.yaml
│   │   └── kube-prometheus-stack.yaml
│   └── README.md
└── README.md
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | key.yaml
--------------------------------------------------------------------------------
/kustom-charts/ingress-nginx/.gitignore:
--------------------------------------------------------------------------------
1 | charts
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/.gitignore:
--------------------------------------------------------------------------------
1 | charts
2 | 
--------------------------------------------------------------------------------
/kustom/argo-cd/base/argocd-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: argocd
--------------------------------------------------------------------------------
/kustom/sealed-secrets/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   name: sealed-secrets
--------------------------------------------------------------------------------
/kustom/argo-cd/repos/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | resources:
--------------------------------------------------------------------------------
/kustom/README.md:
--------------------------------------------------------------------------------
1 | # Kustom
2 | This folder will hold all [kustomize](https://kustomize.io) templates. These allow us to easily customize various apps and change them depending on environment.
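3 | 
4 | As a minimal sketch of the pattern (the file names below are illustrative, not files in this repo), a kustomization lists base resources and overlay patches:
5 | 
6 | ```yaml
7 | apiVersion: kustomize.config.k8s.io/v1beta1
8 | kind: Kustomization
9 | 
10 | resources:
11 | - base/namespace.yaml
12 | - base/deployment.yaml
13 | 
14 | patchesStrategicMerge:
15 | - overlays/node-selector.yaml
16 | ```
17 | 
18 | Running `kustomize build <folder>` renders the merged manifests, which argo-cd then applies to the cluster.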
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/overlays/grafana/delete-grafana-secret.yaml:
--------------------------------------------------------------------------------
1 | $patch: delete
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 |   name: boilerplate-grafana
6 |   namespace: monitoring
--------------------------------------------------------------------------------
/kustom/sealed-secrets/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | resources:
5 | - namespace.yaml
6 | - controller.yaml
7 | 
8 | namespace: sealed-secrets
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/overlays/blackbox/node.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: boilerplate-prometheus-blackbox-exporter
5 | spec:
6 |   template:
7 |     spec:
8 |       nodeSelector:
9 |         grafana.node: running
--------------------------------------------------------------------------------
/kustom-charts/ingress-nginx/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | helmCharts:
5 | - name: ingress-nginx
6 |   releaseName: ingress
7 |   version: 4.0.6
8 |   repo: https://kubernetes.github.io/ingress-nginx
9 | 
10 | namespace: default
--------------------------------------------------------------------------------
/apps/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | resources:
5 | - templates/argocd.yaml
6 | - templates/root.yaml
7 | - templates/sealed-secrets.yaml
8 | - templates/ingress-nginx.yaml
9 | - templates/kube-prometheus-stack.yaml
10 | 
11 | namespace: argocd
--------------------------------------------------------------------------------
/kustom/argo-cd/overlays/argo-cd-server-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: argocd-server
5 | spec:
6 |   template:
7 |     spec:
8 |       containers:
9 |       - name: argocd-server
10 |         command:
11 |         - argocd-server
12 |         - --insecure
13 | 
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/overlays/grafana/node.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: boilerplate-grafana
5 |   namespace: monitoring
6 | spec:
7 |   selector:
8 |     matchLabels:
9 |       app.kubernetes.io/name: grafana
10 |   template:
11 |     spec:
12 |       nodeSelector:
13 |         grafana.node: running
14 | 
15 | 
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/base/grafana/secrets/example.ldap.toml:
--------------------------------------------------------------------------------
1 | [[servers]]
2 | host =
3 | port = 636
4 | use_ssl = true
5 | start_tls = false
6 | ssl_skip_verify = false
7 | 
8 | bind_dn =
9 | bind_password = ""
10 | search_filter = "(uid=%s)"
11 | search_base_dns =
12 | 
13 | [servers.attributes]
14 | name = "givenName"
15 | surname = "sn"
16 | username = "uid"
17 | email = "mail"
18 | 
19 | [[servers.group_mappings]]
20 | group_dn = "*"
21 | org_role = "Editor"
--------------------------------------------------------------------------------
/kustom/argo-cd/base/argocd-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 |   name: argocd-server-ingress
5 |   namespace: argocd
6 |   annotations:
7 |     kubernetes.io/ingress.class: nginx
8 |     nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
9 |     nginx.ingress.kubernetes.io/ssl-passthrough: "true"
10 | spec:
11 |   rules:
12 |   - host: insert.url.here
13 |     http:
14 |       paths:
15 |       - backend:
16 |           serviceName: argocd-server
17 |           servicePort: https
--------------------------------------------------------------------------------
/apps/templates/root.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 |   name: root
5 |   finalizers:
6 |   - resources-finalizer.argocd.argoproj.io
7 | spec:
8 |   destination:
9 |     server: https://kubernetes.default.svc
10 |     namespace: argocd
11 |   project: default
12 |   source:
13 |     path: apps/
14 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
15 |     targetRevision: master
16 |   syncPolicy:
17 |     automated:
18 |       prune: true
19 |       selfHeal: true
--------------------------------------------------------------------------------
/kustom/argo-cd/base/argocd-cm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 |   name: argocd
5 |   namespace: argocd
6 |   labels:
7 |     app.kubernetes.io/name: argocd-cm
8 |     app.kubernetes.io/part-of: argocd
9 | data:
10 |   repositories: |
11 |     - url: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
12 |       sshPrivateKeySecret:
13 |         name: argo-github-key
14 |         key: sshPrivateKey
15 |   helm.repositories: |
16 |     - name: stable
17 |       url: https://kubernetes-charts.storage.googleapis.com
--------------------------------------------------------------------------------
/apps/templates/argocd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 |   name: argo-cd
5 |   namespace: argocd
6 |   finalizers:
7 |   - resources-finalizer.argocd.argoproj.io
8 | spec:
9 |   destination:
10 |     server: https://kubernetes.default.svc
11 |     namespace: argocd
12 |   project: default
13 |   source:
14 |     path: kustom/argo-cd
15 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
16 |     targetRevision: master
17 |   syncPolicy:
18 |     automated:
19 |       prune: true
20 |       selfHeal: true
--------------------------------------------------------------------------------
/apps/README.md:
--------------------------------------------------------------------------------
1 | # Apps
2 | This folder stores all of our application templates. These templates reference folders in our git repo to use for argo-cd deployments. To add an application for argo-cd to manage, add its application manifest to the templates folder and add a reference to it in the kustomization.yaml in this directory. The manifests and deployments themselves should be placed in the kustom folder at the root of this repository. If you're looking to add another type of project (helm, jsonnet, etc.), create a new folder for it in the root of this repo so we can better track where apps are located.
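3 | 
4 | As a sketch, a new template (the name `my-app` and path `kustom/my-app` below are placeholders) follows the same shape as the existing manifests in this folder:
5 | 
6 | ```yaml
7 | apiVersion: argoproj.io/v1alpha1
8 | kind: Application
9 | metadata:
10 |   name: my-app
11 |   namespace: argocd
12 | spec:
13 |   destination:
14 |     server: https://kubernetes.default.svc
15 |     namespace: my-app
16 |   project: default
17 |   source:
18 |     path: kustom/my-app
19 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
20 |     targetRevision: master
21 |   syncPolicy:
22 |     automated:
23 |       prune: true
24 |       selfHeal: true
25 | ```
26 | 
27 | After adding the file, list it under `resources:` in this folder's kustomization.yaml (e.g. `- templates/my-app.yaml`) so it gets deployed with the rest.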
--------------------------------------------------------------------------------
/kustom/argo-cd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | resources:
5 | - base/argocd-namespace.yaml
6 | - base/argocd-repo-key.yaml
7 | - base/argocd-cm.yaml
8 | - https://raw.githubusercontent.com/argoproj/argo-cd/v2.1.3/manifests/install.yaml
9 | - base/argocd-ingress.yaml
10 | - repos/
11 | 
12 | patchesStrategicMerge:
13 | - overlays/argo-cd-server-deployment.yaml
14 | - overlays/argocd-cm-configmap.yaml
15 | - overlays/argocd-ssh-known-hosts-cm.yaml # Fix for argo issue
16 | 
17 | 
18 | namespace: argocd
--------------------------------------------------------------------------------
/apps/templates/sealed-secrets.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 |   name: sealed-secrets
5 |   namespace: argocd
6 |   annotations:
7 |     argocd.argoproj.io/sync-wave: "5"
8 | spec:
9 |   project: default
10 |   source:
11 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
12 |     path: kustom/sealed-secrets
13 |     targetRevision: master
14 |   destination:
15 |     server: https://kubernetes.default.svc
16 |     namespace: sealed-secrets
17 |   syncPolicy:
18 |     automated:
19 |       prune: false
20 |       selfHeal: true
--------------------------------------------------------------------------------
/kustom-charts/README.md:
--------------------------------------------------------------------------------
1 | # Kustom-charts
2 | This folder will hold all [kustomized-helm](https://github.com/kubernetes-sigs/kustomize/blob/master/examples/chart.md) charts. This allows for easy deployment of applications with the ability to kustomize them.
3 | 
4 | 
5 | This is the command that's run on the argo-cd side to deploy these apps:
6 | ```bash
7 | kustomize build --enable-helm
8 | ```
9 | This runs the equivalent of the helm template command, makes changes to the output, and applies the result. This is essential for things like removing grafana's default password, configuring ldap settings, and using sealed-secrets.
--------------------------------------------------------------------------------
/kustom/argo-cd/README.md:
--------------------------------------------------------------------------------
1 | # Argo-CD
2 | 
3 | [Argo-cd](https://argo-cd.readthedocs.io/en/stable/) keeps our kubernetes cluster in sync with this repository. Any time a change is made to this repo, argo-cd applies it to the cluster. This gives us the benefit of versioning, a change record, and consistent deployments.
4 | 
5 | ## Setup
6 | 
7 | 1. (Optional) If you want to enable ingress, change the url in base/argocd-ingress.yaml
8 | 2. (Optional) If you want to add private repos, add a credential template into the repos folder and add it to your kustomization.yaml
9 | 
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/README.md:
--------------------------------------------------------------------------------
1 | # kube-prometheus-stack
2 | This is a full implementation of a monitoring system based on the community kube-prometheus-stack helm chart.
Full documentation on that project is available [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack).
3 | 
4 | ## Folder Structure
5 | This directory contains base and overlays folders. Items in base are resources that are added without having to modify existing resources. Items in overlays are merged with existing resources (or, in the case of delete-grafana-secret.yaml, remove default resources from the helm chart).
--------------------------------------------------------------------------------
/apps/templates/ingress-nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 |   name: ingress-nginx
5 |   namespace: argocd
6 |   finalizers:
7 |   - resources-finalizer.argocd.argoproj.io
8 | spec:
9 |   destination:
10 |     server: https://kubernetes.default.svc
11 |     namespace: argocd
12 |   project: default
13 |   source:
14 |     path: kustom-charts/ingress-nginx
15 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
16 |     targetRevision: master
17 |     plugin:
18 |       name: kustomized-helm
19 |   syncPolicy:
20 |     automated:
21 |       prune: true
22 |       selfHeal: true
--------------------------------------------------------------------------------
/kustom/sealed-secrets/README.md:
--------------------------------------------------------------------------------
1 | # Sealed Secrets
2 | Bitnami sealed secrets allow us to asymmetrically encrypt sensitive data and have our kubernetes cluster "unseal" (decrypt) it for use with our various applications. This lets us store those secrets in github safely and have argo-cd use them when deploying apps and itself.
3 | 
4 | Because the sealing key is stored in the kubernetes cluster, if we want to redeploy without having to regenerate all of our sealed secrets, we'll need to back that key up and store it in our password manager. Instructions on this process (and on restoring) are available [here](https://github.com/bitnami-labs/sealed-secrets#how-can-i-do-a-backup-of-my-sealedsecrets).
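5 | 
6 | As a rough sketch (the secret name and literal below are placeholders), sealing a secret looks like this:
7 | 
8 | ```bash
9 | # Render a normal secret manifest locally without applying it to the cluster
10 | kubectl create secret generic my-secret -n my-namespace \
11 |   --from-literal=password=changeme --dry-run=client -o yaml > secret.yaml
12 | 
13 | # Encrypt it against the controller's public key
14 | kubeseal --controller-namespace sealed-secrets --format yaml < secret.yaml > my-sealedsecret.yaml
15 | ```
16 | 
17 | The resulting SealedSecret manifest can be committed to the repo; only the controller inside the cluster can decrypt it back into a regular Secret.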
--------------------------------------------------------------------------------
/kustom/argo-cd/overlays/argocd-cm-configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 |   name: argocd-cm
5 |   labels:
6 |     app.kubernetes.io/name: argocd-cm
7 |     app.kubernetes.io/part-of: argocd
8 | data:
9 |   helm.repositories: |
10 |     - name: stable
11 |       url: https://charts.helm.sh/stable
12 |     - name: prometheus-community
13 |       url: https://prometheus-community.github.io/helm-charts
14 |   resource.customizations.health.bitnami.com_SealedSecret: |
15 |     hs = {}
16 |     hs.status = "Healthy"
17 |     hs.message = "Controller doesn't report resource status"
18 |     return hs
19 |   configManagementPlugins: |
20 |     - name: kustomized-helm
21 |       generate:
22 |         command: [sh, -c]
23 |         args: ["kustomize build --enable-helm"]
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | 
4 | helmCharts:
5 | - name: kube-prometheus-stack
6 |   releaseName: boilerplate
7 |   version: 23.1.1
8 |   repo: https://prometheus-community.github.io/helm-charts
9 |   valuesFile: values.yaml
10 | - name: prometheus-blackbox-exporter
11 |   releaseName: boilerplate
12 |   version: 5.3.1
13 |   repo: https://prometheus-community.github.io/helm-charts
14 | 
15 | resources:
16 | # - "base/grafana/datasources/datasource.yaml" # Uncomment this and replace the path with any datasources you want to add.
17 | - "base/grafana/secrets/grafana-secret.yaml"
18 | 
19 | patchesStrategicMerge:
20 | - "overlays/grafana/delete-grafana-secret.yaml"
21 | # Uncomment these and modify them if you want to force grafana onto a certain node.
22 | # - "overlays/grafana/node.yaml"
23 | # - "overlays/blackbox/node.yaml"
--------------------------------------------------------------------------------
/apps/templates/kube-prometheus-stack.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 |   name: kube-prometheus-stack
5 |   namespace: argocd
6 |   finalizers:
7 |   - resources-finalizer.argocd.argoproj.io
8 |   annotations:
9 |     notifications.argoproj.io/subscribe.on-sync-succeeded.slack: devops-notifications
10 |     notifications.argoproj.io/subscribe.on-deployed.slack: devops-notifications
11 |     notifications.argoproj.io/subscribe.on-sync-running.slack: devops-notifications
12 |     notifications.argoproj.io/subscribe.on-sync-failed.slack: devops-notifications
13 |     notifications.argoproj.io/subscribe.on-sync-status-unknown.slack: devops-notifications
14 | spec:
15 |   destination:
16 |     server: https://kubernetes.default.svc
17 |     namespace: monitoring
18 |   project: default
19 |   source:
20 |     path: kustom-charts/kube-prometheus-stack
21 |     repoURL: git@github.com:SelfhostedPro/argo-cd-aoa-boilerplate.git
22 |     targetRevision: master
23 |     plugin:
24 |       name: kustomized-helm
25 |   syncPolicy:
26 |     syncOptions:
27 |     - CreateNamespace=true
--------------------------------------------------------------------------------
/kustom-charts/kube-prometheus-stack/base/grafana/secrets/grafana-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: bitnami.com/v1alpha1
2 | kind: SealedSecret
3 | metadata:
4 |   creationTimestamp: null
5 |   name: grafana
6 |   namespace: monitoring
7 |   annotations:
8 |     sealedsecrets.bitnami.com/managed: "true"
9 | spec:
10 |   encryptedData:
11 |     admin-password: AgA/SDNEo5BFfx5Etd+vzlFaPzVR6hWpk8ezOAM6o/btt/Xn3cKoFfNQW1SpXr1EeyJ1E3hVlNTMC3eDHg/WcFeNIgT+RDHvZepOAW0z+ZtDRkUKSzV2jBuIs0ZweHWezrpkXyBcsxC0ZWBrAiOBf/JSye1Wy0yayEnpjhRz2XzeUq9gevwntw8oezLOkeqJefh5Bd18lqCYFxf2EZr6mgV8+/bdUUb/7ilECBTum7TwZZ9bqrmMAJa+Tii6f+9CDEeuWJD6h0uiyC8Z2+iBotDJIMm/w9QpDnGZi34qZY0skPlnXjhP+WcBI9AivBHvFeQqmqnp/8wrJa9CSHQYOhqnGC/ioEB6kfa4voncQgW/RHupAuhj3G8MaQ0JTn2uHrfXg/t5Au/d6FO5cCMttn6vDJxX19HwmgEqkVLL7T3n0tkHZmWTKGaX4ebsNU8yDMOI0t4SBfskFvwG+WpgoCTv9VpMHAnE35NF9Zq9+qKrUj/E+6EfKkxu2kwy/o+yGwDAA3PyL4ZwGj2F7XpRGcXevEC3yo733Kna6iQ7c3x0EQGWhAwSsNf7syJMTfo0iG/7bezp+pTQb0x2YTccR1eFWS+k9a62bH4jCfulFex877C2kD/aImeS7TJ+t0fexWvyKEwPNfB+6eAlp5DdrDNp1tL2sIOzEDg+NqiR9xn/lfXgIq/WvyJ+UXPwiRer3hoOleRYuIGBCAO3cl01vYUA6H3bRIJLb1R+53Q=
12 |     admin-user: AgCcnFiITx/G1FG5bMqPDePsXB1yXvhWmflVcELP4++U1Z5vCtsrUFvaBFgX7F6Jt4l+Mf02uNbAKkXsuze5iplaflcpekksECTfnpXKsNLJHevmXEhyxPOtsA3I/bHtwoO+z7uY2Y/nztRHosUQrflqZ0xL/F+TMpcDkXekSRuO3zrbHixEirG7qsoA5EHPipXd3qyXE5+ti8ad4aOz3RV2SDh1U46iUkY/rVAK4ECNaiRr/9opX2Y2Vpc1iX3XwvqTwnFBT6RIbUfZH0xxrygBOTqgAi2vTdXFUXZxpqXvwZWsiq9fc+nRCdAMS2H8zUkfMn4745iMHmm+oDwc1vcObii8opB1MC2GInkWe00Pdbf3Xrz9lnScEE+34iuwwsg97pG7KDO39wqjmYJRqjcjx3KUkGoJhvXTK8t587yzm3WVHogRBPjUdgB2FzIYeQfqgu25p2eMk3fZhNoisBz4m8Rzn/jxulLUc+3Xo+6MN2W+TUsr+OM9Ej1fFI23kGNkL7Kd/jYQ77fphYybTPy+dG6FXQvDbGCQVpt3O5Au4+bQ9OBawLce2N5vqXHoV0yvu9rl1PaZvoIy1eCyTHEM/JIYrxM7ZAHmedn1Xk1AKzCUUwtC+HyAPgCTGaH0t0mhRY0yC5Ws6hR7I1H1pKL8BK3eqQBgKqfcnEycYVysvRU++Apv8rBi8LEjwPkIitKeCSc7JQ==
13 |   template:
14 |     data: null
15 |     metadata:
16 |       creationTimestamp: null
17 |       name: grafana
18 |       namespace: monitoring
19 |     type: Opaque
20 | 
21 | 
--------------------------------------------------------------------------------
/kustom/argo-cd/overlays/argocd-ssh-known-hosts-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | 4 | metadata: 5 | name: argocd-ssh-known-hosts-cm 6 | labels: 7 | app.kubernetes.io/name: argocd-ssh-known-hosts-cm 8 | 9 | data: 10 | ssh_known_hosts: | 11 | bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== 12 | github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl 13 | github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== 14 | github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= 15 | gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= 16 | gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf 17 | gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 18 | ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H 19 | vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Argo-cd App of Apps Boilerplate 2 | 3 | This repo is a boilerplate to get argo-cd up and running in a self-managed state quickly and easily with a private github repo. 
(also works for public repos)
4 | 
5 | ## Setup
6 | 
7 | ### Requirements on local machine
8 | 
9 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) - Kubernetes CLI tool
10 | - [kustomize](https://kustomize.io) - Templating utility for kubernetes manifests
11 | - [kubeseal](https://github.com/bitnami-labs/sealed-secrets#homebrew) - Utility for generating sealed secrets
12 | 
13 | ### Public Repo Changes Needed:
14 | 
15 | When using a public repo we don't need to set up sealed secrets (at least for the core argo-cd setup). You'll want to make the following changes:
16 | 
17 | 1. Delete the following files/folders:
18 | ```bash
19 | apps/templates/sealed-secrets.yaml
20 | kustom/sealed-secrets
21 | kustom/argo-cd/base/argocd-repo-key.yaml
22 | ```
23 | 
24 | 2. Then change `apps/kustomization.yaml` and remove the following line:
25 | ```yaml
26 | - templates/sealed-secrets.yaml
27 | ```
28 | 
29 | 3. Finally, change `kustom/argo-cd/kustomization.yaml` and remove the following line:
30 | ```yaml
31 | - base/argocd-repo-key.yaml
32 | ```
33 | 
34 | **End of public repo setup**
35 | 
36 | ### Deployment
37 | 
38 | 1. Fork this repository.
39 | 2. Change all of the git references to match your repository:
40 | 
41 | Git URLs are located in the following files:
42 | 
43 | ```bash
44 | apps/templates/*
45 | kustom/argo-cd/base/argocd-cm.yaml
46 | ```
47 | 
48 | 3. Spin up a fresh kubernetes cluster and then clone your forked repository to your local machine.
49 | 
50 | 4. Open a shell in the cloned repo and run the following command to initialize argo-cd:
51 | ```bash
52 | kustomize build kustom/argo-cd/ | kubectl apply -n argocd -f -
53 | ```
54 | 
55 | 5. Run the following command to get the argo password:
56 | ```bash
57 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
58 | ```
59 | 
60 | 6. Port-forward the web UI to your local port 8080:
61 | ```bash
62 | kubectl port-forward svc/argocd-server -n argocd 8080:443
63 | ```
64 | 
65 | 7. Open the argo-cd UI, log in, and click on the settings cog in the left bar. Add your forked github repo and generate a new ssh key to use for it (make sure you've added the key to your github account). Once finished, click on connect.
66 | 
67 | 8. Now that the repo credentials have been added to argo-cd, we're going to add our apps to argo-cd with the following command:
68 | ```bash
69 | kustomize build apps/ | kubectl -n argocd apply -f -
70 | ```
71 | 
72 | 9. You should see the apps appear in the argo-cd UI.
73 | 
74 | The argo-cd app will encounter a sync error if you're using a private repo; this is normal. We're going to generate a sealed secret in order to give argo-cd access to the repo for that app. (This sealed secret is a private key with read access to our git repo; it's safe to put into the repo because it's been asymmetrically encrypted with a key stored on the kubernetes cluster.)
75 | 
76 | 10. Once the sealed-secrets app is synced, copy the secret that was created when we initially added the repo via the UI above (it should be in the argocd namespace with a name similar to
77 | repo-1340168060) and save it as key.yaml.
78 | ```
79 | kubectl get secret -n argocd repo-1340168060 -o yaml > key.yaml
80 | ```
81 | 
82 | 11. 
Run the following command to generate a sealed key and then move it to the right location with the right name: 83 | ``` 84 | kubeseal --controller-namespace sealed-secrets --format yaml < key.yaml > sealedkey.yaml 85 | mv sealedkey.yaml kustom/argo-cd/base/argocd-repo-key.yaml 86 | ``` 87 | 88 | 12. Change the name key's value in kustom/argo-cd/base/argocd-repo-key.yaml to argo-github-key 89 | 90 | 13. Commit this change and push it to the repo in order for argo to finish the setup for argo-cd to manage the cluster and itself. 91 | -------------------------------------------------------------------------------- /kustom/argo-cd/base/argocd-repo-key.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: bitnami.com/v1alpha1 2 | kind: SealedSecret 3 | metadata: 4 | creationTimestamp: null 5 | name: argo-github-key 6 | namespace: argocd 7 | spec: 8 | encryptedData: 9 | sshPrivateKey: AgBLpP8OcwGYxV8GFkYdxNu2cZnSuGb6ctfyodMh6WlApFgOV76hoz/NEJzj/P0YKT+lwVg8k6cPW9Z2LneLk5CnLoX9ZvH18l9ofGJX5kwmQf3VFY4kv71BZSsVrqnJDvlufwfP7dGdjih7roiQJNxbIYaMAcgszWC17B4LbPM5OZveThhpSqjXXFRm2nwtbqvV+WhHMVslhKisGy/0VqMWA6SYcKcjcy2G4RZ+c/sEG1V41ecxFm8ssylC8dKQHkPkZqVvr3P0IsIhDRX1zgIcjMfi8M0kuAXzCJboKPKH/sRJVCY+RnCDZRRgIh1yDmXQBBUaNnv70ImmFISXnSiKKq5GLwpqQ1iA+JjyIu+v6W7RnH6LoVKFsCfLMXRs7vKXKTShbu8Eg1pDhBfAylewG3Vih/F2Unot2X0b50H81alSpdGkW8uZMeb0YJr1X2ThsI8dkJLIiEox5JXr8+ITdQ1G2Y2frXPMkM6gia2cSUIUrm9qj5sapXNyYNKawNcZZubvyvBBehrlUHN2e677JbJRLzZ+Tys71plnF0VNlMu0KomDtJ6Pcve8dEz6WfBbVjuzRr4JdvmBbuo9x2WZfZxDnvysDfcZwRrAWJZ5tJSJsDk/mkBf8VK1djE5TUBvcfux/hdDXyJNSz+iBNHh3mGA59c//RuVOZiNSjO15VLAKHCVmTd4S0wHXrJWJU0EMN04heS+hbq0c56HgrVQdQEU4GzhAGRFWyCus2pJWpfVOfW4MRNQocHFKOp61rbig4r2iecenkBeIuMJnoz8ia++ATaFmqMlK2yOA95fCgG45wqAu2BS+i7uMD4BKWAGjQZoXNnt61dZ7Rd3arp8Vj14evf006UV12tWe3Pthl+Go9bk9B9rynrnrNThnGLwdkK8KrTs2+q5pjL4/Ja5pTpyKe3gObDkBw9/g0QdHKhyrQIvVT0vD+Yldu5Q6b+245wz4WTno7QTKSHOx2unrbhafqSUJWUDlBm8K2+K1EI8Xx5cHqtE1WUSMsznQdyYDv++TSe4Z6kSKvmxQ8PU2YOq87lYA6XD/XBV3IeginUlvlpNE6eWzrTlz8SS9tETCKrwWjgTMxvjTYd6/f8+0VG8Eq1JvM70hgu+++DUmm3NSepPgRWQzHEGwndM9GXm3845Jj5PO74Vs7BB/i8WvOnlrSQW+LqwgYbA95al+ippKdJgKjfhhfwbjTZxCKxK//Mo1qowPFNXzlzDbUUPzHyKan2cWfMWUFyQmwzxBIU+OZRZ4u11zMfy9flK/fwMoxHvXDzzhRj/Wq9bvj/FOZVSMvPg4z7Iq/0LbOR4bU6KJ2a+HjgI4EEEnLFjvRwenFcraAI+e/hH+S4IcX8kk5Og1igwYOzx0KWqHSHi8Zz6QjsiPc/9xW8LfPewEas4P9mT8md7E3DUKGAIHaliCMYMhDDexncEA7pc6RqUK7JE4+PiIsE/2jU7QjN/sT5zy8P/MyZaUQNKFbbLI6Wf8jAV/qIxqCn5DsiAZZVD5PWwSaPVOcYWlCDwEcrEN/XgGAmFXq8NOqGrKv4A0sSdVChBWkvlGfoMhKxwMPniWaftNOKMkMIB4CxCmjdGsqhi4m/o+irfEu8AS8rvb6whSCrw35m7uzCqGPPHwEFTV7HfR0sD310hlV9raGblTMnIRSw5WLWT0Xi5OtXDhlqHPp37HpVcrbrrT/OZ/KAhkZdpyyPE3MhbwgG091MeC900GJAuCzC7Hk41rpxGm/ULIu0rA/DxoYY0wYXyqxm9Or4tohcjzbw6EKcoPNn8f9b3BLHIbOg5CP5IVvqJymSvZkNLGY2KU7pHO/2xAQLciesDWe3Hjy84YAHHs71Y99+lYX3OWqvScMLpwyGx+6ydcd4j72P2tkXYPDtdsZz1FkYdA8KZ1MPy3NgmGqGTd9vt7azbM7NG6O4p2YVjhPqFoNR5tamF3GXW+bVXkUCpxh7H4zUYq6NKy8hNtAKF2xL3dnS+Yl6VPQ1824wsBcbewa3sVF4oaoT/olKT80mbQQpoiy7qngU4Rq1och9OtdRME88sns0H7TgK6POAkCIFL6wK14ABrrzeIT4n7C0YfAY9/p073wY58DOdhgAhBUTBI5qJjf595Y4FvZnoDepW0yV2/hKTOK7yNYm2be8DbINIzVocTIp4fFrceLnhUjbfzsn45PXT3wNk4fwiu3je47K53ScYZVtUrzgb+k0Se4imISebAyopwwb88d+Xb2mLQC/Form2IoyBMNQq5jtGCtT/26678+86NG0/T4el4kqixp+rcwUYDKMlmpkRBqWHm+zXH4yObU41K1GRQkR2yOB69qgzf0zReZZ6xMK/C4oxM28byynqOxpQQ7OCnbPVWR4EtirLMTvGandfD4wLZo6OIfSkKjPFewjMnLgRCaKfkHQx2bpcbujo81tuu/c3lrMBaiqsfKFZadsFVsFpRQoW6xtghabvpwETnsioKLptj8mIXIVcwNFYNYG1po0wEHYUdQlMfxYPZ8lwYlW0wiTzQmE5Hwo3xBGf38QYIG21piP9mhUwXazYS0kCKJ49epjJpZAYBn2qLbePHktV5bRPuRenI/DbQYlnDwc/YggVTuW9xS
Dg4i4/ecT0Dxfnta824xSfDGLYSiFiqnZAfhrKmOGAwRvLpwxySvgP84fNRX1L4GlnABVh3DYmxfXfxT7LGDHKgd2DTenwyloel6xJu5q3OjV0LlZup7t508w9cNmQxpzAssro+YMxoTGhSzfSSY8MPa4/mdSzBzAQ9i4AgWa28BJFHV0Ry4I9K5fR+MyG6w1n50f5DuKUOnCB/tqrfxlh8KEJ/M+IViy5oZOGYvWmF1R4c57qhQ6UhWvuJXXWZM7KVlabnclPJxHD5BzQDQXtw05MYpWGEnV2MyCmSq17xhpYKZR8wsEGy/HP71bA0UAYQDfkMNIoXNZo+TGhjHsLsSTGV4b4YvqY4sq1zN/D2iZuvM62s42rgKdKuXZ6DkFgkgEuC8ebhSlxcIA6+I/iGevyQgQMEvXSxQSxVVur7zCOPlVIkK3YibP5tOyG1UD1xqs2wBIi7lMwS9+N2XTOMhEO7+RwIW/aT3C3hSO+20T94HxAdYUQ8rSf5RnsKuFn+6rVvHZojCc2cDOU55vPGB+nG1gN+/5ySyi9Vo4HIzKBHS/lZPbvU1CESRw58U6BNBOYCZfb+6mRScw6h1r983NPwD2PHv6wV2t/9SPjvRRy0XHMfMSm/ygyn7VLR0rJ8A+HGYhoNy4t40RdeXRu9WQvoovor0UQJPwJYRoWdBkZ7smTwHam7QvxKz9SRPggZ47HJaSldQo5JzlWDjIyXMnUDw30vlX4FzrGf+gkOFhK7lCD8ivh5fUnDdkPhFIsfoM14ueKpyFbCgd8HIzRilmUag3HA5LUL9Acp1h0uf6Pw8sB6w5ul1k2LrkpKxQT8bTtw9sFXgoArqtA4tEyRU58EyefSN0gb5Ti3o/uYXpo9++uDYol1ZVbJcGsmkkzj2mX1buWcB5IjjVjIe/TT1bqPLU85rbdwwFd6JGd4GM2dmDuHbTvK+aLdRGJK1pelvRscz3Oh4XIpltw9fVz9I0HNoFG8naA5kK989TIysashje3H51Sffagxj7th61Wv7rtEzKKWtmiUqHRnYiAYj0+BnSuuW97nLv+1m0y32Tw7xBICB1Y7dIJUdRQaZ19hxfnSPH4CX65AdUe+F7YGTMgpPEXpdtFSH3Esa4IWudYPhrzSI/BLiEO1gYTEq/v8seM1xxghAtfu+TX9WNscgb6hA51FQvNnpm2gQjRw+sHijAOEVj4fklNX5quhjuQJ2pLX/uwJl1yQ1jpmhjYb+b2xYhz0X9BErfQD05Fjam7Pv6aCn7FKHCQOQnYZ2FU8UkrbxIjs1f+NVUVwRv16bTmoXSRzQJPlqrS7YuhVcKyEr0MCbMy8ESGFBzLCEoC/ZtF2r+EgK9L1E3YzidkMgEYWRUk8xDWEWBRgZ/UQoBpwfyDdBNGDJ9976QT5TB2GS9o62GXY+gJ81bNpe60bjAPKb2ks5huzosrvIbHinwi7VxnrG3eSOg6LYuzU8T5G3l1wJFgXjpuTTzPVTF37VmloJyokPN1j8DKgkhzK6fg/KQLzIZ+T90rYYD+JaAR4NYUtSYDmtzxXjt1vQkGNmgx3HoM7n5TRA== 10 | url: AgBvgjpBdttjJtEbjdzURoLlBOKma9bdY6jxHRXb4kWVCLQAU2d3PEjOjxfAIPPLGa47CgCAB+ytF0kt5xMqMMblNfzYldwjPGeIt62bdfTbk+QSWdByxqMR1N5KrDV50dgp5mdjXjKHXx7Y0QAidiXZQ+NEE748eNpwjci33bt5crZcK9WJuurZUvuPruJJZ2nc49l9NcJh3VTlnp3Lt8Jr/+o95rPbbWlaS+5qQWShzKxGtgAF2cmx8Pt6aqIKR0plNEWlJDcrZTZBbz2/C+yvCT0Z+NJjzUprGAWQ14ddqu689gPbe06UEoBvc3FcPJlUZa5CqgBAzEpLkbdQjANyqFPYG0n8RPDsPOUbBowaGCnmiTY9xVrYDfzmk745tjz7Ii0pKC0kPmkfKLWDaCOh6GmZIVC5Hm36vljEePzRnKYki/Yb0TXVv44SN/CYGxKv4NwTD+sTn9QMT2uS8DC8k7QjZiuTJyGvQ1oRv6qp6CfeBKZZ066z980YIFuvu+MI1FDy8q6W0kHV6WMn+aBw8lymZbosux2thK4xy8nmmkPo/hh/l0Nr81Ebdp0Ei9rGcQvCYquUK/T8lemL6pUWBKdAxMkl81+Ke2cZLPci6apLsafxz+cDnQt1dZryNlLpg4BuzvPCaiNOtDTD/QwJKhXF/kKaUiZ46YCTI2azLaMTjrNl9/t9QfnDfda39fXRlBEMSP2hfm1nsvXTc32F4FJ241yQKwee3OyXcsym6Ov3PAHzPYJbB9bE+8HLtW3mPWyRUPT5Kg== 11 | template: 12 | data: null 13 | metadata: 14 | annotations: 15 | managed-by: argocd.argoproj.io 16 | creationTimestamp: null 17 | labels: 18 | argocd.argoproj.io/secret-type: repo-creds 19 | managedFields: 20 | - apiVersion: v1 21 | fieldsType: FieldsV1 22 | fieldsV1: 23 | f:data: 24 | .: {} 25 | f:sshPrivateKey: {} 26 | f:url: {} 27 | f:metadata: 28 | f:annotations: 29 | .: {} 30 | f:managed-by: {} 31 | f:labels: 32 | .: {} 33 | f:argocd.argoproj.io/secret-type: {} 34 | f:type: {} 35 | manager: argocd-server 36 | operation: Update 37 | time: "2021-10-16T18:11:34Z" 38 | name: argocd-github-key 39 | namespace: argocd 40 | type: Opaque 41 | 42 | -------------------------------------------------------------------------------- /kustom/sealed-secrets/controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: Role 4 | metadata: 5 | annotations: {} 6 | labels: 7 | name: sealed-secrets-service-proxier 8 | name: sealed-secrets-service-proxier 9 | namespace: kube-system 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resourceNames: 14 | - 
'http:sealed-secrets-controller:' 15 | - sealed-secrets-controller 16 | resources: 17 | - services/proxy 18 | verbs: 19 | - create 20 | - get 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1beta1 23 | kind: Role 24 | metadata: 25 | annotations: {} 26 | labels: 27 | name: sealed-secrets-key-admin 28 | name: sealed-secrets-key-admin 29 | namespace: kube-system 30 | rules: 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - secrets 35 | verbs: 36 | - create 37 | - list 38 | --- 39 | apiVersion: v1 40 | kind: Service 41 | metadata: 42 | annotations: {} 43 | labels: 44 | name: sealed-secrets-controller 45 | name: sealed-secrets-controller 46 | namespace: kube-system 47 | spec: 48 | ports: 49 | - port: 8080 50 | targetPort: 8080 51 | selector: 52 | name: sealed-secrets-controller 53 | type: ClusterIP 54 | --- 55 | apiVersion: rbac.authorization.k8s.io/v1beta1 56 | kind: RoleBinding 57 | metadata: 58 | annotations: {} 59 | labels: 60 | name: sealed-secrets-service-proxier 61 | name: sealed-secrets-service-proxier 62 | namespace: kube-system 63 | roleRef: 64 | apiGroup: rbac.authorization.k8s.io 65 | kind: Role 66 | name: sealed-secrets-service-proxier 67 | subjects: 68 | - apiGroup: rbac.authorization.k8s.io 69 | kind: Group 70 | name: system:authenticated 71 | --- 72 | apiVersion: apiextensions.k8s.io/v1 73 | kind: CustomResourceDefinition 74 | metadata: 75 | name: sealedsecrets.bitnami.com 76 | spec: 77 | group: bitnami.com 78 | names: 79 | kind: SealedSecret 80 | listKind: SealedSecretList 81 | plural: sealedsecrets 82 | singular: sealedsecret 83 | scope: Namespaced 84 | versions: 85 | - name: v1alpha1 86 | schema: 87 | openAPIV3Schema: 88 | properties: 89 | spec: 90 | type: object 91 | x-kubernetes-preserve-unknown-fields: true 92 | status: 93 | x-kubernetes-preserve-unknown-fields: true 94 | type: object 95 | served: true 96 | storage: true 97 | subresources: 98 | status: {} 99 | --- 100 | apiVersion: rbac.authorization.k8s.io/v1beta1 101 | kind: RoleBinding 102 | metadata: 103 | annotations: {} 104 | labels: 105 | name: sealed-secrets-controller 106 | name: sealed-secrets-controller 107 | namespace: kube-system 108 | roleRef: 109 | apiGroup: rbac.authorization.k8s.io 110 | kind: Role 111 | name: sealed-secrets-key-admin 112 | subjects: 113 | - kind: ServiceAccount 114 | name: sealed-secrets-controller 115 | namespace: kube-system 116 | --- 117 | apiVersion: rbac.authorization.k8s.io/v1beta1 118 | kind: ClusterRoleBinding 119 | metadata: 120 | annotations: {} 121 | labels: 122 | name: sealed-secrets-controller 123 | name: sealed-secrets-controller 124 | roleRef: 125 | apiGroup: rbac.authorization.k8s.io 126 | kind: ClusterRole 127 | name: secrets-unsealer 128 | subjects: 129 | - kind: ServiceAccount 130 | name: sealed-secrets-controller 131 | namespace: kube-system 132 | --- 133 | apiVersion: rbac.authorization.k8s.io/v1beta1 134 | kind: ClusterRole 135 | metadata: 136 | annotations: {} 137 | labels: 138 | name: secrets-unsealer 139 | name: secrets-unsealer 140 | rules: 141 | - apiGroups: 142 | - bitnami.com 143 | resources: 144 | - sealedsecrets 145 | verbs: 146 | - get 147 | - list 148 | - watch 149 | - apiGroups: 150 | - bitnami.com 151 | resources: 152 | - sealedsecrets/status 153 | verbs: 154 | - update 155 | - apiGroups: 156 | - "" 157 | resources: 158 | - secrets 159 | verbs: 160 | - get 161 | - create 162 | - update 163 | - delete 164 | - apiGroups: 165 | - "" 166 | resources: 167 | - events 168 | verbs: 169 | - create 170 | - patch 171 | --- 172 | apiVersion: v1 173 | kind: 
ServiceAccount 174 | metadata: 175 | annotations: {} 176 | labels: 177 | name: sealed-secrets-controller 178 | name: sealed-secrets-controller 179 | namespace: kube-system 180 | --- 181 | apiVersion: apps/v1 182 | kind: Deployment 183 | metadata: 184 | annotations: {} 185 | labels: 186 | name: sealed-secrets-controller 187 | name: sealed-secrets-controller 188 | namespace: kube-system 189 | spec: 190 | minReadySeconds: 30 191 | replicas: 1 192 | revisionHistoryLimit: 10 193 | selector: 194 | matchLabels: 195 | name: sealed-secrets-controller 196 | strategy: 197 | rollingUpdate: 198 | maxSurge: 25% 199 | maxUnavailable: 25% 200 | type: RollingUpdate 201 | template: 202 | metadata: 203 | annotations: {} 204 | labels: 205 | name: sealed-secrets-controller 206 | spec: 207 | containers: 208 | - args: [] 209 | command: 210 | - controller 211 | env: [] 212 | image: quay.io/bitnami/sealed-secrets-controller:v0.16.0 213 | imagePullPolicy: Always 214 | livenessProbe: 215 | httpGet: 216 | path: /healthz 217 | port: http 218 | name: sealed-secrets-controller 219 | ports: 220 | - containerPort: 8080 221 | name: http 222 | readinessProbe: 223 | httpGet: 224 | path: /healthz 225 | port: http 226 | securityContext: 227 | readOnlyRootFilesystem: true 228 | runAsNonRoot: true 229 | runAsUser: 1001 230 | stdin: false 231 | tty: false 232 | volumeMounts: 233 | - mountPath: /tmp 234 | name: tmp 235 | imagePullSecrets: [] 236 | initContainers: [] 237 | securityContext: 238 | fsGroup: 65534 239 | serviceAccountName: sealed-secrets-controller 240 | terminationGracePeriodSeconds: 30 241 | volumes: 242 | - emptyDir: {} 243 | name: tmp 244 | -------------------------------------------------------------------------------- /kustom-charts/kube-prometheus-stack/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for kube-prometheus-stack. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | 5 | ## Provide a name in place of kube-prometheus-stack for `app:` labels 6 | ## 7 | nameOverride: "" 8 | 9 | ## Override the deployment namespace 10 | ## 11 | namespaceOverride: "monitoring" 12 | 13 | ## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 14 | ## 15 | kubeTargetVersionOverride: "" 16 | 17 | ## Allow kubeVersion to be overridden while creating the ingress 18 | ## 19 | kubeVersionOverride: "" 20 | 21 | ## Provide a name to substitute for the full names of resources 22 | ## 23 | fullnameOverride: "" 24 | 25 | ## Labels to apply to all resources 26 | ## 27 | commonLabels: {} 28 | # scmhash: abc123 29 | # myLabel: aakkmd 30 | 31 | ## Create default rules for monitoring the cluster 32 | ## 33 | defaultRules: 34 | create: true 35 | rules: 36 | alertmanager: true 37 | etcd: true 38 | general: true 39 | k8s: true 40 | kubeApiserver: true 41 | kubeApiserverAvailability: true 42 | kubeApiserverError: true 43 | kubeApiserverSlos: true 44 | kubelet: true 45 | kubePrometheusGeneral: true 46 | kubePrometheusNodeAlerting: true 47 | kubePrometheusNodeRecording: true 48 | kubernetesAbsent: true 49 | kubernetesApps: true 50 | kubernetesResources: true 51 | kubernetesStorage: true 52 | kubernetesSystem: true 53 | kubeScheduler: true 54 | kubeStateMetrics: true 55 | network: true 56 | node: true 57 | prometheus: true 58 | prometheusOperator: true 59 | time: true 60 | 61 | ## Runbook url prefix for default rules 62 | runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md# 63 | ## Reduce app namespace alert scope 64 | appNamespacesTarget: ".*" 65 | 66 | ## Labels for default rules 67 | labels: {} 68 | ## Annotations for default rules 69 | annotations: {} 70 | 71 | ## Additional labels for PrometheusRule alerts 72 | additionalRuleLabels: {} 73 | 74 | ## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. 75 | ## 76 | # additionalPrometheusRules: [] 77 | # - name: my-rule-file 78 | # groups: 79 | # - name: my_group 80 | # rules: 81 | # - record: my_record 82 | # expr: 100 * my_record 83 | 84 | ## Provide custom recording or alerting rules to be deployed into the cluster. 
85 | ## 86 | additionalPrometheusRulesMap: {} 87 | # rule-name: 88 | # groups: 89 | # - name: my_group 90 | # rules: 91 | # - record: my_record 92 | # expr: 100 * my_record 93 | 94 | ## 95 | global: 96 | rbac: 97 | create: true 98 | pspEnabled: true 99 | pspAnnotations: {} 100 | ## Specify pod annotations 101 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor 102 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp 103 | ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl 104 | ## 105 | # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' 106 | # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' 107 | # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 108 | 109 | ## Reference to one or more secrets to be used when pulling images 110 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 111 | ## 112 | imagePullSecrets: [] 113 | # - name: "image-pull-secret" 114 | 115 | ## Configuration for alertmanager 116 | ## ref: https://prometheus.io/docs/alerting/alertmanager/ 117 | ## 118 | alertmanager: 119 | 120 | ## Deploy alertmanager 121 | ## 122 | enabled: true 123 | 124 | ## Annotations for Alertmanager 125 | ## 126 | annotations: {} 127 | 128 | ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 129 | ## 130 | apiVersion: v2 131 | 132 | ## Service account for Alertmanager to use. 133 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ 134 | ## 135 | serviceAccount: 136 | create: true 137 | name: "" 138 | annotations: {} 139 | 140 | ## Configure pod disruption budgets for Alertmanager 141 | ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget 142 | ## This configuration is immutable once created and will require the PDB to be deleted to be changed 143 | ## https://github.com/kubernetes/kubernetes/issues/45398 144 | ## 145 | podDisruptionBudget: 146 | enabled: false 147 | minAvailable: 1 148 | maxUnavailable: "" 149 | 150 | ## Alertmanager configuration directives 151 | ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file 152 | ## https://prometheus.io/webtools/alerting/routing-tree-editor/ 153 | ## 154 | config: 155 | global: 156 | resolve_timeout: 5m 157 | route: 158 | group_by: ['job'] 159 | group_wait: 30s 160 | group_interval: 5m 161 | repeat_interval: 12h 162 | receiver: 'null' 163 | routes: 164 | - match: 165 | alertname: Watchdog 166 | receiver: 'null' 167 | receivers: 168 | - name: 'null' 169 | templates: 170 | - '/etc/alertmanager/config/*.tmpl' 171 | 172 | ## Pass the Alertmanager configuration directives through Helm's templating 173 | ## engine. If the Alertmanager configuration contains Alertmanager templates, 174 | ## they'll need to be properly escaped so that they are not interpreted by 175 | ## Helm 176 | ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function 177 | ## https://prometheus.io/docs/alerting/configuration/#tmpl_string 178 | ## https://prometheus.io/docs/alerting/notifications/ 179 | ## https://prometheus.io/docs/alerting/notification_examples/ 180 | tplConfig: false 181 | 182 | ## Alertmanager template files to format alerts 183 | ## By default, templateFiles are placed in /etc/alertmanager/config/ and if 184 | ## they have a .tmpl file suffix will be loaded. 
See config.templates above 185 | ## to change, add other suffixes. If adding other suffixes, be sure to update 186 | ## config.templates above to include those suffixes. 187 | ## ref: https://prometheus.io/docs/alerting/notifications/ 188 | ## https://prometheus.io/docs/alerting/notification_examples/ 189 | ## 190 | templateFiles: {} 191 | # 192 | ## An example template: 193 | # template_1.tmpl: |- 194 | # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} 195 | # 196 | # {{ define "slack.myorg.text" }} 197 | # {{- $root := . -}} 198 | # {{ range .Alerts }} 199 | # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` 200 | # *Cluster:* {{ template "cluster" $root }} 201 | # *Description:* {{ .Annotations.description }} 202 | # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> 203 | # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> 204 | # *Details:* 205 | # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}` 206 | # {{ end }} 207 | # {{ end }} 208 | # {{ end }} 209 | 210 | ingress: 211 | enabled: false 212 | 213 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName 214 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress 215 | # ingressClassName: nginx 216 | 217 | annotations: {} 218 | 219 | labels: {} 220 | 221 | ## Hosts must be provided if Ingress is enabled. 222 | ## 223 | hosts: [] 224 | # - alertmanager.domain.com 225 | 226 | ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix 227 | ## 228 | paths: [] 229 | # - / 230 | 231 | ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) 232 | ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types 233 | # pathType: ImplementationSpecific 234 | 235 | ## TLS configuration for Alertmanager Ingress 236 | ## Secret must be manually created in the namespace 237 | ## 238 | tls: [] 239 | # - secretName: alertmanager-general-tls 240 | # hosts: 241 | # - alertmanager.example.com 242 | 243 | ## Configuration for Alertmanager secret 244 | ## 245 | secret: 246 | annotations: {} 247 | 248 | ## Configuration for creating an Ingress that will map to each Alertmanager replica service 249 | ## alertmanager.servicePerReplica must be enabled 250 | ## 251 | ingressPerReplica: 252 | enabled: false 253 | 254 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName 255 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress 256 | # ingressClassName: nginx 257 | 258 | annotations: {} 259 | labels: {} 260 | 261 | ## Final form of the hostname for each per replica ingress is 262 | ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} 263 | ## 264 | ## Prefix for the per replica ingress that will have `-$replicaNumber` 265 | ## appended to the end 266 | hostPrefix: "" 267 | ## Domain that will be used for the per replica ingress 268 | hostDomain: "" 269 | 270 | ## Paths to use for ingress rules 271 | ## 272 | paths: [] 273 | # - / 274 | 275 | ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) 276 | ## See 
https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types 277 | # pathType: ImplementationSpecific 278 | 279 | ## Secret name containing the TLS certificate for alertmanager per replica ingress 280 | ## Secret must be manually created in the namespace 281 | tlsSecretName: "" 282 | 283 | ## Separated secret for each per replica Ingress. Can be used together with cert-manager 284 | ## 285 | tlsSecretPerReplica: 286 | enabled: false 287 | ## Final form of the secret for each per replica ingress is 288 | ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} 289 | ## 290 | prefix: "alertmanager" 291 | 292 | ## Configuration for Alertmanager service 293 | ## 294 | service: 295 | annotations: {} 296 | labels: {} 297 | clusterIP: "" 298 | 299 | ## Port for Alertmanager Service to listen on 300 | ## 301 | port: 9093 302 | ## To be used with a proxy extraContainer port 303 | ## 304 | targetPort: 9093 305 | ## Port to expose on each node 306 | ## Only used if service.type is 'NodePort' 307 | ## 308 | nodePort: 30903 309 | ## List of IP addresses at which the Prometheus server service is available 310 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips 311 | ## 312 | 313 | ## Additional ports to open for Alertmanager service 314 | additionalPorts: [] 315 | 316 | externalIPs: [] 317 | loadBalancerIP: "" 318 | loadBalancerSourceRanges: [] 319 | ## Service type 320 | ## 321 | type: ClusterIP 322 | 323 | ## Configuration for creating a separate Service for each statefulset Alertmanager replica 324 | ## 325 | servicePerReplica: 326 | enabled: false 327 | annotations: {} 328 | 329 | ## Port for Alertmanager Service per replica to listen on 330 | ## 331 | port: 9093 332 | 333 | ## To be used with a proxy extraContainer port 334 | targetPort: 9093 335 | 336 | ## Port to expose on each node 337 | ## Only used if servicePerReplica.type is 'NodePort' 338 | ## 339 | nodePort: 30904 340 | 341 | ## Loadbalancer source IP ranges 342 | ## Only used if servicePerReplica.type is "LoadBalancer" 343 | loadBalancerSourceRanges: [] 344 | ## Service type 345 | ## 346 | type: ClusterIP 347 | 348 | ## If true, create a serviceMonitor for alertmanager 349 | ## 350 | serviceMonitor: 351 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 352 | ## 353 | interval: "" 354 | selfMonitor: true 355 | 356 | ## proxyUrl: URL of a proxy that should be used for scraping. 357 | ## 358 | proxyUrl: "" 359 | 360 | ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. 361 | scheme: "" 362 | 363 | ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 364 | ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig 365 | tlsConfig: {} 366 | 367 | bearerTokenFile: 368 | 369 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
370 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 371 | ## 372 | metricRelabelings: [] 373 | # - action: keep 374 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 375 | # sourceLabels: [__name__] 376 | 377 | ## RelabelConfigs to apply to samples before scraping 378 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 379 | ## 380 | relabelings: [] 381 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 382 | # separator: ; 383 | # regex: ^(.*)$ 384 | # targetLabel: nodename 385 | # replacement: $1 386 | # action: replace 387 | 388 | ## Settings affecting alertmanagerSpec 389 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec 390 | ## 391 | alertmanagerSpec: 392 | ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata 393 | ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. 394 | ## 395 | podMetadata: {} 396 | 397 | ## Image of Alertmanager 398 | ## 399 | image: 400 | repository: quay.io/prometheus/alertmanager 401 | tag: v0.22.2 402 | sha: "" 403 | 404 | ## If true then the user will be responsible to provide a secret with alertmanager configuration 405 | ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used 406 | ## 407 | useExistingSecret: false 408 | 409 | ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the 410 | ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. 411 | ## 412 | secrets: [] 413 | 414 | ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. 415 | ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. 416 | ## 417 | configMaps: [] 418 | 419 | ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for 420 | ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. 421 | ## 422 | # configSecret: 423 | 424 | ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with. 425 | ## 426 | alertmanagerConfigSelector: {} 427 | ## Example which selects all alertmanagerConfig resources 428 | ## with label "alertconfig" with values any of "example-config" or "example-config-2" 429 | # alertmanagerConfigSelector: 430 | # matchExpressions: 431 | # - key: alertconfig 432 | # operator: In 433 | # values: 434 | # - example-config 435 | # - example-config-2 436 | # 437 | ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config" 438 | # alertmanagerConfigSelector: 439 | # matchLabels: 440 | # role: example-config 441 | 442 | ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. 
443 | ## 444 | alertmanagerConfigNamespaceSelector: {} 445 | ## Example which selects all namespaces 446 | ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2" 447 | # alertmanagerConfigNamespaceSelector: 448 | # matchExpressions: 449 | # - key: alertmanagerconfig 450 | # operator: In 451 | # values: 452 | # - example-namespace 453 | # - example-namespace-2 454 | 455 | ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled" 456 | # alertmanagerConfigNamespaceSelector: 457 | # matchLabels: 458 | # alertmanagerconfig: enabled 459 | 460 | ## Define Log Format 461 | # Use logfmt (default) or json logging 462 | logFormat: logfmt 463 | 464 | ## Log level for Alertmanager to be configured with. 465 | ## 466 | logLevel: info 467 | 468 | ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the 469 | ## running cluster equal to the expected size. 470 | replicas: 1 471 | 472 | ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression 473 | ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). 474 | ## 475 | retention: 120h 476 | 477 | ## Storage is the definition of how storage will be used by the Alertmanager instances. 478 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md 479 | ## 480 | storage: {} 481 | # volumeClaimTemplate: 482 | # spec: 483 | # storageClassName: gluster 484 | # accessModes: ["ReadWriteOnce"] 485 | # resources: 486 | # requests: 487 | # storage: 50Gi 488 | # selector: {} 489 | 490 | 491 | ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false 492 | ## 493 | externalUrl: 494 | 495 | ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, 496 | ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. 497 | ## 498 | routePrefix: / 499 | 500 | ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. 501 | ## 502 | paused: false 503 | 504 | ## Define which Nodes the Pods are scheduled on. 505 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 506 | ## 507 | nodeSelector: {} 508 | 509 | ## Define resources requests and limits for single Pods. 510 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 511 | ## 512 | resources: {} 513 | # requests: 514 | # memory: 400Mi 515 | 516 | ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. 517 | ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. 518 | ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. 519 | ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. 520 | ## 521 | podAntiAffinity: "" 522 | 523 | ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. 
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone 525 | ## 526 | podAntiAffinityTopologyKey: kubernetes.io/hostname 527 | 528 | ## Assign custom affinity rules to the alertmanager instance 529 | ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 530 | ## 531 | affinity: {} 532 | # nodeAffinity: 533 | # requiredDuringSchedulingIgnoredDuringExecution: 534 | # nodeSelectorTerms: 535 | # - matchExpressions: 536 | # - key: kubernetes.io/e2e-az-name 537 | # operator: In 538 | # values: 539 | # - e2e-az1 540 | # - e2e-az2 541 | 542 | ## If specified, the pod's tolerations. 543 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 544 | ## 545 | tolerations: [] 546 | # - key: "key" 547 | # operator: "Equal" 548 | # value: "value" 549 | # effect: "NoSchedule" 550 | 551 | ## If specified, the pod's topology spread constraints. 552 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 553 | ## 554 | topologySpreadConstraints: [] 555 | # - maxSkew: 1 556 | # topologyKey: topology.kubernetes.io/zone 557 | # whenUnsatisfiable: DoNotSchedule 558 | # labelSelector: 559 | # matchLabels: 560 | # app: alertmanager 561 | 562 | ## SecurityContext holds pod-level security attributes and common container settings. 563 | ## This defaults to a non-root user with uid 1000 and gid 2000. 564 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ 565 | ## 566 | securityContext: 567 | runAsGroup: 2000 568 | runAsNonRoot: true 569 | runAsUser: 1000 570 | fsGroup: 2000 571 | 572 | ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. 573 | ## Note this is only for the Alertmanager UI, not the gossip communication. 574 | ## 575 | listenLocal: false 576 | 577 | ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. 578 | ## 579 | containers: [] 580 | 581 | # Additional volumes on the output StatefulSet definition. 582 | volumes: [] 583 | 584 | # Additional VolumeMounts on the output StatefulSet definition. 585 | volumeMounts: [] 586 | 587 | ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes 588 | ## (permissions, dir tree) on mounted volumes before starting Alertmanager 589 | initContainers: [] 590 | 591 | ## Priority class assigned to the Pods 592 | ## 593 | priorityClassName: "" 594 | 595 | ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. 596 | ## 597 | additionalPeers: [] 598 | 599 | ## PortName to use for Alertmanager. 600 | ## 601 | portName: "web" 602 | 603 | ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non-RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 604 | ## 605 | clusterAdvertiseAddress: false 606 | 607 | ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. 608 | ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
forceEnableClusterMode: false 610 | 611 | ## ExtraSecret can be used to store various data in an extra secret 612 | ## (use it for example to store hashed basic auth credentials) 613 | extraSecret: 614 | ## if not set, name will be auto generated 615 | # name: "" 616 | annotations: {} 617 | data: {} 618 | # auth: | 619 | # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 620 | # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. 621 | 622 | ## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml 623 | ## 624 | grafana: 625 | enabled: true 626 | namespaceOverride: "monitoring" 627 | 628 | # grafana.ini: 629 | # dashboards: 630 | # default_home_dashboard_path: /tmp/dashboards/General_Overview.json 631 | # users: 632 | # viewers_can_edit: false 633 | # auth: 634 | # disable_login_form: false 635 | # disable_signout_menu: false 636 | # auth.ldap: 637 | # enabled: true 638 | # allow_sign_up: true 639 | # config_file: /etc/grafana/ldap.toml 640 | # auth.anonymous: 641 | # enabled: true 642 | # org_role: Viewer 643 | 644 | # ldap: 645 | # enabled: true 646 | # existingSecret: "grafana" 647 | 648 | ## ForceDeployDatasources: create the datasource configmap even if the grafana deployment has been disabled 649 | ## 650 | forceDeployDatasources: false 651 | 652 | ## ForceDeployDashboards: create the dashboard configmap even if the grafana deployment has been disabled 653 | ## 654 | forceDeployDashboards: false 655 | 656 | ## Deploy default dashboards 657 | ## 658 | defaultDashboardsEnabled: true 659 | 660 | ## Timezone for the default dashboards 661 | ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg 662 | ## 663 | defaultDashboardsTimezone: utc 664 | 665 | adminPassword: prom-operator 666 | 667 | ingress: 668 | ## If true, Grafana Ingress will be created 669 | ## 670 | enabled: false 671 | 672 | ## Annotations for Grafana Ingress 673 | ## 674 | annotations: 675 | kubernetes.io/ingress.class: nginx 676 | # kubernetes.io/tls-acme: "true" 677 | 678 | ## Labels to be added to the Ingress 679 | ## 680 | labels: {} 681 | 682 | ## Hostnames. 683 | ## Must be provided if Ingress is enabled.
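##
## A minimal sketch of a published Grafana ingress, assuming a placeholder
## hostname and a pre-created TLS secret (both hypothetical):
# ingress:
#   enabled: true
#   hosts:
#     - grafana.example.com
#   tls:
#     - secretName: grafana-general-tls
#       hosts:
#         - grafana.example.com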
684 | ## 685 | # hosts: 686 | # - grafana.domain.com 687 | hosts: 688 | 689 | ## Path for grafana ingress 690 | path: / 691 | 692 | ## TLS configuration for grafana Ingress 693 | ## Secret must be manually created in the namespace 694 | ## 695 | # tls: 696 | 697 | sidecar: 698 | notifiers: 699 | enabled: false 700 | label: grafana_notifier 701 | dashboards: 702 | enabled: false 703 | label: grafana_dashboard 704 | folder: /tmp/dashboards 705 | ## Annotations for Grafana dashboard configmaps 706 | ## 707 | annotations: {} 708 | multicluster: 709 | global: 710 | enabled: false 711 | etcd: 712 | enabled: false 713 | provider: 714 | allowUiUpdates: false 715 | foldersFromFilesStructure: true 716 | folderAnnotation: "grafana_dashboard_folder" 717 | datasources: 718 | enabled: true 719 | defaultDatasourceEnabled: true 720 | 721 | ## URL of prometheus datasource 722 | ## 723 | # url: http://prometheus-stack-prometheus:9090/ 724 | 725 | # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default 726 | # defaultDatasourceScrapeInterval: 15s 727 | 728 | ## Annotations for Grafana datasource configmaps 729 | ## 730 | annotations: {} 731 | 732 | ## Create datasource for each Pod of Prometheus StatefulSet; 733 | ## this uses headless service `prometheus-operated` which is 734 | ## created by Prometheus Operator 735 | ## ref: https://git.io/fjaBS 736 | createPrometheusReplicasDatasources: false 737 | label: grafana_datasource 738 | 739 | extraConfigmapMounts: [] 740 | # - name: certs-configmap 741 | # mountPath: /etc/grafana/ssl/ 742 | # configMap: certs-configmap 743 | # readOnly: true 744 | 745 | ## Configure additional grafana datasources (passed through tpl) 746 | ## ref: http://docs.grafana.org/administration/provisioning/#datasources 747 | additionalDataSources: [] 748 | # - name: prometheus-sample 749 | # access: proxy 750 | # basicAuth: true 751 | # basicAuthPassword: pass 752 | # basicAuthUser: daco 753 | # editable: false 754 | # jsonData: 755 | # tlsSkipVerify: true 756 | # orgId: 1 757 | # type: prometheus 758 | # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 759 | # version: 1 760 | 761 | ## Passed to grafana subchart and used by servicemonitor below 762 | ## 763 | service: 764 | portName: service 765 | 766 | ## If true, create a serviceMonitor for grafana 767 | ## 768 | serviceMonitor: 769 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 770 | ## 771 | interval: "" 772 | selfMonitor: true 773 | 774 | # Path to use for scraping metrics. Might be different if server.root_url is set 775 | # in grafana.ini 776 | path: "/metrics" 777 | 778 | 779 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
780 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 781 | ## 782 | metricRelabelings: [] 783 | # - action: keep 784 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 785 | # sourceLabels: [__name__] 786 | 787 | ## RelabelConfigs to apply to samples before scraping 788 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 789 | ## 790 | relabelings: [] 791 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 792 | # separator: ; 793 | # regex: ^(.*)$ 794 | # targetLabel: nodename 795 | # replacement: $1 796 | # action: replace 797 | 798 | ## Component scraping the kube api server 799 | ## 800 | kubeApiServer: 801 | enabled: true 802 | tlsConfig: 803 | serverName: kubernetes 804 | insecureSkipVerify: false 805 | serviceMonitor: 806 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 807 | ## 808 | interval: "" 809 | ## proxyUrl: URL of a proxy that should be used for scraping. 810 | ## 811 | proxyUrl: "" 812 | 813 | jobLabel: component 814 | selector: 815 | matchLabels: 816 | component: apiserver 817 | provider: kubernetes 818 | 819 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 820 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 821 | ## 822 | metricRelabelings: [] 823 | # - action: keep 824 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 825 | # sourceLabels: [__name__] 826 | 827 | ## RelabelConfigs to apply to samples before scraping 828 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 829 | ## 830 | relabelings: [] 831 | # - sourceLabels: 832 | # - __meta_kubernetes_namespace 833 | # - __meta_kubernetes_service_name 834 | # - __meta_kubernetes_endpoint_port_name 835 | # action: keep 836 | # regex: default;kubernetes;https 837 | # - targetLabel: __address__ 838 | # replacement: kubernetes.default.svc:443 839 | 840 | ## Component scraping the kubelet and kubelet-hosted cAdvisor 841 | ## 842 | kubelet: 843 | enabled: true 844 | namespace: kube-system 845 | 846 | serviceMonitor: 847 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 848 | ## 849 | interval: "" 850 | 851 | ## proxyUrl: URL of a proxy that should be used for scraping. 852 | ## 853 | proxyUrl: "" 854 | 855 | ## Enable scraping the kubelet over https. For requirements to enable this see 856 | ## https://github.com/prometheus-operator/prometheus-operator/issues/926 857 | ## 858 | https: true 859 | 860 | ## Enable scraping /metrics/cadvisor from kubelet's service 861 | ## 862 | cAdvisor: true 863 | 864 | ## Enable scraping /metrics/probes from kubelet's service 865 | ## 866 | probes: true 867 | 868 | ## Enable scraping /metrics/resource from kubelet's service 869 | ## This is disabled by default because container metrics are already exposed by cAdvisor 870 | ## 871 | resource: false 872 | # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource 873 | resourcePath: "/metrics/resource/v1alpha1" 874 | 875 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
876 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 877 | ## 878 | cAdvisorMetricRelabelings: [] 879 | # - sourceLabels: [__name__, image] 880 | # separator: ; 881 | # regex: container_([a-z_]+); 882 | # replacement: $1 883 | # action: drop 884 | # - sourceLabels: [__name__] 885 | # separator: ; 886 | # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) 887 | # replacement: $1 888 | # action: drop 889 | 890 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 891 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 892 | ## 893 | probesMetricRelabelings: [] 894 | # - sourceLabels: [__name__, image] 895 | # separator: ; 896 | # regex: container_([a-z_]+); 897 | # replacement: $1 898 | # action: drop 899 | # - sourceLabels: [__name__] 900 | # separator: ; 901 | # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) 902 | # replacement: $1 903 | # action: drop 904 | 905 | ## RelabelConfigs to apply to samples before scraping 906 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 907 | ## 908 | ## metrics_path is required to match upstream rules and charts 909 | cAdvisorRelabelings: 910 | - sourceLabels: [__metrics_path__] 911 | targetLabel: metrics_path 912 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 913 | # separator: ; 914 | # regex: ^(.*)$ 915 | # targetLabel: nodename 916 | # replacement: $1 917 | # action: replace 918 | 919 | ## RelabelConfigs to apply to samples before scraping 920 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 921 | ## 922 | probesRelabelings: 923 | - sourceLabels: [__metrics_path__] 924 | targetLabel: metrics_path 925 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 926 | # separator: ; 927 | # regex: ^(.*)$ 928 | # targetLabel: nodename 929 | # replacement: $1 930 | # action: replace 931 | 932 | ## RelabelConfigs to apply to samples before scraping 933 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 934 | ## 935 | resourceRelabelings: 936 | - sourceLabels: [__metrics_path__] 937 | targetLabel: metrics_path 938 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 939 | # separator: ; 940 | # regex: ^(.*)$ 941 | # targetLabel: nodename 942 | # replacement: $1 943 | # action: replace 944 | 945 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
946 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 947 | ## 948 | metricRelabelings: [] 949 | # - sourceLabels: [__name__, image] 950 | # separator: ; 951 | # regex: container_([a-z_]+); 952 | # replacement: $1 953 | # action: drop 954 | # - sourceLabels: [__name__] 955 | # separator: ; 956 | # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) 957 | # replacement: $1 958 | # action: drop 959 | 960 | ## RelabelConfigs to apply to samples before scraping 961 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 962 | ## 963 | ## metrics_path is required to match upstream rules and charts 964 | relabelings: 965 | - sourceLabels: [__metrics_path__] 966 | targetLabel: metrics_path 967 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 968 | # separator: ; 969 | # regex: ^(.*)$ 970 | # targetLabel: nodename 971 | # replacement: $1 972 | # action: replace 973 | 974 | ## Component scraping the kube controller manager 975 | ## 976 | kubeControllerManager: 977 | enabled: true 978 | 979 | ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on 980 | ## 981 | endpoints: [] 982 | # - 10.141.4.22 983 | # - 10.141.4.23 984 | # - 10.141.4.24 985 | 986 | ## If using kubeControllerManager.endpoints only the port and targetPort are used 987 | ## 988 | service: 989 | enabled: true 990 | port: 10252 991 | targetPort: 10252 992 | # selector: 993 | # component: kube-controller-manager 994 | 995 | serviceMonitor: 996 | enabled: true 997 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 998 | ## 999 | interval: "" 1000 | 1001 | ## proxyUrl: URL of a proxy that should be used for scraping. 1002 | ## 1003 | proxyUrl: "" 1004 | 1005 | ## Enable scraping kube-controller-manager over https. 1006 | ## Requires proper certs (not self-signed) and delegated authentication/authorization checks 1007 | ## 1008 | https: false 1009 | 1010 | # Skip TLS certificate validation when scraping 1011 | insecureSkipVerify: null 1012 | 1013 | # Name of the server to use when validating TLS certificate 1014 | serverName: null 1015 | 1016 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1017 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1018 | ## 1019 | metricRelabelings: [] 1020 | # - action: keep 1021 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1022 | # sourceLabels: [__name__] 1023 | 1024 | ## RelabelConfigs to apply to samples before scraping 1025 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1026 | ## 1027 | relabelings: [] 1028 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1029 | # separator: ; 1030 | # regex: ^(.*)$ 1031 | # targetLabel: nodename 1032 | # replacement: $1 1033 | # action: replace 1034 | 1035 | ## Component scraping coreDns. Use either this or kubeDns 1036 | ## 1037 | coreDns: 1038 | enabled: true 1039 | service: 1040 | port: 9153 1041 | targetPort: 9153 1042 | # selector: 1043 | # k8s-app: kube-dns 1044 | serviceMonitor: 1045 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1046 | ## 1047 | interval: "" 1048 | 1049 | ## proxyUrl: URL of a proxy that should be used for scraping. 
1050 | ## 1051 | proxyUrl: "" 1052 | 1053 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1054 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1055 | ## 1056 | metricRelabelings: [] 1057 | # - action: keep 1058 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1059 | # sourceLabels: [__name__] 1060 | 1061 | ## RelabelConfigs to apply to samples before scraping 1062 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1063 | ## 1064 | relabelings: [] 1065 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1066 | # separator: ; 1067 | # regex: ^(.*)$ 1068 | # targetLabel: nodename 1069 | # replacement: $1 1070 | # action: replace 1071 | 1072 | ## Component scraping kubeDns. Use either this or coreDns 1073 | ## 1074 | kubeDns: 1075 | enabled: false 1076 | service: 1077 | dnsmasq: 1078 | port: 10054 1079 | targetPort: 10054 1080 | skydns: 1081 | port: 10055 1082 | targetPort: 10055 1083 | # selector: 1084 | # k8s-app: kube-dns 1085 | serviceMonitor: 1086 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1087 | ## 1088 | interval: "" 1089 | 1090 | ## proxyUrl: URL of a proxy that should be used for scraping. 1091 | ## 1092 | proxyUrl: "" 1093 | 1094 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1095 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1096 | ## 1097 | metricRelabelings: [] 1098 | # - action: keep 1099 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1100 | # sourceLabels: [__name__] 1101 | 1102 | ## RelabelConfigs to apply to samples before scraping 1103 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1104 | ## 1105 | relabelings: [] 1106 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1107 | # separator: ; 1108 | # regex: ^(.*)$ 1109 | # targetLabel: nodename 1110 | # replacement: $1 1111 | # action: replace 1112 | 1113 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1114 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1115 | ## 1116 | dnsmasqMetricRelabelings: [] 1117 | # - action: keep 1118 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1119 | # sourceLabels: [__name__] 1120 | 1121 | ## RelabelConfigs to apply to samples before scraping 1122 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1123 | ## 1124 | dnsmasqRelabelings: [] 1125 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1126 | # separator: ; 1127 | # regex: ^(.*)$ 1128 | # targetLabel: nodename 1129 | # replacement: $1 1130 | # action: replace 1131 | 1132 | ## Component scraping etcd 1133 | ## 1134 | kubeEtcd: 1135 | enabled: true 1136 | 1137 | ## If your etcd is not deployed as a pod, specify IPs it can be found on 1138 | ## 1139 | endpoints: [] 1140 | # - 10.141.4.22 1141 | # - 10.141.4.23 1142 | # - 10.141.4.24 1143 | 1144 | ## Etcd service. 
If using kubeEtcd.endpoints only the port and targetPort are used 1145 | ## 1146 | service: 1147 | enabled: true 1148 | port: 2379 1149 | targetPort: 2379 1150 | # selector: 1151 | # component: etcd 1152 | 1153 | ## Configure secure access to the etcd cluster by loading a secret into prometheus and 1154 | ## specifying security configuration below. For example, with a secret named etcd-client-cert 1155 | ## 1156 | ## serviceMonitor: 1157 | ## scheme: https 1158 | ## insecureSkipVerify: false 1159 | ## serverName: localhost 1160 | ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca 1161 | ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client 1162 | ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key 1163 | ## 1164 | serviceMonitor: 1165 | enabled: true 1166 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1167 | ## 1168 | interval: "" 1169 | ## proxyUrl: URL of a proxy that should be used for scraping. 1170 | ## 1171 | proxyUrl: "" 1172 | scheme: http 1173 | insecureSkipVerify: false 1174 | serverName: "" 1175 | caFile: "" 1176 | certFile: "" 1177 | keyFile: "" 1178 | 1179 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1180 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1181 | ## 1182 | metricRelabelings: [] 1183 | # - action: keep 1184 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1185 | # sourceLabels: [__name__] 1186 | 1187 | ## RelabelConfigs to apply to samples before scraping 1188 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1189 | ## 1190 | relabelings: [] 1191 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1192 | # separator: ; 1193 | # regex: ^(.*)$ 1194 | # targetLabel: nodename 1195 | # replacement: $1 1196 | # action: replace 1197 | 1198 | 1199 | ## Component scraping kube scheduler 1200 | ## 1201 | kubeScheduler: 1202 | enabled: true 1203 | 1204 | ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on 1205 | ## 1206 | endpoints: [] 1207 | # - 10.141.4.22 1208 | # - 10.141.4.23 1209 | # - 10.141.4.24 1210 | 1211 | ## If using kubeScheduler.endpoints only the port and targetPort are used 1212 | ## 1213 | service: 1214 | enabled: true 1215 | port: 10251 1216 | targetPort: 10251 1217 | # selector: 1218 | # component: kube-scheduler 1219 | 1220 | serviceMonitor: 1221 | enabled: true 1222 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1223 | ## 1224 | interval: "" 1225 | ## proxyUrl: URL of a proxy that should be used for scraping. 1226 | ## 1227 | proxyUrl: "" 1228 | ## Enable scraping kube-scheduler over https. 1229 | ## Requires proper certs (not self-signed) and delegated authentication/authorization checks 1230 | ## 1231 | https: false 1232 | 1233 | ## Skip TLS certificate validation when scraping 1234 | insecureSkipVerify: null 1235 | 1236 | ## Name of the server to use when validating TLS certificate 1237 | serverName: null 1238 | 1239 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
1240 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1241 | ## 1242 | metricRelabelings: [] 1243 | # - action: keep 1244 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1245 | # sourceLabels: [__name__] 1246 | 1247 | ## RelabelConfigs to apply to samples before scraping 1248 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1249 | ## 1250 | relabelings: [] 1251 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1252 | # separator: ; 1253 | # regex: ^(.*)$ 1254 | # targetLabel: nodename 1255 | # replacement: $1 1256 | # action: replace 1257 | 1258 | 1259 | ## Component scraping kube proxy 1260 | ## 1261 | kubeProxy: 1262 | enabled: true 1263 | 1264 | ## If your kube proxy is not deployed as a pod, specify IPs it can be found on 1265 | ## 1266 | endpoints: [] 1267 | # - 10.141.4.22 1268 | # - 10.141.4.23 1269 | # - 10.141.4.24 1270 | 1271 | service: 1272 | enabled: true 1273 | port: 10249 1274 | targetPort: 10249 1275 | # selector: 1276 | # k8s-app: kube-proxy 1277 | 1278 | serviceMonitor: 1279 | enabled: true 1280 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1281 | ## 1282 | interval: "" 1283 | 1284 | ## proxyUrl: URL of a proxy that should be used for scraping. 1285 | ## 1286 | proxyUrl: "" 1287 | 1288 | ## Enable scraping kube-proxy over https. 1289 | ## Requires proper certs (not self-signed) and delegated authentication/authorization checks 1290 | ## 1291 | https: false 1292 | 1293 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1294 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1295 | ## 1296 | metricRelabelings: [] 1297 | # - action: keep 1298 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1299 | # sourceLabels: [__name__] 1300 | 1301 | ## RelabelConfigs to apply to samples before scraping 1302 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1303 | ## 1304 | relabelings: [] 1305 | # - action: keep 1306 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1307 | # sourceLabels: [__name__] 1308 | 1309 | 1310 | ## Component scraping kube state metrics 1311 | ## 1312 | kubeStateMetrics: 1313 | enabled: true 1314 | serviceMonitor: 1315 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1316 | ## 1317 | interval: "" 1318 | ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. 1319 | ## 1320 | scrapeTimeout: "" 1321 | ## proxyUrl: URL of a proxy that should be used for scraping. 1322 | ## 1323 | proxyUrl: "" 1324 | ## Override serviceMonitor selector 1325 | ## 1326 | selectorOverride: {} 1327 | 1328 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
1329 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1330 | ## 1331 | metricRelabelings: [] 1332 | # - action: keep 1333 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1334 | # sourceLabels: [__name__] 1335 | 1336 | ## RelabelConfigs to apply to samples before scraping 1337 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1338 | ## 1339 | relabelings: [] 1340 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1341 | # separator: ; 1342 | # regex: ^(.*)$ 1343 | # targetLabel: nodename 1344 | # replacement: $1 1345 | # action: replace 1346 | 1347 | # Keep labels from scraped data, overriding server-side labels 1348 | honorLabels: true 1349 | 1350 | # Enable self metrics configuration for Service Monitor 1351 | selfMonitor: 1352 | enabled: false 1353 | 1354 | ## Configuration for kube-state-metrics subchart 1355 | ## 1356 | kube-state-metrics: 1357 | namespaceOverride: "monitoring" 1358 | rbac: 1359 | create: true 1360 | podSecurityPolicy: 1361 | enabled: true 1362 | 1363 | ## Deploy node exporter as a daemonset to all nodes 1364 | ## 1365 | nodeExporter: 1366 | enabled: true 1367 | 1368 | ## Use the value configured in prometheus-node-exporter.podLabels 1369 | ## 1370 | jobLabel: jobLabel 1371 | 1372 | serviceMonitor: 1373 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1374 | ## 1375 | interval: "" 1376 | 1377 | ## proxyUrl: URL of a proxy that should be used for scraping. 1378 | ## 1379 | proxyUrl: "" 1380 | 1381 | ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used. 1382 | ## 1383 | scrapeTimeout: "" 1384 | 1385 | ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 1386 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1387 | ## 1388 | metricRelabelings: [] 1389 | # - sourceLabels: [__name__] 1390 | # separator: ; 1391 | # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ 1392 | # replacement: $1 1393 | # action: drop 1394 | 1395 | ## RelabelConfigs to apply to samples before scraping 1396 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig 1397 | ## 1398 | relabelings: [] 1399 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1400 | # separator: ; 1401 | # regex: ^(.*)$ 1402 | # targetLabel: nodename 1403 | # replacement: $1 1404 | # action: replace 1405 | 1406 | ## Configuration for prometheus-node-exporter subchart 1407 | ## 1408 | prometheus-node-exporter: 1409 | namespaceOverride: "monitoring" 1410 | podLabels: 1411 | ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards 1412 | ## 1413 | jobLabel: node-exporter 1414 | extraArgs: 1415 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) 1416 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 1417 | 1418 | ## Manages Prometheus and Alertmanager components 1419 | ## 1420 | prometheusOperator: 1421 | enabled: true 1422 | 1423 | ## Prometheus-Operator v0.39.0 and later support TLS natively.
1424 | ## 1425 | tls: 1426 | enabled: true 1427 | # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants 1428 | tlsMinVersion: VersionTLS13 1429 | # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. 1430 | internalPort: 10250 1431 | 1432 | ## Admission webhook support for PrometheusRules resources (added in Prometheus Operator 0.30) can be enabled to prevent incorrectly formatted 1433 | ## rules from making their way into Prometheus and potentially preventing its container from starting 1434 | admissionWebhooks: 1435 | failurePolicy: Fail 1436 | enabled: true 1437 | ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. 1438 | ## If unspecified, system trust roots on the apiserver are used. 1439 | caBundle: "" 1440 | ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. 1441 | ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own 1442 | ## certs ahead of time if you wish. 1443 | ## 1444 | patch: 1445 | enabled: true 1446 | image: 1447 | repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen 1448 | tag: v1.0 1449 | sha: "f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068" 1450 | pullPolicy: IfNotPresent 1451 | resources: {} 1452 | ## Provide a priority class name to the webhook patching job 1453 | ## 1454 | priorityClassName: "" 1455 | podAnnotations: {} 1456 | nodeSelector: {} 1457 | affinity: {} 1458 | tolerations: [] 1459 | 1460 | ## SecurityContext holds pod-level security attributes and common container settings. 1461 | ## This defaults to a non-root user with uid 2000 and gid 2000. 1462 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ 1463 | ## 1464 | securityContext: 1465 | runAsGroup: 2000 1466 | runAsNonRoot: true 1467 | runAsUser: 2000 1468 | 1469 | # Use certmanager to generate webhook certs 1470 | certManager: 1471 | enabled: false 1472 | # issuerRef: 1473 | # name: "issuer" 1474 | # kind: "ClusterIssuer" 1475 | 1476 | ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). 1477 | ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration 1478 | ## 1479 | namespaces: {} 1480 | # releaseNamespace: true 1481 | # additional: 1482 | # - kube-system 1483 | 1484 | ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). 1485 | ## 1486 | denyNamespaces: [] 1487 | 1488 | ## Filter namespaces to look for prometheus-operator custom resources 1489 | ## 1490 | alertmanagerInstanceNamespaces: [] 1491 | prometheusInstanceNamespaces: [] 1492 | thanosRulerInstanceNamespaces: [] 1493 | 1494 | ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. 1495 | ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) 1496 | ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 1497 | ## 1498 | # clusterDomain: "cluster.local" 1499 | 1500 | ## Service account for Alertmanager to use.
1501 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ 1502 | ## 1503 | serviceAccount: 1504 | create: true 1505 | name: "" 1506 | 1507 | ## Configuration for Prometheus operator service 1508 | ## 1509 | service: 1510 | annotations: {} 1511 | labels: {} 1512 | clusterIP: "" 1513 | 1514 | ## Port to expose on each node 1515 | ## Only used if service.type is 'NodePort' 1516 | ## 1517 | nodePort: 30080 1518 | 1519 | nodePortTls: 30443 1520 | 1521 | ## Additional ports to open for Prometheus service 1522 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services 1523 | ## 1524 | additionalPorts: [] 1525 | 1526 | ## Loadbalancer IP 1527 | ## Only use if service.type is "LoadBalancer" 1528 | ## 1529 | loadBalancerIP: "" 1530 | loadBalancerSourceRanges: [] 1531 | 1532 | ## Service type 1533 | ## NodePort, ClusterIP, LoadBalancer 1534 | ## 1535 | type: ClusterIP 1536 | 1537 | ## List of IP addresses at which the Prometheus server service is available 1538 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips 1539 | ## 1540 | externalIPs: [] 1541 | 1542 | ## Labels to add to the operator pod 1543 | ## 1544 | podLabels: {} 1545 | 1546 | ## Annotations to add to the operator pod 1547 | ## 1548 | podAnnotations: {} 1549 | 1550 | ## Assign a PriorityClassName to pods if set 1551 | # priorityClassName: "" 1552 | 1553 | ## Define Log Format 1554 | # Use logfmt (default) or json logging 1555 | # logFormat: logfmt 1556 | 1557 | ## Decrease log verbosity to errors only 1558 | # logLevel: error 1559 | 1560 | ## If true, the operator will create and maintain a service for scraping kubelets 1561 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/helm/prometheus-operator/README.md 1562 | ## 1563 | kubeletService: 1564 | enabled: true 1565 | namespace: kube-system 1566 | ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default 1567 | name: "" 1568 | 1569 | ## Create a servicemonitor for the operator 1570 | ## 1571 | serviceMonitor: 1572 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 1573 | ## 1574 | interval: "" 1575 | ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. 1576 | scrapeTimeout: "" 1577 | selfMonitor: true 1578 | 1579 | ## Metric relabel configs to apply to samples before ingestion. 1580 | ## 1581 | metricRelabelings: [] 1582 | # - action: keep 1583 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 1584 | # sourceLabels: [__name__] 1585 | 1586 | # relabel configs to apply to samples before ingestion. 1587 | ## 1588 | relabelings: [] 1589 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 1590 | # separator: ; 1591 | # regex: ^(.*)$ 1592 | # targetLabel: nodename 1593 | # replacement: $1 1594 | # action: replace 1595 | 1596 | ## Resource limits & requests 1597 | ## 1598 | resources: {} 1599 | # limits: 1600 | # cpu: 200m 1601 | # memory: 200Mi 1602 | # requests: 1603 | # cpu: 100m 1604 | # memory: 100Mi 1605 | 1606 | # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), 1607 | # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working 1608 | ## 1609 | hostNetwork: false 1610 | 1611 | ## Define which Nodes the Pods are scheduled on. 
1612 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 1613 | ## 1614 | nodeSelector: {} 1615 | 1616 | ## Tolerations for use with node taints 1617 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 1618 | ## 1619 | tolerations: [] 1620 | # - key: "key" 1621 | # operator: "Equal" 1622 | # value: "value" 1623 | # effect: "NoSchedule" 1624 | 1625 | ## Assign custom affinity rules to the prometheus operator 1626 | ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ 1627 | ## 1628 | affinity: {} 1629 | # nodeAffinity: 1630 | # requiredDuringSchedulingIgnoredDuringExecution: 1631 | # nodeSelectorTerms: 1632 | # - matchExpressions: 1633 | # - key: kubernetes.io/e2e-az-name 1634 | # operator: In 1635 | # values: 1636 | # - e2e-az1 1637 | # - e2e-az2 1638 | dnsConfig: {} 1639 | # nameservers: 1640 | # - 1.2.3.4 1641 | # searches: 1642 | # - ns1.svc.cluster-domain.example 1643 | # - my.dns.search.suffix 1644 | # options: 1645 | # - name: ndots 1646 | # value: "2" 1647 | # - name: edns0 1648 | securityContext: 1649 | fsGroup: 65534 1650 | runAsGroup: 65534 1651 | runAsNonRoot: true 1652 | runAsUser: 65534 1653 | 1654 | ## Prometheus-operator image 1655 | ## 1656 | image: 1657 | repository: quay.io/prometheus-operator/prometheus-operator 1658 | tag: v0.52.0 1659 | sha: "" 1660 | pullPolicy: IfNotPresent 1661 | 1662 | ## Prometheus image to use for prometheuses managed by the operator 1663 | ## 1664 | # prometheusDefaultBaseImage: quay.io/prometheus/prometheus 1665 | 1666 | ## Alertmanager image to use for alertmanagers managed by the operator 1667 | ## 1668 | # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager 1669 | 1670 | ## Prometheus-config-reloader image to use for config and rule reloading 1671 | ## 1672 | prometheusConfigReloaderImage: 1673 | repository: quay.io/prometheus-operator/prometheus-config-reloader 1674 | tag: v0.52.0 1675 | sha: "" 1676 | 1677 | ## Set the prometheus config reloader side-car CPU limit 1678 | ## 1679 | configReloaderCpu: 100m 1680 | 1681 | ## Set the prometheus config reloader side-car memory limit 1682 | ## 1683 | configReloaderMemory: 50Mi 1684 | 1685 | ## Thanos side-car image when configured 1686 | ## 1687 | thanosImage: 1688 | repository: quay.io/thanos/thanos 1689 | tag: v0.17.2 1690 | sha: "" 1691 | 1692 | ## Set a Field Selector to filter watched secrets 1693 | ## 1694 | secretFieldSelector: "" 1695 | 1696 | ## Deploy a Prometheus instance 1697 | ## 1698 | prometheus: 1699 | 1700 | enabled: true 1701 | 1702 | ## Annotations for Prometheus 1703 | ## 1704 | annotations: {} 1705 | 1706 | ## Service account for Prometheuses to use. 1707 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ 1708 | ## 1709 | serviceAccount: 1710 | create: true 1711 | name: "" 1712 | annotations: {} 1713 | 1714 | # Service for thanos service discovery on sidecar 1715 | # Enabling this lets Thanos Query use 1716 | # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover the 1717 | # Thanos sidecar on prometheus nodes 1718 | # (remember to substitute ${kube-prometheus-stack.fullname} and ${namespace} with real values rather than copying and pasting!)
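##
## For illustration, with a hypothetical release named "prom" installed into the
## "monitoring" namespace, a Thanos Query instance could be pointed at the
## discovery service with a flag along the lines of:
# --store=dnssrv+_grpc._tcp.prom-kube-prometheus-stack-thanos-discovery.monitoring.svc.cluster.local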
1719 | thanosService: 1720 | enabled: false 1721 | annotations: {} 1722 | labels: {} 1723 | 1724 | ## Service type 1725 | ## 1726 | type: ClusterIP 1727 | 1728 | ## gRPC port config 1729 | portName: grpc 1730 | port: 10901 1731 | targetPort: "grpc" 1732 | 1733 | ## HTTP port config (for metrics) 1734 | httpPortName: http 1735 | httpPort: 10902 1736 | targetHttpPort: "http" 1737 | 1738 | ## ClusterIP to assign 1739 | # Default is to make this a headless service ("None") 1740 | clusterIP: "None" 1741 | 1742 | ## Port to expose on each node, if service type is NodePort 1743 | ## 1744 | nodePort: 30901 1745 | httpNodePort: 30902 1746 | 1747 | # ServiceMonitor to scrape Sidecar metrics 1748 | # Needs thanosService to be enabled as well 1749 | thanosServiceMonitor: 1750 | enabled: false 1751 | interval: "" 1752 | 1753 | ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. 1754 | scheme: "" 1755 | 1756 | ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 1757 | ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig 1758 | tlsConfig: {} 1759 | 1760 | bearerTokenFile: 1761 | 1762 | ## Metric relabel configs to apply to samples before ingestion. 1763 | metricRelabelings: [] 1764 | 1765 | ## relabel configs to apply to samples before ingestion. 1766 | relabelings: [] 1767 | 1768 | # Service for external access to sidecar 1769 | # Enabling this creates a service to expose thanos-sidecar outside the cluster. 1770 | thanosServiceExternal: 1771 | enabled: false 1772 | annotations: {} 1773 | labels: {} 1774 | loadBalancerIP: "" 1775 | loadBalancerSourceRanges: [] 1776 | 1777 | ## gRPC port config 1778 | portName: grpc 1779 | port: 10901 1780 | targetPort: "grpc" 1781 | 1782 | ## HTTP port config (for metrics) 1783 | httpPortName: http 1784 | httpPort: 10902 1785 | targetHttpPort: "http" 1786 | 1787 | ## Service type 1788 | ## 1789 | type: LoadBalancer 1790 | 1791 | ## Port to expose on each node 1792 | ## 1793 | nodePort: 30901 1794 | httpNodePort: 30902 1795 | 1796 | ## Configuration for Prometheus service 1797 | ## 1798 | service: 1799 | annotations: {} 1800 | labels: {} 1801 | clusterIP: "" 1802 | 1803 | ## Port for Prometheus Service to listen on 1804 | ## 1805 | port: 9090 1806 | 1807 | ## To be used with a proxy extraContainer port 1808 | targetPort: 9090 1809 | 1810 | ## List of IP addresses at which the Prometheus server service is available 1811 | ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips 1812 | ## 1813 | externalIPs: [] 1814 | 1815 | ## Port to expose on each node 1816 | ## Only used if service.type is 'NodePort' 1817 | ## 1818 | nodePort: 30090 1819 | 1820 | ## Loadbalancer IP 1821 | ## Only use if service.type is "LoadBalancer" 1822 | loadBalancerIP: "" 1823 | loadBalancerSourceRanges: [] 1824 | ## Service type 1825 | ## 1826 | type: ClusterIP 1827 | 1828 | sessionAffinity: "" 1829 | 1830 | ## Configuration for creating a separate Service for each statefulset Prometheus replica 1831 | ## 1832 | servicePerReplica: 1833 | enabled: false 1834 | annotations: {} 1835 | 1836 | ## Port for Prometheus Service per replica to listen on 1837 | ## 1838 | port: 9090 1839 | 1840 | ## To be used with a proxy extraContainer port 1841 | targetPort: 9090 1842 | 1843 | ## Port to expose on each node 1844 | ## Only used if servicePerReplica.type is 'NodePort' 1845 | ## 1846 | nodePort: 30091 1847 | 1848 | ## Loadbalancer source IP ranges 
1849 | ## Only used if servicePerReplica.type is "LoadBalancer" 1850 | loadBalancerSourceRanges: [] 1851 | ## Service type 1852 | ## 1853 | type: ClusterIP 1854 | 1855 | ## Configure pod disruption budgets for Prometheus 1856 | ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget 1857 | ## This configuration is immutable once created and will require the PDB to be deleted to be changed 1858 | ## https://github.com/kubernetes/kubernetes/issues/45398 1859 | ## 1860 | podDisruptionBudget: 1861 | enabled: false 1862 | minAvailable: 1 1863 | maxUnavailable: "" 1864 | 1865 | # Ingress exposes thanos sidecar outside the cluster 1866 | thanosIngress: 1867 | enabled: false 1868 | 1869 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName 1870 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress 1871 | # ingressClassName: nginx 1872 | 1873 | annotations: {} 1874 | labels: {} 1875 | servicePort: 10901 1876 | 1877 | ## Port to expose on each node 1878 | ## Only used if service.type is 'NodePort' 1879 | ## 1880 | nodePort: 30901 1881 | 1882 | ## Hosts must be provided if Ingress is enabled. 1883 | ## 1884 | hosts: [] 1885 | # - thanos-gateway.domain.com 1886 | 1887 | ## Paths to use for ingress rules 1888 | ## 1889 | paths: [] 1890 | # - / 1891 | 1892 | ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) 1893 | ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types 1894 | # pathType: ImplementationSpecific 1895 | 1896 | ## TLS configuration for Thanos Ingress 1897 | ## Secret must be manually created in the namespace 1898 | ## 1899 | tls: [] 1900 | # - secretName: thanos-gateway-tls 1901 | # hosts: 1902 | # - thanos-gateway.domain.com 1903 | # 1904 | 1905 | ## ExtraSecret can be used to store various data in an extra secret 1906 | ## (use it for example to store hashed basic auth credentials) 1907 | extraSecret: 1908 | ## if not set, name will be auto generated 1909 | # name: "" 1910 | annotations: {} 1911 | data: {} 1912 | # auth: | 1913 | # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 1914 | # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. 1915 | 1916 | ingress: 1917 | enabled: false 1918 | 1919 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName 1920 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress 1921 | # ingressClassName: nginx 1922 | 1923 | annotations: {} 1924 | labels: {} 1925 | 1926 | ## Hostnames. 1927 | ## Must be provided if Ingress is enabled. 
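##
## A minimal sketch for exposing this Prometheus instance, assuming a
## placeholder hostname and a manually created TLS secret (both hypothetical):
# ingress:
#   enabled: true
#   ingressClassName: nginx
#   hosts:
#     - prometheus.example.com
#   paths:
#     - /
#   tls:
#     - secretName: prometheus-general-tls
#       hosts:
#         - prometheus.example.com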
1928 | ## 1929 | # hosts: 1930 | # - prometheus.domain.com 1931 | hosts: [] 1932 | 1933 | ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix 1934 | ## 1935 | paths: [] 1936 | # - / 1937 | 1938 | ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) 1939 | ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types 1940 | # pathType: ImplementationSpecific 1941 | 1942 | ## TLS configuration for Prometheus Ingress 1943 | ## Secret must be manually created in the namespace 1944 | ## 1945 | tls: [] 1946 | # - secretName: prometheus-general-tls 1947 | # hosts: 1948 | # - prometheus.example.com 1949 | 1950 | ## Configuration for creating an Ingress that will map to each Prometheus replica service 1951 | ## prometheus.servicePerReplica must be enabled 1952 | ## 1953 | ingressPerReplica: 1954 | enabled: false 1955 | 1956 | # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName 1957 | # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress 1958 | # ingressClassName: nginx 1959 | 1960 | annotations: {} 1961 | labels: {} 1962 | 1963 | ## Final form of the hostname for each per replica ingress is 1964 | ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} 1965 | ## 1966 | ## Prefix for the per replica ingress that will have `-$replicaNumber` 1967 | ## appended to the end 1968 | hostPrefix: "" 1969 | ## Domain that will be used for the per replica ingress 1970 | hostDomain: "" 1971 | 1972 | ## Paths to use for ingress rules 1973 | ## 1974 | paths: [] 1975 | # - / 1976 | 1977 | ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) 1978 | ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types 1979 | # pathType: ImplementationSpecific 1980 | 1981 | ## Secret name containing the TLS certificate for Prometheus per replica ingress 1982 | ## Secret must be manually created in the namespace 1983 | tlsSecretName: "" 1984 | 1985 | ## Separated secret for each per replica Ingress. Can be used together with cert-manager 1986 | ## 1987 | tlsSecretPerReplica: 1988 | enabled: false 1989 | ## Final form of the secret for each per replica ingress is 1990 | ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} 1991 | ## 1992 | prefix: "prometheus" 1993 | 1994 | ## Configure additional options for default pod security policy for Prometheus 1995 | ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ 1996 | podSecurityPolicy: 1997 | allowedCapabilities: [] 1998 | allowedHostPaths: [] 1999 | volumes: [] 2000 | 2001 | serviceMonitor: 2002 | ## Scrape interval. If not set, the Prometheus default scrape interval is used. 2003 | ## 2004 | interval: "" 2005 | selfMonitor: true 2006 | 2007 | ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. 2008 | scheme: "" 2009 | 2010 | ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 
2011 | ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig 2012 | tlsConfig: {} 2013 | 2014 | bearerTokenFile: 2015 | 2016 | ## Metric relabel configs to apply to samples before ingestion. 2017 | ## 2018 | metricRelabelings: [] 2019 | # - action: keep 2020 | # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' 2021 | # sourceLabels: [__name__] 2022 | 2023 | # relabel configs to apply to samples before ingestion. 2024 | ## 2025 | relabelings: [] 2026 | # - sourceLabels: [__meta_kubernetes_pod_node_name] 2027 | # separator: ; 2028 | # regex: ^(.*)$ 2029 | # targetLabel: nodename 2030 | # replacement: $1 2031 | # action: replace 2032 | 2033 | ## Settings affecting prometheusSpec 2034 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec 2035 | ## 2036 | prometheusSpec: 2037 | ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos 2038 | ## 2039 | disableCompaction: false 2040 | ## APIServerConfig 2041 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig 2042 | ## 2043 | apiserverConfig: {} 2044 | 2045 | ## Interval between consecutive scrapes. 2046 | ## Defaults to 30s. 2047 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 2048 | ## 2049 | scrapeInterval: "" 2050 | 2051 | ## Number of seconds to wait for target to respond before erroring 2052 | ## 2053 | scrapeTimeout: "" 2054 | 2055 | ## Interval between consecutive evaluations. 2056 | ## 2057 | evaluationInterval: "" 2058 | 2059 | ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. 2060 | ## 2061 | listenLocal: false 2062 | 2063 | ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series. 2064 | ## This is disabled by default. 2065 | ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis 2066 | ## 2067 | enableAdminAPI: false 2068 | 2069 | ## WebTLSConfig defines the TLS parameters for HTTPS 2070 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#webtlsconfig 2071 | web: {} 2072 | 2073 | # EnableFeatures enables access to disabled Prometheus features. 2074 | # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ 2075 | enableFeatures: [] 2076 | # - exemplar-storage 2077 | 2078 | ## Image of Prometheus. 2079 | ## 2080 | image: 2081 | repository: quay.io/prometheus/prometheus 2082 | tag: v2.28.1 2083 | sha: "" 2084 | 2085 | ## Tolerations for use with node taints 2086 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 2087 | ## 2088 | tolerations: [] 2089 | # - key: "key" 2090 | # operator: "Equal" 2091 | # value: "value" 2092 | # effect: "NoSchedule" 2093 | 2094 | ## If specified, the pod's topology spread constraints.
2095 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 2096 | ## 2097 | topologySpreadConstraints: [] 2098 | # - maxSkew: 1 2099 | # topologyKey: topology.kubernetes.io/zone 2100 | # whenUnsatisfiable: DoNotSchedule 2101 | # labelSelector: 2102 | # matchLabels: 2103 | # app: prometheus 2104 | 2105 | ## Alertmanagers to which alerts will be sent 2106 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints 2107 | ## 2108 | ## Default configuration will connect to the alertmanager deployed as part of this release 2109 | ## 2110 | alertingEndpoints: [] 2111 | # - name: "" 2112 | # namespace: "" 2113 | # port: http 2114 | # scheme: http 2115 | # pathPrefix: "" 2116 | # tlsConfig: {} 2117 | # bearerTokenFile: "" 2118 | # apiVersion: v2 2119 | 2120 | ## External labels to add to any time series or alerts when communicating with external systems 2121 | ## 2122 | externalLabels: {} 2123 | 2124 | ## Name of the external label used to denote replica name 2125 | ## 2126 | replicaExternalLabelName: "" 2127 | 2128 | ## If true, the Operator won't add the external label used to denote replica name 2129 | ## 2130 | replicaExternalLabelNameClear: false 2131 | 2132 | ## Name of the external label used to denote Prometheus instance name 2133 | ## 2134 | prometheusExternalLabelName: "" 2135 | 2136 | ## If true, the Operator won't add the external label used to denote Prometheus instance name 2137 | ## 2138 | prometheusExternalLabelNameClear: false 2139 | 2140 | ## External URL at which Prometheus will be reachable. 2141 | ## 2142 | externalUrl: "" 2143 | 2144 | ## Define which Nodes the Pods are scheduled on. 2145 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 2146 | ## 2147 | nodeSelector: {} 2148 | 2149 | ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. 2150 | ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not 2151 | ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated 2152 | ## with the new list of secrets. 2153 | ## 2154 | secrets: [] 2155 | 2156 | ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. 2157 | ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. 2158 | ## 2159 | configMaps: [] 2160 | 2161 | ## QuerySpec defines the query command line flags when starting Prometheus. 2162 | ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec 2163 | ## 2164 | query: {} 2165 | 2166 | ## Namespaces to be selected for PrometheusRules discovery. 2167 | ## If nil, select own namespace. 2168 | ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage 2169 | ## 2170 | ruleNamespaceSelector: {} 2171 | 2172 | ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the 2173 | ## prometheus resource to be created with selectors based on values in the helm deployment, 2174 | ## which will also match the PrometheusRule resources created 2175 | ## 2176 | ruleSelectorNilUsesHelmValues: true 2177 | 2178 | ## PrometheusRules to be selected for target discovery.
2179 |     ## If {}, select all PrometheusRules
2180 |     ##
2181 |     ruleSelector: {}
2182 |     ## Example which selects all PrometheusRules resources
2183 |     ## with label "prometheus" set to any of "example-rules" or "example-rules-2"
2184 |     # ruleSelector:
2185 |     #   matchExpressions:
2186 |     #     - key: prometheus
2187 |     #       operator: In
2188 |     #       values:
2189 |     #         - example-rules
2190 |     #         - example-rules-2
2191 |     #
2192 |     ## Example which selects all PrometheusRules resources with label "role" set to "example-rules"
2193 |     # ruleSelector:
2194 |     #   matchLabels:
2195 |     #     role: example-rules
2196 |
2197 |     ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
2198 |     ## prometheus resource to be created with selectors based on values in the helm deployment,
2199 |     ## which will also match the servicemonitors created
2200 |     ##
2201 |     serviceMonitorSelectorNilUsesHelmValues: true
2202 |
2203 |     ## ServiceMonitors to be selected for target discovery.
2204 |     ## If {}, select all ServiceMonitors
2205 |     ##
2206 |     serviceMonitorSelector: {}
2207 |     ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
2208 |     # serviceMonitorSelector:
2209 |     #   matchLabels:
2210 |     #     prometheus: somelabel
2211 |
2212 |     ## Namespaces to be selected for ServiceMonitor discovery.
2213 |     ##
2214 |     serviceMonitorNamespaceSelector: {}
2215 |     ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
2216 |     # serviceMonitorNamespaceSelector:
2217 |     #   matchLabels:
2218 |     #     prometheus: somelabel
2219 |
2220 |     ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
2221 |     ## prometheus resource to be created with selectors based on values in the helm deployment,
2222 |     ## which will also match the podmonitors created
2223 |     ##
2224 |     podMonitorSelectorNilUsesHelmValues: true
2225 |
2226 |     ## PodMonitors to be selected for target discovery.
2227 |     ## If {}, select all PodMonitors
2228 |     ##
2229 |     podMonitorSelector: {}
2230 |     ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
2231 |     # podMonitorSelector:
2232 |     #   matchLabels:
2233 |     #     prometheus: somelabel
2234 |
2235 |     ## Namespaces to be selected for PodMonitor discovery.
2236 |     ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
2237 |     ##
2238 |     podMonitorNamespaceSelector: {}
2239 |
2240 |     ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
2241 |     ## prometheus resource to be created with selectors based on values in the helm deployment,
2242 |     ## which will also match the probes created
2243 |     ##
2244 |     probeSelectorNilUsesHelmValues: true
2245 |
2246 |     ## Probes to be selected for target discovery.
2247 |     ## If {}, select all Probes
2248 |     ##
2249 |     probeSelector: {}
2250 |     ## Example which selects Probes with label "prometheus" set to "somelabel"
2251 |     # probeSelector:
2252 |     #   matchLabels:
2253 |     #     prometheus: somelabel
2254 |
2255 |     ## Namespaces to be selected for Probe discovery.
2256 |     ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage
2257 |     ##
2258 |     probeNamespaceSelector: {}
2259 |
2260 |     ## How long to retain metrics
2261 |     ##
2262 |     retention: 10d
2263 |
2264 |     ## Maximum on-disk size of the metrics database, e.g. "50GB". Empty means no size-based retention.
2265 |     ##
2266 |     retentionSize: ""
2267 |
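    ## As an illustrative sketch (the values below are placeholders, not chart defaults), time- and
    ## size-based retention can be combined; whichever limit is reached first applies:
    # retention: 30d
    # retentionSize: "45GB"
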
2268 |     ## Enable compression of the write-ahead log using Snappy.
2269 |     ##
2270 |     walCompression: false
2271 |
2272 |     ## If true, the Operator won't process any Prometheus configuration changes
2273 |     ##
2274 |     paused: false
2275 |
2276 |     ## Number of replicas of each shard to deploy for a Prometheus deployment.
2277 |     ## Number of replicas multiplied by shards is the total number of Pods created.
2278 |     ##
2279 |     replicas: 1
2280 |
2281 |     ## EXPERIMENTAL: Number of shards to distribute targets onto.
2282 |     ## Number of replicas multiplied by shards is the total number of Pods created.
2283 |     ## Note that scaling down shards will not reshard data onto remaining instances; it must be moved manually.
2284 |     ## Increasing shards will not reshard data either, but it will continue to be available from the same instances.
2285 |     ## To query globally, use the Thanos sidecar and Thanos querier, or remote write data to a central location.
2286 |     ## Sharding is done on the content of the `__address__` target meta-label.
2287 |     ##
2288 |     shards: 1
2289 |
2290 |     ## Log level to be configured for Prometheus
2291 |     ##
2292 |     logLevel: info
2293 |
2294 |     ## Log format to be configured for Prometheus
2295 |     ##
2296 |     logFormat: logfmt
2297 |
2298 |     ## Prefix used to register routes, overriding externalUrl route.
2299 |     ## Useful for proxies that rewrite URLs.
2300 |     ##
2301 |     routePrefix: /
2302 |
2303 |     ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
2304 |     ## Metadata Labels and Annotations get propagated to the prometheus pods.
2305 |     ##
2306 |     podMetadata: {}
2307 |     # labels:
2308 |     #   app: prometheus
2309 |     #   k8s-app: prometheus
2310 |
2311 |     ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
2312 |     ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
2313 |     ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
2314 |     ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
2315 |     podAntiAffinity: ""
2316 |
2317 |     ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
2318 |     ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
2319 |     ##
2320 |     podAntiAffinityTopologyKey: kubernetes.io/hostname
2321 |
2322 |     ## Assign custom affinity rules to the prometheus instance
2323 |     ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
2324 |     ##
2325 |     affinity: {}
2326 |     # nodeAffinity:
2327 |     #   requiredDuringSchedulingIgnoredDuringExecution:
2328 |     #     nodeSelectorTerms:
2329 |     #       - matchExpressions:
2330 |     #           - key: kubernetes.io/e2e-az-name
2331 |     #             operator: In
2332 |     #             values:
2333 |     #               - e2e-az1
2334 |     #               - e2e-az2
2335 |
2336 |     ## The remote_read spec configuration for Prometheus.
2337 |     ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
2338 |     remoteRead: []
2339 |     # - url: http://remote1/read
2340 |     ## additionalRemoteRead is appended to remoteRead
2341 |     additionalRemoteRead: []
2342 |
2343 |     ## The remote_write spec configuration for Prometheus.
2344 |     ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
2345 |     remoteWrite: []
2346 |     # - url: http://remote1/push
2347 |     ## additionalRemoteWrite is appended to remoteWrite
2348 |     additionalRemoteWrite: []
2349 |
2350 |     ## Enable/disable provisioning of Grafana dashboards for the Prometheus remote-write feature
2351 |     remoteWriteDashboards: false
2352 |
2353 |     ## Resource limits & requests
2354 |     ##
2355 |     resources: {}
2356 |     # requests:
2357 |     #   memory: 400Mi
2358 |
2359 |     ## Prometheus StorageSpec for persistent data
2360 |     ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
2361 |     ##
2362 |     storageSpec: {}
2363 |     ## Using PersistentVolumeClaim
2364 |     ##
2365 |     # volumeClaimTemplate:
2366 |     #   spec:
2367 |     #     storageClassName: gluster
2368 |     #     accessModes: ["ReadWriteOnce"]
2369 |     #     resources:
2370 |     #       requests:
2371 |     #         storage: 50Gi
2372 |     #   selector: {}
2373 |
2374 |     ## Using tmpfs volume
2375 |     ##
2376 |     # emptyDir:
2377 |     #   medium: Memory
2378 |
2379 |     # Additional volumes on the output StatefulSet definition.
2380 |     volumes: []
2381 |
2382 |     # Additional VolumeMounts on the output StatefulSet definition.
2383 |     volumeMounts: []
2384 |
2385 |     ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
2386 |     ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
2387 |     ## as specified in the official Prometheus documentation:
2388 |     ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
2389 |     ## appended, the user is responsible for making sure they are valid. Note that using this feature may make it possible
2390 |     ## to break upgrades of Prometheus. It is advised to review the Prometheus release notes to ensure that no incompatible
2391 |     ## scrape configs are going to break Prometheus after the upgrade.
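    ##
    ## As a minimal sketch (the job name and target below are placeholders, not part of this chart),
    ## a single static scrape job appended via this value could look like:
    # additionalScrapeConfigs:
    # - job_name: my-app
    #   metrics_path: /metrics
    #   static_configs:
    #     - targets:
    #         - my-app.default.svc:8080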
2392 |     ##
2393 |     ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
2394 |     ## port to 2379, and allow etcd scraping, provided etcd is running on all Kubernetes master nodes
2395 |     ##
2396 |     # additionalScrapeConfigs:
2397 |     # - job_name: blackbox
2398 |     #   metrics_path: /probe
2399 |     #   params:
2400 |     #     module: [http_2xx]
2401 |     #   static_configs:
2402 |     #     - targets:
2403 |     #         - https://url.here
2404 |     #   relabel_configs:
2405 |     #     - source_labels: [__address__]
2406 |     #       target_label: __param_target
2407 |     #     - source_labels: [__param_target]
2408 |     #       target_label: target
2409 |     #     - target_label: __address__
2410 |     #       replacement: prometheus-blackbox-exporter.monitoring:9115
2411 |     # - job_name: kube-etcd
2412 |     #   kubernetes_sd_configs:
2413 |     #     - role: node
2414 |     #   scheme: https
2415 |     #   tls_config:
2416 |     #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
2417 |     #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
2418 |     #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
2419 |     #   relabel_configs:
2420 |     #     - action: labelmap
2421 |     #       regex: __meta_kubernetes_node_label_(.+)
2422 |     #     - source_labels: [__address__]
2423 |     #       action: replace
2424 |     #       target_label: __address__
2425 |     #       regex: ([^:;]+):(\d+)
2426 |     #       replacement: ${1}:2379
2427 |     #     - source_labels: [__meta_kubernetes_node_name]
2428 |     #       action: keep
2429 |     #       regex: .*mst.*
2430 |     #     - source_labels: [__meta_kubernetes_node_name]
2431 |     #       action: replace
2432 |     #       target_label: node
2433 |     #       regex: (.*)
2434 |     #       replacement: ${1}
2435 |     #   metric_relabel_configs:
2436 |     #     - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
2437 |     #       action: labeldrop
2438 |
2439 |     ## If additional scrape configurations are already deployed in a single Secret, you can use this section.
2440 |     ## Expected values are the secret name and key
2441 |     ## Cannot be used with additionalScrapeConfigs
2442 |     additionalScrapeConfigsSecret: {}
2443 |     #   enabled: false
2444 |     #   name:
2445 |     #   key:
2446 |
2447 |     ## additionalPrometheusSecretsAnnotations allows adding annotations to the kubernetes secret. This can be useful,
2448 |     ## e.g. when deploying via Spinnaker, to disable versioning on the secret: strategy.spinnaker.io/versioned: 'false'
2449 |     additionalPrometheusSecretsAnnotations: {}
2450 |
2451 |     ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
2452 |     ## in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/.
2453 |     ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
2454 |     ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
2455 |     ## feature may make it possible to break upgrades of Prometheus. It is advised to review the Prometheus release
2456 |     ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
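    ##
    ## As an illustrative sketch (the hostname below is a placeholder), a static external
    ## Alertmanager entry for the list below could look like:
    # - static_configs:
    #     - targets:
    #         - external-alertmanager.example.com:9093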
2457 |     ##
2458 |     additionalAlertManagerConfigs: []
2459 |     # - consul_sd_configs:
2460 |     #     - server: consul.dev.test:8500
2461 |     #       scheme: http
2462 |     #       datacenter: dev
2463 |     #       tag_separator: ','
2464 |     #       services:
2465 |     #         - metrics-prometheus-alertmanager
2466 |
2467 |     ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
2468 |     ## them separately from the helm deployment, you can use this section.
2469 |     ## Expected values are the secret name and key
2470 |     ## Cannot be used with additionalAlertManagerConfigs
2471 |     additionalAlertManagerConfigsSecret: {}
2472 |     #   name:
2473 |     #   key:
2474 |
2475 |     ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
2476 |     ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
2477 |     ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
2478 |     ## As alert relabel configs are appended, the user is responsible for making sure they are valid. Note that using this feature may make it
2479 |     ## possible to break upgrades of Prometheus. It is advised to review the Prometheus release notes to ensure that no incompatible alert relabel
2480 |     ## configs are going to break Prometheus after the upgrade.
2481 |     ##
2482 |     additionalAlertRelabelConfigs: []
2483 |     # - separator: ;
2484 |     #   regex: prometheus_replica
2485 |     #   replacement: $1
2486 |     #   action: labeldrop
2487 |
2488 |     ## SecurityContext holds pod-level security attributes and common container settings.
2489 |     ## This defaults to a non-root user with uid 1000 and gid 2000.
2490 |     ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md
2491 |     ##
2492 |     securityContext:
2493 |       runAsGroup: 2000
2494 |       runAsNonRoot: true
2495 |       runAsUser: 1000
2496 |       fsGroup: 2000
2497 |
2498 |     ## Priority class assigned to the Pods
2499 |     ##
2500 |     priorityClassName: ""
2501 |
2502 |     ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
2503 |     ## This section is experimental; it may change significantly, without deprecation notice or backward compatibility, in any release.
2504 |     ##
2505 |     ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#thanosspec
2506 |     ##
2507 |     thanos: {}
2508 |     # secretProviderClass:
2509 |     #   provider: gcp
2510 |     #   parameters:
2511 |     #     secrets: |
2512 |     #       - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
2513 |     #         fileName: "objstore.yaml"
2514 |     # objectStorageConfigFile: /var/secrets/object-store.yaml
2515 |
2516 |     ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
2517 |     ## If using a proxy extraContainer, update targetPort with the proxy container port.
2518 |     containers: []
2519 |
2520 |     ## InitContainers allows injecting additional initContainers. This is meant to allow making changes
2521 |     ## (permissions, directory tree) on mounted volumes before starting Prometheus
2522 |     initContainers: []
2523 |
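    ## As a rough sketch of the containers list above (the name, image, and port are all hypothetical),
    ## an authentication proxy sidecar could be injected like:
    # containers:
    # - name: auth-proxy
    #   image: example.com/auth-proxy:latest
    #   ports:
    #     - containerPort: 4180
    #       name: proxy
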
2524 |     ## PortName to use for Prometheus.
2525 |     ##
2526 |     portName: "web"
2527 |
2528 |     ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
2529 |     ## on the file system of the Prometheus container, e.g. bearer token files.
2530 |     arbitraryFSAccessThroughSMs: false
2531 |
2532 |     ## OverrideHonorLabels, if set to true, overrides all user-configured honor_labels. If HonorLabels is set to true in a
2533 |     ## ServiceMonitor or PodMonitor, this overrides honor_labels to false.
2534 |     overrideHonorLabels: false
2535 |
2536 |     ## OverrideHonorTimestamps allows globally enforcing honoring of timestamps in all scrape configs.
2537 |     overrideHonorTimestamps: false
2538 |
2539 |     ## IgnoreNamespaceSelectors, if set to true, will ignore NamespaceSelector settings from the podmonitor and servicemonitor
2540 |     ## configs, and they will only discover endpoints within their current namespace. Defaults to false.
2541 |     ignoreNamespaceSelectors: false
2542 |
2543 |     ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each user-created alert and metric.
2544 |     ## The label value will always be the namespace of the object that is being created.
2545 |     ## Disabled by default
2546 |     enforcedNamespaceLabel: ""
2547 |
2548 |     ## PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from the enforced addition of namespace labels.
2549 |     ## Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are set for each pair
2550 |     prometheusRulesExcludedFromEnforce: []
2551 |
2552 |     ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
2553 |     ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
2554 |     ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
2555 |     ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
2556 |     queryLogFile: false
2557 |
2558 |     ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This overrides any SampleLimit
2559 |     ## set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep the overall
2560 |     ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
2561 |     enforcedSampleLimit: false
2562 |
2563 |     ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
2564 |     ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
2565 |     ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
2566 |     ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
2567 |     enforcedTargetLimit: false
2568 |
2569 |
2570 |     ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels are present
2571 |     ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
2572 |     ## 2.27.0 and newer.
2573 |     enforcedLabelLimit: false
2574 |
2575 |     ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer than this number
2576 |     ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
2577 |     ## 2.27.0 and newer.
2578 |     enforcedLabelNameLengthLimit: false
2579 |
2580 |     ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer than this
2581 |     ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
2582 |     ## versions 2.27.0 and newer.
2583 |     enforcedLabelValueLengthLimit: false
2584 |
2585 |     ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
2586 |     ## in Prometheus, so it may change in any upcoming release.
2587 |     allowOverlappingBlocks: false
2588 |
2589 |   additionalRulesForClusterRole: []
2590 |   # - apiGroups: [ "" ]
2591 |   #   resources:
2592 |   #     - nodes/proxy
2593 |   #   verbs: [ "get", "list", "watch" ]
2594 |
2595 |   additionalServiceMonitors: []
2596 |   ## Name of the ServiceMonitor to create
2597 |   ##
2598 |   # - name: ""
2599 |
2600 |   ## Additional labels to set, used for the ServiceMonitorSelector. Together with standard labels from
2601 |   ## the chart
2602 |   ##
2603 |   # additionalLabels: {}
2604 |
2605 |   ## Service label for use in assembling a job name of the form