├── .pre-commit-config.yaml ├── Dockerfile ├── Dockerfile.Jenkins ├── Jenkinsfile ├── README.md ├── app.py ├── assets ├── add_gke_con.gif ├── chatbot.gif ├── chatbot_ui.png ├── compute_instance.png ├── connect2gke.png ├── create_jenkins_pipe.gif ├── dockerhub_token.gif ├── github_token.gif ├── gke_ui.png ├── grafana.gif ├── jenkins_logs.png ├── jenkins_setup.gif ├── jenkins_stage.png ├── jenkins_success.png ├── monitor.gif ├── new_pipeline.png ├── newest_pipeline.png ├── ngrok.gif ├── pipeline.png ├── popup.png ├── test.pdf └── tgi.gif ├── env.py ├── helm ├── model-serving │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── nginx-ingress.yaml │ │ └── service.yaml │ └── values.yaml ├── monitor │ ├── Chart.yaml │ ├── charts │ │ ├── kube-prometheus-stack │ │ │ ├── CONTRIBUTING.md │ │ │ ├── Chart.lock │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── charts │ │ │ │ ├── grafana │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── ci │ │ │ │ │ │ ├── default-values.yaml │ │ │ │ │ │ ├── with-affinity-values.yaml │ │ │ │ │ │ ├── with-dashboard-json-values.yaml │ │ │ │ │ │ ├── with-dashboard-values.yaml │ │ │ │ │ │ ├── with-extraconfigmapmounts-values.yaml │ │ │ │ │ │ ├── with-image-renderer-values.yaml │ │ │ │ │ │ └── with-persistence.yaml │ │ │ │ │ ├── dashboards │ │ │ │ │ │ └── custom-dashboard.json │ │ │ │ │ ├── templates │ │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ │ ├── _pod.tpl │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ ├── configmap-dashboard-provider.yaml │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── dashboards-json-configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── extra-manifests.yaml │ │ │ │ │ │ ├── headless-service.yaml │ │ │ │ │ │ ├── hpa.yaml │ │ │ │ │ │ ├── image-renderer-deployment.yaml │ │ │ │ │ │ ├── image-renderer-hpa.yaml │ │ │ │ │ │ ├── image-renderer-network-policy.yaml │ 
│ │ │ │ │ ├── image-renderer-service.yaml │ │ │ │ │ │ ├── image-renderer-servicemonitor.yaml │ │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ │ ├── poddisruptionbudget.yaml │ │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ │ ├── pvc.yaml │ │ │ │ │ │ ├── role.yaml │ │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ │ ├── secret-env.yaml │ │ │ │ │ │ ├── secret.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ │ ├── statefulset.yaml │ │ │ │ │ │ └── tests │ │ │ │ │ │ │ ├── test-configmap.yaml │ │ │ │ │ │ │ ├── test-podsecuritypolicy.yaml │ │ │ │ │ │ │ ├── test-role.yaml │ │ │ │ │ │ │ ├── test-rolebinding.yaml │ │ │ │ │ │ │ ├── test-serviceaccount.yaml │ │ │ │ │ │ │ └── test.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── kube-state-metrics │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── templates │ │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ │ ├── ciliumnetworkpolicy.yaml │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ ├── crs-configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── kubeconfig-secret.yaml │ │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ │ ├── pdb.yaml │ │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ │ │ ├── rbac-configmap.yaml │ │ │ │ │ │ ├── role.yaml │ │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ │ ├── stsdiscovery-role.yaml │ │ │ │ │ │ ├── stsdiscovery-rolebinding.yaml │ │ │ │ │ │ └── verticalpodautoscaler.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── prometheus-node-exporter │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── ci │ │ │ │ │ │ └── port-values.yaml │ │ │ │ │ ├── templates │ │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ ├── 
clusterrolebinding.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ │ ├── podmonitor.yaml │ │ │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ │ │ ├── psp.yaml │ │ │ │ │ │ ├── rbac-configmap.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ │ └── verticalpodautoscaler.yaml │ │ │ │ │ └── values.yaml │ │ │ │ └── prometheus-windows-exporter │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── templates │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ ├── config.yaml │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ ├── podmonitor.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ └── values.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── alertmanager │ │ │ │ │ ├── alertmanager.yaml │ │ │ │ │ ├── extrasecret.yaml │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ ├── ingressperreplica.yaml │ │ │ │ │ ├── podDisruptionBudget.yaml │ │ │ │ │ ├── psp-role.yaml │ │ │ │ │ ├── psp-rolebinding.yaml │ │ │ │ │ ├── psp.yaml │ │ │ │ │ ├── secret.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ └── serviceperreplica.yaml │ │ │ │ ├── exporters │ │ │ │ │ ├── core-dns │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── kube-api-server │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── kube-controller-manager │ │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── kube-dns │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── kube-etcd │ │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── kube-proxy │ │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ ├── 
kube-scheduler │ │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ └── kubelet │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── extra-objects.yaml │ │ │ │ ├── grafana │ │ │ │ │ ├── configmap-dashboards.yaml │ │ │ │ │ ├── configmaps-datasources.yaml │ │ │ │ │ └── dashboards-1.14 │ │ │ │ │ │ ├── alertmanager-overview.yaml │ │ │ │ │ │ ├── apiserver.yaml │ │ │ │ │ │ ├── cluster-total.yaml │ │ │ │ │ │ ├── controller-manager.yaml │ │ │ │ │ │ ├── etcd.yaml │ │ │ │ │ │ ├── grafana-overview.yaml │ │ │ │ │ │ ├── k8s-coredns.yaml │ │ │ │ │ │ ├── k8s-resources-cluster.yaml │ │ │ │ │ │ ├── k8s-resources-multicluster.yaml │ │ │ │ │ │ ├── k8s-resources-namespace.yaml │ │ │ │ │ │ ├── k8s-resources-node.yaml │ │ │ │ │ │ ├── k8s-resources-pod.yaml │ │ │ │ │ │ ├── k8s-resources-windows-cluster.yaml │ │ │ │ │ │ ├── k8s-resources-windows-namespace.yaml │ │ │ │ │ │ ├── k8s-resources-windows-pod.yaml │ │ │ │ │ │ ├── k8s-resources-workload.yaml │ │ │ │ │ │ ├── k8s-resources-workloads-namespace.yaml │ │ │ │ │ │ ├── k8s-windows-cluster-rsrc-use.yaml │ │ │ │ │ │ ├── k8s-windows-node-rsrc-use.yaml │ │ │ │ │ │ ├── kubelet.yaml │ │ │ │ │ │ ├── namespace-by-pod.yaml │ │ │ │ │ │ ├── namespace-by-workload.yaml │ │ │ │ │ │ ├── node-cluster-rsrc-use.yaml │ │ │ │ │ │ ├── node-rsrc-use.yaml │ │ │ │ │ │ ├── nodes-darwin.yaml │ │ │ │ │ │ ├── nodes.yaml │ │ │ │ │ │ ├── persistentvolumesusage.yaml │ │ │ │ │ │ ├── pod-total.yaml │ │ │ │ │ │ ├── prometheus-remote-write.yaml │ │ │ │ │ │ ├── prometheus.yaml │ │ │ │ │ │ ├── proxy.yaml │ │ │ │ │ │ ├── scheduler.yaml │ │ │ │ │ │ └── workload-total.yaml │ │ │ │ ├── prometheus-operator │ │ │ │ │ ├── admission-webhooks │ │ │ │ │ │ ├── job-patch │ │ │ │ │ │ │ ├── ciliumnetworkpolicy-createSecret.yaml │ │ │ │ │ │ │ ├── ciliumnetworkpolicy-patchWebhook.yaml │ │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ │ ├── job-createSecret.yaml │ │ │ │ │ │ │ ├── job-patchWebhook.yaml │ │ 
│ │ │ │ │ ├── networkpolicy-createSecret.yaml │ │ │ │ │ │ │ ├── networkpolicy-patchWebhook.yaml │ │ │ │ │ │ │ ├── psp.yaml │ │ │ │ │ │ │ ├── role.yaml │ │ │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ │ ├── mutatingWebhookConfiguration.yaml │ │ │ │ │ │ └── validatingWebhookConfiguration.yaml │ │ │ │ │ ├── aggregate-clusterroles.yaml │ │ │ │ │ ├── certmanager.yaml │ │ │ │ │ ├── ciliumnetworkpolicy.yaml │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ │ ├── psp.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ └── verticalpodautoscaler.yaml │ │ │ │ ├── prometheus │ │ │ │ │ ├── _rules.tpl │ │ │ │ │ ├── additionalAlertRelabelConfigs.yaml │ │ │ │ │ ├── additionalAlertmanagerConfigs.yaml │ │ │ │ │ ├── additionalPrometheusRules.yaml │ │ │ │ │ ├── additionalScrapeConfigs.yaml │ │ │ │ │ ├── ciliumnetworkpolicy.yaml │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ ├── csi-secret.yaml │ │ │ │ │ ├── extrasecret.yaml │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ ├── ingressThanosSidecar.yaml │ │ │ │ │ ├── ingressperreplica.yaml │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ ├── podDisruptionBudget.yaml │ │ │ │ │ ├── podmonitors.yaml │ │ │ │ │ ├── prometheus.yaml │ │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ │ ├── psp.yaml │ │ │ │ │ ├── rules-1.14 │ │ │ │ │ │ ├── alertmanager.rules.yaml │ │ │ │ │ │ ├── config-reloaders.yaml │ │ │ │ │ │ ├── etcd.yaml │ │ │ │ │ │ ├── general.rules.yaml │ │ │ │ │ │ ├── k8s.rules.yaml │ │ │ │ │ │ ├── kube-apiserver-availability.rules.yaml │ │ │ │ │ │ ├── kube-apiserver-burnrate.rules.yaml │ │ │ │ │ │ ├── kube-apiserver-histogram.rules.yaml │ │ │ │ │ │ ├── kube-apiserver-slos.yaml │ │ │ │ │ │ ├── kube-prometheus-general.rules.yaml │ │ │ │ 
│ │ ├── kube-prometheus-node-recording.rules.yaml │ │ │ │ │ │ ├── kube-scheduler.rules.yaml │ │ │ │ │ │ ├── kube-state-metrics.yaml │ │ │ │ │ │ ├── kubelet.rules.yaml │ │ │ │ │ │ ├── kubernetes-apps.yaml │ │ │ │ │ │ ├── kubernetes-resources.yaml │ │ │ │ │ │ ├── kubernetes-storage.yaml │ │ │ │ │ │ ├── kubernetes-system-apiserver.yaml │ │ │ │ │ │ ├── kubernetes-system-controller-manager.yaml │ │ │ │ │ │ ├── kubernetes-system-kube-proxy.yaml │ │ │ │ │ │ ├── kubernetes-system-kubelet.yaml │ │ │ │ │ │ ├── kubernetes-system-scheduler.yaml │ │ │ │ │ │ ├── kubernetes-system.yaml │ │ │ │ │ │ ├── node-exporter.rules.yaml │ │ │ │ │ │ ├── node-exporter.yaml │ │ │ │ │ │ ├── node-network.yaml │ │ │ │ │ │ ├── node.rules.yaml │ │ │ │ │ │ ├── prometheus-operator.yaml │ │ │ │ │ │ ├── prometheus.yaml │ │ │ │ │ │ ├── windows.node.rules.yaml │ │ │ │ │ │ └── windows.pod.rules.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceThanosSidecar.yaml │ │ │ │ │ ├── serviceThanosSidecarExternal.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ ├── servicemonitorThanosSidecar.yaml │ │ │ │ │ ├── servicemonitors.yaml │ │ │ │ │ └── serviceperreplica.yaml │ │ │ │ └── thanos-ruler │ │ │ │ │ ├── extrasecret.yaml │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ ├── podDisruptionBudget.yaml │ │ │ │ │ ├── ruler.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ └── values.yaml │ │ ├── loki-stack │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── charts │ │ │ │ ├── fluent-bit │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── templates │ │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ │ ├── role.yaml │ │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ │ ├── service-headless.yaml │ │ │ │ │ │ 
├── serviceaccount.yaml │ │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ │ └── values.yaml │ │ │ │ └── loki │ │ │ │ │ ├── .helmignore │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── README.md │ │ │ │ │ ├── templates │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ ├── configmap-alert.yaml │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ ├── networkpolicy.yaml │ │ │ │ │ ├── pdb.yaml │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ ├── prometheusrule.yaml │ │ │ │ │ ├── role.yaml │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ ├── secret.yaml │ │ │ │ │ ├── service-headless.yaml │ │ │ │ │ ├── service-memberlist.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ └── statefulset.yaml │ │ │ │ │ └── values.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── datasources.yaml │ │ │ │ └── tests │ │ │ │ │ ├── loki-test-configmap.yaml │ │ │ │ │ └── loki-test-pod.yaml │ │ │ └── values.yaml │ │ ├── opentelemetry-collector │ │ │ ├── .helmignore │ │ │ ├── CONTRIBUTING.md │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── UPGRADING.md │ │ │ ├── ci │ │ │ │ ├── GOMEMLIMIT-values.yaml │ │ │ │ ├── clusterrole-values.yaml │ │ │ │ ├── config-override-values.yaml │ │ │ │ ├── daemonset-values.yaml │ │ │ │ ├── deployment-values.yaml │ │ │ │ ├── disabling-protocols-values.yaml │ │ │ │ ├── hpa-deployment-values.yaml │ │ │ │ ├── hpa-statefulset-values.yaml │ │ │ │ ├── multiple-ingress-values.yaml │ │ │ │ ├── networkpolicy-override-values.yaml │ │ │ │ ├── networkpolicy-values.yaml │ │ │ │ ├── preset-clustermetrics-values.yaml │ │ │ │ ├── preset-hostmetrics-values.yaml │ │ │ │ ├── preset-k8sevents-values.yaml │ │ │ │ ├── preset-kubeletmetrics-values.yaml │ │ │ │ ├── preset-kubernetesattributes-values.yaml │ │ │ │ ├── preset-logscollection-values.yaml │ │ │ │ ├── probes-values.yaml │ │ │ │ └── statefulset-values.yaml │ │ │ ├── examples │ │ │ │ ├── README.md │ │ │ │ ├── daemonset-and-deployment │ │ │ │ │ ├── daemonset-values.yaml │ 
│ │ │ │ ├── deployment-values.yaml │ │ │ │ │ └── rendered │ │ │ │ │ │ ├── configmap-agent.yaml │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ ├── daemonset-collector-logs │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-agent.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── daemonset-hostmetrics │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-agent.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── daemonset-lifecycle-hooks │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-agent.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── daemonset-only │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-agent.yaml │ │ │ │ │ │ ├── daemonset.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── deployment-only │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── deployment-otlp-traces │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── deployment-use-existing-configMap │ │ │ │ │ ├── deployment-values.yaml │ │ │ │ │ └── rendered │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ ├── kubernetesAttributes │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── statefulset-only │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-statefulset.yaml 
│ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ │ └── statefulset.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── statefulset-with-pvc │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap-statefulset.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ │ └── statefulset.yaml │ │ │ │ │ └── values.yaml │ │ │ │ ├── using-GOMEMLIMIT │ │ │ │ │ ├── rendered │ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ │ ├── service.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ │ └── using-custom-config │ │ │ │ │ ├── rendered │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ └── values.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _config.tpl │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── _pod.tpl │ │ │ │ ├── clusterrole.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── configmap-agent.yaml │ │ │ │ ├── configmap-statefulset.yaml │ │ │ │ ├── configmap.yaml │ │ │ │ ├── daemonset.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── hpa.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── networkpolicy.yaml │ │ │ │ ├── pdb.yaml │ │ │ │ ├── podmonitor.yaml │ │ │ │ ├── prometheusrule.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── servicemonitor.yaml │ │ │ │ └── statefulset.yaml │ │ │ ├── values.schema.json │ │ │ └── values.yaml │ │ └── tempo │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── README.md.gotmpl │ │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ ├── configmap-tempo-query.yaml │ │ │ ├── configmap-tempo.yaml │ │ │ ├── ingress-tempo-query.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ ├── servicemonitor.yaml │ │ │ └── statefulset.yaml │ │ │ └── values.yaml │ ├── crds │ │ ├── crd-alertmanagerconfigs.yaml │ │ ├── crd-alertmanagers.yaml │ │ ├── crd-podmonitors.yaml │ │ ├── crd-probes.yaml │ │ ├── crd-prometheusagents.yaml │ │ ├── crd-prometheuses.yaml │ │ ├── crd-prometheusrules.yaml │ │ 
├── crd-scrapeconfigs.yaml │ │ ├── crd-servicemonitors.yaml │ │ └── crd-thanosrulers.yaml │ └── values.yaml └── nginx-ingress │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── crds │ ├── appprotect.f5.com_aplogconfs.yaml │ ├── appprotect.f5.com_appolicies.yaml │ ├── appprotect.f5.com_apusersigs.yaml │ ├── appprotectdos.f5.com_apdoslogconfs.yaml │ ├── appprotectdos.f5.com_apdospolicy.yaml │ ├── appprotectdos.f5.com_dosprotectedresources.yaml │ ├── externaldns.nginx.org_dnsendpoints.yaml │ ├── k8s.nginx.org_globalconfigurations.yaml │ ├── k8s.nginx.org_policies.yaml │ ├── k8s.nginx.org_transportservers.yaml │ ├── k8s.nginx.org_virtualserverroutes.yaml │ └── k8s.nginx.org_virtualservers.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── controller-configmap.yaml │ ├── controller-daemonset.yaml │ ├── controller-deployment.yaml │ ├── controller-globalconfiguration.yaml │ ├── controller-hpa.yaml │ ├── controller-ingress-class.yaml │ ├── controller-leader-election-configmap.yaml │ ├── controller-pdb.yaml │ ├── controller-secret.yaml │ ├── controller-service.yaml │ ├── controller-serviceaccount.yaml │ ├── controller-servicemonitor.yaml │ ├── controller-wildcard-secret.yaml │ └── rbac.yaml │ ├── values-icp.yaml │ ├── values-nsm.yaml │ ├── values-plus.yaml │ ├── values.schema.json │ └── values.yaml ├── iac ├── ansible │ ├── inventory │ ├── playbooks │ │ ├── create_compute_instance.yml │ │ └── install_docker.yaml │ └── requirements.txt └── terraform │ ├── main.tf │ └── variables.tf ├── notebooks ├── Dataset_Creating_For_Fine_Tuning_TinyLlama.ipynb └── Finetuning_TinyLlama.ipynb ├── requirements.txt └── utils.py /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: "^\ 2 | (third-party/.*)\ 3 | " 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.1.0 8 | hooks: 9 | - id: detect-private-key 10 | - id: end-of-file-fixer 11 | - id: requirements-txt-fixer 12 | - 
id: trailing-whitespace 13 | 14 | # Format Python files 15 | - repo: https://github.com/psf/black 16 | rev: 23.7.0 17 | hooks: 18 | - id: black 19 | 20 | # Sort the order of importing libs 21 | - repo: https://github.com/PyCQA/isort 22 | rev: 5.12.0 23 | hooks: 24 | - id: isort 25 | args: [--profile=black] 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt /app 6 | 7 | RUN pip install -r requirements.txt --no-cache-dir 8 | 9 | COPY app.py /app 10 | 11 | COPY utils.py /app 12 | 13 | COPY env.py /app 14 | 15 | EXPOSE 8051 16 | 17 | CMD [ "streamlit", "run", "app.py", "--server.port", "8051", "--server.enableXsrfProtection", "false", "--server.enableCORS", "false" ] 18 | -------------------------------------------------------------------------------- /Dockerfile.Jenkins: -------------------------------------------------------------------------------- 1 | # Ref: https://hackmamba.io/blog/2022/04/running-docker-in-a-jenkins-container/ 2 | FROM jenkins/jenkins:lts 3 | USER root 4 | RUN curl https://get.docker.com > dockerinstall && chmod 777 dockerinstall && ./dockerinstall && \ 5 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ 6 | chmod +x ./kubectl && \ 7 | mv ./kubectl /usr/local/bin/kubectl && \ 8 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 9 | USER jenkins 10 | -------------------------------------------------------------------------------- /assets/add_gke_con.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/add_gke_con.gif 
-------------------------------------------------------------------------------- /assets/chatbot.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/chatbot.gif -------------------------------------------------------------------------------- /assets/chatbot_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/chatbot_ui.png -------------------------------------------------------------------------------- /assets/compute_instance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/compute_instance.png -------------------------------------------------------------------------------- /assets/connect2gke.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/connect2gke.png -------------------------------------------------------------------------------- /assets/create_jenkins_pipe.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/create_jenkins_pipe.gif -------------------------------------------------------------------------------- /assets/dockerhub_token.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/dockerhub_token.gif -------------------------------------------------------------------------------- /assets/github_token.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/github_token.gif -------------------------------------------------------------------------------- /assets/gke_ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/gke_ui.png -------------------------------------------------------------------------------- /assets/grafana.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/grafana.gif -------------------------------------------------------------------------------- /assets/jenkins_logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/jenkins_logs.png -------------------------------------------------------------------------------- /assets/jenkins_setup.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/jenkins_setup.gif -------------------------------------------------------------------------------- /assets/jenkins_stage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/jenkins_stage.png -------------------------------------------------------------------------------- /assets/jenkins_success.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/jenkins_success.png -------------------------------------------------------------------------------- /assets/monitor.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/monitor.gif -------------------------------------------------------------------------------- /assets/new_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/new_pipeline.png -------------------------------------------------------------------------------- /assets/newest_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/newest_pipeline.png -------------------------------------------------------------------------------- /assets/ngrok.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/ngrok.gif -------------------------------------------------------------------------------- /assets/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/pipeline.png -------------------------------------------------------------------------------- /assets/popup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/popup.png 
-------------------------------------------------------------------------------- /assets/test.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/test.pdf -------------------------------------------------------------------------------- /assets/tgi.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LongDaHo/Chatbot-with-LLM/b6eb90811daec471fa5a2db8c6793a8f40e55d1b/assets/tgi.gif -------------------------------------------------------------------------------- /env.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") 4 | TOKENIZER_NAME = os.getenv("TOKENIZER_NAME") 5 | INFERENCE_SERVER_URL = os.getenv("INFERENCE_SERVER_URL") 6 | OTLP_ENDPOINT = os.getenv("OTLP_ENDPOINT") 7 | SVC_NAME = os.getenv("SVC_NAME") 8 | -------------------------------------------------------------------------------- /helm/model-serving/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm/model-serving/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: chatbot 3 | description: My Helm Chart for Chatbot 4 | 5 | # A chart can be `application` or `library`, 6 | # we don't use `library` so often 7 | type: application 8 | 9 | # The chart version, which should be changed every time 10 | # you make an update to the chart 11 | version: 0.1.0 12 | 13 | # The version number of the application being deployed 14 | appVersion: "1.0.0" 15 | 16 | maintainers: 17 | - email: long.dh2000.bachkhoa@gmail.com 18 | name: daohoanglong 19 | -------------------------------------------------------------------------------- /helm/model-serving/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | The chat-with-gemma server can be accessed via port 8051 on the following DNS name from within your cluster 2 | -------------------------------------------------------------------------------- /helm/model-serving/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Release.Name }}-configmap 5 | data: 6 | huggingface_api_token: {{ .Values.config.huggingface_api_token }} 7 | tokenizer_name: {{ .Values.config.tokenizer_name }} 8 | inference_server_url: {{ .Values.config.inference_server_url }} 9 | otlp_endpoint: {{ .Values.config.otlp_endpoint }} 10 | svc_name: {{ .Values.config.svc_name }} 11 | --------------------------------------------------------------------------------
/helm/model-serving/templates/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: {{ .Release.Name }}-nginx-ingress 5 | namespace: model-serving 6 | # https://cloud.google.com/kubernetes-engine/docs/concepts/ingress#controller_summary 7 | annotations: 8 | kubernetes.io/ingress.class: "nginx" # which can be replaced by gce, gce-internal or istio 9 | nginx.org/websocket-services: {{ .Release.Name }} 10 | 11 | spec: 12 | rules: 13 | - host: mlops.chatbot.com 14 | http: 15 | paths: 16 | - path: / 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: {{ .Release.Name }} 21 | port: 22 | number: 8051 23 | -------------------------------------------------------------------------------- /helm/model-serving/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Release.Name }} 5 | labels: 6 | app: {{ .Release.Name }} 7 | namespace: model-serving 8 | spec: 9 | selector: 10 | app: {{ .Release.Name }} 11 | ports: 12 | - port: 8051 13 | protocol: TCP 14 | targetPort: 8051 15 | type: LoadBalancer 16 | -------------------------------------------------------------------------------- /helm/model-serving/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: hoanglong2410/chatbot 3 | tag: "latest" 4 | pullPolicy: Always 5 | config: 6 | huggingface_api_token: "" 7 | tokenizer_name: "BAAI/llm-embedder" 8 | inference_server_url: "" 9 | otlp_endpoint: "http://monitor-opentelemetry-collector.observability.svc.cluster.local:4317" 10 | svc_name: "tgi-traces" 11 | -------------------------------------------------------------------------------- /helm/monitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: 
monitor 3 | description: My Helm Chart for Monitor 4 | 5 | # A chart can be `application` or `library`, 6 | # we don't use `library` so often 7 | type: application 8 | 9 | # The chart vesion, which should be changed every time 10 | # you make an update to the chart 11 | version: 0.1.0 12 | 13 | # The version number of the application being deployed 14 | appVersion: "1.0.0" 15 | 16 | 17 | maintainers: 18 | - email: long.dh2000.bachkhoa@gmail.com 19 | name: daohoanglong 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | ## How to contribute to this chart 4 | 5 | 1. Fork this repository, develop and test your Chart. 6 | 1. Bump the chart version for every change. 7 | 1. Ensure PR title has the prefix `[kube-prometheus-stack]` 8 | 1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories 9 | 1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes. 10 | 1. Check for changes of RBAC rules. 11 | 1. Check for changes in CRD specs. 12 | 1. 
PR must pass the linter (`helm lint`) 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/Chart.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: kube-state-metrics 3 | repository: https://prometheus-community.github.io/helm-charts 4 | version: 5.8.1 5 | - name: prometheus-node-exporter 6 | repository: https://prometheus-community.github.io/helm-charts 7 | version: 4.18.1 8 | - name: grafana 9 | repository: https://grafana.github.io/helm-charts 10 | version: 6.57.4 11 | - name: prometheus-windows-exporter 12 | repository: https://prometheus-community.github.io/helm-charts 13 | version: 0.1.0 14 | digest: sha256:f6e8d4fe2fef5f6bc7534383e1671051e168711b3483fa8dfbcf0b0522d998ae 15 | generated: "2023-07-04T07:13:46.021269833Z" 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .vscode 20 | .project 21 | .idea/ 22 | *.tmproj 23 | OWNERS 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 9.2.4 3 | description: The leading tool for querying and visualizing time series and metrics. 
4 | home: https://grafana.net 5 | icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png 6 | kubeVersion: ^1.8.0-0 7 | maintainers: 8 | - email: zanhsieh@gmail.com 9 | name: zanhsieh 10 | - email: rluckie@cisco.com 11 | name: rtluckie 12 | - email: maor.friedman@redhat.com 13 | name: maorfr 14 | - email: miroslav.hadzhiev@gmail.com 15 | name: Xtigyro 16 | - email: mail@torstenwalter.de 17 | name: torstenwalter 18 | name: grafana 19 | sources: 20 | - https://github.com/grafana/grafana 21 | type: application 22 | version: 6.43.5 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/default-values.yaml: -------------------------------------------------------------------------------- 1 | # Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 2 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/with-affinity-values.yaml: -------------------------------------------------------------------------------- 1 | affinity: 2 | podAntiAffinity: 3 | preferredDuringSchedulingIgnoredDuringExecution: 4 | - podAffinityTerm: 5 | labelSelector: 6 | matchLabels: 7 | app.kubernetes.io/instance: grafana-test 8 | app.kubernetes.io/name: grafana 9 | topologyKey: failure-domain.beta.kubernetes.io/zone 10 | weight: 100 11 | requiredDuringSchedulingIgnoredDuringExecution: 12 | - labelSelector: 13 | matchLabels: 14 | app.kubernetes.io/instance: grafana-test 15 | app.kubernetes.io/name: grafana 16 | topologyKey: kubernetes.io/hostname 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/with-dashboard-values.yaml: -------------------------------------------------------------------------------- 1 | dashboards: 2 | my-provider: 3 | 
my-awesome-dashboard: 4 | gnetId: 10000 5 | revision: 1 6 | datasource: Prometheus 7 | dashboardProviders: 8 | dashboardproviders.yaml: 9 | apiVersion: 1 10 | providers: 11 | - name: 'my-provider' 12 | orgId: 1 13 | folder: '' 14 | type: file 15 | updateIntervalSeconds: 10 16 | disableDeletion: true 17 | editable: true 18 | options: 19 | path: /var/lib/grafana/dashboards/my-provider 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/with-extraconfigmapmounts-values.yaml: -------------------------------------------------------------------------------- 1 | extraConfigmapMounts: 2 | - name: '{{ template "grafana.fullname" . }}' 3 | configMap: '{{ template "grafana.fullname" . }}' 4 | mountPath: /var/lib/grafana/dashboards/test-dashboard.json 5 | # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap 6 | subPath: grafana.ini 7 | readOnly: true 8 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/with-image-renderer-values.yaml: -------------------------------------------------------------------------------- 1 | podLabels: 2 | customLableA: Aaaaa 3 | imageRenderer: 4 | enabled: true 5 | env: 6 | RENDERING_ARGS: --disable-gpu,--window-size=1280x758 7 | RENDERING_MODE: clustered 8 | podLabels: 9 | customLableB: Bbbbb 10 | networkPolicy: 11 | limitIngress: true 12 | limitEgress: true 13 | resources: 14 | limits: 15 | cpu: 1000m 16 | memory: 1000Mi 17 | requests: 18 | cpu: 500m 19 | memory: 50Mi 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/ci/with-persistence.yaml: -------------------------------------------------------------------------------- 1 | persistence: 2 | type: pvc 3 | enabled: true 4 | 
-------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/dashboards/custom-dashboard.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingRole) }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | {{- with .Values.annotations }} 8 | annotations: 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | name: {{ include "grafana.fullname" . }}-clusterrole 12 | {{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} 13 | rules: 14 | {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} 15 | - apiGroups: [""] # "" indicates the core API group 16 | resources: ["configmaps", "secrets"] 17 | verbs: ["get", "watch", "list"] 18 | {{- end}} 19 | {{- with .Values.rbac.extraClusterRoleRules }} 20 | {{- toYaml . 
| nindent 2 }} 21 | {{- end}} 22 | {{- else }} 23 | rules: [] 24 | {{- end}} 25 | {{- end}} 26 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-clusterrolebinding 6 | labels: 7 | {{- include "grafana.labels" . | nindent 4 }} 8 | {{- with .Values.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ include "grafana.serviceAccountName" . }} 15 | namespace: {{ include "grafana.namespace" . }} 16 | roleRef: 17 | kind: ClusterRole 18 | {{- if .Values.rbac.useExistingRole }} 19 | name: {{ .Values.rbac.useExistingRole }} 20 | {{- else }} 21 | name: {{ include "grafana.fullname" . }}-clusterrole 22 | {{- end }} 23 | apiGroup: rbac.authorization.k8s.io 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/configmap-dashboard-provider.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | {{- with .Values.annotations }} 8 | annotations: 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | name: {{ include "grafana.fullname" . }}-config-dashboards 12 | namespace: {{ include "grafana.namespace" . 
}} 13 | data: 14 | provider.yaml: |- 15 | apiVersion: 1 16 | providers: 17 | - name: '{{ .Values.sidecar.dashboards.provider.name }}' 18 | orgId: {{ .Values.sidecar.dashboards.provider.orgid }} 19 | {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} 20 | folder: '{{ .Values.sidecar.dashboards.provider.folder }}' 21 | {{- end }} 22 | type: {{ .Values.sidecar.dashboards.provider.type }} 23 | disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} 24 | allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} 25 | updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} 26 | options: 27 | foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} 28 | path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/dashboards-json-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.dashboards }} 2 | {{ $files := .Files }} 3 | {{- range $provider, $dashboards := .Values.dashboards }} 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} 8 | namespace: {{ include "grafana.namespace" $ }} 9 | labels: 10 | {{- include "grafana.labels" $ | nindent 4 }} 11 | dashboard-provider: {{ $provider }} 12 | {{- if $dashboards }} 13 | data: 14 | {{- $dashboardFound := false }} 15 | {{- range $key, $value := $dashboards }} 16 | {{- if (or (hasKey $value "json") (hasKey $value "file")) }} 17 | {{- $dashboardFound = true }} 18 | {{- print $key | nindent 2 }}.json: 19 | {{- if hasKey $value "json" }} 20 | |- 21 | {{- $value.json | nindent 6 }} 22 | {{- end }} 23 | {{- if hasKey $value "file" }} 24 | 
{{- toYaml ( $files.Get $value.file ) | nindent 4}} 25 | {{- end }} 26 | {{- end }} 27 | {{- end }} 28 | {{- if not $dashboardFound }} 29 | {} 30 | {{- end }} 31 | {{- end }} 32 | --- 33 | {{- end }} 34 | 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/extra-manifests.yaml: -------------------------------------------------------------------------------- 1 | {{ range .Values.extraObjects }} 2 | --- 3 | {{ tpl (toYaml .) $ }} 4 | {{ end }} 5 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/headless-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $sts := list "sts" "StatefulSet" "statefulset" -}} 2 | {{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: {{ include "grafana.fullname" . }}-headless 7 | namespace: {{ include "grafana.namespace" . }} 8 | labels: 9 | {{- include "grafana.labels" . | nindent 4 }} 10 | {{- with .Values.annotations }} 11 | annotations: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | clusterIP: None 16 | selector: 17 | {{- include "grafana.selectorLabels" . 
| nindent 4 }} 18 | type: ClusterIP 19 | ports: 20 | - name: {{ .Values.gossipPortName }}-tcp 21 | port: 9094 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/image-renderer-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-image-renderer 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.imageRenderer.labels" . | nindent 4 }} 9 | {{- with .Values.imageRenderer.service.labels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- with .Values.imageRenderer.service.annotations }} 13 | annotations: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | spec: 17 | type: ClusterIP 18 | {{- with .Values.imageRenderer.service.clusterIP }} 19 | clusterIP: {{ . }} 20 | {{- end }} 21 | ports: 22 | - name: {{ .Values.imageRenderer.service.portName }} 23 | port: {{ .Values.imageRenderer.service.port }} 24 | protocol: TCP 25 | targetPort: {{ .Values.imageRenderer.service.targetPort }} 26 | {{- with .Values.imageRenderer.appProtocol }} 27 | appProtocol: {{ . }} 28 | {{- end }} 29 | selector: 30 | {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/poddisruptionbudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget }} 2 | apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ include "grafana.fullname" . }} 6 | namespace: {{ include "grafana.namespace" . 
}} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.labels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | {{- with .Values.podDisruptionBudget.minAvailable }} 14 | minAvailable: {{ . }} 15 | {{- end }} 16 | {{- with .Values.podDisruptionBudget.maxUnavailable }} 17 | maxUnavailable: {{ . }} 18 | {{- end }} 19 | selector: 20 | matchLabels: 21 | {{- include "grafana.selectorLabels" . | nindent 6 }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: {{ include "grafana.fullname" . }} 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.persistence.extraPvcLabels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- with .Values.persistence.annotations }} 13 | annotations: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | {{- with .Values.persistence.finalizers }} 17 | finalizers: 18 | {{- toYaml . | nindent 4 }} 19 | {{- end }} 20 | spec: 21 | accessModes: 22 | {{- range .Values.persistence.accessModes }} 23 | - {{ . | quote }} 24 | {{- end }} 25 | resources: 26 | requests: 27 | storage: {{ .Values.persistence.size | quote }} 28 | {{- with .Values.persistence.storageClassName }} 29 | storageClassName: {{ . }} 30 | {{- end }} 31 | {{- with .Values.persistence.selectorLabels }} 32 | selector: 33 | matchLabels: 34 | {{- toYaml . 
| nindent 6 }} 35 | {{- end }} 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} 2 | apiVersion: {{ include "grafana.rbac.apiVersion" . }} 3 | kind: Role 4 | metadata: 5 | name: {{ include "grafana.fullname" . }} 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | {{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} 14 | rules: 15 | {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 16 | - apiGroups: ['extensions'] 17 | resources: ['podsecuritypolicies'] 18 | verbs: ['use'] 19 | resourceNames: [{{ include "grafana.fullname" . }}] 20 | {{- end }} 21 | {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} 22 | - apiGroups: [""] # "" indicates the core API group 23 | resources: ["configmaps", "secrets"] 24 | verbs: ["get", "watch", "list"] 25 | {{- end }} 26 | {{- with .Values.rbac.extraRoleRules }} 27 | {{- toYaml . 
| nindent 2 }} 28 | {{- end}} 29 | {{- else }} 30 | rules: [] 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: {{ include "grafana.rbac.apiVersion" . }} 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ include "grafana.fullname" . }} 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | {{- if .Values.rbac.useExistingRole }} 17 | name: {{ .Values.rbac.useExistingRole }} 18 | {{- else }} 19 | name: {{ include "grafana.fullname" . }} 20 | {{- end }} 21 | subjects: 22 | - kind: ServiceAccount 23 | name: {{ include "grafana.serviceAccountName" . }} 24 | namespace: {{ include "grafana.namespace" . }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/secret-env.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.envRenderSecret }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-env 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . 
| nindent 4 }} 9 | type: Opaque 10 | data: 11 | {{- range $key, $val := .Values.envRenderSecret }} 12 | {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} 13 | {{- end }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "grafana.fullname" . }} 6 | namespace: {{ include "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | type: Opaque 14 | data: 15 | {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} 16 | admin-user: {{ .Values.adminUser | b64enc | quote }} 17 | {{- if .Values.adminPassword }} 18 | admin-password: {{ .Values.adminPassword | b64enc | quote }} 19 | {{- else }} 20 | admin-password: {{ include "grafana.password" . }} 21 | {{- end }} 22 | {{- end }} 23 | {{- if not .Values.ldap.existingSecret }} 24 | ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} 25 | {{- end }} 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | {{- $root := . 
-}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | labels: 7 | {{- include "grafana.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.labels }} 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | {{- with .Values.serviceAccount.annotations }} 12 | annotations: 13 | {{- tpl (toYaml . | nindent 4) $root }} 14 | {{- end }} 15 | name: {{ include "grafana.serviceAccountName" . }} 16 | namespace: {{ include "grafana.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/tests/test-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.testFramework.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-test 6 | namespace: {{ include "grafana.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" 10 | labels: 11 | {{- include "grafana.labels" . | nindent 4 }} 12 | data: 13 | run.sh: |- 14 | @test "Test Health" { 15 | url="http://{{ include "grafana.fullname" . }}/api/health" 16 | 17 | code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') 18 | [ "$code" == "200" ] 19 | } 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/tests/test-podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ include "grafana.fullname" . 
}}-test 6 | annotations: 7 | "helm.sh/hook": test-success 8 | "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" 9 | labels: 10 | {{- include "grafana.labels" . | nindent 4 }} 11 | spec: 12 | allowPrivilegeEscalation: true 13 | privileged: false 14 | hostNetwork: false 15 | hostIPC: false 16 | hostPID: false 17 | fsGroup: 18 | rule: RunAsAny 19 | seLinux: 20 | rule: RunAsAny 21 | supplementalGroups: 22 | rule: RunAsAny 23 | runAsUser: 24 | rule: RunAsAny 25 | volumes: 26 | - configMap 27 | - downwardAPI 28 | - emptyDir 29 | - projected 30 | - csi 31 | - secret 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/tests/test-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-test 6 | namespace: {{ include "grafana.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" 10 | labels: 11 | {{- include "grafana.labels" . | nindent 4 }} 12 | rules: 13 | - apiGroups: ['policy'] 14 | resources: ['podsecuritypolicies'] 15 | verbs: ['use'] 16 | resourceNames: [{{ include "grafana.fullname" . 
}}-test] 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/tests/test-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ include "grafana.fullname" . }}-test 6 | namespace: {{ include "grafana.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" 10 | labels: 11 | {{- include "grafana.labels" . | nindent 4 }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: {{ include "grafana.fullname" . }}-test 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ include "grafana.serviceAccountNameTest" . }} 19 | namespace: {{ include "grafana.namespace" . }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/grafana/templates/tests/test-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | name: {{ include "grafana.serviceAccountNameTest" . }} 8 | namespace: {{ include "grafana.namespace" . 
}} 9 | annotations: 10 | "helm.sh/hook": test-success 11 | "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 2.9.2 3 | description: Install kube-state-metrics to generate and expose cluster-level metrics 4 | home: https://github.com/kubernetes/kube-state-metrics/ 5 | keywords: 6 | - metric 7 | - monitoring 8 | - prometheus 9 | - kubernetes 10 | maintainers: 11 | - email: tariq.ibrahim@mulesoft.com 12 | name: tariq1890 13 | - email: manuel@rueg.eu 14 | name: mrueg 15 | - email: david@0xdc.me 16 | name: dotdc 17 | name: kube-state-metrics 18 | sources: 19 | - https://github.com/kubernetes/kube-state-metrics/ 20 | type: application 21 | version: 5.8.1 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | kube-state-metrics is a simple service that listens to the Kubernetes API server and 
generates metrics about the state of the objects. 2 | The exposed metrics can be found here: 3 | https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics 4 | 5 | The metrics are exported on the HTTP endpoint /metrics on the listening port. 6 | In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics 7 | 8 | They are served either as plaintext or protobuf depending on the Accept header. 9 | They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. 10 | 11 | {{- if .Values.kubeRBACProxy.enabled}} 12 | 13 | kube-rbac-proxy endpoint protections is enabled: 14 | - Metrics endpoints are now HTTPS 15 | - Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions: 16 | ``` 17 | rules: 18 | - apiGroups: [ "" ] 19 | resources: ["services/{{ template "kube-state-metrics.fullname" . }}"] 20 | verbs: 21 | - get 22 | ``` 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "cilium") }} 2 | apiVersion: cilium.io/v2 3 | kind: CiliumNetworkPolicy 4 | metadata: 5 | {{- if .Values.annotations }} 6 | annotations: 7 | {{ toYaml .Values.annotations | nindent 4 }} 8 | {{- end }} 9 | labels: 10 | {{- include "kube-state-metrics.labels" . | indent 4 }} 11 | name: {{ template "kube-state-metrics.fullname" . }} 12 | namespace: {{ template "kube-state-metrics.namespace" . }} 13 | spec: 14 | endpointSelector: 15 | matchLabels: 16 | {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} 17 | egress: 18 | {{- if and .Values.networkPolicy.cilium .Values.networkPolicy.cilium.kubeApiServerSelector }} 19 | {{ toYaml .Values.networkPolicy.cilium.kubeApiServerSelector | nindent 6 }} 20 | {{- else }} 21 | - toEntities: 22 | - kube-apiserver 23 | {{- end }} 24 | ingress: 25 | - toPorts: 26 | - ports: 27 | - port: {{ .Values.service.port | quote }} 28 | protocol: TCP 29 | {{- if .Values.selfMonitor.enabled }} 30 | - port: {{ .Values.selfMonitor.telemetryPort | default 8081 | quote }} 31 | protocol: TCP 32 | {{ end }} 33 | {{ end }} 34 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: {{ template "kube-state-metrics.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | {{- if .Values.rbac.useExistingRole }} 12 | name: {{ .Values.rbac.useExistingRole }} 13 | {{- else }} 14 | name: {{ template "kube-state-metrics.fullname" . }} 15 | {{- end }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 19 | namespace: {{ template "kube-state-metrics.namespace" . }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/crs-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.customResourceState.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . 
}}-customresourcestate-config 6 | data: 7 | config.yaml: | 8 | {{- toYaml .Values.customResourceState.config | nindent 4 }} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/kubeconfig-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeconfig.enabled -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . | indent 4 }} 9 | type: Opaque 10 | data: 11 | config: '{{ .Values.kubeconfig.secret }}' 12 | {{- end -}} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | {{ if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} 3 | apiVersion: policy/v1 4 | {{- else -}} 5 | apiVersion: policy/v1beta1 6 | {{- end }} 7 | kind: PodDisruptionBudget 8 | metadata: 9 | name: {{ template "kube-state-metrics.fullname" . }} 10 | namespace: {{ template "kube-state-metrics.namespace" . }} 11 | labels: 12 | {{- include "kube-state-metrics.labels" . | indent 4 }} 13 | spec: 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} 17 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | labels: 7 | {{- include "kube-state-metrics.labels" . | indent 4 }} 8 | {{- if .Values.podSecurityPolicy.annotations }} 9 | annotations: 10 | {{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | privileged: false 14 | volumes: 15 | - 'secret' 16 | {{- if .Values.podSecurityPolicy.additionalVolumes }} 17 | {{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} 18 | {{- end }} 19 | hostNetwork: false 20 | hostIPC: false 21 | hostPID: false 22 | runAsUser: 23 | rule: 'MustRunAsNonRoot' 24 | seLinux: 25 | rule: 'RunAsAny' 26 | supplementalGroups: 27 | rule: 'MustRunAs' 28 | ranges: 29 | # Forbid adding the root group. 30 | - min: 1 31 | max: 65535 32 | fsGroup: 33 | rule: 'MustRunAs' 34 | ranges: 35 | # Forbid adding the root group. 36 | - min: 1 37 | max: 65535 38 | readOnlyRootFilesystem: false 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . 
| indent 4 }} 7 | name: psp-{{ template "kube-state-metrics.fullname" . }} 8 | rules: 9 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 10 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 11 | - apiGroups: ['policy'] 12 | {{- else }} 13 | - apiGroups: ['extensions'] 14 | {{- end }} 15 | resources: ['podsecuritypolicies'] 16 | verbs: ['use'] 17 | resourceNames: 18 | - {{ template "kube-state-metrics.fullname" . }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: psp-{{ template "kube-state-metrics.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ template "kube-state-metrics.fullname" . }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 15 | namespace: {{ template "kube-state-metrics.namespace" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rbac-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeRBACProxy.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . 
}}-rbac-config 6 | data: 7 | config-file.yaml: |+ 8 | authorization: 9 | resourceAttributes: 10 | namespace: {{ template "kube-state-metrics.namespace" . }} 11 | apiVersion: v1 12 | resource: services 13 | subresource: {{ template "kube-state-metrics.fullname" . }} 14 | name: {{ template "kube-state-metrics.fullname" . }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} 2 | {{- range (join "," $.Values.namespaces) | split "," }} 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: RoleBinding 6 | metadata: 7 | labels: 8 | {{- include "kube-state-metrics.labels" $ | indent 4 }} 9 | name: {{ template "kube-state-metrics.fullname" $ }} 10 | namespace: {{ . }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | {{- if (not $.Values.rbac.useExistingRole) }} 15 | name: {{ template "kube-state-metrics.fullname" $ }} 16 | {{- else }} 17 | name: {{ $.Values.rbac.useExistingRole }} 18 | {{- end }} 19 | subjects: 20 | - kind: ServiceAccount 21 | name: {{ template "kube-state-metrics.serviceAccountName" $ }} 22 | namespace: {{ template "kube-state-metrics.namespace" $ }} 23 | {{- end -}} 24 | {{- end -}} 25 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "kube-state-metrics.labels" . | indent 4 }} 7 | name: {{ template "kube-state-metrics.serviceAccountName" . 
}} 8 | namespace: {{ template "kube-state-metrics.namespace" . }} 9 | {{- if .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{ toYaml .Values.serviceAccount.annotations | indent 4 }} 12 | {{- end }} 13 | imagePullSecrets: 14 | {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} 15 | {{- end -}} 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/stsdiscovery-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . | indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - pods 14 | verbs: 15 | - get 16 | - apiGroups: 17 | - apps 18 | resourceNames: 19 | - {{ template "kube-state-metrics.fullname" . }} 20 | resources: 21 | - statefulsets 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | {{- include "kube-state-metrics.labels" . 
| indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 16 | namespace: {{ template "kube-state-metrics.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | artifacthub.io/license: Apache-2.0 3 | artifacthub.io/links: | 4 | - name: Chart Source 5 | url: https://github.com/prometheus-community/helm-charts 6 | apiVersion: v2 7 | appVersion: 1.6.0 8 | description: A Helm chart for prometheus node-exporter 9 | home: https://github.com/prometheus/node_exporter/ 10 | keywords: 11 | - node-exporter 12 | - prometheus 13 | - exporter 14 | maintainers: 15 | - email: gianrubio@gmail.com 16 | name: gianrubio 17 | - email: zanhsieh@gmail.com 18 | name: zanhsieh 19 | - email: rootsandtrees@posteo.de 20 | name: zeritti 21 | name: prometheus-node-exporter 22 | sources: 23 | - https://github.com/prometheus/node_exporter/ 24 | type: application 25 | 
version: 4.18.1 26 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/ci/port-values.yaml: -------------------------------------------------------------------------------- 1 | service: 2 | targetPort: 9102 3 | port: 9102 4 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | rules: 10 | {{- if $.Values.kubeRBACProxy.enabled }} 11 | - apiGroups: [ "authentication.k8s.io" ] 12 | resources: 13 | - tokenreviews 14 | verbs: [ "create" ] 15 | - apiGroups: [ "authorization.k8s.io" ] 16 | resources: 17 | - subjectaccessreviews 18 | verbs: [ "create" ] 19 | {{- end }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 7 | name: {{ template "prometheus-node-exporter.fullname" . 
}} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | {{- if .Values.rbac.useExistingRole }} 12 | name: {{ .Values.rbac.useExistingRole }} 13 | {{- else }} 14 | name: {{ template "prometheus-node-exporter.fullname" . }} 15 | {{- end }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "prometheus-node-exporter.serviceAccountName" . }} 19 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | subsets: 10 | - addresses: 11 | {{- range .Values.endpoints }} 12 | - ip: {{ . }} 13 | {{- end }} 14 | ports: 15 | - name: {{ .Values.service.portName }} 16 | port: 9100 17 | protocol: TCP 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.networkPolicy.enabled }} 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} 9 | {{- with .Values.service.annotations }} 10 | annotations: 11 | {{- toYaml . 
| nindent 4 }} 12 | {{- end }} 13 | spec: 14 | ingress: 15 | - ports: 16 | - port: {{ .Values.service.port }} 17 | policyTypes: 18 | - Egress 19 | - Ingress 20 | podSelector: 21 | matchLabels: 22 | {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: psp-{{ include "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 8 | rules: 9 | - apiGroups: ['extensions'] 10 | resources: ['podsecuritypolicies'] 11 | verbs: ['use'] 12 | resourceNames: 13 | - {{ include "prometheus-node-exporter.fullname" . }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: psp-{{ include "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ include "prometheus-node-exporter.fullname" . 
}} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ include "prometheus-node-exporter.fullname" . }} 15 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/rbac-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeRBACProxy.enabled}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config 6 | data: 7 | config-file.yaml: |+ 8 | authorization: 9 | resourceAttributes: 10 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 11 | apiVersion: v1 12 | resource: services 13 | subresource: {{ template "prometheus-node-exporter.fullname" . }} 14 | name: {{ template "prometheus-node-exporter.fullname" . }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "prometheus-node-exporter.fullname" . }} 5 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 6 | labels: 7 | {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} 8 | {{- with .Values.service.annotations }} 9 | annotations: 10 | {{- toYaml . 
| nindent 4 }} 11 | {{- end }} 12 | spec: 13 | type: {{ .Values.service.type }} 14 | ports: 15 | - port: {{ .Values.service.port }} 16 | {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} 17 | nodePort: {{ .Values.service.nodePort }} 18 | {{- end }} 19 | targetPort: {{ .Values.service.targetPort }} 20 | protocol: TCP 21 | name: {{ .Values.service.portName }} 22 | selector: 23 | {{- include "prometheus-node-exporter.selectorLabels" . | nindent 4 }} 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.serviceAccountName" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . 
| nindent 4 }} 12 | {{- end }} 13 | {{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} 14 | imagePullSecrets: 15 | {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} 16 | {{- end }} 17 | {{- end -}} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} 2 | apiVersion: autoscaling.k8s.io/v1 3 | kind: VerticalPodAutoscaler 4 | metadata: 5 | name: {{ include "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ include "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-node-exporter.labels" . | nindent 4 }} 9 | spec: 10 | resourcePolicy: 11 | containerPolicies: 12 | - containerName: node-exporter 13 | {{- with .Values.verticalPodAutoscaler.controlledResources }} 14 | controlledResources: {{ . }} 15 | {{- end }} 16 | {{- with .Values.verticalPodAutoscaler.maxAllowed }} 17 | maxAllowed: 18 | {{- toYaml . | nindent 8 }} 19 | {{- end }} 20 | {{- with .Values.verticalPodAutoscaler.minAllowed }} 21 | minAllowed: 22 | {{- toYaml . | nindent 8 }} 23 | {{- end }} 24 | targetRef: 25 | apiVersion: apps/v1 26 | kind: DaemonSet 27 | name: {{ include "prometheus-node-exporter.fullname" . }} 28 | {{- if .Values.verticalPodAutoscaler.updatePolicy }} 29 | updatePolicy: 30 | {{- with .Values.verticalPodAutoscaler.updatePolicy.updateMode }} 31 | updateMode: {{ . 
}} 32 | {{- end }} 33 | {{- end }} 34 | {{- end }} 35 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-windows-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-windows-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 0.22.0 3 | description: A Helm chart for prometheus windows-exporter 4 | home: https://github.com/prometheus-community/windows_exporter/ 5 | keywords: 6 | - windows-exporter 7 | - windows 8 | - prometheus 9 | - exporter 10 | maintainers: 11 | - email: github@jkroepke.de 12 | name: jkroepke 13 | name: prometheus-windows-exporter 14 | sources: 15 | - https://github.com/prometheus-community/windows_exporter/ 16 | type: application 17 | version: 0.1.0 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-windows-exporter/templates/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "prometheus-windows-exporter.fullname" . }} 5 | namespace: {{ include "prometheus-windows-exporter.namespace" . 
}} 6 | labels: 7 | {{- include "prometheus-windows-exporter.labels" $ | nindent 4 }} 8 | {{- with .Values.service.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | data: 13 | config.yml: | 14 | {{- .Values.config | nindent 4 }} 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-windows-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "prometheus-windows-exporter.fullname" . }} 5 | namespace: {{ include "prometheus-windows-exporter.namespace" . }} 6 | labels: 7 | {{- include "prometheus-windows-exporter.labels" $ | nindent 4 }} 8 | {{- if or .Values.prometheus.monitor.enabled .Values.prometheus.podMonitor.enabled }} 9 | {{- with .Values.service.annotations }} 10 | annotations: 11 | {{- unset . "prometheus.io/scrape" | toYaml | nindent 4 }} 12 | {{- end }} 13 | {{- else }} 14 | annotations: 15 | prometheus.io/scrape: "true" 16 | {{- with .Values.service.annotations }} 17 | {{- toYaml . | nindent 4 }} 18 | {{- end }} 19 | {{- end }} 20 | spec: 21 | type: {{ .Values.service.type }} 22 | ports: 23 | - port: {{ .Values.service.port }} 24 | {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} 25 | nodePort: {{ .Values.service.nodePort }} 26 | {{- end }} 27 | targetPort: {{ .Values.service.portName }} 28 | protocol: TCP 29 | appProtocol: http 30 | name: {{ .Values.service.portName }} 31 | selector: 32 | {{- include "prometheus-windows-exporter.selectorLabels" . 
| nindent 4 }} 33 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/charts/prometheus-windows-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "prometheus-windows-exporter.serviceAccountName" . }} 6 | namespace: {{ include "prometheus-windows-exporter.namespace" . }} 7 | labels: 8 | {{- include "prometheus-windows-exporter.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | {{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} 14 | imagePullSecrets: 15 | {{- include "prometheus-windows-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} 16 | {{- end }} 17 | {{- end -}} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{ $.Chart.Name }} has been installed. Check its status by running: 2 | kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get pods -l "release={{ $.Release.Name }}" 3 | 4 | Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 
5 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/extrasecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.alertmanager.extraSecret.data -}} 2 | {{- $secretName := printf "alertmanager-%s-extra" (include "kube-prometheus-stack.fullname" . ) -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ default $secretName .Values.alertmanager.extraSecret.name }} 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | {{- if .Values.alertmanager.extraSecret.annotations }} 9 | annotations: 10 | {{ toYaml .Values.alertmanager.extraSecret.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 14 | app.kubernetes.io/component: alertmanager 15 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 16 | data: 17 | {{- range $key, $val := .Values.alertmanager.extraSecret.data }} 18 | {{ $key }}: {{ $val | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.podDisruptionBudget.enabled }} 2 | apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | spec: 11 | {{- if .Values.alertmanager.podDisruptionBudget.minAvailable }} 12 | minAvailable: {{ .Values.alertmanager.podDisruptionBudget.minAvailable }} 13 | {{- end }} 14 | {{- if .Values.alertmanager.podDisruptionBudget.maxUnavailable }} 15 | maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | app.kubernetes.io/name: alertmanager 20 | alertmanager: {{ template "kube-prometheus-stack.alertmanager.crname" . }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/psp-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | kind: Role 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | labels: 9 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 10 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 11 | rules: 12 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 13 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 14 | - apiGroups: ['policy'] 15 | {{- else }} 16 | - apiGroups: ['extensions'] 17 | {{- end }} 18 | resources: ['podsecuritypolicies'] 19 | verbs: ['use'] 20 | resourceNames: 21 | - {{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager 22 | {{- end }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/psp-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | labels: 9 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 10 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} 18 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-{{ template "kube-prometheus-stack.alertmanager.crname" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | {{- if .Values.alertmanager.secret.annotations }} 8 | annotations: 9 | {{ toYaml .Values.alertmanager.secret.annotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . 
}}-alertmanager 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | {{- if .Values.alertmanager.tplConfig }} 16 | {{- if .Values.alertmanager.stringConfig }} 17 | alertmanager.yaml: {{ tpl (.Values.alertmanager.stringConfig) . | b64enc | quote }} 18 | {{- else if eq (typeOf .Values.alertmanager.config) "string" }} 19 | alertmanager.yaml: {{ tpl (.Values.alertmanager.config) . | b64enc | quote }} 20 | {{- else }} 21 | alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . | b64enc | quote }} 22 | {{- end }} 23 | {{- else }} 24 | alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }} 25 | {{- end }} 26 | {{- range $key, $val := .Values.alertmanager.templateFiles }} 27 | {{ $key }}: {{ $val | b64enc | quote }} 28 | {{- end }} 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/alertmanager/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-alertmanager 10 | app.kubernetes.io/component: alertmanager 11 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 12 | {{- if .Values.alertmanager.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | automountServiceAccountToken: {{ .Values.alertmanager.serviceAccount.automountServiceAccountToken }} 17 | {{- if .Values.global.imagePullSecrets }} 18 | imagePullSecrets: 19 | {{ include "kube-prometheus-stack.imagePullSecrets" . | trim | indent 2}} 20 | {{- end }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/core-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.coreDns.enabled .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-coredns 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-coredns 8 | jobLabel: coredns 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.coreDns.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.coreDns.service.targetPort }} 18 | selector: 19 | {{- if .Values.coreDns.service.selector }} 20 | {{ toYaml .Values.coreDns.service.selector | indent 4 }} 21 | {{- else}} 22 | k8s-app: kube-dns 23 | {{- end}} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-controller-manager/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager 8 | k8s-app: kube-controller-manager 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeControllerManager.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | {{- $kubeControllerManagerDefaultInsecurePort := 10252 }} 19 | {{- $kubeControllerManagerDefaultSecurePort := 10257 }} 20 | port: {{ include "kube-prometheus-stack.kubeControllerManager.insecureScrape" (list . 
$kubeControllerManagerDefaultInsecurePort $kubeControllerManagerDefaultSecurePort .Values.kubeControllerManager.service.port) }} 21 | protocol: TCP 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeDns.enabled .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-dns 8 | jobLabel: kube-dns 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics-dnsmasq 15 | port: {{ .Values.kubeDns.service.dnsmasq.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeDns.service.dnsmasq.targetPort }} 18 | - name: http-metrics-skydns 19 | port: {{ .Values.kubeDns.service.skydns.port }} 20 | protocol: TCP 21 | targetPort: {{ .Values.kubeDns.service.skydns.targetPort }} 22 | selector: 23 | {{- if .Values.kubeDns.service.selector }} 24 | {{ toYaml .Values.kubeDns.service.selector | indent 4 }} 25 | {{- else}} 26 | k8s-app: kube-dns 27 | {{- end}} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-etcd/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . 
}}-kube-etcd 8 | k8s-app: etcd-server 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeEtcd.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeEtcd.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-etcd/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.service.enabled .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd 8 | jobLabel: kube-etcd 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeEtcd.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeEtcd.service.targetPort }} 18 | {{- if .Values.kubeEtcd.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeEtcd.service.selector }} 21 | {{ toYaml .Values.kubeEtcd.service.selector | indent 4 }} 22 | {{- else}} 23 | component: etcd 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-proxy/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeProxy.enabled .Values.kubeProxy.endpoints .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 8 | k8s-app: kube-proxy 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeProxy.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeProxy.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-proxy/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeProxy.enabled .Values.kubeProxy.service.enabled .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-kube-proxy 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 8 | jobLabel: kube-proxy 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeProxy.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeProxy.service.targetPort }} 18 | {{- if .Values.kubeProxy.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeProxy.service.selector }} 21 | {{ toYaml .Values.kubeProxy.service.selector | indent 4 }} 22 | {{- else}} 23 | k8s-app: kube-proxy 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler 8 | k8s-app: kube-scheduler 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeScheduler.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | {{- $kubeSchedulerDefaultInsecurePort := 10251 }} 19 | {{- $kubeSchedulerDefaultSecurePort := 10259 }} 20 | port: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . 
$kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }} 21 | protocol: TCP 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/exporters/kube-scheduler/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled .Values.kubernetesServiceMonitors.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler 8 | jobLabel: kube-scheduler 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | {{- $kubeSchedulerDefaultInsecurePort := 10251 }} 16 | {{- $kubeSchedulerDefaultSecurePort := 10259 }} 17 | port: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . $kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.port) }} 18 | protocol: TCP 19 | targetPort: {{ include "kube-prometheus-stack.kubeScheduler.insecureScrape" (list . 
$kubeSchedulerDefaultInsecurePort $kubeSchedulerDefaultSecurePort .Values.kubeScheduler.service.targetPort) }} 20 | {{- if .Values.kubeScheduler.endpoints }}{{- else }} 21 | selector: 22 | {{- if .Values.kubeScheduler.service.selector }} 23 | {{ toYaml .Values.kubeScheduler.service.selector | indent 4 }} 24 | {{- else}} 25 | component: kube-scheduler 26 | {{- end}} 27 | {{- end }} 28 | type: ClusterIP 29 | {{- end -}} 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/extra-objects.yaml: -------------------------------------------------------------------------------- 1 | {{ range .Values.extraManifests }} 2 | --- 3 | {{ tpl (toYaml .) $ }} 4 | {{ end }} 5 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/grafana/configmap-dashboards.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled) .Values.grafana.forceDeployDashboards }} 2 | {{- $files := .Files.Glob "dashboards-1.14/*.json" }} 3 | {{- if $files }} 4 | apiVersion: v1 5 | kind: ConfigMapList 6 | items: 7 | {{- range $path, $fileContents := $files }} 8 | {{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} 9 | - apiVersion: v1 10 | kind: ConfigMap 11 | metadata: 12 | name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) $dashboardName | trunc 63 | trimSuffix "-" }} 13 | namespace: {{ template "kube-prometheus-stack-grafana.namespace" $ }} 14 | labels: 15 | {{- if $.Values.grafana.sidecar.dashboards.label }} 16 | {{ $.Values.grafana.sidecar.dashboards.label }}: {{ ternary $.Values.grafana.sidecar.dashboards.labelValue "1" (not (empty $.Values.grafana.sidecar.dashboards.labelValue)) | quote }} 17 | {{- end }} 18 | app: {{ template "kube-prometheus-stack.name" $ }}-grafana 19 | {{ include 
"kube-prometheus-stack.labels" $ | indent 6 }} 20 | data: 21 | {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | annotations: 7 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 8 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 9 | labels: 10 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 11 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 19 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/networkpolicy-createSecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.networkPolicy.enabled (eq .Values.prometheusOperator.networkPolicy.flavor "kubernetes") }} 2 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 3 | apiVersion: networking.k8s.io/v1 4 | kind: NetworkPolicy 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission-create 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | annotations: 9 | "helm.sh/hook": pre-install,pre-upgrade 10 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 11 | ## Ensure this is run before the job 12 | "helm.sh/hook-weight": "-5" 13 | {{- with .Values.prometheusOperator.admissionWebhooks.annotations }} 14 | {{ toYaml . 
| indent 4 }} 15 | {{- end }} 16 | labels: 17 | app: {{ template "kube-prometheus-stack.name" $ }}-admission-create 18 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 19 | spec: 20 | podSelector: 21 | matchLabels: 22 | app: {{ template "kube-prometheus-stack.name" $ }}-admission-create 23 | {{- include "kube-prometheus-stack.labels" $ | indent 6 }} 24 | egress: 25 | - {} 26 | policyTypes: 27 | - Egress 28 | {{- end }} 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/networkpolicy-patchWebhook.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.networkPolicy.enabled (eq .Values.prometheusOperator.networkPolicy.flavor "kubernetes") }} 2 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 3 | apiVersion: networking.k8s.io/v1 4 | kind: NetworkPolicy 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission-patch 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | annotations: 9 | "helm.sh/hook": post-install,post-upgrade 10 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 11 | ## Ensure this is run before the job 12 | "helm.sh/hook-weight": "-5" 13 | {{- with .Values.prometheusOperator.admissionWebhooks.patch.annotations }} 14 | {{ toYaml . 
| indent 4 }} 15 | {{- end }} 16 | labels: 17 | app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch 18 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 19 | spec: 20 | podSelector: 21 | matchLabels: 22 | app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch 23 | {{- include "kube-prometheus-stack.labels" $ | indent 6 }} 24 | egress: 25 | - {} 26 | policyTypes: 27 | - Egress 28 | {{- end }} 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - secrets 18 | verbs: 19 | - get 20 | - create 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 17 | subjects: 18 | - kind: ServiceAccount 19 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 20 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | {{- if .Values.global.imagePullSecrets }} 14 | imagePullSecrets: 15 | {{ include "kube-prometheus-stack.imagePullSecrets" . | trim | indent 2 }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/ciliumnetworkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.networkPolicy.enabled (eq .Values.prometheusOperator.networkPolicy.flavor "cilium") }} 2 | apiVersion: cilium.io/v2 3 | kind: CiliumNetworkPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | {{- include "kube-prometheus-stack.labels" . 
| nindent 4 }} 10 | spec: 11 | endpointSelector: 12 | matchLabels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-operator 14 | {{- include "kube-prometheus-stack.labels" $ | nindent 6 }} 15 | egress: 16 | {{- if and .Values.prometheusOperator.networkPolicy.cilium .Values.prometheusOperator.networkPolicy.cilium.egress }} 17 | {{ toYaml .Values.prometheusOperator.networkPolicy.cilium.egress | nindent 6 }} 18 | {{- else }} 19 | - toEntities: 20 | - kube-apiserver 21 | {{- end }} 22 | ingress: 23 | - toPorts: 24 | - ports: 25 | {{- if .Values.prometheusOperator.tls.enabled }} 26 | - port: {{ .Values.prometheusOperator.tls.internalPort | quote }} 27 | {{- else }} 28 | - port: "8080" 29 | {{- end }} 30 | protocol: "TCP" 31 | rules: 32 | http: 33 | - method: "GET" 34 | path: "/metrics" 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.networkPolicy.enabled (eq .Values.prometheusOperator.networkPolicy.flavor "kubernetes") }} 2 | apiVersion: {{ template "kube-prometheus-stack.prometheus.networkPolicy.apiVersion" . }} 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | {{- include "kube-prometheus-stack.labels" . | nindent 4 }} 10 | spec: 11 | egress: 12 | - {} 13 | ingress: 14 | - ports: 15 | {{- if .Values.prometheusOperator.tls.enabled }} 16 | - port: {{ .Values.prometheusOperator.tls.internalPort }} 17 | {{- else }} 18 | - port: 8080 19 | {{- end }} 20 | policyTypes: 21 | - Egress 22 | - Ingress 23 | podSelector: 24 | matchLabels: 25 | app: {{ template "kube-prometheus-stack.name" . }}-operator 26 | release: {{ $.Release.Name | quote }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | rules: 11 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 12 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 13 | - apiGroups: ['policy'] 14 | {{- else }} 15 | - apiGroups: ['extensions'] 16 | {{- end }} 17 | resources: ['podsecuritypolicies'] 18 | verbs: ['use'] 19 | resourceNames: 20 | - {{ template "kube-prometheus-stack.fullname" . }}-operator 21 | {{- end }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | kind: ClusterRoleBinding 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 17 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 18 | {{- end }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus-operator/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus-operator 10 | app.kubernetes.io/component: prometheus-operator 11 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 12 | {{- if .Values.global.imagePullSecrets }} 13 | imagePullSecrets: 14 | {{ include "kube-prometheus-stack.imagePullSecrets" . | trim | indent 2 }} 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/_rules.tpl: -------------------------------------------------------------------------------- 1 | {{- /* 2 | Generated file. Do not change in-place! 
In order to change this file, first read the following link
}}-prometheus-am-relabel-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | additional-alert-relabel-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs | b64enc | quote }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/additionalAlertmanagerConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-confg 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | {{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} 8 | annotations: 9 | {{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus-am-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | additional-alertmanager-configs.yaml: {{ tpl (toYaml .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs) . | b64enc | quote }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/additionalScrapeConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalScrapeConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-scrape-confg 6 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 7 | {{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} 8 | annotations: 9 | {{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus-scrape-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | {{- if eq ( typeOf .Values.prometheus.prometheusSpec.additionalScrapeConfigs ) "string" }} 16 | additional-scrape-configs.yaml: {{ tpl .Values.prometheus.prometheusSpec.additionalScrapeConfigs $ | b64enc | quote }} 17 | {{- else }} 18 | additional-scrape-configs.yaml: {{ tpl (toYaml .Values.prometheus.prometheusSpec.additionalScrapeConfigs) $ | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/ciliumnetworkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.networkPolicy.enabled (eq .Values.prometheus.networkPolicy.flavor "cilium") }} 2 | apiVersion: cilium.io/v2 3 | kind: CiliumNetworkPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{- include "kube-prometheus-stack.labels" . | nindent 4 }} 10 | spec: 11 | endpointSelector: 12 | {{- if .Values.prometheus.networkPolicy.cilium.endpointSelector }} 13 | {{- toYaml .Values.prometheus.networkPolicy.cilium.endpointSelector | nindent 4 }} 14 | {{- else }} 15 | matchExpressions: 16 | - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} 17 | - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . 
}}]} 18 | {{- end }} 19 | {{- if and .Values.prometheus.networkPolicy.cilium .Values.prometheus.networkPolicy.cilium.egress }} 20 | egress: 21 | {{ toYaml .Values.prometheus.networkPolicy.cilium.egress | nindent 4 }} 22 | {{- end }} 23 | {{- if and .Values.prometheus.networkPolicy.cilium .Values.prometheus.networkPolicy.cilium.ingress }} 24 | ingress: 25 | {{ toYaml .Values.prometheus.networkPolicy.cilium.ingress | nindent 4 }} 26 | {{- end }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 9 | rules: 10 | # This permission are not in the kube-prometheus repo 11 | # they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml 12 | - apiGroups: [""] 13 | resources: 14 | - nodes 15 | - nodes/metrics 16 | - services 17 | - endpoints 18 | - pods 19 | verbs: ["get", "list", "watch"] 20 | - apiGroups: 21 | - "networking.k8s.io" 22 | resources: 23 | - ingresses 24 | verbs: ["get", "list", "watch"] 25 | - nonResourceURLs: ["/metrics", "/metrics/cadvisor"] 26 | verbs: ["get"] 27 | {{- if .Values.prometheus.additionalRulesForClusterRole }} 28 | {{ toYaml .Values.prometheus.additionalRulesForClusterRole | indent 0 }} 29 | {{- end }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/csi-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.prometheusSpec.thanos .Values.prometheus.prometheusSpec.thanos.secretProviderClass }} 2 | --- 3 | apiVersion: secrets-store.csi.x-k8s.io/v1alpha1 4 | kind: SecretProviderClass 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | labels: 9 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 10 | spec: 11 | {{ toYaml .Values.prometheus.prometheusSpec.thanos.secretProviderClass | indent 2 }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/extrasecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.extraSecret.data -}} 2 | {{- $secretName := printf "prometheus-%s-extra" (include "kube-prometheus-stack.fullname" . ) -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ default $secretName .Values.prometheus.extraSecret.name }} 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | {{- if .Values.prometheus.extraSecret.annotations }} 9 | annotations: 10 | {{ toYaml .Values.prometheus.extraSecret.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 14 | app.kubernetes.io/component: prometheus 15 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 16 | data: 17 | {{- range $key, $val := .Values.prometheus.extraSecret.data }} 18 | {{ $key }}: {{ $val | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.networkPolicy.enabled (eq .Values.prometheus.networkPolicy.flavor "kubernetes") }} 2 | apiVersion: {{ template "kube-prometheus-stack.prometheus.networkPolicy.apiVersion" . }} 3 | kind: NetworkPolicy 4 | metadata: 5 | labels: 6 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 7 | {{- include "kube-prometheus-stack.labels" . | nindent 4 }} 8 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 9 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 10 | spec: 11 | {{- if .Values.prometheus.networkPolicy.egress }} 12 | egress: 13 | {{- toYaml .Values.prometheus.networkPolicy.egress | nindent 4 }} 14 | {{- end }} 15 | {{- if .Values.prometheus.networkPolicy.ingress }} 16 | ingress: 17 | {{- toYaml .Values.prometheus.networkPolicy.ingress | nindent 4 }} 18 | {{- end }} 19 | policyTypes: 20 | - Egress 21 | - Ingress 22 | podSelector: 23 | {{- if .Values.prometheus.networkPolicy.podSelector }} 24 | {{- toYaml .Values.prometheus.networkPolicy.podSelector | nindent 4 }} 25 | {{- else }} 26 | matchExpressions: 27 | - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} 28 | - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.prometheus.crname" . 
}}]} 29 | {{- end }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.podDisruptionBudget.enabled }} 2 | apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | {{- if .Values.prometheus.podDisruptionBudget.minAvailable }} 12 | minAvailable: {{ .Values.prometheus.podDisruptionBudget.minAvailable }} 13 | {{- end }} 14 | {{- if .Values.prometheus.podDisruptionBudget.maxUnavailable }} 15 | maxUnavailable: {{ .Values.prometheus.podDisruptionBudget.maxUnavailable }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | app.kubernetes.io/name: prometheus 20 | prometheus: {{ template "kube-prometheus-stack.prometheus.crname" . 
}} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/podmonitors.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.additionalPodMonitors }} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- range .Values.prometheus.additionalPodMonitors }} 6 | - apiVersion: monitoring.coreos.com/v1 7 | kind: PodMonitor 8 | metadata: 9 | name: {{ .name }} 10 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" $ }}-prometheus 13 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 14 | {{- if .additionalLabels }} 15 | {{ toYaml .additionalLabels | indent 8 }} 16 | {{- end }} 17 | spec: 18 | {{- include "servicemonitor.scrapeLimits" . | nindent 6 }} 19 | podMetricsEndpoints: 20 | {{ toYaml .podMetricsEndpoints | indent 8 }} 21 | {{- if .jobLabel }} 22 | jobLabel: {{ .jobLabel }} 23 | {{- end }} 24 | {{- if .namespaceSelector }} 25 | namespaceSelector: 26 | {{ toYaml .namespaceSelector | indent 8 }} 27 | {{- end }} 28 | selector: 29 | {{ toYaml .selector | indent 8 }} 30 | {{- if .podTargetLabels }} 31 | podTargetLabels: 32 | {{ toYaml .podTargetLabels | indent 8 }} 33 | {{- end }} 34 | {{- if .sampleLimit }} 35 | sampleLimit: {{ .sampleLimit }} 36 | {{- end }} 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | 
metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | rules: 11 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 12 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 13 | - apiGroups: ['policy'] 14 | {{- else }} 15 | - apiGroups: ['extensions'] 16 | {{- end }} 17 | resources: ['podsecuritypolicies'] 18 | verbs: ['use'] 19 | resourceNames: 20 | - {{ template "kube-prometheus-stack.fullname" . }}-prometheus 21 | {{- end }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 17 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 18 | {{- end }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus 10 | app.kubernetes.io/component: prometheus 11 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 12 | {{- if .Values.prometheus.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.prometheus.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | {{- if .Values.global.imagePullSecrets }} 17 | imagePullSecrets: 18 | {{ include "kube-prometheus-stack.imagePullSecrets" . 
| trim | indent 2 }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/prometheus/servicemonitors.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.additionalServiceMonitors }} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- range .Values.prometheus.additionalServiceMonitors }} 6 | - apiVersion: monitoring.coreos.com/v1 7 | kind: ServiceMonitor 8 | metadata: 9 | name: {{ .name }} 10 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" $ }}-prometheus 13 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 14 | {{- if .additionalLabels }} 15 | {{ toYaml .additionalLabels | indent 8 }} 16 | {{- end }} 17 | spec: 18 | {{- include "servicemonitor.scrapeLimits" . | nindent 6 }} 19 | endpoints: 20 | {{ toYaml .endpoints | indent 8 }} 21 | {{- if .jobLabel }} 22 | jobLabel: {{ .jobLabel }} 23 | {{- end }} 24 | {{- if .namespaceSelector }} 25 | namespaceSelector: 26 | {{ toYaml .namespaceSelector | indent 8 }} 27 | {{- end }} 28 | selector: 29 | {{ toYaml .selector | indent 8 }} 30 | {{- if .targetLabels }} 31 | targetLabels: 32 | {{ toYaml .targetLabels | indent 8 }} 33 | {{- end }} 34 | {{- if .podTargetLabels }} 35 | podTargetLabels: 36 | {{ toYaml .podTargetLabels | indent 8 }} 37 | {{- end }} 38 | {{- end }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/thanos-ruler/extrasecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.thanosRuler.extraSecret.data -}} 2 | {{- $secretName := printf "%s-extra" (include "kube-prometheus-stack.thanosRuler.name" . 
) -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ default $secretName .Values.thanosRuler.extraSecret.name }} 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | {{- if .Values.thanosRuler.extraSecret.annotations }} 9 | annotations: 10 | {{ toYaml .Values.thanosRuler.extraSecret.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} 14 | app.kubernetes.io/component: thanos-ruler 15 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 16 | data: 17 | {{- range $key, $val := .Values.thanosRuler.extraSecret.data }} 18 | {{ $key }}: {{ $val | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/thanos-ruler/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.thanosRuler.enabled .Values.thanosRuler.podDisruptionBudget.enabled }} 2 | apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | {{- if .Values.thanosRuler.podDisruptionBudget.minAvailable }} 12 | minAvailable: {{ .Values.thanosRuler.podDisruptionBudget.minAvailable }} 13 | {{- end }} 14 | {{- if .Values.thanosRuler.podDisruptionBudget.maxUnavailable }} 15 | maxUnavailable: {{ .Values.thanosRuler.podDisruptionBudget.maxUnavailable }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | app.kubernetes.io/name: thanos-ruler 20 | thanos-ruler: {{ template "kube-prometheus-stack.thanosRuler.name" . 
}} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/kube-prometheus-stack/templates/thanos-ruler/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.thanosRuler.enabled .Values.thanosRuler.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.thanosRuler.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.thanosRuler.name" . }} 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.thanosRuler.name" . }} 10 | app.kubernetes.io/component: thanos-ruler 11 | {{- include "kube-prometheus-stack.labels" . | indent 4 -}} 12 | {{- if .Values.thanosRuler.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.thanosRuler.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | {{- if .Values.global.imagePullSecrets }} 17 | imagePullSecrets: 18 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: v2.6.1 3 | description: 'Loki: like Prometheus, but for logs.' 4 | home: https://grafana.com/loki 5 | icon: https://raw.githubusercontent.com/grafana/loki/master/docs/sources/logo.png 6 | kubeVersion: ^1.10.0-0 7 | maintainers: 8 | - email: lokiproject@googlegroups.com 9 | name: Loki Maintainers 10 | name: loki-stack 11 | sources: 12 | - https://github.com/grafana/loki 13 | version: 2.9.10 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: v2.1.0 3 | deprecated: true 4 | description: Uses fluent-bit Loki go plugin for gathering logs and sending them to 5 | Loki 6 | home: https://grafana.com/loki 7 | icon: https://raw.githubusercontent.com/grafana/loki/master/docs/sources/logo.png 8 | kubeVersion: ^1.10.0-0 9 | maintainers: 10 | - email: lokiproject@googlegroups.com 11 | name: Loki Maintainers 12 | name: fluent-bit 13 | sources: 14 | - https://github.com/grafana/loki 15 | version: 2.5.0 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | !WARNING! !WARNING! !WARNING! !WARNING! !WARNING! 2 | 3 | Please use the official fluent-bit chart 4 | 5 | https://github.com/fluent/helm-charts 6 | 7 | !WARNING! !WARNING! !WARNING! !WARNING! !WARNING! 8 | 9 | Verify the application is working by running these commands: 10 | kubectl --namespace {{ .Release.Namespace }} port-forward daemonset/{{ include "fluent-bit-loki.fullname" . 
}} {{ .Values.config.port }} 11 | curl http://127.0.0.1:{{ .Values.config.port }}/api/v1/metrics/prometheus 12 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | labels: 6 | app: {{ template "fluent-bit-loki.name" . }} 7 | chart: {{ template "fluent-bit-loki.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | name: {{ template "fluent-bit-loki.fullname" . }}-clusterrole 11 | rules: 12 | - apiGroups: [""] # "" indicates the core API group 13 | resources: 14 | - namespaces 15 | - pods 16 | verbs: ["get", "watch", "list"] 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "fluent-bit-loki.fullname" . }}-clusterrolebinding 6 | labels: 7 | app: {{ template "fluent-bit-loki.name" . }} 8 | chart: {{ template "fluent-bit-loki.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | subjects: 12 | - kind: ServiceAccount 13 | name: {{ template "fluent-bit-loki.serviceAccountName" . }} 14 | namespace: {{ .Release.Namespace }} 15 | roleRef: 16 | kind: ClusterRole 17 | name: {{ template "fluent-bit-loki.fullname" . 
}}-clusterrole 18 | apiGroup: rbac.authorization.k8s.io 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | apiVersion: policy/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | name: {{ template "fluent-bit-loki.fullname" . }} 7 | labels: 8 | app: {{ template "fluent-bit-loki.name" . }} 9 | chart: {{ template "fluent-bit-loki.chart" . }} 10 | heritage: {{ .Release.Service }} 11 | release: {{ .Release.Name }} 12 | spec: 13 | privileged: false 14 | allowPrivilegeEscalation: false 15 | volumes: 16 | - 'secret' 17 | - 'configMap' 18 | - 'hostPath' 19 | - 'projected' 20 | - 'downwardAPI' 21 | hostNetwork: false 22 | hostIPC: false 23 | hostPID: false 24 | runAsUser: 25 | rule: 'RunAsAny' 26 | seLinux: 27 | rule: 'RunAsAny' 28 | supplementalGroups: 29 | rule: 'RunAsAny' 30 | fsGroup: 31 | rule: 'RunAsAny' 32 | readOnlyRootFilesystem: true 33 | requiredDropCapabilities: 34 | - ALL 35 | {{- end }} 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "fluent-bit-loki.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "fluent-bit-loki.name" . }} 9 | chart: {{ template "fluent-bit-loki.chart" . 
}} 10 | heritage: {{ .Release.Service }} 11 | release: {{ .Release.Name }} 12 | {{- if .Values.rbac.pspEnabled }} 13 | rules: 14 | - apiGroups: ['extensions'] 15 | resources: ['podsecuritypolicies'] 16 | verbs: ['use'] 17 | resourceNames: [{{ template "fluent-bit-loki.fullname" . }}] 18 | {{- end }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "fluent-bit-loki.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "fluent-bit-loki.name" . }} 9 | chart: {{ template "fluent-bit-loki.chart" . }} 10 | heritage: {{ .Release.Service }} 11 | release: {{ .Release.Name }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: {{ template "fluent-bit-loki.fullname" . }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "fluent-bit-loki.serviceAccountName" . }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/service-headless.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "fluent-bit-loki.fullname" . }}-headless 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "fluent-bit-loki.name" . }} 9 | chart: {{ template "fluent-bit-loki.chart" . 
}} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | spec: 13 | clusterIP: None 14 | ports: 15 | - port: {{ .Values.config.port }} 16 | protocol: TCP 17 | name: http-metrics 18 | targetPort: http-metrics 19 | selector: 20 | app: {{ template "fluent-bit-loki.name" . }} 21 | release: {{ .Release.Name }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app: {{ template "fluent-bit-loki.name" . }} 7 | chart: {{ template "fluent-bit-loki.chart" . }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "fluent-bit-loki.serviceAccountName" . }} 11 | namespace: {{ .Release.Namespace }} 12 | automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/fluent-bit/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "fluent-bit-loki.fullname" . }} 6 | labels: 7 | app: {{ template "fluent-bit-loki.name" . }} 8 | chart: {{ template "fluent-bit-loki.chart" . 
}} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | {{- if .Values.serviceMonitor.additionalLabels }} 12 | {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} 13 | {{- end }} 14 | {{- if .Values.serviceMonitor.annotations }} 15 | annotations: 16 | {{ toYaml .Values.serviceMonitor.annotations | indent 4 }} 17 | {{- end }} 18 | spec: 19 | selector: 20 | matchLabels: 21 | app: {{ template "fluent-bit-loki.name" . }} 22 | release: {{ .Release.Name | quote }} 23 | namespaceSelector: 24 | matchNames: 25 | - {{ .Release.Namespace | quote }} 26 | endpoints: 27 | - port: http-metrics 28 | path: /api/v1/metrics/prometheus 29 | {{- if .Values.serviceMonitor.interval }} 30 | interval: {{ .Values.serviceMonitor.interval }} 31 | {{- end }} 32 | {{- if .Values.serviceMonitor.scrapeTimeout }} 33 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: v2.6.1 3 | description: 'Loki: like Prometheus, but for logs.' 
4 | home: https://grafana.com/loki 5 | icon: https://raw.githubusercontent.com/grafana/loki/master/docs/sources/logo.png 6 | kubeVersion: ^1.10.0-0 7 | maintainers: 8 | - email: lokiproject@googlegroups.com 9 | name: Loki Maintainers 10 | name: loki 11 | sources: 12 | - https://github.com/grafana/loki 13 | version: 2.16.0 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Verify the application is working by running these commands: 2 | kubectl --namespace {{ .Release.Namespace }} port-forward service/{{ include "loki.fullname" . }} {{ .Values.service.port }} 3 | curl http://127.0.0.1:{{ .Values.service.port }}/api/prom/label 4 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/configmap-alert.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (.Values.useExistingAlertingGroup.enabled) (gt (len .Values.alerting_groups) 0) }} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: {{ template "loki.fullname" . }}-alerting-rules 7 | namespace: {{ .Release.Namespace }} 8 | labels: 9 | {{- include "loki.labels" . | nindent 4 }} 10 | data: 11 | {{ template "loki.fullname" . }}-alerting-rules.yaml: |- 12 | groups: 13 | {{- toYaml .Values.alerting_groups | nindent 6 }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.networkPolicy.enabled }} 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{ template "loki.fullname" . 
}} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | spec: 10 | podSelector: 11 | matchLabels: 12 | name: {{ template "loki.fullname" . }} 13 | app: {{ template "loki.name" . }} 14 | release: {{ .Release.Name }} 15 | ingress: 16 | - from: 17 | - podSelector: 18 | matchLabels: 19 | app: {{ template "client.name" . }} 20 | release: {{ .Release.Name }} 21 | - ports: 22 | - port: {{ .Values.service.port }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | apiVersion: {{ include "loki.podDisruptionBudget.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "loki.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: {{ template "loki.name" . }} 13 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | {{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} 3 | apiVersion: policy/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | name: {{ template "loki.fullname" . }} 7 | labels: 8 | {{- include "loki.labels" . 
| nindent 4 }} 9 | spec: 10 | privileged: false 11 | allowPrivilegeEscalation: false 12 | volumes: 13 | - 'configMap' 14 | - 'emptyDir' 15 | - 'persistentVolumeClaim' 16 | - 'secret' 17 | - 'projected' 18 | - 'downwardAPI' 19 | hostNetwork: false 20 | hostIPC: false 21 | hostPID: false 22 | runAsUser: 23 | rule: 'MustRunAsNonRoot' 24 | seLinux: 25 | rule: 'RunAsAny' 26 | supplementalGroups: 27 | rule: 'MustRunAs' 28 | ranges: 29 | - min: 1 30 | max: 65535 31 | fsGroup: 32 | rule: 'MustRunAs' 33 | ranges: 34 | - min: 1 35 | max: 65535 36 | readOnlyRootFilesystem: true 37 | requiredDropCapabilities: 38 | - ALL 39 | {{- end }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/prometheusrule.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.prometheusRule.enabled -}} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: {{ template "loki.fullname" . }} 6 | {{- if .Values.serviceMonitor.prometheusRule.namespace }} 7 | namespace: {{ .Values.serviceMonitor.prometheusRule.namespace | quote }} 8 | {{- end }} 9 | labels: 10 | {{- include "loki.labels" . | nindent 4 }} 11 | {{- if .Values.serviceMonitor.prometheusRule.additionalLabels }} 12 | {{- toYaml .Values.serviceMonitor.prometheusRule.additionalLabels | nindent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if .Values.serviceMonitor.prometheusRule.rules }} 16 | groups: 17 | - name: {{ template "loki.fullname" . 
}} 18 | rules: {{- toYaml .Values.serviceMonitor.prometheusRule.rules | nindent 4 }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "loki.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | {{- if .Values.rbac.pspEnabled }} 10 | rules: 11 | - apiGroups: ['extensions'] 12 | resources: ['podsecuritypolicies'] 13 | verbs: ['use'] 14 | resourceNames: [{{ template "loki.fullname" . }}] 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "loki.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: {{ template "loki.fullname" . }} 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "loki.serviceAccountName" . }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.config.existingSecret -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "loki.fullname" . 
}} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | data: 10 | loki.yaml: {{ tpl (toYaml .Values.config) . | b64enc}} 11 | {{- end -}} 12 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/service-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "loki.fullname" . }}-headless 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "loki.labels" . | nindent 4 }} 8 | {{- with .Values.service.labels }} 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | variant: headless 12 | spec: 13 | clusterIP: None 14 | ports: 15 | - port: {{ .Values.service.port }} 16 | protocol: TCP 17 | name: http-metrics 18 | targetPort: {{ .Values.service.targetPort }} 19 | {{- if .Values.extraPorts }} 20 | {{ toYaml .Values.extraPorts | indent 4}} 21 | {{- end }} 22 | selector: 23 | app: {{ template "loki.name" . }} 24 | release: {{ .Release.Name }} 25 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/service-memberlist.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.config.memberlist -}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ include "loki.fullname" . }}-memberlist 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "loki.labels" . | nindent 4 }} 9 | spec: 10 | type: ClusterIP 11 | clusterIP: None 12 | publishNotReadyAddresses: true 13 | ports: 14 | - name: http 15 | port: {{ .Values.config.memberlist.bind_port | default 7946 }} 16 | targetPort: memberlist-port 17 | protocol: TCP 18 | selector: 19 | app: {{ template "loki.name" . 
}} 20 | release: {{ .Release.Name }} 21 | {{- end -}} 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "loki.labels" . | nindent 4 }} 7 | annotations: 8 | {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} 9 | name: {{ template "loki.serviceAccountName" . }} 10 | namespace: {{ .Release.Namespace }} 11 | automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/charts/loki/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "loki.fullname" . }} 6 | labels: 7 | {{- include "loki.labels" . | nindent 4 }} 8 | {{- if .Values.serviceMonitor.additionalLabels }} 9 | {{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }} 10 | {{- end }} 11 | {{- if .Values.serviceMonitor.annotations }} 12 | annotations: 13 | {{ toYaml .Values.serviceMonitor.annotations | indent 4 }} 14 | {{- end }} 15 | spec: 16 | selector: 17 | matchLabels: 18 | app: {{ template "loki.name" . 
}} 19 | release: {{ .Release.Name | quote }} 20 | variant: headless 21 | namespaceSelector: 22 | matchNames: 23 | - {{ .Release.Namespace | quote }} 24 | endpoints: 25 | - port: http-metrics 26 | {{- if .Values.serviceMonitor.interval }} 27 | interval: {{ .Values.serviceMonitor.interval }} 28 | {{- end }} 29 | {{- if .Values.serviceMonitor.scrapeTimeout }} 30 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 31 | {{- end }} 32 | {{- if .Values.serviceMonitor.path }} 33 | path: {{ .Values.serviceMonitor.path }} 34 | {{- end }} 35 | {{- with .Values.serviceMonitor.scheme }} 36 | scheme: {{ . }} 37 | {{- end }} 38 | {{- with .Values.serviceMonitor.tlsConfig }} 39 | tlsConfig: 40 | {{- toYaml . | nindent 6 }} 41 | {{- end }} 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | The Loki stack has been deployed to your cluster. Loki can now be added as a datasource in Grafana. 2 | 3 | See http://docs.grafana.org/features/datasources/loki/ for more detail. 4 | -------------------------------------------------------------------------------- /helm/monitor/charts/loki-stack/templates/tests/loki-test-pod.yaml: -------------------------------------------------------------------------------- 1 | {{- if (and .Values.test_pod.enabled .Values.loki.enabled) }} 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | annotations: 6 | "helm.sh/hook": test-success 7 | labels: 8 | app: {{ template "loki-stack.name" . }} 9 | chart: {{ template "loki-stack.chart" . }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | name: {{ template "loki-stack.fullname" . 
}}-test 13 | spec: 14 | containers: 15 | - name: test 16 | image: "{{ .Values.test_pod.image }}" 17 | imagePullPolicy: "{{ .Values.test_pod.imagePullPolicy}}" 18 | args: 19 | - /var/lib/loki/test.sh 20 | env: 21 | - name: LOKI_SERVICE 22 | value: {{ template "loki.serviceName" . }} 23 | - name: LOKI_PORT 24 | value: "{{ .Values.loki.service.port }}" 25 | {{- with .Values.proxy.http_proxy }} 26 | - name: HTTP_PROXY 27 | value: "{{ . }}" 28 | {{- end }} 29 | {{- with .Values.proxy.https_proxy }} 30 | - name: HTTPS_PROXY 31 | value: "{{ . }}" 32 | {{- end }} 33 | {{- with .Values.proxy.no_proxy }} 34 | - name: NO_PROXY 35 | value: "{{ . }}" 36 | {{- end }} 37 | volumeMounts: 38 | - name: tests 39 | mountPath: /var/lib/loki 40 | restartPolicy: Never 41 | volumes: 42 | - name: tests 43 | configMap: 44 | name: {{ template "loki-stack.fullname" . }}-test 45 | {{- end }} 46 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | 25 | # Ignore unittest 26 | tests/ 27 | */__snapshot__/* 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Collector Chart Contributing Guide 2 | 3 | All changes to the chart require a bump to the version in `Chart.yaml`. 
See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements. 4 | 5 | Once the chart version is bumped, the examples must be regenerated. You can regenerate examples by running `make generate-examples CHARTS=opentelemetry-collector`. 6 | 7 | ## Bumping Default Collector Version 8 | 9 | 1. Increase the minor version of the chart by one and set the patch version to zero. 10 | 2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default. 11 | 3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly. 12 | 4. Run `make generate-examples CHARTS=opentelemetry-collector`. 
13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: opentelemetry-collector 3 | version: 0.92.0 4 | description: OpenTelemetry Collector Helm chart for Kubernetes 5 | type: application 6 | home: https://opentelemetry.io/ 7 | sources: 8 | - https://github.com/open-telemetry/opentelemetry-collector 9 | - https://github.com/open-telemetry/opentelemetry-collector-contrib 10 | maintainers: 11 | - name: dmitryax 12 | - name: TylerHelmuth 13 | icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png 14 | appVersion: 0.101.0 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/GOMEMLIMIT-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | useGOMEMLIMIT: true 10 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/clusterrole-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | clusterRole: 10 | create: true 11 | name: "testing-clusterrole" 12 | rules: 13 | - apiGroups: 14 | - '' 15 | resources: 16 | - 'pods' 17 | - 'nodes' 18 | verbs: 19 | - 'get' 20 | - 'list' 21 | - 'watch' 22 | clusterRoleBinding: 23 | name: "testing-clusterrolebinding" 24 | resources: 25 | limits: 26 | cpu: 100m 27 | memory: 200M 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/config-override-values.yaml: 
-------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | config: 10 | receivers: 11 | jaeger: null 12 | otlp: null 13 | zipkin: null 14 | hostmetrics: 15 | scrapers: 16 | cpu: 17 | disk: 18 | filesystem: 19 | service: 20 | pipelines: 21 | metrics: 22 | receivers: 23 | - prometheus 24 | - hostmetrics 25 | traces: null 26 | logs: null 27 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/daemonset-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/deployment-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | test: templated-value 3 | 4 | mode: deployment 5 | 6 | image: 7 | repository: "otel/opentelemetry-collector-k8s" 8 | 9 | command: 10 | name: "otelcol-k8s" 11 | 12 | resources: 13 | limits: 14 | cpu: 100m 15 | memory: 200M 16 | 17 | # Tests `tpl` function reference used in pod labels and 18 | # ingress.hosts[*] 19 | podLabels: 20 | testLabel: "{{ .Values.global.test }}" 21 | 22 | ingress: 23 | enabled: true 24 | hosts: 25 | - host: "{{ .Values.global.test }}" 26 | paths: 27 | - path: / 28 | pathType: Prefix 29 | port: 4318 30 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/disabling-protocols-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | 
repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | ports: 10 | jaeger-compact: 11 | enabled: false 12 | jaeger-thrift: 13 | enabled: false 14 | jaeger-grpc: 15 | enabled: false 16 | zipkin: 17 | enabled: false 18 | resources: 19 | limits: 20 | cpu: 100m 21 | memory: 200M 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/hpa-deployment-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | autoscaling: 10 | enabled: true 11 | minReplicas: 1 12 | maxReplicas: 10 13 | behavior: {} 14 | targetCPUUtilizationPercentage: 80 15 | targetMemoryUtilizationPercentage: 80 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/hpa-statefulset-values.yaml: -------------------------------------------------------------------------------- 1 | mode: statefulset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | autoscaling: 10 | enabled: true 11 | minReplicas: 1 12 | maxReplicas: 10 13 | behavior: {} 14 | targetCPUUtilizationPercentage: 80 15 | targetMemoryUtilizationPercentage: 80 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/multiple-ingress-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | 14 | ingress: 15 | enabled: true 16 | 17 | ingressClassName: nginx 18 | annotations: 19 | test.io/collector: default 20 | hosts: 21 
| - host: defaultcollector.example.com 22 | paths: 23 | - path: / 24 | pathType: Prefix 25 | port: 4318 26 | 27 | additionalIngresses: 28 | - name: additional-basic 29 | hosts: 30 | - host: additional-basic.example.com 31 | paths: 32 | - path: / 33 | pathType: Prefix 34 | port: 4318 35 | 36 | - name: additional-advanced 37 | ingressClassName: nginx 38 | annotations: 39 | test.io/ingress: additional-advanced 40 | hosts: 41 | - host: additional-advanced.example.com 42 | paths: 43 | - path: / 44 | pathType: Exact 45 | port: 4318 46 | tls: 47 | - secretName: somesecret 48 | hosts: 49 | - additional-advanced.example.com 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/networkpolicy-override-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | 14 | networkPolicy: 15 | enabled: true 16 | 17 | allowIngressFrom: 18 | - namespaceSelector: {} 19 | - ipBlock: 20 | cidr: 127.0.0.1/32 21 | 22 | extraIngressRules: 23 | - ports: 24 | - port: metrics 25 | protocol: TCP 26 | from: 27 | - ipBlock: 28 | cidr: 127.0.0.1/32 29 | 30 | egressRules: 31 | - to: 32 | - podSelector: 33 | matchLabels: 34 | app: jaeger 35 | ports: 36 | - port: 4317 37 | protocol: TCP 38 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/networkpolicy-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | 14 | networkPolicy: 15 | enabled: true 16 | 
-------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-clustermetrics-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | clusterMetrics: 11 | enabled: true 12 | 13 | resources: 14 | limits: 15 | cpu: 100m 16 | memory: 200M 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-hostmetrics-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | hostMetrics: 11 | enabled: true 12 | 13 | resources: 14 | limits: 15 | cpu: 100m 16 | memory: 200M 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-k8sevents-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | kubernetesEvents: 11 | enabled: true 12 | 13 | resources: 14 | limits: 15 | cpu: 100m 16 | memory: 200M 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | kubeletMetrics: 11 | enabled: true 12 | 13 | resources: 14 | limits: 15 | cpu: 100m 16 | memory: 200M 17 | 
-------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | kubernetesAttributes: 11 | enabled: true 12 | 13 | resources: 14 | limits: 15 | cpu: 100m 16 | memory: 200M 17 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/preset-logscollection-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | logsCollection: 11 | enabled: true 12 | includeCollectorLogs: true 13 | 14 | resources: 15 | limits: 16 | cpu: 100m 17 | memory: 200M 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/probes-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | livenessProbe: 10 | initialDelaySeconds: 10 11 | periodSeconds: 5 12 | timeoutSeconds: 3 13 | failureThreshold: 2 14 | terminationGracePeriodSeconds: 40 15 | httpGet: 16 | port: 8989 17 | path: /healthz 18 | 19 | readinessProbe: 20 | initialDelaySeconds: 10 21 | periodSeconds: 5 22 | timeoutSeconds: 3 23 | successThreshold: 2 24 | failureThreshold: 2 25 | httpGet: 26 | port: 8989 27 | path: /healthz 28 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/ci/statefulset-values.yaml: 
-------------------------------------------------------------------------------- 1 | mode: statefulset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | replicaCount: 2 10 | resources: 11 | limits: 12 | cpu: 100m 13 | memory: 200M 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples of chart configuration 2 | 3 | Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts. 4 | 5 | - [Daemonset only](daemonset-only) 6 | - [Deployment only](deployment-only) 7 | - [Daemonset and deployment](daemonset-and-deployment) 8 | - [Log collection, including collector logs](daemonset-collector-logs) 9 | - [Add component (hostmetrics)](daemonset-hostmetrics) 10 | 11 | The manifests are rendered using the `helm template` command and the specific example folder's values.yaml. 
12 | 13 | Examples are generated by (from root of the repo): 14 | 15 | ```sh 16 | make generate-examples CHARTS=opentelemetry-collector 17 | ``` 18 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | config: 10 | exporters: 11 | otlp: 12 | endpoint: example-opentelemetry-collector:4317 13 | tls: 14 | insecure: true 15 | service: 16 | pipelines: 17 | logs: 18 | exporters: 19 | - otlp 20 | - debug 21 | metrics: 22 | exporters: 23 | - otlp 24 | - debug 25 | traces: 26 | exporters: 27 | - otlp 28 | - debug 29 | 30 | resources: 31 | limits: 32 | cpu: 100m 33 | memory: 200M 34 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 
| app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-collector-logs/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: 
example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-collector-logs/values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | logsCollection: 11 | enabled: true 12 | includeCollectorLogs: true 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-hostmetrics/values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | hostMetrics: 11 | enabled: true 12 | 
-------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-lifecycle-hooks/values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | global: 10 | image: busybox:latest 11 | initContainers: 12 | - name: test 13 | command: 14 | - cp 15 | args: 16 | - /bin/sleep 17 | - /test/sleep 18 | image: "{{ .Values.global.image }}" 19 | volumeMounts: 20 | - name: test 21 | mountPath: /test 22 | 23 | extraVolumes: 24 | - name: test 25 | emptyDir: {} 26 | 27 | extraVolumeMounts: 28 | - name: test 29 | mountPath: /test 30 | 31 | lifecycleHooks: 32 | preStop: 33 | exec: 34 | command: 35 | - /test/sleep 36 | - "5" 37 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: 
example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/daemonset-only/values.yaml: -------------------------------------------------------------------------------- 1 | mode: daemonset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-only/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: 
opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-only/values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | replicaCount: 3 10 | 11 | resources: 12 | limits: 13 | cpu: 2 14 | memory: 4Gi 15 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/configmap.yaml 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: 
Helm 14 | 15 | data: 16 | relay: | 17 | exporters: 18 | debug: {} 19 | extensions: 20 | health_check: 21 | endpoint: ${env:MY_POD_IP}:13133 22 | processors: 23 | batch: {} 24 | memory_limiter: 25 | check_interval: 5s 26 | limit_percentage: 80 27 | spike_limit_percentage: 25 28 | receivers: 29 | otlp: 30 | protocols: 31 | grpc: 32 | endpoint: ${env:MY_POD_IP}:4317 33 | http: 34 | endpoint: ${env:MY_POD_IP}:4318 35 | service: 36 | extensions: 37 | - health_check 38 | pipelines: 39 | traces: 40 | exporters: 41 | - debug 42 | processors: 43 | - memory_limiter 44 | - batch 45 | receivers: 46 | - otlp 47 | telemetry: 48 | metrics: 49 | address: ${env:MY_POD_IP}:8888 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: otlp 21 | port: 4317 22 | targetPort: 4317 23 | protocol: TCP 24 | appProtocol: grpc 25 | - name: otlp-http 26 | port: 4318 27 | targetPort: 4318 28 | protocol: TCP 29 | selector: 30 | app.kubernetes.io/name: opentelemetry-collector 31 | app.kubernetes.io/instance: example 32 | component: standalone-collector 33 | internalTrafficPolicy: Cluster 34 | -------------------------------------------------------------------------------- 
/helm/monitor/charts/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-otlp-traces/values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | ports: 10 | jaeger-compact: 11 | enabled: false 12 | jaeger-thrift: 13 | enabled: false 14 | jaeger-grpc: 15 | enabled: false 16 | zipkin: 17 | enabled: false 18 | 19 | config: 20 | receivers: 21 | jaeger: null 22 | prometheus: null 23 | zipkin: null 24 | service: 25 | pipelines: 26 | traces: 27 | receivers: 28 | - otlp 29 | metrics: null 30 | logs: null 31 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/deployment-values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | resources: 4 | limits: 5 | cpu: 100m 6 | memory: 200M 7 | 8 | configMap: 9 | create: false 10 | 11 | image: 12 | repository: "otel/opentelemetry-collector-k8s" 13 | 14 | command: 15 | name: "otelcol-k8s" 16 | extraArgs: ["--config=/conf/config.yaml"] 17 | 18 | extraVolumes: 19 | - name: custom-otelcol-configmap 20 
| configMap: 21 | name: custom-otel-collector-config 22 | items: 23 | - key: config 24 | path: config.yaml 25 | defaultMode: 420 26 | extraVolumeMounts: 27 | - name: custom-otelcol-configmap 28 | mountPath: /conf/config.yaml 29 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | 
--- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/clusterrole.yaml 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: example-opentelemetry-collector 7 | labels: 8 | helm.sh/chart: opentelemetry-collector-0.92.0 9 | app.kubernetes.io/name: opentelemetry-collector 10 | app.kubernetes.io/instance: example 11 | app.kubernetes.io/version: "0.101.0" 12 | app.kubernetes.io/managed-by: Helm 13 | 14 | rules: 15 | - apiGroups: [""] 16 | resources: ["pods", "namespaces"] 17 | verbs: ["get", "watch", "list"] 18 | - apiGroups: ["apps"] 19 | resources: ["replicasets"] 20 | verbs: ["get", "list", "watch"] 21 | - apiGroups: ["extensions"] 22 | resources: ["replicasets"] 23 | verbs: ["get", "list", "watch"] 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/clusterrolebinding.yaml 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: example-opentelemetry-collector 7 | labels: 8 | helm.sh/chart: opentelemetry-collector-0.92.0 9 
| app.kubernetes.io/name: opentelemetry-collector 10 | app.kubernetes.io/instance: example 11 | app.kubernetes.io/version: "0.101.0" 12 | app.kubernetes.io/managed-by: Helm 13 | 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: ClusterRole 17 | name: example-opentelemetry-collector 18 | subjects: 19 | - kind: ServiceAccount 20 | name: example-opentelemetry-collector 21 | namespace: default 22 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- 
/helm/monitor/charts/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/kubernetesAttributes/values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | presets: 10 | kubernetesAttributes: 11 | enabled: true 12 | 13 | config: 14 | service: 15 | pipelines: 16 | traces: 17 | processors: 18 | - resource 19 | - k8sattributes 20 | - batch 21 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: statefulset-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: 
jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: statefulset-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/statefulset-only/values.yaml: -------------------------------------------------------------------------------- 1 | mode: statefulset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | replicaCount: 2 10 | 11 | resources: 12 | limits: 13 | cpu: 100m 14 | memory: 200M 15 | -------------------------------------------------------------------------------- 
/helm/monitor/charts/opentelemetry-collector/examples/statefulset-with-pvc/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: statefulset-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: statefulset-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/statefulset-with-pvc/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | 
app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/statefulset-with-pvc/values.yaml: -------------------------------------------------------------------------------- 1 | mode: statefulset 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | replicaCount: 2 10 | 11 | resources: 12 | limits: 13 | cpu: 100m 14 | memory: 200M 15 | 16 | statefulset: 17 | persistentVolumeClaimRetentionPolicy: 18 | enabled: true 19 | whenDeleted: Delete 20 | whenScaled: Delete 21 | 22 | volumeClaimTemplates: 23 | - metadata: 24 | name: queue 25 | spec: 26 | storageClassName: standard 27 | accessModes: 28 | - ReadWriteOnce 29 | resources: 30 | requests: 31 | storage: "1Gi" 32 | 33 | extraVolumeMounts: 34 | - name: queue 35 | mountPath: /var/lib/storage/queue 36 | 37 | initContainers: 38 | - name: init-fs 39 | image: busybox:latest 40 | command: 41 | - sh 42 | - "-c" 43 | - "chown -R 10001: /var/lib/storage/queue" 44 | volumeMounts: 45 | - name: queue 46 | mountPath: /var/lib/storage/queue 47 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: 
ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/using-GOMEMLIMIT/values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | command: 7 | name: "otelcol-k8s" 8 | 9 | resources: 10 | limits: 11 | cpu: 100m 12 | memory: 200M 13 | useGOMEMLIMIT: true 14 | -------------------------------------------------------------------------------- 
/helm/monitor/charts/opentelemetry-collector/examples/using-custom-config/rendered/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/service.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | 15 | component: standalone-collector 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | 20 | - name: jaeger-compact 21 | port: 6831 22 | targetPort: 6831 23 | protocol: UDP 24 | - name: jaeger-grpc 25 | port: 14250 26 | targetPort: 14250 27 | protocol: TCP 28 | - name: jaeger-thrift 29 | port: 14268 30 | targetPort: 14268 31 | protocol: TCP 32 | - name: otlp 33 | port: 4317 34 | targetPort: 4317 35 | protocol: TCP 36 | appProtocol: grpc 37 | - name: otlp-http 38 | port: 4318 39 | targetPort: 4318 40 | protocol: TCP 41 | - name: zipkin 42 | port: 9411 43 | targetPort: 9411 44 | protocol: TCP 45 | selector: 46 | app.kubernetes.io/name: opentelemetry-collector 47 | app.kubernetes.io/instance: example 48 | component: standalone-collector 49 | internalTrafficPolicy: Cluster 50 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/using-custom-config/rendered/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: opentelemetry-collector/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: example-opentelemetry-collector 7 | namespace: default 8 | labels: 9 | helm.sh/chart: opentelemetry-collector-0.92.0 10 | app.kubernetes.io/name: opentelemetry-collector 11 | 
app.kubernetes.io/instance: example 12 | app.kubernetes.io/version: "0.101.0" 13 | app.kubernetes.io/managed-by: Helm 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/examples/using-custom-config/values.yaml: -------------------------------------------------------------------------------- 1 | mode: deployment 2 | 3 | image: 4 | repository: "otel/opentelemetry-collector-k8s" 5 | 6 | configMap: 7 | create: false 8 | existingName: user-config 9 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (.Values.clusterRole.create) (.Values.presets.kubernetesAttributes.enabled) (.Values.presets.clusterMetrics.enabled) (.Values.presets.kubeletMetrics.enabled) (.Values.presets.kubernetesEvents.enabled) -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ include "opentelemetry-collector.clusterRoleBindingName" . }} 6 | labels: 7 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 8 | {{- if .Values.clusterRole.clusterRoleBinding.annotations }} 9 | annotations: 10 | {{- range $key, $value := .Values.clusterRole.clusterRoleBinding.annotations }} 11 | {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} 12 | {{- end }} 13 | {{- end }} 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: ClusterRole 17 | name: {{ include "opentelemetry-collector.clusterRoleName" . }} 18 | subjects: 19 | - kind: ServiceAccount 20 | name: {{ include "opentelemetry-collector.serviceAccountName" . }} 21 | namespace: {{ include "opentelemetry-collector.namespace" . 
}} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/configmap-agent.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.mode "daemonset") (.Values.configMap.create) (not .Values.configMap.existingName) -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }}-agent 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | data: 10 | relay: | 11 | {{- include "opentelemetry-collector.daemonsetConfig" . | nindent 4 -}} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/configmap-statefulset.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.mode "statefulset") (.Values.configMap.create) (not .Values.configMap.existingName) -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }}-statefulset 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | data: 10 | relay: | 11 | {{- include "opentelemetry-collector.deploymentConfig" . | nindent 4 -}} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.mode "deployment") (.Values.configMap.create) (not .Values.configMap.existingName) -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . 
}} 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | data: 10 | relay: | 11 | {{- include "opentelemetry-collector.deploymentConfig" . | nindent 4 -}} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autoscaling.enabled (or (eq .Values.mode "deployment") (eq .Values.mode "statefulset")) }} 2 | apiVersion: autoscaling/v2 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }} 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | spec: 10 | scaleTargetRef: 11 | apiVersion: apps/v1 12 | kind: {{ include "opentelemetry-collector.hpaKind" . }} 13 | name: {{ include "opentelemetry-collector.fullname" . 
}} 14 | minReplicas: {{ .Values.autoscaling.minReplicas }} 15 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 16 | {{- if .Values.autoscaling.behavior }} 17 | behavior: 18 | {{- toYaml .Values.autoscaling.behavior | nindent 4 }} 19 | {{- end }} 20 | metrics: 21 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} 22 | - type: Resource 23 | resource: 24 | name: memory 25 | target: 26 | type: Utilization 27 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} 28 | {{- end }} 29 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} 30 | - type: Resource 31 | resource: 32 | name: cpu 33 | target: 34 | type: Utilization 35 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podDisruptionBudget.enabled (eq .Values.mode "deployment") }} 2 | apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }} 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | spec: 10 | {{- if .Values.podDisruptionBudget.minAvailable }} 11 | minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} 12 | {{- end }} 13 | {{- if .Values.podDisruptionBudget.maxUnavailable }} 14 | maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} 15 | {{- end }} 16 | selector: 17 | matchLabels: 18 | {{- include "opentelemetry-collector.selectorLabels" . 
| nindent 6 }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/podmonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podMonitor.enabled .Values.podMonitor.metricsEndpoints (eq .Values.mode "daemonset") }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PodMonitor 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }}-agent 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | {{- range $key, $value := .Values.podMonitor.extraLabels }} 10 | {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} 16 | {{- include "opentelemetry-collector.component" . | nindent 6 }} 17 | podMetricsEndpoints: 18 | {{- toYaml .Values.podMonitor.metricsEndpoints | nindent 2 }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (.Values.serviceAccount.create) (.Values.presets.kubeletMetrics.enabled) -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "opentelemetry-collector.serviceAccountName" . }} 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . 
| nindent 4 }} 9 | {{- if .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- range $key, $value := .Values.serviceAccount.annotations }} 12 | {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} 13 | {{- end }} 14 | {{- end }} 15 | {{ end }} 16 | -------------------------------------------------------------------------------- /helm/monitor/charts/opentelemetry-collector/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.metricsEndpoints (or (eq .Values.mode "deployment") (eq .Values.mode "statefulset")) }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "opentelemetry-collector.fullname" . }} 6 | namespace: {{ template "opentelemetry-collector.namespace" . }} 7 | labels: 8 | {{- include "opentelemetry-collector.labels" . | nindent 4 }} 9 | {{- range $key, $value := .Values.serviceMonitor.extraLabels }} 10 | {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} 16 | {{- include "opentelemetry-collector.component" . | nindent 6 }} 17 | endpoints: 18 | {{- toYaml .Values.serviceMonitor.metricsEndpoints | nindent 2 }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/tempo/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .vscode 20 | .project 21 | .idea/ 22 | *.tmproj 23 | OWNERS 24 | -------------------------------------------------------------------------------- /helm/monitor/charts/tempo/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 2.1.1 3 | description: Grafana Tempo Single Binary Mode 4 | home: https://grafana.net 5 | icon: https://raw.githubusercontent.com/grafana/tempo/master/docs/tempo/website/logo_and_name.png 6 | maintainers: 7 | - email: number101010@gmail.com 8 | name: joe-elliott 9 | - email: 9215868@gmail.com 10 | name: swartz-k 11 | - name: annanay25 12 | - name: mdisibio 13 | - name: dgzlopes 14 | - name: mapno 15 | name: tempo 16 | sources: 17 | - https://github.com/grafana/tempo 18 | type: application 19 | version: 1.3.1 20 | -------------------------------------------------------------------------------- /helm/monitor/charts/tempo/templates/configmap-tempo-query.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.tempoQuery.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "tempo.name" . }}-query 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "tempo.labels" . | nindent 4 }} 9 | data: 10 | tempo-query.yaml: | 11 | backend: {{ template "tempo.fullname" . }}:{{ .Values.tempo.server.http_listen_port }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/monitor/charts/tempo/templates/configmap-tempo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "tempo.name" . 
}} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "tempo.labels" . | nindent 4 }} 8 | data: 9 | overrides.yaml: | 10 | overrides: 11 | {{- toYaml .Values.tempo.overrides | nindent 6 }} 12 | tempo.yaml: | 13 | {{- tpl .Values.config . | nindent 4 }} 14 | -------------------------------------------------------------------------------- /helm/monitor/charts/tempo/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "tempo.serviceAccountName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "tempo.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.labels }} 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- with .Values.serviceAccount.annotations }} 13 | annotations: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | {{- with .Values.serviceAccount.imagePullSecrets }} 17 | imagePullSecrets: 18 | {{- toYaml . | nindent 2 }} 19 | {{- end }} 20 | automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/nginx-ingress/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | *.png 3 | -------------------------------------------------------------------------------- /helm/nginx-ingress/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 3.2.1 3 | description: NGINX Ingress Controller 4 | home: https://github.com/nginxinc/kubernetes-ingress 5 | icon: https://raw.githubusercontent.com/nginxinc/kubernetes-ingress/v3.2.1/deployments/helm-chart/chart-icon.png 6 | keywords: 7 | - ingress 8 | - nginx 9 | kubeVersion: '>= 1.22.0-0' 10 | maintainers: 11 | - email: kubernetes@nginx.com 12 | name: nginxinc 13 | name: nginx-ingress 14 | sources: 15 | - https://github.com/nginxinc/kubernetes-ingress/tree/v3.2.1/deployments/helm-chart 16 | type: application 17 | version: 0.18.1 18 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | The NGINX Ingress Controller has been installed. 2 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.controller.customConfigMap -}} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "nginx-ingress.configName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . 
| nindent 4 }} 9 | {{- if .Values.controller.config.annotations }} 10 | annotations: 11 | {{ toYaml .Values.controller.config.annotations | indent 4 }} 12 | {{- end }} 13 | data: 14 | {{- if .Values.controller.config.entries }} 15 | {{ toYaml .Values.controller.config.entries | indent 2 }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-globalconfiguration.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.controller.globalConfiguration.create }} 2 | apiVersion: k8s.nginx.org/v1alpha1 3 | kind: GlobalConfiguration 4 | metadata: 5 | name: {{ include "nginx-ingress.controller.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . | nindent 4 }} 9 | spec: 10 | {{ toYaml .Values.controller.globalConfiguration.spec | indent 2 }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-ingress-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: IngressClass 3 | metadata: 4 | name: {{ .Values.controller.ingressClass }} 5 | labels: 6 | {{- include "nginx-ingress.labels" . 
| nindent 4 }} 7 | {{- if .Values.controller.setAsDefaultIngress }} 8 | annotations: 9 | ingressclass.kubernetes.io/is-default-class: "true" 10 | {{- end }} 11 | spec: 12 | controller: nginx.org/ingress-controller 13 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-leader-election-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.controller.reportIngressStatus.enableLeaderElection }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ include "nginx-ingress.leaderElectionName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . | nindent 4 }} 9 | {{- if .Values.controller.reportIngressStatus.annotations }} 10 | annotations: 11 | {{ toYaml .Values.controller.reportIngressStatus.annotations | indent 4 }} 12 | {{- end }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.controller.podDisruptionBudget.enabled -}} 2 | apiVersion: policy/v1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ include "nginx-ingress.controller.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . | nindent 4 }} 9 | {{- if .Values.controller.podDisruptionBudget.annotations }} 10 | annotations: 11 | {{ toYaml .Values.controller.podDisruptionBudget.annotations | indent 4 }} 12 | {{- end }} 13 | spec: 14 | selector: 15 | matchLabels: 16 | {{- include "nginx-ingress.selectorLabels" . 
| nindent 6 }} 17 | {{- if .Values.controller.podDisruptionBudget.minAvailable }} 18 | minAvailable: {{ .Values.controller.podDisruptionBudget.minAvailable }} 19 | {{- end }} 20 | {{- if .Values.controller.podDisruptionBudget.maxUnavailable }} 21 | maxUnavailable: {{ .Values.controller.podDisruptionBudget.maxUnavailable }} 22 | {{- end }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-secret.yaml: -------------------------------------------------------------------------------- 1 | {{ if and (not .Values.controller.defaultTLS.secret) (.Values.controller.defaultTLS.cert) (.Values.controller.defaultTLS.key) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "nginx-ingress.defaultTLSName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . | nindent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | tls.crt: {{ .Values.controller.defaultTLS.cert }} 12 | tls.key: {{ .Values.controller.defaultTLS.key }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "nginx-ingress.serviceAccountName" . }} 6 | {{- if .Values.controller.serviceAccount.annotations }} 7 | annotations: {{- toYaml .Values.controller.serviceAccount.annotations | nindent 4 }} 8 | {{- end }} 9 | namespace: {{ .Release.Namespace }} 10 | labels: 11 | {{- include "nginx-ingress.labels" . 
| nindent 4 }} 12 | {{- if .Values.controller.serviceAccount.imagePullSecretName }} 13 | imagePullSecrets: 14 | - name: {{ .Values.controller.serviceAccount.imagePullSecretName }} 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.controller.serviceMonitor.create }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "nginx-ingress.controller.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . | nindent 4 }} 9 | {{- if .Values.controller.serviceMonitor.labels -}} 10 | {{- toYaml .Values.controller.serviceMonitor.labels | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- if .Values.controller.serviceMonitor.selectorMatchLabels -}} 16 | {{- toYaml .Values.controller.serviceMonitor.selectorMatchLabels | nindent 6 }} 17 | {{- end }} 18 | {{- include "nginx-ingress.selectorLabels" . | nindent 6 }} 19 | endpoints: 20 | {{- toYaml .Values.controller.serviceMonitor.endpoints | nindent 4 }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/nginx-ingress/templates/controller-wildcard-secret.yaml: -------------------------------------------------------------------------------- 1 | {{ if and (not .Values.controller.wildcardTLS.secret) (and .Values.controller.wildcardTLS.cert .Values.controller.wildcardTLS.key) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ include "nginx-ingress.wildcardTLSName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nginx-ingress.labels" . 
| nindent 4 }} 9 | type: kubernetes.io/tls 10 | data: 11 | tls.crt: {{ .Values.controller.wildcardTLS.cert }} 12 | tls.key: {{ .Values.controller.wildcardTLS.key }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /helm/nginx-ingress/values-icp.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | name: controller 3 | kind: daemonset 4 | nginxplus: true 5 | image: 6 | repository: mycluster.icp:8500/kube-system/nginx-plus-ingress 7 | tag: "3.2.1" 8 | nodeSelector: 9 | beta.kubernetes.io/arch: "amd64" 10 | proxy: true 11 | terminationGracePeriodSeconds: 60 12 | tolerations: 13 | - key: "dedicated" 14 | operator: "Exists" 15 | effect: "NoSchedule" 16 | - key: "CriticalAddonsOnly" 17 | operator: "Exists" 18 | -------------------------------------------------------------------------------- /helm/nginx-ingress/values-nsm.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | name: controller 3 | enableLatencyMetrics: true 4 | nginxServiceMesh: 5 | enable: true 6 | enableEgress: true 7 | -------------------------------------------------------------------------------- /helm/nginx-ingress/values-plus.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | name: controller 3 | nginxplus: true 4 | image: 5 | repository: nginx-plus-ingress 6 | tag: "3.2.1" 7 | -------------------------------------------------------------------------------- /iac/ansible/inventory: -------------------------------------------------------------------------------- 1 | [servers_1] 2 | ansible_ssh_private_key_file=/home/hoanglong/.ssh/id_rsa 3 | -------------------------------------------------------------------------------- /iac/ansible/requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==8.3.0 2 | google-auth==2.28.1 3 | 
# decorator
def enable_chat_history(func):
    """Decorator that replays stored chat history on the UI before running *func*.

    Seeds ``st.session_state["messages"]`` with a greeting on first use,
    renders every stored message, then invokes the wrapped function.

    Fixes over the original: the history setup/replay now runs inside the
    wrapper (at call time) instead of at decoration time, the wrapped
    function's return value is propagated, and function metadata is preserved.
    """
    from functools import wraps  # local import so module-level imports stay untouched

    @wraps(func)
    def execute(*args, **kwargs):
        # Initialize the conversation once per browser session.
        if "messages" not in st.session_state:
            st.session_state["messages"] = [
                {"role": "assistant", "content": "How can I help you?"}
            ]
        # Replay prior messages on every invocation so the UI stays in sync
        # across Streamlit reruns.
        for msg in st.session_state["messages"]:
            st.chat_message(msg["role"]).write(msg["content"])
        # Propagate the wrapped function's result (the original dropped it).
        return func(*args, **kwargs)

    return execute


def display_msg(msg, author):
    """Method to display message on the UI

    Appends the message to the session history, then renders it.

    Args:
        msg (str): message to display
        author (str): author of the message -user/assistant
    """
    st.session_state.messages.append({"role": author, "content": msg})
    st.chat_message(author).write(msg)