├── .gitignore ├── README.md ├── example ├── basic │ ├── config │ │ ├── configmap-demo.yaml │ │ ├── opaque-secret.yaml │ │ └── secret-service-account.sh │ ├── init-containers │ │ ├── inited-services.yaml │ │ └── pod-with-init.yaml │ ├── net │ │ ├── nginx-ingress │ │ │ ├── ingress-nginx-controller-deploy.yml │ │ │ └── tomcat-deployment.yaml │ │ └── service │ │ │ ├── nginx-deploy.yaml │ │ │ ├── service-clusterip.yaml │ │ │ ├── service-externalname.yaml │ │ │ ├── service-headless.yaml │ │ │ └── service-nodeport.yaml │ ├── pod-controller │ │ ├── demo-cronjob.yaml │ │ ├── fluentd-daemonset.yaml │ │ ├── hpa │ │ │ ├── hpa-test.sh │ │ │ ├── nginx-hpa.yaml │ │ │ └── tools.sh │ │ ├── nginx-daemonset.yaml │ │ ├── nginx-deployment.yaml │ │ ├── nginx-replica-set.yaml │ │ ├── nginx-replication-controller.yaml │ │ ├── nginx-stateful.yaml │ │ └── perl-job.yaml │ ├── pod │ │ ├── busybox-pod.yaml │ │ ├── lifecycle-action.yaml │ │ └── tomcat-pod.yaml │ ├── probe │ │ ├── liveness-probe-exec.yaml │ │ ├── liveness-probe-tcp.yaml │ │ └── readiness-probe-http.yaml │ ├── resource │ │ ├── limit-range.yaml │ │ ├── resource-quota.yaml │ │ └── resource-yaml.yaml │ ├── schedule │ │ ├── affinity │ │ │ ├── node-affinity-preferred.yaml │ │ │ └── node-affinity-required.yaml │ │ ├── direct │ │ │ ├── direct-nodename.yaml │ │ │ └── direct-selector.yaml │ │ └── taint-tolerance │ │ │ ├── pod-tolerance.yaml │ │ │ └── settup-taint.sh │ ├── security │ │ ├── rbac │ │ │ └── service-account.yaml │ │ └── user │ │ │ ├── common │ │ │ ├── ca │ │ │ │ ├── ca-key.pem │ │ │ │ ├── ca.pem │ │ │ │ └── gene-ca.sh │ │ │ ├── csr.json │ │ │ ├── gene-user.sh │ │ │ └── user │ │ │ │ └── kk │ │ │ │ ├── kk-key.pem │ │ │ │ ├── kk.csr │ │ │ │ ├── kk.kubeconfig │ │ │ │ └── kk.pem │ │ │ └── default-kubeconfig.yaml │ └── storage │ │ ├── nfs │ │ ├── nfs-provisioner.yaml │ │ ├── nfs-pvc.yaml │ │ └── nfs-server.sh │ │ ├── pv │ │ ├── data │ │ │ └── busybox │ │ │ │ └── test │ │ ├── dynamic-pv.yaml │ │ └── static-pv.yaml │ │ └── volume │ │ 
├── emptydir-demo.yaml │ │ └── hostpath-demo.yaml └── helm │ ├── hello-world │ ├── chart.yaml │ ├── cmds.sh │ ├── hello-world-chart.zip │ └── templates │ │ ├── deployment.yaml │ │ └── service.yaml │ └── redis │ ├── cmds.sh │ └── startup-log.sh ├── imgs ├── all-in-one.png ├── dashboard.png ├── env-docker.png ├── env-k8s.png ├── grafana.png ├── hpa-yaml.png ├── init-ctn-mind.png ├── k8s-DCOM.png ├── k8s-and-docker.jpg ├── k8s-guide5-code.png ├── k8s-guide5-cover.png ├── k8s-mind-full.png ├── k8s-outline.png ├── k8s-service.png ├── kube-prometheus-stack-mind.png ├── local-k8s-home.png ├── mind-bond.png ├── pod-init-yaml.png ├── pod-ready-after-init.png ├── pod-wait-service.png ├── rancher-home.png ├── setup-service.png └── trees.png ├── meta └── service-account-token.json └── support ├── helm ├── fast-helm.sh ├── install.sh ├── kube-prometheus-stack │ ├── README.md │ ├── chart │ │ ├── CONTRIBUTING.md │ │ ├── Chart.lock │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── charts │ │ │ ├── grafana │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── README.md │ │ │ │ ├── ci │ │ │ │ │ ├── default-values.yaml │ │ │ │ │ ├── with-dashboard-json-values.yaml │ │ │ │ │ ├── with-dashboard-values.yaml │ │ │ │ │ └── with-image-renderer-values.yaml │ │ │ │ ├── dashboards │ │ │ │ │ └── custom-dashboard.json │ │ │ │ ├── templates │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ ├── _pod.tpl │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ ├── configmap-dashboard-provider.yaml │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ ├── dashboards-json-configmap.yaml │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ ├── headless-service.yaml │ │ │ │ │ ├── hpa.yaml │ │ │ │ │ ├── image-renderer-deployment.yaml │ │ │ │ │ ├── image-renderer-network-policy.yaml │ │ │ │ │ ├── image-renderer-service.yaml │ │ │ │ │ ├── ingress.yaml │ │ │ │ │ ├── poddisruptionbudget.yaml │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ ├── pvc.yaml │ │ │ │ │ ├── role.yaml │ │ │ │ │ ├── 
rolebinding.yaml │ │ │ │ │ ├── secret-env.yaml │ │ │ │ │ ├── secret.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ ├── statefulset.yaml │ │ │ │ │ └── tests │ │ │ │ │ │ ├── test-configmap.yaml │ │ │ │ │ │ ├── test-podsecuritypolicy.yaml │ │ │ │ │ │ ├── test-role.yaml │ │ │ │ │ │ ├── test-rolebinding.yaml │ │ │ │ │ │ ├── test-serviceaccount.yaml │ │ │ │ │ │ └── test.yaml │ │ │ │ └── values.yaml │ │ │ ├── kube-state-metrics │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── OWNERS │ │ │ │ ├── README.md │ │ │ │ ├── templates │ │ │ │ │ ├── NOTES.txt │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ ├── kubeconfig-secret.yaml │ │ │ │ │ ├── pdb.yaml │ │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ │ ├── role.yaml │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccount.yaml │ │ │ │ │ ├── servicemonitor.yaml │ │ │ │ │ ├── stsdiscovery-role.yaml │ │ │ │ │ └── stsdiscovery-rolebinding.yaml │ │ │ │ └── values.yaml │ │ │ └── prometheus-node-exporter │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── README.md │ │ │ │ ├── ci │ │ │ │ └── port-values.yaml │ │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── daemonset.yaml │ │ │ │ ├── endpoints.yaml │ │ │ │ ├── monitor.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── psp.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── serviceaccount.yaml │ │ │ │ └── values.yaml │ │ ├── crds │ │ │ ├── crd-alertmanagerconfigs.yaml │ │ │ ├── crd-alertmanagers.yaml │ │ │ ├── crd-podmonitors.yaml │ │ │ ├── crd-probes.yaml │ │ │ ├── crd-prometheuses.yaml │ │ │ ├── crd-prometheusrules.yaml │ │ │ ├── crd-servicemonitors.yaml │ │ │ └── crd-thanosrulers.yaml │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── alertmanager │ │ │ │ ├── 
alertmanager.yaml │ │ │ │ ├── extrasecret.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── ingressperreplica.yaml │ │ │ │ ├── podDisruptionBudget.yaml │ │ │ │ ├── psp-role.yaml │ │ │ │ ├── psp-rolebinding.yaml │ │ │ │ ├── psp.yaml │ │ │ │ ├── secret.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── servicemonitor.yaml │ │ │ │ └── serviceperreplica.yaml │ │ │ ├── exporters │ │ │ │ ├── core-dns │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-api-server │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-controller-manager │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-dns │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-etcd │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-proxy │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-scheduler │ │ │ │ │ ├── endpoints.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ ├── kube-state-metrics │ │ │ │ │ └── serviceMonitor.yaml │ │ │ │ ├── kubelet │ │ │ │ │ └── servicemonitor.yaml │ │ │ │ └── node-exporter │ │ │ │ │ └── servicemonitor.yaml │ │ │ ├── grafana │ │ │ │ ├── configmap-dashboards.yaml │ │ │ │ ├── configmaps-datasources.yaml │ │ │ │ ├── dashboards-1.14 │ │ │ │ │ ├── alertmanager-overview.yaml │ │ │ │ │ ├── apiserver.yaml │ │ │ │ │ ├── cluster-total.yaml │ │ │ │ │ ├── controller-manager.yaml │ │ │ │ │ ├── etcd.yaml │ │ │ │ │ ├── k8s-coredns.yaml │ │ │ │ │ ├── k8s-resources-cluster.yaml │ │ │ │ │ ├── k8s-resources-namespace.yaml │ │ │ │ │ ├── k8s-resources-node.yaml │ │ │ │ │ ├── k8s-resources-pod.yaml │ │ │ │ │ ├── k8s-resources-workload.yaml │ │ │ │ │ ├── k8s-resources-workloads-namespace.yaml │ │ │ │ │ ├── kubelet.yaml │ │ │ │ │ ├── namespace-by-pod.yaml │ │ │ │ │ ├── namespace-by-workload.yaml │ │ │ │ │ ├── node-cluster-rsrc-use.yaml │ │ │ │ │ ├── 
node-rsrc-use.yaml │ │ │ │ │ ├── nodes.yaml │ │ │ │ │ ├── persistentvolumesusage.yaml │ │ │ │ │ ├── pod-total.yaml │ │ │ │ │ ├── prometheus-remote-write.yaml │ │ │ │ │ ├── prometheus.yaml │ │ │ │ │ ├── proxy.yaml │ │ │ │ │ ├── scheduler.yaml │ │ │ │ │ ├── statefulset.yaml │ │ │ │ │ └── workload-total.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── prometheus-operator │ │ │ │ ├── admission-webhooks │ │ │ │ │ ├── job-patch │ │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ │ │ ├── job-createSecret.yaml │ │ │ │ │ │ ├── job-patchWebhook.yaml │ │ │ │ │ │ ├── psp.yaml │ │ │ │ │ │ ├── role.yaml │ │ │ │ │ │ ├── rolebinding.yaml │ │ │ │ │ │ └── serviceaccount.yaml │ │ │ │ │ ├── mutatingWebhookConfiguration.yaml │ │ │ │ │ └── validatingWebhookConfiguration.yaml │ │ │ │ ├── certmanager.yaml │ │ │ │ ├── clusterrole.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── psp.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ └── prometheus │ │ │ │ ├── _rules.tpl │ │ │ │ ├── additionalAlertRelabelConfigs.yaml │ │ │ │ ├── additionalAlertmanagerConfigs.yaml │ │ │ │ ├── additionalPrometheusRules.yaml │ │ │ │ ├── additionalScrapeConfigs.yaml │ │ │ │ ├── clusterrole.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── csi-secret.yaml │ │ │ │ ├── extrasecret.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── ingressThanosSidecar.yaml │ │ │ │ ├── ingressperreplica.yaml │ │ │ │ ├── podDisruptionBudget.yaml │ │ │ │ ├── podmonitors.yaml │ │ │ │ ├── prometheus.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── psp.yaml │ │ │ │ ├── rules-1.14 │ │ │ │ ├── alertmanager.rules.yaml │ │ │ │ ├── etcd.yaml │ │ │ │ ├── general.rules.yaml │ │ │ │ ├── k8s.rules.yaml │ │ │ │ ├── kube-apiserver-availability.rules.yaml │ │ │ │ ├── kube-apiserver-burnrate.rules.yaml │ │ │ │ ├── 
kube-apiserver-histogram.rules.yaml │ │ │ │ ├── kube-apiserver-slos.yaml │ │ │ │ ├── kube-apiserver.rules.yaml │ │ │ │ ├── kube-prometheus-general.rules.yaml │ │ │ │ ├── kube-prometheus-node-recording.rules.yaml │ │ │ │ ├── kube-scheduler.rules.yaml │ │ │ │ ├── kube-state-metrics.yaml │ │ │ │ ├── kubelet.rules.yaml │ │ │ │ ├── kubernetes-apps.yaml │ │ │ │ ├── kubernetes-resources.yaml │ │ │ │ ├── kubernetes-storage.yaml │ │ │ │ ├── kubernetes-system-apiserver.yaml │ │ │ │ ├── kubernetes-system-controller-manager.yaml │ │ │ │ ├── kubernetes-system-kubelet.yaml │ │ │ │ ├── kubernetes-system-scheduler.yaml │ │ │ │ ├── kubernetes-system.yaml │ │ │ │ ├── node-exporter.rules.yaml │ │ │ │ ├── node-exporter.yaml │ │ │ │ ├── node-network.yaml │ │ │ │ ├── node.rules.yaml │ │ │ │ ├── prometheus-operator.yaml │ │ │ │ └── prometheus.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceThanosSidecar.yaml │ │ │ │ ├── serviceThanosSidecarExternal.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── servicemonitor.yaml │ │ │ │ ├── servicemonitorThanosSidecar.yaml │ │ │ │ ├── servicemonitors.yaml │ │ │ │ └── serviceperreplica.yaml │ │ └── values.yaml │ ├── imgs │ │ ├── Prometheus-targets.png │ │ ├── alert-manager.png │ │ ├── grafana.png │ │ ├── install-log.png │ │ ├── kube-metrics-metrics.png │ │ ├── kube-metrics.png │ │ ├── kube-prometheus-stack-mind.png │ │ ├── node-exporter.png │ │ ├── prometheus-arch.png │ │ ├── prometheus-operator-arch.png │ │ ├── stack-components.png │ │ └── uninstall-log.png │ ├── install.sh │ ├── log │ │ └── startup.log.sh │ ├── uninstall.sh │ └── values.yaml └── metrics-server │ ├── fast-metrics.sh │ ├── log │ └── startup.log.sh │ ├── metrics-server.sh │ ├── metrics-server.yaml │ ├── metrics │ └── nodes.json │ └── uninstall.sh ├── kubectl ├── fast-kubectl.sh └── top │ └── kubectl-top-help.sh ├── kubernetes-dashboard ├── README.md ├── dashboard-v2.2.0.yaml ├── imgs │ ├── dashboard-home.png │ ├── dashboard-pods.png │ ├── login-token.png │ ├── start-log.png │ └── 
uninstall-log.png ├── kubernetes-dashboard-account.yaml ├── start.sh └── unstall.sh ├── nginx-ingress ├── ingress-nginx-controller-deploy.yml └── tomcat-deployment.yaml └── rancher-ui ├── imgs ├── add-agent.png ├── add-cluster.png ├── agent.png ├── cattle-resources.png ├── chs.png ├── import.png ├── install.png ├── local-k8s-home.png ├── local-k8s.png ├── password.png ├── server-url.png └── uninsall-log.png ├── install.sh ├── readme.md ├── tools.sh └── uninstall.sh /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | [Common] 3 | tmp 4 | 5 | [Mac] 6 | .DS_Store 7 | 8 | [IDE] 9 | .idea 10 | 11 | [Maven] 12 | target 13 | 14 | [Android] 15 | .gradle 16 | *.iml 17 | build 18 | output 19 | local.properties 20 | 21 | [Node] 22 | node_modules 23 | 24 | [bower] 25 | bower_components 26 | 27 | [Python] 28 | venv 29 | **chache** 30 | 31 | [docker] 32 | volumes 33 | 34 | [log] 35 | *.log 36 | 37 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k-kubernetes-public 2 | 3 | 面向 docker/kubernetes 初学者的快速本地部署和测试 docker/k8s 各种功能特性。 [Kubernetes 实战专栏](https://juejin.cn/column/7021047815069499423) 4 | 5 | ### 运行环境 6 | 7 | - macOS Big Sur 11.4+ 8 | - Docker v20.10.7+ 9 | - Kubernetes v1.21.2+ 10 | 11 | ### 开箱即用 12 | 13 | 集成 k8s 部署和测试 常用的基础套件,无需各博客搜索、自建,减少弯路。 14 | 15 | 目前集成的有 16 | 17 | - kubernetes dashboard - 官方Dashboard 18 | - Rancher UI - 强大的 k8s/非k8s 集群部署工具 19 | - helm - 基于包的k8s应用部署工具 20 | - Nginx Ingress Controller - 支持 Ingress L7 负载均衡 21 | - metrics-server - k8s 指标聚合器 22 | - kube-prometheus-stack - 一站式的 k8s 集群监控系统 23 | 24 | ![](./imgs/all-in-one.png) 25 | 26 | ![](./imgs/dashboard.png) 27 | ![](./imgs/rancher-home.png) 28 | ![](./imgs/grafana.png) 29 | 30 | ### 无障碍运行 31 | 32 | 所有配置和脚本均精心设计,利于测试和体验 k8s 的指定特性,且均反复验证,可在 Mac OS 下无障碍运行。 33 | 34 | ![](./imgs/hpa-yaml.png) 35 | 36 | ### 脑图 37 | 38 |
从入门到进阶的脑图+实践,建立 k8s 基础知识网络。 39 | 40 | - 解构图 41 | 42 | ![](./imgs/k8s-DCOM.png) 43 | 44 | - 脑图[(完整版)](http://kjeek.com/k8s/awesome-kubernates/) 45 | 46 | ![](./imgs/k8s-outline.png) 47 | 48 | ![](./imgs/k8s-service.png) 49 | 50 | ### 使用方法 51 | 52 | #### 学习 Init 容器 53 | 54 | ![](./imgs/init-ctn-mind.png) 55 | 56 | #### 测试example 57 | 58 | ![](./imgs/pod-init-yaml.png) 59 | ![](./imgs/pod-wait-service.png) 60 | ![](./imgs/setup-service.png) 61 | ![](./imgs/pod-ready-after-init.png) 62 | 63 | -------------------------------------------------------------------------------- /example/basic/config/configmap-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: special-config 5 | data: # 字面值创建 6 | special.how: very 7 | special.type: charm2 8 | 9 | # 作为容器的环境变量 10 | --- 11 | apiVersion: v1 12 | kind: Pod 13 | metadata: 14 | name: configmap-as-env 15 | spec: 16 | containers: 17 | - name: busybox-env 18 | image: busybox 19 | command: [ "sleep", "10000" ] 20 | # 引用 configmap 的全部值 21 | envFrom: 22 | - configMapRef: 23 | name: special-config # 和 ConfigMap 的 metadata.name 一直 24 | env: # 引用某个值,赋值给某个环境变量 25 | - name: SPECIAL_TYPE_KEY 26 | valueFrom: 27 | configMapKeyRef: 28 | name: special-config 29 | key: special.type 30 | 31 | # 使用存储在 ConfigMap 中的数据填充数据卷 32 | --- 33 | apiVersion: v1 34 | kind: Pod 35 | metadata: 36 | name: configmap-as-vol 37 | spec: 38 | # 挂载 configmap 39 | volumes: 40 | - name: config-volume 41 | configMap: 42 | name: special-config # 和 ConfigMap 的 metadata.name 一直 43 | containers: 44 | - name: busybox-vol 45 | image: busybox 46 | command: [ "sleep", "10000" ] 47 | volumeMounts: 48 | - name: config-volume 49 | mountPath: /etc/config 50 | 51 | -------------------------------------------------------------------------------- /example/basic/config/opaque-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 
| kind: Secret 3 | metadata: 4 | name: mysecret 5 | type: Opaque 6 | data: 7 | # value 必须是base64 编码 8 | password: MWYyZDF1MmU2N2Rm # 明文 1f2d1u2e67df 9 | username: YWRtaW4= 10 | 11 | # Opaque 秘钥挂载到导入 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | labels: 17 | name: seret-test 18 | name: seret-test 19 | spec: 20 | volumes: 21 | # 导入secret 22 | - name: secrets 23 | secret: 24 | secretName: mysecret 25 | containers: 26 | - image: tomcat:9.0.20-jre8-alpine 27 | name: tomcat9 28 | # mounts中使用volume中的secret 29 | volumeMounts: 30 | - name: secrets # 导入的秘钥 31 | mountPath: "/etc/secrets" # 挂载的路径 32 | readOnly: true 33 | 34 | --- 35 | apiVersion: v1 36 | kind: Pod 37 | metadata: 38 | name: mysql 39 | spec: 40 | containers: 41 | - name: mysql 42 | image: mysql:8.0 43 | # 将secret的值赋给环境变量 44 | env: 45 | - name: db-user 46 | valueFrom: 47 | secretKeyRef: 48 | name: mysecret 49 | key: username 50 | - name: MYSQL_ROOT_PASSWORD 51 | valueFrom: 52 | secretKeyRef: 53 | name: mysecret 54 | key: password 55 | -------------------------------------------------------------------------------- /example/basic/config/secret-service-account.sh: -------------------------------------------------------------------------------- 1 | kubectl get pods -n kube-system 2 | 3 | kubectl exec -n kube-system kube-proxy-7f8nh -- \ 4 | ls /run/secrets/kubernetes.io/serviceaccount 5 | 6 | kubectl exec -n kube-system kube-proxy-7f8nh -- \ 7 | cat /run/secrets/kubernetes.io/serviceaccount/ca.crt 8 | 9 | kubectl exec -n kube-system kube-proxy-7f8nh -- \ 10 | cat /run/secrets/kubernetes.io/serviceaccount/token 11 | -------------------------------------------------------------------------------- /example/basic/init-containers/inited-services.yaml: -------------------------------------------------------------------------------- 1 | # 主容器依赖的服务环境,由init容器进行检测是否就绪 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: init-service 6 | spec: 7 | ports: 8 | - protocol: TCP 9 | port: 80 10 | 
targetPort: 9376 11 | --- 12 | apiVersion: v1 13 | kind: Service 14 | metadata: 15 | name: init-db 16 | spec: 17 | ports: 18 | - protocol: TCP 19 | port: 80 20 | targetPort: 9377 21 | -------------------------------------------------------------------------------- /example/basic/init-containers/pod-with-init.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app-pod 5 | labels: 6 | app: app 7 | spec: 8 | # init 初始化准备容器,用于检测主容器是否具备启动的依赖条件 9 | initContainers: 10 | - name: init-service # 检查 init-service 服务是否就绪 11 | image: busybox:1.28 12 | command: 13 | - "sh" 14 | - "-c" 15 | - "until nslookup init-service.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for init-service; sleep 2; done" 16 | - name: init-db 17 | image: busybox:1.28 18 | command: # 检查 init-db 服务是否就绪 19 | - "sh" 20 | - "-c" 21 | - "until nslookup init-db.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for init-db; sleep 2; done" 22 | # 主容器,待上述所有init初始化检查重启检测成功后,才会启动主容器 23 | containers: 24 | - name: app-container 25 | image: busybox:1.28 26 | command: [ 'sh', '-c', 'echo The app is running! 
&& sleep 3600' ] 27 | -------------------------------------------------------------------------------- /example/basic/net/nginx-ingress/tomcat-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: tomcat-deployment 5 | labels: 6 | app: tomcat-deployment 7 | spec: 8 | replicas: 3 # 副本数量 9 | strategy: # 更新策略 10 | type: RollingUpdate 11 | selector: # 通过标签选择,限制该 Deployment 管理的 Pod 12 | matchLabels: 13 | app: tomcat9 # 需与 spec.template.metadata.labels.app 属性对应 14 | # 部署/更新 pod 的模板 15 | template: 16 | metadata: 17 | name: tomcat-deployment-tmpl 18 | labels: 19 | app: tomcat9 # 标签,会绑定到该Deployment锁管理的所有Pod上 20 | spec: 21 | # 指定在某个Node节点运行 22 | nodeName: docker-desktop 23 | containers: 24 | - name: tomcat-cotainer 25 | image: tomcat:9.0.20-jre8-alpine 26 | imagePullPolicy: IfNotPresent # 镜像拉取策略 27 | ports: 28 | - containerPort: 8080 # 容器内的应用端控,无法被外界访问 29 | 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: tomcat-svc 35 | spec: 36 | selector: 37 | app: tomcat9 # 与控制器 spec.selector.matchLabels.app 属性对应 38 | type: NodePort # 让外部可以访问服务 39 | ports: 40 | - targetPort: 8080 # 容器内应用端口 spec.template.spec.containers[0].ports[0].containerPort 41 | port: 8888 # 暴露给集群内其他应用访问的端口 42 | nodePort: 30001 # 绑定主机(节点Node)端口,供集群外部访问 43 | 44 | # 定义 Ingress规则 45 | --- 46 | apiVersion: extensions/v1beta1 47 | kind: Ingress 48 | metadata: 49 | name: tomcat-ingress 50 | annotations: 51 | # nginx-controller的命令行参数指定加载对应class的ingress规则 52 | kubernetes.io/ingress.class: nginx 53 | spec: 54 | rules: 55 | - host: localhost 56 | http: 57 | paths: 58 | - path: / 59 | backend: 60 | serviceName: tomcat-svc # 转发目标的服务名称 61 | servicePort: 8888 # 服务的端口 62 | -------------------------------------------------------------------------------- /example/basic/net/service/nginx-deploy.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: 
apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-deploy 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | ports: 20 | - name: http 21 | containerPort: 80 22 | - name: busybox # 包含ping、telnet等工具 23 | image: busybox 24 | command: 25 | - sleep 26 | - "13600" 27 | -------------------------------------------------------------------------------- /example/basic/net/service/service-clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: clusterip-svc 5 | spec: 6 | type: ClusterIP 7 | ports: 8 | - name: http 9 | port: 80 10 | targetPort: 80 # 容器的端口 11 | selector: # 必须指定目标标签的pod 12 | app: nginx 13 | -------------------------------------------------------------------------------- /example/basic/net/service/service-externalname.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: externalname-svc 5 | spec: 6 | type: ExternalName 7 | # externalName: baidu.com # cname到百度地址 8 | externalName: dashboard-metrics-scraper.kubernetes-dashboard.svc.cluster.local # cname到其他namespace下的服务 9 | -------------------------------------------------------------------------------- /example/basic/net/service/service-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: headless-svc 5 | spec: 6 | clusterIP: "None" # 无头服务 7 | ports: 8 | - port: 80 9 | targetPort: 80 10 | selector: # 必须指定目标标签的pod 11 | app: nginx 12 | -------------------------------------------------------------------------------- /example/basic/net/service/service-nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
kind: Service 3 | metadata: 4 | name: nodeport-svc 5 | spec: 6 | type: NodePort 7 | ports: 8 | - name: http 9 | port: 80 10 | targetPort: 80 11 | nodePort: 30001 # 主机 Port 12 | selector: # 必须指定目标标签的pod 13 | app: nginx 14 | -------------------------------------------------------------------------------- /example/basic/pod-controller/demo-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hello-cjob 5 | spec: 6 | schedule: "*/1 * * * *" # 每1min执行 7 | jobTemplate: # job 模板 8 | spec: 9 | template: # pod 模板 10 | spec: 11 | restartPolicy: OnFailure 12 | containers: 13 | - name: hello 14 | image: busybox 15 | args: 16 | - /bin/sh 17 | - -c 18 | - date; echo Hello from the Kubernetes cluster 19 | -------------------------------------------------------------------------------- /example/basic/pod-controller/fluentd-daemonset.yaml: -------------------------------------------------------------------------------- 1 | # 每个节点运行一个 日志收集服务 fluentd 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: fluentd-logging 6 | labels: 7 | k8s-app: fluentd-logging 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd-logging 12 | template: 13 | metadata: 14 | labels: 15 | name: fluentd-logging 16 | spec: 17 | volumes: 18 | - name: varlog 19 | hostPath: 20 | path: /var/log 21 | - name: docker-containers 22 | hostPath: 23 | path: /var/lib/docker/containers 24 | tolerations: 25 | - key: node-role.kubernetes.io/master 26 | effect: NoSchedule 27 | containers: 28 | - name: fluentd-logging 29 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 30 | resources: # 资源 31 | limits: 32 | memory: 200Mi 33 | requests: 34 | cpu: 100m 35 | memory: 200Mi 36 | volumeMounts: 37 | - name: varlog 38 | mountPath: /var/log 39 | - name: docker-containers 40 | mountPath: /var/lib/docker/containers 41 | readOnly: true 42 | terminationGracePeriodSeconds: 30 43 | 
-------------------------------------------------------------------------------- /example/basic/pod-controller/hpa/hpa-test.sh: -------------------------------------------------------------------------------- 1 | echo '----- 创建 HPA 对象 ...' 2 | kubectl autoscale deployment nginx-hpa --cpu-percent=10 --min=1 --max=10 3 | echo 4 | 5 | echo '----- 增加负载 ...' 6 | curTime=`date '+%s'` 7 | 8 | while true; do 9 | wget -q -O- http://localhost:30001; 10 | done 11 | 12 | 13 | # 参考: Kubernetes HPA 使用详解 14 | -------------------------------------------------------------------------------- /example/basic/pod-controller/hpa/nginx-hpa.yaml: -------------------------------------------------------------------------------- 1 | # 无状态服务部署 Deployment 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-hpa 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: nginx-hpa 11 | template: 12 | metadata: 13 | name: nginx-hpa 14 | labels: # 必须指定标签 15 | app: nginx-hpa 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | ports: 21 | - containerPort: 80 22 | # hpa 生效必须的资源请求配置 23 | resources: 24 | requests: 25 | memory: 50Mi 26 | cpu: 10m # 设置小些,便于增加负载测试 27 | 28 | # 暴露 nginx ,便于测试 29 | --- 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | name: nodeport-svc 34 | spec: 35 | type: NodePort 36 | ports: 37 | - name: http 38 | port: 80 39 | targetPort: 80 40 | nodePort: 30001 # 主机 Port 41 | selector: # 必须指定目标标签的pod 42 | app: nginx-hpa 43 | -------------------------------------------------------------------------------- /example/basic/pod-controller/hpa/tools.sh: -------------------------------------------------------------------------------- 1 | # 查看 hpa 对象及其事件 2 | kubectl describe hpa nginx-hpa 3 | 4 | # 查看 pods数量 变化 5 | kubectl get pods 6 | 7 | # 删除 hpa 8 | kubectl delete hpa nginx-hpa 9 | -------------------------------------------------------------------------------- /example/basic/pod-controller/nginx-daemonset.yaml:
-------------------------------------------------------------------------------- 1 | # 每个节点运行一个 Nginx 守护服务 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: nginx-daemonset 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | name: nginx-daemonset-tmpl 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx-daemonset 18 | image: nginx:1.17.10-alpine 19 | -------------------------------------------------------------------------------- /example/basic/pod-controller/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | # 无状态服务部署 Deployment 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-deploy 6 | spec: 7 | replicas: 5 8 | selector: # 基于 RS,必须配置标签选择器 9 | matchLabels: # 比RC新增的标签选择器 10 | app: nginx # 和 spec.template.metadata.labels.app 一致 11 | template: 12 | metadata: 13 | name: nginx 14 | labels: # 必须指定标签 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | ports: 21 | - containerPort: 90 22 | -------------------------------------------------------------------------------- /example/basic/pod-controller/nginx-replica-set.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nginx-rs 5 | spec: 6 | replicas: 4 7 | selector: # 标签选择器 8 | matchLabels: # 比RC新增的标签选择器 9 | app: nginx # 和 spec.template.metadata.labels.app 一致 10 | template: 11 | metadata: 12 | name: nginx 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /example/basic/pod-controller/nginx-replication-controller.yaml: -------------------------------------------------------------------------------- 1 | # ReplicationController 副本数控制器 2 | apiVersion: v1 3 | kind: 
ReplicationController 4 | metadata: 5 | name: nginx-rc 6 | spec: 7 | replicas: 4 8 | selector: # 标签选择器 9 | app: nginx # 和 spec.template.metadata.labels.app 一致 10 | template: 11 | metadata: 12 | name: nginx 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /example/basic/pod-controller/nginx-stateful.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: web 6 | spec: 7 | serviceName: nginx # 必须的headless service 8 | podManagementPolicy: Parallel 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | # PV/PVC 模板 14 | volumeClaimTemplates: 15 | - metadata: 16 | name: www 17 | spec: 18 | accessModes: [ "ReadWriteOnce" ] 19 | resources: 20 | requests: 21 | storage: 1Gi 22 | # Pod 模板 23 | template: 24 | metadata: 25 | labels: 26 | app: nginx 27 | spec: 28 | containers: 29 | - name: nginx 30 | image: nginx 31 | volumeMounts: # 和 spec.volumeClaimTemplates[0].metadata.name 一致 32 | - name: www 33 | mountPath: /usr/share/nginx/html 34 | ports: 35 | - containerPort: 80 36 | name: web 37 | -------------------------------------------------------------------------------- /example/basic/pod-controller/perl-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: pi 5 | spec: 6 | completions: 6 # 至少完成10次 7 | parallelism: 2 # 每次最多2个并行 8 | backoffLimit: 4 # 容错次数 9 | template: 10 | spec: 11 | restartPolicy: Never 12 | containers: 13 | - name: pi 14 | image: perl:slim 15 | command: [ "perl", "-Mbignum=bpi", "-wle", "print bpi(100)" ] 16 | 17 | -------------------------------------------------------------------------------- /example/basic/pod/busybox-pod.yaml: 
-------------------------------------------------------------------------------- 1 | # 最基本的Pod方式部署 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: busybox 6 | labels: 7 | app: busybox 8 | spec: 9 | restartPolicy: Always 10 | containers: 11 | - name: busybox 12 | image: busybox 13 | command: 14 | - sleep 15 | - "3600" 16 | -------------------------------------------------------------------------------- /example/basic/pod/lifecycle-action.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: lifecycle-demo 5 | spec: 6 | containers: 7 | - name: lifecycle-demo-container 8 | image: nginx 9 | # 生命周期回调,可在启动后初始化配置,也可以在停止前释放资源 10 | lifecycle: 11 | postStart: 12 | exec: 13 | command: 14 | - "/bin/sh" 15 | - "-c" 16 | - "echo Hello from the postStart handler > /usr/share/start.log" 17 | preStop: 18 | exec: 19 | command: 20 | - "/bin/sh" 21 | - "-c" 22 | - "echo Hello from the poststop handler > /usr/share/stop.log && sleep 20" 23 | -------------------------------------------------------------------------------- /example/basic/pod/tomcat-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod # 资源类型 3 | metadata: # 资源元数据 4 | name: tomcat9 # 必填 5 | labels: 6 | app: tomcat9 7 | spec: 8 | containers: 9 | - name: tomcat9 10 | image: tomcat:9.0.20-jre8-alpine 11 | imagePullPolicy: IfNotPresent # 镜像拉取策略 12 | # ----- 启动检测 13 | startupProbe: 14 | # http方式 15 | httpGet: 16 | port: 8080 17 | path: /index.html 18 | periodSeconds: 10 19 | failureThreshold: 30 20 | # ----- 活性检测 21 | livenessProbe: 22 | exec: 23 | command: [ "test","-e","/tmp/livenesspod" ] # 0 - 成功,1 -失败 24 | initialDelaySeconds: 1 25 | periodSeconds: 3 26 | # ----- 就绪检测 27 | readinessProbe: # 就绪检测 28 | tcpSocket: # 端口检测 29 | port: 8080 30 | initialDelaySeconds: 10 31 | periodSeconds: 3 32 | timeoutSeconds: 5 33 | # ----- 生命周期 34 | lifecycle: 35 | postStart: 
# 在容器启动后执行该指令 36 | exec: 37 | command: [ 'mkdir','-p','/lagou/k8s/index.html' ] 38 | -------------------------------------------------------------------------------- /example/basic/probe/liveness-probe-exec.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: liveness-exec-pod 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: liveness-exec-container 9 | image: busybox 10 | imagePullPolicy: IfNotPresent 11 | command: 12 | - "/bin/sh" 13 | - "-c" 14 | - "touch /tmp/live ; sleep 60; rm -rf /tmp/live; sleep 3600 " 15 | # 存活性检测,判断容器是否存活 16 | livenessProbe: 17 | exec: 18 | command: ["test", "-e", "/tmp/live"] 19 | initialDelaySeconds: 1 20 | periodSeconds: 3 21 | -------------------------------------------------------------------------------- /example/basic/probe/liveness-probe-tcp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: liveness-probe-tcp 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx 9 | livenessProbe: 10 | initialDelaySeconds: 5 11 | timeoutSeconds: 1 12 | tcpSocket: 13 | port: 80 14 | -------------------------------------------------------------------------------- /example/basic/probe/readiness-probe-http.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: readiness-http-pod 6 | namespace: default 7 | spec: 8 | containers: 9 | - name: readiness-http-container 10 | image: nginx:1.17.10-alpine 11 | readinessProbe: 12 | httpGet: 13 | port: 80 14 | # path: /index.html # 正常 15 | path: /index1.html # 失败,不存在的页面 16 | initialDelaySeconds: 1 17 | periodSeconds: 3 18 | -------------------------------------------------------------------------------- /example/basic/resource/limit-range.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: limited-ns 5 | 6 | --- 7 | apiVersion: v1 8 | kind: LimitRange 9 | metadata: 10 | name: limitrange-test 11 | namespace: limited-ns 12 | spec: 13 | limits: 14 | - type: Pod # Pod级限制 15 | max: 16 | cpu: 1000m 17 | memory: 1024Mi 18 | min: 19 | cpu: 100m 20 | memory: 128Mi 21 | 22 | - type: Container # Container级限制 23 | max: 24 | cpu: 300m 25 | memory: 1024Mi 26 | min: 27 | cpu: 10m 28 | memory: 128Mi 29 | 30 | default: # 即limit 31 | cpu: 100m 32 | memory: 512Mi 33 | defaultRequest: 34 | cpu: 50m 35 | memory: 256Mi 36 | 37 | --- 38 | # 测试 limitrange 39 | apiVersion: v1 40 | kind: Pod 41 | metadata: 42 | name: busybox 43 | namespace: limited-ns 44 | labels: 45 | app: busybox 46 | spec: 47 | restartPolicy: Always 48 | containers: 49 | - name: busybox 50 | image: busybox 51 | command: 52 | - sleep 53 | - "3600" 54 | # 如果不限制,则会报错 pods "busybox" is forbidden: minimum cpu usage per Pod is 100m, but request is 50m 55 | # resources: 56 | # requests: 57 | # cpu: 100m 58 | -------------------------------------------------------------------------------- /example/basic/resource/resource-quota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: limited-ns 5 | 6 | --- 7 | apiVersion: v1 8 | kind: ResourceQuota 9 | metadata: 10 | name: compute-quota 11 | namespace: limited-ns 12 | spec: 13 | hard: 14 | requests.cpu: 100m 15 | requests.memory: 100Mi 16 | limits.cpu: 200m 17 | limits.memory: 200Mi 18 | 19 | --- 20 | # 测试 21 | apiVersion: v1 22 | kind: Pod 23 | metadata: 24 | name: busybox 25 | namespace: limited-ns 26 | labels: 27 | app: busybox 28 | spec: 29 | restartPolicy: Always 30 | containers: 31 | - name: busybox 32 | image: busybox 33 | command: 34 | - sleep 35 | - "3600" 36 | resources: 37 | requests: 38 | cpu: 100m 39 | memory: 100Mi 40 
| limits: 41 | cpu: 200m 42 | memory: 200Mi 43 | 44 | 45 | -------------------------------------------------------------------------------- /example/basic/resource/resource-yaml.yaml: -------------------------------------------------------------------------------- 1 | # 以Yaml方式配置对象的规约(Spec),描述该对象的基本信息以及期望状态,供命令行客户端 kubectl 使用 2 | apiVersion: apps/v1 # api版本,不同资源可能属于不同的CRD API,因此需要根据资源类型,选择兼容的版本 3 | kind: Deployment # 资源种类,可以是k8s内置的资源,也可以通过CRD扩展自己的资源种类 4 | 5 | metadata: # 元数据,该资源的元数据,用于标记资源,以便k8s进行更高维度的管理 6 | name: nginx-deployment # 资源名,建议在同一namespace下是唯一的 7 | namespace: app-test # 命名空间,非集群级资源,需要绑定命名空间,进行隔离 8 | labels: # 标签(特征点) 9 | apps: nginx 10 | 11 | spec: # 规约,核心,定义资源本身的属性和期望状态,以便k8s创建和部署该资源,以达到期望的状态 12 | selector: # 标签选择器,表示当前资源(如Deployment)仅对指定标签的受控资源(如Pod)生效、管理 13 | matchLabels: 14 | app: nginx 15 | replicas: 2 # (Pod)副本数 16 | template: # (Pod)模板(核心),用于创建、部署Pod(最小部署单元)的模板 17 | metadata: 18 | labels: 19 | app: nginx 20 | spec: 21 | containers: 22 | - name: nginx 23 | image: nginx:1.14.2 24 | ports: 25 | - containerPort: 80 26 | -------------------------------------------------------------------------------- /example/basic/schedule/affinity/node-affinity-preferred.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: node-affinity-pod 5 | spec: 6 | containers: 7 | - name: node-affinity-ctn 8 | image: busybox 9 | command: [ "sleep", "10000" ] 10 | affinity: 11 | nodeAffinity: # 节点亲和性 12 | preferredDuringSchedulingIgnoredDuringExecution: # 软策略 13 | - weight: 1 14 | preference: 15 | matchExpressions: 16 | - key: kubernetes.io/hostname 17 | operator: In 18 | values: 19 | - docker-desktop # ✅本地可部署 20 | # - docker-desktop2 # ⚠️匹配失败,但仍可以部署 21 | -------------------------------------------------------------------------------- /example/basic/schedule/affinity/node-affinity-required.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: node-affinity-pod 5 | spec: 6 | containers: 7 | - name: node-affinity-ctn 8 | image: busybox 9 | command: [ "sleep", "10000" ] 10 | affinity: 11 | nodeAffinity: # 节点亲和性 12 | requiredDuringSchedulingIgnoredDuringExecution: # 硬策略 13 | nodeSelectorTerms: # 强制匹配节点的标签 14 | - matchExpressions: 15 | - key: kubernetes.io/hostname 16 | operator: In 17 | values: 18 | - docker-desktop # ✅本地可部署 19 | - docker-desktop2 # ⚠️本地不可部署 20 | -------------------------------------------------------------------------------- /example/basic/schedule/direct/direct-nodename.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: busybox 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: busybox 10 | template: 11 | metadata: 12 | labels: 13 | app: busybox 14 | spec: 15 | # 直接指定 节点名 16 | # nodeName: docker-desktop # ✅ 17 | nodeName: k8s-node1 # ❌ 18 | containers: 19 | - name: busybox 20 | image: busybox 21 | command: [ "sleep", "10000" ] 22 | -------------------------------------------------------------------------------- /example/basic/schedule/direct/direct-selector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: direct-selector-deploy 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: direct-selector-pod 10 | template: 11 | metadata: 12 | name: direct-selector 13 | labels: 14 | app: direct-selector-pod 15 | spec: 16 | # 直接指定 标签选择器,限制指定cpu架构的主机才能部署 17 | nodeSelector: 18 | # kubernetes.io/arch: x86 # ❌ mac amd64 上无法安装 19 | kubernetes.io/arch: amd64 # ✅ mac amd64 上可以安装 20 | containers: 21 | - name: direct-selector 22 | image: busybox 23 | command: [ "sleep", "10000" ] 24 | -------------------------------------------------------------------------------- 
/example/basic/schedule/taint-tolerance/pod-tolerance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-tole 5 | labels: 6 | app: test 7 | spec: 8 | containers: 9 | - name: tole-ctn 10 | image: busybox 11 | command: [ "sleep", "10000" ] 12 | # 容忍app=test污点 13 | tolerations: 14 | - key: "app" 15 | operator: "Equal" 16 | value: "test" 17 | -------------------------------------------------------------------------------- /example/basic/schedule/taint-tolerance/settup-taint.sh: -------------------------------------------------------------------------------- 1 | # 添加污点:不能部署测试类服务 2 | kubectl taint nodes docker-desktop app=test:NoSchedule 3 | 4 | # 移除污点 5 | # kubectl taint nodes docker-desktop app=test:NoSchedule- 6 | 7 | 8 | -------------------------------------------------------------------------------- /example/basic/security/rbac/service-account.yaml: -------------------------------------------------------------------------------- 1 | # 创建集群私密功能的集群角色:秘钥只读 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | # 集群级别 无需 namespace 7 | name: secret-reader 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["secrets"] 11 | verbs: ["get", "watch", "list"] 12 | 13 | # 创建特定权限的角色:pod只读 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: Role 17 | metadata: 18 | name: pod-reader 19 | namespace: default # namespace级别 20 | rules: 21 | - apiGroups: [""] # "" 指定核心 API 组 22 | resources: ["pods"] # 指定此权限的作用的资源类型 23 | verbs: ["get", "watch", "list"] # 指定权限 24 | 25 | # 创建账户 26 | --- 27 | apiVersion: v1 28 | kind: ServiceAccount 29 | metadata: 30 | name: kk 31 | namespace: default 32 | 33 | # 将角色绑定到用户,使得用户具备相应角色的权限 34 | --- 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | kind: RoleBinding 37 | metadata: 38 | name: read-pods 39 | namespace: default 40 | subjects: # 用户主体 41 | - apiGroup: rbac.authorization.k8s.io 42 | kind: User 43 | name: 
kk # 用户名 44 | roleRef: # 绑定的角色 45 | kind: Role # 确定 Role 或 ClusterRole 的类型 46 | name: pod-reader # 指定要绑定的 Role 或 ClusterRole 的名称 47 | apiGroup: rbac.authorization.k8s.io 48 | -------------------------------------------------------------------------------- /example/basic/security/user/common/ca/ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFkTCCA3mgAwIBAgIUeVhQUChFDBDqo90XzeysyESqotowDQYJKoZIhvcNAQEL 3 | BQAwWDELMAkGA1UEBhMCY24xEDAOBgNVBAgMB2JlaWppbmcxEDAOBgNVBAcMB2Jl 4 | aWppbmcxCzAJBgNVBAoMAmtrMQswCQYDVQQLDAJrazELMAkGA1UEAwwCa2swHhcN 5 | MjExMDA1MDA1NTQ0WhcNMjQwNzAxMDA1NTQ0WjBYMQswCQYDVQQGEwJjbjEQMA4G 6 | A1UECAwHYmVpamluZzEQMA4GA1UEBwwHYmVpamluZzELMAkGA1UECgwCa2sxCzAJ 7 | BgNVBAsMAmtrMQswCQYDVQQDDAJrazCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC 8 | AgoCggIBAOaXCGdaO4RAzfl8L1p+79Vk0hyQu36Aglxs5/qQDTkHG2jjxlCJZgBs 9 | puhig+ip1StSZ5cl/WVUErnUoKOpzeV+CWn5EHK2+vgfkA41hbpL3LBFOqj6jRMH 10 | O7cUOkzZYfmcpSc0HXYomL1pY8F7KKlX0DuRbGiBD5g/f48UM46PJGs1AuXrreCs 11 | UvAZgZYsVQFfwVnRlAl27cDDirdkg7sQ21pxTQONBKx+7NAxOvOwIfODSMjdnzeS 12 | 3pwR4zgG9IJD5xIdfh8K61jIZqc/KIXoKG9qLs9+vR08uOnmQb7KowtMyv1Kg9A0 13 | zfmmpbqc60oUoH6Tpi9/YDPypyekBe9OiqoEPXTjcL73vZG6genqFRL7O1WgF2Li 14 | x1PWZFwrbayjvjN/eFTJyPw5wPHFBhG4a/Ky7R8q4hoY3mINkyRsH1UaCOlF05F5 15 | Hfdiw2ypyQX6sPTfIou1qwDZMAMzhMXIStRLsFvPvIUidcGmQ6kr1ylcZ4MrpScM 16 | 1AULRSMEU/PNsTrVjiSv/oooU3ekTSctGcYe55DkVLm09o0a42nQVX/mkhucNM1L 17 | cl/FhJnWRl3U4ZwIsfbCUjYVw1s8HlGTHLPosTpsRc5jjluZ8eCd8FXOBMf5uZd0 18 | SL/JwEjn1esZrdXlb2I0yAlcPalaf8UX0FoJftnvAiil9TNbaaMnAgMBAAGjUzBR 19 | MB0GA1UdDgQWBBQ3ngXBo9uC+HNutPta6VREf+9vwDAfBgNVHSMEGDAWgBQ3ngXB 20 | o9uC+HNutPta6VREf+9vwDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA 21 | A4ICAQCYVxhAWpbH4AJgd9OZB1fq/PBJEp+6zspZpagIc05gisL0uMgvDJdqelUY 22 | xZrUjUoczekTOHtKX6FsIxdkTXgySxPMYbZDc6QEKF29Wz7AQ804oRh/2GnxCa0I 23 | fyuOq6cW87l+JhmcZV+8S6W2xBaxqdXaR570DFFOj/VCvYqiEwr9IxbXyFUWo3tI 24 | 
rUCuh+d8YD4n19d+a/7jJAc9PPZEfJQOAp8+qWe/TtcFPCZFa37gtYLWe8OGQZz5 25 | XfHFlrroQtEEYNHKH0Y92GwAKGiuCAYHnOdZod+re1UvRGz9Y4E/NB1q2+qUAtIj 26 | gKQrrEy+RcebSDfHLsfAxmvv/3PTffd/L+6XZT+kizDpJyv0JJC6xEUr6oA12WK/ 27 | IhhNfGvCoKYxUCrS6qpKU8QLG1C3nATLjobXLB/9Hk/x0jWTpRL/rJ8/N3pM4Jt/ 28 | +/XgF9M4yHydTyyuFQvh0S7MwMoLhtcg6GmZUUsStn66yFijITBI4+P+BQ8RTkSM 29 | /KxMh3Ea4TRgPXw6WMXDQhvtrEzkDjtjcqRBnB8OBysXxY7RiyqrEO7oM1aj/Pf1 30 | fsYR83wmhA8mPWjAf48oyigHcgYcjeD94+6f6Nok2j3RMliK+bHsVpD9NAKg8rSN 31 | ezJFxqUFzwqGqJ19uMVO9RG+/3CTcpmsNBLDxGYGeONY4njLLA== 32 | -----END CERTIFICATE----- 33 | -------------------------------------------------------------------------------- /example/basic/security/user/common/ca/gene-ca.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # ca根证书 3 | ##### 4 | 5 | # 生成 CA 密钥 6 | openssl genrsa -out ca-key.pem 4096 7 | 8 | # 生成 CA 根证书 9 | openssl req -new -x509 -days 1000 -key ca-key.pem -out ca.pem 10 | -------------------------------------------------------------------------------- /example/basic/security/user/common/csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kk", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "k8s", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /example/basic/security/user/common/gene-user.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # 生成任意普通用户 3 | ##### 4 | cluster="k8s" 5 | namespace="default" 6 | 7 | userDir="user" 8 | username="kk" 9 | 10 | echo 'clean old configs...' 11 | rm -r ./$userDir/${username} 12 | mkdir -p ./$userDir/${username} 13 | userDir="./$userDir/${username}" 14 | 15 | # 创建请求证书,会自动生csr(证书请求)和key.pem(证书私钥)文件 16 | echo 'gencert ....' 
17 | cfssl gencert -ca=./ca/ca.pem -ca-key=./ca/ca-key.pem -profile=$cluster csr.json | cfssljson -bare ./$userDir/${username} 18 | 19 | # 导出 kubeconfig,包含基本的集群参数 clusters 20 | echo 'set-cluster ....' 21 | KUBE_APISERVER="https://kubernetes.docker.internal:6443" 22 | kubectl config set-cluster ${cluster} \ 23 | --kubeconfig=./$userDir/${username}.kubeconfig \ 24 | --embed-certs=true \ 25 | --certificate-authority=./$userDir/${username}.csr \ 26 | --server=${KUBE_APISERVER} 27 | 28 | # 设置客户端认证(users用户参数) 29 | echo 'set-credentials ....' 30 | kubectl config set-credentials ${username} \ 31 | --kubeconfig=./$userDir/${username}.kubeconfig \ 32 | --embed-certs=true \ 33 | --client-certificate=./$userDir/${username}.pem \ 34 | --client-key=./$userDir/${username}-key.pem 35 | 36 | # 设置上下文参数 37 | echo 'set-context ....' 38 | kubectl config set-context ${username} \ 39 | --kubeconfig=./$userDir/${username}.kubeconfig \ 40 | --cluster=${cluster} \ 41 | --namespace=${namespace} \ 42 | --user=${username} 43 | 44 | -------------------------------------------------------------------------------- /example/basic/security/user/common/user/kk/kk-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEAz8cc6a3WJQxUJrKwHR28XN/2XFuY434/2SVYO7VcsfynBvhS 3 | r3oevh0TCct343v8yyL4qowkz7hNEqD+ANk9V/Kg68XcpzSSY+JcPb/th8tACDJh 4 | dChEdWmaqY4UYL0F4hKZD9i1mrEI6zedLicx+Jw9rZ54NTzwbeajYTW//1k76o+E 5 | Kc3SH5PZzisNWb87IU8u9wl7UT3dbm1+5fz8kugX7b2CpoAMwHS6xfBYCGxxxB0y 6 | Qvl/H1dqvO59bm80fbm+vDdVmYSOg8v5ZJ5tuF5/PYQB2Sufoh6B7k4Xk0boxWEZ 7 | DMYNObOHEdpEQqTz6vYwK3k8vGfF2beKckBwiwIDAQABAoIBACSnIlGSqeklMJ8W 8 | /HxWZ4kWtRX7zo+rEMZUkSkPSEfZovlWyvdLmqq85EPhEcgeciy/gZBMYp4GDAqS 9 | PdyWdfzSv1J4OAGDA6P9JXp7JgLDdws7H/fmtpTjyTXJ64xZK/WChF/IQdkFC+a6 10 | i869eVI69aJwBxKVKnCos/+NBM6ddJQrve0MLj/ZGfAcl+KruszfdhOBFHhw9+Hw 11 | I+acGs7BqNi7/TlByIzuqGkP/ebvOVzgmmEDQ6ALnsiIuvg8Z09EBh5b8ipvR49g 12 | 
vwH9lulJ8Uc8194qz5suLYtfklPhbl6VOBwJs4ws2v1/UO9vMTQsJTyscBfjkY/N 13 | BiyCDvECgYEA+c3EuJd1reu4Ng+s18Ejttdw7EoXSZ9fPX563qxiviC3yuPG4YYA 14 | gxhr9Hv5fTwXWfY+1s5iwvZ6afBt/MAK69depbIWAM6UeHXTWpSlbkM5Sm3T9Hbj 15 | JPHedXOX/E5pNhPHbA0d0cKnWmZkk/BIltZOxh9j3dwiXEHFlnksyjkCgYEA1O57 16 | smCKmYSKcciqzmbnb6M2Cb7g95Q1utJMFr35yJRpe+hlyk7pHyzNM4GlY80VHTR1 17 | s6lA9Z1ruKFJ7h1dwvSJb3qeCANZcdMQGyGf1Bp+UithgduohS9FA9TJxsgo3o8+ 18 | KfLAcbgYkjYucZ36TGYLoFMgco3ntrX7E6BSIOMCgYEAhHB9zcgvUghZYAAriTAS 19 | +Ut7ySr28ceXPQkjp48JwvehueXNeaAMAXUfNz3LZoRHdSDC7Cv73JXFDQm1pn1C 20 | zQsgWQTz+XWM7di6N5n7o1vssfNMnp2xi9LYOLkSaWhBC7Ss1DKYDq/+/kSvKmgI 21 | dEAm/4+A6Q1fAj60vTR9dLECgYEAjrUC3CsN2RPWganeqBSZmd2F8wQl3GhNllcp 22 | Pbi3NrIY/D1mMy+t2AH7yUBjqz6gnwEwvWd7LOIDdytJ1DeERowDrF4+mdo80SeG 23 | zNg9OTdEYCfI4vJnwFNM6uNrFDnEAl14aT7TsaOTfDKp7+XUAe4Gbr4nqf7nRDI1 24 | AxNY18cCgYEA90f6a0R5UpzpJ9/fPFYms1YMyv8SXakwbpylS5Mk2dXl3L2AVF5x 25 | YgOXRvL3gmtPz9sodY0u26vd9MMZko7a/1L+nrF7WMTIndlhcg84zRne+QJk5V3t 26 | lss45Oz1RTbMQSAqAsipIskMN1KlnG1pmxnMnj9eaFrsBmwDPD4r0vU= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /example/basic/security/user/common/user/kk/kk.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIICojCCAYoCAQAwXTELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaUppbmcxEDAO 3 | BgNVBAcTB0JlaUppbmcxDDAKBgNVBAoTA2s4czEPMA0GA1UECxMGU3lzdGVtMQsw 4 | CQYDVQQDEwJrazCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM/HHOmt 5 | 1iUMVCaysB0dvFzf9lxbmON+P9klWDu1XLH8pwb4Uq96Hr4dEwnLd+N7/Msi+KqM 6 | JM+4TRKg/gDZPVfyoOvF3Kc0kmPiXD2/7YfLQAgyYXQoRHVpmqmOFGC9BeISmQ/Y 7 | tZqxCOs3nS4nMficPa2eeDU88G3mo2E1v/9ZO+qPhCnN0h+T2c4rDVm/OyFPLvcJ 8 | e1E93W5tfuX8/JLoF+29gqaADMB0usXwWAhsccQdMkL5fx9XarzufW5vNH25vrw3 9 | VZmEjoPL+WSebbhefz2EAdkrn6Iege5OF5NG6MVhGQzGDTmzhxHaREKk8+r2MCt5 10 | PLxnxdm3inJAcIsCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQBjtENOvTmoZH0d 11 | 
3BacONraijYoC/B66HhXgFAQTYnG3koVybjA99X8n5liABKXG/9s0hxSryNRuk3T 12 | RsiAsrKvB0sDZ76sTkJR5OuiT0ojjXRt5c6cpLGSz9t8xsW2SfNxvvAGiNbh2aNm 13 | FZGuEfgDUC4oz66Z70sUeyjAuLkLEkimSwq9oko7Cs2EVpQWMetddiCnpfgqTIXt 14 | ypecrE3D1b2t63w5pF23mY0L/MJJzZPTNtlBFhBlANWg4YDW2pV+wPFbJ7Hfa7/r 15 | 9Hd9HBGPFA7dm3zUr67y7CCDubqMtvIqOnwfVUjZZoecZsc7oAz/mpjcM5EPfaW+ 16 | VlhypdCC 17 | -----END CERTIFICATE REQUEST----- 18 | -------------------------------------------------------------------------------- /example/basic/security/user/common/user/kk/kk.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEwjCCAqqgAwIBAgIUWUiSZA92a3Wee+VQNnl9bU8E1L8wDQYJKoZIhvcNAQEN 3 | BQAwWDELMAkGA1UEBhMCY24xEDAOBgNVBAgMB2JlaWppbmcxEDAOBgNVBAcMB2Jl 4 | aWppbmcxCzAJBgNVBAoMAmtrMQswCQYDVQQLDAJrazELMAkGA1UEAwwCa2swHhcN 5 | MjExMDA1MDU0NjAwWhcNMjIxMDA1MDU0NjAwWjBdMQswCQYDVQQGEwJDTjEQMA4G 6 | A1UECBMHQmVpSmluZzEQMA4GA1UEBxMHQmVpSmluZzEMMAoGA1UEChMDazhzMQ8w 7 | DQYDVQQLEwZTeXN0ZW0xCzAJBgNVBAMTAmtrMIIBIjANBgkqhkiG9w0BAQEFAAOC 8 | AQ8AMIIBCgKCAQEAz8cc6a3WJQxUJrKwHR28XN/2XFuY434/2SVYO7VcsfynBvhS 9 | r3oevh0TCct343v8yyL4qowkz7hNEqD+ANk9V/Kg68XcpzSSY+JcPb/th8tACDJh 10 | dChEdWmaqY4UYL0F4hKZD9i1mrEI6zedLicx+Jw9rZ54NTzwbeajYTW//1k76o+E 11 | Kc3SH5PZzisNWb87IU8u9wl7UT3dbm1+5fz8kugX7b2CpoAMwHS6xfBYCGxxxB0y 12 | Qvl/H1dqvO59bm80fbm+vDdVmYSOg8v5ZJ5tuF5/PYQB2Sufoh6B7k4Xk0boxWEZ 13 | DMYNObOHEdpEQqTz6vYwK3k8vGfF2beKckBwiwIDAQABo38wfTAOBgNVHQ8BAf8E 14 | BAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQC 15 | MAAwHQYDVR0OBBYEFGSCVuIlogfWd/tJIbTf0uqA5mCVMB8GA1UdIwQYMBaAFDee 16 | BcGj24L4c260+1rpVER/72/AMA0GCSqGSIb3DQEBDQUAA4ICAQBQZae3jxYgE1Ui 17 | v8z4bIzGIy4ulm3JObb2e2rzLruHC8w9ZSY39pZGLIPQhZVWwkISZb3OIMHdiEXJ 18 | y1eCJZJ9aYwaisF7dG4aq5qSoukMnU3RmzKoX1O+4QUvCiXUKi9cdyrgutC1HYFK 19 | IJMw4ufKFyEWhI+nfqVcIjaRUmr52UBl8RuQlYsRS05vwCuTo0xuTUIpYBhBACv9 20 | 1RZa2PAlkv4E7Y4J8z33P8r9hH4JjwAsUANc3cFlkNvhz2Qmm59+5GA0BC+g8fBD 21 | 
WBOuaOeiVCEzU/sG1ZC6S5neBjS0hiWIkO5IutMVHz96JKwTGNdvFQoVmzeVdvvu 22 | gj3Y5T7FAg/NLnRR3pJSCpbaNc3ohxt4ES2Ba8x7pY5H7XINehoSgA5S/RTju9+N 23 | mOnquRheH3NnnkSS85G3ukL/tfL/RD1iIiWodLYKvwdBKYrLkXglY6OOTQHIkiRQ 24 | P4MP1c/HflmX8EnSUjMXrvnb5kymjmboqZ5umJ3DBlnyWmHf5gbRWk9bC5jC5/Mm 25 | Uq0BTv6XUB/U5z7te+F9qi2DBs0Sck/bsDMRlzxLhyN6riy3qPeXygBV1Ng0mXv/ 26 | XwG1JrXBtEtZOJUUDSZn/5hdT3uced+UY66y+F1WLLKKeBITK67YOesihyCSh8WP 27 | bdkatWpZnC077HlNwEz5f2ZmEk6PbQ== 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /example/basic/storage/nfs/nfs-pvc.yaml: -------------------------------------------------------------------------------- 1 | # 声明nfs存储空间 2 | --- 3 | apiVersion: storage.k8s.io/v1 4 | kind: StorageClass 5 | metadata: 6 | name: nfs-storage # 需与 PVC 的 storageClassName 保持一致 7 | provisioner: nfs-client # 动态卷分配者名称,需与 Deployment 的 spec.template.spec.containers.env.PROVISIONER_NAME 一致 8 | parameters: 9 | archiveOnDelete: "true" # 设置为"false"时删除PVC不会保留数据,"true"则保留数据 10 | mountOptions: 11 | - hard # 指定为硬挂载方式 12 | - nfsvers=4 # 指定NFS版本,这个需要根据 NFS Server 版本号设置 13 | 14 | --- 15 | kind: PersistentVolumeClaim 16 | apiVersion: v1 17 | metadata: 18 | name: test-pvc 19 | spec: 20 | storageClassName: nfs-storage #---需要与上面创建的storageclass的名称一致 21 | accessModes: 22 | - ReadWriteOnce 23 | resources: 24 | requests: 25 | storage: 1Mi 26 | 27 | 28 | -------------------------------------------------------------------------------- /example/basic/storage/nfs/nfs-server.sh: -------------------------------------------------------------------------------- 1 | # CentOS7 搭建 NFS 服务器 http://www.mydlq.club/article/3/ 2 | 3 | # 停止并禁用防火墙 4 | systemctl stop firewalld 5 | systemctl disable firewalld 6 | 7 | # 关闭并禁用SELinux 8 | setenforce 0 9 | sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config 10 | 11 | # 安装nfs-utils和rpcbind 12 | yum install -y nfs-utils rpcbind 13 | 14 | # 创建文件夹 15 | mkdir /nfs 16 | chown -R nfsnobody:nfsnobody 
/nfs 17 | -------------------------------------------------------------------------------- /example/basic/storage/pv/data/busybox/test: -------------------------------------------------------------------------------- 1 | 🐂 2 | -------------------------------------------------------------------------------- /example/basic/storage/pv/dynamic-pv.yaml: -------------------------------------------------------------------------------- 1 | # 根据 PVC 动态创建合适的 PV 并绑定 2 | # 创建PV 3 | --- 4 | apiVersion: v1 5 | kind: PersistentVolume 6 | metadata: 7 | name: pv-static 8 | labels: 9 | pv: pv-static 10 | spec: 11 | storageClassName: hostpath # 默认 12 | hostPath: 13 | path: /Users/kiky/kk/k-k8s/example/basic/storage/pv/data 14 | accessModes: 15 | - ReadWriteOnce 16 | capacity: 17 | storage: 100Mi 18 | 19 | # 创建PVC 20 | --- 21 | apiVersion: v1 22 | kind: PersistentVolumeClaim 23 | metadata: 24 | name: pvc-static 25 | spec: 26 | storageClassName: hostpath 27 | resources: 28 | requests: 29 | storage: 110Mi # PVC > PV ,也会自动创建 30 | accessModes: 31 | - ReadWriteOnce 32 | selector: 33 | matchLabels: # 匹配标签为 pv: pv-static 的 PV 34 | pv: pv-static1 # 标签失败,则自动新建 35 | 36 | # 使用静态的PVC 37 | --- 38 | apiVersion: v1 39 | kind: Pod 40 | metadata: 41 | name: busybox 42 | labels: 43 | app: busybox 44 | spec: 45 | volumes: # PVC 关联到 Pod 46 | - name: pv-static-vol 47 | persistentVolumeClaim: 48 | claimName: pvc-static 49 | containers: 50 | - name: busybox 51 | image: busybox 52 | command: 53 | - sleep 54 | - "3600" 55 | volumeMounts: # 关联到容器 56 | - mountPath: /usr/share 57 | name: pv-static-vol 58 | 59 | -------------------------------------------------------------------------------- /example/basic/storage/pv/static-pv.yaml: -------------------------------------------------------------------------------- 1 | # 静态(手动)pv/pvc使用方式 2 | # 创建PV 3 | --- 4 | apiVersion: v1 5 | kind: PersistentVolume 6 | metadata: 7 | name: pv-static 8 | labels: 9 | pv: pv-static 10 | spec: 11 | storageClassName: hostpath # 默认 12 | 
hostPath: 13 | path: /Users/kiky/kk/k-k8s/example/basic/storage/pv/data 14 | accessModes: 15 | - ReadWriteOnce 16 | capacity: 17 | storage: 100Mi 18 | 19 | # 创建PVC 20 | --- 21 | apiVersion: v1 22 | kind: PersistentVolumeClaim 23 | metadata: 24 | name: pvc-static 25 | spec: 26 | resources: 27 | requests: 28 | storage: 10Mi # 匹配成功 29 | accessModes: 30 | - ReadWriteOnce 31 | selector: 32 | matchLabels: # 匹配标签为 pv: pv-static 的 PV 33 | pv: pv-static # 成功 34 | 35 | # 使用静态的PVC 36 | --- 37 | apiVersion: v1 38 | kind: Pod 39 | metadata: 40 | name: busybox 41 | labels: 42 | app: busybox 43 | spec: 44 | volumes: # PVC 关联到 Pod 45 | - name: pv-static-vol 46 | persistentVolumeClaim: 47 | claimName: pvc-static 48 | containers: 49 | - name: busybox 50 | image: busybox 51 | command: 52 | - sleep 53 | - "3600" 54 | volumeMounts: # 关联到容器 55 | - mountPath: /usr/share 56 | subPath: busybox 57 | name: pv-static-vol 58 | 59 | -------------------------------------------------------------------------------- /example/basic/storage/volume/emptydir-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: emptydir-demo 5 | spec: 6 | volumes: 7 | - name: emptydir-html 8 | emptyDir: { } # Pod 内部多容器间共享 9 | containers: 10 | - name: myapp 11 | image: nginx 12 | ports: 13 | - name: http 14 | containerPort: 80 15 | volumeMounts: 16 | - name: emptydir-html # 使用空目录 和spec.volumes[0].name 一致 17 | mountPath: /usr/share/nginx/html/ 18 | 19 | - name: busybox # 包含ping工具 20 | image: busybox 21 | volumeMounts: 22 | - name: emptydir-html # 使用空目录 和spec.volumes[0].name 一致 23 | mountPath: /data/ 24 | command: [ "/bin/sh", "-c" ] 25 | args: 26 | - "while true; do echo $(date) >> /data/index.html; sleep 3; done" 27 | 28 | -------------------------------------------------------------------------------- /example/basic/storage/volume/hostpath-demo.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostpathpd 5 | spec: 6 | volumes: 7 | - name: hostpath-vol 8 | hostPath: # 主机路径型 9 | type: Directory 10 | # path: ./data 11 | path: /Users/kiky/kk/k-k8s/example/storage/data 12 | containers: 13 | - name: busybox 14 | image: busybox 15 | command: 16 | - sleep 17 | - "3600" 18 | volumeMounts: 19 | - mountPath: /usr/share/data 20 | name: hostpath-vol 21 | 22 | -------------------------------------------------------------------------------- /example/helm/hello-world/chart.yaml: -------------------------------------------------------------------------------- 1 | name: hello-world 2 | version: 1.0 3 | -------------------------------------------------------------------------------- /example/helm/hello-world/cmds.sh: -------------------------------------------------------------------------------- 1 | # 使用命令创建一次Release 2 | helm install hello-world hello-world-chart.zip 3 | 4 | # 修改配置文件内容之后可以通过如下命令更新 5 | helm upgrade nobby-eel . 6 | 7 | # 查看历史信息 8 | helm history nobby-eel 9 | 10 | # 查询一个特定的Release的状态 11 | helm status nobby-eel 12 | 13 | # 尝试运行 14 | helm install --dry-run . 
15 | 16 | # 移除所有与这个Release相关的Kubernetes资源 17 | helm delete nobby-eel 18 | 19 | # 还原删除的helm服务或者回滚已经存在的服务操作 20 | helm rollback nobby-eel 2 21 | helm rollback nobby-eel 4 22 | 23 | # 使用命令移除所有与指定Release相关的Kubernetes资源和所有这个Release的记录 24 | helm delete --purge nobby-eel 25 | helm list --deleted 26 | -------------------------------------------------------------------------------- /example/helm/hello-world/hello-world-chart.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/example/helm/hello-world/hello-world-chart.zip -------------------------------------------------------------------------------- /example/helm/hello-world/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | # 无状态服务部署 Deployment 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: hello-world-deploy 6 | spec: 7 | replicas: 5 8 | selector: # 基于 RS,必须配置标签选择器 9 | matchLabels: # 比RC新增的标签选择器 10 | app: hello-world # 和 spec.template.metadata.labels.app 一致 11 | template: 12 | metadata: 13 | name: hello-world 14 | labels: # 必须指定标签 15 | app: hello-world 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | ports: 21 | - containerPort: 90 22 | -------------------------------------------------------------------------------- /example/helm/hello-world/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hello-world 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: hello-world 9 | ports: 10 | - port: 80 11 | targetPort: 80 12 | protocol: TCP 13 | -------------------------------------------------------------------------------- /example/helm/redis/cmds.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # helm 部署 redis 3 | # 
https://artifacthub.io/packages/helm/bitnami/redis 4 | ##### 5 | helm repo add bitnami https://charts.bitnami.com/bitnami 6 | helm install redis bitnami/redis 7 | 8 | 9 | -------------------------------------------------------------------------------- /example/helm/redis/startup-log.sh: -------------------------------------------------------------------------------- 1 | "bitnami" has been added to your repositories 2 | NAME: redis 3 | LAST DEPLOYED: Wed Oct 6 06:25:24 2021 4 | 5 | NAMESPACE: default 6 | STATUS: deployed 7 | REVISION: 1 8 | TEST SUITE: None 9 | 10 | NOTES: 11 | ** Please be patient while the chart is being deployed ** 12 | 13 | Redis™ can be accessed on the following DNS names from within your cluster: 14 | 15 | redis-master.default.svc.cluster.local for read/write operations (port 6379) 16 | redis-replicas.default.svc.cluster.local for read-only operations (port 6379) 17 | 18 | 19 | To get your password run: 20 | 21 | export REDIS_PASSWORD=$(kubectl get secret --namespace default redis -o jsonpath="{.data.redis-password}" | base64 --decode) 22 | 23 | To connect to your Redis™ server: 24 | 25 | 1. Run a Redis™ pod that you can use as a client: 26 | 27 | kubectl run --namespace default redis-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image docker.io/bitnami/redis:6.2.6-debian-10-r0 --command -- sleep infinity 28 | 29 | Use the following command to attach to the pod: 30 | 31 | kubectl exec --tty -i redis-client \ 32 | --namespace default -- bash 33 | 34 | 2. 
Connect using the Redis™ CLI: 35 | redis-cli -h redis-master -a $REDIS_PASSWORD 36 | redis-cli -h redis-replicas -a $REDIS_PASSWORD 37 | 38 | To connect to your database from outside the cluster execute the following commands: 39 | 40 | kubectl port-forward --namespace default svc/redis-master 6379:6379 & 41 | redis-cli -h 127.0.0.1 -p 6379 -a $REDIS_PASSWORD 42 | -------------------------------------------------------------------------------- /imgs/all-in-one.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/all-in-one.png -------------------------------------------------------------------------------- /imgs/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/dashboard.png -------------------------------------------------------------------------------- /imgs/env-docker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/env-docker.png -------------------------------------------------------------------------------- /imgs/env-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/env-k8s.png -------------------------------------------------------------------------------- /imgs/grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/grafana.png -------------------------------------------------------------------------------- /imgs/hpa-yaml.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/hpa-yaml.png -------------------------------------------------------------------------------- /imgs/init-ctn-mind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/init-ctn-mind.png -------------------------------------------------------------------------------- /imgs/k8s-DCOM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-DCOM.png -------------------------------------------------------------------------------- /imgs/k8s-and-docker.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-and-docker.jpg -------------------------------------------------------------------------------- /imgs/k8s-guide5-code.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-guide5-code.png -------------------------------------------------------------------------------- /imgs/k8s-guide5-cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-guide5-cover.png -------------------------------------------------------------------------------- /imgs/k8s-mind-full.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-mind-full.png -------------------------------------------------------------------------------- /imgs/k8s-outline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-outline.png -------------------------------------------------------------------------------- /imgs/k8s-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/k8s-service.png -------------------------------------------------------------------------------- /imgs/kube-prometheus-stack-mind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/kube-prometheus-stack-mind.png -------------------------------------------------------------------------------- /imgs/local-k8s-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/local-k8s-home.png -------------------------------------------------------------------------------- /imgs/mind-bond.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/mind-bond.png -------------------------------------------------------------------------------- /imgs/pod-init-yaml.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/pod-init-yaml.png -------------------------------------------------------------------------------- /imgs/pod-ready-after-init.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/pod-ready-after-init.png -------------------------------------------------------------------------------- /imgs/pod-wait-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/pod-wait-service.png -------------------------------------------------------------------------------- /imgs/rancher-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/rancher-home.png -------------------------------------------------------------------------------- /imgs/setup-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/setup-service.png -------------------------------------------------------------------------------- /imgs/trees.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/imgs/trees.png -------------------------------------------------------------------------------- /meta/service-account-token.json: -------------------------------------------------------------------------------- 1 | { 2 | "iss": "kubernetes/serviceaccount", 3 | "kubernetes.io/serviceaccount/namespace": "default", 4 | 
"kubernetes.io/serviceaccount/secret.name": "default-token-tk94g", 5 | 6 | "kubernetes.io/serviceaccount/service-account.name": "default", 7 | "kubernetes.io/serviceaccount/service-account.uid": "27bc40d6-edf6-4334-a2e2-2199793260f4", 8 | 9 | "sub": "system:serviceaccount:default:default" 10 | } 11 | -------------------------------------------------------------------------------- /support/helm/fast-helm.sh: -------------------------------------------------------------------------------- 1 | helm search repo redis 2 | 3 | # 列出已经部署的Release 4 | helm list 5 | 6 | # 修改配置文件内容之后可以通过如下命令更新 7 | helm upgrade appName . 8 | 9 | # 查看历史信息 10 | helm history appName 11 | 12 | # 查询一个特定的Release的状态 13 | helm status appName 14 | 15 | # 尝试运行 16 | helm install --dry-run . 17 | 18 | # 移除所有与这个Release相关的Kubernetes资源 19 | helm delete appName 20 | 21 | # 还原删除的helm服务或者回滚已经存在的服务操作 22 | helm rollback appName 2 23 | helm rollback appName 4 24 | 25 | # 使用命令移除所有与指定Release相关的Kubernetes资源和所有这个Release的记录 26 | helm delete --purge appName 27 | helm list --deleted 28 | ------------------------------------------------ 29 | --all-namespaces 30 | ------------------------------------------------ 31 | helm uninstall --namespace kube-system metrics 32 | 33 | -------------------------------------------------------------------------------- /support/helm/install.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # 安装 helm 3 | ##### 4 | # mac 上安装 helm 客户端 5 | brew install kubernetes-helm 6 | 7 | # 添加镜像 更新速度:微软 > 阿里云 8 | # 40.73.39.89 mirror.azure.cn # 加上此host,否则会 403 9 | helm repo add stable http://mirror.azure.cn/kubernetes/charts/ 10 | helm repo update 11 | 12 | 13 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/README.md: -------------------------------------------------------------------------------- 1 | # kube-prometheus-stack 一站式 k8s 集群监控系统 2 | 3 | 基于 Operator 以及各 CRD 实现 k8s 集群上动态部署和管理 
Prometheus 集群、告警规则等,降低手动部署的复杂度。 4 | 5 | ## 架构 6 | 7 | ![](./imgs/kube-prometheus-stack-mind.png) 8 | ![](./imgs/prometheus-arch.png) 9 | ![](./imgs/prometheus-operator-arch.png) 10 | 11 | 12 | ## 部署 13 | 14 | ### 一键安装 15 | 16 | 运行以下脚本即可在本地的 k8s 测试集群上安装一整套 Prometheus Operator 监控栈 17 | 18 | ``` 19 | ./install.sh 20 | ``` 21 | 22 | ![](./imgs/install-log.png) 23 | 24 | ![](./imgs/stack-components.png) 25 | 26 | ### 检查服务 27 | 28 | #### Prometheus Server 29 | 30 | 时序数据库以及监控服务 [http://localhost:30090](http://localhost:30090) 31 | 32 | ![](./imgs/Prometheus-targets.png) 33 | 34 | #### Alertmanager 35 | 36 | 告警规则管理 [http://localhost:30903](http://localhost:30903) 37 | 38 | ![](./imgs/alert-manager.png) 39 | 40 | #### kube-state-metrics 41 | 42 | 指标资源化、对象化服务,以支持高阶的自定义监控 [http://localhost:30100](http://localhost:30100) 43 | 44 | ![](./imgs/kube-metrics.png) 45 | ![](./imgs/kube-metrics-metrics.png) 46 | 47 | #### node-exporter 48 | 49 | 主机常规性能指标采集和输出 [http://localhost:30101](http://localhost:30101) 50 | 51 | ![](./imgs/node-exporter.png) 52 | 53 | #### Grafana 54 | 55 | 监控指标可视化展示工具 [http://localhost:30001](http://localhost:30001) 56 | 57 | ![](./imgs/grafana.png) 58 | 59 | ## 一键卸载 60 | 61 | 运行卸载脚本 `./uninstall.sh` 62 | 63 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | ## How to contribute to this chart 4 | 5 | 1. Fork this repository, develop and test your Chart. 6 | 1. Bump the chart version for every change. 7 | 1. Ensure PR title has the prefix `[kube-prometheus-stack]` 8 | 1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories 9 | 1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. 
You can use this configuration when validating your changes. 10 | 1. Check for changes of RBAC rules. 11 | 1. Check for changes in CRD specs. 12 | 1. PR must pass the linter (`helm lint`) 13 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/Chart.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: kube-state-metrics 3 | repository: https://prometheus-community.github.io/helm-charts 4 | version: 3.5.2 5 | - name: prometheus-node-exporter 6 | repository: https://prometheus-community.github.io/helm-charts 7 | version: 2.0.4 8 | - name: grafana 9 | repository: https://grafana.github.io/helm-charts 10 | version: 6.16.10 11 | digest: sha256:94dad976ca1630e9e3cd006fadb255783387b53bd9d0d19e105bd39d8e8e34be 12 | generated: "2021-09-28T10:26:46.319411+07:00" 13 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | artifacthub.io/links: | 3 | - name: Chart Source 4 | url: https://github.com/prometheus-community/helm-charts 5 | - name: Upstream Project 6 | url: https://github.com/prometheus-operator/kube-prometheus 7 | artifacthub.io/operator: "true" 8 | apiVersion: v2 9 | appVersion: 0.50.0 10 | dependencies: 11 | - condition: kubeStateMetrics.enabled 12 | name: kube-state-metrics 13 | repository: https://prometheus-community.github.io/helm-charts 14 | version: 3.5.* 15 | - condition: nodeExporter.enabled 16 | name: prometheus-node-exporter 17 | repository: https://prometheus-community.github.io/helm-charts 18 | version: 2.0.* 19 | - condition: grafana.enabled 20 | name: grafana 21 | repository: https://grafana.github.io/helm-charts 22 | version: 6.16.* 23 | description: kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, 24 | and 
Prometheus rules combined with documentation and scripts to provide easy to 25 | operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus 26 | Operator. 27 | home: https://github.com/prometheus-operator/kube-prometheus 28 | icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png 29 | keywords: 30 | - operator 31 | - prometheus 32 | - kube-prometheus 33 | kubeVersion: '>=1.16.0-0' 34 | maintainers: 35 | - name: vsliouniaev 36 | - name: bismarck 37 | - email: gianrubio@gmail.com 38 | name: gianrubio 39 | - email: github.gkarthiks@gmail.com 40 | name: gkarthiks 41 | - email: scott@r6by.com 42 | name: scottrigby 43 | - email: miroslav.hadzhiev@gmail.com 44 | name: Xtigyro 45 | name: kube-prometheus-stack 46 | sources: 47 | - https://github.com/prometheus-community/helm-charts 48 | - https://github.com/prometheus-operator/kube-prometheus 49 | type: application 50 | version: 19.0.2 51 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .vscode 20 | .project 21 | .idea/ 22 | *.tmproj 23 | OWNERS 24 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 8.1.5 3 | description: The leading tool for querying and visualizing time series and metrics. 4 | home: https://grafana.net 5 | icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png 6 | kubeVersion: ^1.8.0-0 7 | maintainers: 8 | - email: zanhsieh@gmail.com 9 | name: zanhsieh 10 | - email: rluckie@cisco.com 11 | name: rtluckie 12 | - email: maor.friedman@redhat.com 13 | name: maorfr 14 | - email: miroslav.hadzhiev@gmail.com 15 | name: Xtigyro 16 | - email: mail@torstenwalter.de 17 | name: torstenwalter 18 | name: grafana 19 | sources: 20 | - https://github.com/grafana/grafana 21 | type: application 22 | version: 6.16.10 23 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/ci/default-values.yaml: -------------------------------------------------------------------------------- 1 | # Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
2 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/ci/with-dashboard-json-values.yaml: -------------------------------------------------------------------------------- 1 | dashboards: 2 | my-provider: 3 | my-awesome-dashboard: 4 | # An empty but valid dashboard 5 | json: | 6 | { 7 | "__inputs": [], 8 | "__requires": [ 9 | { 10 | "type": "grafana", 11 | "id": "grafana", 12 | "name": "Grafana", 13 | "version": "6.3.5" 14 | } 15 | ], 16 | "annotations": { 17 | "list": [ 18 | { 19 | "builtIn": 1, 20 | "datasource": "-- Grafana --", 21 | "enable": true, 22 | "hide": true, 23 | "iconColor": "rgba(0, 211, 255, 1)", 24 | "name": "Annotations & Alerts", 25 | "type": "dashboard" 26 | } 27 | ] 28 | }, 29 | "editable": true, 30 | "gnetId": null, 31 | "graphTooltip": 0, 32 | "id": null, 33 | "links": [], 34 | "panels": [], 35 | "schemaVersion": 19, 36 | "style": "dark", 37 | "tags": [], 38 | "templating": { 39 | "list": [] 40 | }, 41 | "time": { 42 | "from": "now-6h", 43 | "to": "now" 44 | }, 45 | "timepicker": { 46 | "refresh_intervals": ["5s"] 47 | }, 48 | "timezone": "", 49 | "title": "Dummy Dashboard", 50 | "uid": "IdcYQooWk", 51 | "version": 1 52 | } 53 | datasource: Prometheus 54 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/ci/with-dashboard-values.yaml: -------------------------------------------------------------------------------- 1 | dashboards: 2 | my-provider: 3 | my-awesome-dashboard: 4 | gnetId: 10000 5 | revision: 1 6 | datasource: Prometheus 7 | dashboardProviders: 8 | dashboardproviders.yaml: 9 | apiVersion: 1 10 | providers: 11 | - name: 'my-provider' 12 | orgId: 1 13 | folder: '' 14 | type: file 15 | updateIntervalSeconds: 10 16 | disableDeletion: true 17 | editable: true 18 | options: 19 | path: /var/lib/grafana/dashboards/my-provider 20 | 
-------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/ci/with-image-renderer-values.yaml: -------------------------------------------------------------------------------- 1 | podLabels: 2 | customLableA: Aaaaa 3 | imageRenderer: 4 | enabled: true 5 | env: 6 | RENDERING_ARGS: --disable-gpu,--window-size=1280x758 7 | RENDERING_MODE: clustered 8 | podLabels: 9 | customLableB: Bbbbb 10 | networkPolicy: 11 | limitIngress: true 12 | limitEgress: true 13 | resources: 14 | limits: 15 | cpu: 1000m 16 | memory: 1000Mi 17 | requests: 18 | cpu: 500m 19 | memory: 50Mi 20 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/dashboards/custom-dashboard.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | {{- with .Values.annotations }} 8 | annotations: 9 | {{ toYaml . | indent 4 }} 10 | {{- end }} 11 | name: {{ template "grafana.fullname" . 
}}-clusterrole 12 | {{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} 13 | rules: 14 | {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} 15 | - apiGroups: [""] # "" indicates the core API group 16 | resources: ["configmaps", "secrets"] 17 | verbs: ["get", "watch", "list"] 18 | {{- end}} 19 | {{- with .Values.rbac.extraClusterRoleRules }} 20 | {{ toYaml . | indent 0 }} 21 | {{- end}} 22 | {{- else }} 23 | rules: [] 24 | {{- end}} 25 | {{- end}} 26 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-clusterrolebinding 6 | labels: 7 | {{- include "grafana.labels" . | nindent 4 }} 8 | {{- with .Values.annotations }} 9 | annotations: 10 | {{ toYaml . | indent 4 }} 11 | {{- end }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ template "grafana.serviceAccountName" . }} 15 | namespace: {{ template "grafana.namespace" . }} 16 | roleRef: 17 | kind: ClusterRole 18 | {{- if (not .Values.rbac.useExistingRole) }} 19 | name: {{ template "grafana.fullname" . 
}}-clusterrole 20 | {{- else }} 21 | name: {{ .Values.rbac.useExistingRole }} 22 | {{- end }} 23 | apiGroup: rbac.authorization.k8s.io 24 | {{- end -}} 25 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/configmap-dashboard-provider.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sidecar.dashboards.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | {{- with .Values.annotations }} 8 | annotations: 9 | {{ toYaml . | indent 4 }} 10 | {{- end }} 11 | name: {{ template "grafana.fullname" . }}-config-dashboards 12 | namespace: {{ template "grafana.namespace" . }} 13 | data: 14 | provider.yaml: |- 15 | apiVersion: 1 16 | providers: 17 | - name: '{{ .Values.sidecar.dashboards.provider.name }}' 18 | orgId: {{ .Values.sidecar.dashboards.provider.orgid }} 19 | {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} 20 | folder: '{{ .Values.sidecar.dashboards.provider.folder }}' 21 | {{- end}} 22 | type: {{ .Values.sidecar.dashboards.provider.type }} 23 | disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} 24 | allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} 25 | updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} 26 | options: 27 | foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} 28 | path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} 29 | {{- end}} 30 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/dashboards-json-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.dashboards }} 2 | {{ $files := .Files }} 3 | {{- range $provider, $dashboards := .Values.dashboards }} 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} 8 | namespace: {{ template "grafana.namespace" $ }} 9 | labels: 10 | {{- include "grafana.labels" $ | nindent 4 }} 11 | dashboard-provider: {{ $provider }} 12 | {{- if $dashboards }} 13 | data: 14 | {{- $dashboardFound := false }} 15 | {{- range $key, $value := $dashboards }} 16 | {{- if (or (hasKey $value "json") (hasKey $value "file")) }} 17 | {{- $dashboardFound = true }} 18 | {{ print $key | indent 2 }}.json: 19 | {{- if hasKey $value "json" }} 20 | |- 21 | {{ $value.json | indent 6 }} 22 | {{- end }} 23 | {{- if hasKey $value "file" }} 24 | {{ toYaml ( $files.Get $value.file ) | indent 4}} 25 | {{- end }} 26 | {{- end }} 27 | {{- end }} 28 | {{- if not $dashboardFound }} 29 | {} 30 | {{- end }} 31 | {{- end }} 32 | --- 33 | {{- end }} 34 | 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/headless-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-headless 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{ toYaml . 
| indent 4 }} 12 | {{- end }} 13 | spec: 14 | clusterIP: None 15 | selector: 16 | {{- include "grafana.selectorLabels" . | nindent 4 }} 17 | type: ClusterIP 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/hpa.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.autoscaling.enabled }} 2 | apiVersion: autoscaling/v2beta1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | labels: 7 | app.kubernetes.io/name: {{ template "grafana.name" . }} 8 | helm.sh/chart: {{ template "grafana.chart" . }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | app.kubernetes.io/instance: {{ .Release.Name }} 11 | spec: 12 | scaleTargetRef: 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | name: {{ template "grafana.fullname" . }} 16 | minReplicas: {{ .Values.autoscaling.minReplicas }} 17 | maxReplicas: {{ .Values.autoscaling.maxReplicas }} 18 | metrics: 19 | {{ toYaml .Values.autoscaling.metrics | indent 4 }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/image-renderer-service.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.imageRenderer.enabled }} 2 | {{ if .Values.imageRenderer.service.enabled }} 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: {{ template "grafana.fullname" . }}-image-renderer 7 | namespace: {{ template "grafana.namespace" . }} 8 | labels: 9 | {{- include "grafana.imageRenderer.labels" . | nindent 4 }} 10 | {{- if .Values.imageRenderer.service.labels }} 11 | {{ toYaml .Values.imageRenderer.service.labels | indent 4 }} 12 | {{- end }} 13 | {{- with .Values.imageRenderer.service.annotations }} 14 | annotations: 15 | {{ toYaml . 
| indent 4 }} 16 | {{- end }} 17 | spec: 18 | type: ClusterIP 19 | {{- if .Values.imageRenderer.service.clusterIP }} 20 | clusterIP: {{ .Values.imageRenderer.service.clusterIP }} 21 | {{end}} 22 | ports: 23 | - name: {{ .Values.imageRenderer.service.portName }} 24 | port: {{ .Values.imageRenderer.service.port }} 25 | protocol: TCP 26 | targetPort: {{ .Values.imageRenderer.service.targetPort }} 27 | selector: 28 | {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} 29 | {{ end }} 30 | {{ end }} 31 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/poddisruptionbudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget }} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- if .Values.labels }} 10 | {{ toYaml .Values.labels | indent 4 }} 11 | {{- end }} 12 | spec: 13 | {{- if .Values.podDisruptionBudget.minAvailable }} 14 | minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} 15 | {{- end }} 16 | {{- if .Values.podDisruptionBudget.maxUnavailable }} 17 | maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} 18 | {{- end }} 19 | selector: 20 | matchLabels: 21 | {{- include "grafana.selectorLabels" . | nindent 6 }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "grafana.fullname" . 
}} 6 | labels: 7 | {{- include "grafana.labels" . | nindent 4 }} 8 | annotations: 9 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' 10 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' 11 | {{- if .Values.rbac.pspUseAppArmor }} 12 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' 13 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 14 | {{- end }} 15 | spec: 16 | privileged: false 17 | allowPrivilegeEscalation: false 18 | requiredDropCapabilities: 19 | # Default set from Docker, with DAC_OVERRIDE and CHOWN 20 | - ALL 21 | volumes: 22 | - 'configMap' 23 | - 'emptyDir' 24 | - 'projected' 25 | - 'csi' 26 | - 'secret' 27 | - 'downwardAPI' 28 | - 'persistentVolumeClaim' 29 | hostNetwork: false 30 | hostIPC: false 31 | hostPID: false 32 | runAsUser: 33 | rule: 'RunAsAny' 34 | seLinux: 35 | rule: 'RunAsAny' 36 | supplementalGroups: 37 | rule: 'MustRunAs' 38 | ranges: 39 | # Forbid adding the root group. 40 | - min: 1 41 | max: 65535 42 | fsGroup: 43 | rule: 'MustRunAs' 44 | ranges: 45 | # Forbid adding the root group. 46 | - min: 1 47 | max: 65535 48 | readOnlyRootFilesystem: false 49 | {{- end }} 50 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.persistence.annotations }} 10 | annotations: 11 | {{ toYaml . 
| indent 4 }} 12 | {{- end }} 13 | {{- with .Values.persistence.finalizers }} 14 | finalizers: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | accessModes: 19 | {{- range .Values.persistence.accessModes }} 20 | - {{ . | quote }} 21 | {{- end }} 22 | resources: 23 | requests: 24 | storage: {{ .Values.persistence.size | quote }} 25 | {{- if .Values.persistence.storageClassName }} 26 | storageClassName: {{ .Values.persistence.storageClassName }} 27 | {{- end -}} 28 | {{- with .Values.persistence.selectorLabels }} 29 | selector: 30 | matchLabels: 31 | {{ toYaml . | indent 6 }} 32 | {{- end }} 33 | {{- end -}} 34 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} 2 | apiVersion: {{ template "grafana.rbac.apiVersion" . }} 3 | kind: Role 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{ toYaml . | indent 4 }} 12 | {{- end }} 13 | {{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} 14 | rules: 15 | {{- if .Values.rbac.pspEnabled }} 16 | - apiGroups: ['extensions'] 17 | resources: ['podsecuritypolicies'] 18 | verbs: ['use'] 19 | resourceNames: [{{ template "grafana.fullname" . 
}}] 20 | {{- end }} 21 | {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} 22 | - apiGroups: [""] # "" indicates the core API group 23 | resources: ["configmaps", "secrets"] 24 | verbs: ["get", "watch", "list"] 25 | {{- end }} 26 | {{- with .Values.rbac.extraRoleRules }} 27 | {{ toYaml . | indent 0 }} 28 | {{- end}} 29 | {{- else }} 30 | rules: [] 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | apiVersion: {{ template "grafana.rbac.apiVersion" . }} 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{ toYaml . | indent 4 }} 12 | {{- end }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | {{- if (not .Values.rbac.useExistingRole) }} 17 | name: {{ template "grafana.fullname" . }} 18 | {{- else }} 19 | name: {{ .Values.rbac.useExistingRole }} 20 | {{- end }} 21 | subjects: 22 | - kind: ServiceAccount 23 | name: {{ template "grafana.serviceAccountName" . }} 24 | namespace: {{ template "grafana.namespace" . }} 25 | {{- end -}} 26 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/secret-env.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.envRenderSecret }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-env 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . 
| nindent 4 }} 9 | type: Opaque 10 | data: 11 | {{- range $key, $val := .Values.envRenderSecret }} 12 | {{ $key }}: {{ $val | b64enc | quote }} 13 | {{- end -}} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- with .Values.annotations }} 10 | annotations: 11 | {{ toYaml . | indent 4 }} 12 | {{- end }} 13 | type: Opaque 14 | data: 15 | {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} 16 | admin-user: {{ .Values.adminUser | b64enc | quote }} 17 | {{- if .Values.adminPassword }} 18 | admin-password: {{ .Values.adminPassword | b64enc | quote }} 19 | {{- else }} 20 | admin-password: {{ template "grafana.password" . }} 21 | {{- end }} 22 | {{- end }} 23 | {{- if not .Values.ldap.existingSecret }} 24 | ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} 25 | {{- end }} 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "grafana.fullname" . 
}} 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | {{- if .Values.service.labels }} 10 | {{ toYaml .Values.service.labels | indent 4 }} 11 | {{- end }} 12 | {{- with .Values.service.annotations }} 13 | annotations: 14 | {{ toYaml . | indent 4 }} 15 | {{- end }} 16 | spec: 17 | {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} 18 | type: ClusterIP 19 | {{- if .Values.service.clusterIP }} 20 | clusterIP: {{ .Values.service.clusterIP }} 21 | {{end}} 22 | {{- else if eq .Values.service.type "LoadBalancer" }} 23 | type: {{ .Values.service.type }} 24 | {{- if .Values.service.loadBalancerIP }} 25 | loadBalancerIP: {{ .Values.service.loadBalancerIP }} 26 | {{- end }} 27 | {{- if .Values.service.loadBalancerSourceRanges }} 28 | loadBalancerSourceRanges: 29 | {{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} 30 | {{- end -}} 31 | {{- else }} 32 | type: {{ .Values.service.type }} 33 | {{- end }} 34 | {{- if .Values.service.externalIPs }} 35 | externalIPs: 36 | {{ toYaml .Values.service.externalIPs | indent 4 }} 37 | {{- end }} 38 | ports: 39 | - name: {{ .Values.service.portName }} 40 | port: {{ .Values.service.port }} 41 | protocol: TCP 42 | targetPort: {{ .Values.service.targetPort }} 43 | {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} 44 | nodePort: {{.Values.service.nodePort}} 45 | {{ end }} 46 | {{- if .Values.extraExposePorts }} 47 | {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} 48 | {{- end }} 49 | selector: 50 | {{- include "grafana.selectorLabels" . 
| nindent 4 }} 51 | {{ end }} 52 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | {{- with .Values.serviceAccount.annotations }} 8 | annotations: 9 | {{ toYaml . | indent 4 }} 10 | {{- end }} 11 | name: {{ template "grafana.serviceAccountName" . }} 12 | namespace: {{ template "grafana.namespace" . }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | --- 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | name: {{ template "grafana.fullname" . }} 7 | {{- if .Values.serviceMonitor.namespace }} 8 | namespace: {{ .Values.serviceMonitor.namespace }} 9 | {{- end }} 10 | labels: 11 | {{- include "grafana.labels" . 
| nindent 4 }} 12 | {{- if .Values.serviceMonitor.labels }} 13 | {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} 14 | {{- end }} 15 | spec: 16 | endpoints: 17 | - interval: {{ .Values.serviceMonitor.interval }} 18 | {{- if .Values.serviceMonitor.scrapeTimeout }} 19 | scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} 20 | {{- end }} 21 | honorLabels: true 22 | port: {{ .Values.service.portName }} 23 | path: {{ .Values.serviceMonitor.path }} 24 | scheme: {{ .Values.serviceMonitor.scheme }} 25 | {{- if .Values.serviceMonitor.tlsConfig }} 26 | tlsConfig: 27 | {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} 28 | {{- end }} 29 | {{- if .Values.serviceMonitor.relabelings }} 30 | relabelings: 31 | {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} 32 | {{- end }} 33 | jobLabel: "{{ .Release.Name }}" 34 | selector: 35 | matchLabels: 36 | {{- include "grafana.selectorLabels" . | nindent 8 }} 37 | namespaceSelector: 38 | matchNames: 39 | - {{ .Release.Namespace }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.testFramework.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | data: 10 | run.sh: |- 11 | @test "Test Health" { 12 | url="http://{{ template "grafana.fullname" . 
}}/api/health" 13 | 14 | code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}') 15 | [ "$code" == "200" ] 16 | } 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test-podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | labels: 7 | {{- include "grafana.labels" . | nindent 4 }} 8 | spec: 9 | allowPrivilegeEscalation: true 10 | privileged: false 11 | hostNetwork: false 12 | hostIPC: false 13 | hostPID: false 14 | fsGroup: 15 | rule: RunAsAny 16 | seLinux: 17 | rule: RunAsAny 18 | supplementalGroups: 19 | rule: RunAsAny 20 | runAsUser: 21 | rule: RunAsAny 22 | volumes: 23 | - configMap 24 | - downwardAPI 25 | - emptyDir 26 | - projected 27 | - csi 28 | - secret 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | rules: 10 | - apiGroups: ['policy'] 11 | resources: ['podsecuritypolicies'] 12 | verbs: ['use'] 13 | resourceNames: [{{ template "grafana.fullname" . 
}}-test] 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | namespace: {{ template "grafana.namespace" . }} 7 | labels: 8 | {{- include "grafana.labels" . | nindent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: {{ template "grafana.fullname" . }}-test 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "grafana.serviceAccountNameTest" . }} 16 | namespace: {{ template "grafana.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "grafana.labels" . | nindent 4 }} 7 | name: {{ template "grafana.serviceAccountNameTest" . }} 8 | namespace: {{ template "grafana.namespace" . }} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/grafana/templates/tests/test.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.testFramework.enabled }} 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | labels: 7 | {{- include "grafana.labels" . 
| nindent 4 }} 8 | annotations: 9 | "helm.sh/hook": test-success 10 | namespace: {{ template "grafana.namespace" . }} 11 | spec: 12 | serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }} 13 | {{- if .Values.testFramework.securityContext }} 14 | securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} 15 | {{- end }} 16 | {{- if .Values.image.pullSecrets }} 17 | imagePullSecrets: 18 | {{- range .Values.image.pullSecrets }} 19 | - name: {{ . }} 20 | {{- end}} 21 | {{- end }} 22 | {{- with .Values.nodeSelector }} 23 | nodeSelector: 24 | {{ toYaml . | indent 4 }} 25 | {{- end }} 26 | {{- with .Values.affinity }} 27 | affinity: 28 | {{ toYaml . | indent 4 }} 29 | {{- end }} 30 | {{- with .Values.tolerations }} 31 | tolerations: 32 | {{ toYaml . | indent 4 }} 33 | {{- end }} 34 | containers: 35 | - name: {{ .Release.Name }}-test 36 | image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" 37 | imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" 38 | command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] 39 | volumeMounts: 40 | - mountPath: /tests 41 | name: tests 42 | readOnly: true 43 | volumes: 44 | - name: tests 45 | configMap: 46 | name: {{ template "grafana.fullname" . }}-test 47 | restartPolicy: Never 48 | {{- end }} 49 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 2.2.0 3 | description: Install kube-state-metrics to generate and expose cluster-level metrics 4 | home: https://github.com/kubernetes/kube-state-metrics/ 5 | keywords: 6 | - metric 7 | - monitoring 8 | - prometheus 9 | - kubernetes 10 | maintainers: 11 | - email: tariq.ibrahim@mulesoft.com 12 | name: tariq1890 13 | - email: manuel@rueg.eu 14 | name: mrueg 15 | name: kube-state-metrics 16 | sources: 17 | - https://github.com/kubernetes/kube-state-metrics/ 18 | type: application 19 | version: 3.5.2 20 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - tariq1890 3 | - mrueg 4 | reviewers: 5 | - tariq1890 6 | - mrueg 7 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. 2 | The exposed metrics can be found here: 3 | https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics 4 | 5 | The metrics are exported on the HTTP endpoint /metrics on the listening port. 
6 | In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics 7 | 8 | They are served either as plaintext or protobuf depending on the Accept header. 9 | They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. 10 | 11 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "kube-state-metrics.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "kube-state-metrics.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create the name of the service account to use 29 | */}} 30 | {{- define "kube-state-metrics.serviceAccountName" -}} 31 | {{- if .Values.serviceAccount.create -}} 32 | {{ default (include "kube-state-metrics.fullname" .) 
.Values.serviceAccount.name }} 33 | {{- else -}} 34 | {{ default "default" .Values.serviceAccount.name }} 35 | {{- end -}} 36 | {{- end -}} 37 | 38 | {{/* 39 | Allow the release namespace to be overridden for multi-namespace deployments in combined charts 40 | */}} 41 | {{- define "kube-state-metrics.namespace" -}} 42 | {{- if .Values.namespaceOverride -}} 43 | {{- .Values.namespaceOverride -}} 44 | {{- else -}} 45 | {{- .Release.Namespace -}} 46 | {{- end -}} 47 | {{- end -}} 48 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 7 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | app.kubernetes.io/managed-by: {{ .Release.Service }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | name: {{ template "kube-state-metrics.fullname" . }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | {{- if .Values.rbac.useExistingRole }} 15 | name: {{ .Values.rbac.useExistingRole }} 16 | {{- else }} 17 | name: {{ template "kube-state-metrics.fullname" . }} 18 | {{- end }} 19 | subjects: 20 | - kind: ServiceAccount 21 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 22 | namespace: {{ template "kube-state-metrics.namespace" . 
}} 23 | {{- end -}} 24 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/kubeconfig-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeconfig.enabled -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 9 | helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 10 | app.kubernetes.io/instance: "{{ .Release.Name }}" 11 | app.kubernetes.io/managed-by: "{{ .Release.Service }}" 12 | type: Opaque 13 | data: 14 | config: '{{ .Values.kubeconfig.secret }}' 15 | {{- end -}} 16 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 9 | helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 10 | app.kubernetes.io/instance: "{{ .Release.Name }}" 11 | app.kubernetes.io/managed-by: "{{ .Release.Service }}" 12 | {{- if .Values.customLabels }} 13 | {{ toYaml .Values.customLabels | indent 4 }} 14 | {{- end }} 15 | spec: 16 | selector: 17 | matchLabels: 18 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} 19 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podSecurityPolicy.enabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | labels: 7 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 8 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | app.kubernetes.io/instance: {{ .Release.Name }} 11 | {{- if .Values.podSecurityPolicy.annotations }} 12 | annotations: 13 | {{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} 14 | {{- end }} 15 | spec: 16 | privileged: false 17 | volumes: 18 | - 'secret' 19 | {{- if .Values.podSecurityPolicy.additionalVolumes }} 20 | {{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} 21 | {{- end }} 22 | hostNetwork: false 23 | hostIPC: false 24 | hostPID: false 25 | runAsUser: 26 | rule: 'MustRunAsNonRoot' 27 | seLinux: 28 | rule: 'RunAsAny' 29 | supplementalGroups: 30 | rule: 'MustRunAs' 31 | ranges: 32 | # Forbid adding the root group. 33 | - min: 1 34 | max: 65535 35 | fsGroup: 36 | rule: 'MustRunAs' 37 | ranges: 38 | # Forbid adding the root group. 
39 | - min: 1 40 | max: 65535 41 | readOnlyRootFilesystem: false 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 7 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | app.kubernetes.io/managed-by: {{ .Release.Service }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | name: psp-{{ template "kube-state-metrics.fullname" . }} 11 | rules: 12 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 13 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 14 | - apiGroups: ['policy'] 15 | {{- else }} 16 | - apiGroups: ['extensions'] 17 | {{- end }} 18 | resources: ['podsecuritypolicies'] 19 | verbs: ['use'] 20 | resourceNames: 21 | - {{ template "kube-state-metrics.fullname" . }} 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 7 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | app.kubernetes.io/managed-by: {{ .Release.Service }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | name: psp-{{ template "kube-state-metrics.fullname" . 
}} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: psp-{{ template "kube-state-metrics.fullname" . }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 18 | namespace: {{ template "kube-state-metrics.namespace" . }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} 2 | {{- range (split "," $.Values.namespaces) }} 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: RoleBinding 6 | metadata: 7 | labels: 8 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }} 9 | helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} 10 | app.kubernetes.io/managed-by: {{ $.Release.Service }} 11 | app.kubernetes.io/instance: {{ $.Release.Name }} 12 | name: {{ template "kube-state-metrics.fullname" $ }} 13 | namespace: {{ . }} 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: Role 17 | {{- if (not $.Values.rbac.useExistingRole) }} 18 | name: {{ template "kube-state-metrics.fullname" $ }} 19 | {{- else }} 20 | name: {{ $.Values.rbac.useExistingRole }} 21 | {{- end }} 22 | subjects: 23 | - kind: ServiceAccount 24 | name: {{ template "kube-state-metrics.serviceAccountName" $ }} 25 | namespace: {{ template "kube-state-metrics.namespace" $ }} 26 | {{- end -}} 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "kube-state-metrics.fullname" . 
}} 5 | namespace: {{ template "kube-state-metrics.namespace" . }} 6 | labels: 7 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 8 | helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | app.kubernetes.io/instance: "{{ .Release.Name }}" 10 | app.kubernetes.io/managed-by: "{{ .Release.Service }}" 11 | {{- if .Values.customLabels }} 12 | {{ toYaml .Values.customLabels | indent 4 }} 13 | {{- end }} 14 | annotations: 15 | {{- if .Values.prometheusScrape }} 16 | prometheus.io/scrape: '{{ .Values.prometheusScrape }}' 17 | {{- end }} 18 | {{- if .Values.service.annotations }} 19 | {{- toYaml .Values.service.annotations | nindent 4 }} 20 | {{- end }} 21 | spec: 22 | type: "{{ .Values.service.type }}" 23 | ports: 24 | - name: "http" 25 | protocol: TCP 26 | port: {{ .Values.service.port | default 8080}} 27 | {{- if .Values.service.nodePort }} 28 | nodePort: {{ .Values.service.nodePort }} 29 | {{- end }} 30 | targetPort: {{ .Values.service.port | default 8080}} 31 | {{ if .Values.selfMonitor.enabled }} 32 | - name: "metrics" 33 | protocol: TCP 34 | port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} 35 | targetPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} 36 | {{ end }} 37 | {{- if .Values.service.loadBalancerIP }} 38 | loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" 39 | {{- end }} 40 | selector: 41 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 42 | app.kubernetes.io/instance: {{ .Release.Name }} 43 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} 7 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | app.kubernetes.io/managed-by: {{ .Release.Service }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 11 | namespace: {{ template "kube-state-metrics.namespace" . }} 12 | {{- if .Values.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | imagePullSecrets: 17 | {{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/stsdiscovery-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 9 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | app.kubernetes.io/instance: {{ .Release.Name }} 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - pods 17 | verbs: 18 | - get 19 | - apiGroups: 20 | - apps 21 | resourceNames: 22 | - {{ template "kube-state-metrics.fullname" . 
}} 23 | resources: 24 | - statefulsets 25 | verbs: 26 | - get 27 | - list 28 | - watch 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.autosharding.enabled .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 6 | namespace: {{ template "kube-state-metrics.namespace" . }} 7 | labels: 8 | app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} 9 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | app.kubernetes.io/instance: {{ .Release.Name }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "kube-state-metrics.serviceAccountName" . }} 19 | namespace: {{ template "kube-state-metrics.namespace" . }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.2.2 3 | description: A Helm chart for prometheus node-exporter 4 | home: https://github.com/prometheus/node_exporter/ 5 | keywords: 6 | - node-exporter 7 | - prometheus 8 | - exporter 9 | maintainers: 10 | - email: gianrubio@gmail.com 11 | name: gianrubio 12 | - name: vsliouniaev 13 | - name: bismarck 14 | name: prometheus-node-exporter 15 | sources: 16 | - https://github.com/prometheus/node_exporter/ 17 | type: application 18 | version: 2.0.4 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/README.md: -------------------------------------------------------------------------------- 1 | # Prometheus Node Exporter 2 | 3 | Prometheus exporter for hardware and OS metrics exposed by *NIX kernels, written in Go with pluggable metric collectors. 4 | 5 | This chart bootstraps a prometheus [Node Exporter](http://github.com/prometheus/node_exporter) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
6 | 7 | ## Get Repo Info 8 | 9 | ```console 10 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 11 | helm repo update 12 | ``` 13 | 14 | _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ 15 | 16 | ## Install Chart 17 | 18 | ```console 19 | helm install [RELEASE_NAME] prometheus-community/prometheus-node-exporter 20 | ``` 21 | 22 | _See [configuration](#configuration) below._ 23 | 24 | _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ 25 | 26 | ## Uninstall Chart 27 | 28 | ```console 29 | helm uninstall [RELEASE_NAME] 30 | ``` 31 | 32 | This removes all the Kubernetes components associated with the chart and deletes the release. 33 | 34 | _See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ 35 | 36 | ## Upgrading Chart 37 | 38 | ```console 39 | helm upgrade [RELEASE_NAME] [CHART] --install 40 | ``` 41 | 42 | _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ 43 | 44 | ## Configuring 45 | 46 | See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: 47 | 48 | ```console 49 | helm show values prometheus-community/prometheus-node-exporter 50 | ``` 51 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/ci/port-values.yaml: -------------------------------------------------------------------------------- 1 | service: 2 | targetPort: 9102 3 | port: 9102 4 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 8 | You can watch the status of by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-node-exporter.namespace" . }} {{ template "prometheus-node-exporter.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-node-exporter.namespace" . }} -l "app={{ template "prometheus-node-exporter.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | echo "Visit http://127.0.0.1:9100 to use your application" 14 | kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 7 | labels: 8 | {{ include "prometheus-node-exporter.labels" . | indent 4 }} 9 | subsets: 10 | - addresses: 11 | {{- range .Values.endpoints }} 12 | - ip: {{ . }} 13 | {{- end }} 14 | ports: 15 | - name: metrics 16 | port: 9100 17 | protocol: TCP 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/monitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.monitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }} 6 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 7 | labels: {{ include "prometheus-node-exporter.labels" . 
| indent 4 }} 8 | {{- if .Values.prometheus.monitor.additionalLabels }} 9 | {{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} 10 | {{- end }} 11 | spec: 12 | selector: 13 | matchLabels: 14 | app: {{ template "prometheus-node-exporter.name" . }} 15 | release: {{ .Release.Name }} 16 | endpoints: 17 | - port: metrics 18 | scheme: {{ $.Values.prometheus.monitor.scheme }} 19 | {{- if $.Values.prometheus.monitor.bearerTokenFile }} 20 | bearerTokenFile: {{ $.Values.prometheus.monitor.bearerTokenFile }} 21 | {{- end }} 22 | {{- if $.Values.prometheus.monitor.tlsConfig }} 23 | tlsConfig: {{ toYaml $.Values.prometheus.monitor.tlsConfig | nindent 8 }} 24 | {{- end }} 25 | {{- if .Values.prometheus.monitor.proxyUrl }} 26 | proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} 27 | {{- end }} 28 | {{- if .Values.prometheus.monitor.scrapeTimeout }} 29 | scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} 30 | {{- end }} 31 | {{- if .Values.prometheus.monitor.relabelings }} 32 | relabelings: 33 | {{ toYaml .Values.prometheus.monitor.relabelings | indent 6 }} 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 7 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 8 | rules: 9 | - apiGroups: ['extensions'] 10 | resources: ['podsecuritypolicies'] 11 | verbs: ['use'] 12 | resourceNames: 13 | - {{ template "prometheus-node-exporter.fullname" . 
}} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 7 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ template "prometheus-node-exporter.fullname" . }} 15 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | apiVersion: policy/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | name: {{ template "prometheus-node-exporter.fullname" . }} 7 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 8 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 9 | {{- if .Values.rbac.pspAnnotations }} 10 | annotations: 11 | {{ toYaml .Values.rbac.pspAnnotations | indent 4 }} 12 | {{- end}} 13 | spec: 14 | privileged: false 15 | # Required to prevent escalations to root. 16 | # allowPrivilegeEscalation: false 17 | # This is redundant with non-root + disallow privilege escalation, 18 | # but we can provide it for defense in depth. 
19 | #requiredDropCapabilities: 20 | # - ALL 21 | # Allow core volume types. 22 | volumes: 23 | - 'configMap' 24 | - 'emptyDir' 25 | - 'projected' 26 | - 'secret' 27 | - 'downwardAPI' 28 | - 'persistentVolumeClaim' 29 | - 'hostPath' 30 | hostNetwork: true 31 | hostIPC: false 32 | hostPID: true 33 | hostPorts: 34 | - min: 0 35 | max: 65535 36 | runAsUser: 37 | # Permits the container to run with root privileges as well. 38 | rule: 'RunAsAny' 39 | seLinux: 40 | # This policy assumes the nodes are using AppArmor rather than SELinux. 41 | rule: 'RunAsAny' 42 | supplementalGroups: 43 | rule: 'MustRunAs' 44 | ranges: 45 | # Forbid adding the root group. 46 | - min: 0 47 | max: 65535 48 | fsGroup: 49 | rule: 'MustRunAs' 50 | ranges: 51 | # Forbid adding the root group. 52 | - min: 0 53 | max: 65535 54 | readOnlyRootFilesystem: false 55 | {{- end }} 56 | {{- end }} 57 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "prometheus-node-exporter.fullname" . }} 5 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 6 | {{- if .Values.service.annotations }} 7 | annotations: 8 | {{ toYaml .Values.service.annotations | indent 4 }} 9 | {{- end }} 10 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 11 | spec: 12 | type: {{ .Values.service.type }} 13 | ports: 14 | - port: {{ .Values.service.port }} 15 | {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} 16 | nodePort: {{ .Values.service.nodePort }} 17 | {{- end }} 18 | targetPort: {{ .Values.service.targetPort }} 19 | protocol: TCP 20 | name: metrics 21 | selector: 22 | app: {{ template "prometheus-node-exporter.name" . 
}} 23 | release: {{ .Release.Name }} 24 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/charts/prometheus-node-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | {{- if .Values.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ template "prometheus-node-exporter.serviceAccountName" . }} 7 | namespace: {{ template "prometheus-node-exporter.namespace" . }} 8 | labels: 9 | app: {{ template "prometheus-node-exporter.name" . }} 10 | chart: {{ template "prometheus-node-exporter.chart" . }} 11 | release: "{{ .Release.Name }}" 12 | heritage: "{{ .Release.Service }}" 13 | annotations: 14 | {{ toYaml .Values.serviceAccount.annotations | indent 4 }} 15 | imagePullSecrets: 16 | {{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} 17 | {{- end -}} 18 | {{- end -}} 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | {{ $.Chart.Name }} has been installed. Check its status by running: 2 | kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get pods -l "release={{ $.Release.Name }}" 3 | 4 | Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 5 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/extrasecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.alertmanager.extraSecret.data -}} 2 | {{- $secretName := printf "alertmanager-%s-extra" (include "kube-prometheus-stack.fullname" . 
) -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ default $secretName .Values.alertmanager.extraSecret.name }} 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | {{- if .Values.alertmanager.extraSecret.annotations }} 9 | annotations: 10 | {{ toYaml .Values.alertmanager.extraSecret.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 14 | app.kubernetes.io/component: alertmanager 15 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 16 | data: 17 | {{- range $key, $val := .Values.alertmanager.extraSecret.data }} 18 | {{ $key }}: {{ $val | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.podDisruptionBudget.enabled }} 2 | apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | {{- if .Values.alertmanager.podDisruptionBudget.minAvailable }} 12 | minAvailable: {{ .Values.alertmanager.podDisruptionBudget.minAvailable }} 13 | {{- end }} 14 | {{- if .Values.alertmanager.podDisruptionBudget.maxUnavailable }} 15 | maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | app: alertmanager 20 | alertmanager: {{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/psp-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | rules: 11 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 12 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 13 | - apiGroups: ['policy'] 14 | {{- else }} 15 | - apiGroups: ['extensions'] 16 | {{- end }} 17 | resources: ['podsecuritypolicies'] 18 | verbs: ['use'] 19 | resourceNames: 20 | - {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/psp-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: Role 13 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} 17 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 8 | {{- if .Values.global.rbac.pspAnnotations }} 9 | annotations: 10 | {{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} 11 | {{- end }} 12 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 13 | spec: 14 | privileged: false 15 | # Required to prevent escalations to root. 16 | # allowPrivilegeEscalation: false 17 | # This is redundant with non-root + disallow privilege escalation, 18 | # but we can provide it for defense in depth. 19 | #requiredDropCapabilities: 20 | # - ALL 21 | # Allow core volume types. 22 | volumes: 23 | - 'configMap' 24 | - 'emptyDir' 25 | - 'projected' 26 | - 'secret' 27 | - 'downwardAPI' 28 | - 'persistentVolumeClaim' 29 | hostNetwork: false 30 | hostIPC: false 31 | hostPID: false 32 | runAsUser: 33 | # Permits the container to run with root privileges as well. 34 | rule: 'RunAsAny' 35 | seLinux: 36 | # This policy assumes the nodes are using AppArmor rather than SELinux. 37 | rule: 'RunAsAny' 38 | supplementalGroups: 39 | rule: 'MustRunAs' 40 | ranges: 41 | # Forbid adding the root group. 
42 | - min: 0 43 | max: 65535 44 | fsGroup: 45 | rule: 'MustRunAs' 46 | ranges: 47 | # Forbid adding the root group. 48 | - min: 0 49 | max: 65535 50 | readOnlyRootFilesystem: false 51 | {{- end }} 52 | 53 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | {{- if .Values.alertmanager.secret.annotations }} 8 | annotations: 9 | {{ toYaml .Values.alertmanager.secret.annotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | {{- if .Values.alertmanager.tplConfig }} 16 | {{- if eq (typeOf .Values.alertmanager.config) "string" }} 17 | alertmanager.yaml: {{ tpl (.Values.alertmanager.config) . | b64enc | quote }} 18 | {{- else }} 19 | alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . 
| b64enc | quote }} 20 | {{- end }} 21 | {{- else }} 22 | alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }} 23 | {{- end}} 24 | {{- range $key, $val := .Values.alertmanager.templateFiles }} 25 | {{ $key }}: {{ $val | b64enc | quote }} 26 | {{- end }} 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-alertmanager 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-alertmanager 10 | app.kubernetes.io/component: alertmanager 11 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 12 | {{- if .Values.alertmanager.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | {{- if .Values.global.imagePullSecrets }} 17 | imagePullSecrets: 18 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/alertmanager/serviceperreplica.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.servicePerReplica.enabled }} 2 | {{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}} 3 | {{- $serviceValues := .Values.alertmanager.servicePerReplica -}} 4 | apiVersion: v1 5 | kind: List 6 | metadata: 7 | name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-serviceperreplica 8 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 9 | items: 10 | {{- range $i, $e := until $count }} 11 | - apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} 15 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 16 | labels: 17 | app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager 18 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 19 | {{- if $serviceValues.annotations }} 20 | annotations: 21 | {{ toYaml $serviceValues.annotations | indent 8 }} 22 | {{- end }} 23 | spec: 24 | {{- if $serviceValues.clusterIP }} 25 | clusterIP: {{ $serviceValues.clusterIP }} 26 | {{- end }} 27 | {{- if $serviceValues.loadBalancerSourceRanges }} 28 | loadBalancerSourceRanges: 29 | {{- range $cidr := $serviceValues.loadBalancerSourceRanges }} 30 | - {{ $cidr }} 31 | {{- end }} 32 | {{- end }} 33 | ports: 34 | - name: {{ $.Values.alertmanager.alertmanagerSpec.portName }} 35 | {{- if eq $serviceValues.type "NodePort" }} 36 | nodePort: {{ $serviceValues.nodePort }} 37 | {{- end }} 38 | port: {{ $serviceValues.port }} 39 | targetPort: {{ $serviceValues.targetPort }} 40 | selector: 41 | app: alertmanager 42 | alertmanager: {{ template "kube-prometheus-stack.fullname" $ }}-alertmanager 43 | statefulset.kubernetes.io/pod-name: alertmanager-{{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} 44 | type: "{{ $serviceValues.type }}" 45 | {{- end }} 46 | {{- end }} 47 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/core-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.coreDns.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-coredns 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-coredns 8 | jobLabel: coredns 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.coreDns.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.coreDns.service.targetPort }} 18 | selector: 19 | {{- if .Values.coreDns.service.selector }} 20 | {{ toYaml .Values.coreDns.service.selector | indent 4 }} 21 | {{- else}} 22 | k8s-app: kube-dns 23 | {{- end}} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/core-dns/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.coreDns.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-coredns 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-coredns 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | jobLabel: jobLabel 12 | selector: 13 | matchLabels: 14 | app: {{ template "kube-prometheus-stack.name" . }}-coredns 15 | release: {{ $.Release.Name | quote }} 16 | namespaceSelector: 17 | matchNames: 18 | - "kube-system" 19 | endpoints: 20 | - port: http-metrics 21 | {{- if .Values.coreDns.serviceMonitor.interval}} 22 | interval: {{ .Values.coreDns.serviceMonitor.interval }} 23 | {{- end }} 24 | {{- if .Values.coreDns.serviceMonitor.proxyUrl }} 25 | proxyUrl: {{ .Values.coreDns.serviceMonitor.proxyUrl}} 26 | {{- end }} 27 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 28 | {{- if .Values.coreDns.serviceMonitor.metricRelabelings }} 29 | metricRelabelings: 30 | {{ tpl (toYaml .Values.coreDns.serviceMonitor.metricRelabelings | indent 4) . 
}} 31 | {{- end }} 32 | {{- if .Values.coreDns.serviceMonitor.relabelings }} 33 | relabelings: 34 | {{ toYaml .Values.coreDns.serviceMonitor.relabelings | indent 4 }} 35 | {{- end }} 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-api-server/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeApiServer.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-apiserver 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-apiserver 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | {{- if .Values.kubeApiServer.serviceMonitor.interval }} 14 | interval: {{ .Values.kubeApiServer.serviceMonitor.interval }} 15 | {{- end }} 16 | {{- if .Values.kubeApiServer.serviceMonitor.proxyUrl }} 17 | proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl}} 18 | {{- end }} 19 | port: https 20 | scheme: https 21 | {{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }} 22 | metricRelabelings: 23 | {{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . 
}} 24 | {{- end }} 25 | {{- if .Values.kubeApiServer.serviceMonitor.relabelings }} 26 | relabelings: 27 | {{ toYaml .Values.kubeApiServer.serviceMonitor.relabelings | indent 6 }} 28 | {{- end }} 29 | tlsConfig: 30 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 31 | serverName: {{ .Values.kubeApiServer.tlsConfig.serverName }} 32 | insecureSkipVerify: {{ .Values.kubeApiServer.tlsConfig.insecureSkipVerify }} 33 | jobLabel: {{ .Values.kubeApiServer.serviceMonitor.jobLabel }} 34 | namespaceSelector: 35 | matchNames: 36 | - default 37 | selector: 38 | {{ toYaml .Values.kubeApiServer.serviceMonitor.selector | indent 4 }} 39 | {{- end}} 40 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-controller-manager/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager 8 | k8s-app: kube-controller-manager 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeControllerManager.endpoints }} 14 | - ip: {{ . 
}} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeControllerManager.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-controller-manager/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager 8 | jobLabel: kube-controller-manager 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeControllerManager.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeControllerManager.service.targetPort }} 18 | {{- if .Values.kubeControllerManager.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeControllerManager.service.selector }} 21 | {{ toYaml .Values.kubeControllerManager.service.selector | indent 4 }} 22 | {{- else}} 23 | component: kube-controller-manager 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeDns.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-dns 8 | jobLabel: kube-dns 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics-dnsmasq 15 | port: {{ .Values.kubeDns.service.dnsmasq.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeDns.service.dnsmasq.targetPort }} 18 | - name: http-metrics-skydns 19 | port: {{ .Values.kubeDns.service.skydns.port }} 20 | protocol: TCP 21 | targetPort: {{ .Values.kubeDns.service.skydns.targetPort }} 22 | selector: 23 | {{- if .Values.kubeDns.service.selector }} 24 | {{ toYaml .Values.kubeDns.service.selector | indent 4 }} 25 | {{- else}} 26 | k8s-app: kube-dns 27 | {{- end}} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-etcd/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd 8 | k8s-app: etcd-server 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeEtcd.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeEtcd.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-etcd/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-kube-etcd 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd 8 | jobLabel: kube-etcd 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeEtcd.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeEtcd.service.targetPort }} 18 | {{- if .Values.kubeEtcd.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeEtcd.service.selector }} 21 | {{ toYaml .Values.kubeEtcd.service.selector | indent 4 }} 22 | {{- else}} 23 | component: etcd 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-proxy/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeProxy.enabled .Values.kubeProxy.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 8 | k8s-app: kube-proxy 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeProxy.endpoints }} 14 | - ip: {{ . 
}} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeProxy.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-proxy/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeProxy.enabled .Values.kubeProxy.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 8 | jobLabel: kube-proxy 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeProxy.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeProxy.service.targetPort }} 18 | {{- if .Values.kubeProxy.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeProxy.service.selector }} 21 | {{ toYaml .Values.kubeProxy.service.selector | indent 4 }} 22 | {{- else}} 23 | k8s-app: kube-proxy 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-proxy/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeProxy.enabled .Values.kubeProxy.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | spec: 11 | jobLabel: jobLabel 12 | selector: 13 | matchLabels: 14 | app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy 15 | release: {{ $.Release.Name | quote }} 16 | namespaceSelector: 17 | matchNames: 18 | - "kube-system" 19 | endpoints: 20 | - port: http-metrics 21 | {{- if .Values.kubeProxy.serviceMonitor.interval }} 22 | interval: {{ .Values.kubeProxy.serviceMonitor.interval }} 23 | {{- end }} 24 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 25 | {{- if .Values.kubeProxy.serviceMonitor.proxyUrl }} 26 | proxyUrl: {{ .Values.kubeProxy.serviceMonitor.proxyUrl}} 27 | {{- end }} 28 | {{- if .Values.kubeProxy.serviceMonitor.https }} 29 | scheme: https 30 | tlsConfig: 31 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 32 | {{- end}} 33 | {{- if .Values.kubeProxy.serviceMonitor.metricRelabelings }} 34 | metricRelabelings: 35 | {{ toYaml .Values.kubeProxy.serviceMonitor.metricRelabelings | indent 4 }} 36 | {{- end }} 37 | {{- if .Values.kubeProxy.serviceMonitor.relabelings }} 38 | relabelings: 39 | {{ toYaml .Values.kubeProxy.serviceMonitor.relabelings | indent 4 }} 40 | {{- end }} 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-scheduler/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler 8 | k8s-app: kube-scheduler 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeScheduler.endpoints }} 14 | - ip: {{ . 
}} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeScheduler.service.port }} 19 | protocol: TCP 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/kube-scheduler/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler 8 | jobLabel: kube-scheduler 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeScheduler.service.port}} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeScheduler.service.targetPort}} 18 | {{- if .Values.kubeScheduler.endpoints }}{{- else }} 19 | selector: 20 | {{- if .Values.kubeScheduler.service.selector }} 21 | {{ toYaml .Values.kubeScheduler.service.selector | indent 4 }} 22 | {{- else}} 23 | component: kube-scheduler 24 | {{- end}} 25 | {{- end }} 26 | type: ClusterIP 27 | {{- end -}} 28 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/exporters/node-exporter/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.nodeExporter.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-node-exporter 6 | namespace: {{ template "kube-prometheus-stack-prometheus-node-exporter.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . 
}}-node-exporter 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | jobLabel: {{ .Values.nodeExporter.jobLabel }} 12 | selector: 13 | matchLabels: 14 | app: prometheus-node-exporter 15 | release: {{ $.Release.Name }} 16 | namespaceSelector: 17 | matchNames: 18 | - {{ printf "%s" (include "kube-prometheus-stack-prometheus-node-exporter.namespace" .) | quote }} 19 | endpoints: 20 | - port: metrics 21 | {{- if .Values.nodeExporter.serviceMonitor.interval }} 22 | interval: {{ .Values.nodeExporter.serviceMonitor.interval }} 23 | {{- end }} 24 | {{- if .Values.nodeExporter.serviceMonitor.proxyUrl }} 25 | proxyUrl: {{ .Values.nodeExporter.serviceMonitor.proxyUrl}} 26 | {{- end }} 27 | {{- if .Values.nodeExporter.serviceMonitor.scrapeTimeout }} 28 | scrapeTimeout: {{ .Values.nodeExporter.serviceMonitor.scrapeTimeout }} 29 | {{- end }} 30 | {{- if .Values.nodeExporter.serviceMonitor.metricRelabelings }} 31 | metricRelabelings: 32 | {{ tpl (toYaml .Values.nodeExporter.serviceMonitor.metricRelabelings | indent 4) . 
}} 33 | {{- end }} 34 | {{- if .Values.nodeExporter.serviceMonitor.relabelings }} 35 | relabelings: 36 | {{ toYaml .Values.nodeExporter.serviceMonitor.relabelings | indent 4 }} 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/grafana/configmap-dashboards.yaml: -------------------------------------------------------------------------------- 1 | {{- if or (and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled) .Values.grafana.forceDeployDashboards }} 2 | {{- $files := .Files.Glob "dashboards-1.14/*.json" }} 3 | {{- if $files }} 4 | apiVersion: v1 5 | kind: ConfigMapList 6 | items: 7 | {{- range $path, $fileContents := $files }} 8 | {{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} 9 | - apiVersion: v1 10 | kind: ConfigMap 11 | metadata: 12 | name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) $dashboardName | trunc 63 | trimSuffix "-" }} 13 | namespace: {{ template "kube-prometheus-stack-grafana.namespace" $ }} 14 | labels: 15 | {{- if $.Values.grafana.sidecar.dashboards.label }} 16 | {{ $.Values.grafana.sidecar.dashboards.label }}: "1" 17 | {{- end }} 18 | app: {{ template "kube-prometheus-stack.name" $ }}-grafana 19 | {{ include "kube-prometheus-stack.labels" $ | indent 6 }} 20 | data: 21 | {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/grafana/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.grafana.enabled .Values.grafana.serviceMonitor.selfMonitor }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-grafana 6 | namespace: {{ template "kube-prometheus-stack-grafana.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-grafana 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: grafana 14 | app.kubernetes.io/instance: {{ $.Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - {{ printf "%s" (include "kube-prometheus-stack-grafana.namespace" .) | quote }} 18 | endpoints: 19 | - port: {{ .Values.grafana.service.portName }} 20 | {{- if .Values.grafana.serviceMonitor.interval }} 21 | interval: {{ .Values.grafana.serviceMonitor.interval }} 22 | {{- end }} 23 | path: {{ .Values.grafana.serviceMonitor.path | quote }} 24 | {{- if .Values.grafana.serviceMonitor.metricRelabelings }} 25 | metricRelabelings: 26 | {{ tpl (toYaml .Values.grafana.serviceMonitor.metricRelabelings | indent 6) . }} 27 | {{- end }} 28 | {{- if .Values.grafana.serviceMonitor.relabelings }} 29 | relabelings: 30 | {{ toYaml .Values.grafana.serviceMonitor.relabelings | indent 6 }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-admission 6 | annotations: 7 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 8 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 9 | labels: 10 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 11 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 12 | rules: 13 | - apiGroups: 14 | - admissionregistration.k8s.io 15 | resources: 16 | - validatingwebhookconfigurations 17 | - mutatingwebhookconfigurations 18 | verbs: 19 | - get 20 | - update 21 | {{- if .Values.global.rbac.pspEnabled }} 22 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 23 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 24 | - apiGroups: ['policy'] 25 | {{- else }} 26 | - apiGroups: ['extensions'] 27 | {{- end }} 28 | resources: ['podsecuritypolicies'] 29 | verbs: ['use'] 30 | resourceNames: 31 | - {{ template "kube-prometheus-stack.fullname" . }}-admission 32 | {{- end }} 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-admission 6 | annotations: 7 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 8 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 9 | labels: 10 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 11 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 16 | subjects: 17 | - kind: ServiceAccount 18 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 19 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | annotations: 7 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 8 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 9 | labels: 10 | app: {{ template "kube-prometheus-stack.name" . }}-admission 11 | {{- if .Values.global.rbac.pspAnnotations }} 12 | annotations: 13 | {{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} 14 | {{- end }} 15 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 16 | spec: 17 | privileged: false 18 | # Required to prevent escalations to root. 
19 | # allowPrivilegeEscalation: false 20 | # This is redundant with non-root + disallow privilege escalation, 21 | # but we can provide it for defense in depth. 22 | #requiredDropCapabilities: 23 | # - ALL 24 | # Allow core volume types. 25 | volumes: 26 | - 'configMap' 27 | - 'emptyDir' 28 | - 'projected' 29 | - 'secret' 30 | - 'downwardAPI' 31 | - 'persistentVolumeClaim' 32 | hostNetwork: false 33 | hostIPC: false 34 | hostPID: false 35 | runAsUser: 36 | # Permits the container to run with root privileges as well. 37 | rule: 'RunAsAny' 38 | seLinux: 39 | # This policy assumes the nodes are using AppArmor rather than SELinux. 40 | rule: 'RunAsAny' 41 | supplementalGroups: 42 | rule: 'MustRunAs' 43 | ranges: 44 | # Forbid adding the root group. 45 | - min: 0 46 | max: 65535 47 | fsGroup: 48 | rule: 'MustRunAs' 49 | ranges: 50 | # Forbid adding the root group. 51 | - min: 0 52 | max: 65535 53 | readOnlyRootFilesystem: false 54 | {{- end }} 55 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - secrets 18 | verbs: 19 | - get 20 | - create 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 17 | subjects: 18 | - kind: ServiceAccount 19 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 20 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | annotations: 8 | "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade 9 | "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded 10 | labels: 11 | app: {{ template "kube-prometheus-stack.name" $ }}-admission 12 | {{- include "kube-prometheus-stack.labels" $ | indent 4 }} 13 | {{- if .Values.global.imagePullSecrets }} 14 | imagePullSecrets: 15 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - monitoring.coreos.com 12 | resources: 13 | - alertmanagers 14 | - alertmanagers/finalizers 15 | - alertmanagerconfigs 16 | - prometheuses 17 | - prometheuses/finalizers 18 | - thanosrulers 19 | - thanosrulers/finalizers 20 | - servicemonitors 21 | - podmonitors 22 | - probes 23 | - prometheusrules 24 | verbs: 25 | - '*' 26 | - apiGroups: 27 | - apps 28 | resources: 29 | - statefulsets 30 | verbs: 31 | - '*' 32 | - apiGroups: 33 | - "" 34 | resources: 35 | - configmaps 36 | - secrets 37 | verbs: 38 | - '*' 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - pods 43 | verbs: 44 | - list 45 | - delete 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - services 50 | - services/finalizers 51 | - endpoints 52 | verbs: 53 | - get 54 | - create 55 | - update 56 | - delete 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - nodes 61 | verbs: 62 | - list 63 | - watch 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - namespaces 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - apiGroups: 73 | - networking.k8s.io 74 | resources: 75 | - ingresses 76 | verbs: 77 | - get 78 | - list 79 | - watch 80 | {{- end }} 81 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | rules: 10 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 11 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 12 | - apiGroups: ['policy'] 13 | {{- else }} 14 | - apiGroups: ['extensions'] 15 | {{- end }} 16 | resources: ['podsecuritypolicies'] 17 | verbs: ['use'] 18 | resourceNames: 19 | - {{ template "kube-prometheus-stack.fullname" . }}-operator 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-operator 8 | {{- if .Values.global.rbac.pspAnnotations }} 9 | annotations: 10 | {{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} 11 | {{- end }} 12 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 13 | spec: 14 | privileged: false 15 | # Required to prevent escalations to root. 16 | # allowPrivilegeEscalation: false 17 | # This is redundant with non-root + disallow privilege escalation, 18 | # but we can provide it for defense in depth. 19 | #requiredDropCapabilities: 20 | # - ALL 21 | # Allow core volume types. 22 | volumes: 23 | - 'configMap' 24 | - 'emptyDir' 25 | - 'projected' 26 | - 'secret' 27 | - 'downwardAPI' 28 | - 'persistentVolumeClaim' 29 | hostNetwork: {{ .Values.prometheusOperator.hostNetwork }} 30 | hostIPC: false 31 | hostPID: false 32 | runAsUser: 33 | # Permits the container to run with root privileges as well. 34 | rule: 'RunAsAny' 35 | seLinux: 36 | # This policy assumes the nodes are using AppArmor rather than SELinux. 
37 | rule: 'RunAsAny' 38 | supplementalGroups: 39 | rule: 'MustRunAs' 40 | ranges: 41 | # Forbid adding the root group. 42 | - min: 0 43 | max: 65535 44 | fsGroup: 45 | rule: 'MustRunAs' 46 | ranges: 47 | # Forbid adding the root group. 48 | - min: 0 49 | max: 65535 50 | readOnlyRootFilesystem: false 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-operator 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus-operator 10 | app.kubernetes.io/component: prometheus-operator 11 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 12 | {{- if .Values.global.imagePullSecrets }} 13 | imagePullSecrets: 14 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 15 | {{- end }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus-operator/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceMonitor.selfMonitor }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-operator 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . 
}}-operator 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | endpoints: 12 | {{- if .Values.prometheusOperator.tls.enabled }} 13 | - port: https 14 | scheme: https 15 | tlsConfig: 16 | serverName: {{ template "kube-prometheus-stack.operator.fullname" . }} 17 | ca: 18 | secret: 19 | name: {{ template "kube-prometheus-stack.fullname" . }}-admission 20 | key: {{ if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }}ca.crt{{ else }}ca{{ end }} 21 | optional: false 22 | {{- else }} 23 | - port: http 24 | {{- end }} 25 | honorLabels: true 26 | {{- if .Values.prometheusOperator.serviceMonitor.interval }} 27 | interval: {{ .Values.prometheusOperator.serviceMonitor.interval }} 28 | {{- end }} 29 | {{- if .Values.prometheusOperator.serviceMonitor.metricRelabelings }} 30 | metricRelabelings: 31 | {{ tpl (toYaml .Values.prometheusOperator.serviceMonitor.metricRelabelings | indent 6) . }} 32 | {{- end }} 33 | {{- if .Values.prometheusOperator.serviceMonitor.relabelings }} 34 | relabelings: 35 | {{ toYaml .Values.prometheusOperator.serviceMonitor.relabelings | indent 6 }} 36 | {{- end }} 37 | selector: 38 | matchLabels: 39 | app: {{ template "kube-prometheus-stack.name" . }}-operator 40 | release: {{ $.Release.Name | quote }} 41 | namespaceSelector: 42 | matchNames: 43 | - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }} 44 | {{- end }} 45 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/_rules.tpl: -------------------------------------------------------------------------------- 1 | {{- /* 2 | Generated file. Do not change in-place! 
In order to change this file first read following link: 3 | https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack 4 | */ -}} 5 | {{- define "rules.names" }} 6 | rules: 7 | - "alertmanager.rules" 8 | - "general.rules" 9 | - "k8s.rules" 10 | - "kube-apiserver.rules" 11 | - "kube-apiserver-availability.rules" 12 | - "kube-apiserver-error" 13 | - "kube-apiserver-slos" 14 | - "kube-prometheus-general.rules" 15 | - "kube-prometheus-node-alerting.rules" 16 | - "kube-prometheus-node-recording.rules" 17 | - "kube-scheduler.rules" 18 | - "kube-state-metrics" 19 | - "kubelet.rules" 20 | - "kubernetes-absent" 21 | - "kubernetes-resources" 22 | - "kubernetes-storage" 23 | - "kubernetes-system" 24 | - "kubernetes-system-apiserver" 25 | - "kubernetes-system-kubelet" 26 | - "kubernetes-system-controller-manager" 27 | - "kubernetes-system-scheduler" 28 | - "node-exporter.rules" 29 | - "node-exporter" 30 | - "node.rules" 31 | - "node-network" 32 | - "node-time" 33 | - "prometheus-operator" 34 | - "prometheus.rules" 35 | - "prometheus" 36 | - "kubernetes-apps" 37 | - "etcd" 38 | {{- end }} -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/additionalAlertRelabelConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-relabel-confg 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | {{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} 8 | annotations: 9 | {{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . 
}}-prometheus-am-relabel-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | additional-alert-relabel-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs | b64enc | quote }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/additionalAlertmanagerConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-confg 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | {{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} 8 | annotations: 9 | {{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus-am-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | additional-alertmanager-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs | b64enc | quote }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/additionalPrometheusRules.yaml: -------------------------------------------------------------------------------- 1 | {{- if or .Values.additionalPrometheusRules .Values.additionalPrometheusRulesMap}} 2 | apiVersion: v1 3 | kind: List 4 | metadata: 5 | name: {{ include "kube-prometheus-stack.fullname" $ }}-additional-prometheus-rules 6 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 7 | items: 8 | {{- if .Values.additionalPrometheusRulesMap }} 9 | {{- range $prometheusRuleName, $prometheusRule := .Values.additionalPrometheusRulesMap }} 10 | - apiVersion: monitoring.coreos.com/v1 11 | kind: PrometheusRule 12 | metadata: 13 | name: {{ template "kube-prometheus-stack.name" $ }}-{{ $prometheusRuleName }} 14 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 15 | labels: 16 | app: {{ template "kube-prometheus-stack.name" $ }} 17 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 18 | {{- if $prometheusRule.additionalLabels }} 19 | {{ toYaml $prometheusRule.additionalLabels | indent 8 }} 20 | {{- end }} 21 | spec: 22 | groups: 23 | {{ toYaml $prometheusRule.groups| indent 8 }} 24 | {{- end }} 25 | {{- else }} 26 | {{- range .Values.additionalPrometheusRules }} 27 | - apiVersion: monitoring.coreos.com/v1 28 | kind: PrometheusRule 29 | metadata: 30 | name: {{ template "kube-prometheus-stack.name" $ }}-{{ .name }} 31 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 32 | labels: 33 | app: {{ template "kube-prometheus-stack.name" $ }} 34 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 35 | {{- if .additionalLabels }} 36 | {{ toYaml .additionalLabels | indent 8 }} 37 | {{- end }} 38 | spec: 39 | groups: 40 | {{ toYaml .groups| indent 8 }} 41 | {{- end }} 42 | {{- end }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/additionalScrapeConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalScrapeConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-scrape-confg 6 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 7 | {{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} 8 | annotations: 9 | {{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} 10 | {{- end }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus-scrape-confg 13 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 14 | data: 15 | additional-scrape-configs.yaml: {{ tpl (toYaml .Values.prometheus.prometheusSpec.additionalScrapeConfigs) $ | b64enc | quote }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 9 | rules: 10 | # This permission are not in the kube-prometheus repo 11 | # they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml 12 | - apiGroups: [""] 13 | resources: 14 | - nodes 15 | - nodes/metrics 16 | - services 17 | - endpoints 18 | - pods 19 | verbs: ["get", "list", "watch"] 20 | - apiGroups: 21 | - "networking.k8s.io" 22 | resources: 23 | - ingresses 24 | verbs: ["get", "list", "watch"] 25 | - nonResourceURLs: ["/metrics", "/metrics/cadvisor"] 26 | verbs: ["get"] 27 | {{- if .Values.prometheus.additionalRulesForClusterRole }} 28 | {{ toYaml .Values.prometheus.additionalRulesForClusterRole | indent 0 }} 29 | {{- end }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 17 | {{- end }} 18 | 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/csi-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.prometheusSpec.thanos.secretProviderClass }} 2 | --- 3 | apiVersion: secrets-store.csi.x-k8s.io/v1alpha1 4 | kind: SecretProviderClass 5 | metadata: 6 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | labels: 9 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 10 | spec: 11 | {{ toYaml .Values.prometheus.prometheusSpec.thanos.secretProviderClass | indent 2 }} 12 | {{- end }} -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/extrasecret.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.extraSecret.data -}} 2 | {{- $secretName := printf "prometheus-%s-extra" (include "kube-prometheus-stack.fullname" . ) -}} 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ default $secretName .Values.prometheus.extraSecret.name }} 7 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 8 | {{- if .Values.prometheus.extraSecret.annotations }} 9 | annotations: 10 | {{ toYaml .Values.prometheus.extraSecret.annotations | indent 4 }} 11 | {{- end }} 12 | labels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 14 | app.kubernetes.io/component: prometheus 15 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 16 | data: 17 | {{- range $key, $val := .Values.prometheus.extraSecret.data }} 18 | {{ $key }}: {{ $val | b64enc | quote }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.podDisruptionBudget.enabled }} 2 | apiVersion: {{ include "kube-prometheus-stack.pdb.apiVersion" . }} 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | {{- if .Values.prometheus.podDisruptionBudget.minAvailable }} 12 | minAvailable: {{ .Values.prometheus.podDisruptionBudget.minAvailable }} 13 | {{- end }} 14 | {{- if .Values.prometheus.podDisruptionBudget.maxUnavailable }} 15 | maxUnavailable: {{ .Values.prometheus.podDisruptionBudget.maxUnavailable }} 16 | {{- end }} 17 | selector: 18 | matchLabels: 19 | app.kubernetes.io/name: prometheus 20 | prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/podmonitors.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.additionalPodMonitors }} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- range .Values.prometheus.additionalPodMonitors }} 6 | - apiVersion: monitoring.coreos.com/v1 7 | kind: PodMonitor 8 | metadata: 9 | name: {{ .name }} 10 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" $ }}-prometheus 13 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 14 | {{- if .additionalLabels }} 15 | {{ toYaml .additionalLabels | indent 8 }} 16 | {{- end }} 17 | spec: 18 | podMetricsEndpoints: 19 | {{ toYaml .podMetricsEndpoints | indent 8 }} 20 | {{- if .jobLabel }} 21 | jobLabel: {{ .jobLabel }} 22 | {{- end }} 23 | {{- if .namespaceSelector }} 24 | namespaceSelector: 25 | {{ toYaml .namespaceSelector | indent 8 }} 26 | {{- end }} 27 | selector: 28 | {{ toYaml .selector | indent 8 }} 29 | {{- if .podTargetLabels }} 30 | podTargetLabels: 31 | {{ toYaml .podTargetLabels | indent 8 }} 32 | {{- end }} 33 | {{- if .sampleLimit }} 34 | sampleLimit: {{ .sampleLimit }} 35 | {{- end }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus-psp 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | rules: 10 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 11 | {{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} 12 | - apiGroups: ['policy'] 13 | {{- else }} 14 | - apiGroups: ['extensions'] 15 | {{- end }} 16 | resources: ['podsecuritypolicies'] 17 | verbs: ['use'] 18 | resourceNames: 19 | - {{ template "kube-prometheus-stack.fullname" . }}-prometheus 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp 6 | labels: 7 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 8 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 16 | namespace: {{ template "kube-prometheus-stack.namespace" . 
}} 17 | {{- end }} 18 | 19 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml: -------------------------------------------------------------------------------- 1 | {{- /* 2 | Generated from 'kube-prometheus-general.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/manifests/kube-prometheus-prometheusRule.yaml 3 | Do not change in-place! In order to change this file first read following link: 4 | https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack 5 | */ -}} 6 | {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} 7 | {{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusGeneral }} 8 | apiVersion: monitoring.coreos.com/v1 9 | kind: PrometheusRule 10 | metadata: 11 | name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-general.rules" | trunc 63 | trimSuffix "-" }} 12 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 13 | labels: 14 | app: {{ template "kube-prometheus-stack.name" . }} 15 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 16 | {{- if .Values.defaultRules.labels }} 17 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 18 | {{- end }} 19 | {{- if .Values.defaultRules.annotations }} 20 | annotations: 21 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 22 | {{- end }} 23 | spec: 24 | groups: 25 | - name: kube-prometheus-general.rules 26 | rules: 27 | - expr: count without(instance, pod, node) (up == 1) 28 | record: count:up1 29 | - expr: count without(instance, pod, node) (up == 0) 30 | record: count:up0 31 | {{- end }} -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/serviceThanosSidecar.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.thanosService.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-discovery 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-thanos-discovery 9 | {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} 10 | {{- if .Values.prometheus.thanosService.labels }} 11 | {{ toYaml .Values.prometheus.thanosService.labels | indent 4 }} 12 | {{- end }} 13 | {{- if .Values.prometheus.thanosService.annotations }} 14 | annotations: 15 | {{ toYaml .Values.prometheus.thanosService.annotations | indent 4 }} 16 | {{- end }} 17 | spec: 18 | type: {{ .Values.prometheus.thanosService.type }} 19 | clusterIP: {{ .Values.prometheus.thanosService.clusterIP }} 20 | ports: 21 | - name: {{ .Values.prometheus.thanosService.portName }} 22 | port: {{ .Values.prometheus.thanosService.port }} 23 | targetPort: {{ .Values.prometheus.thanosService.targetPort }} 24 | {{- if eq .Values.prometheus.thanosService.type "NodePort" }} 25 | nodePort: {{ .Values.prometheus.thanosService.nodePort }} 26 | {{- end }} 27 | - name: {{ .Values.prometheus.thanosService.httpPortName }} 28 | port: {{ .Values.prometheus.thanosService.httpPort }} 29 | targetPort: {{ .Values.prometheus.thanosService.targetHttpPort }} 30 | {{- if eq .Values.prometheus.thanosService.type "NodePort" }} 31 | nodePort: {{ .Values.prometheus.thanosService.httpNodePort }} 32 | {{- end }} 33 | selector: 34 | app.kubernetes.io/name: prometheus 35 | prometheus: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . 
}}-prometheus 10 | app.kubernetes.io/component: prometheus 11 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 12 | {{- if .Values.prometheus.serviceAccount.annotations }} 13 | annotations: 14 | {{ toYaml .Values.prometheus.serviceAccount.annotations | indent 4 }} 15 | {{- end }} 16 | {{- if .Values.global.imagePullSecrets }} 17 | imagePullSecrets: 18 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.selfMonitor }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "kube-prometheus-stack.name" . }}-prometheus 14 | release: {{ $.Release.Name | quote }} 15 | self-monitor: "true" 16 | namespaceSelector: 17 | matchNames: 18 | - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) 
| quote }} 19 | endpoints: 20 | - port: {{ .Values.prometheus.prometheusSpec.portName }} 21 | {{- if .Values.prometheus.serviceMonitor.interval }} 22 | interval: {{ .Values.prometheus.serviceMonitor.interval }} 23 | {{- end }} 24 | {{- if .Values.prometheus.serviceMonitor.scheme }} 25 | scheme: {{ .Values.prometheus.serviceMonitor.scheme }} 26 | {{- end }} 27 | {{- if .Values.prometheus.serviceMonitor.tlsConfig }} 28 | tlsConfig: {{ toYaml .Values.prometheus.serviceMonitor.tlsConfig | nindent 6 }} 29 | {{- end }} 30 | {{- if .Values.prometheus.serviceMonitor.bearerTokenFile }} 31 | bearerTokenFile: {{ .Values.prometheus.serviceMonitor.bearerTokenFile }} 32 | {{- end }} 33 | path: "{{ trimSuffix "/" .Values.prometheus.prometheusSpec.routePrefix }}/metrics" 34 | {{- if .Values.prometheus.serviceMonitor.metricRelabelings }} 35 | metricRelabelings: 36 | {{ tpl (toYaml .Values.prometheus.serviceMonitor.metricRelabelings | indent 6) . }} 37 | {{- end }} 38 | {{- if .Values.prometheus.serviceMonitor.relabelings }} 39 | relabelings: 40 | {{ toYaml .Values.prometheus.serviceMonitor.relabelings | indent 6 }} 41 | {{- end }} 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/servicemonitorThanosSidecar.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.thanosService.enabled .Values.prometheus.thanosServiceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-discovery 6 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 7 | labels: 8 | app: {{ template "kube-prometheus-stack.name" . }}-thanos-discovery 9 | {{ include "kube-prometheus-stack.labels" . | indent 4 }} 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "kube-prometheus-stack.name" . 
}}-thanos-discovery 14 | release: {{ $.Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }} 18 | endpoints: 19 | - port: {{ .Values.prometheus.thanosService.httpPortName }} 20 | {{- if .Values.prometheus.thanosServiceMonitor.interval }} 21 | interval: {{ .Values.prometheus.thanosServiceMonitor.interval }} 22 | {{- end }} 23 | {{- if .Values.prometheus.thanosServiceMonitor.scheme }} 24 | scheme: {{ .Values.prometheus.thanosServiceMonitor.scheme }} 25 | {{- end }} 26 | {{- if .Values.prometheus.thanosServiceMonitor.tlsConfig }} 27 | tlsConfig: {{ toYaml .Values.prometheus.thanosServiceMonitor.tlsConfig | nindent 6 }} 28 | {{- end }} 29 | {{- if .Values.prometheus.thanosServiceMonitor.bearerTokenFile }} 30 | bearerTokenFile: {{ .Values.prometheus.thanosServiceMonitor.bearerTokenFile }} 31 | {{- end }} 32 | path: "/metrics" 33 | {{- if .Values.prometheus.thanosServiceMonitor.metricRelabelings }} 34 | metricRelabelings: 35 | {{ tpl (toYaml .Values.prometheus.thanosServiceMonitor.metricRelabelings | indent 6) . 
}} 36 | {{- end }} 37 | {{- if .Values.prometheus.thanosServiceMonitor.relabelings }} 38 | relabelings: 39 | {{ toYaml .Values.prometheus.thanosServiceMonitor.relabelings | indent 6 }} 40 | {{- end }} 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/servicemonitors.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.additionalServiceMonitors }} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- range .Values.prometheus.additionalServiceMonitors }} 6 | - apiVersion: monitoring.coreos.com/v1 7 | kind: ServiceMonitor 8 | metadata: 9 | name: {{ .name }} 10 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 11 | labels: 12 | app: {{ template "kube-prometheus-stack.name" $ }}-prometheus 13 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 14 | {{- if .additionalLabels }} 15 | {{ toYaml .additionalLabels | indent 8 }} 16 | {{- end }} 17 | spec: 18 | endpoints: 19 | {{ toYaml .endpoints | indent 8 }} 20 | {{- if .jobLabel }} 21 | jobLabel: {{ .jobLabel }} 22 | {{- end }} 23 | {{- if .namespaceSelector }} 24 | namespaceSelector: 25 | {{ toYaml .namespaceSelector | indent 8 }} 26 | {{- end }} 27 | selector: 28 | {{ toYaml .selector | indent 8 }} 29 | {{- if .targetLabels }} 30 | targetLabels: 31 | {{ toYaml .targetLabels | indent 8 }} 32 | {{- end }} 33 | {{- if .podTargetLabels }} 34 | podTargetLabels: 35 | {{ toYaml .podTargetLabels | indent 8 }} 36 | {{- end }} 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/chart/templates/prometheus/serviceperreplica.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.servicePerReplica.enabled }} 2 | {{- 
$count := .Values.prometheus.prometheusSpec.replicas | int -}} 3 | {{- $serviceValues := .Values.prometheus.servicePerReplica -}} 4 | apiVersion: v1 5 | kind: List 6 | metadata: 7 | name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-serviceperreplica 8 | namespace: {{ template "kube-prometheus-stack.namespace" . }} 9 | items: 10 | {{- range $i, $e := until $count }} 11 | - apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} 15 | namespace: {{ template "kube-prometheus-stack.namespace" $ }} 16 | labels: 17 | app: {{ include "kube-prometheus-stack.name" $ }}-prometheus 18 | {{ include "kube-prometheus-stack.labels" $ | indent 8 }} 19 | {{- if $serviceValues.annotations }} 20 | annotations: 21 | {{ toYaml $serviceValues.annotations | indent 8 }} 22 | {{- end }} 23 | spec: 24 | {{- if $serviceValues.clusterIP }} 25 | clusterIP: {{ $serviceValues.clusterIP }} 26 | {{- end }} 27 | {{- if $serviceValues.loadBalancerSourceRanges }} 28 | loadBalancerSourceRanges: 29 | {{- range $cidr := $serviceValues.loadBalancerSourceRanges }} 30 | - {{ $cidr }} 31 | {{- end }} 32 | {{- end }} 33 | ports: 34 | - name: {{ $.Values.prometheus.prometheusSpec.portName }} 35 | {{- if eq $serviceValues.type "NodePort" }} 36 | nodePort: {{ $serviceValues.nodePort }} 37 | {{- end }} 38 | port: {{ $serviceValues.port }} 39 | targetPort: {{ $serviceValues.targetPort }} 40 | selector: 41 | app.kubernetes.io/name: prometheus 42 | prometheus: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus 43 | statefulset.kubernetes.io/pod-name: prometheus-{{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} 44 | type: "{{ $serviceValues.type }}" 45 | {{- end }} 46 | {{- end }} 47 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/Prometheus-targets.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/Prometheus-targets.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/alert-manager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/alert-manager.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/grafana.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/install-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/install-log.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/kube-metrics-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/kube-metrics-metrics.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/kube-metrics.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/kube-metrics.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/kube-prometheus-stack-mind.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/kube-prometheus-stack-mind.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/node-exporter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/node-exporter.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/prometheus-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/prometheus-arch.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/prometheus-operator-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/prometheus-operator-arch.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/stack-components.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/stack-components.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/imgs/uninstall-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/helm/kube-prometheus-stack/imgs/uninstall-log.png -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/log/startup.log.sh: -------------------------------------------------------------------------------- 1 | NAME: prometheus-stack 2 | LAST DEPLOYED: Wed Oct 6 16:24:03 2021 3 | NAMESPACE: default 4 | STATUS: deployed 5 | REVISION: 1 6 | NOTES: 7 | kube-prometheus-stack has been installed. Check its status by running: 8 | kubectl --namespace default get pods -l "release=prometheus-stack" 9 | 10 | Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 11 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/uninstall.sh: -------------------------------------------------------------------------------- 1 | 2 | echo 'uninstalling prometheus-stack...' 
3 | helm uninstall prometheus-stack -n monitoring 4 | echo 5 | 6 | echo '---- clear 👌 ---- ' 7 | -------------------------------------------------------------------------------- /support/helm/kube-prometheus-stack/values.yaml: -------------------------------------------------------------------------------- 1 | ###### 覆盖默认值 2 | 3 | # 解决 node-exploter 挂载问题 https://github.com/prometheus-community/helm-charts/issues/467 4 | prometheus-node-exporter: 5 | hostRootFsMount: false 6 | -------------------------------------------------------------------------------- /support/helm/metrics-server/fast-metrics.sh: -------------------------------------------------------------------------------- 1 | # 查询 metrics 2 | kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" 3 | 4 | # top xxx 5 | kubectl top pod --all-namespaces 6 | kubectl top node 7 | -------------------------------------------------------------------------------- /support/helm/metrics-server/log/startup.log.sh: -------------------------------------------------------------------------------- 1 | NAME: metrics 2 | LAST DEPLOYED: Wed Oct 6 15:15:12 2021 3 | NAMESPACE: kube-system 4 | 5 | STATUS: deployed 6 | REVISION: 1 7 | TEST SUITE: None 8 | NOTES: 9 | ** Please be patient while the chart is being deployed ** 10 | 11 | The metric server has been deployed. 
12 | 13 | In a few minutes you should be able to list metrics using the following 14 | command: 15 | 16 | kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" 17 | -------------------------------------------------------------------------------- /support/helm/metrics-server/metrics-server.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # 指标聚合服务 3 | # 4 | # bitnami/metrics-server 5 | # https://artifacthub.io/packages/helm/bitnami/metrics-server 6 | ##### 7 | helm install metrics bitnami/metrics-server -n metrics-server \ 8 | --namespace kube-system \ 9 | -f metrics-server.yaml 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /support/helm/metrics-server/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | #args: 2 | # - --logtostderr 3 | # - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 4 | # - --kubelet-use-node-status-port 5 | # - --kubelet-insecure-tls 6 | # - --metric-resolution=15s 7 | 8 | # 覆盖 values 中的参数 9 | apiService: 10 | create: true 11 | 12 | extraArgs: 13 | logtostderr: true 14 | kubelet-insecure-tls: true 15 | kubelet-preferred-address-types: InternalIP,ExternalIP,Hostname 16 | kubelet-use-node-status-port: true 17 | metric-resolution: 15s 18 | -------------------------------------------------------------------------------- /support/helm/metrics-server/metrics/nodes.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "NodeMetricsList", 3 | "apiVersion": "metrics.k8s.io/v1beta1", 4 | "metadata": {}, 5 | "items": [ 6 | { 7 | "metadata": { 8 | "name": "docker-desktop", 9 | "creationTimestamp": "2021-10-06T07:29:15Z", 10 | "labels": { 11 | "beta.kubernetes.io/arch": "amd64", 12 | "beta.kubernetes.io/os": "linux", 13 | "kubernetes.io/arch": "amd64", 14 | "kubernetes.io/hostname": "docker-desktop", 15 | "kubernetes.io/os": "linux", 
16 | "node-role.kubernetes.io/control-plane": "", 17 | "node-role.kubernetes.io/master": "", 18 | "node.kubernetes.io/exclude-from-external-load-balancers": "" 19 | } 20 | }, 21 | "timestamp": "2021-10-06T07:29:14Z", 22 | "window": "21s", 23 | "usage": { 24 | "cpu": "578340470n", 25 | "memory": "1895088Ki" 26 | } 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /support/helm/metrics-server/uninstall.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # 卸载 metrics server 3 | ##### 4 | helm uninstall --namespace kube-system metrics 5 | -------------------------------------------------------------------------------- /support/kubectl/top/kubectl-top-help.sh: -------------------------------------------------------------------------------- 1 | Display Resource (CPU/Memory) usage. 2 | 3 | The top command allows you to see the resource consumption for nodes or pods. 4 | 5 | This command requires Metrics Server to be correctly configured and working on the server. 6 | 7 | Available Commands: 8 | node 显示 nodes 的 Resource (CPU/Memory) 使用 9 | pod 显示 pods 的 Resource (CPU/Memory) 使用 10 | 11 | Usage: 12 | kubectl top [flags] [options] 13 | 14 | Use "kubectl --help" for more information about a given command. 15 | Use "kubectl options" for a list of global command-line options (applies to all commands). 
16 | -------------------------------------------------------------------------------- /support/kubernetes-dashboard/README.md: -------------------------------------------------------------------------------- 1 | # kubernates dashboard starter 2 | 3 | 官方提供的 dashboard ,用于部署、管理和测试小规模简单集群 4 | 5 | ## 部署步骤 6 | 7 | ### 一键启动 8 | 9 | 运行安装脚本 `./start.sh` ,即可部署 dashboard 的所有资源,接入 k8s 集群,并自动打印默认的登录 tokeb 10 | 11 | ![](./imgs/start-log.png) 12 | ![](./imgs/dashboard-pods.png) 13 | 14 | ### 登录 15 | 16 | 打开 [dashboard](https://localhost:31443) ,并输入启动时打印的默认 token 17 | 18 | ![](./imgs/login-token.png) 19 | ![](./imgs/dashboard-home.png) 20 | 21 | ## 一键卸载 22 | 23 | 运行卸载脚本 `./unstall.sh` ,即可彻底卸载 dashboard 的所有资源,不污染本地测试环境。 24 | 25 | ![](./imgs/uninstall-log.png) 26 | -------------------------------------------------------------------------------- /support/kubernetes-dashboard/imgs/dashboard-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/kubernetes-dashboard/imgs/dashboard-home.png -------------------------------------------------------------------------------- /support/kubernetes-dashboard/imgs/dashboard-pods.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/kubernetes-dashboard/imgs/dashboard-pods.png -------------------------------------------------------------------------------- /support/kubernetes-dashboard/imgs/login-token.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/kubernetes-dashboard/imgs/login-token.png -------------------------------------------------------------------------------- 
/support/kubernetes-dashboard/imgs/start-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/kubernetes-dashboard/imgs/start-log.png -------------------------------------------------------------------------------- /support/kubernetes-dashboard/imgs/uninstall-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/kubernetes-dashboard/imgs/uninstall-log.png -------------------------------------------------------------------------------- /support/kubernetes-dashboard/kubernetes-dashboard-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kubernetes-dashboard 6 | -------------------------------------------------------------------------------- /support/kubernetes-dashboard/start.sh: -------------------------------------------------------------------------------- 1 | # Web 界面 (Dashboard) https://kubernetes.io/zh/docs/tasks/access-application-cluster/web-ui-dashboard/ 2 | echo '---- staring kubernetes-dashboard:v2.2.0 ...' 
3 | kubectl apply -f ./dashboard-v2.2.0.yaml 4 | echo 5 | 6 | sleep 10 7 | ns=kubernetes-dashboard 8 | defaultAccount=`kubectl get secrets -n $ns | grep default-token | awk '{print $1}'` 9 | token=`kubectl get secrets ${defaultAccount} -n $ns -o jsonpath={.data.token} | base64 -d ` 10 | 11 | echo "$ns 默认账户:" $defaultAccount 12 | echo "$ns 默认token:" $token 13 | 14 | #echo 15 | echo '---- 打开 dashboard' 16 | echo https://localhost:31443 17 | open https://localhost:31443 18 | -------------------------------------------------------------------------------- /support/kubernetes-dashboard/unstall.sh: -------------------------------------------------------------------------------- 1 | ##### 2 | # 必须联合使用以下策略进行删除,否则无法实现删除,一直处于 Terminating... 3 | ##### 4 | kubectl delete -f dashboard-v2.2.0.yaml 5 | echo 6 | sleep 10 7 | 8 | echo '---- deleting force ...' 9 | kubectl delete --force --grace-period=0 deployment kubernetes-dashboard --namespace=kubernetes-dashboard 10 | kubectl delete --force --grace-period=0 service kubernetes-dashboard --namespace=kubernetes-dashboard 11 | kubectl delete --force --grace-period=0 sa kubernetes-dashboard --namespace=kubernetes-dashboard 12 | 13 | kubectl delete --force --grace-period=0 role kubernetes-dashboard-minimal --namespace=kubernetes-dashboard 14 | kubectl delete --force --grace-period=0 rolebinding kubernetes-dashboard-minimal --namespace=kubernetes-dashboard 15 | 16 | kubectl delete --force --grace-period=0 secret kubernetes-dashboard-certs --namespace=kubernetes-dashboard 17 | kubectl delete --force --grace-period=0 secret kubernetes-dashboard-csrf --namespace=kubernetes-dashboard 18 | kubectl delete --force --grace-period=0 secret kubernetes-dashboard-key-holder --namespace=kubernetes-dashboard 19 | echo 20 | sleep 10 21 | 22 | 23 | #echo '---- finalize....' 
24 | ## 匿名管理员 25 | #kubectl create clusterrolebinding test:anonymous --clusterrole=cluster-admin --user=system:anonymous 26 | #kubectl get ns kubernetes-dashboard -o json >kubernetes-dashboard-ns.json 27 | #curl -k -H "Content-Type:application/json" -X PUT --data-binary @kubernetes-dashboard-ns.json https://localhost:6443/api/v1/namespaces/kubernetes-dashboard/finalize 28 | 29 | kubectl get namespaces | grep kubernetes-dashboard 30 | echo '---- well done 😁 ----' 31 | -------------------------------------------------------------------------------- /support/nginx-ingress/tomcat-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: tomcat-deployment 5 | labels: 6 | app: tomcat-deployment 7 | spec: 8 | replicas: 3 # 副本数量 9 | strategy: # 更新策略 10 | type: RollingUpdate 11 | selector: # 通过标签选择,限制该 Deployment 管理的 Pod 12 | matchLabels: 13 | app: tomcat9 # 需与 spec.template.metadata.labels.app 属性对应 14 | # 部署/更新 pod 的模板 15 | template: 16 | metadata: 17 | name: tomcat-deployment-tmpl 18 | labels: 19 | app: tomcat9 # 标签,会绑定到该Deployment锁管理的所有Pod上 20 | spec: 21 | # 指定在某个Node节点运行 22 | nodeName: docker-desktop 23 | containers: 24 | - name: tomcat-cotainer 25 | image: tomcat:9.0.20-jre8-alpine 26 | imagePullPolicy: IfNotPresent # 镜像拉取策略 27 | ports: 28 | - containerPort: 8080 # 容器内的应用端控,无法被外界访问 29 | 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: tomcat-svc 35 | spec: 36 | selector: 37 | app: tomcat9 # 与控制器 spec.selector.matchLabels.app 属性对应 38 | type: NodePort # 让外部可以访问服务 39 | ports: 40 | - targetPort: 8080 # 容器内应用端口 spec.template.spec.containers[0].ports[0].containerPort 41 | port: 8888 # 暴露给集群内其他应用访问的端口 42 | nodePort: 30001 # 绑定主机(节点Node)端口,供集群外部访问 43 | 44 | # 定义 Ingress规则 45 | --- 46 | apiVersion: extensions/v1beta1 47 | kind: Ingress 48 | metadata: 49 | name: tomcat-ingress 50 | annotations: 51 | # nginx-controller的命令行参数指定加载对应class的ingress规则 52 
| kubernetes.io/ingress.class: nginx 53 | spec: 54 | rules: 55 | - host: localhost 56 | http: 57 | paths: 58 | - path: / 59 | backend: 60 | serviceName: tomcat-svc # 转发目标的服务名称 61 | servicePort: 8888 # 服务的端口 62 | -------------------------------------------------------------------------------- /support/rancher-ui/imgs/add-agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/add-agent.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/add-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/add-cluster.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/agent.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/cattle-resources.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/cattle-resources.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/chs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/chs.png 
-------------------------------------------------------------------------------- /support/rancher-ui/imgs/import.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/import.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/install.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/install.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/local-k8s-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/local-k8s-home.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/local-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/local-k8s.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/password.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/password.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/server-url.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/server-url.png -------------------------------------------------------------------------------- /support/rancher-ui/imgs/uninsall-log.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/imgs/uninsall-log.png -------------------------------------------------------------------------------- /support/rancher-ui/install.sh: -------------------------------------------------------------------------------- 1 | echo '---- starting rancher...' 2 | docker run -d --restart=unless-stopped \ 3 | -p 2080:80 -p 2443:443 \ 4 | --name rancher \ 5 | --privileged \ 6 | rancher/rancher:v2.4.4 7 | echo 8 | 9 | sleep 20 10 | # 必须使用内网ip,外网ip可能无法访问端口,localhost导入本地集群、启动 agent 时会报错 11 | innerIp=$(ifconfig | grep 192 | awk {'print $2'}) 12 | url=https://${innerIp}:2443 13 | 14 | echo '---- open rancher...' 
15 | echo $url 16 | open $url 17 | -------------------------------------------------------------------------------- /support/rancher-ui/readme.md: -------------------------------------------------------------------------------- 1 | # Rancher UI 2 | 3 | 专业的集群运维 Dashboard,支持 k8s 以及非 k8s 集群治理框架 4 | 5 | 6 | ## 本地部署 7 | 8 | ### 启动 rancher 容器 9 | 10 | ``` 11 | ./install.sh 12 | ``` 13 | 14 | ![](./imgs/install.png) 15 | 16 | ### 初始化 dashboard 17 | 18 | 打开 [dashboard](https://localhost:2443) 19 | 20 | #### 设置密码 21 | 22 | 保存好密码,用于下次登录 23 | 24 | ![](./imgs/password.png) 25 | 26 | #### 设置 Server 地址 27 | 28 | 必须使用内网ip,如 `https://192.168.0.105:2443 ` ,外网ip可能无法访问端口,localhost导入本地集群、启动 agent 时会报错 29 | 30 | ![](./imgs/server-url.png) 31 | 32 | #### 设置语言 33 | 34 | ![](./imgs/chs.png) 35 | 36 | ### 导入本地集群 37 | 38 | ![](./imgs/add-cluster.png) 39 | 40 | ![](./imgs/import.png) 41 | 42 | ![](./imgs/add-agent.png) 43 | 44 | 执行脚本,在 k8s 集群上部署 rancher 代理 45 | 46 | ![](./imgs/agent.png) 47 | 48 | ![](./imgs/cattle-resources.png) 49 | 50 | ![](./imgs/local-k8s-home.png) 51 | 52 | 53 | -------------------------------------------------------------------------------- /support/rancher-ui/tools.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeartx/awesome-kubernetes/19da187a9b6c251ccfccdc9ed8c7edc63b73a690/support/rancher-ui/tools.sh -------------------------------------------------------------------------------- /support/rancher-ui/uninstall.sh: -------------------------------------------------------------------------------- 1 | echo '---- unsinstalling rancher ...' 2 | docker rm -f rancher --volumes 3 | echo 4 | 5 | echo '---- clean resources in k8s ...' 
6 | ##### 7 | # 删除Rancher的空间cattle-system,状态一直是Terminating https://blog.51cto.com/michaelkang/2435467 8 | ##### 9 | kubectl patch namespace cattle-system -p '{"metadata":{"finalizers":[]}}' --type='merge' -n cattle-system 10 | kubectl delete namespace cattle-system --grace-period=0 --force 11 | 12 | kubectl patch namespace cattle-global-data -p '{"metadata":{"finalizers":[]}}' --type='merge' -n cattle-system 13 | kubectl delete namespace cattle-global-data --grace-period=0 --force 14 | 15 | kubectl patch namespace local -p '{"metadata":{"finalizers":[]}}' --type='merge' -n cattle-system 16 | for resource in `kubectl api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl get -o name -n local`; do kubectl patch $resource -p '{"metadata": {"finalizers": []}}' --type='merge' -n local; done 17 | kubectl delete namespace local --grace-period=0 --force 18 | echo 19 | 20 | echo '---- well done 👌 ...' 21 | --------------------------------------------------------------------------------