├── helm ├── prometheus-operator │ ├── charts │ │ ├── grafana │ │ │ ├── dashboards │ │ │ │ └── custom-dashboard.json │ │ │ ├── OWNERS │ │ │ ├── templates │ │ │ │ ├── tests │ │ │ │ │ ├── test-serviceaccount.yaml │ │ │ │ │ ├── test-configmap.yaml │ │ │ │ │ ├── test-role.yaml │ │ │ │ │ ├── test-rolebinding.yaml │ │ │ │ │ ├── test-podsecuritypolicy.yaml │ │ │ │ │ └── test.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── clusterrole.yaml │ │ │ │ ├── secret.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── pvc.yaml │ │ │ │ ├── configmap-dashboard-provider.yaml │ │ │ │ ├── rolebinding.yaml │ │ │ │ ├── dashboards-json-configmap.yaml │ │ │ │ ├── role.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── podsecuritypolicy.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── _helpers.tpl │ │ │ ├── .helmignore │ │ │ └── Chart.yaml │ │ ├── prometheus-node-exporter │ │ │ ├── OWNERS │ │ │ ├── Chart.yaml │ │ │ ├── .helmignore │ │ │ └── templates │ │ │ │ ├── endpoints.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── monitor.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── NOTES.txt │ │ │ │ ├── psp.yaml │ │ │ │ └── _helpers.tpl │ │ └── kube-state-metrics │ │ │ ├── OWNERS │ │ │ ├── .helmignore │ │ │ ├── templates │ │ │ ├── serviceaccount.yaml │ │ │ ├── psp-clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ ├── NOTES.txt │ │ │ ├── servicemonitor.yaml │ │ │ ├── service.yaml │ │ │ ├── podsecuritypolicy.yaml │ │ │ └── _helpers.tpl │ │ │ └── Chart.yaml │ ├── _dashboards │ │ └── README.md │ ├── OWNERS │ ├── hack │ │ ├── update-ci.sh │ │ └── minikube │ │ │ ├── values.yaml │ │ │ └── README.md │ ├── templates │ │ ├── NOTES.txt │ │ ├── prometheus │ │ │ ├── serviceaccount.yaml │ │ │ ├── additionalScrapeConfigs.yaml │ │ │ ├── additionalAlertmanagerConfigs.yaml │ │ │ ├── additionalAlertRelabelConfigs.yaml │ │ │ ├── psp-clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── psp-clusterrolebinding.yaml │ 
│ │ ├── podDisruptionBudget.yaml │ │ │ ├── clusterrole.yaml │ │ │ ├── servicemonitors.yaml │ │ │ ├── servicemonitor.yaml │ │ │ ├── rules │ │ │ │ ├── node-time.yaml │ │ │ │ ├── kube-apiserver.rules.yaml │ │ │ │ ├── general.rules.yaml │ │ │ │ ├── kube-prometheus-node-alerting.rules.yaml │ │ │ │ └── prometheus-operator.yaml │ │ │ ├── psp.yaml │ │ │ ├── additionalPrometheusRules.yaml │ │ │ ├── ingress.yaml │ │ │ └── service.yaml │ │ ├── alertmanager │ │ │ ├── serviceaccount.yaml │ │ │ ├── psp-clusterrole.yaml │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ ├── secret.yaml │ │ │ ├── podDisruptionBudget.yaml │ │ │ ├── servicemonitor.yaml │ │ │ ├── psp.yaml │ │ │ ├── service.yaml │ │ │ └── ingress.yaml │ │ ├── prometheus-operator │ │ │ ├── serviceaccount.yaml │ │ │ ├── psp-clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ ├── servicemonitor.yaml │ │ │ ├── psp.yaml │ │ │ ├── clusterrole.yaml │ │ │ ├── service.yaml │ │ │ └── cleanup-crds.yaml │ │ ├── exporters │ │ │ ├── kube-etcd │ │ │ │ ├── endpoints.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── core-dns │ │ │ │ ├── service.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── kube-scheduler │ │ │ │ ├── endpoints.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── kube-dns │ │ │ │ ├── service.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── kube-controller-manager │ │ │ │ ├── endpoints.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── servicemonitor.yaml │ │ │ ├── node-exporter │ │ │ │ └── servicemonitor.yaml │ │ │ ├── kube-state-metrics │ │ │ │ └── serviceMonitor.yaml │ │ │ └── kube-api-server │ │ │ │ └── servicemonitor.yaml │ │ └── grafana │ │ │ ├── configmap-dashboards.yaml │ │ │ ├── configmaps-datasources.yaml │ │ │ └── servicemonitor.yaml │ ├── .helmignore │ ├── requirements.lock │ ├── requirements.yaml │ ├── psp.yaml │ ├── Chart.yaml │ └── CONTRIBUTING.md ├── mysql-servicemonitor │ ├── templates │ │ ├── NOTES.txt │ │ ├── endpoints.yaml │ │ ├── 
servicemonitor.yaml │ │ └── _helpers.tpl │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ ├── bak │ │ └── prometheusrule.yaml │ └── README.md ├── redis-servicemonitor │ ├── templates │ │ ├── NOTES.txt │ │ ├── endpoints.yaml │ │ ├── servicemonitor.yaml │ │ └── _helpers.tpl │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ ├── bak │ │ └── prometheusrule.yaml │ └── README.md ├── zookeeper-servicemonitor │ ├── templates │ │ ├── NOTES.txt │ │ ├── service.yaml │ │ ├── endpoints.yaml │ │ ├── servicemonitor.yaml │ │ └── _helpers.tpl │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ ├── bak │ │ └── prometheusrule.yaml │ └── README.md ├── nginx-ingress-servicemonitor │ ├── templates │ │ ├── NOTES.txt │ │ ├── servicemonitor.yaml │ │ └── _helpers.tpl │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ ├── bak │ │ └── prometheusrule.yaml │ └── README.md ├── node-exporter-servicemonitor │ ├── templates │ │ ├── NOTES.txt │ │ ├── service.yaml │ │ ├── linux.rules.yaml │ │ ├── endpoints.yaml │ │ ├── prometheusrule.yaml │ │ ├── servicemonitor.yaml │ │ └── _helpers.tpl │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ └── README.md └── ceph-exporter │ ├── Chart.yaml │ ├── templates │ ├── ceph-exporter.rules.yaml │ ├── service.yaml │ ├── endpoints.yaml │ ├── servicemonitor.yaml │ ├── prometheusrule.yaml │ ├── NOTES.txt │ └── _helpers.tpl │ └── .helmignore ├── kubernetes-yaml ├── ceph │ ├── rbd │ │ ├── rbac │ │ │ ├── serviceaccount.yaml │ │ │ ├── role.yaml │ │ │ ├── rolebinding.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── deployment.yaml │ │ │ └── clusterrole.yaml │ │ ├── README.md │ │ └── class.yaml │ └── cephfs │ │ ├── rbac │ │ ├── serviceaccount.yaml │ │ ├── rolebinding.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── role.yaml │ │ ├── deployment.yaml │ │ └── clusterrole.yaml │ │ ├── class.yaml │ │ └── README.md ├── rook-ceph │ ├── cephfs │ │ ├── rbac │ │ │ ├── serviceaccount.yaml │ │ │ ├── rolebinding.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── role.yaml 
│ │ │ ├── deployment.yaml │ │ │ └── clusterrole.yaml │ │ └── class.yaml │ ├── rook-ceph-rbd-pvc.yaml │ ├── rook-ceph-cephfs-pvc.yaml │ ├── rook-ceph-ceph-filesystem.yaml │ ├── rook-ceph-block-pool.yaml │ ├── rook-ceph-dashboard.yaml │ ├── rook-ceph-rbd-nginx.yaml │ ├── rook-ceph-cephfs-nginx.yaml │ ├── rook-ceph-mon-svc.yaml │ ├── rook-ceph-toolbox.yaml │ └── rook-ceph-storage-class.yaml ├── rook-external-ceph │ ├── cephfs │ │ ├── rbac │ │ │ ├── serviceaccount.yaml │ │ │ ├── rolebinding.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── role.yaml │ │ │ ├── deployment.yaml │ │ │ └── clusterrole.yaml │ │ ├── import_client_admin_secret.sh │ │ └── class.yaml │ ├── rook-ceph-ceph-filesystem.yaml │ ├── rook-ceph-block-pool.yaml │ ├── rook-ceph-config-override.yaml │ ├── cluster-external.yaml │ └── rook-ceph-dashboard.yaml ├── nginx-ingress │ ├── nginx-ingress-tcp.yaml │ └── nginx-ingress-config.yaml ├── dashboard │ ├── user │ │ ├── others-readonly-dev.yaml │ │ ├── get_token.sh │ │ └── kubeapps.sh │ └── kubernetes-dashboard-admin.rbac.yaml └── bootstrapping │ └── v1.13 │ ├── csrs-for-bootstrapping.yaml │ ├── auto-approve-csrs-for-group.yaml │ ├── auto-approve-renewals-for-nodes.yaml │ └── bootstrap-kubelet.conf ├── README.md └── shell └── pv-manage ├── k8s_patch_pv.sh └── README.md /helm/prometheus-operator/charts/grafana/dashboards/custom-dashboard.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /helm/prometheus-operator/_dashboards/README.md: -------------------------------------------------------------------------------- 1 | 手动导入在grafana中,这些dashboard即可支持修改保存。 2 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - gianrubio 3 | reviewers: 4 | - 
gianrubio -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: rbd-provisioner 5 | -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | DEPRECATION NOTICE: 2 | 3 | - additionalRulesConfigMapLabels is not used anymore, use additionalRulesLabels -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - fiunchinho 3 | - tariq1890 4 | reviewers: 5 | - fiunchinho 6 | - tariq1890 7 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | DEPRECATION NOTICE: 2 | 3 | - additionalRulesConfigMapLabels is not used anymore, use additionalRulesLabels -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | DEPRECATION NOTICE: 2 | 3 | - additionalRulesConfigMapLabels is not used anymore, use additionalRulesLabels -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | DEPRECATION NOTICE: 2 | 3 | - additionalRulesConfigMapLabels is not used anymore, use additionalRulesLabels 
-------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | DEPRECATION NOTICE: 2 | 3 | - additionalRulesConfigMapLabels is not used anymore, use additionalRulesLabels -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - zanhsieh 3 | - rtluckie 4 | - maorfr 5 | reviewers: 6 | - zanhsieh 7 | - rtluckie 8 | - maorfr 9 | -------------------------------------------------------------------------------- /helm/prometheus-operator/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - gianrubio 3 | - vsliouniaev 4 | - anothertobi 5 | reviewers: 6 | - gianrubio 7 | - vsliouniaev 8 | - anothertobi 9 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: cephfs-provisioner 5 | 
namespace: rook-ceph 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## kubernetes-yaml 2 | Kubernetes编排整理 3 | 4 | ## helm 5 | 自己使用的helm包(可能有自行修改) 6 | 7 | ## kubeadm 8 | 一个安装3个master的shell脚本 9 | 10 | ## shell 11 | 管理kubernetes的一些脚本 12 | 13 | -------------------------------------------------------------------------------- /shell/pv-manage/k8s_patch_pv.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | exit 4 | kubectl patch pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' \ 5 | $(kubectl get pv|grep -v NAME|awk '{print $1}') 6 | -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart servicemonitor for mysql exporter 3 | name: mysql-servicemonitor 4 | version: 1.0.0 5 | appVersion: 5.7.24 6 | maintainers: 7 | - name: Chinge Yang 8 | email: 29ygq@sina.com 9 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart servicemonitor for mysql exporter 3 | name: redis-servicemonitor 4 | version: 1.0.0 5 | appVersion: 5.7.24 6 | maintainers: 7 | - name: Chinge Yang 8 | email: 29ygq@sina.com 9 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart servicemonitor for mysql exporter 3 | name: zookeeper-servicemonitor 4 | version: 1.0.0 5 | appVersion: 5.7.24 6 | maintainers: 7 | - name: 
Chinge Yang 8 | email: 29ygq@sina.com 9 | -------------------------------------------------------------------------------- /helm/prometheus-operator/hack/update-ci.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | sed -e 's/cleanupCustomResource: false/cleanupCustomResource: true/' \ 3 | -e 's/cleanupCustomResourceBeforeInstall: false/cleanupCustomResourceBeforeInstall: true/' \ 4 | values.yaml > ci/test-values.yaml 5 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart servicemonitor for node exporter 3 | name: node-exporter-servicemonitor 4 | version: 1.0.0 5 | appVersion: v0.17.0 6 | maintainers: 7 | - name: Chinge Yang 8 | email: 29ygq@sina.com 9 | -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart singleton for nginx ingress exporter 3 | name: nginx-ingress-servicemonitor 4 | version: 1.0.0 5 | appVersion: 0.20.0 6 | maintainers: 7 | - name: Chinge Yang 8 | email: 29ygq@sina.com 9 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-rbd-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: rbd-pv-claim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: ceph-rbd 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/import_client_admin_secret.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export NAMESPACE=rook-ceph 4 | ceph auth get-key client.admin > /etc/ceph/ceph.client.admin.secret 5 | 6 | kubectl create secret generic ceph-admin-secret --from-file=/etc/ceph/ceph.client.admin.secret --namespace=$NAMESPACE 7 | -------------------------------------------------------------------------------- /kubernetes-yaml/nginx-ingress/nginx-ingress-tcp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | "2222": devops/gitlab-gitlab-shell:2222 4 | kind: ConfigMap 5 | metadata: 6 | labels: 7 | app: nginx-ingress 8 | component: controller 9 | release: nginx-ingress 10 | name: nginx-ingress-tcp 11 | namespace: nginx-ingress 12 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-cephfs-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: cephfs-pv-claim 5 | annotations: 6 | volume.beta.kubernetes.io/storage-class: "cephfs" 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: rbd-provisioner 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["secrets"] 8 | verbs: ["get"] 9 | - apiGroups: [""] 10 | resources: ["endpoints"] 11 | verbs: ["get", "list", "watch", "create", "update", "patch"] -------------------------------------------------------------------------------- /helm/ceph-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | appVersion: "2.0.1-luminous" 3 | description: A Helm chart for Kubernetes 4 | name: ceph-exporter 5 | version: 1.0.0 6 | home: https://github.com/digitalocean/ceph_exporter/ 7 | icon: 8 | keywords: 9 | - ceph 10 | - exporter 11 | maintainers: 12 | - email: 29ygq@sina.com 13 | name: Chinge Yang 14 | -------------------------------------------------------------------------------- /helm/prometheus-operator/hack/minikube/values.yaml: -------------------------------------------------------------------------------- 1 | prometheus: 2 | prometheusSpec: 3 | secrets: [etcd-certs] 4 | kubeEtcd: 5 | serviceMonitor: 6 | scheme: https 7 | caFile: /etc/prometheus/secrets/etcd-certs/ca.crt 8 | certFile: /etc/prometheus/secrets/etcd-certs/client.crt 9 | keyFile: /etc/prometheus/secrets/etcd-certs/client.key -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: rbd-provisioner 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: rbd-provisioner 9 | subjects: 10 | - kind: ServiceAccount 11 | name: rbd-provisioner 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | The Prometheus Operator has been installed. Check its status by running: 2 | kubectl --namespace {{ .Release.Namespace }} get pods -l "release={{ .Release.Name }}" 3 | 4 | Visit https://github.com/coreos/prometheus-operator for instructions on how 5 | to create & configure Alertmanager and Prometheus instances using the Operator. 
-------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: cephfs-provisioner 10 | subjects: 11 | - kind: ServiceAccount 12 | name: cephfs-provisioner 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: cephfs-provisioner 10 | subjects: 11 | - kind: ServiceAccount 12 | name: cephfs-provisioner 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rbd-provisioner 5 | subjects: 6 | - kind: ServiceAccount 7 | name: rbd-provisioner 8 | namespace: kube-system 9 | roleRef: 10 | kind: ClusterRole 11 | name: rbd-provisioner 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-ceph-filesystem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephFilesystem 3 | metadata: 4 | name: cephfs-k8s 5 | namespace: rook-ceph 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 3 10 | dataPools: 
11 | - replicated: 12 | size: 3 13 | metadataServer: 14 | activeCount: 1 15 | activeStandby: true 16 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: cephfs-provisioner 10 | subjects: 11 | - kind: ServiceAccount 12 | name: cephfs-provisioner 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/rook-ceph-ceph-filesystem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephFilesystem 3 | metadata: 4 | name: cephfs-k8s 5 | namespace: rook-ceph 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 3 10 | dataPools: 11 | - replicated: 12 | size: 3 13 | metadataServer: 14 | activeCount: 1 15 | activeStandby: true 16 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | subjects: 6 | - kind: ServiceAccount 7 | name: cephfs-provisioner 8 | namespace: kube-system 9 | roleRef: 10 | kind: ClusterRole 11 | name: cephfs-provisioner 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: 
rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | subjects: 6 | - kind: ServiceAccount 7 | name: cephfs-provisioner 8 | namespace: rook-ceph 9 | roleRef: 10 | kind: ClusterRole 11 | name: cephfs-provisioner 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/hack/minikube/README.md: -------------------------------------------------------------------------------- 1 | The configuration in this folder lets you locally test the setup on minikube. Use cmd.sh to set up components and hack a working etcd scrape configuration. Run the commands in the sequence listed in the script to get a local working minikube cluster. 2 | 3 | If you're using windows, there's a commented-out section that you should add to the minikube command. -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | subjects: 6 | - kind: ServiceAccount 7 | name: cephfs-provisioner 8 | namespace: rook-ceph 9 | roleRef: 10 | kind: ClusterRole 11 | name: cephfs-provisioner 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/dashboard/user/others-readonly-dev.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: role-bind-dev-cook-readonly-dev 5 | namespace: dev 6 | subjects: 7 | - kind: ServiceAccount 8 | name: dev-cook-user1 9 | namespace: dev-cook 10 | roleRef: 11 | kind: Role 12 | name: role-dev-readonly 13 | apiGroup: rbac.authorization.k8s.io 14 | --- 15 | 
-------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["create", "get", "delete"] 10 | - apiGroups: [""] 11 | resources: ["endpoints"] 12 | verbs: ["get", "list", "watch", "create", "update", "patch"] 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["create", "get", "delete"] 10 | - apiGroups: [""] 11 | resources: ["endpoints"] 12 | verbs: ["get", "list", "watch", "create", "update", "patch"] 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/class.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: cephfs 5 | provisioner: ceph.com/cephfs 6 | reclaimPolicy: Retain 7 | parameters: 8 | monitors: 192.168.105.92:6789,192.168.105.93:6789,192.168.105.94:6789 9 | adminId: admin 10 | adminSecretName: ceph-admin-secret 11 | adminSecretNamespace: "kube-system" 12 | claimRoot: /volumes/kubernetes 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/class.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: cephfs 
5 | provisioner: ceph.com/cephfs 6 | reclaimPolicy: Retain 7 | parameters: 8 | monitors: 10.96.201.107:6789,10.96.105.92:6789,10.96.183.92:6789 9 | adminId: admin 10 | adminSecretName: ceph-admin-secret 11 | adminSecretNamespace: "rook-ceph" 12 | claimRoot: /volumes/kubernetes 13 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["create", "get", "delete"] 10 | - apiGroups: [""] 11 | resources: ["endpoints"] 12 | verbs: ["get", "list", "watch", "create", "update", "patch"] 13 | -------------------------------------------------------------------------------- /shell/pv-manage/README.md: -------------------------------------------------------------------------------- 1 | # 1. 介绍 2 | `k8s_patch_pv.sh` 用于修改pv的回收策略 3 | `k8s_unbound_pvc.sh` 用于解决pv和pvc的绑定关系,让pv变成Available的可分配状态(不清除数据) 4 | `k8s_recycler_pv.sh` 用于清除pv内数据,让pv变成Available的可分配状态 5 | `k8s_delete_pv.sh` 用于删除pv及ceph集群内rbd或者cephfs目录及用户 6 | 7 | # 2. 依赖 8 | `k8s_delete_pv.sh` 删除详情: 9 | 删除ceph rbd是ssh到ceph管理节点操作删除rbd; 10 | 删除cephfs是ssh到ceph管理节点操作删除cephfs目录及用户,前提是cephfs已经挂载至该ceph管理节点; 11 | -------------------------------------------------------------------------------- /helm/ceph-exporter/templates/ceph-exporter.rules.yaml: -------------------------------------------------------------------------------- 1 | {{ define "ceph-exporter.rules.yaml.tpl" }} 2 | groups: 3 | - name: ceph-exporter.rules 4 | rules: 5 | - alert: CephExporterDown 6 | expr: absent(up{job="ceph-exporter"} == 1) 7 | for: 5m 8 | labels: 9 | severity: critical 10 | annotations: 11 | description: There is no running ceph exporter. 
12 | summary: Ceph exporter is down 13 | {{ end }} 14 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/class.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: cephfs 5 | provisioner: ceph.com/cephfs 6 | reclaimPolicy: Retain 7 | parameters: 8 | monitors: ceph-mon1.utyun.cn:6789,ceph-mon2.utyun.cn:6789,ceph-mon3.utyun.cn:6789 9 | adminId: admin 10 | adminSecretName: ceph-admin-secret 11 | adminSecretNamespace: "rook-ceph" 12 | claimRoot: /volumes/kubernetes 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "grafana.serviceAccountNameTest" . }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /helm/ceph-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kubernetes-yaml/bootstrapping/v1.13/csrs-for-bootstrapping.yaml: -------------------------------------------------------------------------------- 1 | # enable bootstrapping nodes to create CSR 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: create-csrs-for-bootstrapping 6 | subjects: 7 | - kind: Group 8 | name: system:bootstrappers 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: system:node-bootstrapper 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "grafana.serviceAccountName" . 
}} 11 | namespace: {{ .Release.Namespace }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "0.18.0" 3 | description: A Helm chart for prometheus node-exporter 4 | name: prometheus-node-exporter 5 | version: 1.5.1 6 | home: https://github.com/prometheus/node_exporter/ 7 | sources: 8 | - https://github.com/prometheus/node_exporter/ 9 | keywords: 10 | - node-exporter 11 | - prometheus 12 | - exporter 13 | maintainers: 14 | - email: gianrubio@gmail.com 15 | name: gianrubio 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | OWNERS 23 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kubernetes-yaml/bootstrapping/v1.13/auto-approve-csrs-for-group.yaml: -------------------------------------------------------------------------------- 1 | # Approve all CSRs for the group "system:bootstrappers" 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: auto-approve-csrs-for-group 6 | subjects: 7 | - kind: Group 8 | name: system:bootstrappers 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: system:certificates.k8s.io:certificatesigningrequests:nodeclient 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /kubernetes-yaml/bootstrapping/v1.13/auto-approve-renewals-for-nodes.yaml: -------------------------------------------------------------------------------- 1 | # Approve renewal CSRs for the group "system:nodes" 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: 
auto-approve-renewals-for-nodes 6 | subjects: 7 | - kind: Group 8 | name: system:nodes 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }} 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | imagePullSecrets: 10 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-block-pool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephBlockPool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | failureDomain: host 8 | replicated: 9 | size: 2 10 | # Sets up the CRUSH rule for the pool to distribute data only on the specified device class. 11 | # If left empty or unspecified, the pool will use the cluster’s default CRUSH root, which usually distributes data over all OSDs, regardless of their class. 
12 | # deviceClass: hdd 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }} 6 | labels: 7 | {{ include "prometheus-node-exporter.labels" . | indent 4 }} 8 | subsets: 9 | - addresses: 10 | {{- range .Values.endpoints }} 11 | - ip: {{ . }} 12 | {{- end }} 13 | ports: 14 | - name: metrics 15 | port: 9100 16 | protocol: TCP 17 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "prometheus-operator.alertmanager.serviceAccountName" . }} 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | imagePullSecrets: 10 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/rook-ceph-block-pool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1 2 | kind: CephBlockPool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | failureDomain: host 8 | replicated: 9 | size: 2 10 | # Sets up the CRUSH rule for the pool to distribute data only on the specified device class. 
11 | # If left empty or unspecified, the pool will use the cluster’s default CRUSH root, which usually distributes data over all OSDs, regardless of their class. 12 | # deviceClass: hdd 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | # helm/charts 23 | OWNERS 24 | hack/ 25 | ci/ 26 | prometheus-operator-*.tgz 27 | -------------------------------------------------------------------------------- /helm/prometheus-operator/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: kube-state-metrics 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 1.6.3 5 | - name: prometheus-node-exporter 6 | repository: https://kubernetes-charts.storage.googleapis.com/ 7 | version: 1.5.0 8 | - name: grafana 9 | repository: https://kubernetes-charts.storage.googleapis.com/ 10 | version: 3.3.10 11 | digest: sha256:7e2bb81348c99897fca9fffd21c0a2e5bbbd93200a249fabe4c1576b700f43de 12 | generated: 2019-06-01T00:40:22.8458175+01:00 13 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: 
ServiceAccount 4 | metadata: 5 | name: {{ template "prometheus-operator.operator.serviceAccountName" . }} 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | imagePullSecrets: 10 | {{ toYaml .Values.global.imagePullSecrets | indent 2 }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app: {{ template "kube-state-metrics.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "kube-state-metrics.fullname" . }} 11 | imagePullSecrets: 12 | {{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} 13 | {{- end -}} 14 | -------------------------------------------------------------------------------- /helm/prometheus-operator/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | 3 | - name: kube-state-metrics 4 | version: 1.6.* 5 | repository: https://kubernetes-charts.storage.googleapis.com/ 6 | condition: kubeStateMetrics.enabled 7 | 8 | - name: prometheus-node-exporter 9 | version: 1.5.* 10 | repository: https://kubernetes-charts.storage.googleapis.com/ 11 | condition: nodeExporter.enabled 12 | 13 | - name: grafana 14 | version: 3.3.* 15 | repository: https://kubernetes-charts.storage.googleapis.com/ 16 | condition: grafana.enabled 17 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
name: kube-state-metrics 3 | description: Install kube-state-metrics to generate and expose cluster-level metrics 4 | keywords: 5 | - metric 6 | - monitoring 7 | - prometheus 8 | - kubernetes 9 | version: 1.6.5 10 | appVersion: 1.6.0 11 | home: https://github.com/kubernetes/kube-state-metrics/ 12 | sources: 13 | - https://github.com/kubernetes/kube-state-metrics/ 14 | maintainers: 15 | - name: fiunchinho 16 | email: jose@armesto.net 17 | - name: tariq1890 18 | email: tariq.ibrahim@mulesoft.com 19 | -------------------------------------------------------------------------------- /kubernetes-yaml/nginx-ingress/nginx-ingress-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: dashboard-ingress 5 | namespace: kube-system 6 | spec: 7 | tls: 8 | - hosts: 9 | - k8s.linuxba.com 10 | secretName: ingress-secret 11 | rules: 12 | - host: k8s.linuxba.com 13 | http: 14 | paths: 15 | - path: / 16 | backend: 17 | serviceName: nginx-test 18 | servicePort: 80 19 | 20 | # - path: /nginx 21 | # backend: 22 | # serviceName: nginx 23 | # servicePort: 80 24 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "node-exporter-servicemonitor.fullname" . }} 5 | labels: 6 | app: {{ template "node-exporter-servicemonitor.name" . 
}} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | heritage: "{{ .Release.Service }}" 9 | release: "{{ .Release.Name }}" 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: {{ .Values.metricsPort }} 14 | targetPort: {{ .Values.metricsPort }} 15 | protocol: TCP 16 | name: {{ .Values.metricsPortName }} 17 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: rbd-provisioner 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | app: rbd-provisioner 14 | spec: 15 | containers: 16 | - name: rbd-provisioner 17 | image: "quay.io/external_storage/rbd-provisioner:latest" 18 | env: 19 | - name: PROVISIONER_NAME 20 | value: ceph.com/rbd 21 | serviceAccount: rbd-provisioner 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/additionalScrapeConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalScrapeConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-scrape-confg 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus-scrape-confg 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | data: 10 | additional-scrape-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalScrapeConfigs | b64enc | quote }} 11 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1beta1 5 | metadata: 6 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 7 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 8 | rules: 9 | - apiGroups: ['extensions'] 10 | resources: ['podsecuritypolicies'] 11 | verbs: ['use'] 12 | resourceNames: 13 | - {{ template "prometheus-node-exporter.fullname" . }} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "grafana.fullname" . }}-test 5 | labels: 6 | app: {{ template "grafana.fullname" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | heritage: "{{ .Release.Service }}" 9 | release: "{{ .Release.Name }}" 10 | data: 11 | run.sh: |- 12 | @test "Test Health" { 13 | url="http://{{ template "grafana.fullname" . 
}}/api/health" 14 | 15 | code=$(curl -s -o /dev/null -I -w "%{http_code}" $url) 16 | [ "$code" == "200" ] 17 | } 18 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/additionalAlertmanagerConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-confg 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus-am-confg 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | data: 10 | additional-alertmanager-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs | b64enc | quote }} 11 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | rules: 12 | - apiGroups: ['policy'] 13 | resources: ['podsecuritypolicies'] 14 | verbs: ['use'] 15 | resourceNames: [{{ template "grafana.fullname" . }}-test] 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/README.md: -------------------------------------------------------------------------------- 1 | 2 | # 1. 
将ceph密码环导入kubernetes 3 | 4 | ## 1.1 导入admin密钥环 5 | ```bash 6 | ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3'} |xargs echo -n > /tmp/secret 7 | kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret --namespace=kube-system 8 | ``` 9 | 10 | ## 1.2 创建Ceph pool 和user secret 11 | ```bash 12 | ceph osd pool create kube 128 128 13 | ceph auth add client.kube mon 'allow r' osd 'allow rwx pool=kube' 14 | ceph auth get-key client.kube > /tmp/kube.secret 15 | kubectl create secret generic ceph-secret --from-file=/tmp/kube.secret --namespace=kube-system 16 | ``` -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-prod 6 | kubernetes.io/tls-acme: "true" 7 | name: rook-ceph-mgr-dashboard 8 | namespace: rook-ceph 9 | spec: 10 | rules: 11 | - host: ceph-dashboard.utyun.com 12 | http: 13 | paths: 14 | - backend: 15 | serviceName: rook-ceph-mgr-dashboard 16 | servicePort: 7000 17 | path: / 18 | tls: 19 | - hosts: 20 | - ceph-dashboard.utyun.com 21 | secretName: tls-ceph-dashboard-utyun-com 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/additionalAlertRelabelConfigs.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-am-relabel-confg 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus-am-relabel-confg 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | data: 10 | additional-alert-relabel-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs | b64enc | quote }} 11 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: grafana 3 | version: 3.5.7 4 | appVersion: 6.2.4 5 | kubeVersion: "^1.8.0-0" 6 | description: The leading tool for querying and visualizing time series and metrics. 7 | home: https://grafana.net 8 | icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png 9 | sources: 10 | - https://github.com/grafana/grafana 11 | maintainers: 12 | - name: zanhsieh 13 | email: zanhsieh@gmail.com 14 | - name: rtluckie 15 | email: rluckie@cisco.com 16 | - name: maorfr 17 | email: maor.friedman@redhat.com 18 | engine: gotpl 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | {{- if .Values.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ template "prometheus-node-exporter.serviceAccountName" . }} 7 | labels: 8 | app: {{ template "prometheus-node-exporter.name" . }} 9 | chart: {{ template "prometheus-node-exporter.chart" . 
}} 10 | release: "{{ .Release.Name }}" 11 | heritage: "{{ .Release.Service }}" 12 | imagePullSecrets: 13 | {{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} 14 | {{- end -}} 15 | {{- end -}} -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/values.yaml: -------------------------------------------------------------------------------- 1 | # ingress namespace 2 | namespaceSelector: nginx-ingress 3 | # on what port are the metrics exposed by etcd 4 | metricsPortName: metrics 5 | # Are we talking http or https? 6 | scheme: http 7 | # default rules are in templates/nginx-ingress-servicemonitor.rules.yaml 8 | # prometheusRules: {} 9 | ## Custom Labels to be added to ServiceMonitor 10 | # 经过测试,servicemonitor标签添加prometheus operator的release标签即可正常监控 11 | additionalServiceMonitorLabels: 12 | release: prometheus-operator 13 | ##Custom Labels to be added to Prometheus Rules CRD 14 | additionalRulesLabels: 15 | release: prometheus-operator 16 | -------------------------------------------------------------------------------- /helm/ceph-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "ceph-exporter.fullname" . }} 5 | labels: 6 | app: {{ template "ceph-exporter.name" . }} 7 | chart: {{ template "ceph-exporter.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.port }} 14 | targetPort: {{ .Values.service.port }} 15 | protocol: TCP 16 | name: http-metrics 17 | selector: 18 | app: {{ template "ceph-exporter.name" . 
}} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /kubernetes-yaml/dashboard/kubernetes-dashboard-admin.rbac.yaml: -------------------------------------------------------------------------------- 1 | # Create Service Account 2 | # We are creating Service Account with name kubernetes-dashboard in namespace kube-system first. 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: admin-user 7 | namespace: kube-system 8 | --- 9 | # Create ClusterRoleBinding 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRoleBinding 12 | metadata: 13 | name: admin-user 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: ClusterRole 17 | name: cluster-admin 18 | subjects: 19 | - kind: ServiceAccount 20 | name: admin-user 21 | namespace: kube-system 22 | 23 | -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/values.yaml: -------------------------------------------------------------------------------- 1 | # where the mysql installed namespace 2 | namespaceSelector: prod 3 | # endports 4 | endpoints: [] 5 | # on what port are the metrics exposed by etcd 6 | metricsPortName: metrics 7 | # Are we talking http or https? 
8 | scheme: http 9 | # default rules are in templates/mysql-servicemonitor.rules.yaml 10 | # prometheusRules: {} 11 | ## Custom Labels to be added to ServiceMonitor 12 | # 经过测试,servicemonitor标签添加prometheus operator的release标签即可正常监控 13 | additionalServiceMonitorLabels: 14 | release: prometheus-operator 15 | ##Custom Labels to be added to Prometheus Rules CRD 16 | additionalRulesLabels: 17 | release: prometheus-operator 18 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app: {{ template "kube-state-metrics.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: psp-{{ template "kube-state-metrics.fullname" . }} 11 | rules: 12 | - apiGroups: ['extensions'] 13 | resources: ['podsecuritypolicies'] 14 | verbs: ['use'] 15 | resourceNames: 16 | - {{ template "kube-state-metrics.fullname" . }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | rules: 10 | - apiGroups: ['extensions'] 11 | resources: ['podsecuritypolicies'] 12 | verbs: ['use'] 13 | resourceNames: 14 | - {{ template "prometheus-operator.fullname" . }}-prometheus 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/class.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: ceph-rbd 5 | annotations: 6 | storageclass.beta.kubernetes.io/is-default-class: "true" 7 | reclaimPolicy: Retain 8 | provisioner: ceph.com/rbd 9 | allowVolumeExpansion: true 10 | parameters: 11 | monitors: 192.168.105.92:6789,192.168.105.93:6789,192.168.105.94:6789 12 | pool: kube 13 | adminId: admin 14 | adminSecretNamespace: kube-system 15 | adminSecretName: ceph-admin-secret 16 | userId: kube 17 | userSecretNamespace: kube-system 18 | userSecretName: ceph-secret 19 | fsType: ext4 20 | imageFormat: "2" 21 | imageFeatures: layering 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | rules: 10 | - apiGroups: ['extensions'] 11 | resources: ['podsecuritypolicies'] 12 | verbs: ['use'] 13 | resourceNames: 14 | - {{ template "prometheus-operator.fullname" . 
}}-alertmanager 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator-psp 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | rules: 10 | - apiGroups: ['extensions'] 11 | resources: ['podsecuritypolicies'] 12 | verbs: ['use'] 13 | resourceNames: 14 | - {{ template "prometheus-operator.fullname" . }}-operator 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/psp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | name: prometheus-operator-grafana-test 5 | labels: 6 | app: grafana 7 | chart: grafana-3.5.7 8 | heritage: Tiller 9 | release: prometheus-operator 10 | spec: 11 | allowPrivilegeEscalation: true 12 | privileged: false 13 | hostNetwork: false 14 | hostIPC: false 15 | hostPID: false 16 | fsGroup: 17 | rule: RunAsAny 18 | seLinux: 19 | rule: RunAsAny 20 | supplementalGroups: 21 | rule: RunAsAny 22 | runAsUser: 23 | rule: RunAsAny 24 | volumes: 25 | - configMap 26 | - downwardAPI 27 | - emptyDir 28 | - projected 29 | - secret 30 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/values.yaml: -------------------------------------------------------------------------------- 1 | # where the mysql installed namespace 2 | namespaceSelector: prod 3 | # endports 4 | endpoints: [] 
5 | # on what port are the metrics exposed by redis 6 | metricsPortName: metrics 7 | metricsPort: 9121 8 | # Are we talking http or https? 9 | scheme: http 10 | # default rules are in templates/redis-servicemonitor.rules.yaml 11 | # prometheusRules: {} 12 | ## Custom Labels to be added to ServiceMonitor 13 | # 经过测试,servicemonitor标签添加prometheus operator的release标签即可正常监控 14 | additionalServiceMonitorLabels: 15 | release: prometheus-operator 16 | ##Custom Labels to be added to Prometheus Rules CRD 17 | additionalRulesLabels: 18 | release: prometheus-operator 19 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-rbd-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-rbd-dy 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | name: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - containerPort: 80 18 | volumeMounts: 19 | - name: ceph-cephfs-volume 20 | mountPath: "/usr/share/nginx/html" 21 | volumes: 22 | - name: ceph-cephfs-volume 23 | persistentVolumeClaim: 24 | claimName: rbd-pv-claim 25 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-cephfs-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-cephfs-dy 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | name: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - containerPort: 80 18 | volumeMounts: 19 | - name: ceph-cephfs-volume 20 | mountPath: "/usr/share/nginx/html" 21 | volumes: 22 | - name: ceph-cephfs-volume 23 | persistentVolumeClaim: 24 | 
claimName: cephfs-pv-claim 25 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "zookeeper-servicemonitor.fullname" . }} 5 | labels: 6 | app: {{ template "zookeeper-servicemonitor.name" . }} 7 | chart: {{ template "zookeeper-servicemonitor.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | namespace: {{ .Values.namespaceSelector }} 11 | spec: 12 | type: ClusterIP 13 | ports: 14 | - port: {{ .Values.metricsPort }} 15 | targetPort: {{ .Values.metricsPort }} 16 | protocol: TCP 17 | name: "{{ .Values.metricsPortName }}" 18 | selector: 19 | app: zookeeper 20 | component: server 21 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/values.yaml: -------------------------------------------------------------------------------- 1 | # the namespace where zookeeper is installed 2 | namespaceSelector: prod 3 | # endpoints 4 | endpoints: [] 5 | # on what port are the metrics exposed by zookeeper 6 | metricsPortName: zookeeperxp 7 | metricsPort: 9141 8 | # Are we talking http or https?
9 | scheme: http 10 | # default rules are in templates/zookeeper-servicemonitor.rules.yaml 11 | # prometheusRules: {} 12 | ## Custom Labels to be added to ServiceMonitor 13 | # 经过测试,servicemonitor标签添加prometheus operator的release标签即可正常监控 14 | additionalServiceMonitorLabels: 15 | release: prometheus-operator 16 | ##Custom Labels to be added to Prometheus Rules CRD 17 | additionalRulesLabels: 18 | release: prometheus-operator 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: {{ template "grafana.fullname" . }}-test 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "grafana.serviceAccountNameTest" . }} 18 | namespace: {{ .Release.Namespace }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-etcd/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-etcd 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-etcd 8 | k8s-app: etcd-server 9 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeEtcd.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeEtcd.service.port }} 19 | protocol: TCP 20 | {{- end }} -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-mon-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet" 6 | labels: 7 | app: rook-ceph-mon 8 | mon_cluster: rook-ceph 9 | rook_cluster: rook-ceph 10 | name: rook-ceph-mon 11 | namespace: rook-ceph 12 | spec: 13 | ports: 14 | - name: msgr1 15 | port: 6789 16 | protocol: TCP 17 | targetPort: 6789 18 | - name: msgr2 19 | port: 3300 20 | protocol: TCP 21 | targetPort: 3300 22 | selector: 23 | app: rook-ceph-mon 24 | mon_cluster: rook-ceph 25 | rook_cluster: rook-ceph 26 | sessionAffinity: None 27 | type: LoadBalancer 28 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 7 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: psp-{{ template "prometheus-node-exporter.fullname" . }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: {{ template "prometheus-node-exporter.fullname" . 
}} 15 | namespace: {{ .Release.Namespace }} 16 | {{- end }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/templates/linux.rules.yaml: -------------------------------------------------------------------------------- 1 | {{ define "linux.rules.yaml.tpl" }} 2 | groups: 3 | - name: linux.rules 4 | rules: 5 | - record: disk_usage_percent 6 | expr: (node_filesystem_avail_bytes{fstype !~ "selinuxfs|nfs|rpc_pipefs|rootfs|tmpfs",mountpoint !~ "/boot|/net|/selinux"} / node_filesystem_size_bytes{fstype !~ "selinuxfs|nfs|rpc_pipefs|rootfs|tmpfs",mountpoint !~ "/boot|/net|/selinux"})*100 7 | - alert: MySQLGaleraNotReady 8 | expr: mysql_global_status_wsrep_ready != 1 9 | for: 5m 10 | labels: 11 | severity: critical 12 | annotations: 13 | description: '{{ "{{" }}$labels.job{{ "}}" }} on {{ "{{" }}$labels.instance{{ "}}" }} is not ready.' 14 | summary: Galera cluster node not ready 15 | {{ end }} 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Provides easy monitoring definitions for Kubernetes services, and deployment and management of Prometheus instances.
3 | icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png 4 | engine: gotpl 5 | maintainers: 6 | - name: gianrubio 7 | email: gianrubio@gmail.com 8 | - name: anothertobi 9 | - name: vsliouniaev 10 | name: prometheus-operator 11 | sources: 12 | - https://github.com/coreos/prometheus-operator 13 | - https://coreos.com/operators/prometheus 14 | version: 5.14.1 15 | appVersion: 0.31.1 16 | home: https://github.com/coreos/prometheus-operator 17 | keywords: 18 | - operator 19 | - prometheus 20 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/core-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.coreDns.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-coredns 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-coredns 8 | jobLabel: coredns 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.coreDns.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.coreDns.service.targetPort }} 18 | selector: 19 | {{ include "prometheus-operator.rangeskipempty" .Values.coreDns.service.selector | indent 4 }} 20 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-scheduler/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "prometheus-operator.name" . 
}}-kube-scheduler 8 | k8s-app: kube-scheduler 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeScheduler.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeScheduler.service.port }} 19 | protocol: TCP 20 | {{- end }} -------------------------------------------------------------------------------- /helm/ceph-exporter/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | labels: 6 | app: {{ template "ceph-exporter.name" . }} 7 | component: ceph-exporter 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | chart: {{ template "ceph-exporter.chart" . }} 11 | name: {{ template "ceph-exporter.fullname" . }} 12 | namespace: "{{ .Release.Namespace }}" 13 | subsets: 14 | - addresses: 15 | {{- range .Values.serviceMonitor.endpoints }} 16 | - ip: {{ . }} 17 | {{- end }} 18 | ports: 19 | - name: {{ .Values.serviceMonitor.scheme }}-metrics 20 | port: {{ .Values.serviceMonitor.exporterPort }} 21 | protocol: TCP 22 | {{- end }} 23 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/values.yaml: -------------------------------------------------------------------------------- 1 | # where the mysql installed namespace 2 | namespaceSelector: monitoring 3 | # endports 4 | endpoints: ["192.168.105.92", "192.168.105.93", "192.168.105.94"] 5 | # on what port are the metrics exposed by redis 6 | metricsPortName: metrics 7 | metricsPort: 9100 8 | # Are we talking http or https? 
9 | scheme: http 10 | # default rules are in templates/node-exporter-servicemonitor.rules.yaml 11 | # prometheusRules: {} 12 | ## Custom Labels to be added to ServiceMonitor 13 | # 经过测试,servicemonitor标签添加prometheus operator的release标签即可正常监控 14 | additionalServiceMonitorLabels: 15 | release: prometheus-operator 16 | ##Custom Labels to be added to Prometheus Rules CRD 17 | additionalRulesLabels: 18 | release: prometheus-operator 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | app: {{ template "kube-state-metrics.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "kube-state-metrics.fullname" . }} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: {{ template "kube-state-metrics.fullname" . }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "kube-state-metrics.fullname" . }} 18 | namespace: {{ .Release.Namespace }} 19 | {{- end -}} 20 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "prometheus-operator.fullname" . }}-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "prometheus-operator.operator.serviceAccountName" . }} 16 | namespace: {{ .Release.Namespace }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "prometheus-operator.prometheus.serviceAccountName" . }} 16 | namespace: {{ .Release.Namespace }} 17 | {{- end }} 18 | 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.podSecurityPolicy.enabled -}} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | labels: 6 | app: {{ template "kube-state-metrics.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: psp-{{ template "kube-state-metrics.fullname" . 
}} 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: psp-{{ template "kube-state-metrics.fullname" . }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "kube-state-metrics.fullname" . }} 18 | namespace: {{ .Release.Namespace }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. 2 | The exposed metrics can be found here: 3 | https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics 4 | 5 | The metrics are exported on the HTTP endpoint /metrics on the listening port. 6 | In your case, {{ template "kube-state-metrics.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }}/metrics 7 | 8 | They are served either as plaintext or protobuf depending on the Accept header. 9 | They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. 
10 | 11 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | app: cephfs-provisioner 14 | spec: 15 | containers: 16 | - name: cephfs-provisioner 17 | image: "quay.io/external_storage/cephfs-provisioner:latest" 18 | env: 19 | - name: PROVISIONER_NAME 20 | value: ceph.com/cephfs 21 | command: 22 | - "/usr/local/bin/cephfs-provisioner" 23 | args: 24 | - "-id=cephfs-provisioner-1" 25 | - '-disable-ceph-namespace-isolation=true' 26 | serviceAccount: cephfs-provisioner 27 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | app: cephfs-provisioner 14 | spec: 15 | containers: 16 | - name: cephfs-provisioner 17 | image: "quay.io/external_storage/cephfs-provisioner:latest" 18 | env: 19 | - name: PROVISIONER_NAME 20 | value: ceph.com/cephfs 21 | command: 22 | - "/usr/local/bin/cephfs-provisioner" 23 | args: 24 | - "-id=cephfs-provisioner-1" 25 | - '-disable-ceph-namespace-isolation=true' 26 | serviceAccount: cephfs-provisioner 27 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-dns/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeDns.enabled }} 2 
| apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-dns 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-dns 8 | jobLabel: kube-dns 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics-dnsmasq 15 | port: 10054 16 | protocol: TCP 17 | targetPort: 10054 18 | - name: http-metrics-skydns 19 | port: 10055 20 | protocol: TCP 21 | targetPort: 10055 22 | selector: 23 | {{ include "prometheus-operator.rangeskipempty" .Values.kubeDns.service.selector | indent 4 }} 24 | {{- end }} -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | app: cephfs-provisioner 14 | spec: 15 | containers: 16 | - name: cephfs-provisioner 17 | image: "quay.io/external_storage/cephfs-provisioner:latest" 18 | env: 19 | - name: PROVISIONER_NAME 20 | value: ceph.com/cephfs 21 | command: 22 | - "/usr/local/bin/cephfs-provisioner" 23 | args: 24 | - "-id=cephfs-provisioner-1" 25 | - '-disable-ceph-namespace-isolation=true' 26 | serviceAccount: cephfs-provisioner 27 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template 
"prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "prometheus-operator.fullname" . }}-alertmanager 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "prometheus-operator.alertmanager.serviceAccountName" . }} 16 | namespace: {{ .Release.Namespace }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/rook-ceph-config-override.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: rook-config-override 5 | namespace: rook-ceph 6 | data: 7 | config: | 8 | [global] 9 | osd crush update on start = false 10 | osd pool default size = 2 11 | # public network = 172.16.0.0/16 12 | fsid = 590ceb83-9c51-481e-9487-6bf394e73a9f 13 | mon_initial_members = utyun-node1 14 | mon_host = 172.16.138.26,172.16.138.31,172.16.138.33 15 | auth_cluster_required = cephx 16 | auth_service_required = cephx 17 | auth_client_required = cephx 18 | filestore_xattr_use_omap = true 19 | # 开启了rbd的一些属性,而这些属性有的内核版本是不支持的,会导致map不到device的情况 20 | # # 可以在创建时指定feature(我们就是这样做的),也可以在ceph配置文件中关闭这些新属性:rbd_default_features = 2 21 | rbd_default_features = 2 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | ## How to contribute to this chart 3 | 1. Fork this repository, develop and test your Chart. 4 | 1. Bump the chart version for every change. 5 | 1. Ensure PR title has the prefix `[stable/prometheus-operator]` 6 | 1. 
When making changes to values.yaml, update the files in `ci/` by running `hack/update-ci.sh` 7 | 1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories 8 | 1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes. 9 | 1. Check for changes of RBAC rules. 10 | 1. Check for changes in CRD specs. 11 | 1. PR must pass the linter (`helm lint`) -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.monitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | labels: 7 | app: {{ template "kube-state-metrics.name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | {{- if .Values.prometheus.monitor.additionalLabels }} 12 | {{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} 13 | {{- end }} 14 | spec: 15 | selector: 16 | matchLabels: 17 | app: {{ template "kube-state-metrics.name" . 
}} 18 | release: {{ .Release.Name }} 19 | endpoints: 20 | - port: http 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator-psp 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "prometheus-operator.fullname" . }}-operator-psp 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "prometheus-operator.operator.serviceAccountName" . }} 16 | namespace: {{ .Release.Namespace }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: {{ template "prometheus-operator.fullname" . }}-prometheus-psp 13 | subjects: 14 | - kind: ServiceAccount 15 | name: {{ template "prometheus-operator.prometheus.serviceAccountName" . 
}} 16 | namespace: {{ .Release.Namespace }} 17 | {{- end }} 18 | 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-controller-manager/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-controller-manager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-controller-manager 8 | k8s-app: kube-controller-manager 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | subsets: 12 | - addresses: 13 | {{- range .Values.kubeControllerManager.endpoints }} 14 | - ip: {{ . }} 15 | {{- end }} 16 | ports: 17 | - name: http-metrics 18 | port: {{ .Values.kubeControllerManager.service.port }} 19 | protocol: TCP 20 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ template "grafana.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- with .Values.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | name: {{ template "grafana.fullname" . 
}}-clusterrole 15 | {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} 16 | rules: 17 | - apiGroups: [""] # "" indicates the core API group 18 | resources: ["configmaps"] 19 | verbs: ["get", "watch", "list"] 20 | {{- else }} 21 | rules: [] 22 | {{- end}} 23 | {{- end}} 24 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-{{ template "prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | data: 10 | {{- if .Values.alertmanager.tplConfig }} 11 | alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . | b64enc | quote }} 12 | {{- else }} 13 | alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }} 14 | {{- end}} 15 | {{- range $key, $val := .Values.alertmanager.templateFiles }} 16 | {{ $key }}: {{ $val | b64enc | quote }} 17 | {{- end }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/monitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.monitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-node-exporter.fullname" . }} 6 | labels: {{ include "prometheus-node-exporter.labels" . 
| indent 4 }} 7 | {{- if .Values.prometheus.monitor.additionalLabels }} 8 | {{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} 9 | {{- end }} 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-node-exporter.name" . }} 14 | release: {{ .Release.Name }} 15 | endpoints: 16 | - port: metrics 17 | {{- if .Values.prometheus.monitor.scrapeTimeout }} 18 | scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-etcd/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeEtcd.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-etcd 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-etcd 8 | jobLabel: kube-etcd 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeEtcd.service.port }} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeEtcd.service.targetPort }} 18 | {{- if .Values.kubeEtcd.endpoints }}{{- else }} 19 | selector: 20 | {{ include "prometheus-operator.rangeskipempty" .Values.kubeEtcd.service.selector | indent 4 }} 21 | {{- end }} 22 | type: ClusterIP 23 | {{- end -}} -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "mysql-servicemonitor.fullname" . }} 6 | labels: 7 | app: {{ template "mysql-servicemonitor.name" . 
}} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | prometheus: {{ .Release.Name }} 12 | {{- if .Values.additionalServiceMonitorLabels }} 13 | {{ toYaml .Values.additionalServiceMonitorLabels | indent 4 }} 14 | {{- end }} 15 | subsets: 16 | - addresses: 17 | {{- range .Values.endpoints }} 18 | - ip: {{ . }} 19 | {{- end }} 20 | ports: 21 | - name: {{ .Values.metricsPortName }} 22 | port: {{ int .Values.metricsPort }} 23 | protocol: TCP 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.admin.existingSecret }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "grafana.name" . }} 9 | chart: {{ template "grafana.chart" . }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | type: Opaque 13 | data: 14 | admin-user: {{ .Values.adminUser | b64enc | quote }} 15 | {{- if .Values.adminPassword }} 16 | admin-password: {{ .Values.adminPassword | b64enc | quote }} 17 | {{- else }} 18 | admin-password: {{ randAlphaNum 40 | b64enc | quote }} 19 | {{- end }} 20 | {{- if not .Values.ldap.existingSecret }} 21 | ldap-toml: {{ .Values.ldap.config | b64enc | quote }} 22 | {{- end }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "redis-servicemonitor.fullname" . }} 6 | labels: 7 | app: {{ template "redis-servicemonitor.name" . 
}} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | prometheus: {{ .Release.Name }} 12 | {{- if .Values.additionalServiceMonitorLabels }} 13 | {{ toYaml .Values.additionalServiceMonitorLabels | indent 4 }} 14 | {{- end }} 15 | subsets: 16 | - addresses: 17 | {{- range .Values.endpoints }} 18 | - ip: {{ . }} 19 | {{- end }} 20 | ports: 21 | - name: {{ .Values.metricsPortName }} 22 | port: {{ .Values.metricsPort }} 23 | protocol: TCP 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test-podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | apiVersion: extensions/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "grafana.fullname" . }}-test 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | spec: 12 | allowPrivilegeEscalation: true 13 | privileged: false 14 | hostNetwork: false 15 | hostIPC: false 16 | hostPID: false 17 | fsGroup: 18 | rule: RunAsAny 19 | seLinux: 20 | rule: RunAsAny 21 | supplementalGroups: 22 | rule: RunAsAny 23 | runAsUser: 24 | rule: RunAsAny 25 | volumes: 26 | - configMap 27 | - downwardAPI 28 | - emptyDir 29 | - projected 30 | - secret 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "zookeeper-servicemonitor.fullname" . }} 6 | labels: 7 | app: {{ template "zookeeper-servicemonitor.name" . 
}} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | prometheus: {{ .Release.Name }} 12 | {{- if .Values.additionalServiceMonitorLabels }} 13 | {{ toYaml .Values.additionalServiceMonitorLabels | indent 4 }} 14 | {{- end }} 15 | subsets: 16 | - addresses: 17 | {{- range .Values.endpoints }} 18 | - ip: {{ . }} 19 | {{- end }} 20 | ports: 21 | - name: {{ .Values.metricsPortName }} 22 | port: {{ .Values.metricsPort }} 23 | protocol: TCP 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.endpoints }} 2 | apiVersion: v1 3 | kind: Endpoints 4 | metadata: 5 | name: {{ template "node-exporter-servicemonitor.fullname" . }} 6 | labels: 7 | app: {{ template "node-exporter-servicemonitor.name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | prometheus: {{ .Release.Name }} 12 | {{- if .Values.additionalServiceMonitorLabels }} 13 | {{ toYaml .Values.additionalServiceMonitorLabels | indent 4 }} 14 | {{- end }} 15 | subsets: 16 | - addresses: 17 | {{- range .Values.endpoints }} 18 | - ip: {{ . }} 19 | {{- end }} 20 | ports: 21 | - name: {{ .Values.metricsPortName }} 22 | port: {{ .Values.metricsPort }} 23 | protocol: TCP 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "grafana.fullname" . 
}}-clusterrolebinding 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ template "grafana.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | {{- with .Values.annotations }} 12 | annotations: 13 | {{ toYaml . | indent 4 }} 14 | {{- end }} 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ template "grafana.serviceAccountName" . }} 18 | namespace: {{ .Release.Namespace }} 19 | roleRef: 20 | kind: ClusterRole 21 | name: {{ template "grafana.fullname" . }}-clusterrole 22 | apiGroup: rbac.authorization.k8s.io 23 | {{- end -}} 24 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/rbd/rbac/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rbd-provisioner 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["persistentvolumes"] 8 | verbs: ["get", "list", "watch", "create", "delete"] 9 | - apiGroups: [""] 10 | resources: ["persistentvolumeclaims"] 11 | verbs: ["get", "list", "watch", "update"] 12 | - apiGroups: ["storage.k8s.io"] 13 | resources: ["storageclasses"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["events"] 17 | verbs: ["create", "update", "patch"] 18 | - apiGroups: [""] 19 | resources: ["services"] 20 | resourceNames: ["kube-dns","coredns"] 21 | verbs: ["list", "get"] 22 | - apiGroups: [""] 23 | resources: ["endpoints"] 24 | verbs: ["get", "list", "watch", "create", "update", "patch"] 25 | -------------------------------------------------------------------------------- /kubernetes-yaml/ceph/cephfs/rbac/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: kube-system 6 | rules: 7 | - apiGroups: [""] 8 | resources: 
["persistentvolumes"] 9 | verbs: ["get", "list", "watch", "create", "delete"] 10 | - apiGroups: [""] 11 | resources: ["persistentvolumeclaims"] 12 | verbs: ["get", "list", "watch", "update"] 13 | - apiGroups: ["storage.k8s.io"] 14 | resources: ["storageclasses"] 15 | verbs: ["get", "list", "watch"] 16 | - apiGroups: [""] 17 | resources: ["events"] 18 | verbs: ["create", "update", "patch"] 19 | - apiGroups: [""] 20 | resources: ["services"] 21 | resourceNames: ["kube-dns","coredns"] 22 | verbs: ["list", "get"] 23 | - apiGroups: [""] 24 | resources: ["secrets"] 25 | verbs: ["create", "get", "delete"] 26 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "prometheus-node-exporter.fullname" . }} 5 | {{- if .Values.service.annotations }} 6 | annotations: 7 | {{ toYaml .Values.service.annotations | indent 4 }} 8 | {{- end }} 9 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.port }} 14 | {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} 15 | nodePort: {{ .Values.service.nodePort }} 16 | {{- end }} 17 | targetPort: {{ .Values.service.targetPort }} 18 | protocol: TCP 19 | name: metrics 20 | selector: 21 | app: {{ template "prometheus-node-exporter.name" . 
}} 22 | release: {{ .Release.Name }} 23 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-scheduler/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeScheduler.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-scheduler 8 | jobLabel: kube-scheduler 9 | {{ include "prometheus-operator.labels" . | indent 4 }} 10 | namespace: kube-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: http-metrics 15 | port: {{ .Values.kubeScheduler.service.port}} 16 | protocol: TCP 17 | targetPort: {{ .Values.kubeScheduler.service.targetPort}} 18 | {{- if .Values.kubeScheduler.endpoints }}{{- else }} 19 | selector: 20 | {{ include "prometheus-operator.rangeskipempty" .Values.kubeScheduler.service.selector | indent 4 }} 21 | {{- end }} 22 | type: ClusterIP 23 | {{- end -}} -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/cephfs/rbac/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["persistentvolumes"] 9 | verbs: ["get", "list", "watch", "create", "delete"] 10 | - apiGroups: [""] 11 | resources: ["persistentvolumeclaims"] 12 | verbs: ["get", "list", "watch", "update"] 13 | - apiGroups: ["storage.k8s.io"] 14 | resources: ["storageclasses"] 15 | verbs: ["get", "list", "watch"] 16 | - apiGroups: [""] 17 | resources: ["events"] 18 | verbs: ["create", "update", "patch"] 19 | - apiGroups: [""] 20 | resources: ["services"] 21 | resourceNames: ["kube-dns","coredns"] 22 | verbs: ["list", "get"] 23 
| - apiGroups: [""] 24 | resources: ["secrets"] 25 | verbs: ["create", "get", "delete"] 26 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "grafana.name" . }} 9 | chart: {{ template "grafana.chart" . }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | {{- with .Values.persistence.annotations }} 13 | annotations: 14 | {{ toYaml . | indent 4 }} 15 | {{- end }} 16 | spec: 17 | accessModes: 18 | {{- range .Values.persistence.accessModes }} 19 | - {{ . | quote }} 20 | {{- end }} 21 | resources: 22 | requests: 23 | storage: {{ .Values.persistence.size | quote }} 24 | storageClassName: {{ .Values.persistence.storageClassName }} 25 | {{- end -}} 26 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cephfs/rbac/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: cephfs-provisioner 5 | namespace: rook-ceph 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["persistentvolumes"] 9 | verbs: ["get", "list", "watch", "create", "delete"] 10 | - apiGroups: [""] 11 | resources: ["persistentvolumeclaims"] 12 | verbs: ["get", "list", "watch", "update"] 13 | - apiGroups: ["storage.k8s.io"] 14 | resources: ["storageclasses"] 15 | verbs: ["get", "list", "watch"] 16 | - apiGroups: [""] 17 | resources: ["events"] 18 | verbs: ["create", "update", "patch"] 19 | - apiGroups: [""] 20 | resources: ["services"] 21 | 
resourceNames: ["kube-dns","coredns"] 22 | verbs: ["list", "get"] 23 | - apiGroups: [""] 24 | resources: ["secrets"] 25 | verbs: ["create", "get", "delete"] 26 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/configmap-dashboard-provider.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.sidecar.dashboards.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | labels: 6 | app: {{ template "grafana.name" . }} 7 | chart: {{ template "grafana.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- with .Values.annotations }} 11 | annotations: 12 | {{ toYaml . | indent 4 }} 13 | {{- end }} 14 | name: {{ template "grafana.fullname" . }}-config-dashboards 15 | namespace: {{ .Release.Namespace }} 16 | data: 17 | provider.yaml: |- 18 | apiVersion: 1 19 | providers: 20 | - name: 'default' 21 | orgId: 1 22 | folder: '' 23 | type: file 24 | disableDeletion: false 25 | options: 26 | path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} 27 | {{- end}} 28 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.podDisruptionBudget.enabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | spec: 10 | {{- if .Values.prometheus.podDisruptionBudget.minAvailable }} 11 | minAvailable: {{ .Values.prometheus.podDisruptionBudget.minAvailable }} 12 | {{- end }} 13 | {{- if .Values.prometheus.podDisruptionBudget.maxUnavailable }} 14 | maxUnavailable: {{ .Values.prometheus.podDisruptionBudget.maxUnavailable }} 15 | {{- end }} 16 | selector: 17 | matchLabels: 18 | app: prometheus 19 | prometheus: {{ template "prometheus-operator.fullname" . }}-prometheus 20 | {{- end }} -------------------------------------------------------------------------------- /kubernetes-yaml/dashboard/user/get_token.sh: -------------------------------------------------------------------------------- 1 | namespace=$1 2 | user=$2 3 | 4 | [ -z $user ] && exit 1 5 | 6 | cd `dirname $0` 7 | pwd_path=$(pwd) 8 | 9 | #token获取: 10 | token=$(kubectl get -n $namespace secret $(kubectl get -n $namespace serviceaccount ${namespace}-${user} \ 11 | -ojsonpath='{.secrets[].name}') -o jsonpath='{.data.token}' | base64 -d) 12 | echo $token > ${namespace}/${user}-token.txt 13 | 14 | #生成config 15 | 16 | cat > $namespace/${user}-config <&1 |grep "key = " |awk '{print $3'} |xargs echo -n > /tmp/secret 9 | kubectl create secret generic ceph-admin-secret --from-file=/tmp/secret --namespace=kube-system 10 | ``` 11 | 12 | # 2. 
创建cephfs 13 | creatring pools 14 | 15 | ``` 16 | [root@ceph-1 ceph]# ceph osd pool create cephfs_data 64 17 | pool 'cephfs_data' created 18 | [root@ceph-1 ceph]# ceph osd pool create cephfs_metadata 64 19 | pool 'cephfs_metadata' created 20 | ``` 21 | 22 | creating a filesystem 23 | 24 | ``` 25 | [root@ceph-1 ceph]# ceph fs new cephfs cephfs_metadata cephfs_data 26 | new fs with metadata pool 2 and data pool 1 27 | [root@ceph-1 ceph]# ceph fs ls 28 | name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ] 29 | ``` 30 | 31 | 一旦文件系统创建好之后,mds的状态就会发生变化,如下所示: 32 | 33 | ``` 34 | [root@ceph-1 ceph]# ceph mds stat 35 | cephfs-1/1/1 up {0=ceph-2=up:active}, 2 up:standby 36 | ``` 37 | 38 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/grafana/configmaps-datasources.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-grafana-datasource 6 | labels: 7 | {{ $.Values.grafana.sidecar.datasources.label }}: "1" 8 | app: {{ template "prometheus-operator.name" $ }}-grafana 9 | {{ include "prometheus-operator.labels" $ | indent 4 }} 10 | data: 11 | datasource.yaml: |- 12 | apiVersion: 1 13 | datasources: 14 | {{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} 15 | - name: Prometheus 16 | type: prometheus 17 | url: http://{{ template "prometheus-operator.fullname" . 
}}-prometheus:9090/{{ trimPrefix "/" .Values.prometheus.prometheusSpec.routePrefix }} 18 | access: proxy 19 | isDefault: true 20 | {{- end }} 21 | {{- if .Values.grafana.additionalDataSources }} 22 | {{ toYaml .Values.grafana.additionalDataSources | indent 4}} 23 | {{- end }} 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/metrics 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | # This permission are not in the prometheus-operator repo 19 | # they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml 20 | - apiGroups: [""] 21 | resources: 22 | - nodes 23 | - nodes/proxy 24 | - services 25 | - endpoints 26 | - pods 27 | verbs: ["get", "list", "watch"] 28 | - apiGroups: 29 | - extensions 30 | resources: 31 | - ingresses 32 | verbs: ["get", "list", "watch"] 33 | - nonResourceURLs: ["/metrics"] 34 | verbs: ["get"] 35 | {{- end }} -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: {{ template "prometheus-operator.apiVersion" . }} 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: {{ template "zookeeper-servicemonitor.name" . 
}} 6 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 7 | heritage: "{{ .Release.Service }}" 8 | release: "{{ .Release.Name }}" 9 | prometheus: {{ .Release.Name }} 10 | {{- if .Values.additionalServiceMonitorLabels }} 11 | {{ toYaml .Values.additionalServiceMonitorLabels | indent 4 }} 12 | {{- end }} 13 | name: {{ template "zookeeper-servicemonitor.fullname" . }} 14 | spec: 15 | selector: 16 | matchLabels: 17 | app: {{ template "zookeeper-servicemonitor.name" . }} 18 | chart: {{ template "zookeeper-servicemonitor.chart" . }} 19 | release: {{ .Release.Name }} 20 | namespaceSelector: 21 | matchNames: 22 | - {{ .Values.namespaceSelector }} 23 | endpoints: 24 | - port: "{{ .Values.metricsPortName }}" 25 | interval: 15s 26 | #bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 27 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podSecurityPolicy.enabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "kube-state-metrics.fullname" . }} 6 | labels: 7 | app: {{ template "kube-state-metrics.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | {{- if .Values.podSecurityPolicy.annotations }} 12 | annotations: 13 | {{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} 14 | {{- end }} 15 | spec: 16 | privileged: false 17 | volumes: 18 | - 'secret' 19 | hostNetwork: false 20 | hostIPC: false 21 | hostPID: false 22 | runAsUser: 23 | rule: 'MustRunAsNonRoot' 24 | seLinux: 25 | rule: 'RunAsAny' 26 | supplementalGroups: 27 | rule: 'MustRunAs' 28 | ranges: 29 | # Forbid adding the root group. 30 | - min: 1 31 | max: 65535 32 | fsGroup: 33 | rule: 'MustRunAs' 34 | ranges: 35 | # Forbid adding the root group. 
36 | - min: 1 37 | max: 65535 38 | readOnlyRootFilesystem: false 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/cluster-external.yaml: -------------------------------------------------------------------------------- 1 | ################################################################################################################# 2 | # Define the settings for the rook-ceph-external cluster with common settings for a production cluster. 3 | 4 | # For example, if Rook is not managing any existing cluster in the 'rook-ceph' namespace do: 5 | # kubectl create -f common.yaml 6 | # kubectl create -f operator.yaml 7 | # kubectl create -f cluster-external.yaml 8 | 9 | # If there is already a cluster managed by Rook in 'rook-ceph' then do: 10 | # kubectl create -f common-external.yaml 11 | # kubectl create -f cluster-external.yaml 12 | ################################################################################################################# 13 | apiVersion: ceph.rook.io/v1 14 | kind: CephCluster 15 | metadata: 16 | name: rook-ceph-external 17 | namespace: rook-ceph 18 | spec: 19 | external: 20 | enable: true 21 | dataDirHostPath: /var/lib/rook 22 | # providing an image is optional, do this if you want to create other CRs (rgw, mds, nfs) 23 | cephVersion: 24 | image: ceph/ceph:v14.2.4-20190917 # MUST match external cluster version 25 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/servicemonitors.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.additionalServiceMonitors }} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- range .Values.prometheus.additionalServiceMonitors }} 6 | - apiVersion: {{ printf "%s/v1" ($.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 7 | 
kind: ServiceMonitor 8 | metadata: 9 | name: {{ .name }} 10 | labels: 11 | app: {{ template "prometheus-operator.name" $ }}-prometheus 12 | {{ include "prometheus-operator.labels" $ | indent 8 }} 13 | {{- if .additionalLabels }} 14 | {{ toYaml .additionalLabels | indent 8 }} 15 | {{- end }} 16 | spec: 17 | endpoints: 18 | {{ toYaml .endpoints | indent 8 }} 19 | {{- if .jobLabel }} 20 | jobLabel: {{ .jobLabel }} 21 | {{- end }} 22 | {{- if .namespaceSelector }} 23 | namespaceSelector: 24 | {{ toYaml .namespaceSelector | indent 8 }} 25 | {{- end }} 26 | selector: 27 | {{ toYaml .selector | indent 8 }} 28 | {{- if .targetLabels }} 29 | targetLabels: 30 | {{ toYaml .targetLabels | indent 8 }} 31 | {{- end }} 32 | {{- end }} 33 | {{- end }} 34 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/node-exporter/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.nodeExporter.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-node-exporter 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-node-exporter 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: {{ .Values.nodeExporter.jobLabel }} 11 | selector: 12 | matchLabels: 13 | app: prometheus-node-exporter 14 | release: {{ .Release.Name }} 15 | endpoints: 16 | - port: metrics 17 | {{- if .Values.nodeExporter.serviceMonitor.interval }} 18 | interval: {{ .Values.nodeExporter.serviceMonitor.interval }} 19 | {{- end }} 20 | {{- if .Values.nodeExporter.serviceMonitor.metricRelabelings }} 21 | metricRelabelings: 22 | {{ tpl (toYaml .Values.nodeExporter.serviceMonitor.metricRelabelings | indent 4) . 
}} 23 | {{- end }} 24 | {{- if .Values.nodeExporter.serviceMonitor.relabelings }} 25 | relabelings: 26 | {{ toYaml .Values.nodeExporter.serviceMonitor.relabelings | indent 4 }} 27 | {{- end }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create -}} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: Role 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "grafana.name" . }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | heritage: {{ .Release.Service }} 11 | release: {{ .Release.Name }} 12 | {{- with .Values.annotations }} 13 | annotations: 14 | {{ toYaml . | indent 4 }} 15 | {{- end }} 16 | {{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled)) }} 17 | rules: 18 | {{- if .Values.rbac.pspEnabled }} 19 | - apiGroups: ['extensions'] 20 | resources: ['podsecuritypolicies'] 21 | verbs: ['use'] 22 | resourceNames: [{{ template "grafana.fullname" . 
}}] 23 | {{- end }} 24 | {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} 25 | - apiGroups: [""] # "" indicates the core API group 26 | resources: ["configmaps"] 27 | verbs: ["get", "watch", "list"] 28 | {{- end }} 29 | {{- else }} 30 | rules: [] 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-external-ceph/rook-ceph-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rook-ceph-mgr-dashboard-external-http 5 | namespace: rook-ceph 6 | spec: 7 | ports: 8 | - name: dashboard 9 | port: 7000 10 | protocol: TCP 11 | targetPort: 7000 12 | type: ClusterIP 13 | --- 14 | apiVersion: v1 15 | kind: Endpoints 16 | metadata: 17 | name: rook-ceph-mgr-dashboard-external-http 18 | namespace: rook-ceph 19 | subsets: 20 | - addresses: 21 | - ip: 172.16.138.27 22 | - ip: 172.16.138.32 23 | - ip: 172.16.138.33 24 | ports: 25 | - name: dashboard 26 | port: 7000 27 | protocol: TCP 28 | --- 29 | apiVersion: extensions/v1beta1 30 | kind: Ingress 31 | metadata: 32 | annotations: 33 | cert-manager.io/cluster-issuer: letsencrypt-prod 34 | kubernetes.io/tls-acme: "true" 35 | name: rook-ceph-mgr-dashboard 36 | namespace: rook-ceph 37 | spec: 38 | rules: 39 | - host: ceph-dashboard.utyun.com 40 | http: 41 | paths: 42 | - backend: 43 | serviceName: rook-ceph-mgr-dashboard-external-http 44 | servicePort: 7000 45 | path: / 46 | tls: 47 | - hosts: 48 | - ceph-dashboard.utyun.com 49 | secretName: tls-ceph-dashboard-utyun-com 50 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-state-metrics/serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeStateMetrics.enabled }} 2 | apiVersion: {{ 
printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-state-metrics 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-state-metrics 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: app 11 | endpoints: 12 | - port: http 13 | {{- if .Values.kubeStateMetrics.serviceMonitor.interval }} 14 | interval: {{ .Values.kubeStateMetrics.serviceMonitor.interval }} 15 | {{- end }} 16 | honorLabels: true 17 | {{- if .Values.kubeStateMetrics.serviceMonitor.metricRelabelings }} 18 | metricRelabelings: 19 | {{ tpl (toYaml .Values.kubeStateMetrics.serviceMonitor.metricRelabelings | indent 4) . }} 20 | {{- end }} 21 | {{- if .Values.kubeStateMetrics.serviceMonitor.relabelings }} 22 | relabelings: 23 | {{ toYaml .Values.kubeStateMetrics.serviceMonitor.relabelings | indent 4 }} 24 | {{- end }} 25 | selector: 26 | matchLabels: 27 | app: kube-state-metrics 28 | release: "{{ .Release.Name }}" 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/grafana/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.grafana.enabled .Values.grafana.serviceMonitor.selfMonitor }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-grafana 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-grafana 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | release: {{ .Release.Name | quote }} 14 | namespaceSelector: 15 | matchNames: 16 | - {{ .Release.Namespace | quote }} 17 | endpoints: 18 | - port: service 19 | {{- if .Values.grafana.serviceMonitor.interval }} 20 | interval: {{ .Values.grafana.serviceMonitor.interval }} 21 | {{- end }} 22 | path: "/metrics" 23 | {{- if .Values.grafana.serviceMonitor.metricRelabelings }} 24 | metricRelabelings: 25 | {{ tpl (toYaml .Values.grafana.serviceMonitor.metricRelabelings | indent 6) . }} 26 | {{- end }} 27 | {{- if .Values.grafana.serviceMonitor.relabelings }} 28 | relabelings: 29 | {{ toYaml .Values.grafana.serviceMonitor.relabelings | indent 6 }} 30 | {{- end }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/core-dns/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.coreDns.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-coredns 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-coredns 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: jobLabel 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-operator.name" . 
}}-coredns 14 | release: {{ .Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - "kube-system" 18 | endpoints: 19 | - port: http-metrics 20 | {{- if .Values.coreDns.serviceMonitor.interval}} 21 | interval: {{ .Values.coreDns.serviceMonitor.interval }} 22 | {{- end }} 23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | {{- if .Values.coreDns.serviceMonitor.metricRelabelings }} 25 | metricRelabelings: 26 | {{ tpl (toYaml .Values.coreDns.serviceMonitor.metricRelabelings | indent 4) . }} 27 | {{- end }} 28 | {{- if .Values.coreDns.serviceMonitor.relabelings }} 29 | relabelings: 30 | {{ toYaml .Values.coreDns.serviceMonitor.relabelings | indent 4 }} 31 | {{- end }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "grafana.fullname" . -}} 3 | {{- $servicePort := .Values.service.port -}} 4 | {{- $ingressPath := .Values.ingress.path -}} 5 | apiVersion: extensions/v1beta1 6 | kind: Ingress 7 | metadata: 8 | name: {{ $fullName }} 9 | namespace: {{ .Release.Namespace }} 10 | labels: 11 | app: {{ template "grafana.name" . }} 12 | chart: {{ template "grafana.chart" . }} 13 | release: {{ .Release.Name }} 14 | heritage: {{ .Release.Service }} 15 | {{- if .Values.ingress.labels }} 16 | {{ toYaml .Values.ingress.labels | indent 4 }} 17 | {{- end }} 18 | {{- with .Values.ingress.annotations }} 19 | annotations: 20 | {{ toYaml . | indent 4 }} 21 | {{- end }} 22 | spec: 23 | {{- if .Values.ingress.tls }} 24 | tls: 25 | {{- range .Values.ingress.tls }} 26 | - hosts: 27 | {{- range .hosts }} 28 | - {{ . | quote }} 29 | {{- end }} 30 | secretName: {{ .secretName }} 31 | {{- end }} 32 | {{- end }} 33 | rules: 34 | {{- range .Values.ingress.hosts }} 35 | - host: {{ . 
}} 36 | http: 37 | paths: 38 | - path: {{ $ingressPath }} 39 | backend: 40 | serviceName: {{ $fullName }} 41 | servicePort: {{ $servicePort }} 42 | {{- end }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/kube-state-metrics/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "kube-state-metrics.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "kube-state-metrics.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create the name of the service account to use 29 | */}} 30 | {{- define "kube-state-metrics.serviceAccountName" -}} 31 | {{- if .Values.serviceAccount.create -}} 32 | {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} 33 | {{- else -}} 34 | {{ default "default" .Values.serviceAccount.name }} 35 | {{- end -}} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /helm/ceph-exporter/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. 
Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "ceph-exporter.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch the status of by running 'kubectl get svc -w {{ template "ceph-exporter.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "ceph-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "ceph-exporter.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
{{- /* NOTE(review): branch previously stopped here, leaving the user with a
   POD_NAME and no way to reach the service; lines below mirror the sibling
   prometheus-node-exporter chart's NOTES.txt. Confirm 8080:80 matches this
   chart's container port. */}}
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}
--------------------------------------------------------------------------------
/helm/mysql-servicemonitor/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mysql-servicemonitor.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "mysql-servicemonitor.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 29 | */}} 30 | {{- define "prometheus-operator.apiVersion" -}} 31 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 32 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 33 | {{- else -}} 34 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 35 | {{- end -}} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "redis-servicemonitor.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "redis-servicemonitor.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 29 | */}} 30 | {{- define "prometheus-operator.apiVersion" -}} 31 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 32 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 33 | {{- else -}} 34 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 35 | {{- end -}} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 8 | You can watch the status of by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus-node-exporter.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-node-exporter.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | echo "Visit http://127.0.0.1:8080 to use your application" 14 | kubectl port-forward $POD_NAME 8080:80 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceMonitor.selfMonitor }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | endpoints: 11 | - port: http 12 | honorLabels: true 13 | {{- if .Values.prometheusOperator.serviceMonitor.interval }} 14 | interval: {{ .Values.prometheusOperator.serviceMonitor.interval }} 15 | {{- end }} 16 | {{- if .Values.prometheusOperator.serviceMonitor.metricRelabelings }} 17 | metricRelabelings: 18 | {{ tpl (toYaml .Values.prometheusOperator.serviceMonitor.metricRelabelings | indent 6) . }} 19 | {{- end }} 20 | {{- if .Values.prometheusOperator.serviceMonitor.relabelings }} 21 | relabelings: 22 | {{ toYaml .Values.prometheusOperator.serviceMonitor.relabelings | indent 6 }} 23 | {{- end }} 24 | selector: 25 | matchLabels: 26 | app: {{ template "prometheus-operator.name" . 
}}-operator 27 | release: {{ .Release.Name | quote }} 28 | namespaceSelector: 29 | matchNames: 30 | - {{ .Release.Namespace | quote }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.selfMonitor }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: {{ template "prometheus-operator.name" . }}-prometheus 13 | release: {{ .Release.Name | quote }} 14 | namespaceSelector: 15 | matchNames: 16 | - {{ .Release.Namespace | quote }} 17 | endpoints: 18 | - port: web 19 | {{- if .Values.prometheus.serviceMonitor.interval }} 20 | interval: {{ .Values.prometheus.serviceMonitor.interval }} 21 | {{- end }} 22 | path: "{{ trimSuffix "/" .Values.prometheus.prometheusSpec.routePrefix }}/metrics" 23 | {{- if .Values.prometheus.serviceMonitor.metricRelabelings }} 24 | metricRelabelings: 25 | {{ tpl (toYaml .Values.prometheus.serviceMonitor.metricRelabelings | indent 6) . 
}} 26 | {{- end }} 27 | {{- if .Values.prometheus.serviceMonitor.relabelings }} 28 | relabelings: 29 | {{ toYaml .Values.prometheus.serviceMonitor.relabelings | indent 6 }} 30 | {{- end }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "nginx-ingress-servicemonitor.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "nginx-ingress-servicemonitor.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 29 | */}} 30 | {{- define "prometheus-operator.apiVersion" -}} 31 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 32 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 33 | {{- else -}} 34 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 35 | {{- end -}} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/templates/_helpers.tpl: 
-------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "node-exporter-servicemonitor.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "node-exporter-servicemonitor.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 29 | */}} 30 | {{- define "prometheus-operator.apiVersion" -}} 31 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 32 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 33 | {{- else -}} 34 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 35 | {{- end -}} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceMonitor.selfMonitor }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template 
"prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: {{ template "prometheus-operator.name" . }}-alertmanager 13 | release: {{ .Release.Name | quote }} 14 | namespaceSelector: 15 | matchNames: 16 | - {{ .Release.Namespace | quote }} 17 | endpoints: 18 | - port: web 19 | {{- if .Values.alertmanager.serviceMonitor.interval }} 20 | interval: {{ .Values.alertmanager.serviceMonitor.interval }} 21 | {{- end }} 22 | path: "{{ trimSuffix "/" .Values.alertmanager.alertmanagerSpec.routePrefix }}/metrics" 23 | {{- if .Values.alertmanager.serviceMonitor.metricRelabelings }} 24 | metricRelabelings: 25 | {{ tpl (toYaml .Values.alertmanager.serviceMonitor.metricRelabelings | indent 6) . }} 26 | {{- end }} 27 | {{- if .Values.alertmanager.serviceMonitor.relabelings }} 28 | relabelings: 29 | {{ toYaml .Values.alertmanager.serviceMonitor.relabelings | indent 6 }} 30 | {{- end }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/rules/node-time.yaml: -------------------------------------------------------------------------------- 1 | # Generated from 'node-time' group from https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml 2 | # Do not change in-place! In order to change this file first read following link: 3 | # https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack 4 | {{- if and .Values.defaultRules.create }} 5 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 6 | kind: PrometheusRule 7 | metadata: 8 | name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "node-time" | trunc 63 | trimSuffix "-" }} 9 | labels: 10 | app: {{ template "prometheus-operator.name" . 
}} 11 | {{ include "prometheus-operator.labels" . | indent 4 }} 12 | {{- if .Values.defaultRules.labels }} 13 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 14 | {{- end }} 15 | {{- if .Values.defaultRules.annotations }} 16 | annotations: 17 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 18 | {{- end }} 19 | spec: 20 | groups: 21 | - name: node-time 22 | rules: 23 | - alert: ClockSkewDetected 24 | annotations: 25 | message: Clock skew detected on node-exporter {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}}. Ensure NTP is configured correctly on this host. 26 | expr: abs(node_timex_offset_seconds{job="node-exporter"}) > 0.03 27 | for: 2m 28 | labels: 29 | severity: warning 30 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | privileged: false 11 | # Required to prevent escalations to root. 12 | # allowPrivilegeEscalation: false 13 | # This is redundant with non-root + disallow privilege escalation, 14 | # but we can provide it for defense in depth. 15 | #requiredDropCapabilities: 16 | # - ALL 17 | # Allow core volume types. 18 | volumes: 19 | - 'configMap' 20 | - 'emptyDir' 21 | - 'projected' 22 | - 'secret' 23 | - 'downwardAPI' 24 | - 'persistentVolumeClaim' 25 | hostNetwork: false 26 | hostIPC: false 27 | hostPID: false 28 | runAsUser: 29 | # Permits the container to run with root privileges as well. 
30 | rule: 'RunAsAny' 31 | seLinux: 32 | # This policy assumes the nodes are using AppArmor rather than SELinux. 33 | rule: 'RunAsAny' 34 | supplementalGroups: 35 | rule: 'MustRunAs' 36 | ranges: 37 | # Forbid adding the root group. 38 | - min: 0 39 | max: 65535 40 | fsGroup: 41 | rule: 'MustRunAs' 42 | ranges: 43 | # Forbid adding the root group. 44 | - min: 0 45 | max: 65535 46 | readOnlyRootFilesystem: false 47 | {{- end }} 48 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | {{- if .Values.rbac.pspEnabled }} 3 | apiVersion: extensions/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} 7 | name: {{ template "prometheus-node-exporter.fullname" . }} 8 | spec: 9 | privileged: false 10 | # Required to prevent escalations to root. 11 | # allowPrivilegeEscalation: false 12 | # This is redundant with non-root + disallow privilege escalation, 13 | # but we can provide it for defense in depth. 14 | #requiredDropCapabilities: 15 | # - ALL 16 | # Allow core volume types. 17 | volumes: 18 | - 'configMap' 19 | - 'emptyDir' 20 | - 'projected' 21 | - 'secret' 22 | - 'downwardAPI' 23 | - 'persistentVolumeClaim' 24 | - 'hostPath' 25 | hostNetwork: true 26 | hostIPC: false 27 | hostPID: true 28 | hostPorts: 29 | - min: 0 30 | max: 65535 31 | runAsUser: 32 | # Permits the container to run with root privileges as well. 33 | rule: 'RunAsAny' 34 | seLinux: 35 | # This policy assumes the nodes are using AppArmor rather than SELinux. 36 | rule: 'RunAsAny' 37 | supplementalGroups: 38 | rule: 'MustRunAs' 39 | ranges: 40 | # Forbid adding the root group. 41 | - min: 0 42 | max: 65535 43 | fsGroup: 44 | rule: 'MustRunAs' 45 | ranges: 46 | # Forbid adding the root group. 
47 | - min: 0 48 | max: 65535 49 | readOnlyRootFilesystem: false 50 | {{- end }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | privileged: false 11 | # Required to prevent escalations to root. 12 | # allowPrivilegeEscalation: false 13 | # This is redundant with non-root + disallow privilege escalation, 14 | # but we can provide it for defense in depth. 15 | #requiredDropCapabilities: 16 | # - ALL 17 | # Allow core volume types. 18 | volumes: 19 | - 'configMap' 20 | - 'emptyDir' 21 | - 'projected' 22 | - 'secret' 23 | - 'downwardAPI' 24 | - 'persistentVolumeClaim' 25 | hostNetwork: false 26 | hostIPC: false 27 | hostPID: false 28 | runAsUser: 29 | # Permits the container to run with root privileges as well. 30 | rule: 'RunAsAny' 31 | seLinux: 32 | # This policy assumes the nodes are using AppArmor rather than SELinux. 33 | rule: 'RunAsAny' 34 | supplementalGroups: 35 | rule: 'MustRunAs' 36 | ranges: 37 | # Forbid adding the root group. 38 | - min: 0 39 | max: 65535 40 | fsGroup: 41 | rule: 'MustRunAs' 42 | ranges: 43 | # Forbid adding the root group. 
44 | - min: 0 45 | max: 65535 46 | readOnlyRootFilesystem: false 47 | {{- end }} 48 | 49 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | privileged: false 11 | # Required to prevent escalations to root. 12 | # allowPrivilegeEscalation: false 13 | # This is redundant with non-root + disallow privilege escalation, 14 | # but we can provide it for defense in depth. 15 | #requiredDropCapabilities: 16 | # - ALL 17 | # Allow core volume types. 18 | volumes: 19 | - 'configMap' 20 | - 'emptyDir' 21 | - 'projected' 22 | - 'secret' 23 | - 'downwardAPI' 24 | - 'persistentVolumeClaim' 25 | hostNetwork: false 26 | hostIPC: false 27 | hostPID: false 28 | runAsUser: 29 | # Permits the container to run with root privileges as well. 30 | rule: 'RunAsAny' 31 | seLinux: 32 | # This policy assumes the nodes are using AppArmor rather than SELinux. 33 | rule: 'RunAsAny' 34 | supplementalGroups: 35 | rule: 'MustRunAs' 36 | ranges: 37 | # Forbid adding the root group. 38 | - min: 0 39 | max: 65535 40 | fsGroup: 41 | rule: 'MustRunAs' 42 | ranges: 43 | # Forbid adding the root group. 
44 | - min: 0 45 | max: 65535 46 | readOnlyRootFilesystem: false 47 | {{- end }} 48 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-api-server/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeApiServer.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-apiserver 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-apiserver 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | {{- if .Values.kubeApiServer.serviceMonitor.interval }} 13 | interval: {{ .Values.kubeApiServer.serviceMonitor.interval }} 14 | {{- end }} 15 | port: https 16 | scheme: https 17 | {{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }} 18 | metricRelabelings: 19 | {{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . 
}} 20 | {{- end }} 21 | {{- if .Values.kubeApiServer.relabelings }} 22 | relabelings: 23 | {{ toYaml .Values.kubeApiServer.relabelings | indent 6 }} 24 | {{- end }} 25 | tlsConfig: 26 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 27 | serverName: {{ .Values.kubeApiServer.tlsConfig.serverName }} 28 | insecureSkipVerify: {{ .Values.kubeApiServer.tlsConfig.insecureSkipVerify }} 29 | jobLabel: {{ .Values.kubeApiServer.serviceMonitor.jobLabel }} 30 | namespaceSelector: 31 | matchNames: 32 | - default 33 | selector: 34 | {{ toYaml .Values.kubeApiServer.serviceMonitor.selector | indent 4 }} 35 | {{- end}} 36 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-dns/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeDns.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-dns 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-dns 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: jobLabel 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-operator.name" . 
}}-kube-dns 14 | release: {{ .Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - "kube-system" 18 | endpoints: 19 | - port: http-metrics-dnsmasq 20 | {{- if .Values.kubeDns.serviceMonitor.interval }} 21 | interval: {{ .Values.kubeDns.serviceMonitor.interval }} 22 | {{- end }} 23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | - port: http-metrics-skydns 25 | {{- if .Values.kubeDns.serviceMonitor.interval }} 26 | interval: {{ .Values.kubeDns.serviceMonitor.interval }} 27 | {{- end }} 28 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 29 | {{- if .Values.kubeDns.serviceMonitor.metricRelabelings }} 30 | metricRelabelings: 31 | {{ tpl (toYaml .Values.kubeDns.serviceMonitor.metricRelabelings | indent 4) . }} 32 | {{- end }} 33 | {{- if .Values.kubeDns.serviceMonitor.relabelings }} 34 | relabelings: 35 | {{ toYaml .Values.kubeDns.serviceMonitor.relabelings | indent 4 }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-scheduler/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeScheduler.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-scheduler 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-scheduler 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: jobLabel 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-operator.name" . 
}}-kube-scheduler 14 | release: {{ .Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - "kube-system" 18 | endpoints: 19 | - port: http-metrics 20 | {{- if .Values.kubeScheduler.serviceMonitor.interval }} 21 | interval: {{ .Values.kubeScheduler.serviceMonitor.interval }} 22 | {{- end }} 23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | {{- if .Values.kubeScheduler.serviceMonitor.https }} 25 | scheme: https 26 | tlsConfig: 27 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 28 | {{- end}} 29 | {{- if .Values.kubeScheduler.serviceMonitor.metricRelabelings }} 30 | metricRelabelings: 31 | {{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.metricRelabelings | indent 4) . }} 32 | {{- end }} 33 | {{- if .Values.kubeScheduler.serviceMonitor.relabelings }} 34 | relabelings: 35 | {{ toYaml .Values.kubeScheduler.serviceMonitor.relabelings | indent 4 }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/additionalPrometheusRules.yaml: -------------------------------------------------------------------------------- 1 | {{- if or .Values.additionalPrometheusRules .Values.additionalPrometheusRulesMap}} 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | {{- if .Values.additionalPrometheusRulesMap }} 6 | {{- range $prometheusRuleName, $prometheusRule := .Values.additionalPrometheusRulesMap }} 7 | - apiVersion: {{ printf "%s/v1" ($.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 8 | kind: PrometheusRule 9 | metadata: 10 | name: {{ template "prometheus-operator.name" $ }}-{{ $prometheusRuleName }} 11 | labels: 12 | app: {{ template "prometheus-operator.name" $ }} 13 | {{ include "prometheus-operator.labels" $ | indent 8 }} 14 | {{- if $prometheusRule.additionalLabels }} 15 | {{ toYaml $prometheusRule.additionalLabels | indent 8 }} 16 | {{- end }} 17 | spec: 18 | groups: 19 | {{ 
toYaml $prometheusRule.groups| indent 8 }} 20 | {{- end }} 21 | {{- else }} 22 | {{- range .Values.additionalPrometheusRules }} 23 | - apiVersion: {{ printf "%s/v1" ($.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 24 | kind: PrometheusRule 25 | metadata: 26 | name: {{ template "prometheus-operator.name" $ }}-{{ .name }} 27 | labels: 28 | app: {{ template "prometheus-operator.name" $ }} 29 | {{ include "prometheus-operator.labels" $ | indent 8 }} 30 | {{- if .additionalLabels }} 31 | {{ toYaml .additionalLabels | indent 8 }} 32 | {{- end }} 33 | spec: 34 | groups: 35 | {{ toYaml .groups| indent 8 }} 36 | {{- end }} 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/tests/test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: {{ template "grafana.fullname" . }}-test 5 | labels: 6 | app: {{ template "grafana.fullname" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | heritage: "{{ .Release.Service }}" 9 | release: "{{ .Release.Name }}" 10 | annotations: 11 | "helm.sh/hook": test-success 12 | spec: 13 | serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }} 14 | initContainers: 15 | - name: test-framework 16 | image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" 17 | command: 18 | - "bash" 19 | - "-c" 20 | - | 21 | set -ex 22 | # copy bats to tools dir 23 | cp -R /usr/local/libexec/ /tools/bats/ 24 | volumeMounts: 25 | - mountPath: /tools 26 | name: tools 27 | {{- if .Values.image.pullSecrets }} 28 | imagePullSecrets: 29 | {{- range .Values.image.pullSecrets }} 30 | - name: {{ . 
}} 31 | {{- end}} 32 | {{- end }} 33 | containers: 34 | - name: {{ .Release.Name }}-test 35 | image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" 36 | command: ["/tools/bats/bats", "-t", "/tests/run.sh"] 37 | volumeMounts: 38 | - mountPath: /tests 39 | name: tests 40 | readOnly: true 41 | - mountPath: /tools 42 | name: tools 43 | volumes: 44 | - name: tests 45 | configMap: 46 | name: {{ template "grafana.fullname" . }}-test 47 | - name: tools 48 | emptyDir: {} 49 | restartPolicy: Never 50 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - apiextensions.k8s.io 12 | resources: 13 | - customresourcedefinitions 14 | verbs: 15 | - '*' 16 | - apiGroups: 17 | - {{ .Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com" }} 18 | resources: 19 | - alertmanagers 20 | - prometheuses 21 | - prometheuses/finalizers 22 | - alertmanagers/finalizers 23 | - servicemonitors 24 | - podmonitors 25 | - prometheusrules 26 | verbs: 27 | - '*' 28 | - apiGroups: 29 | - apps 30 | resources: 31 | - statefulsets 32 | verbs: 33 | - '*' 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - configmaps 38 | - secrets 39 | verbs: 40 | - '*' 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - pods 45 | verbs: 46 | - list 47 | - delete 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - services 52 | - services/finalizers 53 | - endpoints 54 | verbs: 55 | - get 56 | - create 57 | - update 58 | - delete 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - nodes 63 | verbs: 64 | - list 65 | - watch 66 | - apiGroups: 67 | - "" 68 | resources: 69 | - namespaces 70 | verbs: 71 | - get 72 | - list 73 | - watch 74 | {{- end }} 75 | -------------------------------------------------------------------------------- /helm/ceph-exporter/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "ceph-exporter.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "ceph-exporter.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "ceph-exporter.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 36 | */}} 37 | {{- define "prometheus-operator.apiVersion" -}} 38 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 39 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 40 | {{- else -}} 41 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 42 | {{- end -}} 43 | {{- end -}} 44 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.alertmanager.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-alertmanager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-alertmanager 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | {{- if .Values.alertmanager.service.annotations }} 10 | annotations: 11 | {{ toYaml .Values.alertmanager.service.annotations | indent 4 }} 12 | {{- end }} 13 | spec: 14 | {{- if .Values.alertmanager.service.clusterIP }} 15 | clusterIP: {{ .Values.alertmanager.service.clusterIP }} 16 | {{- end }} 17 | {{- if .Values.alertmanager.service.externalIPs }} 18 | externalIPs: 19 | {{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} 20 | {{- end }} 21 | {{- if .Values.alertmanager.service.loadBalancerIP }} 22 | loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} 23 | {{- end }} 24 | {{- if .Values.alertmanager.service.loadBalancerSourceRanges }} 25 | loadBalancerSourceRanges: 26 | {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} 27 | - {{ $cidr }} 28 | {{- end }} 29 | {{- end }} 30 | ports: 31 | - name: web 32 | {{- if eq .Values.alertmanager.service.type "NodePort" }} 33 | nodePort: {{ .Values.alertmanager.service.nodePort }} 34 | {{- end }} 35 | port: 9093 36 | targetPort: 9093 37 | protocol: TCP 38 | selector: 39 | app: alertmanager 40 | alertmanager: {{ template "prometheus-operator.fullname" . }}-alertmanager 41 | type: "{{ .Values.alertmanager.service.type }}" 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "zookeeper-servicemonitor.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "zookeeper-servicemonitor.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "zookeeper-servicemonitor.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Return the appropriate apiVersion value to use for the prometheus-operator managed k8s resources 36 | */}} 37 | {{- define "prometheus-operator.apiVersion" -}} 38 | {{- if .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" }} 39 | {{- printf "%s" "monitoring.coreos.com/v1" -}} 40 | {{- else -}} 41 | {{- printf "%s" "monitoring.coreos.com/v1alpha1" -}} 42 | {{- end -}} 43 | {{- end -}} 44 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheusOperator.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-operator 8 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 9 | {{- if .Values.prometheusOperator.service.annotations }} 10 | annotations: 11 | {{ toYaml .Values.prometheusOperator.service.annotations | indent 4 }} 12 | {{- end }} 13 | spec: 14 | {{- if .Values.prometheusOperator.service.clusterIP }} 15 | clusterIP: {{ .Values.prometheusOperator.service.clusterIP }} 16 | {{- end }} 17 | {{- if .Values.prometheusOperator.service.externalIPs }} 18 | externalIPs: 19 | {{ toYaml .Values.prometheusOperator.service.externalIPs | indent 4 }} 20 | {{- end }} 21 | {{- if .Values.prometheusOperator.service.loadBalancerIP }} 22 | loadBalancerIP: {{ .Values.prometheusOperator.service.loadBalancerIP }} 23 | {{- end }} 24 | {{- if .Values.prometheusOperator.service.loadBalancerSourceRanges }} 25 | loadBalancerSourceRanges: 26 | {{- range $cidr := .Values.prometheusOperator.service.loadBalancerSourceRanges }} 27 | - {{ $cidr }} 28 | {{- end }} 29 | {{- end }} 30 | ports: 31 | - name: http 32 | {{- if eq .Values.prometheusOperator.service.type "NodePort" }} 33 | nodePort: {{ .Values.prometheusOperator.service.nodePort }} 34 | {{- end }} 35 | port: 8080 36 | targetPort: http 37 | selector: 38 | app: {{ template "prometheus-operator.name" . }}-operator 39 | release: {{ .Release.Name | quote }} 40 | type: "{{ .Values.prometheusOperator.service.type }}" 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-controller-manager/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeControllerManager.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-kube-controller-manager 6 | labels: 7 | app: {{ template "prometheus-operator.name" . 
}}-kube-controller-manager 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: jobLabel 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-operator.name" . }}-kube-controller-manager 14 | release: {{ .Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - "kube-system" 18 | endpoints: 19 | - port: http-metrics 20 | {{- if .Values.kubeControllerManager.serviceMonitor.interval }} 21 | interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }} 22 | {{- end }} 23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | {{- if .Values.kubeControllerManager.serviceMonitor.https }} 25 | scheme: https 26 | tlsConfig: 27 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 28 | {{- end }} 29 | {{- if .Values.kubeControllerManager.serviceMonitor.metricRelabelings }} 30 | metricRelabelings: 31 | {{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.metricRelabelings | indent 4) . }} 32 | {{- end }} 33 | {{- if .Values.kubeControllerManager.serviceMonitor.relabelings }} 34 | relabelings: 35 | {{ toYaml .Values.kubeControllerManager.serviceMonitor.relabelings | indent 4 }} 36 | {{- end }} 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | apiVersion: extensions/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: {{ template "grafana.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: {{ template "grafana.name" . 
}} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | heritage: {{ .Release.Service }} 11 | release: {{ .Release.Name }} 12 | annotations: 13 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default' 14 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' 15 | {{- if .Values.rbac.pspUseAppArmor }} 16 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' 17 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 18 | {{- end }} 19 | spec: 20 | privileged: false 21 | allowPrivilegeEscalation: false 22 | requiredDropCapabilities: 23 | # Default set from Docker, without DAC_OVERRIDE or CHOWN 24 | - FOWNER 25 | - FSETID 26 | - KILL 27 | - SETGID 28 | - SETUID 29 | - SETPCAP 30 | - NET_BIND_SERVICE 31 | - NET_RAW 32 | - SYS_CHROOT 33 | - MKNOD 34 | - AUDIT_WRITE 35 | - SETFCAP 36 | volumes: 37 | - 'configMap' 38 | - 'emptyDir' 39 | - 'projected' 40 | - 'secret' 41 | - 'downwardAPI' 42 | - 'persistentVolumeClaim' 43 | hostNetwork: false 44 | hostIPC: false 45 | hostPID: false 46 | runAsUser: 47 | rule: 'RunAsAny' 48 | seLinux: 49 | rule: 'RunAsAny' 50 | supplementalGroups: 51 | rule: 'RunAsAny' 52 | fsGroup: 53 | rule: 'RunAsAny' 54 | readOnlyRootFilesystem: false 55 | {{- end }} 56 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheus.enabled .Values.prometheus.ingress.enabled }} 2 | {{- $serviceName := printf "%s-%s" (include "prometheus-operator.fullname" .) 
"prometheus" }} 3 | {{- $servicePort := 9090 -}} 4 | {{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }} 5 | {{- $paths := .Values.prometheus.ingress.paths | default $routePrefix -}} 6 | apiVersion: extensions/v1beta1 7 | kind: Ingress 8 | metadata: 9 | {{- if .Values.prometheus.ingress.annotations }} 10 | annotations: 11 | {{ toYaml .Values.prometheus.ingress.annotations | indent 4 }} 12 | {{- end }} 13 | name: {{ $serviceName }} 14 | labels: 15 | app: {{ template "prometheus-operator.name" . }}-prometheus 16 | {{ include "prometheus-operator.labels" . | indent 4 }} 17 | {{- if .Values.prometheus.ingress.labels }} 18 | {{ toYaml .Values.prometheus.ingress.labels | indent 4 }} 19 | {{- end }} 20 | spec: 21 | rules: 22 | {{- if .Values.prometheus.ingress.hosts }} 23 | {{- range $host := .Values.prometheus.ingress.hosts }} 24 | - host: {{ tpl $host $ }} 25 | http: 26 | paths: 27 | {{- range $p := $paths }} 28 | - path: {{ tpl $p $ }} 29 | backend: 30 | serviceName: {{ $serviceName }} 31 | servicePort: {{ $servicePort }} 32 | {{- end -}} 33 | {{- end -}} 34 | {{- else }} 35 | - http: 36 | paths: 37 | {{- range $p := $paths }} 38 | - path: {{ tpl $p $ }} 39 | backend: 40 | serviceName: {{ $serviceName }} 41 | servicePort: {{ $servicePort }} 42 | {{- end -}} 43 | {{- end -}} 44 | {{- if .Values.prometheus.ingress.tls }} 45 | tls: 46 | {{ toYaml .Values.prometheus.ingress.tls | indent 4 }} 47 | {{- end -}} 48 | {{- end -}} 49 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "grafana.fullname" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | app: {{ template "grafana.name" . }} 8 | chart: {{ template "grafana.chart" . 
}} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | {{- if .Values.service.labels }} 12 | {{ toYaml .Values.service.labels | indent 4 }} 13 | {{- end }} 14 | {{- with .Values.service.annotations }} 15 | annotations: 16 | {{ toYaml . | indent 4 }} 17 | {{- end }} 18 | spec: 19 | {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} 20 | type: ClusterIP 21 | {{- if .Values.service.clusterIP }} 22 | clusterIP: {{ .Values.service.clusterIP }} 23 | {{end}} 24 | {{- else if eq .Values.service.type "LoadBalancer" }} 25 | type: {{ .Values.service.type }} 26 | {{- if .Values.service.loadBalancerIP }} 27 | loadBalancerIP: {{ .Values.service.loadBalancerIP }} 28 | {{- end }} 29 | {{- if .Values.service.loadBalancerSourceRanges }} 30 | loadBalancerSourceRanges: 31 | {{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} 32 | {{- end -}} 33 | {{- else }} 34 | type: {{ .Values.service.type }} 35 | {{- end }} 36 | {{- if .Values.service.externalIPs }} 37 | externalIPs: 38 | {{ toYaml .Values.service.externalIPs | indent 4 }} 39 | {{- end }} 40 | ports: 41 | - name: service 42 | port: {{ .Values.service.port }} 43 | protocol: TCP 44 | targetPort: {{ .Values.service.targetPort }} 45 | {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} 46 | nodePort: {{.Values.service.nodePort}} 47 | {{ end }} 48 | selector: 49 | app: {{ template "grafana.name" . 
}} 50 | release: {{ .Release.Name }} 51 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-toolbox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rook-ceph-tools 5 | namespace: rook-ceph 6 | labels: 7 | app: rook-ceph-tools 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: rook-ceph-tools 13 | template: 14 | metadata: 15 | labels: 16 | app: rook-ceph-tools 17 | spec: 18 | dnsPolicy: ClusterFirstWithHostNet 19 | containers: 20 | - name: rook-ceph-tools 21 | image: rook/ceph:v1.1.0 22 | command: ["/tini"] 23 | args: ["-g", "--", "/usr/local/bin/toolbox.sh"] 24 | imagePullPolicy: IfNotPresent 25 | env: 26 | - name: ROOK_ADMIN_SECRET 27 | valueFrom: 28 | secretKeyRef: 29 | name: rook-ceph-mon 30 | key: admin-secret 31 | securityContext: 32 | privileged: true 33 | volumeMounts: 34 | - mountPath: /dev 35 | name: dev 36 | - mountPath: /sys/bus 37 | name: sysbus 38 | - mountPath: /lib/modules 39 | name: libmodules 40 | - name: mon-endpoint-volume 41 | mountPath: /etc/rook 42 | # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021 43 | hostNetwork: true 44 | volumes: 45 | - name: dev 46 | hostPath: 47 | path: /dev 48 | - name: sysbus 49 | hostPath: 50 | path: /sys/bus 51 | - name: libmodules 52 | hostPath: 53 | path: /lib/modules 54 | - name: mon-endpoint-volume 55 | configMap: 56 | name: rook-ceph-mon-endpoints 57 | items: 58 | - key: data 59 | path: mon-endpoints 60 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/alertmanager/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }} 2 | {{- $serviceName := printf "%s-%s" (include 
"prometheus-operator.fullname" .) "alertmanager" }} 3 | {{- $servicePort := 9093 -}} 4 | {{- $routePrefix := list .Values.alertmanager.alertmanagerSpec.routePrefix }} 5 | {{- $paths := .Values.alertmanager.ingress.paths | default $routePrefix -}} 6 | apiVersion: extensions/v1beta1 7 | kind: Ingress 8 | metadata: 9 | name: {{ $serviceName }} 10 | {{- if .Values.alertmanager.ingress.annotations }} 11 | annotations: 12 | {{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} 13 | {{- end }} 14 | labels: 15 | app: {{ template "prometheus-operator.name" . }}-alertmanager 16 | {{- if .Values.alertmanager.ingress.labels }} 17 | {{ toYaml .Values.alertmanager.ingress.labels | indent 4 }} 18 | {{- end }} 19 | {{ include "prometheus-operator.labels" . | indent 4 }} 20 | spec: 21 | rules: 22 | {{- if .Values.alertmanager.ingress.hosts }} 23 | {{- range $host := .Values.alertmanager.ingress.hosts }} 24 | - host: {{ tpl $host $ }} 25 | http: 26 | paths: 27 | {{- range $p := $paths }} 28 | - path: {{ tpl $p $ }} 29 | backend: 30 | serviceName: {{ $serviceName }} 31 | servicePort: {{ $servicePort }} 32 | {{- end -}} 33 | {{- end -}} 34 | {{- else }} 35 | - http: 36 | paths: 37 | {{- range $p := $paths }} 38 | - path: {{ tpl $p $ }} 39 | backend: 40 | serviceName: {{ $serviceName }} 41 | servicePort: {{ $servicePort }} 42 | {{- end -}} 43 | {{- end -}} 44 | {{- if .Values.alertmanager.ingress.tls }} 45 | tls: 46 | {{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} 47 | {{- end -}} 48 | {{- end -}} 49 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/grafana/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "grafana.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "grafana.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "grafana.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Create the name of the service account 36 | */}} 37 | {{- define "grafana.serviceAccountName" -}} 38 | {{- if .Values.serviceAccount.create -}} 39 | {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} 40 | {{- else -}} 41 | {{ default "default" .Values.serviceAccount.name }} 42 | {{- end -}} 43 | {{- end -}} 44 | 45 | {{- define "grafana.serviceAccountNameTest" -}} 46 | {{- if .Values.serviceAccount.create -}} 47 | {{ default (print (include "grafana.fullname" .) 
"-test") .Values.serviceAccount.nameTest }} 48 | {{- else -}} 49 | {{ default "default" .Values.serviceAccount.nameTest }} 50 | {{- end -}} 51 | {{- end -}} 52 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.prometheus.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-prometheus 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-prometheus 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | {{- if .Values.prometheus.service.annotations }} 10 | annotations: 11 | {{ toYaml .Values.prometheus.service.annotations | indent 4 }} 12 | {{- end }} 13 | spec: 14 | {{- if .Values.prometheus.service.clusterIP }} 15 | clusterIP: {{ .Values.prometheus.service.clusterIP }} 16 | {{- end }} 17 | {{- if .Values.prometheus.service.externalIPs }} 18 | externalIPs: 19 | {{ toYaml .Values.prometheus.service.externalIPs | indent 4 }} 20 | {{- end }} 21 | {{- if .Values.prometheus.service.loadBalancerIP }} 22 | loadBalancerIP: {{ .Values.prometheus.service.loadBalancerIP }} 23 | {{- end }} 24 | {{- if .Values.prometheus.service.loadBalancerSourceRanges }} 25 | loadBalancerSourceRanges: 26 | {{- range $cidr := .Values.prometheus.service.loadBalancerSourceRanges }} 27 | - {{ $cidr }} 28 | {{- end }} 29 | {{- end }} 30 | ports: 31 | - name: web 32 | {{- if eq .Values.prometheus.service.type "NodePort" }} 33 | nodePort: {{ .Values.prometheus.service.nodePort }} 34 | {{- end }} 35 | port: 9090 36 | targetPort: {{ .Values.prometheus.service.targetPort }} 37 | {{- if .Values.prometheus.service.additionalPorts }} 38 | {{ toYaml .Values.prometheus.service.additionalPorts | indent 2 }} 39 | {{- end }} 40 | selector: 41 | app: prometheus 42 | prometheus: {{ template "prometheus-operator.fullname" . 
}}-prometheus 43 | {{- if .Values.prometheus.service.sessionAffinity }} 44 | sessionAffinity: {{ .Values.prometheus.service.sessionAffinity }} 45 | {{- end }} 46 | type: "{{ .Values.prometheus.service.type }}" 47 | {{- end }} 48 | -------------------------------------------------------------------------------- /kubernetes-yaml/bootstrapping/v1.13/bootstrap-kubelet.conf: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | # certificate-authority: /etc/kubernetes/pki/ca.crt 5 | certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1USXdPVEE1TXpFeE9Wb1hEVEk1TVRJd05qQTVNekV4T1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSjYrCmdaMDJCYmRWOWVxM0JQbDV5MElURDZkL0RuOXEzMUFQeDdMeXFxMEkyRkdwdVIyWTE2Y2RMeGxKWEljMUliZEcKbDNYQUo3MlBGU1B5Y1FvaVFxd1lOQWN0bC80ejZXRmlEeXI3K2ZUbFFoSytDMmRiN2l4eE9HY2ZEZUc2a3JoZApVN3g1NklFa1J5TE93MTFOdjk2Z2VhdnpzbWxQYXZidlViVFEyV3dWa1ppN0xmTERoWi8rcnhMRTBwSktSVEJNCm9BeFFnSnZhajdRVkpXUlFqWTFHN1BEVDNiMzJzTlVibXU3cVM0ZHlmdi9TcnludFNKN0RRUXBwaHhGQ1NsZksKRVNCdG9wVzE2R2EyeVZqdUcydHN4U3hBM0J0Rk5lVWhHZ1ptUlUwQXlSNGU2ZUVyZjF0SXFRYU0yMTBTRk42MgpyQ2hHR2xPd1RGbHo5ZWVyMlJjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFEVTE0NTZLVzFnK09sMWFYRlBoZjVzeGthVE4KTDdDT0hEWEFvZFVkYi9mUGZoMU50YW44akxiVEZKbnNBUlAxeFVIdDM1MFAzU0xLQ2pGL0pZc0ljRHBVNGk3MAp1S1pHemlyL0JZbUZ4VjBiQzUvTmJrRk1acWpFMG1OWm05V3JDTXhjMkNjMEQ2aitFMjJ2Wll6bFl3Sk9CK2RsCnlqaVlHRGhOZnZGL0c3S1RjZTVsYndZNGF3ZDluRmN0KzBsbmNmN0FMLzB3TmJMeGJFUGsxQ05rb3NNdUFiaWQKSzBMUVljcXI1QjlINmZZMzYzMnU5Rkl2ZHdNeXNhUUpQa1N6bmc0UzBFWC84Y1FZSVEzMWlJVGhoMndFZTUxaQpiVXhnWFVnMUpROUN5Nk53RTBaRXAzTUdXQmtRYWJOdnRiYjhvY1lnMituV0dBQ25hUlNhR1RtK2U4bz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= 6 | server: https://192.168.105.158:8443 
7 | name: kubernetes 8 | contexts: 9 | - context: 10 | cluster: kubernetes 11 | user: tls-bootstrap-token-user 12 | name: tls-bootstrap-token-user@kubernetes 13 | current-context: tls-bootstrap-token-user@kubernetes 14 | kind: Config 15 | preferences: {} 16 | users: 17 | - name: tls-bootstrap-token-user 18 | user: 19 | token: bak6fj.zh7xvypf3sy4w6sz 20 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/rules/kube-apiserver.rules.yaml: -------------------------------------------------------------------------------- 1 | # Generated from 'kube-apiserver.rules' group from https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml 2 | # Do not change in-place! In order to change this file first read following link: 3 | # https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack 4 | {{- if and .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver }} 5 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 6 | kind: PrometheusRule 7 | metadata: 8 | name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-apiserver.rules" | trunc 63 | trimSuffix "-" }} 9 | labels: 10 | app: {{ template "prometheus-operator.name" . }} 11 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 12 | {{- if .Values.defaultRules.labels }} 13 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 14 | {{- end }} 15 | {{- if .Values.defaultRules.annotations }} 16 | annotations: 17 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 18 | {{- end }} 19 | spec: 20 | groups: 21 | - name: kube-apiserver.rules 22 | rules: 23 | - expr: histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 24 | labels: 25 | quantile: '0.99' 26 | record: cluster_quantile:apiserver_request_latencies:histogram_quantile 27 | - expr: histogram_quantile(0.9, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 28 | labels: 29 | quantile: '0.9' 30 | record: cluster_quantile:apiserver_request_latencies:histogram_quantile 31 | - expr: histogram_quantile(0.5, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 32 | labels: 33 | quantile: '0.5' 34 | record: cluster_quantile:apiserver_request_latencies:histogram_quantile 35 | {{- end }} -------------------------------------------------------------------------------- /helm/mysql-servicemonitor/README.md: -------------------------------------------------------------------------------- 1 | # mysql-servicemonitor - Prometheus Operator with nginx ingress controller 2 | 3 | [mysql-servicemonitor](https://)是什么 4 | 5 | ## Introduction 6 | 7 | This chart bootstraps prometheus servicemonitor on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.6+ 12 | - PV provisioner support in the underlying infrastructure 13 | 14 | ## Installing the Chart 15 | 16 | To install the chart with the release name `my-release`: 17 | 18 | ```bash 19 | $ helm install --name my-release ./mysql-servicemonitor 20 | ``` 21 | 22 | The command deploys the mysql-servicemonitor chart on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 23 | 24 | ### Uninstall 25 | 26 | To uninstall/delete the `my-release` deployment: 27 | 28 | ```bash 29 | $ helm delete my-release 30 | ``` 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the mysql-servicemonitor chart and their default values. 35 | 36 | | Parameter | Description | Default | 37 | | ----------------------- | ----------------------------------- | -------------------------------------- | 38 | | `namespaceSelector` | nginx ingress deploy namespace | `nginx-ingress` 39 | | `schedulerPort` | nginx ingress metrics port | 9913 40 | | `scheme` | metrics web scheme | `http` 41 | | `prometheusRules` | prometheusRules | `{}` | 42 | | `additionalServiceMonitorLabels`| one of prometheus operator label| `release: prometheus-operator`| 43 | | `additionalRulesLabels` | one of prometheus operator label| `release: prometheus-operator` | 44 | 45 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
For example, 46 | 47 | 48 | -------------------------------------------------------------------------------- /helm/redis-servicemonitor/README.md: -------------------------------------------------------------------------------- 1 | # redis-servicemonitor - Prometheus Operator ServiceMonitor for Redis 2 | 3 | What is [redis-servicemonitor](https://)? 4 | 5 | ## Introduction 6 | 7 | This chart bootstraps prometheus servicemonitor on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.6+ 12 | - PV provisioner support in the underlying infrastructure 13 | 14 | ## Installing the Chart 15 | 16 | To install the chart with the release name `my-release`: 17 | 18 | ```bash 19 | $ helm install --name my-release ./redis-servicemonitor 20 | ``` 21 | 22 | The command deploys the redis-servicemonitor chart on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 23 | 24 | ### Uninstall 25 | 26 | To uninstall/delete the `my-release` deployment: 27 | 28 | ```bash 29 | $ helm delete my-release 30 | ``` 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the redis-servicemonitor chart and their default values. 
35 | 36 | | Parameter | Description | Default | 37 | | ----------------------- | ----------------------------------- | -------------------------------------- | 38 | | `namespaceSelector` | nginx ingress deploy namespace | `nginx-ingress` 39 | | `schedulerPort` | nginx ingress metrics port | 9913 40 | | `scheme` | metrics web scheme | `http` 41 | | `prometheusRules` | prometheusRules | `{}` | 42 | | `additionalServiceMonitorLabels`| one of prometheus operator label| `release: prometheus-operator`| 43 | | `additionalRulesLabels` | one of prometheus operator label| `release: prometheus-operator` | 44 | 45 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, 46 | 47 | 48 | -------------------------------------------------------------------------------- /helm/zookeeper-servicemonitor/README.md: -------------------------------------------------------------------------------- 1 | # zookeeper-servicemonitor - Prometheus Operator with nginx ingress controller 2 | 3 | [zookeeper-servicemonitor](https://)是什么 4 | 5 | ## Introduction 6 | 7 | This chart bootstraps prometheus servicemonitor on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.6+ 12 | - PV provisioner support in the underlying infrastructure 13 | 14 | ## Installing the Chart 15 | 16 | To install the chart with the release name `my-release`: 17 | 18 | ```bash 19 | $ helm install --name my-release ./zookeeper-servicemonitor 20 | ``` 21 | 22 | The command deploys ceph-exporter cluster on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 
23 | 24 | ### Uninstall 25 | 26 | To uninstall/delete the `my-release` deployment: 27 | 28 | ```bash 29 | $ helm delete my-release 30 | ``` 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the zookeeper-servicemonitor chart and their default values. 35 | 36 | | Parameter | Description | Default | 37 | | ----------------------- | ----------------------------------- | -------------------------------------- | 38 | | `namespaceSelector` | nginx ingress deploy namespace | `nginx-ingress` 39 | | `schedulerPort` | nginx ingress metrics port | 9913 40 | | `scheme` | metrics web scheme | `http` 41 | | `prometheusRules` | prometheusRules | `{}` | 42 | | `additionalServiceMonitorLabels`| one of prometheus operator label| `release: prometheus-operator`| 43 | | `additionalRulesLabels` | one of prometheus operator label| `release: prometheus-operator` | 44 | 45 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, 46 | 47 | 48 | -------------------------------------------------------------------------------- /helm/nginx-ingress-servicemonitor/README.md: -------------------------------------------------------------------------------- 1 | # nginx-ingress-servicemonitor - Prometheus Operator with nginx ingress controller 2 | 3 | What is [nginx-ingress-servicemonitor](https://)? 4 | 5 | ## Introduction 6 | 7 | This chart bootstraps prometheus servicemonitor on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.6+ 12 | - PV provisioner support in the underlying infrastructure 13 | 14 | ## Installing the Chart 15 | 16 | To install the chart with the release name `my-release`: 17 | 18 | ```bash 19 | $ helm install --name my-release ./nginx-ingress-servicemonitor 20 | ``` 21 | 22 | The command deploys the nginx-ingress-servicemonitor chart on the Kubernetes cluster in the default configuration. 
The [configuration](#configuration) section lists the parameters that can be configured during installation. 23 | 24 | ### Uninstall 25 | 26 | To uninstall/delete the `my-release` deployment: 27 | 28 | ```bash 29 | $ helm delete my-release 30 | ``` 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the FastDFS-Nginx chart and their default values. 35 | 36 | | Parameter | Description | Default | 37 | | ----------------------- | ----------------------------------- | -------------------------------------- | 38 | | `namespaceSelector` | nginx ingress deploy namespace | `nginx-ingress` 39 | | `metricsPort` | nginx ingress metrics port | 9913 40 | | `scheme` | metrics web scheme | `http` 41 | | `prometheusRules` | prometheusRules | `{}` | 42 | | `additionalServiceMonitorLabels`| one of prometheus operator label| `release: prometheus-operator`| 43 | | `additionalRulesLabels` | one of prometheus operator label| `release: prometheus-operator` | 44 | 45 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, 46 | 47 | 48 | -------------------------------------------------------------------------------- /helm/prometheus-operator/charts/prometheus-node-exporter/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "prometheus-node-exporter.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "prometheus-node-exporter.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* Generate basic labels */}} 28 | {{- define "prometheus-node-exporter.labels" }} 29 | app: {{ template "prometheus-node-exporter.name" . }} 30 | heritage: {{.Release.Service }} 31 | release: {{.Release.Name }} 32 | chart: {{ template "prometheus-node-exporter.chart" . }} 33 | {{- if .Values.podLabels}} 34 | {{ toYaml .Values.podLabels }} 35 | {{- end }} 36 | {{- end }} 37 | 38 | {{/* 39 | Create chart name and version as used by the chart label. 40 | */}} 41 | {{- define "prometheus-node-exporter.chart" -}} 42 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 43 | {{- end -}} 44 | 45 | 46 | {{/* 47 | Create the name of the service account to use 48 | */}} 49 | {{- define "prometheus-node-exporter.serviceAccountName" -}} 50 | {{- if .Values.serviceAccount.create -}} 51 | {{ default (include "prometheus-node-exporter.fullname" .) 
.Values.serviceAccount.name }} 52 | {{- else -}} 53 | {{ default "default" .Values.serviceAccount.name }} 54 | {{- end -}} 55 | {{- end -}} 56 | -------------------------------------------------------------------------------- /helm/node-exporter-servicemonitor/README.md: -------------------------------------------------------------------------------- 1 | # node-exporter-servicemonitor - Prometheus Operator with nginx ingress controller 2 | 3 | [node-exporter-servicemonitor](https://)是什么 4 | 5 | ## Introduction 6 | 7 | This chart bootstraps prometheus servicemonitor on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.6+ 12 | - PV provisioner support in the underlying infrastructure 13 | 14 | ## Installing the Chart 15 | 16 | To install the chart with the release name `my-release`: 17 | 18 | ```bash 19 | $ helm install --name my-release ./node-exporter-servicemonitor 20 | ``` 21 | 22 | The command deploys ceph-exporter cluster on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 23 | 24 | ### Uninstall 25 | 26 | To uninstall/delete the `my-release` deployment: 27 | 28 | ```bash 29 | $ helm delete my-release 30 | ``` 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the FastDFS-Nginx chart and their default values. 
35 | 36 | | Parameter | Description | Default | 37 | | ----------------------- | ----------------------------------- | -------------------------------------- | 38 | | `namespaceSelector` | nginx ingress deploy namespace | `nginx-ingress` 39 | | `schedulerPort` | nginx ingress metrics port | 9913 40 | | `scheme` | metrics web scheme | `http` 41 | | `prometheusRules` | prometheusRules | `{}` | 42 | | `additionalServiceMonitorLabels`| one of prometheus operator label| `release: prometheus-operator`| 43 | | `additionalRulesLabels` | one of prometheus operator label| `release: prometheus-operator` | 44 | 45 | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, 46 | 47 | 48 | -------------------------------------------------------------------------------- /kubernetes-yaml/rook-ceph/rook-ceph-storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: ceph-rbd 5 | provisioner: ceph.rook.io/block 6 | parameters: 7 | blockPool: replicapool 8 | # The value of "clusterNamespace" MUST be the same as the one in which your rook cluster exist 9 | clusterNamespace: rook-ceph 10 | # Specify the filesystem type of the volume. If not specified, it will use `ext4`. 11 | fstype: xfs 12 | # Optional, default reclaimPolicy is "Delete". Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/ 13 | reclaimPolicy: Retain 14 | # Optional, if you want to add dynamic resize for PVC. Works for Kubernetes 1.14+ 15 | # For now only ext3, ext4, xfs resize support provided, like in Kubernetes itself. 
16 | allowVolumeExpansion: true 17 | --- 18 | # apiVersion: storage.k8s.io/v1 19 | # kind: StorageClass 20 | # metadata: 21 | # name: cephfs 22 | # # Change "rook-ceph" provisioner prefix to match the operator namespace if needed 23 | # provisioner: rook-ceph.cephfs.csi.ceph.com 24 | # parameters: 25 | # # clusterID is the namespace where operator is deployed. 26 | # clusterID: rook-ceph 27 | # 28 | # # CephFS filesystem name into which the volume shall be created 29 | # fsName: cephfs-k8s 30 | # 31 | # # Ceph pool into which the volume shall be created 32 | # # Required for provisionVolume: "true" 33 | # pool: cephfs-k8s-data0 34 | # 35 | # # Root path of an existing CephFS volume 36 | # # Required for provisionVolume: "false" 37 | # # rootPath: /absolute/path 38 | # 39 | # # The secrets contain Ceph admin credentials. These are generated automatically by the operator 40 | # # in the same namespace as the cluster. 41 | # csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner 42 | # csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph 43 | # csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node 44 | # csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph 45 | # 46 | # reclaimPolicy: Retain 47 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/rules/general.rules.yaml: -------------------------------------------------------------------------------- 1 | # Generated from 'general.rules' group from https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml 2 | # Do not change in-place! 
In order to change this file first read following link: 3 | # https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack 4 | {{- if and .Values.defaultRules.create .Values.defaultRules.rules.general }} 5 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 6 | kind: PrometheusRule 7 | metadata: 8 | name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "general.rules" | trunc 63 | trimSuffix "-" }} 9 | labels: 10 | app: {{ template "prometheus-operator.name" . }} 11 | {{ include "prometheus-operator.labels" . | indent 4 }} 12 | {{- if .Values.defaultRules.labels }} 13 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 14 | {{- end }} 15 | {{- if .Values.defaultRules.annotations }} 16 | annotations: 17 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 18 | {{- end }} 19 | spec: 20 | groups: 21 | - name: general.rules 22 | rules: 23 | - alert: TargetDown 24 | annotations: 25 | message: '{{`{{ $value }}`}}% of the {{`{{ $labels.job }}`}} targets are down.' 26 | expr: 100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10 27 | for: 10m 28 | labels: 29 | severity: warning 30 | - alert: Watchdog 31 | annotations: 32 | message: 'This is an alert meant to ensure that the entire alerting pipeline is functional. 33 | 34 | This alert is always firing, therefore it should always be firing in Alertmanager 35 | 36 | and always fire against a receiver. There are integrations with various notification 37 | 38 | mechanisms that send a notification when this alert is not firing. For example the 39 | 40 | "DeadMansSnitch" integration in PagerDuty. 
41 | 42 | ' 43 | expr: vector(1) 44 | labels: 45 | severity: none 46 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus-operator/cleanup-crds.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.cleanupCustomResource }} 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . }}-operator-cleanup 6 | namespace: {{ .Release.Namespace }} 7 | annotations: 8 | "helm.sh/hook": pre-delete 9 | "helm.sh/hook-weight": "3" 10 | "helm.sh/hook-delete-policy": hook-succeeded 11 | labels: 12 | app: {{ template "prometheus-operator.name" . }}-operator 13 | {{ include "prometheus-operator.labels" . | indent 4 }} 14 | spec: 15 | template: 16 | metadata: 17 | name: {{ template "prometheus-operator.fullname" . }}-operator-cleanup 18 | labels: 19 | app: {{ template "prometheus-operator.name" . }}-operator 20 | {{ include "prometheus-operator.labels" . | indent 8 }} 21 | spec: 22 | {{- if .Values.global.rbac.create }} 23 | serviceAccountName: {{ template "prometheus-operator.operator.serviceAccountName" . 
}} 24 | {{- end }} 25 | containers: 26 | - name: kubectl 27 | image: "{{ .Values.prometheusOperator.hyperkubeImage.repository }}:{{ .Values.prometheusOperator.hyperkubeImage.tag }}" 28 | imagePullPolicy: "{{ .Values.prometheusOperator.hyperkubeImage.pullPolicy }}" 29 | command: 30 | - /bin/sh 31 | - -c 32 | - > 33 | kubectl delete alertmanager --all; 34 | kubectl delete prometheus --all; 35 | kubectl delete prometheusrule --all; 36 | kubectl delete servicemonitor --all; 37 | sleep 10; 38 | kubectl delete crd alertmanagers.monitoring.coreos.com; 39 | kubectl delete crd prometheuses.monitoring.coreos.com; 40 | kubectl delete crd prometheusrules.monitoring.coreos.com; 41 | kubectl delete crd servicemonitors.monitoring.coreos.com; 42 | kubectl delete crd podmonitors.monitoring.coreos.com; 43 | restartPolicy: OnFailure 44 | {{- end }} 45 | -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml: -------------------------------------------------------------------------------- 1 | # Generated from 'kube-prometheus-node-alerting.rules' group from https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml 2 | # Do not change in-place! In order to change this file first read following link: 3 | # https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack 4 | {{- if and .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeAlerting }} 5 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 6 | kind: PrometheusRule 7 | metadata: 8 | name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "kube-prometheus-node-alerting.rules" | trunc 63 | trimSuffix "-" }} 9 | labels: 10 | app: {{ template "prometheus-operator.name" . }} 11 | {{ include "prometheus-operator.labels" . 
| indent 4 }} 12 | {{- if .Values.defaultRules.labels }} 13 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 14 | {{- end }} 15 | {{- if .Values.defaultRules.annotations }} 16 | annotations: 17 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 18 | {{- end }} 19 | spec: 20 | groups: 21 | - name: kube-prometheus-node-alerting.rules 22 | rules: 23 | - alert: NodeDiskRunningFull 24 | annotations: 25 | message: Device {{`{{ $labels.device }}`}} of node-exporter {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} will be full within the next 24 hours. 26 | expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) < 0)' 27 | for: 30m 28 | labels: 29 | severity: warning 30 | - alert: NodeDiskRunningFull 31 | annotations: 32 | message: Device {{`{{ $labels.device }}`}} of node-exporter {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod }}`}} will be full within the next 2 hours. 33 | expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) < 0)' 34 | for: 10m 35 | labels: 36 | severity: critical 37 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/prometheus/rules/prometheus-operator.yaml: -------------------------------------------------------------------------------- 1 | # Generated from 'prometheus-operator' group from https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml 2 | # Do not change in-place! In order to change this file first read following link: 3 | # https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack 4 | {{- if and .Values.defaultRules.create .Values.defaultRules.rules.prometheusOperator }} 5 | {{- $operatorJob := printf "%s-%s" (include "prometheus-operator.fullname" .) 
"operator" }} 6 | {{- $namespace := .Release.Namespace }} 7 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 8 | kind: PrometheusRule 9 | metadata: 10 | name: {{ printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus-operator" | trunc 63 | trimSuffix "-" }} 11 | labels: 12 | app: {{ template "prometheus-operator.name" . }} 13 | {{ include "prometheus-operator.labels" . | indent 4 }} 14 | {{- if .Values.defaultRules.labels }} 15 | {{ toYaml .Values.defaultRules.labels | indent 4 }} 16 | {{- end }} 17 | {{- if .Values.defaultRules.annotations }} 18 | annotations: 19 | {{ toYaml .Values.defaultRules.annotations | indent 4 }} 20 | {{- end }} 21 | spec: 22 | groups: 23 | - name: prometheus-operator 24 | rules: 25 | - alert: PrometheusOperatorReconcileErrors 26 | annotations: 27 | message: Errors while reconciling {{`{{ $labels.controller }}`}} in {{`{{ $labels.namespace }}`}} Namespace. 28 | expr: rate(prometheus_operator_reconcile_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 29 | for: 10m 30 | labels: 31 | severity: warning 32 | - alert: PrometheusOperatorNodeLookupErrors 33 | annotations: 34 | message: Errors while reconciling Prometheus in {{`{{ $labels.namespace }}`}} Namespace. 35 | expr: rate(prometheus_operator_node_address_lookup_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 36 | for: 10m 37 | labels: 38 | severity: warning 39 | {{- end }} -------------------------------------------------------------------------------- /helm/prometheus-operator/templates/exporters/kube-etcd/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.kubeEtcd.enabled }} 2 | apiVersion: {{ printf "%s/v1" (.Values.prometheusOperator.crdApiGroup | default "monitoring.coreos.com") }} 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "prometheus-operator.fullname" . 
}}-kube-etcd 6 | labels: 7 | app: {{ template "prometheus-operator.name" . }}-kube-etcd 8 | {{ include "prometheus-operator.labels" . | indent 4 }} 9 | spec: 10 | jobLabel: jobLabel 11 | selector: 12 | matchLabels: 13 | app: {{ template "prometheus-operator.name" . }}-kube-etcd 14 | release: {{ .Release.Name | quote }} 15 | namespaceSelector: 16 | matchNames: 17 | - "kube-system" 18 | endpoints: 19 | - port: http-metrics 20 | {{- if .Values.kubeEtcd.serviceMonitor.interval }} 21 | interval: {{ .Values.kubeEtcd.serviceMonitor.interval }} 22 | {{- end }} 23 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 24 | {{- if eq .Values.kubeEtcd.serviceMonitor.scheme "https" }} 25 | scheme: https 26 | tlsConfig: 27 | {{- if .Values.kubeEtcd.serviceMonitor.serverName }} 28 | serverName: {{ .Values.kubeEtcd.serviceMonitor.serverName }} 29 | {{- end }} 30 | {{- if .Values.kubeEtcd.serviceMonitor.caFile }} 31 | caFile: {{ .Values.kubeEtcd.serviceMonitor.caFile }} 32 | {{- end }} 33 | {{- if .Values.kubeEtcd.serviceMonitor.certFile }} 34 | certFile: {{ .Values.kubeEtcd.serviceMonitor.certFile }} 35 | {{- end }} 36 | {{- if .Values.kubeEtcd.serviceMonitor.keyFile }} 37 | keyFile: {{ .Values.kubeEtcd.serviceMonitor.keyFile }} 38 | {{- end}} 39 | insecureSkipVerify: {{ .Values.kubeEtcd.serviceMonitor.insecureSkipVerify }} 40 | {{- end }} 41 | {{- if .Values.kubeEtcd.serviceMonitor.metricRelabelings }} 42 | metricRelabelings: 43 | {{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.metricRelabelings | indent 4) . }} 44 | {{- end }} 45 | {{- if .Values.kubeEtcd.serviceMonitor.relabelings }} 46 | relabelings: 47 | {{ toYaml .Values.kubeEtcd.serviceMonitor.relabelings | indent 4 }} 48 | {{- end }} 49 | {{- end }} 50 | --------------------------------------------------------------------------------